Diffstat (limited to 'ansible_collections/cisco/nxos/plugins/module_utils')
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acl_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acl_interfaces/acl_interfaces.py  82
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acls/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acls/acls.py  314
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bfd_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bfd_interfaces/bfd_interfaces.py  61
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_address_family/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_address_family/bgp_address_family.py  250
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_global/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_global/bgp_global.py  549
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_neighbor_address_family/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_neighbor_address_family/bgp_neighbor_address_family.py  377
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/facts/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/facts/facts.py  25
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hostname/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hostname/hostname.py  50
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hsrp_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hsrp_interfaces/hsrp_interfaces.py  60
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/interfaces/interfaces.py  67
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l2_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l2_interfaces/l2_interfaces.py  73
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l3_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l3_interfaces/l3_interfaces.py  83
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp/lacp.py  71
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp_interfaces/lacp_interfaces.py  76
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lag_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lag_interfaces/lag_interfaces.py  70
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_global/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_global/lldp_global.py  88
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_interfaces/lldp_interfaces.py  67
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/logging_global/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/logging_global/logging_global.py  276
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ntp_global/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ntp_global/ntp_global.py  139
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospf_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospf_interfaces/ospf_interfaces.py  142
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv2/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv2/ospfv2.py  622
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv3/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv3/ospfv3.py  488
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/prefix_lists/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/prefix_lists/prefix_lists.py  80
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/route_maps/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/route_maps/route_maps.py  412
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/snmp_server/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/snmp_server/snmp_server.py  411
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/static_routes/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/static_routes/static_routes.py  89
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/telemetry/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/telemetry/telemetry.py  115
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/vlans/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/vlans/vlans.py  64
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/cmdref/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/cmdref/telemetry/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/cmdref/telemetry/telemetry.py  147
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acl_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acl_interfaces/acl_interfaces.py  321
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acls/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acls/acls.py  674
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bfd_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bfd_interfaces/bfd_interfaces.py  311
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_address_family/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_address_family/bgp_address_family.py  253
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_global/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_global/bgp_global.py  410
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_neighbor_address_family/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_neighbor_address_family/bgp_neighbor_address_family.py  232
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hostname/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hostname/hostname.py  75
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hsrp_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hsrp_interfaces/hsrp_interfaces.py  286
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/interfaces/interfaces.py  492
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l2_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l2_interfaces/l2_interfaces.py  351
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l3_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l3_interfaces/l3_interfaces.py  545
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp/lacp.py  234
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp_interfaces/lacp_interfaces.py  323
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lag_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lag_interfaces/lag_interfaces.py  318
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_global/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_global/lldp_global.py  277
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_interfaces/lldp_interfaces.py  312
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/logging_global/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/logging_global/logging_global.py  199
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ntp_global/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ntp_global/ntp_global.py  161
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospf_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospf_interfaces/ospf_interfaces.py  204
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv2/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv2/ospfv2.py  216
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv3/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv3/ospfv3.py  230
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/prefix_lists/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/prefix_lists/prefix_lists.py  146
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/route_maps/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/route_maps/route_maps.py  192
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/snmp_server/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/snmp_server/snmp_server.py  243
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/static_routes/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/static_routes/static_routes.py  567
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/telemetry/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/telemetry/telemetry.py  593
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/vlans/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/vlans/vlans.py  334
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acl_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acl_interfaces/acl_interfaces.py  129
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acls/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acls/acls.py  327
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bfd_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bfd_interfaces/bfd_interfaces.py  104
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_address_family/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_address_family/bgp_address_family.py  142
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_global/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_global/bgp_global.py  130
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_neighbor_address_family/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_neighbor_address_family/bgp_neighbor_address_family.py  133
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/facts.py  188
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hostname/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hostname/hostname.py  70
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hsrp_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hsrp_interfaces/hsrp_interfaces.py  96
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/interfaces/interfaces.py  110
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l2_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l2_interfaces/l2_interfaces.py  104
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l3_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l3_interfaces/l3_interfaces.py  135
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp/lacp.py  89
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp_interfaces/lacp_interfaces.py  115
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lag_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lag_interfaces/lag_interfaces.py  104
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/legacy/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/legacy/base.py  793
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_global/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_global/lldp_global.py  107
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_interfaces/lldp_interfaces.py  128
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/logging_global/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/logging_global/logging_global.py  91
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ntp_global/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ntp_global/ntp_global.py  89
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospf_interfaces/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospf_interfaces/ospf_interfaces.py  94
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv2/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv2/ospfv2.py  94
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv3/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv3/ospfv3.py  91
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/prefix_lists/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/prefix_lists/prefix_lists.py  79
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/route_maps/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/route_maps/route_maps.py  74
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/snmp_server/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/snmp_server/snmp_server.py  85
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/static_routes/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/static_routes/static_routes.py  230
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/telemetry/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/telemetry/telemetry.py  185
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/vlans/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/vlans/vlans.py  197
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/nxos.py  1031
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/bgp_address_family.py  798
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/bgp_global.py  1536
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/bgp_neighbor_address_family.py  894
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/hostname.py  44
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/logging_global.py  480
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ntp_global.py  320
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ospf_interfaces.py  510
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ospfv2.py  1101
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ospfv3.py  945
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/prefix_lists.py  102
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/route_maps.py  1367
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/snmp_server.py  1550
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/telemetry/__init__.py  0
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/telemetry/telemetry.py  264
-rw-r--r--  ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/utils.py  214
194 files changed, 29316 insertions, 0 deletions
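
The tree above follows the collection's resource-module layout: argspec/ carries the generated argument specifications, config/ the classes that compute configuration changes, facts/ the parsers that populate gathered facts, rm_templates/ the CLI parse/render templates, and utils/ shared helpers. A minimal sketch of how this layout is addressed from Python, assuming the collection is installed somewhere the ansible_collections namespace is importable (the class name and its argument_spec attribute come from the generated file added later in this diff):

# Illustration only: the dotted import path mirrors
# network/nxos/argspec/<resource>/<resource>.py in the diffstat above.
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.bfd_interfaces.bfd_interfaces import (
    Bfd_interfacesArgs,
)

# Each generated *Args class exposes a plain dict named argument_spec.
print(sorted(Bfd_interfacesArgs.argument_spec))  # ['config', 'running_config', 'state']
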
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acl_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acl_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acl_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acl_interfaces/acl_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acl_interfaces/acl_interfaces.py
new file mode 100644
index 00000000..ad2d59d3
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acl_interfaces/acl_interfaces.py
@@ -0,0 +1,82 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the nxos_acl_interfaces module
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+class Acl_interfacesArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_acl_interfaces module"""
+
+ argument_spec = {
+ "config": {
+ "elements": "dict",
+ "options": {
+ "access_groups": {
+ "elements": "dict",
+ "options": {
+ "acls": {
+ "elements": "dict",
+ "options": {
+ "direction": {
+ "choices": ["in", "out"],
+ "required": True,
+ "type": "str",
+ },
+ "name": {"required": True, "type": "str"},
+ "port": {"type": "bool"},
+ },
+ "type": "list",
+ },
+ "afi": {
+ "choices": ["ipv4", "ipv6"],
+ "required": True,
+ "type": "str",
+ },
+ },
+ "type": "list",
+ },
+ "name": {"required": True, "type": "str"},
+ },
+ "type": "list",
+ },
+ "running_config": {"type": "str"},
+ "state": {
+ "choices": [
+ "deleted",
+ "gathered",
+ "merged",
+ "overridden",
+ "rendered",
+ "replaced",
+ "parsed",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
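
For orientation, a minimal sketch (not part of the committed file) of how an argspec class such as Acl_interfacesArgs is typically consumed, plus a hypothetical set of task parameters that satisfies it; every key in the sample payload is taken from the argument_spec above:

# Sketch: a module validates its parameters against the generated spec.
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.acl_interfaces.acl_interfaces import (
    Acl_interfacesArgs,
)

module = AnsibleModule(
    argument_spec=Acl_interfacesArgs.argument_spec,
    supports_check_mode=True,  # assumption; the real module defines its own flags
)

# Hypothetical parameters that pass validation: one interface, one IPv4 ACL inbound.
sample_params = {
    "config": [
        {
            "name": "Ethernet1/2",
            "access_groups": [
                {"afi": "ipv4", "acls": [{"name": "ACL1v4", "direction": "in"}]},
            ],
        },
    ],
    "state": "merged",
}
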
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acls/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acls/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acls/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acls/acls.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acls/acls.py
new file mode 100644
index 00000000..3618248b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/acls/acls.py
@@ -0,0 +1,314 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the nxos_acls module
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+class AclsArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_acls module"""
+
+ argument_spec = {
+ "config": {
+ "elements": "dict",
+ "options": {
+ "acls": {
+ "elements": "dict",
+ "options": {
+ "aces": {
+ "elements": "dict",
+ "mutually_exclusive": [["grant", "remark"]],
+ "options": {
+ "destination": {
+ "mutually_exclusive": [
+ ["address", "any", "host", "prefix"],
+ [
+ "wildcard_bits",
+ "any",
+ "host",
+ "prefix",
+ ],
+ ],
+ "options": {
+ "address": {"type": "str"},
+ "any": {"type": "bool"},
+ "host": {"type": "str"},
+ "port_protocol": {
+ "mutually_exclusive": [
+ [
+ "eq",
+ "lt",
+ "neq",
+ "gt",
+ "range",
+ ],
+ ],
+ "options": {
+ "eq": {"type": "str"},
+ "gt": {"type": "str"},
+ "lt": {"type": "str"},
+ "neq": {"type": "str"},
+ "range": {
+ "options": {
+ "end": {"type": "str"},
+ "start": {"type": "str"},
+ },
+ "required_together": [["start", "end"]],
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "prefix": {"type": "str"},
+ "wildcard_bits": {"type": "str"},
+ },
+ "required_together": [["address", "wildcard_bits"]],
+ "type": "dict",
+ },
+ "dscp": {"type": "str"},
+ "fragments": {"type": "bool"},
+ "grant": {
+ "choices": ["permit", "deny"],
+ "type": "str",
+ },
+ "log": {"type": "bool"},
+ "precedence": {"type": "str"},
+ "protocol": {"type": "str"},
+ "protocol_options": {
+ "mutually_exclusive": [["icmp", "igmp", "tcp"]],
+ "options": {
+ "icmp": {
+ "options": {
+ "administratively_prohibited": {"type": "bool"},
+ "alternate_address": {"type": "bool"},
+ "conversion_error": {"type": "bool"},
+ "dod_host_prohibited": {"type": "bool"},
+ "dod_net_prohibited": {"type": "bool"},
+ "echo": {"type": "bool"},
+ "echo_reply": {"type": "bool"},
+ "echo_request": {"type": "bool"},
+ "general_parameter_problem": {"type": "bool"},
+ "host_isolated": {"type": "bool"},
+ "host_precedence_unreachable": {"type": "bool"},
+ "host_redirect": {"type": "bool"},
+ "host_tos_redirect": {"type": "bool"},
+ "host_tos_unreachable": {"type": "bool"},
+ "host_unknown": {"type": "bool"},
+ "host_unreachable": {"type": "bool"},
+ "information_reply": {"type": "bool"},
+ "information_request": {"type": "bool"},
+ "mask_reply": {"type": "bool"},
+ "mask_request": {"type": "bool"},
+ "message_code": {"type": "int"},
+ "message_type": {"type": "int"},
+ "mobile_redirect": {"type": "bool"},
+ "net_redirect": {"type": "bool"},
+ "net_tos_redirect": {"type": "bool"},
+ "net_tos_unreachable": {"type": "bool"},
+ "net_unreachable": {"type": "bool"},
+ "network_unknown": {"type": "bool"},
+ "no_room_for_option": {"type": "bool"},
+ "option_missing": {"type": "bool"},
+ "packet_too_big": {"type": "bool"},
+ "parameter_problem": {"type": "bool"},
+ "port_unreachable": {"type": "bool"},
+ "precedence_unreachable": {"type": "bool"},
+ "protocol_unreachable": {"type": "bool"},
+ "reassembly_timeout": {"type": "bool"},
+ "redirect": {"type": "bool"},
+ "router_advertisement": {"type": "bool"},
+ "router_solicitation": {"type": "bool"},
+ "source_quench": {"type": "bool"},
+ "source_route_failed": {"type": "bool"},
+ "time_exceeded": {"type": "bool"},
+ "timestamp_reply": {"type": "bool"},
+ "timestamp_request": {"type": "bool"},
+ "traceroute": {"type": "bool"},
+ "ttl_exceeded": {"type": "bool"},
+ "unreachable": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "icmpv6": {
+ "type": "dict",
+ "options": {
+ "beyond_scope": {"type": "bool"},
+ "destination_unreachable": {
+ "type": "bool",
+ },
+ "echo_reply": {"type": "bool"},
+ "echo_request": {"type": "bool"},
+ "fragments": {"type": "bool"},
+ "header": {"type": "bool"},
+ "hop_limit": {"type": "bool"},
+ "mld_query": {"type": "bool"},
+ "mld_reduction": {"type": "bool"},
+ "mld_report": {"type": "bool"},
+ "mldv2": {"type": "bool"},
+ "nd_na": {"type": "bool"},
+ "nd_ns": {"type": "bool"},
+ "next_header": {"type": "bool"},
+ "no_admin": {"type": "bool"},
+ "no_route": {"type": "bool"},
+ "packet_too_big": {"type": "bool"},
+ "parameter_option": {
+ "type": "bool",
+ },
+ "parameter_problem": {
+ "type": "bool",
+ },
+ "port_unreachable": {
+ "type": "bool",
+ },
+ "reassembly_timeout": {
+ "type": "bool",
+ },
+ "renum_command": {"type": "bool"},
+ "renum_result": {"type": "bool"},
+ "renum_seq_number": {
+ "type": "bool",
+ },
+ "router_advertisement": {
+ "type": "bool",
+ },
+ "router_renumbering": {
+ "type": "bool",
+ },
+ "router_solicitation": {
+ "type": "bool",
+ },
+ "time_exceeded": {"type": "bool"},
+ "unreachable": {"type": "bool"},
+ "telemetry_path": {"type": "bool"},
+ "telemetry_queue": {
+ "type": "bool",
+ },
+ },
+ },
+ "igmp": {
+ "mutually_exclusive": [
+ [
+ "dvmrp",
+ "host_query",
+ "host_report",
+ ],
+ ],
+ "options": {
+ "dvmrp": {"type": "bool"},
+ "host_query": {"type": "bool"},
+ "host_report": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "tcp": {
+ "options": {
+ "ack": {"type": "bool"},
+ "established": {"type": "bool"},
+ "fin": {"type": "bool"},
+ "psh": {"type": "bool"},
+ "rst": {"type": "bool"},
+ "syn": {"type": "bool"},
+ "urg": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "remark": {"type": "str"},
+ "sequence": {"type": "int"},
+ "source": {
+ "mutually_exclusive": [
+ ["address", "any", "host", "prefix"],
+ [
+ "wildcard_bits",
+ "host",
+ "any",
+ "prefix",
+ ],
+ ],
+ "options": {
+ "address": {"type": "str"},
+ "any": {"type": "bool"},
+ "host": {"type": "str"},
+ "port_protocol": {
+ "mutually_exclusive": [
+ ["eq", "lt", "neq", "range"],
+ ["eq", "gt", "neq", "range"],
+ ],
+ "options": {
+ "eq": {"type": "str"},
+ "gt": {"type": "str"},
+ "lt": {"type": "str"},
+ "neq": {"type": "str"},
+ "range": {
+ "options": {
+ "end": {"type": "str"},
+ "start": {"type": "str"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "prefix": {"type": "str"},
+ "wildcard_bits": {"type": "str"},
+ },
+ "required_together": [["address", "wildcard_bits"]],
+ "type": "dict",
+ },
+ },
+ "type": "list",
+ },
+ "name": {"required": True, "type": "str"},
+ },
+ "type": "list",
+ },
+ "afi": {
+ "choices": ["ipv4", "ipv6"],
+ "required": True,
+ "type": "str",
+ },
+ },
+ "type": "list",
+ },
+ "running_config": {"type": "str"},
+ "state": {
+ "choices": [
+ "deleted",
+ "gathered",
+ "merged",
+ "overridden",
+ "rendered",
+ "replaced",
+ "parsed",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
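
The nested mutually_exclusive and required_together entries above are enforced per ACE. A hypothetical config element that respects them (address paired with wildcard_bits, any used instead of host/prefix on the destination, and grant and remark never set on the same ACE):

# Hypothetical entry for the "config" list defined above.
sample_acl_entry = {
    "afi": "ipv4",
    "acls": [
        {
            "name": "ACL1v4",
            "aces": [
                {
                    "sequence": 10,
                    "grant": "permit",
                    "protocol": "tcp",
                    # "address" and "wildcard_bits" satisfy required_together.
                    "source": {"address": "192.0.2.0", "wildcard_bits": "0.0.0.255"},
                    # "any" excludes address/host/prefix per mutually_exclusive.
                    "destination": {"any": True, "port_protocol": {"eq": "22"}},
                },
                # A remark-only ACE: "remark" excludes "grant".
                {"sequence": 20, "remark": "ssh from management subnet only"},
            ],
        },
    ],
}
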
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bfd_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bfd_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bfd_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bfd_interfaces/bfd_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bfd_interfaces/bfd_interfaces.py
new file mode 100644
index 00000000..6c47d2e5
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bfd_interfaces/bfd_interfaces.py
@@ -0,0 +1,61 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Cisco and/or its affiliates.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The arg spec for the nxos_bfd_interfaces module
+"""
+
+
+class Bfd_interfacesArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_bfd_interfaces module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "elements": "dict",
+ "options": {
+ "name": {"type": "str"},
+ "bfd": {"choices": ["enable", "disable"], "type": "str"},
+ "echo": {"choices": ["enable", "disable"], "type": "str"},
+ },
+ "type": "list",
+ },
+ "state": {
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "gathered",
+ "rendered",
+ "parsed",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
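
A hypothetical parameter set that satisfies this smaller spec; "bfd" and "echo" only accept the "enable"/"disable" choices declared above:

# Hypothetical task parameters for the Bfd_interfacesArgs spec.
sample_params = {
    "config": [
        {"name": "Ethernet1/1", "bfd": "enable", "echo": "disable"},
        {"name": "port-channel10", "bfd": "disable"},
    ],
    "state": "merged",
}
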
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_address_family/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_address_family/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_address_family/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_address_family/bgp_address_family.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_address_family/bgp_address_family.py
new file mode 100644
index 00000000..f77843d7
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_address_family/bgp_address_family.py
@@ -0,0 +1,250 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the
+# cli_rm_builder.
+#
+# Manually editing this file is not advised.
+#
+# To update the argspec make the desired changes
+# in the module docstring and re-run
+# cli_rm_builder.
+#
+#############################################
+
+"""
+The arg spec for the nxos_bgp_address_family module
+"""
+
+
+class Bgp_address_familyArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_bgp_address_family module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "type": "dict",
+ "options": {
+ "as_number": {"type": "str"},
+ "address_family": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "afi": {
+ "type": "str",
+ "choices": [
+ "ipv4",
+ "ipv6",
+ "link-state",
+ "vpnv4",
+ "vpnv6",
+ "l2vpn",
+ ],
+ "required": True,
+ },
+ "safi": {
+ "type": "str",
+ "choices": [
+ "unicast",
+ "multicast",
+ "mvpn",
+ "evpn",
+ ],
+ },
+ "additional_paths": {
+ "type": "dict",
+ "options": {
+ "install_backup": {"type": "bool"},
+ "receive": {"type": "bool"},
+ "selection": {
+ "type": "dict",
+ "options": {"route_map": {"type": "str"}},
+ },
+ "send": {"type": "bool"},
+ },
+ },
+ "advertise_l2vpn_evpn": {"type": "bool"},
+ "advertise_pip": {"type": "bool"},
+ "advertise_system_mac": {"type": "bool"},
+ "allow_vni_in_ethertag": {"type": "bool"},
+ "aggregate_address": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "prefix": {"type": "str"},
+ "advertise_map": {"type": "str"},
+ "as_set": {"type": "bool"},
+ "attribute_map": {"type": "str"},
+ "summary_only": {"type": "bool"},
+ "suppress_map": {"type": "str"},
+ },
+ },
+ "client_to_client": {
+ "type": "dict",
+ "options": {"no_reflection": {"type": "bool"}},
+ },
+ "dampen_igp_metric": {"type": "int"},
+ "dampening": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "decay_half_life": {"type": "int"},
+ "start_reuse_route": {"type": "int"},
+ "start_suppress_route": {"type": "int"},
+ "max_suppress_time": {"type": "int"},
+ "route_map": {"type": "str"},
+ },
+ },
+ "default_information": {
+ "type": "dict",
+ "options": {"originate": {"type": "bool"}},
+ },
+ "default_metric": {"type": "int"},
+ "distance": {
+ "type": "dict",
+ "options": {
+ "ebgp_routes": {"type": "int"},
+ "ibgp_routes": {"type": "int"},
+ "local_routes": {"type": "int"},
+ },
+ },
+ "export_gateway_ip": {"type": "bool"},
+ "inject_map": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "route_map": {"type": "str"},
+ "exist_map": {"type": "str"},
+ "copy_attributes": {"type": "bool"},
+ },
+ },
+ "maximum_paths": {
+ "type": "dict",
+ "options": {
+ "parallel_paths": {"type": "int"},
+ "ibgp": {
+ "type": "dict",
+ "options": {"parallel_paths": {"type": "int"}},
+ },
+ "eibgp": {
+ "type": "dict",
+ "options": {"parallel_paths": {"type": "int"}},
+ },
+ "local": {
+ "type": "dict",
+ "options": {"parallel_paths": {"type": "int"}},
+ },
+ "mixed": {
+ "type": "dict",
+ "options": {"parallel_paths": {"type": "int"}},
+ },
+ },
+ },
+ "networks": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "prefix": {"type": "str"},
+ "route_map": {"type": "str"},
+ },
+ },
+ "nexthop": {
+ "type": "dict",
+ "options": {
+ "route_map": {"type": "str"},
+ "trigger_delay": {
+ "type": "dict",
+ "options": {
+ "critical_delay": {"type": "int"},
+ "non_critical_delay": {"type": "int"},
+ },
+ },
+ },
+ },
+ "redistribute": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "protocol": {
+ "type": "str",
+ "choices": [
+ "am",
+ "direct",
+ "eigrp",
+ "isis",
+ "lisp",
+ "ospf",
+ "ospfv3",
+ "rip",
+ "static",
+ "hmm",
+ ],
+ "required": True,
+ },
+ "id": {"type": "str"},
+ "route_map": {"type": "str", "required": True},
+ },
+ },
+ "retain": {
+ "type": "dict",
+ "options": {
+ "route_target": {
+ "type": "dict",
+ "options": {
+ "retain_all": {"type": "bool"},
+ "route_map": {"type": "str"},
+ },
+ },
+ },
+ },
+ "suppress_inactive": {"type": "bool"},
+ "table_map": {
+ "type": "dict",
+ "options": {
+ "name": {"type": "str", "required": True},
+ "filter": {"type": "bool"},
+ },
+ },
+ "timers": {
+ "type": "dict",
+ "options": {
+ "bestpath_defer": {
+ "type": "dict",
+ "options": {
+ "defer_time": {"type": "int"},
+ "maximum_defer_time": {"type": "int"},
+ },
+ },
+ },
+ },
+ "wait_igp_convergence": {"type": "bool"},
+ "vrf": {"type": "str"},
+ },
+ },
+ },
+ },
+ "state": {
+ "type": "str",
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "parsed",
+ "gathered",
+ "rendered",
+ ],
+ "default": "merged",
+ },
+ } # pylint: disable=C0301
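
A hypothetical value for the "config" option defined above: one IPv4 unicast address family under an AS, exercising a few of the nested dictionaries (aggregate_address, maximum_paths, networks):

# Hypothetical "config" payload for the Bgp_address_familyArgs spec.
sample_config = {
    "as_number": "65536",
    "address_family": [
        {
            "afi": "ipv4",
            "safi": "unicast",
            "aggregate_address": [{"prefix": "192.0.2.0/24", "summary_only": True}],
            "maximum_paths": {"parallel_paths": 4, "ibgp": {"parallel_paths": 4}},
            "networks": [{"prefix": "203.0.113.0/24", "route_map": "RMAP1"}],
        },
    ],
}
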
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_global/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_global/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_global/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_global/bgp_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_global/bgp_global.py
new file mode 100644
index 00000000..d47f7e42
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_global/bgp_global.py
@@ -0,0 +1,549 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the
+# cli_rm_builder.
+#
+# Manually editing this file is not advised.
+#
+# To update the argspec make the desired changes
+# in the module docstring and re-run
+# cli_rm_builder.
+#
+#############################################
+
+"""
+The arg spec for the nxos_bgp_global module
+"""
+
+
+class Bgp_globalArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_bgp_global module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "type": "dict",
+ "options": {
+ "as_number": {"type": "str"},
+ "affinity_group": {
+ "type": "dict",
+ "options": {"group_id": {"type": "int"}},
+ },
+ "bestpath": {
+ "type": "dict",
+ "options": {
+ "always_compare_med": {"type": "bool"},
+ "as_path": {
+ "type": "dict",
+ "options": {
+ "ignore": {"type": "bool"},
+ "multipath_relax": {"type": "bool"},
+ },
+ },
+ "compare_neighborid": {"type": "bool"},
+ "compare_routerid": {"type": "bool"},
+ "cost_community_ignore": {"type": "bool"},
+ "igp_metric_ignore": {"type": "bool"},
+ "med": {
+ "type": "dict",
+ "options": {
+ "confed": {"type": "bool"},
+ "missing_as_worst": {"type": "bool"},
+ "non_deterministic": {"type": "bool"},
+ },
+ },
+ },
+ },
+ "cluster_id": {"type": "str"},
+ "confederation": {
+ "type": "dict",
+ "options": {
+ "identifier": {"type": "str"},
+ "peers": {"type": "list", "elements": "str"},
+ },
+ },
+ "disable_policy_batching": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "ipv4": {
+ "type": "dict",
+ "options": {"prefix_list": {"type": "str"}},
+ },
+ "ipv6": {
+ "type": "dict",
+ "options": {"prefix_list": {"type": "str"}},
+ },
+ "nexthop": {"type": "bool"},
+ },
+ },
+ "dynamic_med_interval": {"type": "int"},
+ "enforce_first_as": {"type": "bool"},
+ "enhanced_error": {"type": "bool"},
+ "fabric_soo": {"type": "str"},
+ "fast_external_fallover": {"type": "bool"},
+ "flush_routes": {"type": "bool"},
+ "graceful_restart": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "restart_time": {"type": "int"},
+ "stalepath_time": {"type": "int"},
+ "helper": {"type": "bool"},
+ },
+ },
+ "graceful_shutdown": {
+ "type": "dict",
+ "options": {
+ "activate": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "route_map": {"type": "str"},
+ },
+ },
+ "aware": {"type": "bool"},
+ },
+ },
+ "isolate": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "include_local": {"type": "bool"},
+ },
+ },
+ "log_neighbor_changes": {"type": "bool"},
+ "maxas_limit": {"type": "int"},
+ "neighbors": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "neighbor_address": {"type": "str", "required": True},
+ "bfd": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "singlehop": {"type": "bool"},
+ "multihop": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "interval": {
+ "type": "dict",
+ "options": {
+ "tx_interval": {"type": "int"},
+ "min_rx_interval": {"type": "int"},
+ "multiplier": {"type": "int"},
+ },
+ },
+ },
+ },
+ },
+ },
+ "neighbor_affinity_group": {
+ "type": "dict",
+ "options": {"group_id": {"type": "int"}},
+ },
+ "bmp_activate_server": {"type": "int"},
+ "capability": {
+ "type": "dict",
+ "options": {"suppress_4_byte_as": {"type": "bool"}},
+ },
+ "description": {"type": "str"},
+ "disable_connected_check": {"type": "bool"},
+ "dont_capability_negotiate": {"type": "bool"},
+ "dscp": {"type": "str"},
+ "dynamic_capability": {"type": "bool"},
+ "ebgp_multihop": {"type": "int"},
+ "graceful_shutdown": {
+ "type": "dict",
+ "options": {
+ "activate": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "route_map": {"type": "str"},
+ },
+ },
+ },
+ },
+ "inherit": {
+ "type": "dict",
+ "options": {
+ "peer": {"type": "str"},
+ "peer_session": {"type": "str"},
+ },
+ },
+ "local_as": {"type": "str"},
+ "log_neighbor_changes": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "disable": {"type": "bool"},
+ },
+ },
+ "low_memory": {
+ "type": "dict",
+ "options": {"exempt": {"type": "bool"}},
+ },
+ "password": {
+ "type": "dict",
+ "no_log": False,
+ "options": {
+ "encryption": {"type": "int"},
+ "key": {"type": "str", "no_log": True},
+ },
+ },
+ "path_attribute": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "action": {
+ "type": "str",
+ "choices": [
+ "discard",
+ "treat-as-withdraw",
+ ],
+ },
+ "type": {"type": "int"},
+ "range": {
+ "type": "dict",
+ "options": {
+ "start": {"type": "int"},
+ "end": {"type": "int"},
+ },
+ },
+ },
+ },
+ "peer_type": {
+ "type": "str",
+ "choices": [
+ "fabric-border-leaf",
+ "fabric-external",
+ ],
+ },
+ "remote_as": {"type": "str"},
+ "remove_private_as": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "replace_as": {"type": "bool"},
+ "all": {"type": "bool"},
+ },
+ },
+ "shutdown": {"type": "bool"},
+ "timers": {
+ "type": "dict",
+ "options": {
+ "keepalive": {"type": "int"},
+ "holdtime": {"type": "int"},
+ },
+ },
+ "transport": {
+ "type": "dict",
+ "options": {
+ "connection_mode": {
+ "type": "dict",
+ "options": {"passive": {"type": "bool"}},
+ },
+ },
+ },
+ "ttl_security": {
+ "type": "dict",
+ "options": {"hops": {"type": "int"}},
+ },
+ "update_source": {"type": "str"},
+ },
+ },
+ "neighbor_down": {
+ "type": "dict",
+ "options": {"fib_accelerate": {"type": "bool"}},
+ },
+ "nexthop": {
+ "type": "dict",
+ "options": {"suppress_default_resolution": {"type": "bool"}},
+ },
+ "rd": {
+ "type": "dict",
+ "options": {
+ "dual": {"type": "bool"},
+ "id": {"type": "int"},
+ },
+ },
+ "reconnect_interval": {"type": "int"},
+ "router_id": {"type": "str"},
+ "shutdown": {"type": "bool"},
+ "suppress_fib_pending": {"type": "bool"},
+ "timers": {
+ "type": "dict",
+ "options": {
+ "bestpath_limit": {
+ "type": "dict",
+ "options": {
+ "timeout": {"type": "int"},
+ "always": {"type": "bool"},
+ },
+ },
+ "bgp": {
+ "type": "dict",
+ "options": {
+ "keepalive": {"type": "int"},
+ "holdtime": {"type": "int"},
+ },
+ },
+ "prefix_peer_timeout": {"type": "int"},
+ "prefix_peer_wait": {"type": "int"},
+ },
+ },
+ "vrfs": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "vrf": {"type": "str"},
+ "allocate_index": {"type": "int"},
+ "bestpath": {
+ "type": "dict",
+ "options": {
+ "always_compare_med": {"type": "bool"},
+ "as_path": {
+ "type": "dict",
+ "options": {
+ "ignore": {"type": "bool"},
+ "multipath_relax": {"type": "bool"},
+ },
+ },
+ "compare_neighborid": {"type": "bool"},
+ "compare_routerid": {"type": "bool"},
+ "cost_community_ignore": {"type": "bool"},
+ "igp_metric_ignore": {"type": "bool"},
+ "med": {
+ "type": "dict",
+ "options": {
+ "confed": {"type": "bool"},
+ "missing_as_worst": {"type": "bool"},
+ "non_deterministic": {"type": "bool"},
+ },
+ },
+ },
+ },
+ "cluster_id": {"type": "str"},
+ "confederation": {
+ "type": "dict",
+ "options": {
+ "identifier": {"type": "str"},
+ "peers": {"type": "list", "elements": "str"},
+ },
+ },
+ "graceful_restart": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "restart_time": {"type": "int"},
+ "stalepath_time": {"type": "int"},
+ "helper": {"type": "bool"},
+ },
+ },
+ "local_as": {"type": "str"},
+ "log_neighbor_changes": {"type": "bool"},
+ "maxas_limit": {"type": "int"},
+ "neighbors": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "neighbor_address": {
+ "type": "str",
+ "required": True,
+ },
+ "bfd": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "singlehop": {"type": "bool"},
+ "multihop": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "interval": {
+ "type": "dict",
+ "options": {
+ "tx_interval": {"type": "int"},
+ "min_rx_interval": {"type": "int"},
+ "multiplier": {"type": "int"},
+ },
+ },
+ },
+ },
+ },
+ },
+ "neighbor_affinity_group": {
+ "type": "dict",
+ "options": {"group_id": {"type": "int"}},
+ },
+ "bmp_activate_server": {"type": "int"},
+ "capability": {
+ "type": "dict",
+ "options": {"suppress_4_byte_as": {"type": "bool"}},
+ },
+ "description": {"type": "str"},
+ "disable_connected_check": {"type": "bool"},
+ "dont_capability_negotiate": {"type": "bool"},
+ "dscp": {"type": "str"},
+ "dynamic_capability": {"type": "bool"},
+ "ebgp_multihop": {"type": "int"},
+ "graceful_shutdown": {
+ "type": "dict",
+ "options": {
+ "activate": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "route_map": {"type": "str"},
+ },
+ },
+ },
+ },
+ "inherit": {
+ "type": "dict",
+ "options": {
+ "peer": {"type": "str"},
+ "peer_session": {"type": "str"},
+ },
+ },
+ "local_as": {"type": "str"},
+ "log_neighbor_changes": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "disable": {"type": "bool"},
+ },
+ },
+ "low_memory": {
+ "type": "dict",
+ "options": {"exempt": {"type": "bool"}},
+ },
+ "password": {
+ "type": "dict",
+ "no_log": False,
+ "options": {
+ "encryption": {"type": "int"},
+ "key": {"type": "str", "no_log": True},
+ },
+ },
+ "path_attribute": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "action": {
+ "type": "str",
+ "choices": [
+ "discard",
+ "treat-as-withdraw",
+ ],
+ },
+ "type": {"type": "int"},
+ "range": {
+ "type": "dict",
+ "options": {
+ "start": {"type": "int"},
+ "end": {"type": "int"},
+ },
+ },
+ },
+ },
+ "peer_type": {
+ "type": "str",
+ "choices": [
+ "fabric-border-leaf",
+ "fabric-external",
+ ],
+ },
+ "remote_as": {"type": "str"},
+ "remove_private_as": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "replace_as": {"type": "bool"},
+ "all": {"type": "bool"},
+ },
+ },
+ "shutdown": {"type": "bool"},
+ "timers": {
+ "type": "dict",
+ "options": {
+ "keepalive": {"type": "int"},
+ "holdtime": {"type": "int"},
+ },
+ },
+ "transport": {
+ "type": "dict",
+ "options": {
+ "connection_mode": {
+ "type": "dict",
+ "options": {"passive": {"type": "bool"}},
+ },
+ },
+ },
+ "ttl_security": {
+ "type": "dict",
+ "options": {"hops": {"type": "int"}},
+ },
+ "update_source": {"type": "str"},
+ },
+ },
+ "neighbor_down": {
+ "type": "dict",
+ "options": {"fib_accelerate": {"type": "bool"}},
+ },
+ "reconnect_interval": {"type": "int"},
+ "router_id": {"type": "str"},
+ "timers": {
+ "type": "dict",
+ "options": {
+ "bestpath_limit": {
+ "type": "dict",
+ "options": {
+ "timeout": {"type": "int"},
+ "always": {"type": "bool"},
+ },
+ },
+ "bgp": {
+ "type": "dict",
+ "options": {
+ "keepalive": {"type": "int"},
+ "holdtime": {"type": "int"},
+ },
+ },
+ "prefix_peer_timeout": {"type": "int"},
+ "prefix_peer_wait": {"type": "int"},
+ },
+ },
+ },
+ },
+ },
+ },
+ "state": {
+ "type": "str",
+ "choices": [
+ "merged",
+ "replaced",
+ "deleted",
+ "purged",
+ "parsed",
+ "gathered",
+ "rendered",
+ ],
+ "default": "merged",
+ },
+ } # pylint: disable=C0301
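
A hypothetical value for the "config" option defined above: global best-path settings plus one neighbor; "neighbor_address" is the only required key inside a neighbors entry:

# Hypothetical "config" payload for the Bgp_globalArgs spec.
sample_config = {
    "as_number": "65536",
    "router_id": "192.0.2.1",
    "log_neighbor_changes": True,
    "bestpath": {"as_path": {"multipath_relax": True}, "compare_routerid": True},
    "neighbors": [
        {
            "neighbor_address": "198.51.100.2",
            "remote_as": "65537",
            "description": "to-spine-1",
            "timers": {"keepalive": 30, "holdtime": 90},
        },
    ],
}
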
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_neighbor_address_family/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_neighbor_address_family/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_neighbor_address_family/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_neighbor_address_family/bgp_neighbor_address_family.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_neighbor_address_family/bgp_neighbor_address_family.py
new file mode 100644
index 00000000..4c97d294
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/bgp_neighbor_address_family/bgp_neighbor_address_family.py
@@ -0,0 +1,377 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the
+# cli_rm_builder.
+#
+# Manually editing this file is not advised.
+#
+# To update the argspec make the desired changes
+# in the module docstring and re-run
+# cli_rm_builder.
+#
+#############################################
+
+"""
+The arg spec for the nxos_bgp_neighbor_address_family module
+"""
+
+
+class Bgp_neighbor_address_familyArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_bgp_neighbor_address_family module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "type": "dict",
+ "options": {
+ "as_number": {"type": "str"},
+ "neighbors": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "neighbor_address": {"type": "str", "required": True},
+ "address_family": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "afi": {
+ "type": "str",
+ "choices": [
+ "ipv4",
+ "ipv6",
+ "link-state",
+ "vpnv4",
+ "vpnv6",
+ "l2vpn",
+ ],
+ "required": True,
+ },
+ "safi": {
+ "type": "str",
+ "choices": [
+ "unicast",
+ "multicast",
+ "mvpn",
+ "evpn",
+ ],
+ },
+ "advertise_map": {
+ "type": "dict",
+ "options": {
+ "route_map": {
+ "type": "str",
+ "required": True,
+ },
+ "exist_map": {"type": "str"},
+ "non_exist_map": {"type": "str"},
+ },
+ },
+ "advertisement_interval": {"type": "int"},
+ "allowas_in": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "max_occurences": {"type": "int"},
+ },
+ },
+ "as_override": {"type": "bool"},
+ "capability": {
+ "type": "dict",
+ "options": {
+ "additional_paths": {
+ "type": "dict",
+ "options": {
+ "receive": {
+ "type": "str",
+ "choices": [
+ "enable",
+ "disable",
+ ],
+ },
+ "send": {
+ "type": "str",
+ "choices": [
+ "enable",
+ "disable",
+ ],
+ },
+ },
+ },
+ },
+ },
+ "default_originate": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "route_map": {"type": "str"},
+ },
+ },
+ "disable_peer_as_check": {"type": "bool"},
+ "filter_list": {
+ "type": "dict",
+ "options": {
+ "inbound": {"type": "str"},
+ "outbound": {"type": "str"},
+ },
+ },
+ "inherit": {
+ "type": "dict",
+ "options": {
+ "template": {"type": "str"},
+ "sequence": {"type": "int"},
+ },
+ },
+ "maximum_prefix": {
+ "type": "dict",
+ "options": {
+ "max_prefix_limit": {"type": "int"},
+ "generate_warning_threshold": {"type": "int"},
+ "restart_interval": {"type": "int"},
+ "warning_only": {"type": "bool"},
+ },
+ },
+ "next_hop_self": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "all_routes": {"type": "bool"},
+ },
+ },
+ "next_hop_third_party": {"type": "bool"},
+ "prefix_list": {
+ "type": "dict",
+ "options": {
+ "inbound": {"type": "str"},
+ "outbound": {"type": "str"},
+ },
+ },
+ "rewrite_evpn_rt_asn": {"type": "bool"},
+ "route_map": {
+ "type": "dict",
+ "options": {
+ "inbound": {"type": "str"},
+ "outbound": {"type": "str"},
+ },
+ },
+ "route_reflector_client": {"type": "bool"},
+ "send_community": {
+ "type": "dict",
+ "mutually_exclusive": [
+ ["both", "set"],
+ ["extended", "both"],
+ ["standard", "both"],
+ ["standard", "set"],
+ ],
+ "options": {
+ "set": {"type": "bool"},
+ "extended": {"type": "bool"},
+ "standard": {"type": "bool"},
+ "both": {"type": "bool"},
+ },
+ },
+ "soft_reconfiguration_inbound": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "always": {"type": "bool"},
+ },
+ },
+ "soo": {"type": "str"},
+ "suppress_inactive": {"type": "bool"},
+ "unsuppress_map": {"type": "str"},
+ "weight": {"type": "int"},
+ },
+ },
+ },
+ },
+ "vrfs": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "vrf": {"type": "str"},
+ "neighbors": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "neighbor_address": {
+ "type": "str",
+ "required": True,
+ },
+ "address_family": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "afi": {
+ "type": "str",
+ "choices": [
+ "ipv4",
+ "ipv6",
+ "link-state",
+ "vpnv4",
+ "vpnv6",
+ "l2vpn",
+ ],
+ "required": True,
+ },
+ "safi": {
+ "type": "str",
+ "choices": [
+ "unicast",
+ "multicast",
+ "mvpn",
+ "evpn",
+ ],
+ },
+ "advertise_map": {
+ "type": "dict",
+ "options": {
+ "route_map": {
+ "type": "str",
+ "required": True,
+ },
+ "exist_map": {"type": "str"},
+ "non_exist_map": {"type": "str"},
+ },
+ },
+ "advertisement_interval": {"type": "int"},
+ "allowas_in": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "max_occurences": {"type": "int"},
+ },
+ },
+ "as_override": {"type": "bool"},
+ "capability": {
+ "type": "dict",
+ "options": {
+ "additional_paths": {
+ "type": "dict",
+ "options": {
+ "receive": {
+ "type": "str",
+ "choices": [
+ "enable",
+ "disable",
+ ],
+ },
+ "send": {
+ "type": "str",
+ "choices": [
+ "enable",
+ "disable",
+ ],
+ },
+ },
+ },
+ },
+ },
+ "default_originate": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "route_map": {"type": "str"},
+ },
+ },
+ "disable_peer_as_check": {"type": "bool"},
+ "filter_list": {
+ "type": "dict",
+ "options": {
+ "inbound": {"type": "str"},
+ "outbound": {"type": "str"},
+ },
+ },
+ "inherit": {
+ "type": "dict",
+ "options": {
+ "template": {"type": "str"},
+ "sequence": {"type": "int"},
+ },
+ },
+ "maximum_prefix": {
+ "type": "dict",
+ "options": {
+ "max_prefix_limit": {"type": "int"},
+ "generate_warning_threshold": {"type": "int"},
+ "restart_interval": {"type": "int"},
+ "warning_only": {"type": "bool"},
+ },
+ },
+ "next_hop_self": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "all_routes": {"type": "bool"},
+ },
+ },
+ "next_hop_third_party": {"type": "bool"},
+ "prefix_list": {
+ "type": "dict",
+ "options": {
+ "inbound": {"type": "str"},
+ "outbound": {"type": "str"},
+ },
+ },
+ "rewrite_evpn_rt_asn": {"type": "bool"},
+ "route_map": {
+ "type": "dict",
+ "options": {
+ "inbound": {"type": "str"},
+ "outbound": {"type": "str"},
+ },
+ },
+ "route_reflector_client": {"type": "bool"},
+ "send_community": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "extended": {"type": "bool"},
+ "standard": {"type": "bool"},
+ "both": {"type": "bool"},
+ },
+ },
+ "soft_reconfiguration_inbound": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "always": {"type": "bool"},
+ },
+ },
+ "soo": {"type": "str"},
+ "suppress_inactive": {"type": "bool"},
+ "unsuppress_map": {"type": "str"},
+ "weight": {"type": "int"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "state": {
+ "type": "str",
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "parsed",
+ "gathered",
+ "rendered",
+ ],
+ "default": "merged",
+ },
+ } # pylint: disable=C0301
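The spec above accepts deeply nested data. Below is a minimal sketch of a valid "config" payload, using hypothetical addresses and limits; note that send_community declares its mutually exclusive pairs only in this top-level neighbors branch, while the vrfs branch repeats the same options without that constraint.

    # Illustrative payload for the "config" option; all values are hypothetical.
    config = {
        "as_number": "65535",
        "neighbors": [
            {
                "neighbor_address": "192.0.2.1",
                "address_family": [
                    {
                        "afi": "ipv4",
                        "safi": "unicast",
                        "send_community": {"both": True},
                        "maximum_prefix": {"max_prefix_limit": 12, "warning_only": True},
                    },
                ],
            },
        ],
    }

    # The pairs the argspec marks as mutually exclusive under send_community.
    exclusive = [["both", "set"], ["extended", "both"], ["standard", "both"], ["standard", "set"]]
    chosen = config["neighbors"][0]["address_family"][0]["send_community"]
    assert all(sum(1 for k in pair if chosen.get(k)) <= 1 for pair in exclusive)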
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/facts/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/facts/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/facts/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/facts/facts.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/facts/facts.py
new file mode 100644
index 00000000..ae12a8d6
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/facts/facts.py
@@ -0,0 +1,25 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+"""
+The arg spec for the nxos facts module.
+"""
+
+
+class FactsArgs(object):
+ """The arg spec for the nxos facts module"""
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ "gather_subset": dict(default=["min"], type="list", elements="str"),
+ "gather_network_resources": dict(type="list", elements="str"),
+ "available_network_resources": {"type": "bool", "default": False},
+ }
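A quick sketch of the corresponding task parameters as a plain dict; the resource names are hypothetical examples.

    facts_params = {
        "gather_subset": ["min"],                         # default per the spec
        "gather_network_resources": ["hostname", "l3_interfaces"],
        "available_network_resources": False,             # default per the spec
    }
    # elements="str" means each list entry must be a string
    assert all(isinstance(x, str) for x in facts_params["gather_network_resources"])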
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hostname/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hostname/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hostname/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hostname/hostname.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hostname/hostname.py
new file mode 100644
index 00000000..3a712071
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hostname/hostname.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the
+# cli_rm_builder.
+#
+# Manually editing this file is not advised.
+#
+# To update the argspec make the desired changes
+# in the module docstring and re-run
+# cli_rm_builder.
+#
+#############################################
+
+"""
+The arg spec for the nxos_hostname module
+"""
+
+
+class HostnameArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_hostname module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {"type": "dict", "options": {"hostname": {"type": "str"}}},
+ "state": {
+ "type": "str",
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "parsed",
+ "gathered",
+ "rendered",
+ ],
+ "default": "merged",
+ },
+ } # pylint: disable=C0301
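Because the spec is a plain dict, its defaults and choices can be exercised without Ansible. The helper below is only a rough sketch of the top-level default filling and choice checking the module framework performs (nested options are ignored), and the hostname value is hypothetical.

    def apply_defaults_and_check(spec, params):
        """Fill top-level defaults and enforce choices; nested 'options' are not handled."""
        out = dict(params)
        for name, rules in spec.items():
            if name not in out and "default" in rules:
                out[name] = rules["default"]
            value = out.get(name)
            if value is not None and "choices" in rules and value not in rules["choices"]:
                raise ValueError("%s must be one of %s" % (name, rules["choices"]))
        return out

    hostname_spec = {
        "running_config": {"type": "str"},
        "config": {"type": "dict", "options": {"hostname": {"type": "str"}}},
        "state": {
            "type": "str",
            "choices": ["merged", "replaced", "overridden", "deleted", "parsed", "gathered", "rendered"],
            "default": "merged",
        },
    }
    params = apply_defaults_and_check(hostname_spec, {"config": {"hostname": "nxos-101"}})
    assert params["state"] == "merged"  # filled from the spec default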
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hsrp_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hsrp_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hsrp_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hsrp_interfaces/hsrp_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hsrp_interfaces/hsrp_interfaces.py
new file mode 100644
index 00000000..ac302636
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/hsrp_interfaces/hsrp_interfaces.py
@@ -0,0 +1,60 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Cisco and/or its affiliates.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The arg spec for the nxos_hsrp_interfaces module
+"""
+
+
+class Hsrp_interfacesArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_hsrp_interfaces module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "name": {"type": "str"},
+ "bfd": {"choices": ["enable", "disable"], "type": "str"},
+ },
+ },
+ "state": {
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "rendered",
+ "gathered",
+ "parsed",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
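An illustrative config list for this spec; the interface names are hypothetical, and bfd is restricted to the two declared choices.

    hsrp_config = [
        {"name": "Ethernet1/1", "bfd": "enable"},
        {"name": "Ethernet1/2", "bfd": "disable"},
    ]
    assert all(item["bfd"] in ("enable", "disable") for item in hsrp_config)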
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/interfaces/interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/interfaces/interfaces.py
new file mode 100644
index 00000000..ba8d7923
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/interfaces/interfaces.py
@@ -0,0 +1,67 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the nxos_interfaces module
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+class InterfacesArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_interfaces module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "elements": "dict",
+ "options": {
+ "description": {"type": "str"},
+ "duplex": {"choices": ["full", "half", "auto"], "type": "str"},
+ "enabled": {"type": "bool"},
+ "fabric_forwarding_anycast_gateway": {"type": "bool"},
+ "ip_forward": {"type": "bool"},
+ "mode": {"choices": ["layer2", "layer3"], "type": "str"},
+ "mtu": {"type": "str"},
+ "name": {"required": True, "type": "str"},
+ "speed": {"type": "str"},
+ },
+ "type": "list",
+ },
+ "state": {
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "gathered",
+ "rendered",
+ "parsed",
+ "purged",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
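One detail worth showing: mtu and speed are typed as strings here, not integers. A hypothetical entry:

    interfaces_config = [
        {
            "name": "Ethernet1/1",           # required
            "description": "uplink to core",
            "enabled": True,
            "mode": "layer3",                # one of layer2/layer3
            "mtu": "9216",                   # str in this spec, not int
            "speed": "10000",                # also str
        },
    ]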
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l2_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l2_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l2_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l2_interfaces/l2_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l2_interfaces/l2_interfaces.py
new file mode 100644
index 00000000..046511c7
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l2_interfaces/l2_interfaces.py
@@ -0,0 +1,73 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the nxos_l2_interfaces module
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+class L2_interfacesArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_l2_interfaces module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "elements": "dict",
+ "options": {
+ "access": {
+ "options": {"vlan": {"type": "int"}},
+ "type": "dict",
+ },
+ "mode": {
+ "type": "str",
+ "choices": ["access", "dot1q-tunnel", "trunk", "fex-fabric", "fabricpath"],
+ },
+ "name": {"required": True, "type": "str"},
+ "trunk": {
+ "options": {
+ "allowed_vlans": {"type": "str"},
+ "native_vlan": {"type": "int"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "list",
+ },
+ "state": {
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "rendered",
+ "parsed",
+ "gathered",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
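A hypothetical pair of entries showing the suboptions: trunk.allowed_vlans is a free-form string (so a range expression like the one below is representable), while trunk.native_vlan and access.vlan are integers.

    l2_config = [
        {
            "name": "Ethernet1/5",
            "mode": "trunk",
            "trunk": {"allowed_vlans": "10,20,30-40", "native_vlan": 10},
        },
        {"name": "Ethernet1/6", "mode": "access", "access": {"vlan": 100}},
    ]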
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l3_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l3_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l3_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l3_interfaces/l3_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l3_interfaces/l3_interfaces.py
new file mode 100644
index 00000000..7a5163e0
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/l3_interfaces/l3_interfaces.py
@@ -0,0 +1,83 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the nxos_l3_interfaces module
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+class L3_interfacesArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_l3_interfaces module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "elements": "dict",
+ "options": {
+ "dot1q": {"type": "int"},
+ "ipv4": {
+ "elements": "dict",
+ "options": {
+ "address": {"type": "str"},
+ "secondary": {"type": "bool"},
+ "tag": {"type": "int"},
+ },
+ "type": "list",
+ },
+ "ipv6": {
+ "elements": "dict",
+ "options": {
+ "address": {"type": "str"},
+ "tag": {"type": "int"},
+ },
+ "type": "list",
+ },
+ "name": {"required": True, "type": "str"},
+ "redirects": {"type": "bool"},
+ "ipv6_redirects": {"type": "bool"},
+ "unreachables": {"type": "bool"},
+ "evpn_multisite_tracking": {
+ "type": "str",
+ "choices": ["fabric-tracking", "dci-tracking"],
+ },
+ },
+ "type": "list",
+ },
+ "state": {
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "gathered",
+ "rendered",
+ "parsed",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
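A hypothetical entry exercising the ipv4/ipv6 lists; secondary marks an additional address and tag is an integer route tag.

    l3_config = [
        {
            "name": "Ethernet1/10",
            "ipv4": [
                {"address": "192.0.2.1/24"},
                {"address": "198.51.100.1/24", "secondary": True, "tag": 50},
            ],
            "ipv6": [{"address": "2001:db8::1/64"}],
            "redirects": False,
        },
    ]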
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp/lacp.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp/lacp.py
new file mode 100644
index 00000000..3c2ac06a
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp/lacp.py
@@ -0,0 +1,71 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the nxos_lacp module
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+class LacpArgs(object):
+ """The arg spec for the nxos_lacp module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "options": {
+ "system": {
+ "options": {
+ "mac": {
+ "options": {
+ "address": {"type": "str"},
+ "role": {
+ "choices": ["primary", "secondary"],
+ "type": "str",
+ },
+ },
+ "type": "dict",
+ },
+ "priority": {"type": "int"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "state": {
+ "choices": [
+ "merged",
+ "replaced",
+ "deleted",
+ "gathered",
+ "rendered",
+ "parsed",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ }
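Unlike the interface modules, this config is a single nested dict. A hypothetical value (the MAC string is illustrative only):

    lacp_config = {
        "system": {
            "mac": {"address": "000a.000b.000c", "role": "primary"},
            "priority": 32768,
        }
    }
    assert lacp_config["system"]["mac"]["role"] in ("primary", "secondary")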
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp_interfaces/lacp_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp_interfaces/lacp_interfaces.py
new file mode 100644
index 00000000..119432bf
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lacp_interfaces/lacp_interfaces.py
@@ -0,0 +1,76 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the nxos_lacp_interfaces module
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+class Lacp_interfacesArgs(object):
+ """The arg spec for the nxos_lacp_interfaces module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "elements": "dict",
+ "options": {
+ "convergence": {
+ "options": {
+ "graceful": {"type": "bool"},
+ "vpc": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "links": {
+ "options": {
+ "max": {"type": "int"},
+ "min": {"type": "int"},
+ },
+ "type": "dict",
+ },
+ "mode": {"choices": ["delay"], "type": "str"},
+ "name": {"required": True, "type": "str"},
+ "port_priority": {"type": "int"},
+ "rate": {"choices": ["fast", "normal"], "type": "str"},
+ "suspend_individual": {"type": "bool"},
+ },
+ "type": "list",
+ },
+ "state": {
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "rendered",
+ "gathered",
+ "parsed",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ }
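Hypothetical entries for this spec, split between a port-channel (convergence and link bounds) and a member interface (priority and rate); the split is illustrative, the spec itself does not enforce it.

    lacp_interfaces_config = [
        {"name": "port-channel10", "convergence": {"graceful": True}, "links": {"min": 2, "max": 8}},
        {"name": "Ethernet1/20", "port_priority": 100, "rate": "fast"},
    ]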
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lag_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lag_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lag_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lag_interfaces/lag_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lag_interfaces/lag_interfaces.py
new file mode 100644
index 00000000..12330b84
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lag_interfaces/lag_interfaces.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the nxos_lag_interfaces module
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+class Lag_interfacesArgs(object):
+ """The arg spec for the nxos_lag_interfaces module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "elements": "dict",
+ "options": {
+ "members": {
+ "elements": "dict",
+ "options": {
+ "member": {"type": "str"},
+ "mode": {
+ "type": "str",
+ "choices": ["active", "on", "passive"],
+ },
+ "force": {"type": "bool"},
+ },
+ "type": "list",
+ },
+ "name": {"required": True, "type": "str"},
+ },
+ "type": "list",
+ },
+ "state": {
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "rendered",
+ "parsed",
+ "gathered",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ }
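A hypothetical aggregate with two members; mode is limited to active/on/passive and force is a boolean.

    lag_config = [
        {
            "name": "port-channel99",
            "members": [
                {"member": "Ethernet1/1", "mode": "active"},
                {"member": "Ethernet1/2", "mode": "active", "force": True},
            ],
        },
    ]
    assert all(m["mode"] in ("active", "on", "passive") for m in lag_config[0]["members"])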
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_global/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_global/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_global/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_global/lldp_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_global/lldp_global.py
new file mode 100644
index 00000000..8fad5eba
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_global/lldp_global.py
@@ -0,0 +1,88 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the nxos_lldp_global module
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+class Lldp_globalArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_lldp_global module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "options": {
+ "holdtime": {"type": "int"},
+ "port_id": {"choices": [0, 1], "type": "int"},
+ "reinit": {"type": "int"},
+ "timer": {"type": "int"},
+ "tlv_select": {
+ "options": {
+ "dcbxp": {"type": "bool"},
+ "management_address": {
+ "options": {
+ "v4": {"type": "bool"},
+ "v6": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "port": {
+ "options": {
+ "description": {"type": "bool"},
+ "vlan": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "power_management": {"type": "bool"},
+ "system": {
+ "options": {
+ "capabilities": {"type": "bool"},
+ "description": {"type": "bool"},
+ "name": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "state": {
+ "choices": [
+ "merged",
+ "replaced",
+ "deleted",
+ "gathered",
+ "parsed",
+ "rendered",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ }
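A hypothetical global LLDP config; note that port_id takes the integers 0 or 1 (not a string), and the tlv_select toggles are nested booleans.

    lldp_global_config = {
        "holdtime": 125,
        "port_id": 0,            # choices are the ints 0 and 1
        "reinit": 3,
        "timer": 35,
        "tlv_select": {
            "dcbxp": True,
            "management_address": {"v4": True, "v6": False},
            "system": {"name": True, "description": False},
        },
    }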
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_interfaces/lldp_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_interfaces/lldp_interfaces.py
new file mode 100644
index 00000000..0552901e
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/lldp_interfaces/lldp_interfaces.py
@@ -0,0 +1,67 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the nxos_lldp_interfaces module
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+class Lldp_interfacesArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_lldp_interfaces module"""
+
+ argument_spec = {
+ "config": {
+ "elements": "dict",
+ "options": {
+ "name": {"required": True, "type": "str"},
+ "receive": {"type": "bool"},
+ "tlv_set": {
+ "options": {
+ "management_address": {"type": "str"},
+ "vlan": {"type": "int"},
+ },
+ "type": "dict",
+ },
+ "transmit": {"type": "bool"},
+ },
+ "type": "list",
+ },
+ "running_config": {"type": "str"},
+ "state": {
+ "choices": [
+ "deleted",
+ "gathered",
+ "merged",
+ "overridden",
+ "rendered",
+ "replaced",
+ "parsed",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
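A hypothetical per-interface entry; receive/transmit are booleans and tlv_set carries a management address string plus a VLAN id.

    lldp_interfaces_config = [
        {
            "name": "Ethernet1/3",
            "receive": True,
            "transmit": False,
            "tlv_set": {"management_address": "192.0.2.10", "vlan": 10},
        },
    ]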
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/logging_global/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/logging_global/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/logging_global/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/logging_global/logging_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/logging_global/logging_global.py
new file mode 100644
index 00000000..58a9052f
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/logging_global/logging_global.py
@@ -0,0 +1,276 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the
+# cli_rm_builder.
+#
+# Manually editing this file is not advised.
+#
+# To update the argspec make the desired changes
+# in the module docstring and re-run
+# cli_rm_builder.
+#
+#############################################
+
+"""
+The arg spec for the nxos_logging_global module
+"""
+
+
+class Logging_globalArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_logging_global module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "type": "dict",
+ "options": {
+ "console": {
+ "type": "dict",
+ "options": {
+ "state": {
+ "type": "str",
+ "choices": ["enabled", "disabled"],
+ },
+ "severity": {
+ "type": "str",
+ "choices": [
+ "emergency",
+ "alert",
+ "critical",
+ "error",
+ "warning",
+ "notification",
+ "informational",
+ "debugging",
+ ],
+ },
+ },
+ },
+ "event": {
+ "type": "dict",
+ "options": {
+ "link_status": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "default": {"type": "bool"},
+ },
+ },
+ "trunk_status": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "default": {"type": "bool"},
+ },
+ },
+ },
+ },
+ "history": {
+ "type": "dict",
+ "options": {
+ "severity": {
+ "type": "str",
+ "choices": [
+ "emergency",
+ "alert",
+ "critical",
+ "error",
+ "warning",
+ "notification",
+ "informational",
+ "debugging",
+ ],
+ },
+ "size": {"type": "int"},
+ },
+ },
+ "ip": {
+ "type": "dict",
+ "options": {
+ "access_list": {
+ "type": "dict",
+ "options": {
+ "cache": {
+ "type": "dict",
+ "options": {
+ "entries": {"type": "int"},
+ "interval": {"type": "int"},
+ "threshold": {"type": "int"},
+ },
+ },
+ "detailed": {"type": "bool"},
+ "include": {
+ "type": "dict",
+ "options": {"sgt": {"type": "bool"}},
+ },
+ },
+ },
+ },
+ },
+ "facilities": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "facility": {"type": "str"},
+ "severity": {
+ "type": "str",
+ "choices": [
+ "emergency",
+ "alert",
+ "critical",
+ "error",
+ "warning",
+ "notification",
+ "informational",
+ "debugging",
+ ],
+ },
+ },
+ },
+ "logfile": {
+ "type": "dict",
+ "options": {
+ "state": {
+ "type": "str",
+ "choices": ["enabled", "disabled"],
+ },
+ "name": {"type": "str"},
+ "severity": {
+ "type": "str",
+ "choices": [
+ "emergency",
+ "alert",
+ "critical",
+ "error",
+ "warning",
+ "notification",
+ "informational",
+ "debugging",
+ ],
+ },
+ "persistent_threshold": {"type": "int"},
+ "size": {"type": "int"},
+ },
+ },
+ "module": {
+ "type": "dict",
+ "options": {
+ "state": {
+ "type": "str",
+ "choices": ["enabled", "disabled"],
+ },
+ "severity": {
+ "type": "str",
+ "choices": [
+ "emergency",
+ "alert",
+ "critical",
+ "error",
+ "warning",
+ "notification",
+ "informational",
+ "debugging",
+ ],
+ },
+ },
+ },
+ "monitor": {
+ "type": "dict",
+ "options": {
+ "state": {
+ "type": "str",
+ "choices": ["enabled", "disabled"],
+ },
+ "severity": {
+ "type": "str",
+ "choices": [
+ "emergency",
+ "alert",
+ "critical",
+ "error",
+ "warning",
+ "notification",
+ "informational",
+ "debugging",
+ ],
+ },
+ },
+ },
+ "origin_id": {
+ "type": "dict",
+ "options": {
+ "hostname": {"type": "bool"},
+ "ip": {"type": "str"},
+ "string": {"type": "str"},
+ },
+ },
+ "rate_limit": {
+ "type": "str",
+ "choices": ["enabled", "disabled"],
+ },
+ "rfc_strict": {"type": "bool"},
+ "hosts": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "host": {"type": "str"},
+ "severity": {
+ "type": "str",
+ "choices": [
+ "emergency",
+ "alert",
+ "critical",
+ "error",
+ "warning",
+ "notification",
+ "informational",
+ "debugging",
+ ],
+ },
+ "facility": {"type": "str"},
+ "port": {"type": "int"},
+ "secure": {
+ "type": "dict",
+ "options": {
+ "trustpoint": {
+ "type": "dict",
+ "options": {"client_identity": {"type": "str"}},
+ },
+ },
+ },
+ "use_vrf": {"type": "str"},
+ },
+ },
+ "source_interface": {"type": "str"},
+ "timestamp": {
+ "type": "str",
+ "choices": ["microseconds", "milliseconds", "seconds"],
+ },
+ },
+ },
+ "state": {
+ "type": "str",
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "parsed",
+ "gathered",
+ "rendered",
+ ],
+ "default": "merged",
+ },
+ } # pylint: disable=C0301
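The same eight severity keywords recur throughout this spec. A small sketch that reuses them in a hypothetical config with a console level, a logfile, and one remote host:

    SEVERITIES = ["emergency", "alert", "critical", "error", "warning",
                  "notification", "informational", "debugging"]

    logging_config = {
        "console": {"severity": "error"},
        "logfile": {"name": "nxoslog", "severity": "notification", "size": 4194304},
        "hosts": [
            {"host": "203.0.113.50", "severity": "warning", "port": 514, "use_vrf": "management"},
        ],
        "origin_id": {"hostname": True},
        "timestamp": "milliseconds",
    }
    assert logging_config["console"]["severity"] in SEVERITIES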
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ntp_global/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ntp_global/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ntp_global/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ntp_global/ntp_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ntp_global/ntp_global.py
new file mode 100644
index 00000000..a680e58d
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ntp_global/ntp_global.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the
+# cli_rm_builder.
+#
+# Manually editing this file is not advised.
+#
+# To update the argspec make the desired changes
+# in the module docstring and re-run
+# cli_rm_builder.
+#
+#############################################
+
+"""
+The arg spec for the nxos_ntp_global module
+"""
+
+
+class Ntp_globalArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_ntp_global module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "type": "dict",
+ "options": {
+ "access_group": {
+ "type": "dict",
+ "options": {
+ "match_all": {"type": "bool"},
+ "peer": {
+ "type": "list",
+ "elements": "dict",
+ "options": {"access_list": {"type": "str"}},
+ },
+ "query_only": {
+ "type": "list",
+ "elements": "dict",
+ "options": {"access_list": {"type": "str"}},
+ },
+ "serve": {
+ "type": "list",
+ "elements": "dict",
+ "options": {"access_list": {"type": "str"}},
+ },
+ "serve_only": {
+ "type": "list",
+ "elements": "dict",
+ "options": {"access_list": {"type": "str"}},
+ },
+ },
+ },
+ "allow": {
+ "type": "dict",
+ "options": {
+ "control": {
+ "type": "dict",
+ "options": {"rate_limit": {"type": "int"}},
+ },
+ "private": {"type": "bool"},
+ },
+ },
+ "authenticate": {"type": "bool"},
+ "authentication_keys": {
+ "type": "list",
+ "elements": "dict",
+ "no_log": False,
+ "options": {
+ "id": {"type": "int"},
+ "key": {"type": "str", "no_log": True},
+ "encryption": {"type": "int"},
+ },
+ },
+ "logging": {"type": "bool"},
+ "master": {
+ "type": "dict",
+ "options": {"stratum": {"type": "int"}},
+ },
+ "passive": {"type": "bool"},
+ "peers": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "peer": {"type": "str"},
+ "key_id": {"type": "int"},
+ "maxpoll": {"type": "int"},
+ "minpoll": {"type": "int"},
+ "prefer": {"type": "bool"},
+ "vrf": {"type": "str", "aliases": ["use_vrf"]},
+ },
+ },
+ "servers": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "server": {"type": "str"},
+ "key_id": {"type": "int"},
+ "maxpoll": {"type": "int"},
+ "minpoll": {"type": "int"},
+ "prefer": {"type": "bool"},
+ "vrf": {"type": "str", "aliases": ["use_vrf"]},
+ },
+ },
+ "source": {"type": "str"},
+ "source_interface": {"type": "str"},
+ "trusted_keys": {
+ "type": "list",
+ "elements": "dict",
+ "no_log": False,
+ "options": {"key_id": {"type": "int"}},
+ },
+ },
+ },
+ "state": {
+ "type": "str",
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "parsed",
+ "gathered",
+ "rendered",
+ ],
+ "default": "merged",
+ },
+ } # pylint: disable=C0301
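A hypothetical NTP config; the key field under authentication_keys is marked no_log in the spec, so a real task would normally source it from a vault rather than hard-coding it as done here.

    ntp_config = {
        "authenticate": True,
        "authentication_keys": [
            {"id": 1, "key": "example-secret", "encryption": 7},   # key is no_log in the spec
        ],
        "trusted_keys": [{"key_id": 1}],
        "servers": [
            {"server": "203.0.113.1", "key_id": 1, "prefer": True, "vrf": "management"},
        ],
        "source_interface": "mgmt0",
    }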
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospf_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospf_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospf_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospf_interfaces/ospf_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospf_interfaces/ospf_interfaces.py
new file mode 100644
index 00000000..30d65035
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospf_interfaces/ospf_interfaces.py
@@ -0,0 +1,142 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the
+# cli_rm_builder.
+#
+# Manually editing this file is not advised.
+#
+# To update the argspec make the desired changes
+# in the module docstring and re-run
+# cli_rm_builder.
+#
+#############################################
+
+"""
+The arg spec for the nxos_ospf_interfaces module
+"""
+
+
+class Ospf_interfacesArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_ospf_interfaces module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "name": {"type": "str", "required": True},
+ "address_family": {
+ "type": "list",
+ "elements": "dict",
+ "mutually_exclusive": [["passive_interface", "default_passive_interface"]],
+ "options": {
+ "afi": {
+ "type": "str",
+ "choices": ["ipv4", "ipv6"],
+ "required": True,
+ },
+ "processes": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "process_id": {
+ "type": "str",
+ "required": True,
+ },
+ "area": {
+ "type": "dict",
+ "options": {
+ "area_id": {
+ "type": "str",
+ "required": True,
+ },
+ "secondaries": {"type": "bool"},
+ },
+ },
+ "multi_areas": {
+ "type": "list",
+ "elements": "str",
+ },
+ },
+ },
+ "multi_areas": {"type": "list", "elements": "str"},
+ "authentication": {
+ "type": "dict",
+ "options": {
+ "key_chain": {"type": "str", "no_log": False},
+ "message_digest": {"type": "bool"},
+ "enable": {"type": "bool"},
+ "null_auth": {"type": "bool"},
+ },
+ },
+ "authentication_key": {
+ "type": "dict",
+ "no_log": False,
+ "options": {
+ "encryption": {"type": "int"},
+ "key": {
+ "type": "str",
+ "required": True,
+ "no_log": True,
+ },
+ },
+ },
+ "message_digest_key": {
+ "type": "dict",
+ "no_log": False,
+ "options": {
+ "key_id": {"type": "int", "required": True},
+ "encryption": {"type": "int"},
+ "key": {
+ "type": "str",
+ "required": True,
+ "no_log": True,
+ },
+ },
+ },
+ "cost": {"type": "int"},
+ "dead_interval": {"type": "int"},
+ "hello_interval": {"type": "int"},
+ "instance": {"type": "int"},
+ "mtu_ignore": {"type": "bool"},
+ "network": {
+ "type": "str",
+ "choices": ["broadcast", "point-to-point"],
+ },
+ "default_passive_interface": {"type": "bool"},
+ "passive_interface": {"type": "bool"},
+ "priority": {"type": "int"},
+ "retransmit_interval": {"type": "int"},
+ "shutdown": {"type": "bool"},
+ "transmit_delay": {"type": "int"},
+ },
+ },
+ },
+ },
+ "state": {
+ "type": "str",
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "gathered",
+ "parsed",
+ "rendered",
+ ],
+ "default": "merged",
+ },
+ } # pylint: disable=C0301
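A hypothetical per-interface OSPF entry; within an address_family entry the spec allows either passive_interface or default_passive_interface, but not both, which the last line checks.

    ospf_if_config = [
        {
            "name": "Ethernet1/7",
            "address_family": [
                {
                    "afi": "ipv4",
                    "processes": [{"process_id": "100", "area": {"area_id": "0.0.0.51"}}],
                    "cost": 20,
                    "passive_interface": True,
                },
            ],
        },
    ]
    af = ospf_if_config[0]["address_family"][0]
    assert not ("passive_interface" in af and "default_passive_interface" in af)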
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv2/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv2/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv2/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv2/ospfv2.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv2/ospfv2.py
new file mode 100644
index 00000000..2e5e62f1
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv2/ospfv2.py
@@ -0,0 +1,622 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2020 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The arg spec for the nxos_ospfv2 module
+"""
+
+
+class Ospfv2Args(object): # pylint: disable=R0903
+ """The arg spec for the nxos_ospfv2 module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "options": {
+ "processes": {
+ "elements": "dict",
+ "options": {
+ "areas": {
+ "required_one_of": [
+ [
+ "authentication",
+ "default_cost",
+ "filter_list",
+ "nssa",
+ "ranges",
+ "stub",
+ ],
+ ],
+ "elements": "dict",
+ "options": {
+ "area_id": {"type": "str", "required": True},
+ "authentication": {
+ "options": {
+ "set": {"type": "bool"},
+ "message_digest": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "default_cost": {"type": "int"},
+ "filter_list": {
+ "options": {
+ "direction": {
+ "choices": ["in", "out"],
+ "type": "str",
+ "required": True,
+ },
+ "route_map": {
+ "type": "str",
+ "required": True,
+ },
+ },
+ "type": "list",
+ "elements": "dict",
+ },
+ "nssa": {
+ "options": {
+ "default_information_originate": {"type": "bool"},
+ "no_redistribution": {"type": "bool"},
+ "no_summary": {"type": "bool"},
+ "set": {"type": "bool"},
+ "translate": {
+ "options": {
+ "type7": {
+ "mutually_exclusive": [["always", "never"]],
+ "options": {
+ "always": {"type": "bool"},
+ "never": {"type": "bool"},
+ "supress_fa": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "ranges": {
+ "elements": "dict",
+ "options": {
+ "cost": {"type": "int"},
+ "not_advertise": {"type": "bool"},
+ "prefix": {
+ "type": "str",
+ "required": True,
+ },
+ },
+ "type": "list",
+ },
+ "stub": {
+ "options": {
+ "no_summary": {"type": "bool"},
+ "set": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "list",
+ },
+ "auto_cost": {
+ "options": {
+ "reference_bandwidth": {
+ "type": "int",
+ "required": True,
+ },
+ "unit": {
+ "choices": ["Gbps", "Mbps"],
+ "type": "str",
+ "required": True,
+ },
+ },
+ "type": "dict",
+ },
+ "bfd": {"type": "bool"},
+ "default_information": {
+ "options": {
+ "originate": {
+ "options": {
+ "always": {"type": "bool"},
+ "route_map": {"type": "str"},
+ "set": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "default_metric": {"type": "int"},
+ "distance": {"type": "int"},
+ "flush_routes": {"type": "bool"},
+ "graceful_restart": {
+ "options": {
+ "grace_period": {"type": "int"},
+ "helper_disable": {"type": "bool"},
+ "set": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "isolate": {"type": "bool"},
+ "log_adjacency_changes": {
+ "options": {
+ "detail": {"type": "bool"},
+ "log": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "max_lsa": {
+ "options": {
+ "ignore_count": {"type": "int"},
+ "ignore_time": {"type": "int"},
+ "max_non_self_generated_lsa": {
+ "type": "int",
+ "required": True,
+ },
+ "reset_time": {"type": "int"},
+ "threshold": {"type": "int"},
+ "warning_only": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "max_metric": {
+ "options": {
+ "router_lsa": {
+ "options": {
+ "external_lsa": {
+ "options": {
+ "max_metric_value": {"type": "int"},
+ "set": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "include_stub": {"type": "bool"},
+ "on_startup": {
+ "options": {
+ "set": {"type": "bool"},
+ "wait_for_bgp_asn": {"type": "int"},
+ "wait_period": {"type": "int"},
+ },
+ "type": "dict",
+ },
+ "set": {"type": "bool"},
+ "summary_lsa": {
+ "options": {
+ "max_metric_value": {"type": "int"},
+ "set": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "maximum_paths": {"type": "int"},
+ "mpls": {
+ "options": {
+ "traffic_eng": {
+ "options": {
+ "areas": {
+ "type": "list",
+ "elements": "dict",
+ "options": {"area_id": {"type": "str"}},
+ },
+ "multicast_intact": {"type": "bool"},
+ "router_id": {"type": "str"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "name_lookup": {"type": "bool"},
+ "passive_interface": {
+ "options": {"default": {"type": "bool"}},
+ "type": "dict",
+ },
+ "process_id": {"required": True, "type": "str"},
+ "redistribute": {
+ "elements": "dict",
+ "options": {
+ "id": {"type": "str"},
+ "protocol": {
+ "choices": [
+ "bgp",
+ "direct",
+ "eigrp",
+ "isis",
+ "lisp",
+ "ospf",
+ "rip",
+ "static",
+ ],
+ "required": True,
+ "type": "str",
+ },
+ "route_map": {"type": "str", "required": True},
+ },
+ "type": "list",
+ },
+ "rfc1583compatibility": {"type": "bool"},
+ "router_id": {"type": "str"},
+ "shutdown": {"type": "bool"},
+ "summary_address": {
+ "elements": "dict",
+ "mutually_exclusive": [["not_advertise", "tag"]],
+ "options": {
+ "not_advertise": {"type": "bool"},
+ "prefix": {"type": "str", "required": True},
+ "tag": {"type": "int"},
+ },
+ "type": "list",
+ },
+ "table_map": {
+ "options": {
+ "filter": {"type": "bool"},
+ "name": {"type": "str", "required": True},
+ },
+ "type": "dict",
+ },
+ "timers": {
+ "options": {
+ "lsa_arrival": {"type": "int"},
+ "lsa_group_pacing": {"type": "int"},
+ "throttle": {
+ "options": {
+ "lsa": {
+ "options": {
+ "hold_interval": {"type": "int"},
+ "max_interval": {"type": "int"},
+ "start_interval": {"type": "int"},
+ },
+ "type": "dict",
+ },
+ "spf": {
+ "options": {
+ "initial_spf_delay": {"type": "int"},
+ "max_wait_time": {"type": "int"},
+ "min_hold_time": {"type": "int"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "vrfs": {
+ "elements": "dict",
+ "options": {
+ "areas": {
+ "required_one_of": [
+ [
+ "authentication",
+ "default_cost",
+ "filter_list",
+ "nssa",
+ "ranges",
+ "stub",
+ ],
+ ],
+ "elements": "dict",
+ "options": {
+ "area_id": {
+ "type": "str",
+ "required": True,
+ },
+ "authentication": {
+ "options": {
+ "set": {"type": "bool"},
+ "message_digest": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "default_cost": {"type": "int"},
+ "filter_list": {
+ "options": {
+ "direction": {
+ "choices": ["in", "out"],
+ "type": "str",
+ "required": True,
+ },
+ "route_map": {
+ "type": "str",
+ "required": True,
+ },
+ },
+ "type": "list",
+ "elements": "dict",
+ },
+ "nssa": {
+ "options": {
+ "default_information_originate": {"type": "bool"},
+ "no_redistribution": {"type": "bool"},
+ "no_summary": {"type": "bool"},
+ "set": {"type": "bool"},
+ "translate": {
+ "options": {
+ "type7": {
+ "mutually_exclusive": [
+ [
+ "always",
+ "never",
+ ],
+ ],
+ "options": {
+ "always": {"type": "bool"},
+ "never": {"type": "bool"},
+ "supress_fa": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "ranges": {
+ "elements": "dict",
+ "options": {
+ "cost": {"type": "int"},
+ "not_advertise": {"type": "bool"},
+ "prefix": {
+ "type": "str",
+ "required": True,
+ },
+ },
+ "type": "list",
+ },
+ "stub": {
+ "options": {
+ "no_summary": {"type": "bool"},
+ "set": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "list",
+ },
+ "auto_cost": {
+ "options": {
+ "reference_bandwidth": {
+ "type": "int",
+ "required": True,
+ },
+ "unit": {
+ "choices": ["Gbps", "Mbps"],
+ "type": "str",
+ "required": True,
+ },
+ },
+ "type": "dict",
+ },
+ "bfd": {"type": "bool"},
+ "capability": {
+ "type": "dict",
+ "options": {
+ "vrf_lite": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "evpn": {"type": "bool"},
+ },
+ },
+ },
+ },
+ "default_information": {
+ "options": {
+ "originate": {
+ "options": {
+ "always": {"type": "bool"},
+ "route_map": {"type": "str"},
+ "set": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "default_metric": {"type": "int"},
+ "distance": {"type": "int"},
+ "down_bit_ignore": {"type": "bool"},
+ "graceful_restart": {
+ "options": {
+ "grace_period": {"type": "int"},
+ "helper_disable": {"type": "bool"},
+ "set": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "log_adjacency_changes": {
+ "options": {
+ "detail": {"type": "bool"},
+ "log": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "max_lsa": {
+ "options": {
+ "ignore_count": {"type": "int"},
+ "ignore_time": {"type": "int"},
+ "max_non_self_generated_lsa": {
+ "type": "int",
+ "required": True,
+ },
+ "reset_time": {"type": "int"},
+ "threshold": {"type": "int"},
+ "warning_only": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "max_metric": {
+ "options": {
+ "router_lsa": {
+ "options": {
+ "external_lsa": {
+ "options": {
+ "max_metric_value": {"type": "int"},
+ "set": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "include_stub": {"type": "bool"},
+ "on_startup": {
+ "options": {
+ "set": {"type": "bool"},
+ "wait_for_bgp_asn": {"type": "int"},
+ "wait_period": {"type": "int"},
+ },
+ "type": "dict",
+ },
+ "set": {"type": "bool"},
+ "summary_lsa": {
+ "options": {
+ "max_metric_value": {"type": "int"},
+ "set": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "maximum_paths": {"type": "int"},
+ "name_lookup": {"type": "bool"},
+ "passive_interface": {
+ "options": {"default": {"type": "bool"}},
+ "type": "dict",
+ },
+ "redistribute": {
+ "elements": "dict",
+ "options": {
+ "id": {"type": "str"},
+ "protocol": {
+ "choices": [
+ "bgp",
+ "direct",
+ "eigrp",
+ "isis",
+ "lisp",
+ "ospf",
+ "rip",
+ "static",
+ ],
+ "required": True,
+ "type": "str",
+ },
+ "route_map": {
+ "type": "str",
+ "required": True,
+ },
+ },
+ "type": "list",
+ },
+ "rfc1583compatibility": {"type": "bool"},
+ "router_id": {"type": "str"},
+ "shutdown": {"type": "bool"},
+ "summary_address": {
+ "elements": "dict",
+ "options": {
+ "not_advertise": {"type": "bool"},
+ "prefix": {
+ "type": "str",
+ "required": True,
+ },
+ "tag": {"type": "int"},
+ },
+ "type": "list",
+ },
+ "table_map": {
+ "options": {
+ "filter": {"type": "bool"},
+ "name": {
+ "type": "str",
+ "required": True,
+ },
+ },
+ "type": "dict",
+ },
+ "timers": {
+ "options": {
+ "lsa_arrival": {"type": "int"},
+ "lsa_group_pacing": {"type": "int"},
+ "throttle": {
+ "options": {
+ "lsa": {
+ "options": {
+ "hold_interval": {"type": "int"},
+ "max_interval": {"type": "int"},
+ "start_interval": {"type": "int"},
+ },
+ "type": "dict",
+ },
+ "spf": {
+ "options": {
+ "initial_spf_delay": {"type": "int"},
+ "max_wait_time": {"type": "int"},
+ "min_hold_time": {"type": "int"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "vrf": {"required": True, "type": "str"},
+ },
+ "type": "list",
+ },
+ },
+ "type": "list",
+ },
+ },
+ "type": "dict",
+ },
+ "state": {
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "gathered",
+ "rendered",
+ "parsed",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
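The areas entries carry a required_one_of constraint: each area must set at least one of the six listed suboptions. A small sketch, with hypothetical process and area ids, of a check that mirrors it:

    REQUIRED_ONE_OF = ["authentication", "default_cost", "filter_list", "nssa", "ranges", "stub"]

    ospfv2_config = {
        "processes": [
            {
                "process_id": "100",
                "router_id": "192.0.2.1",
                "areas": [
                    {"area_id": "0.0.0.10", "stub": {"set": True}},
                    {"area_id": "0.0.0.20", "default_cost": 10},
                ],
            },
        ],
    }
    for area in ospfv2_config["processes"][0]["areas"]:
        assert any(key in area for key in REQUIRED_ONE_OF), "area needs one of %s" % REQUIRED_ONE_OF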
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv3/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv3/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv3/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv3/ospfv3.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv3/ospfv3.py
new file mode 100644
index 00000000..601bb618
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/ospfv3/ospfv3.py
@@ -0,0 +1,488 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the nxos_ospfv3 module
+"""
+
+
+class Ospfv3Args(object): # pylint: disable=R0903
+ """The arg spec for the nxos_ospfv3 module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "type": "dict",
+ "options": {
+ "processes": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "address_family": {
+ "type": "dict",
+ "options": {
+ "afi": {"type": "str", "choices": ["ipv6"]},
+ "safi": {
+ "type": "str",
+ "choices": ["unicast"],
+ },
+ "areas": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "area_id": {
+ "type": "str",
+ "required": True,
+ },
+ "default_cost": {"type": "int"},
+ "filter_list": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "route_map": {
+ "type": "str",
+ "required": True,
+ },
+ "direction": {
+ "type": "str",
+ "choices": ["in", "out"],
+ "required": True,
+ },
+ },
+ },
+ "ranges": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "prefix": {
+ "type": "str",
+ "required": True,
+ },
+ "cost": {"type": "int"},
+ "not_advertise": {"type": "bool"},
+ },
+ },
+ },
+ },
+ "default_information": {
+ "type": "dict",
+ "options": {
+ "originate": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "always": {"type": "bool"},
+ "route_map": {"type": "str"},
+ },
+ },
+ },
+ },
+ "distance": {"type": "int"},
+ "maximum_paths": {"type": "int"},
+ "redistribute": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "protocol": {
+ "type": "str",
+ "choices": [
+ "bgp",
+ "direct",
+ "eigrp",
+ "isis",
+ "lisp",
+ "ospfv3",
+ "rip",
+ "static",
+ ],
+ "required": True,
+ },
+ "id": {"type": "str"},
+ "route_map": {
+ "type": "str",
+ "required": True,
+ },
+ },
+ },
+ "summary_address": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "prefix": {
+ "type": "str",
+ "required": True,
+ },
+ "not_advertise": {"type": "bool"},
+ "tag": {"type": "int"},
+ },
+ },
+ "table_map": {
+ "type": "dict",
+ "options": {
+ "name": {
+ "type": "str",
+ "required": True,
+ },
+ "filter": {"type": "bool"},
+ },
+ },
+ "timers": {
+ "type": "dict",
+ "options": {
+ "throttle": {
+ "type": "dict",
+ "options": {
+ "spf": {
+ "type": "dict",
+ "options": {
+ "initial_spf_delay": {"type": "int"},
+ "min_hold_time": {"type": "int"},
+ "max_wait_time": {"type": "int"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "areas": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "area_id": {"type": "str", "required": True},
+ "nssa": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "default_information_originate": {"type": "bool"},
+ "no_redistribution": {"type": "bool"},
+ "no_summary": {"type": "bool"},
+ "route_map": {"type": "str"},
+ "translate": {
+ "type": "dict",
+ "options": {
+ "type7": {
+ "type": "dict",
+ "options": {
+ "always": {"type": "bool"},
+ "never": {"type": "bool"},
+ "supress_fa": {"type": "bool"},
+ },
+ },
+ },
+ },
+ },
+ },
+ "stub": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "no_summary": {"type": "bool"},
+ },
+ },
+ },
+ },
+ "auto_cost": {
+ "type": "dict",
+ "options": {
+ "reference_bandwidth": {
+ "type": "int",
+ "required": True,
+ },
+ "unit": {
+ "type": "str",
+ "required": True,
+ "choices": ["Gbps", "Mbps"],
+ },
+ },
+ },
+ "flush_routes": {"type": "bool"},
+ "graceful_restart": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "grace_period": {"type": "int"},
+ "helper_disable": {"type": "bool"},
+ "planned_only": {"type": "bool"},
+ },
+ },
+ "isolate": {"type": "bool"},
+ "log_adjacency_changes": {
+ "type": "dict",
+ "options": {
+ "log": {"type": "bool"},
+ "detail": {"type": "bool"},
+ },
+ },
+ "max_lsa": {
+ "type": "dict",
+ "options": {
+ "max_non_self_generated_lsa": {
+ "type": "int",
+ "required": True,
+ },
+ "threshold": {"type": "int"},
+ "ignore_count": {"type": "int"},
+ "ignore_time": {"type": "int"},
+ "reset_time": {"type": "int"},
+ "warning_only": {"type": "bool"},
+ },
+ },
+ "max_metric": {
+ "type": "dict",
+ "options": {
+ "router_lsa": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "external_lsa": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "max_metric_value": {"type": "int"},
+ },
+ },
+ "stub_prefix_lsa": {"type": "bool"},
+ "on_startup": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "wait_period": {"type": "int"},
+ "wait_for_bgp_asn": {"type": "int"},
+ },
+ },
+ "inter_area_prefix_lsa": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "max_metric_value": {"type": "int"},
+ },
+ },
+ },
+ },
+ },
+ },
+ "name_lookup": {"type": "bool"},
+ "passive_interface": {
+ "type": "dict",
+ "options": {"default": {"type": "bool"}},
+ },
+ "process_id": {"type": "str", "required": True},
+ "router_id": {"type": "str"},
+ "shutdown": {"type": "bool"},
+ "timers": {
+ "type": "dict",
+ "options": {
+ "lsa_arrival": {"type": "int"},
+ "lsa_group_pacing": {"type": "int"},
+ "throttle": {
+ "type": "dict",
+ "options": {
+ "lsa": {
+ "type": "dict",
+ "options": {
+ "start_interval": {"type": "int"},
+ "hold_interval": {"type": "int"},
+ "max_interval": {"type": "int"},
+ },
+ },
+ },
+ },
+ },
+ },
+ "vrfs": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "areas": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "area_id": {
+ "type": "str",
+ "required": True,
+ },
+ "nssa": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "default_information_originate": {"type": "bool"},
+ "no_redistribution": {"type": "bool"},
+ "no_summary": {"type": "bool"},
+ "route_map": {"type": "str"},
+ "translate": {
+ "type": "dict",
+ "options": {
+ "type7": {
+ "type": "dict",
+ "options": {
+ "always": {"type": "bool"},
+ "never": {"type": "bool"},
+ "supress_fa": {"type": "bool"},
+ },
+ },
+ },
+ },
+ },
+ },
+ "stub": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "no_summary": {"type": "bool"},
+ },
+ },
+ },
+ },
+ "auto_cost": {
+ "type": "dict",
+ "options": {
+ "reference_bandwidth": {
+ "type": "int",
+ "required": True,
+ },
+ "unit": {
+ "type": "str",
+ "required": True,
+ "choices": ["Gbps", "Mbps"],
+ },
+ },
+ },
+ "graceful_restart": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "grace_period": {"type": "int"},
+ "helper_disable": {"type": "bool"},
+ "planned_only": {"type": "bool"},
+ },
+ },
+ "log_adjacency_changes": {
+ "type": "dict",
+ "options": {
+ "log": {"type": "bool"},
+ "detail": {"type": "bool"},
+ },
+ },
+ "max_lsa": {
+ "type": "dict",
+ "options": {
+ "max_non_self_generated_lsa": {
+ "type": "int",
+ "required": True,
+ },
+ "threshold": {"type": "int"},
+ "ignore_count": {"type": "int"},
+ "ignore_time": {"type": "int"},
+ "reset_time": {"type": "int"},
+ "warning_only": {"type": "bool"},
+ },
+ },
+ "max_metric": {
+ "type": "dict",
+ "options": {
+ "router_lsa": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "external_lsa": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "max_metric_value": {"type": "int"},
+ },
+ },
+ "stub_prefix_lsa": {"type": "bool"},
+ "on_startup": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "wait_period": {"type": "int"},
+ "wait_for_bgp_asn": {"type": "int"},
+ },
+ },
+ "inter_area_prefix_lsa": {
+ "type": "dict",
+ "options": {
+ "set": {"type": "bool"},
+ "max_metric_value": {"type": "int"},
+ },
+ },
+ },
+ },
+ },
+ },
+ "name_lookup": {"type": "bool"},
+ "passive_interface": {
+ "type": "dict",
+ "options": {"default": {"type": "bool"}},
+ },
+ "router_id": {"type": "str"},
+ "shutdown": {"type": "bool"},
+ "timers": {
+ "type": "dict",
+ "options": {
+ "lsa_arrival": {"type": "int"},
+ "lsa_group_pacing": {"type": "int"},
+ "throttle": {
+ "type": "dict",
+ "options": {
+ "lsa": {
+ "type": "dict",
+ "options": {
+ "start_interval": {"type": "int"},
+ "hold_interval": {"type": "int"},
+ "max_interval": {"type": "int"},
+ },
+ },
+ },
+ },
+ },
+ },
+ "vrf": {"type": "str", "required": True},
+ },
+ },
+ },
+ },
+ },
+ },
+ "state": {
+ "type": "str",
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "gathered",
+ "parsed",
+ "rendered",
+ ],
+ "default": "merged",
+ },
+ } # pylint: disable=C0301
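The generated argument_spec above is a plain nested dictionary, so it can be exercised outside a running module. Below is a minimal sketch (an illustration, not part of the commit), assuming ansible-core 2.11+ and the cisco.nxos collection are importable: it feeds a small parameter dict through ansible-core's ArgumentSpecValidator to show how the spec validates input and fills in defaults such as state=merged.

from ansible.module_utils.common.arg_spec import ArgumentSpecValidator

from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.ospfv3.ospfv3 import (
    Ospfv3Args,
)

# A small, valid parameter set for nxos_ospfv3 (process_id is the only
# required key inside a process entry).
params = {
    "config": {
        "processes": [
            {"process_id": "100", "router_id": "203.0.113.20", "flush_routes": True},
        ],
    },
}

validator = ArgumentSpecValidator(Ospfv3Args.argument_spec)
result = validator.validate(params)
print(result.error_messages)        # [] when the input matches the spec
print(result.validated_parameters)  # includes defaults, e.g. "state": "merged"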
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/prefix_lists/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/prefix_lists/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/prefix_lists/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/prefix_lists/prefix_lists.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/prefix_lists/prefix_lists.py
new file mode 100644
index 00000000..c29e0375
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/prefix_lists/prefix_lists.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the
+# cli_rm_builder.
+#
+# Manually editing this file is not advised.
+#
+# To update the argspec make the desired changes
+# in the module docstring and re-run
+# cli_rm_builder.
+#
+#############################################
+
+"""
+The arg spec for the nxos_prefix_lists module
+"""
+
+
+class Prefix_listsArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_prefix_lists module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "afi": {"type": "str", "choices": ["ipv4", "ipv6"]},
+ "prefix_lists": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "name": {"type": "str"},
+ "description": {"type": "str"},
+ "entries": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "sequence": {"type": "int"},
+ "action": {
+ "type": "str",
+ "choices": ["permit", "deny"],
+ },
+ "prefix": {"type": "str"},
+ "eq": {"type": "int"},
+ "ge": {"type": "int"},
+ "le": {"type": "int"},
+ "mask": {"type": "str"},
+ },
+ },
+ },
+ },
+ },
+ },
+ "state": {
+ "type": "str",
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "parsed",
+ "gathered",
+ "rendered",
+ ],
+ "default": "merged",
+ },
+ } # pylint: disable=C0301
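The deeply nested "options" above mirror the structure of the data a user supplies: config is a list of per-AFI dicts, each holding prefix_lists, each of which holds entries. Purely as an illustration (not part of the commit), the snippet below walks a sample structure that this argspec would accept; it makes no attempt to reproduce the module's own CLI rendering.

# One IPv4 prefix-list with two entries, shaped exactly as the argspec describes.
sample_config = [
    {
        "afi": "ipv4",
        "prefix_lists": [
            {
                "name": "AllowPrefix",
                "description": "allow selected prefixes",
                "entries": [
                    {"sequence": 10, "action": "permit", "prefix": "192.0.2.0/24", "le": 32},
                    {"sequence": 20, "action": "deny", "prefix": "198.51.100.0/24"},
                ],
            },
        ],
    },
]

for afi_block in sample_config:
    for plist in afi_block["prefix_lists"]:
        for entry in plist["entries"]:
            print(afi_block["afi"], plist["name"], entry["sequence"], entry["action"], entry["prefix"])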
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/route_maps/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/route_maps/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/route_maps/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/route_maps/route_maps.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/route_maps/route_maps.py
new file mode 100644
index 00000000..25e40d6b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/route_maps/route_maps.py
@@ -0,0 +1,412 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the
+# cli_rm_builder.
+#
+# Manually editing this file is not advised.
+#
+# To update the argspec make the desired changes
+# in the module docstring and re-run
+# cli_rm_builder.
+#
+#############################################
+
+"""
+The arg spec for the nxos_route_maps module
+"""
+
+
+class Route_mapsArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_route_maps module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "route_map": {"type": "str"},
+ "entries": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "sequence": {"type": "int"},
+ "action": {
+ "type": "str",
+ "choices": ["deny", "permit"],
+ },
+ "continue_sequence": {"type": "int"},
+ "description": {"type": "str"},
+ "match": {
+ "type": "dict",
+ "options": {
+ "as_number": {
+ "type": "dict",
+ "options": {
+ "asn": {
+ "type": "list",
+ "elements": "str",
+ },
+ "as_path_list": {
+ "type": "list",
+ "elements": "str",
+ },
+ },
+ },
+ "as_path": {"type": "list", "elements": "str"},
+ "community": {
+ "type": "dict",
+ "options": {
+ "community_list": {
+ "type": "list",
+ "elements": "str",
+ },
+ "exact_match": {"type": "bool"},
+ },
+ },
+ "evpn": {
+ "type": "dict",
+ "options": {
+ "route_types": {
+ "type": "list",
+ "elements": "str",
+ },
+ },
+ },
+ "extcommunity": {
+ "type": "dict",
+ "options": {
+ "extcommunity_list": {
+ "type": "list",
+ "elements": "str",
+ },
+ "exact_match": {"type": "bool"},
+ },
+ },
+ "interfaces": {
+ "type": "list",
+ "elements": "str",
+ },
+ "ip": {
+ "type": "dict",
+ "options": {
+ "address": {
+ "type": "dict",
+ "options": {
+ "access_list": {"type": "str"},
+ "prefix_lists": {
+ "type": "list",
+ "elements": "str",
+ },
+ },
+ },
+ "multicast": {
+ "type": "dict",
+ "options": {
+ "source": {"type": "str"},
+ "group": {
+ "type": "dict",
+ "options": {"prefix": {"type": "str"}},
+ },
+ "group_range": {
+ "type": "dict",
+ "options": {
+ "first": {"type": "str"},
+ "last": {"type": "str"},
+ },
+ },
+ "rp": {
+ "type": "dict",
+ "options": {
+ "prefix": {"type": "str"},
+ "rp_type": {
+ "type": "str",
+ "choices": [
+ "ASM",
+ "Bidir",
+ ],
+ },
+ },
+ },
+ },
+ },
+ "next_hop": {
+ "type": "dict",
+ "options": {
+ "prefix_lists": {
+ "type": "list",
+ "elements": "str",
+ },
+ },
+ },
+ "route_source": {
+ "type": "dict",
+ "options": {
+ "prefix_lists": {
+ "type": "list",
+ "elements": "str",
+ },
+ },
+ },
+ },
+ },
+ "ipv6": {
+ "type": "dict",
+ "options": {
+ "address": {
+ "type": "dict",
+ "options": {
+ "access_list": {"type": "str"},
+ "prefix_lists": {
+ "type": "list",
+ "elements": "str",
+ },
+ },
+ },
+ "multicast": {
+ "type": "dict",
+ "options": {
+ "source": {"type": "str"},
+ "group": {
+ "type": "dict",
+ "options": {"prefix": {"type": "str"}},
+ },
+ "group_range": {
+ "type": "dict",
+ "options": {
+ "first": {"type": "str"},
+ "last": {"type": "str"},
+ },
+ },
+ "rp": {
+ "type": "dict",
+ "options": {
+ "prefix": {"type": "str"},
+ "rp_type": {
+ "type": "str",
+ "choices": [
+ "ASM",
+ "Bidir",
+ ],
+ },
+ },
+ },
+ },
+ },
+ "next_hop": {
+ "type": "dict",
+ "options": {
+ "prefix_lists": {
+ "type": "list",
+ "elements": "str",
+ },
+ },
+ },
+ "route_source": {
+ "type": "dict",
+ "options": {
+ "prefix_lists": {
+ "type": "list",
+ "elements": "str",
+ },
+ },
+ },
+ },
+ },
+ "mac_list": {
+ "type": "list",
+ "elements": "str",
+ },
+ "metric": {"type": "list", "elements": "int"},
+ "ospf_area": {
+ "type": "list",
+ "elements": "int",
+ },
+ "route_types": {
+ "type": "list",
+ "elements": "str",
+ "choices": [
+ "external",
+ "inter-area",
+ "internal",
+ "intra-area",
+ "level-1",
+ "level-2",
+ "local",
+ "nssa-external",
+ "type-1",
+ "type-2",
+ ],
+ },
+ "source_protocol": {
+ "type": "list",
+ "elements": "str",
+ },
+ "tags": {"type": "list", "elements": "int"},
+ },
+ },
+ "set": {
+ "type": "dict",
+ "options": {
+ "as_path": {
+ "type": "dict",
+ "options": {
+ "prepend": {
+ "type": "dict",
+ "options": {
+ "as_number": {
+ "type": "list",
+ "elements": "str",
+ },
+ "last_as": {"type": "int"},
+ },
+ },
+ "tag": {"type": "bool"},
+ },
+ },
+ "comm_list": {"type": "str"},
+ "community": {
+ "type": "dict",
+ "options": {
+ "additive": {"type": "bool"},
+ "graceful_shutdown": {"type": "bool"},
+ "internet": {"type": "bool"},
+ "local_as": {"type": "bool"},
+ "no_advertise": {"type": "bool"},
+ "no_export": {"type": "bool"},
+ "number": {
+ "type": "list",
+ "elements": "str",
+ },
+ },
+ },
+ "dampening": {
+ "type": "dict",
+ "options": {
+ "half_life": {"type": "int"},
+ "start_reuse_route": {"type": "int"},
+ "start_suppress_route": {"type": "int"},
+ "max_suppress_time": {"type": "int"},
+ },
+ },
+ "distance": {
+ "type": "dict",
+ "options": {
+ "igp_ebgp_routes": {"type": "int"},
+ "internal_routes": {"type": "int"},
+ "local_routes": {"type": "int"},
+ },
+ },
+ "evpn": {
+ "type": "dict",
+ "options": {
+ "gateway_ip": {
+ "type": "dict",
+ "mutually_exclusive": [["ip", "use_nexthop"]],
+ "options": {
+ "ip": {"type": "str"},
+ "use_nexthop": {"type": "bool"},
+ },
+ },
+ },
+ },
+ "extcomm_list": {"type": "str"},
+ "forwarding_address": {"type": "bool"},
+ "null_interface": {"type": "str"},
+ "ip": {
+ "type": "dict",
+ "options": {
+ "address": {
+ "type": "dict",
+ "options": {"prefix_list": {"type": "str"}},
+ },
+ "precedence": {"type": "str"},
+ },
+ },
+ "ipv6": {
+ "type": "dict",
+ "options": {
+ "address": {
+ "type": "dict",
+ "options": {"prefix_list": {"type": "str"}},
+ },
+ "precedence": {"type": "str"},
+ },
+ },
+ "label_index": {"type": "int"},
+ "level": {
+ "type": "str",
+ "choices": [
+ "level-1",
+ "level-1-2",
+ "level-2",
+ ],
+ },
+ "local_preference": {"type": "int"},
+ "metric": {
+ "type": "dict",
+ "options": {
+ "bandwidth": {"type": "int"},
+ "igrp_delay_metric": {"type": "int"},
+ "igrp_reliability_metric": {"type": "int"},
+ "igrp_effective_bandwidth_metric": {"type": "int"},
+ "igrp_mtu": {"type": "int"},
+ },
+ },
+ "metric_type": {
+ "type": "str",
+ "choices": [
+ "external",
+ "internal",
+ "type-1",
+ "type-2",
+ ],
+ },
+ "nssa_only": {"type": "bool"},
+ "origin": {
+ "type": "str",
+ "choices": ["egp", "igp", "incomplete"],
+ },
+ "path_selection": {
+ "type": "str",
+ "choices": [
+ "all",
+ "backup",
+ "best2",
+ "multipaths",
+ ],
+ },
+ "tag": {"type": "int"},
+ "weight": {"type": "int"},
+ },
+ },
+ },
+ },
+ },
+ },
+ "state": {
+ "type": "str",
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "parsed",
+ "gathered",
+ "rendered",
+ ],
+ "default": "merged",
+ },
+ } # pylint: disable=C0301
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/snmp_server/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/snmp_server/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/snmp_server/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/snmp_server/snmp_server.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/snmp_server/snmp_server.py
new file mode 100644
index 00000000..a6b3d420
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/snmp_server/snmp_server.py
@@ -0,0 +1,411 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the
+# cli_rm_builder.
+#
+# Manually editing this file is not advised.
+#
+# To update the argspec make the desired changes
+# in the module docstring and re-run
+# cli_rm_builder.
+#
+#############################################
+
+"""
+The arg spec for the nxos_snmp_server module
+"""
+
+
+class Snmp_serverArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_snmp_server module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "type": "dict",
+ "options": {
+ "aaa_user": {
+ "type": "dict",
+ "options": {"cache_timeout": {"type": "int"}},
+ },
+ "communities": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "name": {"type": "str", "aliases": ["community"]},
+ "group": {"type": "str"},
+ "ro": {"type": "bool"},
+ "rw": {"type": "bool"},
+ "use_ipv4acl": {"type": "str"},
+ "use_ipv6acl": {"type": "str"},
+ },
+ },
+ "contact": {"type": "str"},
+ "context": {
+ "type": "dict",
+ "options": {
+ "name": {"type": "str"},
+ "instance": {"type": "str"},
+ "topology": {"type": "str"},
+ "vrf": {"type": "str"},
+ },
+ },
+ "counter": {
+ "type": "dict",
+ "options": {
+ "cache": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "timeout": {"type": "int"},
+ },
+ },
+ },
+ },
+ "drop": {
+ "type": "dict",
+ "options": {
+ "unknown_engine_id": {"type": "bool"},
+ "unknown_user": {"type": "bool"},
+ },
+ },
+ "traps": {
+ "type": "dict",
+ "options": {
+ "aaa": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "server_state_change": {"type": "bool"},
+ },
+ },
+ "bgp": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ },
+ },
+ "bridge": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "newroot": {"type": "bool"},
+ "topologychange": {"type": "bool"},
+ },
+ },
+ "callhome": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "event_notify": {"type": "bool"},
+ "smtp_send_fail": {"type": "bool"},
+ },
+ },
+ "cfs": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "merge_failure": {"type": "bool"},
+ "state_change_notif": {"type": "bool"},
+ },
+ },
+ "config": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "ccmCLIRunningConfigChanged": {"type": "bool"},
+ },
+ },
+ "entity": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "cefcMIBEnableStatusNotification": {"type": "bool"},
+ "entity_fan_status_change": {"type": "bool"},
+ "entity_mib_change": {"type": "bool"},
+ "entity_module_inserted": {"type": "bool"},
+ "entity_module_removed": {"type": "bool"},
+ "entity_module_status_change": {"type": "bool"},
+ "entity_power_out_change": {"type": "bool"},
+ "entity_power_status_change": {"type": "bool"},
+ "entity_sensor": {"type": "bool"},
+ "entity_unrecognised_module": {"type": "bool"},
+ },
+ },
+ "feature_control": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "featureOpStatusChange": {"type": "bool"},
+ "ciscoFeatOpStatusChange": {"type": "bool"},
+ },
+ },
+ "generic": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "coldStart": {"type": "bool"},
+ "warmStart": {"type": "bool"},
+ },
+ },
+ "license": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "notify_license_expiry": {"type": "bool"},
+ "notify_license_expiry_warning": {"type": "bool"},
+ "notify_licensefile_missing": {"type": "bool"},
+ "notify_no_license_for_feature": {"type": "bool"},
+ },
+ },
+ "link": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "cErrDisableInterfaceEventRev1": {"type": "bool"},
+ "cieLinkDown": {"type": "bool"},
+ "cieLinkUp": {"type": "bool"},
+ "cisco_xcvr_mon_status_chg": {"type": "bool"},
+ "cmn_mac_move_notification": {"type": "bool"},
+ "delayed_link_state_change": {"type": "bool"},
+ "extended_linkDown": {"type": "bool"},
+ "extended_linkUp": {"type": "bool"},
+ "linkDown": {"type": "bool"},
+ "linkUp": {"type": "bool"},
+ },
+ },
+ "mmode": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "cseMaintModeChangeNotify": {"type": "bool"},
+ "cseNormalModeChangeNotify": {"type": "bool"},
+ },
+ },
+ "ospf": {
+ "type": "dict",
+ "options": {"enable": {"type": "bool"}},
+ },
+ "ospfv3": {
+ "type": "dict",
+ "options": {"enable": {"type": "bool"}},
+ },
+ "rf": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "redundancy_framework": {"type": "bool"},
+ },
+ },
+ "rmon": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "fallingAlarm": {"type": "bool"},
+ "hcFallingAlarm": {"type": "bool"},
+ "hcRisingAlarm": {"type": "bool"},
+ "risingAlarm": {"type": "bool"},
+ },
+ },
+ "snmp": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "authentication": {"type": "bool"},
+ },
+ },
+ "storm_control": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "cpscEventRev1": {"type": "bool"},
+ "trap_rate": {"type": "bool"},
+ },
+ },
+ "stpx": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "inconsistency": {"type": "bool"},
+ "loop_inconsistency": {"type": "bool"},
+ "root_inconsistency": {"type": "bool"},
+ },
+ },
+ "syslog": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "message_generated": {"type": "bool"},
+ },
+ },
+ "sysmgr": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "cseFailSwCoreNotifyExtended": {"type": "bool"},
+ },
+ },
+ "system": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "clock_change_notification": {"type": "bool"},
+ },
+ },
+ "upgrade": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "upgradeJobStatusNotify": {"type": "bool"},
+ "upgradeOpNotifyOnCompletion": {"type": "bool"},
+ },
+ },
+ "vtp": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "notifs": {"type": "bool"},
+ "vlancreate": {"type": "bool"},
+ "vlandelete": {"type": "bool"},
+ },
+ },
+ },
+ },
+ "engine_id": {
+ "type": "dict",
+ "options": {"local": {"type": "str"}},
+ },
+ "global_enforce_priv": {"type": "bool"},
+ "hosts": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "host": {"type": "str"},
+ "community": {"type": "str"},
+ "filter_vrf": {"type": "str"},
+ "informs": {"type": "bool"},
+ "source_interface": {"type": "str"},
+ "traps": {"type": "bool"},
+ "use_vrf": {"type": "str"},
+ "version": {
+ "type": "str",
+ "choices": ["1", "2c", "3"],
+ },
+ "auth": {"type": "str"},
+ "priv": {"type": "str"},
+ "udp_port": {"type": "int"},
+ },
+ },
+ "location": {"type": "str"},
+ "mib": {
+ "type": "dict",
+ "options": {
+ "community_map": {
+ "type": "dict",
+ "options": {
+ "community": {"type": "str"},
+ "context": {"type": "str"},
+ },
+ },
+ },
+ },
+ "packetsize": {"type": "int"},
+ "protocol": {
+ "type": "dict",
+ "options": {"enable": {"type": "bool"}},
+ },
+ "source_interface": {
+ "type": "dict",
+ "options": {
+ "informs": {"type": "str"},
+ "traps": {"type": "str"},
+ },
+ },
+ "system_shutdown": {"type": "bool"},
+ "tcp_session": {
+ "type": "dict",
+ "options": {
+ "enable": {"type": "bool"},
+ "auth": {"type": "bool"},
+ },
+ },
+ "users": {
+ "type": "dict",
+ "options": {
+ "auth": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "user": {"type": "str"},
+ "group": {"type": "str"},
+ "authentication": {
+ "type": "dict",
+ "options": {
+ "algorithm": {
+ "type": "str",
+ "choices": [
+ "md5",
+ "sha",
+ "sha-256",
+ ],
+ },
+ "password": {
+ "type": "str",
+ "no_log": False,
+ },
+ "engine_id": {"type": "str"},
+ "localized_key": {"type": "bool"},
+ "localizedv2_key": {"type": "bool"},
+ "priv": {
+ "type": "dict",
+ "options": {
+ "privacy_password": {
+ "type": "str",
+ "no_log": False,
+ },
+ "aes_128": {"type": "bool"},
+ },
+ },
+ },
+ },
+ },
+ },
+ "use_acls": {
+ "type": "list",
+ "elements": "dict",
+ "options": {
+ "user": {"type": "str"},
+ "ipv4": {"type": "str"},
+ "ipv6": {"type": "str"},
+ },
+ },
+ },
+ },
+ },
+ },
+ "state": {
+ "type": "str",
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "parsed",
+ "gathered",
+ "rendered",
+ ],
+ "default": "merged",
+ },
+ } # pylint: disable=C0301
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/static_routes/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/static_routes/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/static_routes/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/static_routes/static_routes.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/static_routes/static_routes.py
new file mode 100644
index 00000000..58a669bd
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/static_routes/static_routes.py
@@ -0,0 +1,89 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the nxos_static_routes module
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+class Static_routesArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_static_routes module"""
+
+ argument_spec = {
+ "config": {
+ "elements": "dict",
+ "options": {
+ "address_families": {
+ "elements": "dict",
+ "options": {
+ "afi": {
+ "choices": ["ipv4", "ipv6"],
+ "required": True,
+ "type": "str",
+ },
+ "routes": {
+ "elements": "dict",
+ "options": {
+ "dest": {"required": True, "type": "str"},
+ "next_hops": {
+ "elements": "dict",
+ "options": {
+ "admin_distance": {"type": "int"},
+ "dest_vrf": {"type": "str"},
+ "forward_router_address": {"type": "str"},
+ "interface": {"type": "str"},
+ "route_name": {"type": "str"},
+ "tag": {"type": "int"},
+ "track": {"type": "int"},
+ },
+ "type": "list",
+ },
+ },
+ "type": "list",
+ },
+ },
+ "type": "list",
+ },
+ "vrf": {"type": "str"},
+ },
+ "type": "list",
+ },
+ "running_config": {"type": "str"},
+ "state": {
+ "choices": [
+ "deleted",
+ "merged",
+ "overridden",
+ "replaced",
+ "gathered",
+ "rendered",
+ "parsed",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/telemetry/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/telemetry/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/telemetry/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/telemetry/telemetry.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/telemetry/telemetry.py
new file mode 100644
index 00000000..7da72979
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/telemetry/telemetry.py
@@ -0,0 +1,115 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Cisco and/or its affiliates.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the nxos_telemetry module
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+class TelemetryArgs(object): # pylint: disable=R0903
+ """The arg spec for the nxos_telemetry module"""
+
+ argument_spec = {
+ "config": {
+ "options": {
+ "certificate": {
+ "options": {
+ "hostname": {"type": "str"},
+ "key": {"type": "str", "no_log": False},
+ },
+ "type": "dict",
+ },
+ "compression": {"choices": ["gzip"], "type": "str"},
+ "source_interface": {"type": "str"},
+ "vrf": {"type": "str"},
+ "destination_groups": {
+ "options": {
+ "destination": {
+ "options": {
+ "encoding": {
+ "choices": ["GPB", "JSON"],
+ "type": "str",
+ },
+ "ip": {"type": "str"},
+ "port": {"type": "int"},
+ "protocol": {
+ "choices": ["HTTP", "TCP", "UDP", "gRPC"],
+ "type": "str",
+ },
+ },
+ "type": "dict",
+ },
+ "id": {"type": "str"},
+ },
+ "type": "list",
+ "elements": "raw",
+ },
+ "sensor_groups": {
+ "options": {
+ "data_source": {
+ "choices": ["NX-API", "DME", "YANG"],
+ "type": "str",
+ },
+ "id": {"type": "str"},
+ "path": {
+ "options": {
+ "depth": {"type": "str"},
+ "filter_condition": {"type": "str"},
+ "name": {"type": "str"},
+ "query_condition": {"type": "str"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "list",
+ "elements": "raw",
+ },
+ "subscriptions": {
+ "options": {
+ "destination_group": {"type": "str"},
+ "id": {"type": "str"},
+ "sensor_group": {
+ "options": {
+ "id": {"type": "str"},
+ "sample_interval": {"type": "int"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "list",
+ "elements": "raw",
+ },
+ },
+ "type": "dict",
+ },
+ "state": {
+ "choices": ["merged", "replaced", "deleted", "gathered"],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/vlans/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/vlans/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/vlans/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/vlans/vlans.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/vlans/vlans.py
new file mode 100644
index 00000000..4f915f09
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/argspec/vlans/vlans.py
@@ -0,0 +1,64 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the nxos_vlans module
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+class VlansArgs(object):
+ """The arg spec for the nxos_vlans module"""
+
+ argument_spec = {
+ "running_config": {"type": "str"},
+ "config": {
+ "elements": "dict",
+ "options": {
+ "enabled": {"type": "bool"},
+ "mapped_vni": {"type": "int"},
+ "mode": {"choices": ["ce", "fabricpath"], "type": "str"},
+ "name": {"type": "str"},
+ "vlan_id": {"required": True, "type": "int"},
+ "state": {"choices": ["active", "suspend"], "type": "str"},
+ },
+ "type": "list",
+ },
+ "state": {
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "rendered",
+ "gathered",
+ "parsed",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ }
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/cmdref/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/cmdref/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/cmdref/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/cmdref/telemetry/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/cmdref/telemetry/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/cmdref/telemetry/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/cmdref/telemetry/telemetry.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/cmdref/telemetry/telemetry.py
new file mode 100644
index 00000000..f5844538
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/cmdref/telemetry/telemetry.py
@@ -0,0 +1,147 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Cisco and/or its affiliates.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# Telemetry Command Reference File
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+TMS_GLOBAL = """
+# The cmd_ref is a yaml formatted list of module commands.
+# A leading underscore denotes a non-command variable; e.g. _template.
+# TMS does not have convenient global json data so this cmd_ref uses raw cli configs.
+---
+_template: # _template holds common settings for all commands
+ # Enable feature telemetry if disabled
+ feature: telemetry
+ # Common get syntax for TMS commands
+ get_command: show run telemetry all
+ # Parent configuration for TMS commands
+ context:
+ - telemetry
+certificate:
+ _exclude: ['N3K', 'N5K', 'N6k', 'N7k']
+ kind: dict
+ getval: certificate (?P<key>\\S+) (?P<hostname>\\S+)$
+ setval: certificate {key} {hostname}
+ default:
+ key: ~
+ hostname: ~
+compression:
+ _exclude: ['N3K', 'N5K', 'N6k', 'N7k']
+ kind: str
+ getval: use-compression (\\S+)$
+ setval: 'use-compression {0}'
+ default: ~
+ context: &dpcontext
+ - telemetry
+ - destination-profile
+source_interface:
+ _exclude: ['N3K', 'N5K', 'N6k', 'N7k']
+ kind: str
+ getval: source-interface (\\S+)$
+ setval: 'source-interface {0}'
+ default: ~
+ context: *dpcontext
+vrf:
+ _exclude: ['N3K', 'N5K', 'N6k', 'N7k']
+ kind: str
+ getval: use-vrf (\\S+)$
+ setval: 'use-vrf {0}'
+ default: ~
+ context: *dpcontext
+"""
+
+TMS_DESTGROUP = """
+# The cmd_ref is a yaml formatted list of module commands.
+# A leading underscore denotes a non-command variable; e.g. _template.
+# TBD: Use Structured Where Possible
+---
+_template: # _template holds common settings for all commands
+ # Enable feature telemetry if disabled
+ feature: telemetry
+ # Common get syntax for TMS commands
+ get_command: show run telemetry all
+ # Parent configuration for TMS commands
+ context:
+ - telemetry
+destination:
+ _exclude: ['N3K', 'N5K', 'N6k', 'N7k']
+ multiple: true
+ kind: dict
+ getval: ip address (?P<ip>\\S+) port (?P<port>\\S+) protocol (?P<protocol>\\S+) encoding (?P<encoding>\\S+)
+ setval: ip address {ip} port {port} protocol {protocol} encoding {encoding}
+ default:
+ ip: ~
+ port: ~
+ protocol: ~
+ encoding: ~
+"""
+
+TMS_SENSORGROUP = """
+# The cmd_ref is a yaml formatted list of module commands.
+# A leading underscore denotes a non-command variable; e.g. _template.
+# TBD: Use Structured Where Possible
+---
+_template: # _template holds common settings for all commands
+ # Enable feature telemetry if disabled
+ feature: telemetry
+ # Common get syntax for TMS commands
+ get_command: show run telemetry all
+ # Parent configuration for TMS commands
+ context:
+ - telemetry
+data_source:
+ _exclude: ['N3K', 'N5K', 'N6k', 'N7k']
+ kind: str
+ getval: data-source (\\S+)$
+ setval: 'data-source {0}'
+ default: ~
+path:
+ _exclude: ['N3K', 'N5K', 'N6k', 'N7k']
+ multiple: true
+ kind: dict
+ getval: path (?P<name>(\\S+|".*"))( depth (?P<depth>\\S+))?( query-condition (?P<query_condition>\\S+))?( filter-condition (?P<filter_condition>\\S+))?$
+ setval: path {name} depth {depth} query-condition {query_condition} filter-condition {filter_condition}
+ default:
+ name: ~
+ depth: ~
+ query_condition: ~
+ filter_condition: ~
+"""
+
+TMS_SUBSCRIPTION = """
+# The cmd_ref is a yaml formatted list of module commands.
+# A leading underscore denotes a non-command variable; e.g. _template.
+# TBD: Use Structured Where Possible
+---
+_template: # _template holds common settings for all commands
+ # Enable feature telemetry if disabled
+ feature: telemetry
+ # Common get syntax for TMS commands
+ get_command: show run telemetry all
+ # Parent configuration for TMS commands
+ context:
+ - telemetry
+destination_group:
+ _exclude: ['N3K', 'N5K', 'N6k', 'N7k']
+ multiple: true
+ kind: str
+ getval: dst-grp (\\S+)$
+ setval: 'dst-grp {0}'
+ default: ~
+sensor_group:
+ _exclude: ['N3K', 'N5K', 'N6k', 'N7k']
+ multiple: true
+ kind: dict
+ getval: snsr-grp (?P<id>\\S+) sample-interval (?P<sample_interval>\\S+)$
+ setval: snsr-grp {id} sample-interval {sample_interval}
+ default:
+ id: ~
+ sample_interval: ~
+"""
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acl_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acl_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acl_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acl_interfaces/acl_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acl_interfaces/acl_interfaces.py
new file mode 100644
index 00000000..5bd15062
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acl_interfaces/acl_interfaces.py
@@ -0,0 +1,321 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos_acl_interfaces class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ remove_empties,
+ to_list,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ get_interface_type,
+ normalize_interface,
+ search_obj_in_list,
+)
+
+
+class Acl_interfaces(ConfigBase):
+ """
+ The nxos_acl_interfaces class
+ """
+
+ gather_subset = ["!all", "!min"]
+
+ gather_network_resources = ["acl_interfaces"]
+
+ def __init__(self, module):
+ super(Acl_interfaces, self).__init__(module)
+
+ def get_acl_interfaces_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ acl_interfaces_facts = facts["ansible_network_resources"].get("acl_interfaces")
+ if not acl_interfaces_facts:
+ return []
+ return acl_interfaces_facts
+
+ def edit_config(self, commands):
+ """Wrapper method for `_connection.edit_config()`
+ This exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return self._connection.edit_config(commands)
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ warnings = list()
+ commands = list()
+ self.state = self._module.params["state"]
+ action_states = ["merged", "replaced", "deleted", "overridden"]
+
+ if self.state == "gathered":
+ result["gathered"] = self.get_acl_interfaces_facts()
+ elif self.state == "rendered":
+ result["rendered"] = self.set_config({})
+ # no need to fetch facts for rendered
+ elif self.state == "parsed":
+ running_config = self._module.params["running_config"]
+ if not running_config:
+ self._module.fail_json(
+ msg="value of running_config parameter must not be empty for state parsed",
+ )
+ result["parsed"] = self.set_config({})
+ # no need to fetch facts for parsed
+ else:
+ existing_acl_interfaces_facts = self.get_acl_interfaces_facts()
+ commands.extend(self.set_config(existing_acl_interfaces_facts))
+ if commands and self.state in action_states:
+ if not self._module.check_mode:
+ self._connection.edit_config(commands)
+ result["changed"] = True
+ result["before"] = existing_acl_interfaces_facts
+ result["commands"] = commands
+
+ changed_acl_interfaces_facts = self.get_acl_interfaces_facts()
+ if result["changed"]:
+ result["after"] = changed_acl_interfaces_facts
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_acl_interfaces_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ config = self._module.params["config"]
+ want = []
+ if config:
+ for w in config:
+ if get_interface_type(w["name"]) == "loopback":
+ self._module.fail_json(
+ msg="This module works with ethernet, management or port-channe",
+ )
+ w.update({"name": normalize_interface(w["name"])})
+ want.append(remove_empties(w))
+ have = existing_acl_interfaces_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ if self.state in ("overridden", "merged", "replaced", "rendered") and not want:
+ self._module.fail_json(
+ msg="value of config parameter must not be empty for state {0}".format(self.state),
+ )
+
+ commands = []
+ if self.state == "overridden":
+ commands = self._state_overridden(want, have)
+ elif self.state == "deleted":
+ commands = self._state_deleted(want, have)
+ elif self.state == "rendered":
+ commands = self._state_rendered(want)
+ elif self.state == "parsed":
+ want = self._module.params["running_config"]
+ commands = self._state_parsed(want)
+ else:
+ for w in want:
+ if self.state == "merged":
+ commands.extend(self._state_merged(w, have))
+ elif self.state == "replaced":
+ commands.extend(self._state_replaced(w, have))
+ return commands
+
+ def _state_parsed(self, want):
+ return self.get_acl_interfaces_facts(want)
+
+ def _state_rendered(self, want):
+ commands = []
+ for w in want:
+ commands.extend(self.set_commands(w, {}))
+ return commands
+
+ def _state_replaced(self, want, have):
+ """The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ new_commands = []
+ del_dict = {"name": want["name"], "access_groups": []}
+ obj_in_have = search_obj_in_list(want["name"], have, "name")
+ if obj_in_have != want:
+ commands = []
+ if obj_in_have and "access_groups" in obj_in_have.keys():
+ for ag in obj_in_have["access_groups"]:
+ want_afi = []
+ if want.get("access_groups"):
+ want_afi = search_obj_in_list(ag["afi"], want["access_groups"], "afi")
+ if not want_afi:
+ # this AFI exists in have but not in want, so remove it entirely
+ del_dict["access_groups"].append(ag)
+ else:
+ del_acl = []
+ for acl in ag["acls"]:
+ if want_afi.get("acls"):
+ if acl not in want_afi["acls"]:
+ del_acl.append(acl)
+ else:
+ del_acl.append(acl)
+ afi = want_afi["afi"]
+ del_dict["access_groups"].append({"afi": afi, "acls": del_acl})
+
+ commands.extend(self._state_deleted([del_dict], have))
+ commands.extend(self._state_merged(want, have))
+ new_commands.append(commands[0])
+ commands = [commands[i] for i in range(1, len(commands)) if commands[i] != commands[0]]
+ new_commands.extend(commands)
+ return new_commands
+
+ def _state_overridden(self, want, have):
+ """The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ want_intf = [w["name"] for w in want]
+ for h in have:
+ if h["name"] not in want_intf:
+ commands.extend(self._state_deleted([h], have))
+ for w in want:
+ commands.extend(self._state_replaced(w, have))
+ return commands
+
+ def _state_merged(self, want, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ return self.set_commands(want, have)
+
+ def set_commands(self, want, have, deleted=False):
+ commands = []
+ have_name = search_obj_in_list(want["name"], have, "name")
+ if have_name and have_name.get("access_groups"):
+ if want.get("access_groups"):
+ for w_afi in want["access_groups"]:
+ ip = "ipv6"
+ if w_afi["afi"] == "ipv4":
+ ip = "ip"
+ have_afi = search_obj_in_list(w_afi["afi"], have_name["access_groups"], "afi")
+ if have_afi:
+ new_acls = []
+ if deleted:
+ if w_afi.get("acls") and have_afi.get("acls"):
+ new_acls = [
+ acl for acl in w_afi.get("acls") if acl in have_afi.get("acls")
+ ]
+ elif "acls" not in w_afi.keys():
+ new_acls = have_afi.get("acls")
+ else:
+ if w_afi.get("acls"):
+ new_acls = [
+ acl for acl in w_afi["acls"] if acl not in have_afi["acls"]
+ ]
+ commands.extend(self.process_acl(new_acls, ip, deleted))
+ else:
+ if not deleted:
+ if w_afi.get("acls"):
+ commands.extend(self.process_acl(w_afi["acls"], ip))
+ else:
+ # only name is given to delete
+ if deleted and "access_groups" in have_name.keys():
+ commands.extend(self.process_access_group(have_name, True))
+ else:
+ if not deleted: # and 'access_groups' in have_name.keys():
+ commands.extend(self.process_access_group(want))
+
+ if len(commands) > 0:
+ commands.insert(0, "interface " + want["name"])
+ return commands
+
+ def process_access_group(self, item, deleted=False):
+ commands = []
+ for ag in item["access_groups"]:
+ ip = "ipv6"
+ if ag["afi"] == "ipv4":
+ ip = "ip"
+ if ag.get("acls"):
+ commands.extend(self.process_acl(ag["acls"], ip, deleted))
+ return commands
+
+ def process_acl(self, acls, ip, deleted=False):
+ commands = []
+ no = ""
+ if deleted:
+ no = "no "
+ for acl in acls:
+ port = ""
+ if acl.get("port"):
+ port = " port"
+ ag = " access-group "
+ if ip == "ipv6":
+ ag = " traffic-filter "
+ commands.append(no + ip + port + ag + acl["name"] + " " + acl["direction"])
+ return commands
+
+ def _state_deleted(self, main_want, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if main_want:
+ if self.state == "deleted":
+ for w in main_want:
+ h = search_obj_in_list(w["name"], have, "name") or {}
+ commands.extend(self.set_commands(h, have, deleted=True))
+ else:
+ for want in main_want:
+ commands.extend(self.set_commands(want, have, deleted=True))
+ else:
+ for h in have:
+ commands.extend(self.set_commands(h, have, deleted=True))
+
+ return commands
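The command strings assembled in process_acl() above follow a small, fixed shape: IPv4 ACLs are applied with "ip access-group", IPv6 ACLs with "ipv6 traffic-filter", an optional "port" keyword is inserted when the ACL is port-based, and removals are prefixed with "no ". The standalone sketch below (an illustration, not part of the commit) reproduces that string-building to make the emitted commands visible.

def render_acl(acl, afi, deleted=False):
    # Mirrors the concatenation done in Acl_interfaces.process_acl().
    ip = "ip" if afi == "ipv4" else "ipv6"
    no = "no " if deleted else ""
    port = " port" if acl.get("port") else ""
    ag = " access-group " if ip == "ip" else " traffic-filter "
    return no + ip + port + ag + acl["name"] + " " + acl["direction"]

print(render_acl({"name": "ACL1v4", "direction": "in"}, "ipv4"))
# ip access-group ACL1v4 in
print(render_acl({"name": "ACL1v6", "direction": "out"}, "ipv6", deleted=True))
# no ipv6 traffic-filter ACL1v6 out
print(render_acl({"name": "PortACL", "direction": "in", "port": True}, "ipv4"))
# ip port access-group PortACL in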
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acls/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acls/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acls/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acls/acls.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acls/acls.py
new file mode 100644
index 00000000..5e6f3c34
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/acls/acls.py
@@ -0,0 +1,674 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos_acls class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ remove_empties,
+ to_list,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.acls.acls import (
+ AclsArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ search_obj_in_list,
+)
+
+
+class Acls(ConfigBase):
+ """
+ The nxos_acls class
+ """
+
+ gather_subset = ["!all", "!min"]
+
+ gather_network_resources = ["acls"]
+
+ def __init__(self, module):
+ super(Acls, self).__init__(module)
+ self.state = self._module.params["state"]
+
+ def get_acls_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ acls_facts = facts["ansible_network_resources"].get("acls")
+ if not acls_facts:
+ return []
+ return acls_facts
+
+ def edit_config(self, commands):
+ """Wrapper method for `_connection.edit_config()`
+ This exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return self._connection.edit_config(commands)
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ warnings = list()
+ commands = list()
+ state = self.state
+ action_states = ["merged", "replaced", "deleted", "overridden"]
+
+ if state == "gathered":
+ result["gathered"] = self.get_acls_facts()
+ elif state == "rendered":
+ result["rendered"] = self.set_config({})
+ elif state == "parsed":
+ result["parsed"] = self.set_config({})
+ else:
+ existing_acls_facts = self.get_acls_facts()
+ commands.extend(self.set_config(existing_acls_facts))
+ result["before"] = existing_acls_facts
+ if commands and state in action_states:
+ if not self._module.check_mode:
+ self._connection.edit_config(commands)
+ result["changed"] = True
+ result["commands"] = commands
+
+ changed_acls_facts = self.get_acls_facts()
+ if result["changed"]:
+ result["after"] = changed_acls_facts
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_acls_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ config = self._module.params["config"]
+ want = []
+ if config:
+ for w in config:
+ want.append(remove_empties(w))
+ have = existing_acls_facts
+ if want:
+ want = self.convert_values(want)
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def convert_values(self, want):
+ """
+ This method maps and converts user-given values to what will actually be present in the device configuration
+ """
+ port_protocol = {
+ 515: "lpd",
+ 517: "talk",
+ 7: "echo",
+ 9: "discard",
+ 12: "exec",
+ 13: "login",
+ 14: "cmd",
+ 109: "pop2",
+ 19: "chargen",
+ 20: "ftp-data",
+ 21: "ftp",
+ 23: "telnet",
+ 25: "smtp",
+ 540: "uucp",
+ 543: "klogin",
+ 544: "kshell",
+ 37: "time",
+ 43: "whois",
+ 49: "tacacs",
+ 179: "bgp",
+ 53: "domain",
+ 194: "irc",
+ 70: "gopher",
+ 79: "finger",
+ 80: "www",
+ 101: "hostname",
+ 3949: "drip",
+ 110: "pop3",
+ 111: "sunrpc",
+ 496: "pim-auto-rp",
+ 113: "ident",
+ 119: "nntp",
+ }
+ protocol = {
+ 1: "icmp",
+ 2: "igmp",
+ 4: "ip",
+ 6: "tcp",
+ 103: "pim",
+ 108: "pcp",
+ 47: "gre",
+ 17: "udp",
+ 50: "esp",
+ 51: "ahp",
+ 88: "eigrp",
+ 89: "ospf",
+ 94: "nos",
+ }
+ precedence = {
+ 0: "routine",
+ 1: "priority",
+ 2: "immediate",
+ 3: "flash",
+ 4: "flash-override",
+ 5: "critical",
+ 6: "internet",
+ 7: "network",
+ }
+ dscp = {
+ 10: "AF11",
+ 12: "AF12",
+ 14: "AF13",
+ 18: "AF21",
+ 20: "AF22",
+ 22: "AF23",
+ 26: "AF31",
+ 28: "AF32",
+ 30: "AF33",
+ 34: "AF41",
+ 36: "AF42",
+ 38: "AF43",
+ 8: "CS1",
+ 16: "CS2",
+ 24: "CS3",
+ 32: "CS4",
+ 40: "CS5",
+ 48: "CS6",
+ 56: "CS7",
+ 0: "Default",
+ 46: "EF",
+ }
+ # port_pro_num = list(protocol.keys())
+ for afi in want:
+ if "acls" in afi.keys():
+ for acl in afi["acls"]:
+ if "aces" in acl.keys():
+ for ace in acl["aces"]:
+ if "dscp" in ace.keys():
+ if ace["dscp"] in dscp:
+ ace["dscp"] = dscp[int(ace["dscp"])]
+ if not ace["dscp"].isdigit():
+ ace["dscp"] = ace["dscp"].lower()
+ if "precedence" in ace.keys():
+ if ace["precedence"].isdigit():
+ ace["precedence"] = precedence[int(ace["precedence"])]
+ if (
+ "protocol" in ace.keys()
+ and ace["protocol"].isdigit()
+ and int(ace["protocol"]) in protocol.keys()
+ ):
+ ace["protocol"] = protocol[int(ace["protocol"])]
+ # convert number to name
+ if "protocol" in ace.keys() and ace["protocol"] in ["tcp", "udp"]:
+ for x in ["source", "destination"]:
+ if "port_protocol" in ace[x].keys():
+ key = list(ace[x]["port_protocol"].keys())[0]
+ # key could be eq,gt,lt,neq or range
+ if key != "range":
+ val = ace[x]["port_protocol"][key]
+ if val.isdigit() and int(val) in port_protocol.keys():
+ ace[x]["port_protocol"][key] = port_protocol[
+ int(val)
+ ]
+ else:
+ st = int(ace[x]["port_protocol"]["range"]["start"])
+ end = int(ace[x]["port_protocol"]["range"]["end"])
+
+ if st in port_protocol.keys():
+ ace[x]["port_protocol"]["range"][
+ "start"
+ ] = port_protocol[st]
+ if end in port_protocol.keys():
+ ace[x]["port_protocol"]["range"][
+ "end"
+ ] = port_protocol[end]
+ return want
+
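For illustration (not part of the commit), this is the effect convert_values() has on a single ACE supplied with numeric values; the mappings come from the port_protocol, protocol and precedence tables defined above.

ace_before = {
    "protocol": "6",
    "precedence": "5",
    "source": {"any": True, "port_protocol": {"eq": "80"}},
    "destination": {"any": True},
}
# After conversion the ACE uses the CLI keywords that the device itself stores
# in its running configuration, so later comparisons against gathered facts line up:
ace_after = {
    "protocol": "tcp",         # protocol[6]
    "precedence": "critical",  # precedence[5]
    "source": {"any": True, "port_protocol": {"eq": "www"}},  # port_protocol[80]
    "destination": {"any": True},
}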
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self.state
+ commands = []
+ if state == "overridden":
+ commands = self._state_overridden(want, have)
+ elif state == "deleted":
+ commands = self._state_deleted(want, have)
+ elif state == "rendered":
+ commands = self._state_rendered(want)
+ elif state == "parsed":
+ want = self._module.params["running_config"]
+ commands = self._state_parsed(want)
+ else:
+ for w in want:
+ if state == "merged":
+ commands.extend(self._state_merged(w, have))
+ elif state == "replaced":
+ commands.extend(self._state_replaced(w, have))
+ if state != "parsed":
+ commands = [c.strip() for c in commands]
+ return commands
+
+ def _state_parsed(self, want):
+ return self.get_acls_facts(want)
+
+ def _state_rendered(self, want):
+ commands = []
+ for w in want:
+ commands.extend(self.set_commands(w, {}))
+ return commands
+
+ def _state_replaced(self, want, have):
+ """The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ have_afi = search_obj_in_list(want["afi"], have, "afi")
+ del_dict = {"acls": []}
+ want_names = []
+ if have_afi != want:
+ if have_afi:
+ del_dict.update({"afi": have_afi["afi"], "acls": []})
+ if want.get("acls"):
+ want_names = [w["name"] for w in want["acls"]]
+ have_names = [h["name"] for h in have_afi["acls"]]
+ want_acls = want.get("acls")
+ for w in want_acls:
+ acl_commands = []
+ if w["name"] not in have_names:
+ # creates new ACL in replaced state
+ merge_dict = {"afi": want["afi"], "acls": [w]}
+ commands.extend(self._state_merged(merge_dict, have))
+ else:
+ # acl in want exists in have
+ have_name = search_obj_in_list(w["name"], have_afi["acls"], "name")
+ have_aces = have_name.get("aces") if have_name.get("aces") else []
+ merge_aces = []
+ del_aces = []
+ w_aces = w.get("aces") if w.get("aces") else []
+
+ for ace in have_aces:
+ if ace not in w_aces:
+ del_aces.append(ace)
+ for ace in w_aces:
+ if ace not in have_aces:
+ merge_aces.append(ace)
+ merge_dict = {
+ "afi": want["afi"],
+ "acls": [{"name": w["name"], "aces": merge_aces}],
+ }
+ del_dict = {
+ "afi": want["afi"],
+ "acls": [{"name": w["name"], "aces": del_aces}],
+ }
+ if del_dict["acls"]:
+ acl_commands.extend(self._state_deleted([del_dict], have))
+ acl_commands.extend(self._state_merged(merge_dict, have))
+
+ for i in range(1, len(acl_commands)):
+ if acl_commands[i] == acl_commands[0]:
+ acl_commands[i] = ""
+ commands.extend(acl_commands)
+ else:
+ acls = []
+ # no acls given in want, so delete all have acls
+ for acl in have_afi["acls"]:
+ acls.append({"name": acl["name"]})
+ del_dict["acls"] = acls
+ if del_dict["acls"]:
+ commands.extend(self._state_deleted([del_dict], have))
+
+ else:
+ # want_afi is not present in have
+ commands.extend(self._state_merged(want, have))
+
+ commands = list(filter(None, commands))
+ return commands
+
+ def _state_overridden(self, want, have):
+ """The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ want_afi = [w["afi"] for w in want]
+ for h in have:
+ if h["afi"] in want_afi:
+ w = search_obj_in_list(h["afi"], want, "afi")
+ for h_acl in h["acls"]:
+ w_acl = search_obj_in_list(h_acl["name"], w["acls"], "name")
+ if not w_acl:
+ del_dict = {
+ "afi": h["afi"],
+ "acls": [{"name": h_acl["name"]}],
+ }
+ commands.extend(self._state_deleted([del_dict], have))
+ else:
+ # if afi is not in want
+ commands.extend(self._state_deleted([{"afi": h["afi"]}], have))
+ for w in want:
+ commands.extend(self._state_replaced(w, have))
+ return commands
+
+ def _state_merged(self, want, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ return self.set_commands(want, have)
+
+ def _state_deleted(self, want, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if want: # and have != want:
+ for w in want:
+ ip = "ipv6" if w["afi"] == "ipv6" else "ip"
+ acl_names = []
+ have_afi = search_obj_in_list(w["afi"], have, "afi")
+                # if want['afi'] is not in have, ignore
+ if have_afi:
+ if w.get("acls"):
+ for acl in w["acls"]:
+ if "aces" in acl.keys() and self.state != "deleted":
+ have_name = search_obj_in_list(
+ acl["name"],
+ have_afi["acls"],
+ "name",
+ )
+ if have_name:
+ ace_commands = []
+ flag = 0
+ for ace in acl["aces"]:
+ if list(ace.keys()) == ["sequence"]:
+ # only sequence number is specified to be deleted
+ if "aces" in have_name.keys():
+ for h_ace in have_name["aces"]:
+ if h_ace["sequence"] == ace["sequence"]:
+ ace_commands.append(
+ "no " + str(ace["sequence"]),
+ )
+ flag = 1
+ else:
+ if "aces" in have_name.keys():
+ for h_ace in have_name["aces"]:
+ # when want['ace'] does not have seq number
+ if "sequence" not in ace.keys():
+ del h_ace["sequence"]
+ if ace == h_ace:
+ ace_commands.append(
+ "no " + self.process_ace(ace),
+ )
+ flag = 1
+ if flag:
+ ace_commands.insert(
+ 0,
+ ip + " access-list " + acl["name"],
+ )
+ commands.extend(ace_commands)
+ else:
+ # only name given
+ for h in have_afi["acls"]:
+ if h["name"] == acl["name"]:
+ acl_names.append(acl["name"])
+ for name in acl_names:
+ commands.append("no " + ip + " access-list " + name)
+
+ else:
+                        # only afi is given
+ if have_afi.get("acls"):
+ for h in have_afi["acls"]:
+ acl_names.append(h["name"])
+ for name in acl_names:
+ commands.append("no " + ip + " access-list " + name)
+ else:
+ v6 = []
+ v4 = []
+ v6_local = v4_local = None
+ for h in have:
+ if h["afi"] == "ipv6":
+ v6 = (acl["name"] for acl in h["acls"])
+ if "match_local_traffic" in h.keys():
+ v6_local = True
+ else:
+ v4 = (acl["name"] for acl in h["acls"])
+ if "match_local_traffic" in h.keys():
+ v4_local = True
+
+ self.no_commands(v4, commands, v4_local, "ip")
+ self.no_commands(v6, commands, v6_local, "ipv6")
+
+ for name in v6:
+ commands.append("no ipv6 access-list " + name)
+ if v4_local:
+ commands.append("no ipv6 access-list match-local-traffic")
+
+ return commands
+
+ def no_commands(self, v_list, commands, match_local, ip):
+ for name in v_list:
+ commands.append("no " + ip + " access-list " + name)
+ if match_local:
+ commands.append("no " + ip + " access-list match-local-traffic")
+
+ def set_commands(self, want, have):
+ commands = []
+ have_afi = search_obj_in_list(want["afi"], have, "afi")
+ ip = ""
+ if "v6" in want["afi"]:
+ ip = "ipv6 "
+ else:
+ ip = "ip "
+
+ if have_afi:
+ if want.get("acls"):
+ for w_acl in want["acls"]:
+ have_acl = search_obj_in_list(w_acl["name"], have_afi["acls"], "name")
+ name = w_acl["name"]
+ flag = 0
+ ace_commands = []
+ if have_acl != w_acl:
+ if have_acl:
+ ace_list = []
+ if w_acl.get("aces") and have_acl.get("aces"):
+ # case 1 --> sequence number not given in want --> new ace
+ # case 2 --> new sequence number in want --> new ace
+                                # case 3 --> existing sequence number given --> update rule (merged state only;
+                                # for replaced and overridden, the existing rule is deleted first by the state handler)
+
+ ace_list = [
+ item for item in w_acl["aces"] if "sequence" not in item.keys()
+ ] # case 1
+
+ want_seq = [
+ item["sequence"]
+ for item in w_acl["aces"]
+ if "sequence" in item.keys()
+ ]
+
+ have_seq = [item["sequence"] for item in have_acl["aces"]]
+
+ new_seq = list(set(want_seq) - set(have_seq))
+ common_seq = list(set(want_seq).intersection(set(have_seq)))
+
+ temp_list = [
+ item
+ for item in w_acl["aces"]
+ if "sequence" in item.keys() and item["sequence"] in new_seq
+ ] # case 2
+ ace_list.extend(temp_list)
+ for w in w_acl["aces"]:
+ self.argument_spec = AclsArgs.argument_spec
+ params = utils.validate_config(
+ self.argument_spec,
+ {
+ "config": [
+ {
+ "afi": want["afi"],
+ "acls": [
+ {
+ "name": name,
+ "aces": ace_list,
+ },
+ ],
+ },
+ ],
+ },
+ )
+ if "sequence" in w.keys() and w["sequence"] in common_seq:
+ temp_obj = search_obj_in_list(
+ w["sequence"],
+ have_acl["aces"],
+ "sequence",
+ ) # case 3
+ if temp_obj != w:
+ ace_list.append(w)
+ if self.state == "merged":
+ # merged will never negate commands
+ self._module.fail_json(
+ msg="Cannot update existing ACE {0} of ACL {1} with state merged."
+ " Please use state replaced or overridden.".format(
+                                                    w["sequence"],
+                                                    name,
+ ),
+ )
+ elif w_acl.get("aces"):
+ # 'have' has ACL defined without any ACE
+ ace_list = list(w_acl["aces"])
+ for w_ace in ace_list:
+ ace_commands.append(self.process_ace(w_ace).strip())
+ flag = 1
+
+ if flag:
+ ace_commands.insert(0, ip + "access-list " + name)
+
+ else:
+ commands.append(ip + "access-list " + name)
+ if "aces" in w_acl.keys():
+ for w_ace in w_acl["aces"]:
+ commands.append(self.process_ace(w_ace).strip())
+ commands.extend(ace_commands)
+ else:
+ if want.get("acls"):
+ for w_acl in want["acls"]:
+ name = w_acl["name"]
+ commands.append(ip + "access-list " + name)
+ if "aces" in w_acl.keys():
+ for w_ace in w_acl["aces"]:
+ commands.append(self.process_ace(w_ace).strip())
+
+ return commands
+
+ def process_ace(self, w_ace):
+ command = ""
+ ace_keys = w_ace.keys()
+ if "remark" in ace_keys:
+ command += "remark " + w_ace["remark"] + " "
+ else:
+ command += w_ace["grant"] + " "
+ if "protocol" in ace_keys:
+ if w_ace["protocol"] == "icmpv6":
+ command += "icmp" + " "
+ else:
+ command += w_ace["protocol"] + " "
+ src = self.get_address(w_ace["source"], w_ace["protocol"])
+ dest = self.get_address(w_ace["destination"], w_ace["protocol"])
+ command += src + dest
+ if "protocol_options" in ace_keys:
+ pro = list(w_ace["protocol_options"].keys())[0]
+ if pro != w_ace["protocol"]:
+ self._module.fail_json(msg="protocol and protocol_options mismatch")
+ flags = ""
+ for k in w_ace["protocol_options"][pro].keys():
+ if k not in ["telemetry_queue", "telemetry_path"]:
+ k = re.sub("_", "-", k)
+ flags += k + " "
+ command += flags
+ if "dscp" in ace_keys:
+ command += "dscp " + w_ace["dscp"] + " "
+ if "fragments" in ace_keys:
+ command += "fragments "
+ if "precedence" in ace_keys:
+ command += "precedence " + w_ace["precedence"] + " "
+ if "log" in ace_keys:
+ command += "log "
+ if "sequence" in ace_keys:
+ command = str(w_ace["sequence"]) + " " + command
+ return command
+
+ def get_address(self, endpoint, pro=""):
+ ret_addr = ""
+ keys = list(endpoint.keys())
+ if "address" in keys:
+ if "wildcard_bits" not in keys:
+ self._module.fail_json(msg="wildcard bits not specified for address")
+ else:
+ ret_addr = endpoint["address"] + " " + endpoint["wildcard_bits"] + " "
+ elif "any" in keys:
+ ret_addr = "any "
+ elif "host" in keys:
+ ret_addr = "host " + endpoint["host"] + " "
+ elif "prefix" in keys:
+ ret_addr = endpoint["prefix"] + " "
+
+ if pro in ["tcp", "udp"]:
+ if "port_protocol" in keys:
+ options = self.get_options(endpoint["port_protocol"])
+ ret_addr += options
+ return ret_addr
+
+ def get_options(self, item):
+ com = ""
+ subkey = list(item.keys())
+ if "range" in subkey:
+ com = "range " + item["range"]["start"] + " " + item["range"]["end"] + " "
+ else:
+ com = subkey[0] + " " + item[subkey[0]] + " "
+ return com
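A minimal, standalone sketch of the number-to-keyword normalization that convert_values performs, so numeric values in a play compare cleanly against the keyword form the device stores. The lookup tables and the ace shape below are trimmed-down assumptions for illustration, not the module's API.

PORT_NAMES = {21: "ftp", 23: "telnet", 80: "www", 179: "bgp"}
PROTO_NAMES = {1: "icmp", 6: "tcp", 17: "udp"}


def normalize_ace(ace):
    """Replace numeric protocol and port values with NX-OS keywords in place."""
    proto = ace.get("protocol", "")
    if proto.isdigit() and int(proto) in PROTO_NAMES:
        ace["protocol"] = PROTO_NAMES[int(proto)]
    if ace.get("protocol") in ("tcp", "udp"):
        for end in ("source", "destination"):
            ports = ace.get(end, {}).get("port_protocol", {})
            for op, val in list(ports.items()):  # op is eq/gt/lt/neq
                if isinstance(val, str) and val.isdigit() and int(val) in PORT_NAMES:
                    ports[op] = PORT_NAMES[int(val)]
    return ace


example = {
    "protocol": "6",
    "source": {"any": True, "port_protocol": {"eq": "80"}},
    "destination": {"any": True},
}
print(normalize_ace(example))
# protocol becomes 'tcp' and the eq port becomes 'www', which is how the
# device stores the entry in its running configuration.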
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bfd_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bfd_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bfd_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bfd_interfaces/bfd_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bfd_interfaces/bfd_interfaces.py
new file mode 100644
index 00000000..a9dc51fd
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bfd_interfaces/bfd_interfaces.py
@@ -0,0 +1,311 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Cisco and/or its affiliates.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+nxos_bfd_interfaces class
+This class creates a command set to bring the current device configuration
+to a desired end-state. The command set is based on a comparison of the
+current configuration (as dict) and the provided configuration (as dict).
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_diff,
+ to_list,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ flatten_dict,
+ search_obj_in_list,
+)
+
+
+class Bfd_interfaces(ConfigBase):
+ """
+ The nxos_bfd_interfaces class
+ """
+
+ gather_subset = ["min"]
+ gather_network_resources = ["bfd_interfaces"]
+ # exclude_params = []
+
+ def __init__(self, module):
+ super(Bfd_interfaces, self).__init__(module)
+
+ def get_bfd_interfaces_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :returns: A list of interface configs and a platform string
+ """
+ if self.state not in self.ACTION_STATES:
+ self.gather_subset = ["!all", "!min"]
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ bfd_interfaces_facts = facts["ansible_network_resources"].get("bfd_interfaces", [])
+
+ platform = facts.get("ansible_net_platform", "")
+ return bfd_interfaces_facts, platform
+
+ def edit_config(self, commands):
+ return self._connection.edit_config(commands)
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ warnings = list()
+ commands = list()
+
+ if self.state in self.ACTION_STATES:
+ (
+ existing_bfd_interfaces_facts,
+ platform,
+ ) = self.get_bfd_interfaces_facts()
+ else:
+ existing_bfd_interfaces_facts, platform = [], ""
+
+ if self.state in self.ACTION_STATES or self.state == "rendered":
+ commands.extend(self.set_config(existing_bfd_interfaces_facts, platform))
+
+ if commands and self.state in self.ACTION_STATES:
+ if not self._module.check_mode:
+ self.edit_config(commands)
+ result["changed"] = True
+
+ if self.state in self.ACTION_STATES:
+ result["commands"] = commands
+
+ if self.state in self.ACTION_STATES or self.state == "gathered":
+ (
+ changed_bfd_interfaces_facts,
+ platform,
+ ) = self.get_bfd_interfaces_facts()
+
+ elif self.state == "rendered":
+ result["rendered"] = commands
+
+ elif self.state == "parsed":
+ running_config = self._module.params["running_config"]
+ if not running_config:
+ self._module.fail_json(
+ msg="value of running_config parameter must not be empty for state parsed",
+ )
+ result["parsed"], platform = self.get_bfd_interfaces_facts(data=running_config)
+
+ if self.state in self.ACTION_STATES:
+ result["before"] = existing_bfd_interfaces_facts
+ if result["changed"]:
+ result["after"] = changed_bfd_interfaces_facts
+
+ elif self.state == "gathered":
+ result["gathered"] = changed_bfd_interfaces_facts
+
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_bfd_interfaces_facts, platform):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ if re.search("N[56]K", platform):
+ # Some platforms do not support the 'bfd' interface keyword;
+ # remove the 'bfd' key from each want/have interface.
+ orig_want = self._module.params["config"]
+ want = []
+ for w in orig_want:
+ del w["bfd"]
+ want.append(w)
+ orig_have = existing_bfd_interfaces_facts
+ have = []
+ for h in orig_have:
+ del h["bfd"]
+ have.append(h)
+ else:
+ want = self._module.params["config"]
+ have = existing_bfd_interfaces_facts
+
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params["state"]
+ if state in ("overridden", "merged", "replaced", "rendered") and not want:
+ self._module.fail_json(
+ msg="value of config parameter must not be empty for state {0}".format(state),
+ )
+
+ cmds = list()
+ if state == "overridden":
+ cmds.extend(self._state_overridden(want, have))
+ elif state == "deleted":
+ cmds.extend(self._state_deleted(want, have))
+ else:
+ for w in want:
+ if state in ["merged", "rendered"]:
+ cmds.extend(self._state_merged(flatten_dict(w), have))
+ elif state == "replaced":
+ cmds.extend(self._state_replaced(flatten_dict(w), have))
+ return cmds
+
+ def _state_replaced(self, want, have):
+ """The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ cmds = []
+ obj_in_have = search_obj_in_list(want["name"], have, "name")
+ if obj_in_have:
+ diff = dict_diff(want, obj_in_have)
+ else:
+ diff = want
+ merged_cmds = self.set_commands(want, have)
+ if "name" not in diff:
+ diff["name"] = want["name"]
+
+ replaced_cmds = []
+ if obj_in_have:
+ replaced_cmds = self.del_attribs(diff)
+ if replaced_cmds or merged_cmds:
+ for cmd in set(replaced_cmds).intersection(set(merged_cmds)):
+ merged_cmds.remove(cmd)
+ cmds.extend(replaced_cmds)
+ cmds.extend(merged_cmds)
+ return cmds
+
+ def _state_overridden(self, want, have):
+ """The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ cmds = []
+ for h in have:
+ # Clean up bfd attrs for any interfaces not listed in the play
+ h = flatten_dict(h)
+ obj_in_want = flatten_dict(search_obj_in_list(h["name"], want, "name"))
+ if obj_in_want:
+ # Let the 'want' loop handle all vals for this interface
+ continue
+ cmds.extend(self.del_attribs(h))
+ for w in want:
+ # Update any want attrs if needed. The overridden state considers
+ # the play as the source of truth for the entire device, therefore
+ # set any unspecified attrs to their default state.
+ w = self.set_none_vals_to_defaults(flatten_dict(w))
+ cmds.extend(self.set_commands(w, have))
+ return cmds
+
+ def _state_merged(self, want, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ return self.set_commands(want, have)
+
+ def _state_deleted(self, want, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ if not (want or have):
+ return []
+ cmds = []
+ if want:
+ for w in want:
+ obj_in_have = flatten_dict(search_obj_in_list(w["name"], have, "name"))
+ cmds.extend(self.del_attribs(obj_in_have))
+ else:
+ for h in have:
+ cmds.extend(self.del_attribs(flatten_dict(h)))
+ return cmds
+
+ def del_attribs(self, obj):
+ if not obj or len(obj.keys()) == 1:
+ return []
+ cmds = []
+ # 'bfd' and 'bfd echo' are enabled by default so the handling is
+ # counter-intuitive; we are enabling them to remove them. The end result
+ # is that they are removed from the interface config on the device.
+ if "bfd" in obj and "disable" in obj["bfd"]:
+ cmds.append("bfd")
+ if "echo" in obj and "disable" in obj["echo"]:
+ cmds.append("bfd echo")
+ if cmds:
+ cmds.insert(0, "interface " + obj["name"])
+ return cmds
+
+ def set_none_vals_to_defaults(self, want):
+ # Set dict None values to default states
+ if "bfd" in want and want["bfd"] is None:
+ want["bfd"] = "enable"
+ if "echo" in want and want["echo"] is None:
+ want["echo"] = "enable"
+ return want
+
+ def diff_of_dicts(self, want, obj_in_have):
+ diff = set(want.items()) - set(obj_in_have.items())
+ diff = dict(diff)
+ if diff and want["name"] == obj_in_have["name"]:
+ diff.update({"name": want["name"]})
+ return diff
+
+ def add_commands(self, want):
+ if not want:
+ return []
+ cmds = []
+ if "bfd" in want and want["bfd"] is not None:
+ cmd = "bfd" if want["bfd"] == "enable" else "no bfd"
+ cmds.append(cmd)
+ if "echo" in want and want["echo"] is not None:
+ cmd = "bfd echo" if want["echo"] == "enable" else "no bfd echo"
+ cmds.append(cmd)
+
+ if cmds:
+ cmds.insert(0, "interface " + want["name"])
+ return cmds
+
+ def set_commands(self, want, have):
+ cmds = []
+ obj_in_have = flatten_dict(search_obj_in_list(want["name"], have, "name"))
+ if not obj_in_have:
+ cmds = self.add_commands(want)
+ else:
+ diff = self.diff_of_dicts(want, obj_in_have)
+ cmds = self.add_commands(diff)
+ return cmds
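set_commands/add_commands above only emit the attributes that differ between want and have, prefixed by the interface line. A standalone sketch of that per-interface diffing, with an assumed want/have shape matching the bfd/echo enable-disable facts format:

def bfd_commands(want, have):
    """Emit only the attributes that differ, prefixed by the interface line."""
    cmds = []
    for key, keyword in (("bfd", "bfd"), ("echo", "bfd echo")):
        if want.get(key) and want[key] != have.get(key):
            cmds.append(keyword if want[key] == "enable" else "no " + keyword)
    if cmds:
        cmds.insert(0, "interface " + want["name"])
    return cmds


print(
    bfd_commands(
        {"name": "Ethernet1/1", "bfd": "enable", "echo": "disable"},
        {"name": "Ethernet1/1", "bfd": "enable", "echo": "enable"},
    )
)
# ['interface Ethernet1/1', 'no bfd echo']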
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_address_family/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_address_family/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_address_family/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_address_family/bgp_address_family.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_address_family/bgp_address_family.py
new file mode 100644
index 00000000..4443039c
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_address_family/bgp_address_family.py
@@ -0,0 +1,253 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos_bgp_address_family config file.
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created.
+"""
+
+from copy import deepcopy
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module import (
+ ResourceModule,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_merge,
+ remove_empties,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.bgp_address_family import (
+ Bgp_address_familyTemplate,
+)
+
+
+class Bgp_address_family(ResourceModule):
+ """
+ The nxos_bgp_address_family config class
+ """
+
+ def __init__(self, module):
+ super(Bgp_address_family, self).__init__(
+ empty_fact_val={},
+ facts_module=Facts(module),
+ module=module,
+ resource="bgp_address_family",
+ tmplt=Bgp_address_familyTemplate(),
+ )
+ self.parsers = [
+ "additional_paths.install_backup",
+ "additional_paths.receive",
+ "additional_paths.selection.route_map",
+ "additional_paths.send",
+ "advertise_l2vpn_evpn",
+ "advertise_pip",
+ "advertise_system_mac",
+ "allow_vni_in_ethertag",
+ "client_to_client.no_reflection",
+ "dampen_igp_metric",
+ "dampening",
+ "default_information.originate",
+ "default_metric",
+ "distance",
+ "export_gateway_ip",
+ "maximum_paths.parallel_paths",
+ "maximum_paths.ibgp.parallel_paths",
+ "maximum_paths.eibgp.parallel_paths",
+ "maximum_paths.local.parallel_paths",
+ "maximum_paths.mixed.parallel_paths",
+ "nexthop.route_map",
+ "nexthop.trigger_delay",
+ "retain.route_target.retain_all",
+ "retain.route_target.route_map",
+ "suppress_inactive",
+ "table_map",
+ "timers.bestpath_defer",
+ "wait_igp_convergence",
+ ]
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ if self.state not in ["parsed", "gathered"]:
+ self.generate_commands()
+ self.run_commands()
+ return self.result
+
+ def generate_commands(self):
+ """Generate configuration commands to send based on
+ want, have and desired state.
+ """
+ wantd = deepcopy(self.want)
+ haved = deepcopy(self.have)
+
+ self._bgp_af_list_to_dict(wantd)
+ self._bgp_af_list_to_dict(haved)
+
+ # if state is merged, merge want onto have and then compare
+ if self.state == "merged":
+ wantd = dict_merge(haved, wantd)
+
+ wantd = remove_empties(wantd)
+ haved = remove_empties(haved)
+
+ have_af = haved.get("address_family", {})
+ want_af = wantd.get("address_family", {})
+ wvrfs = wantd.get("vrfs", {})
+ hvrfs = haved.get("vrfs", {})
+
+        # if state is overridden or deleted, remove superfluous config
+ if self.state in ["deleted", "overridden"]:
+ if (haved and haved["as_number"] == wantd.get("as_number")) or not wantd:
+ remove = True if self.state == "deleted" else False
+ purge = True if not wantd else False
+ self._remove_af(want_af, have_af, remove=remove, purge=purge)
+
+ for k, hvrf in iteritems(hvrfs):
+ wvrf = wvrfs.get(k, {})
+ self._remove_af(wvrf, hvrf, vrf=k, remove=remove, purge=purge)
+
+ if self.state in ["merged", "replaced", "overridden", "rendered"]:
+ for k, want in iteritems(want_af):
+ self._compare(want=want, have=have_af.pop(k, {}))
+
+ # handle vrf->af
+ for wk, wvrf in iteritems(wvrfs):
+ cur_ptr = len(self.commands)
+
+ hvrf = hvrfs.pop(wk, {})
+ for k, want in iteritems(wvrf):
+ self._compare(want=want, have=hvrf.pop(k, {}))
+
+ # add VRF command at correct position once
+ if cur_ptr != len(self.commands):
+ self.commands.insert(cur_ptr, "vrf {0}".format(wk))
+
+ if self.commands:
+ self.commands.insert(0, "router bgp {as_number}".format(**haved or wantd))
+
+ def _compare(self, want, have):
+ """Leverages the base class `compare()` method and
+ populates the list of commands to be run by comparing
+ the `want` and `have` data with the `parsers` defined
+ for the Bgp_address_family network resource.
+ """
+ begin = len(self.commands)
+
+ self.compare(parsers=self.parsers, want=want, have=have)
+ self._compare_lists(want=want, have=have)
+
+ if len(self.commands) != begin or (not have and want):
+ self.commands.insert(
+ begin,
+ self._tmplt.render(want or have, "address_family", False),
+ )
+
+ def _compare_lists(self, want, have):
+ for attrib in [
+ "aggregate_address",
+ "inject_map",
+ "networks",
+ "redistribute",
+ ]:
+ wdict = want.get(attrib, {})
+ hdict = have.get(attrib, {})
+ for key, entry in iteritems(wdict):
+ if entry != hdict.pop(key, {}):
+ self.addcmd(entry, attrib.format(attrib), False)
+
+ # remove remaining items in have for replaced
+ for entry in hdict.values():
+ self.addcmd(entry, attrib.format(attrib), True)
+
+ def _bgp_af_list_to_dict(self, entry):
+ def _build_key(data):
+ """Build primary key for each dict
+
+            :params data: dictionary
+ :returns: primary key as tuple
+ """
+ # afi should always be present
+ # safi and vrf are optional
+ # a combination of these 3 uniquely
+ # identifies an AF context
+ afi = "afi_" + data["afi"]
+ safi = "safi_" + data.get("safi", "")
+ vrf = "vrf_" + data.get("vrf", "")
+
+ return (afi, safi, vrf)
+
+ # transform parameters which are
+ # list of dicts to dict of dicts
+ for item in entry.get("address_family", []):
+ item["aggregate_address"] = {x["prefix"]: x for x in item.get("aggregate_address", [])}
+ item["inject_map"] = {
+ (x["route_map"], x["exist_map"]): x for x in item.get("inject_map", [])
+ }
+ item["networks"] = {x["prefix"]: x for x in item.get("networks", [])}
+ item["redistribute"] = {
+ (x.get("id"), x["protocol"]): x for x in item.get("redistribute", [])
+ }
+
+ # transform all entries under
+ # config->address_family to dict of dicts
+ af = {_build_key(x): x for x in entry.get("address_family", [])}
+
+ temp = {}
+ entry["vrfs"] = {}
+ entry["address_family"] = {}
+
+ # group AFs by VRFs
+ # vrf_ denotes global AFs
+ for k in af.keys():
+ for x in k:
+ if x.startswith("vrf_"):
+ if x not in temp:
+ temp[x] = {}
+ temp[x][k] = af[k]
+
+ for k in temp.keys():
+ if k == "vrf_":
+ # populate global AFs
+ entry["address_family"][k] = temp[k]
+ else:
+ # populate VRF AFs
+ entry["vrfs"][k.replace("vrf_", "", 1)] = temp[k]
+
+ entry["address_family"] = entry["address_family"].get("vrf_", {})
+
+ # final structure: https://gist.github.com/NilashishC/628dae5fe39a4908e87c9e833bfbe57d
+
+ def _remove_af(self, want_af, have_af, vrf=None, remove=False, purge=False):
+ cur_ptr = len(self.commands)
+ for k, v in iteritems(have_af):
+ # first conditional is for deleted with config provided
+ # second conditional is for overridden
+ # third condition is for deleted with empty config
+ if any(
+ (
+ (remove and k in want_af),
+ (not remove and k not in want_af),
+ purge,
+ ),
+ ):
+ self.addcmd(v, "address_family", True)
+ if cur_ptr < len(self.commands) and vrf:
+ self.commands.insert(cur_ptr, "vrf {0}".format(vrf))
+ self.commands.append("exit")
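_bgp_af_list_to_dict keys every address-family context by a tuple derived from afi, safi and vrf so that want and have can be diffed as dictionaries, and the empty vrf component separates global contexts from VRF ones. A standalone sketch of that keying, using made-up data:

def af_key(af):
    """Mimic the nested _build_key: afi is mandatory, safi and vrf are optional."""
    return ("afi_" + af["afi"], "safi_" + af.get("safi", ""), "vrf_" + af.get("vrf", ""))


address_family = [
    {"afi": "ipv4", "safi": "unicast"},
    {"afi": "l2vpn", "safi": "evpn"},
    {"afi": "ipv4", "safi": "unicast", "vrf": "site-1"},
]

keyed = {af_key(af): af for af in address_family}
global_afs = {k: v for k, v in keyed.items() if k[2] == "vrf_"}
vrf_afs = {k: v for k, v in keyed.items() if k[2] != "vrf_"}

print(sorted(global_afs))  # the two global AF contexts
print(sorted(vrf_afs))     # the single AF context under vrf site-1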
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_global/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_global/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_global/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_global/bgp_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_global/bgp_global.py
new file mode 100644
index 00000000..edb6e59b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_global/bgp_global.py
@@ -0,0 +1,410 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos_bgp_global config file.
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created.
+"""
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module import (
+ ResourceModule,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_merge,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.bgp_global import (
+ Bgp_globalTemplate,
+)
+
+
+class Bgp_global(ResourceModule):
+ """
+ The nxos_bgp_global config class
+ """
+
+ def __init__(self, module):
+ super(Bgp_global, self).__init__(
+ empty_fact_val={},
+ facts_module=Facts(module),
+ module=module,
+ resource="bgp_global",
+ tmplt=Bgp_globalTemplate(),
+ )
+ # VRF parsers = 29
+ self.parsers = [
+ "allocate_index",
+ "affinity_group.group_id",
+ "bestpath.always_compare_med",
+ "bestpath.as_path.ignore",
+ "bestpath.as_path.multipath_relax",
+ "bestpath.compare_neighborid",
+ "bestpath.compare_routerid",
+ "bestpath.cost_community_ignore",
+ "bestpath.igp_metric_ignore",
+ "bestpath.med.confed",
+ "bestpath.med.missing_as_worst",
+ "bestpath.med.non_deterministic",
+ "cluster_id",
+ "local_as",
+ "confederation.identifier",
+ "graceful_restart",
+ "graceful_restart.restart_time",
+ "graceful_restart.stalepath_time",
+ "graceful_restart.helper",
+ "log_neighbor_changes",
+ "maxas_limit",
+ "neighbor_down.fib_accelerate",
+ "reconnect_interval",
+ "router_id",
+ "timers.bestpath_limit",
+ "timers.bgp",
+ "timers.prefix_peer_timeout",
+ "timers.prefix_peer_wait",
+ # end VRF parsers
+ "disable_policy_batching",
+ "disable_policy_batching.ipv4.prefix_list",
+ "disable_policy_batching.ipv6.prefix_list",
+ "disable_policy_batching.nexthop",
+ "dynamic_med_interval",
+ "enforce_first_as",
+ "enhanced_error",
+ "fast_external_fallover",
+ "flush_routes",
+ "graceful_shutdown.activate",
+ "graceful_shutdown.aware",
+ "isolate",
+ "nexthop.suppress_default_resolution",
+ "shutdown",
+ "suppress_fib_pending",
+ "fabric_soo",
+ "rd",
+ ]
+ self._af_data = {}
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ if self.state not in ["parsed", "gathered"]:
+ self.generate_commands()
+ self.run_commands()
+ return self.result
+
+ def generate_commands(self):
+ """Generate configuration commands to send based on
+ want, have and desired state.
+ """
+ # we fail early if state is merged or
+ # replaced and want ASN != have ASN
+ if self.state in ["merged", "replaced"]:
+ w_asn = self.want.get("as_number")
+ h_asn = self.have.get("as_number")
+
+ if h_asn and w_asn != h_asn:
+ self._module.fail_json(
+ msg="BGP is already configured with ASN {0}. "
+ "Please remove it with state purged before "
+ "configuring new ASN".format(h_asn),
+ )
+
+ if self.state in ["deleted", "replaced"]:
+ self._build_af_data()
+
+ for entry in self.want, self.have:
+ self._bgp_list_to_dict(entry)
+
+ # if state is deleted, clean up global params
+ if self.state == "deleted":
+ if not self.want or (self.have.get("as_number") == self.want.get("as_number")):
+ self._compare(want={}, have=self.have)
+
+ elif self.state == "purged":
+ if not self.want or (self.have.get("as_number") == self.want.get("as_number")):
+ self.addcmd(self.have or {}, "as_number", True)
+
+ else:
+ wantd = self.want
+ # if state is merged, merge want onto have and then compare
+ if self.state == "merged":
+ wantd = dict_merge(self.have, self.want)
+
+ self._compare(want=wantd, have=self.have)
+
+ def _compare(self, want, have, vrf=None):
+ """Leverages the base class `compare()` method and
+ populates the list of commands to be run by comparing
+ the `want` and `have` data with the `parsers` defined
+ for the Bgp_global network resource.
+ """
+ begin = len(self.commands)
+ self.compare(parsers=self.parsers, want=want, have=have)
+ self._compare_confederation_peers(want, have)
+ self._compare_neighbors(want, have, vrf=vrf)
+ self._vrfs_compare(want=want, have=have)
+
+ if len(self.commands) != begin or (not have and want):
+ self.commands.insert(
+ begin,
+ self._tmplt.render(
+ want or have,
+ "vrf" if "vrf" in (want.keys() or have.keys()) else "as_number",
+ False,
+ ),
+ )
+
+ def _compare_confederation_peers(self, want, have):
+ """Custom handling of confederation.peers option
+
+ :params want: the want BGP dictionary
+ :params have: the have BGP dictionary
+ """
+ w_cpeers = want.get("confederation", {}).get("peers", [])
+ h_cpeers = have.get("confederation", {}).get("peers", [])
+
+ if set(w_cpeers) != set(h_cpeers):
+ if self.state in ["replaced", "deleted"]:
+ # if there are peers already configured
+ # we need to remove those before we pass
+ # the new ones otherwise the device appends
+ # them to the existing ones
+ if h_cpeers:
+ self.addcmd(have, "confederation.peers", True)
+ if w_cpeers:
+ self.addcmd(want, "confederation.peers", False)
+
+ def _compare_neighbors(self, want, have, vrf=None):
+ """Custom handling of neighbors option
+
+ :params want: the want BGP dictionary
+ :params have: the have BGP dictionary
+ """
+ nbr_parsers = [
+ "remote_as",
+ "bfd",
+ "bfd.multihop.interval",
+ "neighbor_affinity_group.group_id",
+ "bmp_activate_server",
+ "capability",
+ "description",
+ "disable_connected_check",
+ "dont_capability_negotiate",
+ "dscp",
+ "dynamic_capability",
+ "ebgp_multihop",
+ "graceful_shutdown",
+ "inherit.peer",
+ "inherit.peer_session",
+ "local_as",
+ "log_neighbor_changes",
+ "low_memory",
+ "password",
+ "peer_type",
+ "remove_private_as",
+ "shutdown",
+ "timers",
+ "transport",
+ "ttl_security",
+ "update_source",
+ ]
+ wnbrs = want.get("neighbors", {})
+ hnbrs = have.get("neighbors", {})
+
+ # neighbors have separate contexts in NX-OS
+ for name, entry in iteritems(wnbrs):
+ begin = len(self.commands)
+ have_nbr = hnbrs.pop(name, {})
+
+ self.compare(parsers=nbr_parsers, want=entry, have=have_nbr)
+ self._compare_path_attribute(entry, have_nbr)
+
+ if len(self.commands) != begin:
+ self.commands.insert(begin, self._tmplt.render(entry, "neighbor_address", False))
+
+ # cleanup remaining neighbors
+ # but do not negate it entirely
+ # instead remove only those attributes
+ # that this module manages
+ for name, entry in iteritems(hnbrs):
+ if self._has_af(vrf=vrf, neighbor=name):
+ self._module.fail_json(
+ msg="Neighbor {0} has address-family configurations. "
+ "Please use the nxos_bgp_neighbor_af module to remove those first.".format(
+ name,
+ ),
+ )
+ else:
+ self.addcmd(entry, "neighbor_address", True)
+
+ def _compare_path_attribute(self, want, have):
+ """Custom handling of neighbor path_attribute
+ option.
+
+ :params want: the want neighbor dictionary
+ :params have: the have neighbor dictionary
+ """
+ w_p_attr = want.get("path_attribute", {})
+ h_p_attr = have.get("path_attribute", {})
+
+ for wkey, wentry in iteritems(w_p_attr):
+ if wentry != h_p_attr.pop(wkey, {}):
+ self.addcmd(wentry, "path_attribute", False)
+
+ # remove remaining items in have for replaced
+ for hkey, hentry in iteritems(h_p_attr):
+ self.addcmd(hentry, "path_attribute", True)
+
+ def _vrfs_compare(self, want, have):
+ """Custom handling of VRFs option
+
+ :params want: the want BGP dictionary
+ :params have: the have BGP dictionary
+ """
+ wvrfs = want.get("vrfs", {})
+ hvrfs = have.get("vrfs", {})
+ for name, entry in iteritems(wvrfs):
+ self._compare(want=entry, have=hvrfs.pop(name, {}), vrf=name)
+ # cleanup remaining VRFs
+ # but do not negate it entirely
+ # instead remove only those attributes
+ # that this module manages
+ for name, entry in iteritems(hvrfs):
+ if self._has_af(vrf=name):
+ self._module.fail_json(
+ msg="VRF {0} has address-family configurations. "
+ "Please use the nxos_bgp_af module to remove those first.".format(name),
+ )
+ else:
+ self.addcmd(entry, "vrf", True)
+
+ def _bgp_list_to_dict(self, entry):
+ """Convert list of items to dict of items
+ for efficient diff calculation.
+
+ :params entry: data dictionary
+ """
+
+ def _build_key(x):
+ """Build primary key for path_attribute
+ option.
+ :params x: path_attribute dictionary
+ :returns: primary key as tuple
+ """
+ key_1 = "start_{0}".format(x.get("range", {}).get("start", ""))
+ key_2 = "end_{0}".format(x.get("range", {}).get("end", ""))
+ key_3 = "type_{0}".format(x.get("type", ""))
+ key_4 = x["action"]
+
+ return (key_1, key_2, key_3, key_4)
+
+ if "neighbors" in entry:
+ for x in entry["neighbors"]:
+ if "path_attribute" in x:
+ x["path_attribute"] = {
+ _build_key(item): item for item in x.get("path_attribute", [])
+ }
+
+ entry["neighbors"] = {x["neighbor_address"]: x for x in entry.get("neighbors", [])}
+
+ if "vrfs" in entry:
+ entry["vrfs"] = {x["vrf"]: x for x in entry.get("vrfs", [])}
+ for _k, vrf in iteritems(entry["vrfs"]):
+ self._bgp_list_to_dict(vrf)
+
+ def _get_config(self):
+ return self._connection.get("show running-config | section '^router bgp'")
+
+ def _build_af_data(self):
+ """Build a dictionary with AF related information
+ from fetched BGP config.
+ _af_data = {
+            'global': {'192.168.1.100', '192.168.1.101'},
+            'vrf': {
+ 'vrf_1': {
+ 'has_af': True,
+ 'nbrs': {'192.0.1.1', '192.8.1.1'}
+ },
+ 'vrf_2': {
+ 'has_af': False,
+ 'nbrs': set()
+ }
+ }
+ }
+ """
+ data = self._get_config().split("\n")
+ cur_nbr = None
+ cur_vrf = None
+ gbl_data = set()
+ vrf_data = {}
+
+ for x in data:
+ if x.strip().startswith("vrf"):
+ cur_nbr = None
+ cur_vrf = x.split(" ")[-1]
+ vrf_data[cur_vrf] = {"nbrs": set(), "has_af": False}
+
+ elif x.strip().startswith("neighbor"):
+ cur_nbr = x.split(" ")[-1]
+
+ elif x.strip().startswith("address-family"):
+ if cur_nbr:
+ if cur_vrf:
+ vrf_data[cur_vrf]["nbrs"].add(cur_nbr)
+ else:
+ gbl_data.add(cur_nbr)
+ else:
+ if cur_vrf:
+ vrf_data[cur_vrf]["has_af"] = True
+
+ self._af_data["global"] = gbl_data
+ self._af_data["vrf"] = vrf_data
+
+ def _has_af(self, vrf=None, neighbor=None):
+ """Determine if the given vrf + neighbor
+ combination has AF configurations.
+
+ :params vrf: vrf name
+ :params neighbor: neighbor name
+ :returns: bool
+ """
+ has_af = False
+
+ if self._af_data:
+ vrf_af_data = self._af_data.get("vrf", {})
+ global_af_data = self._af_data.get("global", set())
+ if vrf:
+ vrf_nbr_has_af = vrf_af_data.get(vrf, {}).get("nbrs", set())
+ vrf_has_af = vrf_af_data.get(vrf, {}).get("has_af", False)
+ if neighbor and neighbor in vrf_nbr_has_af:
+ # we are inspecting neighbor within a VRF
+ # if the given neighbor has AF we return True
+ has_af = True
+ else:
+ # we are inspecting VRF as a whole
+ # if there is at least one neighbor
+ # with AF or VRF has AF itself return True
+ if vrf_nbr_has_af or vrf_has_af:
+ has_af = True
+ else:
+ # we are inspecting top level neighbors
+ # if the given neighbor has AF we return True
+ if neighbor and neighbor in global_af_data:
+ has_af = True
+
+ return has_af
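_build_af_data walks the raw output of show running-config | section '^router bgp' to learn which neighbors and VRFs carry address-family sub-modes, so deletions can fail early instead of leaving orphaned AF config. A standalone sketch of that walk over a made-up config snippet:

config = """
router bgp 65001
  neighbor 192.168.1.100
    address-family ipv4 unicast
  vrf site-1
    address-family ipv4 unicast
    neighbor 192.0.1.1
      address-family ipv4 unicast
"""

gbl, vrfs, cur_vrf, cur_nbr = set(), {}, None, None
for line in config.splitlines():
    text = line.strip()
    if text.startswith("vrf"):
        cur_vrf, cur_nbr = text.split(" ")[-1], None
        vrfs[cur_vrf] = {"nbrs": set(), "has_af": False}
    elif text.startswith("neighbor"):
        cur_nbr = text.split(" ")[-1]
    elif text.startswith("address-family"):
        if cur_nbr:
            (vrfs[cur_vrf]["nbrs"] if cur_vrf else gbl).add(cur_nbr)
        elif cur_vrf:
            vrfs[cur_vrf]["has_af"] = True

print(gbl)   # {'192.168.1.100'}
print(vrfs)  # {'site-1': {'nbrs': {'192.0.1.1'}, 'has_af': True}}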
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_neighbor_address_family/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_neighbor_address_family/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_neighbor_address_family/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_neighbor_address_family/bgp_neighbor_address_family.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_neighbor_address_family/bgp_neighbor_address_family.py
new file mode 100644
index 00000000..96902987
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/bgp_neighbor_address_family/bgp_neighbor_address_family.py
@@ -0,0 +1,232 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos_bgp_neighbor_address_family config file.
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created.
+"""
+from copy import deepcopy
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module import (
+ ResourceModule,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_merge,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.bgp_neighbor_address_family import (
+ Bgp_neighbor_address_familyTemplate,
+)
+
+
+class Bgp_neighbor_address_family(ResourceModule):
+ """
+ The nxos_bgp_neighbor_address_family config class
+ """
+
+ def __init__(self, module):
+ super(Bgp_neighbor_address_family, self).__init__(
+ empty_fact_val={},
+ facts_module=Facts(module),
+ module=module,
+ resource="bgp_neighbor_address_family",
+ tmplt=Bgp_neighbor_address_familyTemplate(),
+ )
+ self.parsers = [
+ "advertise_map.exist_map",
+ "advertise_map.non_exist_map",
+ "advertisement_interval",
+ "allowas_in",
+ "as_override",
+ "capability.additional_paths.receive",
+ "capability.additional_paths.send",
+ "default_originate",
+ "disable_peer_as_check",
+ "filter_list.inbound",
+ "filter_list.outbound",
+ "inherit",
+ "maximum_prefix",
+ "next_hop_self",
+ "next_hop_third_party",
+ "prefix_list.inbound",
+ "prefix_list.outbound",
+ "rewrite_evpn_rt_asn",
+ "route_map.inbound",
+ "route_map.outbound",
+ "route_reflector_client",
+ "send_community.extended",
+ "send_community.standard",
+ "soft_reconfiguration_inbound",
+ "soo",
+ "suppress_inactive",
+ "unsuppress_map",
+ "weight",
+ ]
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ if self.state not in ["parsed", "gathered"]:
+ self.generate_commands()
+ self.run_commands()
+ return self.result
+
+ def generate_commands(self):
+ """Generate configuration commands to send based on
+ want, have and desired state.
+ """
+ wantd = deepcopy(self.want)
+ haved = deepcopy(self.have)
+
+ for entry in wantd, haved:
+ self._bgp_list_to_dict(entry)
+
+ # if state is merged, merge want onto have and then compare
+ if self.state == "merged":
+ wantd = dict_merge(haved, wantd)
+
+ # if state is deleted, empty out wantd and set haved to elements to delete
+ if self.state == "deleted":
+ if wantd:
+ to_del = {
+ "neighbors": self._set_to_delete(haved, wantd),
+ "vrfs": {},
+ }
+
+ for k, hvrf in iteritems(haved.get("vrfs", {})):
+ wvrf = wantd.get("vrfs", {}).get(k, {})
+ to_del["vrfs"][k] = {
+ "neighbors": self._set_to_delete(hvrf, wvrf),
+ "vrf": k,
+ }
+ haved.update(to_del)
+
+ wantd = {}
+
+ self._compare(want=wantd, have=haved)
+
+ if self.commands:
+ self.commands.insert(0, "router bgp {as_number}".format(**haved or wantd))
+
+ def _compare(self, want, have, vrf=""):
+ """Leverages the base class `compare()` method and
+ populates the list of commands to be run by comparing
+ the `want` and `have` data with the `parsers` defined
+ for the Bgp_neighbor_address_family network resource.
+ """
+ w_nbrs = want.get("neighbors", {})
+ h_nbrs = have.get("neighbors", {})
+
+ if vrf:
+ begin_vrf = len(self.commands)
+
+ for k, w_nbr in iteritems(w_nbrs):
+ begin = len(self.commands)
+ h_nbr = h_nbrs.pop(k, {})
+ want_afs = w_nbr.get("address_family", {})
+ have_afs = h_nbr.get("address_family", {})
+
+ for k, want_af in iteritems(want_afs):
+ begin_af = len(self.commands)
+ have_af = have_afs.pop(k, {})
+
+                # expand `both`/`set` into explicit flags for idempotence
+ if "send_community" in want_af:
+ if want_af["send_community"].get("both"):
+ want_af["send_community"] = {
+ "extended": True,
+ "standard": True,
+ }
+ elif want_af["send_community"].get("set"):
+ want_af["send_community"].update({"standard": True})
+
+ self.compare(parsers=self.parsers, want=want_af, have=have_af)
+
+ if len(self.commands) != begin_af or (not have_af and want_af):
+ self.commands.insert(
+ begin_af,
+ self._tmplt.render(want_af, "address_family", False),
+ )
+
+ # remove remaining items in have for replaced
+ for k, have_af in iteritems(have_afs):
+ self.addcmd(have_af, "address_family", True)
+
+ if len(self.commands) != begin:
+ self.commands.insert(begin, "neighbor {0}".format(w_nbr["neighbor_address"]))
+
+ if self.state in ["overridden", "deleted"]:
+ for k, h_nbr in iteritems(h_nbrs):
+ begin = len(self.commands)
+ if not w_nbrs.pop(k, {}):
+ have_afs = h_nbr.get("address_family", {})
+ for k, have_af in iteritems(have_afs):
+ self.addcmd(have_af, "address_family", True)
+ if len(self.commands) != begin:
+ self.commands.insert(begin, "neighbor {0}".format(h_nbr["neighbor_address"]))
+
+ if vrf:
+ if len(self.commands) != begin_vrf:
+ self.commands.insert(begin_vrf, "vrf {0}".format(vrf))
+ else:
+ self._vrfs_compare(want, have)
+
+ def _vrfs_compare(self, want, have):
+ wvrfs = want.get("vrfs", {})
+ hvrfs = have.get("vrfs", {})
+ for k, wvrf in iteritems(wvrfs):
+ h_vrf = hvrfs.pop(k, {})
+ self._compare(want=wvrf, have=h_vrf, vrf=k)
+ # remove remaining items in have
+ for k, h_vrf in iteritems(hvrfs):
+ self._compare(want={}, have=h_vrf, vrf=k)
+
+ def _bgp_list_to_dict(self, data):
+ if "neighbors" in data:
+ for nbr in data["neighbors"]:
+ if "address_family" in nbr:
+ nbr["address_family"] = {
+ (x["afi"], x.get("safi")): x for x in nbr["address_family"]
+ }
+ data["neighbors"] = {x["neighbor_address"]: x for x in data["neighbors"]}
+
+ if "vrfs" in data:
+ for vrf in data["vrfs"]:
+ self._bgp_list_to_dict(vrf)
+ data["vrfs"] = {x["vrf"]: x for x in data["vrfs"]}
+
+ def _set_to_delete(self, haved, wantd):
+ neighbors = {}
+ h_nbrs = haved.get("neighbors", {})
+ w_nbrs = wantd.get("neighbors", {})
+
+ for k, h_nbr in iteritems(h_nbrs):
+ w_nbr = w_nbrs.pop(k, {})
+ if w_nbr:
+ neighbors[k] = h_nbr
+ afs_to_del = {}
+ h_addrs = h_nbr.get("address_family", {})
+ w_addrs = w_nbr.get("address_family", {})
+ for af, h_addr in iteritems(h_addrs):
+ if af in w_addrs:
+ afs_to_del[af] = h_addr
+ neighbors[k]["address_family"] = afs_to_del
+
+ return neighbors
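Inside _compare, a send_community value of both/set is expanded into explicit extended/standard flags so the play compares idempotently against gathered facts. A standalone sketch of that normalization, with an assumed address-family dict shape:

def normalize_send_community(af):
    """Expand `both`/`set` shorthands into explicit flags, mirroring _compare."""
    sc = af.get("send_community")
    if not sc:
        return af
    if sc.get("both"):
        af["send_community"] = {"extended": True, "standard": True}
    elif sc.get("set"):
        sc.update({"standard": True})
    return af


print(normalize_send_community({"afi": "ipv4", "send_community": {"both": True}}))
# {'afi': 'ipv4', 'send_community': {'extended': True, 'standard': True}}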
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hostname/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hostname/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hostname/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hostname/hostname.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hostname/hostname.py
new file mode 100644
index 00000000..42ed0694
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hostname/hostname.py
@@ -0,0 +1,75 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos_hostname config file.
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created.
+"""
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module import (
+ ResourceModule,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.hostname import (
+ HostnameTemplate,
+)
+
+
+class Hostname(ResourceModule):
+ """
+ The nxos_hostname config class
+ """
+
+ def __init__(self, module):
+ super(Hostname, self).__init__(
+ empty_fact_val={},
+ facts_module=Facts(module),
+ module=module,
+ resource="hostname",
+ tmplt=HostnameTemplate(),
+ )
+ self.parsers = ["hostname"]
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ if self.state not in ["parsed", "gathered"]:
+ self.generate_commands()
+ self.run_commands()
+ return self.result
+
+ def generate_commands(self):
+ """Generate configuration commands to send based on
+ want, have and desired state.
+ """
+ want = self.want
+ have = self.have
+
+ if self.state == "deleted":
+ want = {}
+
+ self._compare(want, have)
+
+ def _compare(self, want, have):
+ """Leverages the base class `compare()` method and
+ populates the list of commands to be run by comparing
+ the `want` and `have` data with the `parsers` defined
+        for the Hostname network resource.
+ """
+ self.compare(parsers=self.parsers, want=want, have=have)
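With state deleted, generate_commands empties want so the comparison negates the configured value. A standalone sketch of that behavior, assuming a single hostname key rather than the real parser/template plumbing:

def hostname_commands(want, have, state):
    """Return the CLI delta for the hostname, mirroring the deleted-state handling."""
    if state == "deleted":
        want = {}
    w, h = want.get("hostname"), have.get("hostname")
    if w and w != h:
        return ["hostname {0}".format(w)]
    if not w and h:
        return ["no hostname {0}".format(h)]
    return []


print(hostname_commands({}, {"hostname": "nxos-101"}, "deleted"))  # ['no hostname nxos-101']
print(hostname_commands({"hostname": "edge-1"}, {}, "merged"))      # ['hostname edge-1']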
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hsrp_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hsrp_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hsrp_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hsrp_interfaces/hsrp_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hsrp_interfaces/hsrp_interfaces.py
new file mode 100644
index 00000000..757505e7
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/hsrp_interfaces/hsrp_interfaces.py
@@ -0,0 +1,286 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Cisco and/or its affiliates.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos hsrp_interfaces class
+This class creates a command set to bring the current device configuration
+to a desired end-state. The command set is based on a comparison of the
+current configuration (as dict) and the provided configuration (as dict).
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_diff,
+ to_list,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ flatten_dict,
+ normalize_interface,
+ search_obj_in_list,
+)
+
+
+class Hsrp_interfaces(ConfigBase):
+ """
+ The nxos_hsrp_interfaces class
+ """
+
+ gather_subset = ["!all", "!min"]
+
+ gather_network_resources = ["hsrp_interfaces"]
+
+ def __init__(self, module):
+ super(Hsrp_interfaces, self).__init__(module)
+
+ def get_hsrp_interfaces_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ hsrp_interfaces_facts = facts["ansible_network_resources"].get("hsrp_interfaces", [])
+ return hsrp_interfaces_facts
+
+ def edit_config(self, commands):
+ return self._connection.edit_config(commands)
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ warnings = []
+ commands = []
+
+ if self.state in self.ACTION_STATES:
+ existing_hsrp_interfaces_facts = self.get_hsrp_interfaces_facts()
+ else:
+ existing_hsrp_interfaces_facts = []
+
+ if self.state in self.ACTION_STATES or self.state == "rendered":
+ commands.extend(self.set_config(existing_hsrp_interfaces_facts))
+
+ if commands and self.state in self.ACTION_STATES:
+ if not self._module.check_mode:
+ self.edit_config(commands)
+ result["changed"] = True
+
+ if self.state in self.ACTION_STATES:
+ result["commands"] = commands
+
+ if self.state in self.ACTION_STATES or self.state == "gathered":
+ changed_hsrp_interfaces_facts = self.get_hsrp_interfaces_facts()
+
+ elif self.state == "rendered":
+ result["rendered"] = commands
+
+ elif self.state == "parsed":
+ running_config = self._module.params["running_config"]
+ if not running_config:
+ self._module.fail_json(
+ msg="value of running_config parameter must not be empty for state parsed",
+ )
+ result["parsed"] = self.get_hsrp_interfaces_facts(data=running_config)
+
+ if self.state in self.ACTION_STATES:
+ result["before"] = existing_hsrp_interfaces_facts
+ if result["changed"]:
+ result["after"] = changed_hsrp_interfaces_facts
+
+ elif self.state == "gathered":
+ result["gathered"] = changed_hsrp_interfaces_facts
+
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_hsrp_interfaces_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ config = self._module.params["config"]
+ want = []
+ if config:
+ for w in config:
+ w.update({"name": normalize_interface(w["name"])})
+ want.append(w)
+ have = existing_hsrp_interfaces_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params["state"]
+ # check for 'config' keyword in play
+ if state in ("overridden", "merged", "replaced", "rendered") and not want:
+ self._module.fail_json(
+ msg="value of config parameter must not be empty for state {0}".format(state),
+ )
+
+ cmds = list()
+ if state == "overridden":
+ cmds.extend(self._state_overridden(want, have))
+ elif state == "deleted":
+ cmds.extend(self._state_deleted(want, have))
+ else:
+ for w in want:
+ if state in ["merged", "rendered"]:
+ cmds.extend(self._state_merged(flatten_dict(w), have))
+ elif state == "replaced":
+ cmds.extend(self._state_replaced(flatten_dict(w), have))
+ return cmds
+
+ def _state_replaced(self, want, have):
+ """The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ cmds = []
+ obj_in_have = search_obj_in_list(want["name"], have, "name")
+ if obj_in_have:
+ diff = dict_diff(want, obj_in_have)
+ else:
+ diff = want
+ merged_cmds = self.set_commands(want, have)
+ if "name" not in diff:
+ diff["name"] = want["name"]
+
+ replaced_cmds = []
+ if obj_in_have:
+ replaced_cmds = self.del_attribs(diff)
+ if replaced_cmds or merged_cmds:
+ for cmd in set(replaced_cmds).intersection(set(merged_cmds)):
+ merged_cmds.remove(cmd)
+ cmds.extend(replaced_cmds)
+ cmds.extend(merged_cmds)
+ return cmds
+
+ def _state_overridden(self, want, have):
+ """The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ cmds = []
+ for h in have:
+ # Check existing states; set to default if not in want or different from want
+ h = flatten_dict(h)
+ obj_in_want = search_obj_in_list(h["name"], want, "name")
+ if obj_in_want:
+ # Let the 'want' loop handle all vals for this interface
+ continue
+ cmds.extend(self.del_attribs(h))
+ for w in want:
+ # Update any want attrs if needed. The overridden state considers
+ # the play as the source of truth for the entire device, therefore
+ # set any unspecified attrs to their default state.
+ w = self.set_none_vals_to_defaults(flatten_dict(w))
+ cmds.extend(self.set_commands(w, have))
+ return cmds
+
+ def _state_merged(self, want, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ return self.set_commands(want, have)
+
+ def _state_deleted(self, want, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ if not (want or have):
+ return []
+ cmds = []
+ if want:
+ for w in want:
+ obj_in_have = flatten_dict(search_obj_in_list(w["name"], have, "name"))
+ cmds.extend(self.del_attribs(obj_in_have))
+ else:
+ for h in have:
+ cmds.extend(self.del_attribs(flatten_dict(h)))
+ return cmds
+
+ def del_attribs(self, obj):
+ if not obj or len(obj.keys()) == 1:
+ return []
+ cmds = []
+ if "bfd" in obj:
+ cmds.append("no hsrp bfd")
+ if cmds:
+ cmds.insert(0, "interface " + obj["name"])
+ return cmds
+
+ def set_none_vals_to_defaults(self, want):
+ # Set dict None values to default states
+ if "bfd" in want and want["bfd"] is None:
+ want["bfd"] = "disable"
+ return want
+
+ def diff_of_dicts(self, want, obj_in_have):
+ diff = set(want.items()) - set(obj_in_have.items())
+ diff = dict(diff)
+ if diff and want["name"] == obj_in_have["name"]:
+ diff.update({"name": want["name"]})
+ return diff
+
+ def add_commands(self, want, obj_in_have):
+ if not want:
+ return []
+ cmds = []
+ if "bfd" in want and want["bfd"] is not None:
+ if want["bfd"] == "enable":
+ cmd = "hsrp bfd"
+ cmds.append(cmd)
+ elif want["bfd"] == "disable" and obj_in_have and obj_in_have.get("bfd") == "enable":
+ cmd = "no hsrp bfd"
+ cmds.append(cmd)
+
+ if cmds:
+ cmds.insert(0, "interface " + want["name"])
+ return cmds
+
+ def set_commands(self, want, have):
+ cmds = []
+ obj_in_have = search_obj_in_list(want["name"], have, "name")
+ if not obj_in_have:
+ cmds = self.add_commands(want, obj_in_have)
+ else:
+ diff = self.diff_of_dicts(want, obj_in_have)
+ cmds = self.add_commands(diff, obj_in_have)
+ return cmds
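
A minimal standalone sketch of the merged-state flow above (search_obj_in_list, a diff of the want/have dicts, then add_commands for the bfd attribute), useful for tracing the generated CLI without the Ansible plumbing. The helper below is a simplified stand-in for the imported search_obj_in_list, and hsrp_merged_commands only mirrors the bfd handling shown in this file; it is an illustration, not part of the change set.

# Illustrative sketch only -- not part of the diff above.
def search_obj_in_list(name, lst, key):
    # Simplified stand-in: return the first dict whose value under `key` matches `name`.
    return next((o for o in lst if o.get(key) == name), None)

def hsrp_merged_commands(want, have):
    # Mirrors the merged-state path: diff want against have, then emit bfd commands.
    obj_in_have = search_obj_in_list(want["name"], have, "name")
    d = want if not obj_in_have else dict(set(want.items()) - set(obj_in_have.items()))
    cmds = []
    if d.get("bfd") == "enable":
        cmds.append("hsrp bfd")
    elif d.get("bfd") == "disable" and obj_in_have and obj_in_have.get("bfd") == "enable":
        cmds.append("no hsrp bfd")
    if cmds:
        cmds.insert(0, "interface " + want["name"])
    return cmds

if __name__ == "__main__":
    have = [{"name": "Ethernet1/1", "bfd": "disable"}]
    want = {"name": "Ethernet1/1", "bfd": "enable"}
    print(hsrp_merged_commands(want, have))
    # ['interface Ethernet1/1', 'hsrp bfd']
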
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/interfaces/interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/interfaces/interfaces.py
new file mode 100644
index 00000000..32c5f6fe
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/interfaces/interfaces.py
@@ -0,0 +1,492 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos_interfaces class
+This file compares the current configuration (as a dict) to the
+provided configuration (as a dict) and builds the command set
+necessary to bring the current configuration to its desired
+end state.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_diff,
+ remove_empties,
+ to_list,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import (
+ default_intf_enabled,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ normalize_interface,
+ search_obj_in_list,
+)
+
+
+class Interfaces(ConfigBase):
+ """
+ The nxos_interfaces class
+ """
+
+ gather_subset = ["min"]
+
+ gather_network_resources = ["interfaces"]
+
+ exclude_params = ["description", "mtu", "speed", "duplex"]
+
+ def __init__(self, module):
+ super(Interfaces, self).__init__(module)
+
+ def get_interfaces_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :data: Mocked running-config data for state `parsed`
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ self.facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ interfaces_facts = self.facts["ansible_network_resources"].get("interfaces")
+
+ return interfaces_facts
+
+ def get_platform(self):
+ """Wrapper method for getting platform info
+ This method exists solely to allow the unit test framework to mock calls.
+ """
+ return self.facts.get("ansible_net_platform", "")
+
+ def get_system_defaults(self):
+ """Wrapper method for `_connection.get()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return self._connection.get("show running-config all | incl 'system default switchport'")
+
+ def edit_config(self, commands):
+ """Wrapper method for `_connection.edit_config()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return self._connection.edit_config(commands)
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ commands = []
+ warnings = []
+
+ if self.state in self.ACTION_STATES:
+ existing_interfaces_facts = self.get_interfaces_facts()
+ else:
+ existing_interfaces_facts = []
+
+ if self.state in self.ACTION_STATES:
+ self.intf_defs = self.render_interface_defaults(
+ self.get_system_defaults(),
+ existing_interfaces_facts,
+ )
+ commands.extend(self.set_config(existing_interfaces_facts))
+
+ if self.state == "rendered":
+ # Hardcode the system defaults for "rendered"
+ # This can be made a configurable option in the future
+ self.intf_defs = {
+ "sysdefs": {
+ "L2_enabled": False,
+ "L3_enabled": False,
+ "mode": "layer3",
+ },
+ }
+ commands.extend(self.set_config(existing_interfaces_facts))
+
+ if commands and self.state in self.ACTION_STATES:
+ if not self._module.check_mode:
+ self.edit_config(commands)
+ result["changed"] = True
+
+ if self.state in self.ACTION_STATES:
+ result["commands"] = commands
+
+ if self.state in self.ACTION_STATES or self.state == "gathered":
+ changed_interfaces_facts = self.get_interfaces_facts()
+
+ elif self.state == "rendered":
+ result["rendered"] = commands
+
+ elif self.state == "parsed":
+ running_config = self._module.params["running_config"]
+ if not running_config:
+ self._module.fail_json(
+ msg="value of running_config parameter must not be empty for state parsed",
+ )
+ result["parsed"] = self.get_interfaces_facts(data=running_config)
+
+ if self.state in self.ACTION_STATES:
+ result["before"] = existing_interfaces_facts
+ if result["changed"]:
+ result["after"] = changed_interfaces_facts
+
+ elif self.state == "gathered":
+ result["gathered"] = changed_interfaces_facts
+
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_interfaces_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ config = self._module.params.get("config")
+ want = []
+ if config:
+ for w in config:
+ w.update({"name": normalize_interface(w["name"])})
+ want.append(remove_empties(w))
+ have = deepcopy(existing_interfaces_facts)
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params["state"]
+ if state in ("overridden", "merged", "replaced", "rendered") and not want:
+ self._module.fail_json(
+ msg="value of config parameter must not be empty for state {0}".format(state),
+ )
+
+ commands = list()
+ if state == "overridden":
+ commands.extend(self._state_overridden(want, have))
+ elif state == "deleted":
+ commands.extend(self._state_deleted(want, have))
+ elif state == "purged":
+ commands.extend(self._state_purged(want, have))
+ else:
+ for w in want:
+ if state in ["merged", "rendered"]:
+ commands.extend(self._state_merged(w, have))
+ elif state == "replaced":
+ commands.extend(self._state_replaced(w, have))
+ return commands
+
+ def _state_replaced(self, w, have):
+ """The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ name = w["name"]
+ obj_in_have = search_obj_in_list(name, have, "name")
+ if obj_in_have:
+ # If 'w' does not specify a mode, the intf may need to change to its
+ # default mode; however, the default mode may depend on the system defaults (sysdefs).
+ if not w.get("mode") and re.search("Ethernet|port-channel", name):
+ sysdefs = self.intf_defs["sysdefs"]
+ sysdef_mode = sysdefs["mode"]
+ if obj_in_have.get("mode") != sysdef_mode:
+ w["mode"] = sysdef_mode
+ diff = dict_diff(w, obj_in_have)
+ else:
+ diff = w
+
+ merged_commands = self.set_commands(w, have)
+ # merged_commands:
+ # - These commands are changes specified by the playbook.
+ # - merged_commands apply to both existing and new objects
+ # replaced_commands:
+ # - These are the unspecified commands, used to reset any params
+ # that are not already set to default states
+ # - replaced_commands should only be used on 'have' objects
+ # (interfaces that already exist)
+ if obj_in_have:
+ if "name" not in diff:
+ diff["name"] = name
+ wkeys = w.keys()
+ dkeys = diff.keys()
+ for k in wkeys:
+ if k in self.exclude_params and k in dkeys:
+ del diff[k]
+ replaced_commands = self.del_attribs(diff)
+ cmds = set(replaced_commands).intersection(set(merged_commands))
+ for cmd in cmds:
+ merged_commands.remove(cmd)
+ commands.extend(replaced_commands)
+
+ commands.extend(merged_commands)
+ return commands
+
+ def _state_overridden(self, want, have):
+ """The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ # overridden is the same as replaced behavior except for the scope.
+ cmds = []
+ existing_interfaces = []
+ for h in have:
+ existing_interfaces.append(h["name"])
+ obj_in_want = search_obj_in_list(h["name"], want, "name")
+ if obj_in_want:
+ if h != obj_in_want:
+ replaced_cmds = self._state_replaced(obj_in_want, [h])
+ if replaced_cmds:
+ cmds.extend(replaced_cmds)
+ else:
+ cmds.extend(self.del_attribs(h))
+
+ for w in want:
+ if w["name"] not in existing_interfaces:
+ # This is an object that was excluded from the 'have' list
+ # because all of its params are currently set to default states
+ # -OR- it's a new object that does not exist on the device yet.
+ cmds.extend(self.add_commands(w))
+ return cmds
+
+ def _state_merged(self, w, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ return self.set_commands(w, have)
+
+ def _state_deleted(self, want, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if want:
+ for w in want:
+ obj_in_have = search_obj_in_list(w["name"], have, "name")
+ commands.extend(self.del_attribs(obj_in_have))
+ else:
+ if not have:
+ return commands
+ for h in have:
+ commands.extend(self.del_attribs(h))
+ return commands
+
+ def _state_purged(self, want, have):
+ """The command generator when state is purged
+
+ :rtype: A list
+ :returns: the commands necessary to purge interfaces from running
+ configuration
+ """
+ commands = []
+ if want:
+ for w in want:
+ obj_in_have = search_obj_in_list(w["name"], have, "name")
+ if obj_in_have:
+ commands.append("no interface {0}".format(w["name"]))
+ return commands
+
+ def default_enabled(self, want=None, have=None, action=""):
+ # 'enabled' default state depends on the interface type and L2 state.
+ # Note that the current default could change when changing L2/L3 modes.
+ if self.state == "rendered":
+ # For "rendered", we always assume that
+ # the default enabled state is False
+ return False
+ if want is None:
+ want = {}
+ if have is None:
+ have = {}
+ name = have.get("name")
+ if name is None:
+ return None
+
+ sysdefs = self.intf_defs["sysdefs"]
+ sysdef_mode = sysdefs["mode"]
+
+ # Get the default enabled state for this interface. This was collected
+ # during Facts gathering.
+ intf_def_enabled = self.intf_defs.get(name)
+
+ have_mode = have.get("mode", sysdef_mode)
+ if action == "delete" and not want:
+ want_mode = sysdef_mode
+ else:
+ want_mode = want.get("mode", have_mode)
+ if (
+ (want_mode and have_mode) is None
+ or (want_mode != have_mode)
+ or intf_def_enabled is None
+ ):
+ # L2-L3 is changing or this is a new virtual intf. Get new default.
+ intf_def_enabled = default_intf_enabled(name=name, sysdefs=sysdefs, mode=want_mode)
+ return intf_def_enabled
+
+ def del_attribs(self, obj):
+ commands = []
+ if not obj or len(obj.keys()) == 1:
+ return commands
+ # mode/switchport changes should occur before other changes
+ sysdef_mode = self.intf_defs["sysdefs"]["mode"]
+ if "mode" in obj and obj["mode"] != sysdef_mode:
+ no_cmd = "no " if sysdef_mode == "layer3" else ""
+ commands.append(no_cmd + "switchport")
+ if "description" in obj:
+ commands.append("no description")
+ if "speed" in obj:
+ commands.append("no speed")
+ if "duplex" in obj:
+ commands.append("no duplex")
+ if "enabled" in obj:
+ sysdef_enabled = self.default_enabled(have=obj, action="delete")
+ if obj["enabled"] is False and sysdef_enabled is True:
+ commands.append("no shutdown")
+ elif obj["enabled"] is True and sysdef_enabled is False:
+ commands.append("shutdown")
+ if "mtu" in obj:
+ commands.append("no mtu")
+ if "ip_forward" in obj and obj["ip_forward"] is True:
+ commands.append("no ip forward")
+ if (
+ "fabric_forwarding_anycast_gateway" in obj
+ and obj["fabric_forwarding_anycast_gateway"] is True
+ ):
+ commands.append("no fabric forwarding mode anycast-gateway")
+ if commands:
+ commands.insert(0, "interface " + obj["name"])
+
+ return commands
+
+ def diff_of_dicts(self, w, obj):
+ diff = set(w.items()) - set(obj.items())
+ diff = dict(diff)
+ if diff and w["name"] == obj["name"]:
+ diff.update({"name": w["name"]})
+ return diff
+
+ def add_commands(self, d, obj_in_have=None):
+ commands = []
+ if obj_in_have is None:
+ obj_in_have = {}
+ # mode/switchport changes should occur before other changes
+ if "mode" in d:
+ sysdef_mode = self.intf_defs["sysdefs"]["mode"]
+ have_mode = obj_in_have.get("mode", sysdef_mode)
+ want_mode = d["mode"]
+ if have_mode == "layer2":
+ if want_mode == "layer3":
+ commands.append("no switchport")
+ elif want_mode == "layer2":
+ commands.append("switchport")
+ if "description" in d:
+ commands.append("description " + d["description"])
+ if "speed" in d:
+ commands.append("speed " + str(d["speed"]))
+ if "duplex" in d:
+ commands.append("duplex " + d["duplex"])
+ if "enabled" in d:
+ have_enabled = obj_in_have.get("enabled", self.default_enabled(d, obj_in_have)) or False
+ if d["enabled"] is False and have_enabled is True:
+ commands.append("shutdown")
+ elif d["enabled"] is True and have_enabled is False:
+ commands.append("no shutdown")
+ if "mtu" in d:
+ commands.append("mtu " + str(d["mtu"]))
+ if "ip_forward" in d:
+ if d["ip_forward"] is True:
+ commands.append("ip forward")
+ else:
+ commands.append("no ip forward")
+ if "fabric_forwarding_anycast_gateway" in d:
+ if d["fabric_forwarding_anycast_gateway"] is True:
+ commands.append("fabric forwarding mode anycast-gateway")
+ else:
+ commands.append("no fabric forwarding mode anycast-gateway")
+ if commands or not obj_in_have:
+ commands.insert(0, "interface" + " " + d["name"])
+ return commands
+
+ def set_commands(self, w, have):
+ commands = []
+ obj_in_have = search_obj_in_list(w["name"], have, "name")
+ if not obj_in_have:
+ commands = self.add_commands(w)
+ else:
+ diff = self.diff_of_dicts(w, obj_in_have)
+ commands = self.add_commands(diff, obj_in_have)
+ return commands
+
+ def render_interface_defaults(self, config, intfs):
+ """Collect user-defined-default states for 'system default switchport'
+ configurations. These configurations determine default L2/L3 modes
+ and enabled/shutdown states. The default values for user-defined-default
+ configurations may be different for legacy platforms.
+ Notes:
+ - L3 enabled default state is False on N9K,N7K but True for N3K,N6K
+ - Changing L2-L3 modes may change the default enabled value.
+ - '(no) system default switchport shutdown' only applies to L2 interfaces.
+ Run through the gathered interfaces and tag their default enabled state.
+ """
+ intf_defs = {}
+ L3_enabled = True if re.search("N[356]K", self.get_platform()) else False
+ intf_defs = {
+ "sysdefs": {
+ "mode": None,
+ "L2_enabled": None,
+ "L3_enabled": L3_enabled,
+ },
+ }
+ pat = "(no )*system default switchport$"
+ m = re.search(pat, config, re.MULTILINE)
+ if m:
+ intf_defs["sysdefs"]["mode"] = "layer3" if "no " in m.groups() else "layer2"
+
+ pat = "(no )*system default switchport shutdown$"
+ m = re.search(pat, config, re.MULTILINE)
+ if m:
+ intf_defs["sysdefs"]["L2_enabled"] = True if "no " in m.groups() else False
+
+ for item in intfs:
+ intf_defs[item["name"]] = default_intf_enabled(
+ name=item["name"],
+ sysdefs=intf_defs["sysdefs"],
+ mode=item.get("mode"),
+ )
+
+ return intf_defs
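
The "system default switchport" parsing in render_interface_defaults() above can be exercised in isolation against a mocked running-config string. The sketch below lifts only the two regex checks from that method; parse_sysdefs is a hypothetical name used for this illustration and is not part of the change set.

# Illustrative sketch only -- not part of the diff above.
import re

def parse_sysdefs(config):
    sysdefs = {"mode": None, "L2_enabled": None}
    m = re.search(r"(no )*system default switchport$", config, re.MULTILINE)
    if m:
        # "no system default switchport" means interfaces default to layer3
        sysdefs["mode"] = "layer3" if "no " in m.groups() else "layer2"
    m = re.search(r"(no )*system default switchport shutdown$", config, re.MULTILINE)
    if m:
        # "no ... shutdown" means L2 interfaces default to enabled
        sysdefs["L2_enabled"] = True if "no " in m.groups() else False
    return sysdefs

if __name__ == "__main__":
    mocked = "no system default switchport\nsystem default switchport shutdown\n"
    print(parse_sysdefs(mocked))
    # {'mode': 'layer3', 'L2_enabled': False}
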
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l2_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l2_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l2_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l2_interfaces/l2_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l2_interfaces/l2_interfaces.py
new file mode 100644
index 00000000..e7d91498
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l2_interfaces/l2_interfaces.py
@@ -0,0 +1,351 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos_l2_interfaces class
+This file compares the current configuration (as a dict) to the
+provided configuration (as a dict) and builds the command set
+necessary to bring the current configuration to its desired
+end state.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_diff,
+ remove_empties,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ flatten_dict,
+ normalize_interface,
+ search_obj_in_list,
+ vlan_list_to_range,
+ vlan_range_to_list,
+)
+
+
+class L2_interfaces(ConfigBase):
+ """
+ The nxos_l2_interfaces class
+ """
+
+ gather_subset = ["!all", "!min"]
+
+ gather_network_resources = ["l2_interfaces"]
+
+ exclude_params = ["vlan", "allowed_vlans", "native_vlans"]
+
+ def __init__(self, module):
+ super(L2_interfaces, self).__init__(module)
+
+ def get_l2_interfaces_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ l2_interfaces_facts = facts["ansible_network_resources"].get("l2_interfaces")
+ if not l2_interfaces_facts:
+ return []
+ return l2_interfaces_facts
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ commands = []
+ warnings = []
+
+ if self.state in self.ACTION_STATES:
+ existing_l2_interfaces_facts = self.get_l2_interfaces_facts()
+ else:
+ existing_l2_interfaces_facts = []
+
+ if self.state in self.ACTION_STATES or self.state == "rendered":
+ commands.extend(self.set_config(existing_l2_interfaces_facts))
+
+ if commands and self.state in self.ACTION_STATES:
+ if not self._module.check_mode:
+ self._connection.edit_config(commands)
+ result["changed"] = True
+
+ if self.state in self.ACTION_STATES:
+ result["commands"] = commands
+
+ if self.state in self.ACTION_STATES or self.state == "gathered":
+ changed_l2_interfaces_facts = self.get_l2_interfaces_facts()
+
+ elif self.state == "rendered":
+ result["rendered"] = commands
+
+ elif self.state == "parsed":
+ running_config = self._module.params["running_config"]
+ if not running_config:
+ self._module.fail_json(
+ msg="value of running_config parameter must not be empty for state parsed",
+ )
+ result["parsed"] = self.get_l2_interfaces_facts(data=running_config)
+
+ if self.state in self.ACTION_STATES:
+ result["before"] = existing_l2_interfaces_facts
+ if result["changed"]:
+ result["after"] = changed_l2_interfaces_facts
+
+ elif self.state == "gathered":
+ result["gathered"] = changed_l2_interfaces_facts
+
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_l2_interfaces_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ config = self._module.params.get("config")
+ want = []
+ if config:
+ for w in config:
+ w.update({"name": normalize_interface(w["name"])})
+ self.expand_trunk_allowed_vlans(w)
+ want.append(remove_empties(w))
+ have = existing_l2_interfaces_facts
+ for h in have:
+ self.expand_trunk_allowed_vlans(h)
+ resp = self.set_state(want, have) or []
+ self._reconstruct_commands(resp)
+
+ return resp
+
+ def expand_trunk_allowed_vlans(self, d):
+ if not d:
+ return None
+ if "trunk" in d and d["trunk"]:
+ if "allowed_vlans" in d["trunk"]:
+ allowed_vlans = vlan_range_to_list(d["trunk"]["allowed_vlans"])
+ vlans_list = [str(l) for l in sorted(allowed_vlans)]
+ d["trunk"]["allowed_vlans"] = ",".join(vlans_list)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params["state"]
+ if state in ("overridden", "merged", "replaced", "rendered") and not want:
+ self._module.fail_json(
+ msg="value of config parameter must not be empty for state {0}".format(state),
+ )
+
+ commands = list()
+ if state == "overridden":
+ commands.extend(self._state_overridden(want, have))
+ elif state == "deleted":
+ commands.extend(self._state_deleted(want, have))
+ else:
+ for w in want:
+ if state in ["merged", "rendered"]:
+ commands.extend(self._state_merged(flatten_dict(w), have))
+ elif state == "replaced":
+ commands.extend(self._state_replaced(flatten_dict(w), have))
+ return commands
+
+ def _state_replaced(self, w, have):
+ """The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ obj_in_have = flatten_dict(search_obj_in_list(w["name"], have, "name"))
+ if obj_in_have:
+ diff = dict_diff(w, obj_in_have)
+ else:
+ diff = w
+ merged_commands = self.set_commands(w, have, True)
+ if "name" not in diff:
+ diff["name"] = w["name"]
+
+ dkeys = diff.keys()
+ for k in w.copy():
+ if k in self.exclude_params and k in dkeys:
+ del diff[k]
+ replaced_commands = self.del_attribs(diff)
+
+ if merged_commands or replaced_commands:
+ cmds = set(replaced_commands).intersection(set(merged_commands))
+ for cmd in cmds:
+ merged_commands.remove(cmd)
+ commands.extend(replaced_commands)
+ commands.extend(merged_commands)
+ return commands
+
+ def _state_overridden(self, want, have):
+ """The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ for h in have:
+ h = flatten_dict(h)
+ obj_in_want = flatten_dict(search_obj_in_list(h["name"], want, "name"))
+ if h == obj_in_want:
+ continue
+ for w in want:
+ w = flatten_dict(w)
+ if h["name"] == w["name"]:
+ wkeys = w.keys()
+ hkeys = h.keys()
+ for k in wkeys:
+ if k in self.exclude_params and k in hkeys:
+ del h[k]
+ commands.extend(self.del_attribs(h))
+ for w in want:
+ commands.extend(self.set_commands(flatten_dict(w), have, True))
+ return commands
+
+ def _state_merged(self, w, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ return self.set_commands(w, have)
+
+ def _state_deleted(self, want, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if want:
+ for w in want:
+ obj_in_have = flatten_dict(search_obj_in_list(w["name"], have, "name"))
+ commands.extend(self.del_attribs(obj_in_have))
+ else:
+ if not have:
+ return commands
+ for h in have:
+ commands.extend(self.del_attribs(flatten_dict(h)))
+ return commands
+
+ def del_attribs(self, obj):
+ commands = []
+ if not obj or len(obj.keys()) == 1:
+ return commands
+
+ cmd = "no switchport "
+ if "vlan" in obj:
+ commands.append(cmd + "access vlan")
+ if "mode" in obj:
+ commands.append(cmd + "mode")
+ if "allowed_vlans" in obj:
+ commands.append(cmd + "trunk allowed vlan")
+ if "native_vlan" in obj:
+ commands.append(cmd + "trunk native vlan")
+ if commands:
+ commands.insert(0, "interface " + obj["name"])
+ return commands
+
+ def diff_of_dicts(self, w, obj):
+ diff = set(w.items()) - set(obj.items())
+ diff = dict(diff)
+ if diff and w["name"] == obj["name"]:
+ diff.update({"name": w["name"]})
+ return diff
+
+ def add_commands(self, d, vlan_exists=False):
+ commands = []
+ if not d:
+ return commands
+
+ cmd = "switchport "
+ if "mode" in d:
+ commands.append(cmd + "mode {0}".format(d["mode"]))
+ if "vlan" in d:
+ commands.append(cmd + "access vlan " + str(d["vlan"]))
+ if "allowed_vlans" in d:
+ if vlan_exists:
+ commands.append(cmd + "trunk allowed vlan add " + str(d["allowed_vlans"]))
+ else:
+ commands.append(cmd + "trunk allowed vlan " + str(d["allowed_vlans"]))
+ if "native_vlan" in d:
+ commands.append(cmd + "trunk native vlan " + str(d["native_vlan"]))
+ if commands:
+ commands.insert(0, "interface " + d["name"])
+ return commands
+
+ def set_commands(self, w, have, replace=False):
+ commands = []
+
+ obj_in_have = flatten_dict(search_obj_in_list(w["name"], have, "name"))
+ if not obj_in_have:
+ commands = self.add_commands(w)
+ else:
+ diff = self.diff_of_dicts(w, obj_in_have)
+ if diff and not replace:
+ if "mode" in diff.keys() and diff["mode"]:
+ commands = self.add_commands(diff)
+ if "allowed_vlans" in diff.keys() and diff["allowed_vlans"]:
+ vlan_tobe_added = diff["allowed_vlans"].split(",")
+ vlan_list = vlan_tobe_added[:]
+ if obj_in_have.get("allowed_vlans"):
+ have_vlans = obj_in_have["allowed_vlans"].split(",")
+ else:
+ have_vlans = []
+ for w_vlans in vlan_list:
+ if w_vlans in have_vlans:
+ vlan_tobe_added.pop(vlan_tobe_added.index(w_vlans))
+ if vlan_tobe_added:
+ diff.update({"allowed_vlans": ",".join(vlan_tobe_added)})
+ if have_vlans:
+ commands = self.add_commands(diff, True)
+ else:
+ commands = self.add_commands(diff)
+ return commands
+ commands = self.add_commands(diff)
+ return commands
+
+ def _reconstruct_commands(self, cmds):
+ for idx, cmd in enumerate(cmds):
+ match = re.search(
+ r"^(?P<cmd>(no\s)?switchport trunk allowed vlan(\sadd)?)\s(?P<vlans>.+)",
+ cmd,
+ )
+ if match:
+ data = match.groupdict()
+ unparsed = vlan_list_to_range(data["vlans"].split(","))
+ cmds[idx] = data["cmd"] + " " + unparsed
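
expand_trunk_allowed_vlans() and _reconstruct_commands() above rely on vlan_range_to_list() and vlan_list_to_range() from the collection's utils module, which are not part of this diff. The sketch below uses simplified stand-ins for those helpers purely to illustrate the round trip: expanding a VLAN range for diffing, then collapsing it back into range notation for the final CLI.

# Illustrative sketch only -- simplified stand-ins, not the real utils helpers.
def vlan_range_to_list(ranges):
    """Expand '10-12,20' -> [10, 11, 12, 20]."""
    vlans = []
    for part in str(ranges).split(","):
        if "-" in part:
            lo, hi = part.split("-")
            vlans.extend(range(int(lo), int(hi) + 1))
        else:
            vlans.append(int(part))
    return vlans

def vlan_list_to_range(vlans):
    """Collapse ['10', '11', '12', '20'] -> '10-12,20'."""
    nums = sorted(int(v) for v in vlans)
    out, start, prev = [], nums[0], nums[0]
    for n in nums[1:]:
        if n == prev + 1:
            prev = n
            continue
        out.append(str(start) if start == prev else "%s-%s" % (start, prev))
        start = prev = n
    out.append(str(start) if start == prev else "%s-%s" % (start, prev))
    return ",".join(out)

if __name__ == "__main__":
    expanded = vlan_range_to_list("10-12,20")
    print(expanded)                                         # [10, 11, 12, 20]
    print(vlan_list_to_range([str(v) for v in expanded]))   # 10-12,20
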
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l3_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l3_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l3_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l3_interfaces/l3_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l3_interfaces/l3_interfaces.py
new file mode 100644
index 00000000..55ac2266
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/l3_interfaces/l3_interfaces.py
@@ -0,0 +1,545 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos_l3_interfaces class
+This file compares the current configuration (as a dict) to the
+provided configuration (as a dict) and builds the command set
+necessary to bring the current configuration to its desired
+end state.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ remove_empties,
+ to_list,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ normalize_interface,
+ search_obj_in_list,
+)
+
+
+class L3_interfaces(ConfigBase):
+ """
+ The nxos_l3_interfaces class
+ """
+
+ gather_subset = ["min"]
+
+ gather_network_resources = ["l3_interfaces"]
+
+ exclude_params = []
+
+ def __init__(self, module):
+ super(L3_interfaces, self).__init__(module)
+
+ def get_l3_interfaces_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ if self.state not in self.ACTION_STATES:
+ self.gather_subset = ["!all", "!min"]
+
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+
+ l3_interfaces_facts = facts["ansible_network_resources"].get("l3_interfaces")
+ self.platform = facts.get("ansible_net_platform", "")
+
+ return l3_interfaces_facts
+
+ def edit_config(self, commands):
+ return self._connection.edit_config(commands)
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ commands = []
+ warnings = []
+
+ if self.state in self.ACTION_STATES:
+ existing_l3_interfaces_facts = self.get_l3_interfaces_facts()
+ else:
+ existing_l3_interfaces_facts = []
+
+ if self.state in self.ACTION_STATES or self.state == "rendered":
+ commands.extend(self.set_config(existing_l3_interfaces_facts))
+
+ if commands and self.state in self.ACTION_STATES:
+ if not self._module.check_mode:
+ self.edit_config(commands)
+ result["changed"] = True
+
+ if self.state in self.ACTION_STATES:
+ result["commands"] = commands
+
+ if self.state in self.ACTION_STATES or self.state == "gathered":
+ changed_l3_interfaces_facts = self.get_l3_interfaces_facts()
+
+ elif self.state == "rendered":
+ result["rendered"] = commands
+
+ elif self.state == "parsed":
+ running_config = self._module.params["running_config"]
+ if not running_config:
+ self._module.fail_json(
+ msg="value of running_config parameter must not be empty for state parsed",
+ )
+ result["parsed"] = self.get_l3_interfaces_facts(data=running_config)
+
+ if self.state in self.ACTION_STATES:
+ result["before"] = existing_l3_interfaces_facts
+ if result["changed"]:
+ result["after"] = changed_l3_interfaces_facts
+
+ elif self.state == "gathered":
+ result["gathered"] = changed_l3_interfaces_facts
+
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_l3_interfaces_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ config = self._module.params.get("config")
+ want = []
+ if config:
+ for w in config:
+ w.update({"name": normalize_interface(w["name"])})
+ want.append(remove_empties(w))
+ have = deepcopy(existing_l3_interfaces_facts)
+ self.init_check_existing(have)
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params["state"]
+ if state in ("overridden", "merged", "replaced", "rendered") and not want:
+ self._module.fail_json(
+ msg="value of config parameter must not be empty for state {0}".format(state),
+ )
+
+ commands = []
+ if state == "overridden":
+ commands.extend(self._state_overridden(want, have))
+ elif state == "deleted":
+ commands.extend(self._state_deleted(want, have))
+ else:
+ for w in want:
+ if state in ["merged", "rendered"]:
+ commands.extend(self._state_merged(w, have))
+ elif state == "replaced":
+ commands.extend(self._state_replaced(w, have))
+ return commands
+
+ def _state_replaced(self, want, have):
+ """The command generator when state is replaced
+ Scope is limited to interface objects defined in the playbook.
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ cmds = []
+ name = want["name"]
+ obj_in_have = search_obj_in_list(want["name"], have, "name")
+
+ have_v4 = obj_in_have.pop("ipv4", []) if obj_in_have else []
+ have_v6 = obj_in_have.pop("ipv6", []) if obj_in_have else []
+
+ # Process lists of dicts separately
+ v4_cmds = self._v4_cmds(want.pop("ipv4", []), have_v4, state="replaced")
+ v6_cmds = self._v6_cmds(want.pop("ipv6", []), have_v6, state="replaced")
+
+ # Process remaining attrs
+ if obj_in_have:
+ # Find 'want' changes first
+ diff = self.diff_of_dicts(want, obj_in_have)
+ rmv = {"name": name}
+ haves_not_in_want = set(obj_in_have.keys()) - set(want.keys()) - set(diff.keys())
+ for i in haves_not_in_want:
+ rmv[i] = obj_in_have[i]
+ cmds.extend(self.generate_delete_commands(rmv))
+ else:
+ diff = want
+
+ cmds.extend(self.add_commands(diff, name=name))
+ cmds.extend(v4_cmds)
+ cmds.extend(v6_cmds)
+ self.cmd_order_fixup(cmds, name)
+ return cmds
+
+ def _state_overridden(self, want, have):
+ """The command generator when state is overridden
+ Scope includes all interface objects on the device.
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ # overridden behavior is the same as replaced except for scope.
+ cmds = []
+ for i in have:
+ obj_in_want = search_obj_in_list(i["name"], want, "name")
+ if obj_in_want:
+ if i != obj_in_want:
+ v4_cmds = self._v4_cmds(
+ obj_in_want.pop("ipv4", []),
+ i.pop("ipv4", []),
+ state="overridden",
+ )
+ replaced_cmds = self._state_replaced(obj_in_want, [i])
+ replaced_cmds.extend(v4_cmds)
+ self.cmd_order_fixup(replaced_cmds, obj_in_want["name"])
+ cmds.extend(replaced_cmds)
+ else:
+ deleted_cmds = self.generate_delete_commands(i)
+ self.cmd_order_fixup(deleted_cmds, i["name"])
+ cmds.extend(deleted_cmds)
+
+ for i in want:
+ if [item for item in have if i["name"] == item["name"]]:
+ continue
+ cmds.extend(self.add_commands(i, name=i["name"]))
+
+ return cmds
+
+ def _state_merged(self, w, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ return self.set_commands(w, have)
+
+ def _v4_cmds(self, want, have, state=None):
+ """Helper method for processing ipv4 changes.
+ This is needed to handle primary/secondary address changes, which must be applied in a specific sequence.
+ """
+ # The ip address cli does not allow removing primary addresses while
+ # secondaries are present, but it does allow changing a primary to a
+ # new address as long as the address is not a current secondary.
+ # Be aware of scenarios where a secondary is taking over
+ # the role of the primary, which must be changed in sequence.
+ # In general, primaries/secondaries should change in this order:
+ # Step 1. Remove secondaries that are being changed or removed
+ # Step 2. Change the primary if needed
+ # Step 3. Merge secondaries
+
+ # Normalize inputs (add tag key if not present)
+ for i in want:
+ i["tag"] = i.get("tag")
+ for i in have:
+ i["tag"] = i.get("tag")
+
+ merged = True if state == "merged" else False
+ replaced = True if state == "replaced" else False
+ overridden = True if state == "overridden" else False
+
+ # Create secondary and primary wants/haves
+ sec_w = [i for i in want if i.get("secondary")]
+ sec_h = [i for i in have if i.get("secondary")]
+ pri_w = [i for i in want if not i.get("secondary")]
+ pri_h = [i for i in have if not i.get("secondary")]
+ pri_w = pri_w[0] if pri_w else {}
+ pri_h = pri_h[0] if pri_h else {}
+ cmds = []
+
+ # Remove all addrs when no primary is specified in want (pri_w)
+ if pri_h and not pri_w and (replaced or overridden):
+ cmds.append("no ip address")
+ return cmds
+
+ # 1. Determine which secondaries are changing and remove them. Need a have/want
+ # diff instead of want/have because a have sec addr may be changing to a pri.
+ sec_to_rmv = []
+ sec_diff = self.diff_list_of_dicts(sec_h, sec_w)
+ for i in sec_diff:
+ if overridden or [w for w in sec_w if w["address"] == i["address"]]:
+ sec_to_rmv.append(i["address"])
+
+ # Check if new primary is currently a secondary
+ if pri_w and [h for h in sec_h if h["address"] == pri_w["address"]]:
+ if not overridden:
+ sec_to_rmv.append(pri_w["address"])
+
+ # Remove the changing secondaries
+ cmds.extend(["no ip address %s secondary" % i for i in sec_to_rmv])
+
+ # 2. change primary
+ if pri_w:
+ diff = dict(set(pri_w.items()) - set(pri_h.items()))
+ if diff:
+ addr = diff.get("address") or pri_w.get("address")
+ cmd = "ip address %s" % addr
+ tag = diff.get("tag")
+ cmd += " tag %s" % tag if tag else ""
+ cmds.append(cmd)
+
+ # 3. process remaining secondaries last
+ sec_w_to_chg = self.diff_list_of_dicts(sec_w, sec_h)
+ for i in sec_w_to_chg:
+ cmd = "ip address %s secondary" % i["address"]
+ cmd += " tag %s" % i["tag"] if i["tag"] else ""
+ cmds.append(cmd)
+
+ return cmds
+
+ def _v6_cmds(self, want, have, state=""):
+ """Helper method for processing ipv6 changes.
+ This is needed to avoid unnecessary churn on the device when removing or changing multiple addresses.
+ """
+ # Normalize inputs (add tag key if not present)
+ for i in want:
+ i["tag"] = i.get("tag")
+ for i in have:
+ i["tag"] = i.get("tag")
+
+ cmds = []
+ # items to remove (items in 'have' only)
+ if state == "replaced":
+ for i in self.diff_list_of_dicts(have, want):
+ want_addr = [w for w in want if w["address"] == i["address"]]
+ if not want_addr:
+ cmds.append("no ipv6 address %s" % i["address"])
+ elif i["tag"] and not want_addr[0]["tag"]:
+ # Must remove entire cli when removing tag
+ cmds.append("no ipv6 address %s" % i["address"])
+
+ # items to merge/add
+ for i in self.diff_list_of_dicts(want, have):
+ addr = i["address"]
+ tag = i["tag"]
+ if not tag and state == "merged":
+ # When want is IP-no-tag and have is IP+tag it will show up in diff,
+ # but for merged nothing has changed, so ignore it for idempotence.
+ have_addr = [h for h in have if h["address"] == addr]
+ if have_addr and have_addr[0].get("tag"):
+ continue
+ cmd = "ipv6 address %s" % i["address"]
+ cmd += " tag %s" % tag if tag else ""
+ cmds.append(cmd)
+
+ return cmds
+
+ def _state_deleted(self, want, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if want:
+ for w in want:
+ obj_in_have = search_obj_in_list(w["name"], have, "name")
+ commands.extend(self.del_all_attribs(obj_in_have))
+ else:
+ if not have:
+ return commands
+ for h in have:
+ commands.extend(self.del_all_attribs(h))
+ return commands
+
+ def del_all_attribs(self, obj):
+ commands = []
+ if not obj or len(obj.keys()) == 1:
+ return commands
+ commands = self.generate_delete_commands(obj)
+ self.cmd_order_fixup(commands, obj["name"])
+ return commands
+
+ def generate_delete_commands(self, obj):
+ """Generate CLI commands to remove non-default settings.
+ obj: dict of attrs to remove
+ """
+ commands = []
+ name = obj.get("name")
+ if "dot1q" in obj:
+ commands.append("no encapsulation dot1q")
+ if "redirects" in obj:
+ if not self.check_existing(name, "has_secondary") or re.match(
+ "N[35679]",
+ self.platform,
+ ):
+ # device auto-enables redirects when secondaries are removed;
+ # auto-enable may fail on legacy platforms so always do explicit enable
+ commands.append("ip redirects")
+ if "ipv6_redirects" in obj:
+ if not self.check_existing(name, "has_secondary") or re.match(
+ "N[35679]",
+ self.platform,
+ ):
+ # device auto-enables redirects when secondaries are removed;
+ # auto-enable may fail on legacy platforms so always do explicit enable
+ commands.append("ipv6 redirects")
+ if "unreachables" in obj:
+ commands.append("no ip unreachables")
+ if "ipv4" in obj:
+ commands.append("no ip address")
+ if "ipv6" in obj:
+ commands.append("no ipv6 address")
+ if "evpn_multisite_tracking" in obj:
+ have = self.existing_facts.get(name, {})
+ if have.get("evpn_multisite_tracking", False) is not False:
+ cmd = "no evpn multisite %s" % have.get("evpn_multisite_tracking")
+ commands.append(cmd)
+ return commands
+
+ def init_check_existing(self, have):
+ """Creates a class var dict for easier access to existing states"""
+ self.existing_facts = dict()
+ have_copy = deepcopy(have)
+ for intf in have_copy:
+ name = intf["name"]
+ self.existing_facts[name] = intf
+ # Check for presence of secondaries; used for ip redirects logic
+ if [i for i in intf.get("ipv4", []) if i.get("secondary")]:
+ self.existing_facts[name]["has_secondary"] = True
+
+ def check_existing(self, name, query):
+ """Helper method to lookup existing states on an interface.
+ This is needed for attribute changes that have additional dependencies;
+ e.g. 'ip redirects' may auto-enable when all secondary ip addrs are removed.
+ """
+ if name:
+ have = self.existing_facts.get(name, {})
+ if "has_secondary" in query:
+ return have.get("has_secondary", False)
+ if "redirects" in query:
+ return have.get("redirects", True)
+ if "unreachables" in query:
+ return have.get("unreachables", False)
+ return None
+
+ def diff_of_dicts(self, w, obj):
+ diff = set(w.items()) - set(obj.items())
+ diff = dict(diff)
+ if diff and w["name"] == obj["name"]:
+ diff.update({"name": w["name"]})
+ return diff
+
+ def diff_list_of_dicts(self, w, h):
+ diff = []
+ set_w = set(tuple(sorted(d.items())) for d in w) if w else set()
+ set_h = set(tuple(sorted(d.items())) for d in h) if h else set()
+ difference = set_w.difference(set_h)
+ for element in difference:
+ diff.append(dict((x, y) for x, y in element))
+ return diff
+
+ def add_commands(self, diff, name=""):
+ commands = []
+ if not diff:
+ return commands
+ if "dot1q" in diff:
+ commands.append("encapsulation dot1q " + str(diff["dot1q"]))
+ if "redirects" in diff:
+ # Note: device will auto-disable redirects when secondaries are present
+ if diff["redirects"] != self.check_existing(name, "redirects"):
+ no_cmd = "no " if diff["redirects"] is False else ""
+ commands.append(no_cmd + "ip redirects")
+ self.cmd_order_fixup(commands, name)
+ if "ipv6_redirects" in diff:
+ # Note: device will auto-disable redirects when secondaries are present
+ if diff["ipv6_redirects"] != self.check_existing(name, "ipv6_redirects"):
+ no_cmd = "no " if diff["ipv6_redirects"] is False else ""
+ commands.append(no_cmd + "ipv6 redirects")
+ self.cmd_order_fixup(commands, name)
+ if "unreachables" in diff:
+ if diff["unreachables"] != self.check_existing(name, "unreachables"):
+ no_cmd = "no " if diff["unreachables"] is False else ""
+ commands.append(no_cmd + "ip unreachables")
+ if "evpn_multisite_tracking" in diff:
+ commands.append("evpn multisite " + str(diff["evpn_multisite_tracking"]))
+ if "ipv4" in diff:
+ commands.extend(self.generate_afi_commands(diff["ipv4"]))
+ if "ipv6" in diff:
+ commands.extend(self.generate_afi_commands(diff["ipv6"]))
+ self.cmd_order_fixup(commands, name)
+
+ return commands
+
+ def generate_afi_commands(self, diff):
+ cmds = []
+ for i in diff:
+ cmd = "ipv6 address " if re.search("::", i["address"]) else "ip address "
+ cmd += i["address"]
+ if i.get("secondary"):
+ cmd += " secondary"
+ if i.get("tag"):
+ cmd += " tag " + str(i["tag"])
+ cmds.append(cmd)
+ return cmds
+
+ def set_commands(self, w, have):
+ commands = []
+ name = w["name"]
+ obj_in_have = search_obj_in_list(name, have, "name")
+ if not obj_in_have:
+ commands = self.add_commands(w, name=name)
+ else:
+ # lists of dicts must be processed separately from non-list attrs
+ v4_cmds = self._v4_cmds(w.pop("ipv4", []), obj_in_have.pop("ipv4", []), state="merged")
+ v6_cmds = self._v6_cmds(w.pop("ipv6", []), obj_in_have.pop("ipv6", []), state="merged")
+
+ # diff remaining attrs
+ diff = self.diff_of_dicts(w, obj_in_have)
+ commands = self.add_commands(diff, name=name)
+ commands.extend(v4_cmds)
+ commands.extend(v6_cmds)
+
+ self.cmd_order_fixup(commands, name)
+ return commands
+
+ def cmd_order_fixup(self, cmds, name):
+ """Inserts 'interface <name>' config at the beginning of populated command list; reorders dependent commands that must process after others."""
+ if cmds:
+ if name and not [item for item in cmds if item.startswith("interface")]:
+ cmds.insert(0, "interface " + name)
+
+ redirects = [item for item in cmds if re.match("(no )*ip(v6)* redirects", item)]
+ if redirects:
+ # redirects should occur after ipv4 commands, just move to end of list
+ redirects = redirects.pop()
+ cmds.remove(redirects)
+ cmds.append(redirects)
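
The ordering rules documented in _v4_cmds() above (remove the changing secondaries first, then change the primary, then merge the remaining secondaries) can be illustrated with a much-reduced sketch that works on plain address strings instead of the module's dicts and tags. This approximates the sequencing only, not the full replaced/overridden semantics.

# Illustrative sketch only -- a simplified walk-through of the IPv4 ordering.
def v4_replaced_cmds(want, have):
    # want/have: {"primary": "addr", "secondaries": ["addr", ...]}
    cmds = []
    # Step 1: remove secondaries that are being changed or removed, including a
    # have-secondary that is about to become the new primary.
    keep = set(want["secondaries"])
    for sec in have["secondaries"]:
        if sec not in keep or sec == want["primary"]:
            cmds.append("no ip address %s secondary" % sec)
    # Step 2: change the primary if needed (a primary can be overwritten in
    # place as long as the new address is not a current secondary).
    if want["primary"] != have["primary"]:
        cmds.append("ip address %s" % want["primary"])
    # Step 3: merge the remaining secondaries last.
    for sec in want["secondaries"]:
        if sec not in have["secondaries"]:
            cmds.append("ip address %s secondary" % sec)
    return cmds

if __name__ == "__main__":
    have = {"primary": "10.0.0.1/24", "secondaries": ["10.0.1.1/24"]}
    # Promote the old secondary to primary and add a new secondary.
    want = {"primary": "10.0.1.1/24", "secondaries": ["10.0.2.1/24"]}
    for c in v4_replaced_cmds(want, have):
        print(c)
    # no ip address 10.0.1.1/24 secondary
    # ip address 10.0.1.1/24
    # ip address 10.0.2.1/24 secondary
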
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp/lacp.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp/lacp.py
new file mode 100644
index 00000000..4d334bad
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp/lacp.py
@@ -0,0 +1,234 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos_lacp class
+This file compares the current configuration (as a dict) to the
+provided configuration (as a dict) and builds the command set
+necessary to bring the current configuration to its desired
+end state.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_diff,
+ remove_empties,
+ to_list,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+
+
+class Lacp(ConfigBase):
+ """
+ The nxos_lacp class
+ """
+
+ gather_subset = ["!all", "!min"]
+
+ gather_network_resources = ["lacp"]
+
+ exclude_params = ["priority", "mac"]
+
+ def __init__(self, module):
+ super(Lacp, self).__init__(module)
+
+ def get_lacp_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ lacp_facts = facts["ansible_network_resources"].get("lacp", {})
+
+ return lacp_facts
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ commands = list()
+ warnings = list()
+
+ if self.state in self.ACTION_STATES:
+ existing_lacp_facts = self.get_lacp_facts()
+ else:
+ existing_lacp_facts = {}
+
+ if self.state in self.ACTION_STATES or self.state == "rendered":
+ commands.extend(self.set_config(existing_lacp_facts))
+
+ if commands and self.state in self.ACTION_STATES:
+ if not self._module.check_mode:
+ self._connection.edit_config(commands)
+ result["changed"] = True
+
+ if self.state in self.ACTION_STATES:
+ result["commands"] = commands
+
+ if self.state in self.ACTION_STATES or self.state == "gathered":
+ changed_lacp_facts = self.get_lacp_facts()
+
+ elif self.state == "rendered":
+ result["rendered"] = commands
+
+ elif self.state == "parsed":
+ running_config = self._module.params["running_config"]
+ if not running_config:
+ self._module.fail_json(
+ msg="value of running_config parameter must not be empty for state parsed",
+ )
+ result["parsed"] = self.get_lacp_facts(data=running_config)
+
+ if self.state in self.ACTION_STATES:
+ result["before"] = existing_lacp_facts
+ if result["changed"]:
+ result["after"] = changed_lacp_facts
+
+ elif self.state == "gathered":
+ result["gathered"] = changed_lacp_facts
+
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_lacp_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = remove_empties(self._module.params["config"])
+ have = existing_lacp_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params["state"]
+ if state in ("merged", "replaced", "rendered") and not want:
+ self._module.fail_json(
+ msg="value of config parameter must not be empty for state {0}".format(state),
+ )
+
+ commands = list()
+
+ if state == "deleted":
+ commands.extend(self._state_deleted(want, have))
+ elif state in ["merged", "rendered"]:
+ commands.extend(self._state_merged(want, have))
+ elif state == "replaced":
+ commands.extend(self._state_replaced(want, have))
+ return commands
+
+ def _state_replaced(self, want, have):
+ """The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ diff = dict_diff(want, have)
+ wkeys = want.keys()
+ dkeys = diff.keys()
+ for k in wkeys:
+ if k in self.exclude_params and k in dkeys:
+ del diff[k]
+ deleted_commands = self.del_all(diff)
+ merged_commands = self._state_merged(want, have)
+
+ commands.extend(deleted_commands)
+ if merged_commands:
+ commands.extend(merged_commands)
+
+ return commands
+
+ def _state_merged(self, want, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ return self.set_commands(want, have)
+
+ def _state_deleted(self, want, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if not have:
+ return commands
+ commands.extend(self.del_all(have))
+ return commands
+
+ def get_diff(self, comparable, base):
+ diff = {}
+ if not base:
+ diff = comparable
+ else:
+ diff = dict_diff(base, comparable)
+ return diff
+
+ def del_all(self, diff):
+ commands = []
+ base = "no lacp system-"
+ diff = diff.get("system")
+ if diff:
+ if "priority" in diff:
+ commands.append(base + "priority")
+ if "mac" in diff:
+ commands.append(base + "mac")
+ return commands
+
+ def add_commands(self, diff):
+ commands = []
+ base = "lacp system-"
+ diff = diff.get("system")
+ if diff and "priority" in diff:
+ cmd = base + "priority" + " " + str(diff["priority"])
+ commands.append(cmd)
+ if diff and "mac" in diff:
+ cmd = ""
+ if "address" in diff["mac"]:
+ cmd += base + "mac" + " " + diff["mac"]["address"]
+ if "role" in diff["mac"]:
+ cmd += " " + "role" + " " + diff["mac"]["role"]
+ if cmd:
+ commands.append(cmd)
+
+ return commands
+
+ def set_commands(self, want, have):
+ if not want:
+ return []
+ diff = self.get_diff(want, have)
+ return self.add_commands(diff)
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp_interfaces/lacp_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp_interfaces/lacp_interfaces.py
new file mode 100644
index 00000000..eaad0763
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lacp_interfaces/lacp_interfaces.py
@@ -0,0 +1,323 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos_lacp_interfaces class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_diff,
+ remove_empties,
+ to_list,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ flatten_dict,
+ get_interface_type,
+ normalize_interface,
+ search_obj_in_list,
+)
+
+
+class Lacp_interfaces(ConfigBase):
+ """
+ The nxos_lacp_interfaces class
+ """
+
+ gather_subset = ["!all", "!min"]
+
+ gather_network_resources = ["lacp_interfaces"]
+
+ exclude_params = ["port_priority", "rate", "min", "max"]
+
+ def __init__(self, module):
+ super(Lacp_interfaces, self).__init__(module)
+
+ def get_lacp_interfaces_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ lacp_interfaces_facts = facts["ansible_network_resources"].get("lacp_interfaces")
+ if not lacp_interfaces_facts:
+ return []
+ return lacp_interfaces_facts
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ commands = list()
+ warnings = list()
+
+ if self.state in self.ACTION_STATES:
+ existing_lacp_interfaces_facts = self.get_lacp_interfaces_facts()
+ else:
+ existing_lacp_interfaces_facts = []
+
+ if self.state in self.ACTION_STATES or self.state == "rendered":
+ commands.extend(self.set_config(existing_lacp_interfaces_facts))
+
+ if commands and self.state in self.ACTION_STATES:
+ if not self._module.check_mode:
+ self._connection.edit_config(commands)
+ result["changed"] = True
+
+ if self.state in self.ACTION_STATES:
+ result["commands"] = commands
+
+ if self.state in self.ACTION_STATES or self.state == "gathered":
+ changed_lacp_interfaces_facts = self.get_lacp_interfaces_facts()
+
+ elif self.state == "rendered":
+ result["rendered"] = commands
+
+ elif self.state == "parsed":
+ running_config = self._module.params["running_config"]
+ if not running_config:
+ self._module.fail_json(
+ msg="value of running_config parameter must not be empty for state parsed",
+ )
+ result["parsed"] = self.get_lacp_interfaces_facts(data=running_config)
+
+ if self.state in self.ACTION_STATES:
+ result["before"] = existing_lacp_interfaces_facts
+ if result["changed"]:
+ result["after"] = changed_lacp_interfaces_facts
+
+ elif self.state == "gathered":
+ result["gathered"] = changed_lacp_interfaces_facts
+
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_lacp_interfaces_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ config = self._module.params.get("config")
+ want = []
+ if config:
+ for w in config:
+ if get_interface_type(w["name"]) not in (
+ "portchannel",
+ "ethernet",
+ ):
+ self._module.fail_json(
+ msg="This module works with either portchannel or ethernet",
+ )
+ w.update({"name": normalize_interface(w["name"])})
+ want.append(remove_empties(w))
+ have = existing_lacp_interfaces_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params["state"]
+ if state in ("overridden", "merged", "replaced", "rendered") and not want:
+ self._module.fail_json(
+ msg="value of config parameter must not be empty for state {0}".format(state),
+ )
+ commands = list()
+
+ if state == "overridden":
+ commands.extend(self._state_overridden(want, have))
+ elif state == "deleted":
+ commands.extend(self._state_deleted(want, have))
+ else:
+ for w in want:
+ if state in ["merged", "rendered"]:
+ commands.extend(self._state_merged(flatten_dict(w), have))
+ elif state == "replaced":
+ commands.extend(self._state_replaced(flatten_dict(w), have))
+ return commands
+
+ def _state_replaced(self, w, have):
+ """The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ obj_in_have = flatten_dict(search_obj_in_list(w["name"], have, "name"))
+ diff = dict_diff(w, obj_in_have)
+ merged_commands = self.set_commands(w, have)
+ if "name" not in diff:
+ diff["name"] = w["name"]
+ wkeys = w.keys()
+ dkeys = diff.keys()
+ for k in wkeys:
+ if k in self.exclude_params and k in dkeys:
+ del diff[k]
+ replaced_commands = self.del_attribs(diff)
+
+ if merged_commands:
+ cmds = set(replaced_commands).intersection(set(merged_commands))
+ for cmd in cmds:
+ merged_commands.remove(cmd)
+ commands.extend(replaced_commands)
+ commands.extend(merged_commands)
+ return commands
+
+ def _state_overridden(self, want, have):
+ """The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ for h in have:
+ h = flatten_dict(h)
+ obj_in_want = flatten_dict(search_obj_in_list(h["name"], want, "name"))
+ if h == obj_in_want:
+ continue
+ for w in want:
+ w = flatten_dict(w)
+ if h["name"] == w["name"]:
+ wkeys = w.keys()
+ hkeys = h.keys()
+ for k in wkeys:
+ if k in self.exclude_params and k in hkeys:
+ del h[k]
+ commands.extend(self.del_attribs(h))
+ for w in want:
+ commands.extend(self.set_commands(flatten_dict(w), have))
+ return commands
+
+ def _state_merged(self, w, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ return self.set_commands(w, have)
+
+ def _state_deleted(self, want, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if want:
+ for w in want:
+ obj_in_have = flatten_dict(search_obj_in_list(w["name"], have, "name"))
+ commands.extend(self.del_attribs(obj_in_have))
+ else:
+ if not have:
+ return commands
+ for h in have:
+ commands.extend(self.del_attribs(flatten_dict(h)))
+ return commands
+
+ def del_attribs(self, obj):
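+        # Illustrative sketch only (sample values): for
+        # obj = {"name": "port-channel10", "min": 2, "port_priority": 100}
+        # this would return:
+        #   ["interface port-channel10", "no lacp min-links", "no lacp port-priority"]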
+ commands = []
+ if not obj or len(obj.keys()) == 1:
+ return commands
+ commands.append("interface " + obj["name"])
+ if "graceful" in obj:
+ commands.append("lacp graceful-convergence")
+ if "vpc" in obj:
+            commands.append("no lacp vpc-convergence")
+ if "suspend_individual" in obj:
+            commands.append("lacp suspend-individual")
+ if "mode" in obj:
+ commands.append("no lacp mode " + obj["mode"])
+ if "max" in obj:
+ commands.append("no lacp max-bundle")
+ if "min" in obj:
+ commands.append("no lacp min-links")
+ if "port_priority" in obj:
+ commands.append("no lacp port-priority")
+ if "rate" in obj:
+ commands.append("no lacp rate")
+ return commands
+
+ def diff_of_dicts(self, w, obj):
+ diff = set(w.items()) - set(obj.items())
+ diff = dict(diff)
+ if diff and w["name"] == obj["name"]:
+ diff.update({"name": w["name"]})
+ return diff
+
+ def add_commands(self, d):
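+        # Illustrative sketch only (sample values): for
+        # d = {"name": "Ethernet1/1", "port_priority": 100, "rate": "fast"}
+        # this would return:
+        #   ["interface Ethernet1/1", "lacp port-priority 100", "lacp rate fast"]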
+ commands = []
+ if not d:
+ return commands
+ commands.append("interface" + " " + d["name"])
+
+ if "port_priority" in d:
+ commands.append("lacp port-priority " + str(d["port_priority"]))
+ if "rate" in d:
+ commands.append("lacp rate " + str(d["rate"]))
+ if "min" in d:
+ commands.append("lacp min-links " + str(d["min"]))
+ if "max" in d:
+ commands.append("lacp max-bundle " + str(d["max"]))
+ if "mode" in d:
+ commands.append("lacp mode " + d["mode"])
+ if "suspend_individual" in d:
+ if d["suspend_individual"] is True:
+ commands.append("lacp suspend-individual")
+ else:
+ commands.append("no lacp suspend-individual")
+ if "graceful" in d:
+ if d["graceful"] is True:
+ commands.append("lacp graceful-convergence")
+ else:
+ commands.append("no lacp graceful-convergence")
+ if "vpc" in d:
+ if d["vpc"] is True:
+ commands.append("lacp vpc-convergence")
+ else:
+ commands.append("no lacp vpc-convergence")
+ return commands
+
+ def set_commands(self, w, have):
+ commands = []
+ obj_in_have = flatten_dict(search_obj_in_list(w["name"], have, "name"))
+ if not obj_in_have:
+ commands = self.add_commands(w)
+ else:
+ diff = self.diff_of_dicts(w, obj_in_have)
+ commands = self.add_commands(diff)
+ return commands
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lag_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lag_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lag_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lag_interfaces/lag_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lag_interfaces/lag_interfaces.py
new file mode 100644
index 00000000..495244b4
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lag_interfaces/lag_interfaces.py
@@ -0,0 +1,318 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos_lag_interfaces class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_diff,
+ remove_empties,
+ search_obj_in_list,
+ to_list,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ normalize_interface,
+)
+
+
+class Lag_interfaces(ConfigBase):
+ """
+ The nxos_lag_interfaces class
+ """
+
+ gather_subset = ["!all", "!min"]
+
+ gather_network_resources = ["lag_interfaces"]
+
+ def __init__(self, module):
+ super(Lag_interfaces, self).__init__(module)
+
+ def get_lag_interfaces_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ lag_interfaces_facts = facts["ansible_network_resources"].get("lag_interfaces")
+ if not lag_interfaces_facts:
+ return []
+ return lag_interfaces_facts
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ commands = list()
+ warnings = list()
+
+ if self.state in self.ACTION_STATES:
+ existing_lag_interfaces_facts = self.get_lag_interfaces_facts()
+ else:
+ existing_lag_interfaces_facts = []
+
+ if self.state in self.ACTION_STATES or self.state == "rendered":
+ commands.extend(self.set_config(existing_lag_interfaces_facts))
+
+ if commands and self.state in self.ACTION_STATES:
+ if not self._module.check_mode:
+ resp = self._connection.edit_config(commands)
+ if "response" in resp:
+ for item in resp["response"]:
+ if item:
+ err_str = item
+ if err_str.lower().startswith("cannot add"):
+ self._module.fail_json(msg=err_str)
+ result["changed"] = True
+
+ if self.state in self.ACTION_STATES:
+ result["commands"] = commands
+
+ if self.state in self.ACTION_STATES or self.state == "gathered":
+ changed_lag_interfaces_facts = self.get_lag_interfaces_facts()
+
+ elif self.state == "rendered":
+ result["rendered"] = commands
+
+ elif self.state == "parsed":
+ running_config = self._module.params["running_config"]
+ if not running_config:
+ self._module.fail_json(
+ msg="value of running_config parameter must not be empty for state parsed",
+ )
+ result["parsed"] = self.get_lag_interfaces_facts(data=running_config)
+
+ if self.state in self.ACTION_STATES:
+ result["before"] = existing_lag_interfaces_facts
+ if result["changed"]:
+ result["after"] = changed_lag_interfaces_facts
+
+ elif self.state == "gathered":
+ result["gathered"] = changed_lag_interfaces_facts
+
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_lag_interfaces_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params.get("config")
+ if want:
+ for w in want:
+ w.update(remove_empties(w))
+ if "members" in w and w["members"]:
+ for item in w["members"]:
+ item.update({"member": normalize_interface(item["member"])})
+ have = existing_lag_interfaces_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params["state"]
+ if state in ("overridden", "merged", "replaced", "rendered") and not want:
+ self._module.fail_json(
+ msg="value of config parameter must not be empty for state {0}".format(state),
+ )
+ commands = list()
+
+ if state == "overridden":
+ commands.extend(self._state_overridden(want, have))
+ elif state == "deleted":
+ commands.extend(self._state_deleted(want, have))
+ else:
+ for w in want:
+ if state in ["merged", "rendered"]:
+ commands.extend(self._state_merged(w, have))
+ if state == "replaced":
+ commands.extend(self._state_replaced(w, have))
+ return commands
+
+ def _state_replaced(self, w, have):
+ """The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ merged_commands = self.set_commands(w, have)
+ replaced_commands = self.del_intf_commands(w, have)
+ if merged_commands:
+ commands.extend(replaced_commands)
+ commands.extend(merged_commands)
+ return commands
+
+ def _state_overridden(self, want, have):
+ """The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ for h in have:
+ obj_in_want = search_obj_in_list(h["name"], want, "name")
+ if obj_in_want:
+ diff = self.diff_list_of_dicts(h.get("members", []), obj_in_want["members"])
+ if not diff:
+ continue
+ commands.extend(self.del_all_commands(h))
+ for w in want:
+ commands.extend(self.set_commands(w, have))
+ return commands
+
+ def _state_merged(self, w, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ return self.set_commands(w, have)
+
+ def _state_deleted(self, want, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if want:
+ for w in want:
+ obj_in_have = search_obj_in_list(w["name"], have, "name")
+ commands.extend(self.del_all_commands(obj_in_have))
+ else:
+ if not have:
+ return commands
+ for h in have:
+ commands.extend(self.del_all_commands(h))
+ return commands
+
+ def diff_list_of_dicts(self, want, have):
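+        # Illustrative sketch only (sample values, assuming dict_diff returns the
+        # keys of its second argument that differ from the first): for
+        # want = [{"member": "Ethernet1/1", "mode": "active"}] and
+        # have = [{"member": "Ethernet1/1"}] the returned delta would be
+        #   [{"mode": "active", "member": "Ethernet1/1"}]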
+ if not want:
+ want = []
+
+ if not have:
+ have = []
+
+ diff = []
+ for w_item in want:
+ h_item = search_obj_in_list(w_item["member"], have, key="member") or {}
+ delta = dict_diff(h_item, w_item)
+ if delta:
+ if h_item:
+ if (
+ "mode" in delta.keys()
+ and delta["mode"] == "on"
+ and "mode" not in h_item.keys()
+ ):
+ # mode = on will not be displayed in running-config
+ continue
+ if "member" not in delta.keys():
+ delta["member"] = w_item["member"]
+ diff.append(delta)
+
+ return diff
+
+ def intersect_list_of_dicts(self, w, h):
+ intersect = []
+ wmem = []
+ hmem = []
+ for d in w:
+ wmem.append({"member": d["member"]})
+ for d in h:
+ hmem.append({"member": d["member"]})
+ set_w = set(tuple(sorted(d.items())) for d in wmem)
+ set_h = set(tuple(sorted(d.items())) for d in hmem)
+ intersection = set_w.intersection(set_h)
+ for element in intersection:
+ intersect.append(dict((x, y) for x, y in element))
+ return intersect
+
+ def add_commands(self, diff, name):
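+        # Illustrative sketch only (sample values): for
+        # diff = [{"member": "Ethernet1/1", "mode": "active"}] and name = "port-channel10"
+        # (strip() leaves only the numeric id "10"), this would return:
+        #   ["interface Ethernet1/1", "channel-group 10 mode active"]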
+ commands = []
+ name = name.strip("port-channel")
+ for d in diff:
+ commands.append("interface" + " " + d["member"])
+ cmd = ""
+ group_cmd = "channel-group {0}".format(name)
+ if d.get("force"):
+ cmd = group_cmd + " force "
+ if "mode" in d:
+ if cmd:
+ cmd = cmd + " mode " + d["mode"]
+ else:
+ cmd = group_cmd + " mode " + d["mode"]
+ if not cmd:
+ cmd = group_cmd
+ commands.append(cmd)
+ return commands
+
+ def set_commands(self, w, have):
+ commands = []
+ obj_in_have = search_obj_in_list(w["name"], have, "name")
+ if not obj_in_have:
+ commands = self.add_commands(w["members"], w["name"])
+ else:
+ if "members" not in obj_in_have:
+ obj_in_have["members"] = None
+ diff = self.diff_list_of_dicts(w["members"], obj_in_have["members"])
+ commands = self.add_commands(diff, w["name"])
+ return commands
+
+ def del_all_commands(self, obj_in_have):
+ commands = []
+ if not obj_in_have:
+ return commands
+ for m in obj_in_have.get("members", []):
+ commands.append("interface" + " " + m["member"])
+ commands.append("no channel-group")
+ return commands
+
+ def del_intf_commands(self, w, have):
+ commands = []
+ obj_in_have = search_obj_in_list(w["name"], have, "name")
+ if obj_in_have:
+ lst_to_del = self.intersect_list_of_dicts(w["members"], obj_in_have["members"])
+ if lst_to_del:
+ for item in lst_to_del:
+ commands.append("interface" + " " + item["member"])
+ commands.append("no channel-group")
+ return commands
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_global/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_global/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_global/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_global/lldp_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_global/lldp_global.py
new file mode 100644
index 00000000..68d98621
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_global/lldp_global.py
@@ -0,0 +1,277 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos_lldp_global class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_diff,
+ remove_empties,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+
+
+class Lldp_global(ConfigBase):
+ """
+ The nxos_lldp_global class
+ """
+
+ gather_subset = ["!all", "!min"]
+
+ gather_network_resources = ["lldp_global"]
+
+ def __init__(self, module):
+ super(Lldp_global, self).__init__(module)
+
+ def get_lldp_global_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ lldp_global_facts = facts["ansible_network_resources"].get("lldp_global")
+ if not lldp_global_facts:
+ return {}
+ return lldp_global_facts
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ commands = list()
+ warnings = list()
+
+ if self.state in self.ACTION_STATES:
+ existing_lldp_global_facts = self.get_lldp_global_facts()
+ else:
+ existing_lldp_global_facts = {}
+
+ if self.state in self.ACTION_STATES or self.state == "rendered":
+ commands.extend(self.set_config(existing_lldp_global_facts))
+
+ if commands and self.state in self.ACTION_STATES:
+ if not self._module.check_mode:
+ self._connection.edit_config(commands)
+ result["changed"] = True
+
+ if self.state in self.ACTION_STATES:
+ result["commands"] = commands
+
+ if self.state in self.ACTION_STATES or self.state == "gathered":
+ changed_lldp_global_facts = self.get_lldp_global_facts()
+
+ elif self.state == "rendered":
+ result["rendered"] = commands
+
+ elif self.state == "parsed":
+ running_config = self._module.params["running_config"]
+ if not running_config:
+ self._module.fail_json(
+ msg="value of running_config parameter must not be empty for state parsed",
+ )
+ result["parsed"] = self.get_lldp_global_facts(data=running_config)
+
+ if self.state in self.ACTION_STATES:
+ result["before"] = existing_lldp_global_facts
+ if result["changed"]:
+ result["after"] = changed_lldp_global_facts
+
+ elif self.state == "gathered":
+ result["gathered"] = changed_lldp_global_facts
+
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_lldp_global_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params["config"]
+ have = existing_lldp_global_facts
+ resp = self.set_state(remove_empties(want), have)
+ return resp
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params["state"]
+ if state in ("merged", "replaced", "rendered") and not want:
+ self._module.fail_json(
+ msg="value of config parameter must not be empty for state {0}".format(state),
+ )
+ commands = list()
+
+ if state == "deleted":
+ commands = self._state_deleted(have)
+ elif state in ["merged", "rendered"]:
+ commands = self._state_merged(want, have)
+ elif state == "replaced":
+ commands = self._state_replaced(want, have)
+ return commands
+
+ def _state_replaced(self, want, have):
+ """The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ merge_dict = dict_diff(have, want)
+ # merge_dict will contain new and unique values in want
+ delete_dict = self.find_delete_params(have, want)
+ self._module.params["state"] = "deleted"
+ commands.extend(self._state_deleted(delete_dict)) # delete
+ self._module.params["state"] = "merged"
+ commands.extend(self.set_commands(merge_dict)) # merge
+ self._module.params["state"] = "replaced"
+ return commands
+
+ def delete_nested_dict(self, have, want):
+ """
+ Returns tlv_select nested dict that needs to be defaulted
+ """
+ outer_dict = {}
+ for key, val in have.items():
+ inner_dict = {}
+ if not isinstance(val, dict):
+ if key not in want.keys():
+ inner_dict.update({key: val})
+ return inner_dict
+ else:
+ if key in want.keys():
+ outer_dict.update({key: self.delete_nested_dict(val, want[key])})
+ else:
+ outer_dict.update({key: val})
+ return outer_dict
+
+ def find_delete_params(self, have, want):
+ """
+ Returns parameters that are present in have and not in want, that need to be defaulted
+ """
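+        # Illustrative sketch only (sample values): for
+        # have = {"holdtime": 125, "reinit": 3} and want = {"holdtime": 125}
+        # this would return {"reinit": 3}, i.e. the options to be defaulted.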
+ delete_dict = {}
+ for key, val in have.items():
+ if key not in want.keys():
+ delete_dict.update({key: val})
+ else:
+ if key == "tlv_select":
+ delete_dict.update(
+ {key: self.delete_nested_dict(have["tlv_select"], want["tlv_select"])},
+ )
+ return delete_dict
+
+ def _state_merged(self, want, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = []
+ diff = dict_diff(have, want)
+ commands.extend(self.set_commands(diff))
+ return commands
+
+ def _state_deleted(self, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if have:
+ for key, val in have.items():
+ if "tlv_select" in key:
+ commands.extend(self.process_nested_dict(val))
+ else:
+ if key == "port_id":
+ key = "portid-subtype"
+ commands.append("no lldp " + key + " " + str(val))
+
+ return commands
+
+ def set_commands(self, diff):
+ commands = []
+ for key, val in diff.items():
+ commands.extend(self.add_commands(key, val))
+ return commands
+
+ def add_commands(self, key, val):
+ command = []
+ if "port_id" in key:
+ command.append("lldp portid-subtype " + str(val))
+ elif "tlv_select" in key:
+ command.extend(self.process_nested_dict(val))
+ else:
+ if val:
+ command.append("lldp " + key + " " + str(val))
+ return command
+
+ def process_nested_dict(self, val):
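+        # Illustrative sketch only (sample values, state assumed to be merged):
+        # for val = {"system": {"name": False}, "dcbxp": True} this would return
+        #   ["no lldp tlv-select system-name", "lldp tlv-select dcbxp"]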
+ nested_commands = []
+ for k, v in val.items():
+ if isinstance(v, dict):
+ for k1, v1 in v.items():
+ com1 = "lldp tlv-select "
+ com2 = ""
+ if "system" in k:
+ com2 = "system-" + k1
+ elif "management_address" in k:
+ com2 = "management-address " + k1
+ elif "port" in k:
+ com2 = "port-" + k1
+
+ com1 += com2
+ com1 = self.negate_command(com1, v1)
+ nested_commands.append(com1)
+ else:
+ com1 = "lldp tlv-select "
+ if "power_management" in k:
+ com1 += "power-management"
+ else:
+ com1 += k
+
+ com1 = self.negate_command(com1, v)
+ nested_commands.append(com1)
+ return nested_commands
+
+ def negate_command(self, command, val):
+ # for merged, replaced vals need to be checked to add 'no'
+ if self._module.params["state"] == "merged":
+ if not val:
+ command = "no " + command
+ return command
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_interfaces/lldp_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_interfaces/lldp_interfaces.py
new file mode 100644
index 00000000..528f7497
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/lldp_interfaces/lldp_interfaces.py
@@ -0,0 +1,312 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos_lldp_interfaces class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to it's desired end-state is
+created
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_diff,
+ remove_empties,
+ to_list,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ flatten_dict,
+ get_interface_type,
+ normalize_interface,
+ search_obj_in_list,
+)
+
+
+class Lldp_interfaces(ConfigBase):
+ """
+ The nxos_lldp_interfaces class
+ """
+
+ gather_subset = ["!all", "!min"]
+
+ gather_network_resources = ["lldp_interfaces"]
+
+ def __init__(self, module):
+ super(Lldp_interfaces, self).__init__(module)
+
+ def get_lldp_interfaces_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ lldp_interfaces_facts = facts["ansible_network_resources"].get("lldp_interfaces")
+ if not lldp_interfaces_facts:
+ return []
+ return lldp_interfaces_facts
+
+ def edit_config(self, commands):
+ """Wrapper method for `_connection.edit_config()`
+ This exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return self._connection.edit_config(commands)
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ commands = list()
+ warnings = list()
+ state = self._module.params["state"]
+ action_states = ["merged", "replaced", "deleted", "overridden"]
+
+ if state == "gathered":
+ result["gathered"] = self.get_lldp_interfaces_facts()
+ elif state == "rendered":
+ result["rendered"] = self.set_config({})
+ elif state == "parsed":
+ result["parsed"] = self.set_config({})
+ else:
+ existing_lldp_interfaces_facts = self.get_lldp_interfaces_facts()
+ commands.extend(self.set_config(existing_lldp_interfaces_facts))
+ if commands and state in action_states:
+ if not self._module.check_mode:
+                    self.edit_config(commands)
+ result["changed"] = True
+            result["before"] = existing_lldp_interfaces_facts
+            result["commands"] = commands
+
+ changed_lldp_interfaces_facts = self.get_lldp_interfaces_facts()
+
+ if result["changed"]:
+ result["after"] = changed_lldp_interfaces_facts
+
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_lldp_interfaces_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ config = self._module.params["config"]
+ want = []
+ if config:
+ for w in config:
+ if get_interface_type(w["name"]) not in (
+ "management",
+ "ethernet",
+ ):
+ self._module.fail_json(
+ msg="This module works with either management or ethernet",
+ )
+ w.update({"name": normalize_interface(w["name"])})
+ want.append(remove_empties(w))
+ have = existing_lldp_interfaces_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ state = self._module.params["state"]
+ if state == "overridden":
+ commands = self._state_overridden(want, have)
+ elif state == "deleted":
+ commands = self._state_deleted(want, have)
+ elif state == "rendered":
+ commands = self._state_rendered(want)
+ elif state == "parsed":
+ want = self._module.params["running_config"]
+ commands = self._state_parsed(want)
+ else:
+ for w in want:
+ if state == "merged":
+ commands.extend(self._state_merged(flatten_dict(w), have))
+ elif state == "replaced":
+ commands.extend(self._state_replaced(flatten_dict(w), have))
+ return commands
+
+ def _state_parsed(self, want):
+ return self.get_lldp_interfaces_facts(want)
+
+ def _state_rendered(self, want):
+ commands = []
+ for w in want:
+ commands.extend(self.set_commands(w, {}))
+ return commands
+
+ def _state_gathered(self, have):
+ """The command generator when state is gathered
+
+ :rtype: A list
+ :returns: the commands necessary to reproduce the current configuration
+ """
+ commands = []
+ want = {}
+ commands.append(self.set_commands(want, have))
+ return commands
+
+ def _state_replaced(self, want, have):
+ """The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ del_commands = []
+ delete_dict = {}
+ obj_in_have = flatten_dict(search_obj_in_list(want["name"], have, "name"))
+ for k1 in obj_in_have.keys():
+ if k1 not in want.keys():
+ delete_dict.update({k1: obj_in_have[k1]})
+
+ if delete_dict:
+ delete_dict.update({"name": want["name"]})
+ del_commands = self.del_commands(delete_dict)
+ merged_commands = self.set_commands(want, have)
+
+ if merged_commands:
+ cmds = set(del_commands).intersection(set(merged_commands))
+ for cmd in cmds:
+ merged_commands.remove(cmd)
+
+ commands.extend(del_commands)
+ commands.extend(merged_commands)
+ return commands
+
+ def _state_overridden(self, want, have):
+ """The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ want_intfs = [w["name"] for w in want]
+ for h in have:
+ h = flatten_dict(h)
+ delete_dict = {}
+ if h["name"] in want_intfs:
+ for w in want:
+ if w["name"] == h["name"]:
+ delete_keys = list(set(h) - set(flatten_dict(w)))
+ for k in delete_keys:
+ delete_dict.update({k: h[k]})
+ delete_dict.update({"name": h["name"]})
+ break
+ else:
+ delete_dict.update(h)
+ commands.extend(self.del_commands(delete_dict))
+ for w in want:
+ commands.extend(self.set_commands(flatten_dict(w), have))
+ return commands
+
+ def _state_merged(self, want, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ return self.set_commands(want, have)
+
+ def _state_deleted(self, want, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if want:
+ for w in want:
+ obj_in_have = flatten_dict(search_obj_in_list(w["name"], have, "name"))
+ commands.extend(self.del_commands(obj_in_have))
+ else:
+ if not have:
+ return commands
+ for h in have:
+ commands.extend(self.del_commands(flatten_dict(h)))
+ return commands
+
+ def set_commands(self, want, have):
+ commands = []
+ obj_in_have = flatten_dict(search_obj_in_list(want["name"], have, "name"))
+ if not obj_in_have:
+ commands = self.add_commands(flatten_dict(want))
+ else:
+ diff = dict_diff(obj_in_have, want)
+ if diff:
+ diff.update({"name": want["name"]})
+ commands = self.add_commands(diff)
+ return commands
+
+ def add_commands(self, d):
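+        # Illustrative sketch only (sample values): for
+        # d = {"name": "Ethernet1/2", "transmit": False, "vlan": 10}
+        # this would return:
+        #   ["interface Ethernet1/2", "no lldp transmit", "lldp tlv-set vlan 10"]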
+ commands = []
+ if not d:
+ return commands
+ commands.append("interface " + d["name"])
+ if "transmit" in d:
+ if d["transmit"]:
+ commands.append("lldp transmit")
+ else:
+ commands.append("no lldp transmit")
+ if "receive" in d:
+ if d["receive"]:
+ commands.append("lldp receive")
+ else:
+ commands.append("no lldp receive")
+ if "management_address" in d:
+ commands.append("lldp tlv-set management-address " + d["management_address"])
+ if "vlan" in d:
+ commands.append("lldp tlv-set vlan " + str(d["vlan"]))
+
+ return commands
+
+ def del_commands(self, obj):
+ commands = []
+ if not obj or len(obj.keys()) == 1:
+ return commands
+ commands.append("interface " + obj["name"])
+ if "transmit" in obj:
+ commands.append("lldp transmit")
+ if "receive" in obj:
+ commands.append("lldp receive")
+ if "management_address" in obj:
+ commands.append("no lldp tlv-set management-address " + obj["management_address"])
+ if "vlan" in obj:
+ commands.append("no lldp tlv-set vlan " + str(obj["vlan"]))
+
+ return commands
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/logging_global/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/logging_global/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/logging_global/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/logging_global/logging_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/logging_global/logging_global.py
new file mode 100644
index 00000000..6e493ad3
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/logging_global/logging_global.py
@@ -0,0 +1,199 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos_logging_global config file.
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created.
+"""
+
+from copy import deepcopy
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module import (
+ ResourceModule,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_merge,
+ get_from_dict,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.logging_global import (
+ Logging_globalTemplate,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ get_logging_sevmap,
+)
+
+
+class Logging_global(ResourceModule):
+ """
+ The nxos_logging_global config class
+ """
+
+ def __init__(self, module):
+ super(Logging_global, self).__init__(
+ empty_fact_val={},
+ facts_module=Facts(module),
+ module=module,
+ resource="logging_global",
+ tmplt=Logging_globalTemplate(),
+ )
+ self._sev_map = get_logging_sevmap(invert=True)
+ self._state_set = ("replaced", "deleted", "overridden")
+ self.parsers = [
+ "console",
+ "module",
+ "monitor",
+ "logfile",
+ "event.link_status.enable",
+ "event.link_status.default",
+ "event.trunk_status.enable",
+ "event.trunk_status.default",
+ "history.severity",
+ "history.size",
+ "ip.access_list.cache.entries",
+ "ip.access_list.cache.interval",
+ "ip.access_list.cache.threshold",
+ "ip.access_list.detailed",
+ "ip.access_list.include.sgt",
+ "origin_id.hostname",
+ "origin_id.ip",
+ "origin_id.string",
+ "rate_limit",
+ "rfc_strict",
+ "source_interface",
+ "timestamp",
+ ]
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+
+ if self.state not in ["parsed", "gathered"]:
+ self.generate_commands()
+ self.run_commands()
+ return self.result
+
+ def generate_commands(self):
+ """Generate configuration commands to send based on
+ want, have and desired state.
+ """
+ wantd = self._logging_list_to_dict(self.want)
+ haved = self._logging_list_to_dict(self.have)
+
+ if self.state == "deleted":
+ # empty out want (in case something was specified)
+ # some items are populated later on for correct removal
+ wantd = {}
+
+ # pre-process `event.x.y` keys
+        for x in self.parsers[4:8]:
+ have_k = get_from_dict(haved, x)
+ want_k = get_from_dict(wantd, x)
+ if have_k is None and want_k is not None:
+ # set have to True to mimic default state
+ # this allows negate commands to be issued
+ self.__update_dict(haved, x)
+ if all(
+ (
+ self.state in self._state_set,
+ have_k is False,
+ want_k is None,
+ ),
+ ):
+ # if want is missing and have is negated
+ # set want to True in order to revert to default state
+ self.__update_dict(wantd, x)
+
+ # if state is merged, merge want onto have and then compare
+ if self.state == "merged":
+ for x in self.parsers[0:4]:
+ hstate = haved.get(x, {}).get("state", "")
+ wstate = wantd.get(x, {}).get("state", "")
+ if hstate == "disabled" and not wstate:
+ # this ensures that updates are done
+ # with correct `state`
+ if wantd.get(x, {}):
+ wantd[x].update({"state": "enabled"})
+ wantd = dict_merge(haved, wantd)
+
+ if self.state in self._state_set:
+ # set default states for keys that appear in negated form
+ for x in self.parsers[0:3]:
+ if x in haved and x not in wantd:
+ wantd[x] = {"state": "enabled"}
+ if "rate_limit" in haved and "rate_limit" not in wantd:
+ wantd["rate_limit"] = "enabled"
+ if "logfile" in haved and "logfile" not in wantd:
+ wantd["logfile"] = {"name": "messages", "severity": 5}
+
+ self._compare(want=wantd, have=haved)
+
+ def _compare(self, want, have):
+ """Leverages the base class `compare()` method and
+ populates the list of commands to be run by comparing
+ the `want` and `have` data with the `parsers` defined
+ for the Logging_global network resource.
+ """
+ self.compare(parsers=self.parsers, want=want, have=have)
+ self._compare_lists(want, have)
+
+ def _compare_lists(self, want, have):
+ """Compare list of dictionaries"""
+ for x in ["facilities", "hosts"]:
+ wantx = want.get(x, {})
+ havex = have.get(x, {})
+ for key, wentry in iteritems(wantx):
+ hentry = havex.pop(key, {})
+ if wentry != hentry:
+ if x == "hosts" and self.state in self._state_set:
+ # remove have config for hosts
+ # else want gets appended
+ self.addcmd(hentry, x, negate=True)
+ self.addcmd(wentry, x)
+ for key, hentry in iteritems(havex):
+ self.addcmd(hentry, x, negate=True)
+
+ def _logging_list_to_dict(self, data):
+        """Convert all lists of dicts to dicts
+        of dicts and substitute severity values
+ """
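+        # Illustrative sketch only (sample values, assuming the standard syslog
+        # mapping in which "alert" is severity 1): an input of
+        # {"hosts": [{"host": "192.0.2.1", "severity": "alert"}]} would become
+        # {"hosts": {"192.0.2.1": {"host": "192.0.2.1", "severity": 1}}}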
+ tmp = deepcopy(data)
+ pkey = {"hosts": "host", "facilities": "facility"}
+ for k in ("hosts", "facilities"):
+ if k in tmp:
+ for x in tmp[k]:
+ if "severity" in x:
+ x["severity"] = self._sev_map[x["severity"]]
+ tmp[k] = {i[pkey[k]]: i for i in tmp[k]}
+ for k in ("console", "history", "logfile", "module", "monitor"):
+ if "severity" in tmp.get(k, {}):
+ tmp[k]["severity"] = self._sev_map[tmp[k]["severity"]]
+ return tmp
+
+ def __update_dict(self, datadict, key, nval=True):
+ """Utility method that updates last subkey of
+ `datadict` as identified by `key` to `nval`.
+ """
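+        # Illustrative sketch only (sample values): calling
+        # self.__update_dict(d, "event.link_status.enable") on d = {} leaves
+        # d == {"event": {"link_status": {"enable": True}}}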
+ keys = key.split(".")
+ if keys[0] not in datadict:
+ datadict[keys[0]] = {}
+ if keys[1] not in datadict[keys[0]]:
+ datadict[keys[0]][keys[1]] = {}
+ datadict[keys[0]][keys[1]].update({keys[2]: nval})
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ntp_global/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ntp_global/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ntp_global/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ntp_global/ntp_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ntp_global/ntp_global.py
new file mode 100644
index 00000000..57a1a58d
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ntp_global/ntp_global.py
@@ -0,0 +1,161 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos_ntp_global config file.
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created.
+"""
+
+from copy import deepcopy
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module import (
+ ResourceModule,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_merge,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.ntp_global import (
+ Ntp_globalTemplate,
+)
+
+
+class Ntp_global(ResourceModule):
+ """
+ The nxos_ntp_global config class
+ """
+
+ def __init__(self, module):
+ super(Ntp_global, self).__init__(
+ empty_fact_val={},
+ facts_module=Facts(module),
+ module=module,
+ resource="ntp_global",
+ tmplt=Ntp_globalTemplate(),
+ )
+ self.parsers = [
+ "access_group.match_all",
+ "allow.control.rate_limit",
+ "allow.private",
+ "authenticate",
+ "logging",
+ "master.stratum",
+ "passive",
+ "source",
+ "source_interface",
+ ]
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ if self.state not in ["parsed", "gathered"]:
+ self.generate_commands()
+ self.run_commands()
+ return self.result
+
+ def generate_commands(self):
+ """Generate configuration commands to send based on
+ want, have and desired state.
+ """
+ wantd = self._ntp_list_to_dict(self.want)
+ haved = self._ntp_list_to_dict(self.have)
+
+ # if state is merged, merge want onto have and then compare
+ if self.state == "merged":
+ wantd = dict_merge(haved, wantd)
+
+ # if state is deleted, empty out wantd
+ if self.state == "deleted":
+ wantd = {}
+
+ self._compare(want=wantd, have=haved)
+
+ def _compare(self, want, have):
+ """Leverages the base class `compare()` method and
+ populates the list of commands to be run by comparing
+ the `want` and `have` data with the `parsers` defined
+ for the Ntp_global network resource.
+ """
+ self.compare(parsers=self.parsers, want=want, have=have)
+ self._compare_lists(want=want, have=have)
+ self._compare_access_group(want=want, have=have)
+
+ def _compare_lists(self, want, have):
+ keys = ["authentication_keys", "peers", "servers", "trusted_keys"]
+ for x in keys:
+ wantx = want.get(x, {})
+ havex = have.get(x, {})
+
+ for wkey, wentry in iteritems(wantx):
+ hentry = havex.pop(wkey, {})
+
+ # pop aliased keys to preserve idempotence
+ if x in ["peers", "servers"]:
+ wentry.pop("use_vrf", None)
+
+ if wentry != hentry:
+ if x in keys[1:3] and self.state in [
+ "overridden",
+ "replaced",
+ ]:
+                        # remove existing config else it gets appended
+ self.addcmd(hentry, x, negate=True)
+ self.addcmd(wentry, x)
+
+            # remove superfluous config
+ for _hkey, hentry in iteritems(havex):
+ self.addcmd(hentry, x, negate=True)
+
+ def _compare_access_group(self, want, have):
+ want_ag = want.get("access_group", {})
+ have_ag = have.get("access_group", {})
+
+ for x in ["peer", "query_only", "serve", "serve_only"]:
+ wx = want_ag.get(x, {})
+ hx = have_ag.get(x, {})
+
+ for wkey, wentry in iteritems(wx):
+ hentry = hx.pop(wkey, {})
+ if wentry != hentry:
+ self.addcmd(wentry, x)
+
+            # remove superfluous config
+ for hentry in hx.values():
+ self.addcmd(hentry, x, negate=True)
+
+ def _ntp_list_to_dict(self, data):
+        """Convert all lists of dicts to dicts
+        of dicts
+ """
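+        # Illustrative sketch only (sample values): an input of
+        # {"servers": [{"server": "192.0.2.1", "prefer": True}]} would become
+        # {"servers": {"192.0.2.1": {"server": "192.0.2.1", "prefer": True}}}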
+ tmp = deepcopy(data)
+ if "access_group" in tmp:
+ for x in ["peer", "query_only", "serve", "serve_only"]:
+ if x in tmp["access_group"]:
+ tmp["access_group"][x] = {i["access_list"]: i for i in tmp["access_group"][x]}
+ pkey = {
+ "authentication_keys": "id",
+ "peers": "peer",
+ "servers": "server",
+ "trusted_keys": "key_id",
+ }
+ for k in pkey.keys():
+ if k in tmp:
+ tmp[k] = {i[pkey[k]]: i for i in tmp[k]}
+ return tmp
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospf_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospf_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospf_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospf_interfaces/ospf_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospf_interfaces/ospf_interfaces.py
new file mode 100644
index 00000000..0a1bc580
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospf_interfaces/ospf_interfaces.py
@@ -0,0 +1,204 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2020 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos_ospf_interfaces config file.
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created.
+"""
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module import (
+ ResourceModule,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_merge,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.ospf_interfaces import (
+ Ospf_interfacesTemplate,
+)
+
+
+class Ospf_interfaces(ResourceModule):
+ """
+ The nxos_ospf_interfaces config class
+ """
+
+ def __init__(self, module):
+ super(Ospf_interfaces, self).__init__(
+ empty_fact_val=[],
+ facts_module=Facts(module),
+ module=module,
+ resource="ospf_interfaces",
+ tmplt=Ospf_interfacesTemplate(),
+ )
+ self.parsers = [
+ "authentication",
+ "authentication_key",
+ "message_digest_key",
+ "cost",
+ "dead_interval",
+ "hello_interval",
+ "instance",
+ "mtu_ignore",
+ "network",
+ "passive_interface",
+ "priority",
+ "retransmit_interval",
+ "shutdown",
+ "transmit_delay",
+ ]
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ if self.state not in ["parsed", "gathered"]:
+ self.generate_commands()
+ self.run_commands()
+ return self.result
+
+ def generate_commands(self):
+ """Generate configuration commands to send based on
+ want, have and desired state.
+ """
+ wantd = {entry["name"]: entry for entry in self.want}
+ haved = {entry["name"]: entry for entry in self.have}
+
+ # turn all lists of dicts into dicts prior to merge
+ for entry in wantd, haved:
+ self._list_to_dict(entry)
+
+ # if state is merged, merge want onto have and then compare
+ if self.state == "merged":
+ wantd = dict_merge(haved, wantd)
+
+ # if state is deleted, empty out wantd and set haved to wantd
+ if self.state == "deleted":
+ haved = {k: v for k, v in iteritems(haved) if k in wantd or not wantd}
+ wantd = {}
+
+ # remove superfluous config for overridden and deleted
+ if self.state in ["overridden", "deleted"]:
+ for k, have in iteritems(haved):
+ if k not in wantd:
+ self._compare(want={}, have=have)
+
+ for k, want in iteritems(wantd):
+ self._compare(want=want, have=haved.pop(k, {}))
+
+ def _compare(self, want, have):
+ """Leverages the base class `compare()` method and
+ populates the list of commands to be run by comparing
+ the `want` and `have` data with the `parsers` defined
+ for the Ospf_interfaces network resource.
+ """
+ begin = len(self.commands)
+ self._compare_ospf_interfaces(want, have)
+ if len(self.commands) != begin:
+ self.commands.insert(begin, self._tmplt.render(want or have, "interface", False))
+
+ def _compare_ospf_interfaces(self, want, have):
+ waf = want.get("address_family", {})
+ haf = have.get("address_family", {})
+
+ for afi in ("ipv4", "ipv6"):
+ witem = waf.pop(afi, {})
+ hitem = haf.pop(afi, {})
+
+ # this key needs to be compared separately and
+ # popped from `authentication` dict to
+ # preserve idempotence for other keys in this dict
+ self.compare(["authentication.key_chain"], want=witem, have=hitem)
+ witem.get("authentication", {}).pop("key_chain", None)
+ hitem.get("authentication", {}).pop("key_chain", None)
+
+ # this ensures that the "no" form of "ip ospf passive-interface"
+ # command is executed even when there is no existing config
+ if witem.get("passive_interface") is False and "passive_interface" not in hitem:
+ hitem["passive_interface"] = True
+
+ if "passive_interface" in hitem and witem.get("default_passive_interface"):
+ self.commands.append(self._generate_passive_intf(witem))
+
+ self.compare(parsers=self.parsers, want=witem, have=hitem)
+
+ # compare top-level `multi_areas` config
+ for area in witem.get("multi_areas", []):
+ if area not in hitem.get("multi_areas", []):
+ self.addcmd({"afi": afi, "area": area}, "multi_areas", negate=False)
+ # remove superfluous top-level `multi_areas` config
+ for area in hitem.get("multi_areas", []):
+ if area not in witem.get("multi_areas", []):
+ self.addcmd({"afi": afi, "area": area}, "multi_areas", negate=True)
+
+ # compare config->address_family->processes
+ self._compare_processes(afi, witem.get("processes", {}), hitem.get("processes", {}))
+
+ def _compare_processes(self, afi, want, have):
+ # add and update config->address_family->processes
+
+ for w_id, wproc in want.items():
+ hproc = have.pop(w_id, {})
+ hproc["afi"] = wproc["afi"] = afi
+
+ # compare config->address_family->processes->area
+ self.compare(["area"], wproc, hproc)
+
+ # compare config->address_family->processes->multi_areas
+ marea_dict = {"afi": afi, "process_id": wproc["process_id"]}
+ for area in wproc.get("multi_areas", []):
+ if area not in hproc.get("multi_areas", []):
+ marea_dict["area"] = area
+ self.addcmd(marea_dict, "processes_multi_areas", negate=False)
+ # remove superfluous processes->multi_areas config
+ for area in hproc.get("multi_areas", []):
+ if area not in wproc.get("multi_areas", []):
+ marea_dict["area"] = area
+ self.addcmd(marea_dict, "processes_multi_areas", negate=True)
+
+ # remove superfluous config->address_family->processes
+ for hproc in have.values():
+ hproc["afi"] = afi
+
+ # remove config->address_family->processes->area
+ self.addcmd(hproc, "area", negate=True)
+
+ # remove superfluous processes->multi_areas config
+ marea_dict = {"afi": afi, "process_id": hproc["process_id"]}
+ for area in hproc.get("multi_areas", []):
+ marea_dict["area"] = area
+ self.addcmd(marea_dict, "processes_multi_areas", negate=True)
+
+ def _list_to_dict(self, entry):
+ for item in entry.values():
+ for ag in item.get("address_family", []):
+ ag["processes"] = {
+ subentry["process_id"]: subentry for subentry in ag.get("processes", [])
+ }
+ item["address_family"] = {
+ subentry["afi"]: subentry for subentry in item.get("address_family", [])
+ }
+
+ def _generate_passive_intf(self, data):
+ cmd = "default "
+ if data["afi"] == "ipv4":
+ cmd += "ip ospf passive-interface"
+ else:
+ cmd += "ospfv3 passive-interface"
+ return cmd
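
For reference, a minimal standalone sketch of the keying that _list_to_dict performs before the merge and compare steps above; the interface name and values are hypothetical and only mirror the shape of the module's facts:

    # Standalone sketch of the _list_to_dict keying; the data is hypothetical.
    entry = {
        "Ethernet1/1": {
            "name": "Ethernet1/1",
            "address_family": [
                {
                    "afi": "ipv4",
                    "cost": 100,
                    "processes": [{"process_id": "100", "area": {"area_id": "1.1.1.1"}}],
                },
            ],
        },
    }

    for item in entry.values():
        for ag in item.get("address_family", []):
            ag["processes"] = {p["process_id"]: p for p in ag.get("processes", [])}
        item["address_family"] = {af["afi"]: af for af in item.get("address_family", [])}

    # Lists are now dicts keyed by afi and process_id, so dict_merge and the
    # per-key compare() calls can line up want and have directly, e.g.:
    # entry["Ethernet1/1"]["address_family"]["ipv4"]["processes"]["100"]
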
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv2/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv2/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv2/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv2/ospfv2.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv2/ospfv2.py
new file mode 100644
index 00000000..8a6f42e7
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv2/ospfv2.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos_ospfv2 class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module import (
+ ResourceModule,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_merge,
+ get_from_dict,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.ospfv2 import (
+ Ospfv2Template,
+)
+
+
+class Ospfv2(ResourceModule):
+ """
+ The nxos_ospfv2 class
+ """
+
+ def __init__(self, module):
+ super(Ospfv2, self).__init__(
+ empty_fact_val={},
+ facts_module=Facts(module),
+ module=module,
+ resource="ospfv2",
+ tmplt=Ospfv2Template(),
+ )
+ self.parsers = [
+ "router_id",
+ "auto_cost",
+ "graceful_restart.set",
+ "graceful_restart.helper_disable",
+ "isolate",
+ "log_adjacency_changes",
+ "max_lsa",
+ "mpls.traffic_eng.router_id",
+ "mpls.traffic_eng.multicast_intact",
+ "name_lookup",
+ "passive_interface.default",
+ "rfc1583compatibility",
+ "shutdown",
+ "default_information.originate",
+ "default_metric",
+ "distance",
+ "table_map",
+ "timers.lsa_arrival",
+ "timers.lsa_group_pacing",
+ "timers.throttle.lsa",
+ "timers.throttle.spf",
+ "maximum_paths",
+ "max_metric",
+ "down_bit_ignore",
+ "capability.vrf_lite",
+ "bfd",
+ ]
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ if self.state not in ["parsed", "gathered"]:
+ self.gen_config()
+ self.run_commands()
+ return self.result
+
+ def gen_config(self):
+ """Generate configuration commands to send based on
+ want, have and desired state.
+ """
+ wantd = {(entry["process_id"]): entry for entry in self.want.get("processes", [])}
+ haved = {(entry["process_id"]): entry for entry in self.have.get("processes", [])}
+
+ # turn all lists of dicts into dicts prior to merge
+ for entry in wantd, haved:
+ self._ospf_list_to_dict(entry)
+
+ # if state is merged, merge want onto have and then compare
+ if self.state == "merged":
+ wantd = dict_merge(haved, wantd)
+
+ # if state is deleted, limit haved to the keys present in wantd
+ # (all of haved when wantd is empty) and empty out wantd
+ if self.state == "deleted":
+ haved = {k: v for k, v in iteritems(haved) if k in wantd or not wantd}
+ wantd = {}
+
+ # if state is overridden or deleted, remove processes that are in have but not in want
+ if self.state in ["overridden", "deleted"]:
+ for k, have in iteritems(haved):
+ if k not in wantd:
+ self.addcmd(have, "process_id", True)
+
+ for k, want in iteritems(wantd):
+ self._compare(want=want, have=haved.pop(k, {}))
+
+ def _compare(self, want, have):
+ begin = len(self.commands)
+ self.compare(self.parsers, want=want, have=have)
+ self._compare_lists(want=want, have=have)
+ self._areas_compare(want=want, have=have)
+ self._vrfs_compare(want=want, have=have)
+
+ if len(self.commands) != begin or (not have and want):
+ self.commands.insert(
+ begin,
+ self._tmplt.render(
+ want or have,
+ "vrf" if "vrf" in (want.keys() or have.keys()) else "process_id",
+ False,
+ ),
+ )
+
+ def _areas_compare(self, want, have):
+ wareas = want.get("areas", {})
+ hareas = have.get("areas", {})
+ for name, entry in iteritems(wareas):
+ self._area_compare(want=entry, have=hareas.pop(name, {}))
+ for name, entry in iteritems(hareas):
+ self._area_compare(want={}, have=entry)
+
+ def _area_compare(self, want, have):
+ parsers = [
+ "area.default_cost",
+ "area.authentication",
+ "area.nssa",
+ "area.nssa.translate",
+ "area.stub",
+ ]
+ self.compare(parsers=parsers, want=want, have=have)
+ self._area_compare_lists(want=want, have=have)
+
+ def _area_compare_lists(self, want, have):
+ for attrib in ["filter_list", "ranges"]:
+ wdict = want.get(attrib, {})
+ hdict = have.get(attrib, {})
+ for key, entry in iteritems(wdict):
+ if entry != hdict.pop(key, {}):
+ entry["area_id"] = want["area_id"]
+ self.addcmd(entry, "area.{0}".format(attrib), False)
+ # remove remaining items in have for replaced
+ for entry in hdict.values():
+ entry["area_id"] = have["area_id"]
+ self.addcmd(entry, "area.{0}".format(attrib), True)
+
+ def _compare_lists(self, want, have):
+ for attrib in [
+ "summary_address",
+ "redistribute",
+ "mpls.traffic_eng.areas",
+ ]:
+ wdict = get_from_dict(want, attrib) or {}
+ hdict = get_from_dict(have, attrib) or {}
+
+ for key, entry in iteritems(wdict):
+ if entry != hdict.pop(key, {}):
+ self.addcmd(entry, attrib, False)
+ # remove remaining items in have for replaced
+ for entry in hdict.values():
+ self.addcmd(entry, attrib, True)
+
+ def _vrfs_compare(self, want, have):
+ wvrfs = want.get("vrfs", {})
+ hvrfs = have.get("vrfs", {})
+ for name, entry in iteritems(wvrfs):
+ self._compare(want=entry, have=hvrfs.pop(name, {}))
+ # remove remaining items in have for replaced
+ for name, entry in iteritems(hvrfs):
+ self.addcmd(entry, "vrf", True)
+
+ def _ospf_list_to_dict(self, entry):
+ for _pid, proc in iteritems(entry):
+ for area in proc.get("areas", []):
+ area["ranges"] = {entry["prefix"]: entry for entry in area.get("ranges", [])}
+ area["filter_list"] = {
+ entry["direction"]: entry for entry in area.get("filter_list", [])
+ }
+ mpls_areas = {
+ entry["area_id"]: entry
+ for entry in proc.get("mpls", {}).get("traffic_eng", {}).get("areas", [])
+ }
+ if mpls_areas:
+ proc["mpls"]["traffic_eng"]["areas"] = mpls_areas
+ proc["areas"] = {entry["area_id"]: entry for entry in proc.get("areas", [])}
+ proc["summary_address"] = {
+ entry["prefix"]: entry for entry in proc.get("summary_address", [])
+ }
+ proc["redistribute"] = {
+ (entry.get("id"), entry["protocol"]): entry
+ for entry in proc.get("redistribute", [])
+ }
+ if "vrfs" in proc:
+ proc["vrfs"] = {entry["vrf"]: entry for entry in proc.get("vrfs", [])}
+ self._ospf_list_to_dict(proc["vrfs"])
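
The merged branch above layers want onto have with dict_merge from ansible.netcommon before comparing. A rough standalone approximation of that recursive merge, shown on hypothetical OSPF process data (the real helper may differ in corner cases):

    # Rough, standalone approximation of layering want onto have for state
    # merged; dict_merge from ansible.netcommon is the real implementation.
    def naive_merge(base, other):
        merged = dict(base)
        for key, value in other.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = naive_merge(merged[key], value)
            else:
                merged[key] = value
        return merged

    have = {"100": {"process_id": "100", "router_id": "192.0.2.1", "bfd": True}}
    want = {"100": {"process_id": "100", "router_id": "198.51.100.1"}}

    print(naive_merge(have, want))
    # {'100': {'process_id': '100', 'router_id': '198.51.100.1', 'bfd': True}}
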
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv3/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv3/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv3/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv3/ospfv3.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv3/ospfv3.py
new file mode 100644
index 00000000..e60fc31a
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/ospfv3/ospfv3.py
@@ -0,0 +1,230 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2020 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos_ospfv3 config file.
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created.
+"""
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module import (
+ ResourceModule,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_merge,
+ get_from_dict,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.ospfv3 import (
+ Ospfv3Template,
+)
+
+
+class Ospfv3(ResourceModule):
+ """
+ The nxos_ospfv3 config class
+ """
+
+ def __init__(self, module):
+ super(Ospfv3, self).__init__(
+ empty_fact_val={},
+ facts_module=Facts(module),
+ module=module,
+ resource="ospfv3",
+ tmplt=Ospfv3Template(),
+ )
+ self.parsers = [
+ "auto_cost",
+ "flush_routes",
+ "graceful_restart.set",
+ "graceful_restart.helper_disable",
+ "graceful_restart.grace_period",
+ "graceful_restart.planned_only",
+ "isolate",
+ "log_adjacency_changes",
+ "max_lsa",
+ "max_metric",
+ "name_lookup",
+ "passive_interface.default",
+ "router_id",
+ "shutdown",
+ "timers.lsa_arrival",
+ "timers.lsa_group_pacing",
+ "timers.throttle.lsa",
+ ]
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ if self.state not in ["parsed", "gathered"]:
+ self.generate_commands()
+ self.run_commands()
+ return self.result
+
+ def generate_commands(self):
+ """Generate configuration commands to send based on
+ want, have and desired state.
+ """
+ wantd = {(entry["process_id"]): entry for entry in self.want.get("processes", [])}
+ haved = {(entry["process_id"]): entry for entry in self.have.get("processes", [])}
+
+ # turn all lists of dicts into dicts prior to merge
+ for entry in wantd, haved:
+ self._ospfv3_list_to_dict(entry)
+
+ # if state is merged, merge want onto have and then compare
+ if self.state == "merged":
+ wantd = dict_merge(haved, wantd)
+
+ # if state is deleted, limit haved to the keys present in wantd
+ # (all of haved when wantd is empty) and empty out wantd
+ if self.state == "deleted":
+ haved = {k: v for k, v in iteritems(haved) if k in wantd or not wantd}
+ wantd = {}
+
+ # if state is overridden or deleted, remove processes that are in have but not in want
+ if self.state in ["overridden", "deleted"]:
+ for k, have in iteritems(haved):
+ if k not in wantd:
+ self.addcmd(have, "process_id", True)
+
+ for k, want in iteritems(wantd):
+ self._compare(want=want, have=haved.pop(k, {}))
+
+ def _compare(self, want, have):
+ """Leverages the base class `compare()` method and
+ populates the list of commands to be run by comparing
+ the `want` and `have` data with the `parsers` defined
+ for the Ospfv3 network resource.
+ """
+ begin = len(self.commands)
+ self.compare(parsers=self.parsers, want=want, have=have)
+ self._areas_compare(want=want, have=have)
+ self._vrfs_compare(want=want, have=have)
+ self._af_compare(want=want, have=have)
+
+ if len(self.commands) != begin or (not have and want):
+ self.commands.insert(
+ begin,
+ self._tmplt.render(
+ want or have,
+ "vrf" if "vrf" in (want.keys() or have.keys()) else "process_id",
+ False,
+ ),
+ )
+
+ def _areas_compare(self, want, have):
+ wareas = want.get("areas", {})
+ hareas = have.get("areas", {})
+ for name, entry in iteritems(wareas):
+ self._area_compare(want=entry, have=hareas.pop(name, {}))
+ for name, entry in iteritems(hareas):
+ self._area_compare(want={}, have=entry)
+
+ def _area_compare(self, want, have):
+ parsers = ["area.nssa", "area.nssa.translate", "area.stub"]
+ self.compare(parsers=parsers, want=want, have=have)
+
+ def _vrfs_compare(self, want, have):
+ wvrfs = want.get("vrfs", {})
+ hvrfs = have.get("vrfs", {})
+ for name, entry in iteritems(wvrfs):
+ self._compare(want=entry, have=hvrfs.pop(name, {}))
+ # remove remaining items in have for replaced
+ for name, entry in iteritems(hvrfs):
+ self.addcmd(entry, "vrf", True)
+
+ def _af_compare(self, want, have):
+ parsers = [
+ "default_information.originate",
+ "distance",
+ "maximum_paths",
+ "table_map",
+ "timers.throttle.spf",
+ ]
+ waf = want.get("address_family", {})
+ haf = have.get("address_family", {})
+
+ cmd_ptr = len(self.commands)
+
+ self._af_areas_compare(want=waf, have=haf)
+ self._af_compare_lists(want=waf, have=haf)
+ self.compare(parsers=parsers, want=waf, have=haf)
+
+ cmd_ptr_nxt = len(self.commands)
+ if cmd_ptr < cmd_ptr_nxt:
+ self.commands.insert(cmd_ptr, "address-family ipv6 unicast")
+
+ def _af_areas_compare(self, want, have):
+ wareas = want.get("areas", {})
+ hareas = have.get("areas", {})
+ for name, entry in iteritems(wareas):
+ self._af_area_compare(want=entry, have=hareas.pop(name, {}))
+ for name, entry in iteritems(hareas):
+ self._af_area_compare(want={}, have=entry)
+
+ def _af_area_compare(self, want, have):
+ self.compare(parsers=["area.default_cost"], want=want, have=have)
+ self._af_area_compare_lists(want=want, have=have)
+
+ def _af_area_compare_lists(self, want, have):
+ for attrib in ["filter_list", "ranges"]:
+ wdict = want.get(attrib, {})
+ hdict = have.get(attrib, {})
+ for key, entry in iteritems(wdict):
+ if entry != hdict.pop(key, {}):
+ entry["area_id"] = want["area_id"]
+ self.addcmd(entry, "area.{0}".format(attrib), False)
+ # remove remaining items in have for replaced
+ for entry in hdict.values():
+ entry["area_id"] = have["area_id"]
+ self.addcmd(entry, "area.{0}".format(attrib), True)
+
+ def _af_compare_lists(self, want, have):
+ for attrib in ["summary_address", "redistribute"]:
+ wdict = get_from_dict(want, attrib) or {}
+ hdict = get_from_dict(have, attrib) or {}
+
+ for key, entry in iteritems(wdict):
+ if entry != hdict.pop(key, {}):
+ self.addcmd(entry, attrib, False)
+ # remove remaining items in have for replaced
+ for entry in hdict.values():
+ self.addcmd(entry, attrib, True)
+
+ def _ospfv3_list_to_dict(self, entry):
+ for _pid, proc in iteritems(entry):
+ proc["areas"] = {entry["area_id"]: entry for entry in proc.get("areas", [])}
+ af = proc.get("address_family")
+ if af:
+ for area in af.get("areas", []):
+ area["ranges"] = {entry["prefix"]: entry for entry in area.get("ranges", [])}
+ area["filter_list"] = {
+ entry["direction"]: entry for entry in area.get("filter_list", [])
+ }
+ af["areas"] = {entry["area_id"]: entry for entry in af.get("areas", [])}
+ af["summary_address"] = {
+ entry["prefix"]: entry for entry in af.get("summary_address", [])
+ }
+ af["redistribute"] = {
+ (entry.get("id"), entry["protocol"]): entry
+ for entry in af.get("redistribute", [])
+ }
+ if "vrfs" in proc:
+ proc["vrfs"] = {entry["vrf"]: entry for entry in proc.get("vrfs", [])}
+ self._ospfv3_list_to_dict(proc["vrfs"])
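
The cmd_ptr bookkeeping in _af_compare above emits the "address-family ipv6 unicast" parent line only when at least one child command was generated. A minimal standalone sketch of that pattern, with placeholder commands:

    # Minimal sketch of inserting the parent line only when children exist;
    # the command strings are placeholders.
    commands = ["router ospfv3 100"]

    cmd_ptr = len(commands)
    child_commands = ["maximum-paths 8"]  # whatever the per-AF compare produced
    commands.extend(child_commands)

    if len(commands) > cmd_ptr:
        commands.insert(cmd_ptr, "address-family ipv6 unicast")

    print(commands)
    # ['router ospfv3 100', 'address-family ipv6 unicast', 'maximum-paths 8']
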
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/prefix_lists/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/prefix_lists/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/prefix_lists/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/prefix_lists/prefix_lists.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/prefix_lists/prefix_lists.py
new file mode 100644
index 00000000..2c6bb814
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/prefix_lists/prefix_lists.py
@@ -0,0 +1,146 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos_prefix_lists config file.
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created.
+"""
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module import (
+ ResourceModule,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_merge,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.prefix_lists import (
+ Prefix_listsTemplate,
+)
+
+
+class Prefix_lists(ResourceModule):
+ """
+ The nxos_prefix_lists config class
+ """
+
+ def __init__(self, module):
+ super(Prefix_lists, self).__init__(
+ empty_fact_val=[],
+ facts_module=Facts(module),
+ module=module,
+ resource="prefix_lists",
+ tmplt=Prefix_listsTemplate(),
+ )
+ self.parsers = []
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ if self.state not in ["parsed", "gathered"]:
+ self.generate_commands()
+ self.run_commands()
+ return self.result
+
+ def generate_commands(self):
+ """Generate configuration commands to send based on
+ want, have and desired state.
+ """
+ wantd = {entry["afi"]: entry for entry in self.want}
+ haved = {entry["afi"]: entry for entry in self.have}
+
+ self._prefix_list_transform(wantd)
+ self._prefix_list_transform(haved)
+
+ # if state is merged, merge want onto have and then compare
+ if self.state == "merged":
+ wantd = dict_merge(haved, wantd)
+
+ # if state is deleted, limit haved and its prefix lists to the
+ # entries targeted by wantd (everything when wantd is empty)
+ if self.state == "deleted":
+ haved = {k: v for k, v in iteritems(haved) if k in wantd or not wantd}
+ for key, hvalue in iteritems(haved):
+ wvalue = wantd.pop(key, {})
+ if wvalue:
+ wplists = wvalue.get("prefix_lists", {})
+ hplists = hvalue.get("prefix_lists", {})
+ hvalue["prefix_lists"] = {
+ k: v for k, v in iteritems(hplists) if k in wplists or not wplists
+ }
+
+ # remove superfluous config for overridden and deleted
+ if self.state in ["overridden", "deleted"]:
+ for k, have in iteritems(haved):
+ if k not in wantd:
+ self._compare(want={}, have=have)
+
+ for k, want in iteritems(wantd):
+ self._compare(want=want, have=haved.pop(k, {}))
+
+ def _compare(self, want, have):
+ """Leverages the base class `compare()` method and
+ populates the list of commands to be run by comparing
+ the `want` and `have` data with the `parsers` defined
+ for the Prefix_lists network resource.
+ """
+ wplists = want.get("prefix_lists", {})
+ hplists = have.get("prefix_lists", {})
+ for wk, wentry in iteritems(wplists):
+ hentry = hplists.pop(wk, {})
+ self.compare(["description"], want=wentry, have=hentry)
+ # compare sequences
+ self._compare_seqs(wentry.pop("entries", {}), hentry.pop("entries", {}))
+
+ if self.state in ["overridden", "deleted"]:
+ # remove remaining prefix lists
+ for h in hplists.values():
+ self.commands.append(
+ "no {0} prefix-list {1}".format(h["afi"].replace("ipv4", "ip"), h["name"]),
+ )
+
+ def _compare_seqs(self, want, have):
+ for wseq, wentry in iteritems(want):
+ hentry = have.pop(wseq, {})
+ if hentry != wentry:
+ if hentry:
+ if self.state == "merged":
+ self._module.fail_json(
+ msg="Cannot update existing sequence {0} of prefix list {1} with state merged."
+ " Please use state replaced or overridden.".format(
+ hentry["sequence"],
+ hentry["name"],
+ ),
+ )
+ else:
+ self.addcmd(hentry, "entry", negate=True)
+ self.addcmd(wentry, "entry")
+ # remove remaining entries from have prefix list
+ for hseq in have.values():
+ self.addcmd(hseq, "entry", negate=True)
+
+ def _prefix_list_transform(self, entry):
+ for afi, value in iteritems(entry):
+ if "prefix_lists" in value:
+ for plist in value["prefix_lists"]:
+ plist.update({"afi": afi})
+ if "entries" in plist:
+ for seq in plist["entries"]:
+ seq.update({"afi": afi, "name": plist["name"]})
+ plist["entries"] = {x["sequence"]: x for x in plist["entries"]}
+ value["prefix_lists"] = {entry["name"]: entry for entry in value["prefix_lists"]}
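
A standalone sketch of the reshaping done by _prefix_list_transform above, using a hypothetical ipv4 prefix list; the names, sequences and prefixes are illustrative only:

    # Standalone sketch of _prefix_list_transform; the data is hypothetical.
    entry = {
        "ipv4": {
            "afi": "ipv4",
            "prefix_lists": [
                {
                    "name": "AllowPrefix",
                    "entries": [
                        {"sequence": 10, "action": "permit", "prefix": "192.0.2.0/24"},
                        {"sequence": 20, "action": "deny", "prefix": "198.51.100.0/24"},
                    ],
                },
            ],
        },
    }

    for afi, value in entry.items():
        for plist in value.get("prefix_lists", []):
            plist["afi"] = afi
            for seq in plist.get("entries", []):
                seq.update({"afi": afi, "name": plist["name"]})
            plist["entries"] = {x["sequence"]: x for x in plist.get("entries", [])}
        value["prefix_lists"] = {p["name"]: p for p in value["prefix_lists"]}

    # entry["ipv4"]["prefix_lists"]["AllowPrefix"]["entries"][10]["prefix"]
    # -> '192.0.2.0/24'
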
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/route_maps/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/route_maps/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/route_maps/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/route_maps/route_maps.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/route_maps/route_maps.py
new file mode 100644
index 00000000..b098b065
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/route_maps/route_maps.py
@@ -0,0 +1,192 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos_route_maps config file.
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created.
+"""
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module import (
+ ResourceModule,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_merge,
+ get_from_dict,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.route_maps import (
+ Route_mapsTemplate,
+)
+
+
+class Route_maps(ResourceModule):
+ """
+ The nxos_route_maps config class
+ """
+
+ def __init__(self, module):
+ super(Route_maps, self).__init__(
+ empty_fact_val=[],
+ facts_module=Facts(module),
+ module=module,
+ resource="route_maps",
+ tmplt=Route_mapsTemplate(),
+ )
+ self.linear_parsers = [
+ "description",
+ "continue_sequence",
+ "set.as_path.prepend.last_as",
+ "set.as_path.tag",
+ "set.comm_list",
+ "set.dampening",
+ "set.extcomm_list",
+ "set.forwarding_address",
+ "set.null_interface",
+ "set.ip.address.prefix_list",
+ "set.ip.precedence",
+ "set.ipv6.address.prefix_list",
+ "set.ipv6.precedence",
+ "set.label_index",
+ "set.level",
+ "set.local_preference",
+ "set.metric",
+ "set.metric_type",
+ "set.nssa_only",
+ "set.origin",
+ "set.path_selection",
+ "set.tag",
+ "set.weight",
+ ]
+ self.complex_parsers = [
+ "match.as_number.asn",
+ "match.as_number.as_path_list",
+ "match.as_path",
+ "match.community.community_list",
+ "match.evpn.route_types",
+ "match.extcommunity.extcommunity_list",
+ "match.interfaces",
+ "match.ip.address.access_list",
+ "match.ip.address.prefix_lists",
+ "match.ip.multicast",
+ "match.ip.next_hop.prefix_lists",
+ "match.ip.route_source.prefix_lists",
+ "match.ipv6.address.access_list",
+ "match.ipv6.address.prefix_lists",
+ "match.ipv6.multicast",
+ "match.ipv6.next_hop.prefix_lists",
+ "match.ipv6.route_source.prefix_lists",
+ "match.mac_list",
+ "match.metric",
+ "match.ospf_area",
+ "match.route_types",
+ "match.source_protocol",
+ "match.tags",
+ "set.as_path.prepend.as_number",
+ "set.distance",
+ "set.evpn.gateway_ip",
+ "set.community",
+ ]
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ if self.state not in ["parsed", "gathered"]:
+ self.generate_commands()
+ self.run_commands()
+ return self.result
+
+ def generate_commands(self):
+ """Generate configuration commands to send based on
+ want, have and desired state.
+ """
+ wantd = self._route_maps_list_to_dict(self.want)
+ haved = self._route_maps_list_to_dict(self.have)
+
+ # if state is merged, merge want onto have and then compare
+ if self.state == "merged":
+ wantd = dict_merge(haved, wantd)
+
+ # if state is deleted, limit haved to the keys present in wantd
+ # (all of haved when wantd is empty) and empty out wantd
+ if self.state == "deleted":
+ haved = {k: v for k, v in iteritems(haved) if k in wantd or not wantd}
+ wantd = {}
+
+ # remove superfluous config for overridden and deleted
+ if self.state in ["overridden", "deleted"]:
+ for k, have in iteritems(haved):
+ if k not in wantd:
+ for _hk, hentry in iteritems(have.get("entries", {})):
+ self.commands.append(self._tmplt.render(hentry, "route_map", True))
+
+ for wk, want in iteritems(wantd):
+ self._compare(want=want, have=haved.pop(wk, {}))
+
+ def _compare(self, want, have):
+ """Leverages the base class `compare()` method and
+ populates the list of commands to be run by comparing
+ the `want` and `have` data with the `parsers` defined
+ for the Route_maps network resource.
+ """
+ w_entries = want.get("entries", {})
+ h_entries = have.get("entries", {})
+ self._compare_entries(want=w_entries, have=h_entries)
+
+ def _compare_entries(self, want, have):
+ for wk, wentry in iteritems(want):
+ hentry = have.pop(wk, {})
+ begin = len(self.commands)
+
+ self._compare_lists(wentry, hentry)
+ self.compare(parsers=self.linear_parsers, want=wentry, have=hentry)
+
+ if len(self.commands) != begin:
+ self.commands.insert(begin, self._tmplt.render(wentry, "route_map", False))
+ # remove superfluous entries from have
+ for _hk, hentry in iteritems(have):
+ self.commands.append(self._tmplt.render(hentry, "route_map", True))
+
+ def _compare_lists(self, want, have):
+ for x in self.complex_parsers:
+ wx = get_from_dict(want, x) or []
+ hx = get_from_dict(have, x) or []
+
+ if isinstance(wx, list):
+ wx = set(wx)
+ if isinstance(hx, list):
+ hx = set(hx)
+
+ if wx != hx:
+ # negate the existing config first so that, for replaced or
+ # overridden, the want values replace the existing list instead
+ # of being appended to it
+ if self.state in ["replaced", "overridden"] and hx:
+ self.addcmd(have, x, negate=True)
+ self.addcmd(want, x)
+
+ def _route_maps_list_to_dict(self, entry):
+ entry = {x["route_map"]: x for x in entry}
+ for rmap, data in iteritems(entry):
+ if "entries" in data:
+ for x in data["entries"]:
+ x.update({"route_map": rmap})
+ data["entries"] = {
+ (rmap, entry["action"], entry.get("sequence")): entry
+ for entry in data["entries"]
+ }
+ return entry
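
A standalone sketch of how _route_maps_list_to_dict keys each entry by (route_map, action, sequence) so that want and have entries can be matched one-to-one; the route map data is hypothetical:

    # Standalone sketch of the (route_map, action, sequence) keying; the
    # data below is hypothetical.
    data = [
        {
            "route_map": "rmap1",
            "entries": [
                {"action": "permit", "sequence": 10, "description": "first"},
                {"action": "deny", "sequence": 20, "description": "second"},
            ],
        },
    ]

    keyed = {x["route_map"]: x for x in data}
    for rmap, body in keyed.items():
        for entry in body.get("entries", []):
            entry["route_map"] = rmap
        body["entries"] = {
            (rmap, e["action"], e.get("sequence")): e for e in body.get("entries", [])
        }

    # keyed["rmap1"]["entries"][("rmap1", "permit", 10)]["description"] -> 'first'
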
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/snmp_server/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/snmp_server/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/snmp_server/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/snmp_server/snmp_server.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/snmp_server/snmp_server.py
new file mode 100644
index 00000000..472e7285
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/snmp_server/snmp_server.py
@@ -0,0 +1,243 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos_snmp_server config file.
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created.
+"""
+
+from copy import deepcopy
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module import (
+ ResourceModule,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_merge,
+ get_from_dict,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.snmp_server import (
+ Snmp_serverTemplate,
+)
+
+
+class Snmp_server(ResourceModule):
+ """
+ The nxos_snmp_server config class
+ """
+
+ def __init__(self, module):
+ super(Snmp_server, self).__init__(
+ empty_fact_val={},
+ facts_module=Facts(module),
+ module=module,
+ resource="snmp_server",
+ tmplt=Snmp_serverTemplate(),
+ )
+ self.parsers = [
+ "aaa_user.cache_timeout",
+ "contact",
+ "context",
+ "counter.enable",
+ "counter.cache.timeout",
+ "drop.unknown_engine_id",
+ "drop.unknown_user",
+ "traps.aaa",
+ "traps.bgp",
+ "traps.bridge.newroot",
+ "traps.bridge.topologychange",
+ "traps.callhome.event_notify",
+ "traps.callhome.smtp_send_fail",
+ "traps.cfs.merge_failure",
+ "traps.cfs.state_change_notif",
+ "traps.config.ccmCLIRunningConfigChanged",
+ "traps.entity.cefcMIBEnableStatusNotification",
+ "traps.entity.entity_fan_status_change",
+ "traps.entity.entity_mib_change",
+ "traps.entity.entity_module_inserted",
+ "traps.entity.entity_module_status_change",
+ "traps.entity.entity_power_out_change",
+ "traps.entity.entity_power_status_change",
+ "traps.entity.entity_sensor",
+ "traps.entity.entity_unrecognised_module",
+ "traps.feature_control.featureOpStatusChange",
+ "traps.feature_control.ciscoFeatOpStatusChange",
+ "traps.generic.coldStart",
+ "traps.generic.warmStart",
+ "traps.license.notify_license_expiry",
+ "traps.license.notify_license_expiry_warning",
+ "traps.license.notify_licensefile_missing",
+ "traps.license.notify_no_license_for_feature",
+ "traps.link.cErrDisableInterfaceEventRev1",
+ "traps.link.cieLinkDown",
+ "traps.link.cieLinkUp",
+ "traps.link.cisco_xcvr_mon_status_chg",
+ "traps.link.cmn_mac_move_notification",
+ "traps.link.delayed_link_state_change",
+ "traps.link.extended_linkDown",
+ "traps.link.extended_linkUp",
+ "traps.link.linkDown",
+ "traps.link.linkUp",
+ "traps.mmode.cseMaintModeChangeNotify",
+ "traps.mmode.cseNormalModeChangeNotify",
+ "traps.ospf",
+ "traps.ospfv3",
+ "traps.rf.redundancy_framework",
+ "traps.rmon.fallingAlarm",
+ "traps.rmon.hcFallingAlarm",
+ "traps.rmon.hcRisingAlarm",
+ "traps.rmon.risingAlarm",
+ "traps.snmp.authentication",
+ "traps.storm_control.cpscEventRev1",
+ "traps.storm_control.trap_rate",
+ "traps.stpx.inconsistency",
+ "traps.stpx.root_inconsistency",
+ "traps.stpx.loop_inconsistency",
+ "traps.syslog.message_generated",
+ "traps.sysmgr.cseFailSwCoreNotifyExtended",
+ "traps.system.clock_change_notification",
+ "traps.upgrade.upgradeJobStatusNotify",
+ "traps.upgrade.upgradeOpNotifyOnCompletion",
+ "traps.vtp.notifs",
+ "traps.vtp.vlancreate",
+ "traps.vtp.vlandelete",
+ "engine_id.local",
+ "global_enforce_priv",
+ "location",
+ "mib.community_map",
+ "packetsize",
+ "protocol.enable",
+ "source_interface.informs",
+ "source_interface.traps",
+ "system_shutdown",
+ "tcp_session",
+ ]
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ if self.state not in ["parsed", "gathered"]:
+ self.generate_commands()
+ self.run_commands()
+ return self.result
+
+ def generate_commands(self):
+ """Generate configuration commands to send based on
+ want, have and desired state.
+ """
+ wantd = self._list_to_dict(self.want)
+ haved = self._list_to_dict(self.have)
+
+ # if state is merged, merge want onto have and then compare
+ if self.state == "merged":
+ wantd = dict_merge(haved, wantd)
+
+ # this ensures that if the user sets `enable: True` for a trap,
+ # all suboptions for that trap are set to True
+ for x in [
+ "traps.aaa",
+ "traps.bridge",
+ "traps.callhome",
+ "traps.cfs",
+ "traps.config",
+ "traps.entity",
+ "traps.feature_control",
+ "traps.generic",
+ "traps.license",
+ "traps.link",
+ "traps.mmode",
+ "traps.rf",
+ "traps.rmon",
+ "traps.snmp",
+ "traps.storm_control",
+ "traps.stpx",
+ "traps.syslog",
+ "traps.sysmgr",
+ "traps.system",
+ "traps.upgrade",
+ "traps.vtp",
+ ]:
+ entry = get_from_dict(wantd, x)
+ if entry and entry.get("enable", False):
+ key = x.split(".")
+ wantd[key[0]][key[1]].pop("enable")
+ for i in self.parsers:
+ if i.startswith(x):
+ key = i.split(".")
+ wantd[key[0]][key[1]][key[2]] = True
+
+ # if state is deleted, empty out wantd so that the existing config in haved is negated
+ if self.state == "deleted":
+ wantd = {}
+
+ self._compare(want=wantd, have=haved)
+
+ def _compare(self, want, have):
+ """Leverages the base class `compare()` method and
+ populates the list of commands to be run by comparing
+ the `want` and `have` data with the `parsers` defined
+ for the Snmp_server network resource.
+ """
+ self.compare(parsers=self.parsers, want=want, have=have)
+ self._compare_lists(want=want, have=have)
+
+ def _compare_lists(self, want, have):
+ """
+ Compare list of dictionaries
+ """
+ for x in ["users.auth", "users.use_acls", "hosts", "communities"]:
+ wantx = get_from_dict(want, x) or {}
+ havex = get_from_dict(have, x) or {}
+ for wkey, wentry in iteritems(wantx):
+ hentry = havex.pop(wkey, {})
+ if wentry != hentry:
+ self.addcmd(wentry, x)
+ # remove superfluous items
+ for _k, hv in iteritems(havex):
+ self.addcmd(hv, x, negate=True)
+
+ def _list_to_dict(self, data):
+ def _build_key(x):
+ key = set()
+ for k, v in iteritems(x):
+ if isinstance(v, dict):
+ for sk, sv in iteritems(v):
+ if isinstance(sv, dict):
+ for ssk, ssv in iteritems(sv):
+ key.add(sk + "_" + ssk + "_" + str(ssv))
+ else:
+ key.add(sk + "_" + str(sv))
+ else:
+ key.add(k + "_" + str(v))
+ return tuple(sorted(key))
+
+ tmp = deepcopy(data)
+ if "communities" in tmp:
+ tmp["communities"] = {_build_key(entry): entry for entry in tmp["communities"]}
+ if "users" in tmp:
+ if "auth" in tmp["users"]:
+ tmp["users"]["auth"] = {_build_key(entry): entry for entry in tmp["users"]["auth"]}
+ if "use_acls" in tmp["users"]:
+ tmp["users"]["use_acls"] = {
+ entry["user"]: entry for entry in tmp["users"]["use_acls"]
+ }
+ if "hosts" in tmp:
+ tmp["hosts"] = {_build_key(entry): entry for entry in tmp["hosts"]}
+ return tmp
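
The _build_key helper above flattens a possibly nested entry into a sorted tuple, since hosts, communities and auth users have no single natural key to match on. A standalone sketch with a hypothetical SNMP host entry:

    # Standalone copy of the flattening idea behind _build_key; the host
    # entry is hypothetical.
    def build_key(x):
        key = set()
        for k, v in x.items():
            if isinstance(v, dict):
                for sk, sv in v.items():
                    if isinstance(sv, dict):
                        for ssk, ssv in sv.items():
                            key.add(sk + "_" + ssk + "_" + str(ssv))
                    else:
                        key.add(sk + "_" + str(sv))
            else:
                key.add(k + "_" + str(v))
        return tuple(sorted(key))

    host = {"host": "192.0.2.1", "traps": True, "version": "2c", "community": "public"}
    print(build_key(host))
    # ('community_public', 'host_192.0.2.1', 'traps_True', 'version_2c')
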
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/static_routes/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/static_routes/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/static_routes/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/static_routes/static_routes.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/static_routes/static_routes.py
new file mode 100644
index 00000000..721ff82c
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/static_routes/static_routes.py
@@ -0,0 +1,567 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos_static_routes class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ remove_empties,
+ to_list,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ normalize_interface,
+ search_obj_in_list,
+)
+
+
+class Static_routes(ConfigBase):
+ """
+ The nxos_static_routes class
+ """
+
+ gather_subset = ["!all", "!min"]
+
+ gather_network_resources = ["static_routes"]
+
+ def __init__(self, module):
+ super(Static_routes, self).__init__(module)
+
+ def get_static_routes_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ static_routes_facts = facts["ansible_network_resources"].get("static_routes")
+ if not static_routes_facts:
+ return []
+
+ return static_routes_facts
+
+ def edit_config(self, commands):
+ """Wrapper method for `_connection.edit_config()`
+ This exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return self._connection.edit_config(commands)
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ warnings = list()
+ commands = list()
+ state = self._module.params["state"]
+ action_states = ["merged", "replaced", "deleted", "overridden"]
+
+ if state == "gathered":
+ result["gathered"] = self.get_static_routes_facts()
+ elif state == "rendered":
+ result["rendered"] = self.set_config({})
+ # no need to fetch facts for rendered
+ elif state == "parsed":
+ result["parsed"] = self.set_config({})
+ # no need to fetch facts for parsed
+ else:
+ existing_static_routes_facts = self.get_static_routes_facts()
+ commands.extend(self.set_config(existing_static_routes_facts))
+ if commands and state in action_states:
+ if not self._module.check_mode:
+ self._connection.edit_config(commands)
+ result["changed"] = True
+ result["before"] = existing_static_routes_facts
+ result["commands"] = commands
+
+ changed_static_routes_facts = self.get_static_routes_facts()
+ if result["changed"]:
+ result["after"] = changed_static_routes_facts
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_static_routes_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ config = self._module.params["config"]
+ want = []
+ if config:
+ for w in config:
+ want.append(remove_empties(w))
+ have = existing_static_routes_facts
+ want = self.add_default_vrf(deepcopy(want))
+ have = self.add_default_vrf(deepcopy(have))
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params["state"]
+ commands = []
+ if state == "overridden":
+ commands = self._state_overridden(want, have)
+ elif state == "deleted":
+ commands = self._state_deleted(want, have)
+ elif state == "rendered":
+ commands = self._state_rendered(want, have=[])
+ elif state == "parsed":
+ want = self._module.params["running_config"]
+ commands = self._state_parsed(want)
+ else:
+ for w in want:
+ if state == "merged":
+ commands.extend(self._state_merged(w, have))
+ elif state == "replaced":
+ commands.extend(self._state_replaced(w, have))
+ return commands
+
+ def _state_parsed(self, want):
+ return self.get_static_routes_facts(want)
+
+ def _state_rendered(self, want, have):
+ commands = []
+ for w in want:
+ commands.extend(self.set_commands(w, {}))
+ return commands
+
+ def _state_replaced(self, want, have):
+ """The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ delete_commands = []
+ state = self._module.params["state"]
+ merged_commands = []
+ obj_in_have = search_obj_in_list(want["vrf"], have, "vrf")
+ # in replaced, unlike merged, we check whether what is in have is also in want, because deletions must be applied to the have config
+ if obj_in_have and obj_in_have != {"vrf": "default"}:
+ want_afi_list = []
+ if "address_families" in want.keys():
+ want_afi_list = [w["afi"] for w in want["address_families"]]
+ if len(want_afi_list) > 0:
+ for h in obj_in_have["address_families"]:
+ if h["afi"] in want_afi_list:
+ want_afi = search_obj_in_list(h["afi"], want["address_families"], "afi")
+ want_dest_list = []
+ if "routes" in want_afi.keys():
+ want_dest_list = [w["dest"] for w in want_afi["routes"]]
+ if len(want_dest_list) > 0:
+ for ro in h["routes"]:
+ if ro["dest"] in want_dest_list:
+ want_dest = search_obj_in_list(
+ ro["dest"],
+ want_afi["routes"],
+ "dest",
+ )
+ want_next_hops = []
+ if "next_hops" in want_dest.keys():
+ want_next_hops = list(want_dest["next_hops"])
+ if len(want_next_hops) > 0:
+ for next_hop in ro["next_hops"]:
+ if next_hop not in want_next_hops:
+ # have's next hop not in want, so delete it
+ delete_dict = {
+ "vrf": obj_in_have["vrf"],
+ "address_families": [
+ {
+ "afi": h["afi"],
+ "routes": [
+ {
+ "dest": ro["dest"],
+ "next_hops": [next_hop],
+ },
+ ],
+ },
+ ],
+ }
+ delete_commands.extend(
+ self.del_commands([delete_dict]),
+ )
+ else:
+ # want has no next_hops, so delete all next_hops under that dest
+ if state == "overridden":
+ delete_dict = {
+ "vrf": obj_in_have["vrf"],
+ "address_families": [
+ {
+ "afi": h["afi"],
+ "routes": [
+ {
+ "dest": ro["dest"],
+ "next_hops": ro["next_hops"],
+ },
+ ],
+ },
+ ],
+ }
+ delete_commands.extend(self.del_commands([delete_dict]))
+ else:
+ if state == "overridden":
+ delete_dict = {
+ "vrf": obj_in_have["vrf"],
+ "address_families": [
+ {
+ "afi": h["afi"],
+ "routes": [
+ {
+ "dest": ro["dest"],
+ "next_hops": ro["next_hops"],
+ },
+ ],
+ },
+ ],
+ }
+ delete_commands.extend(self.del_commands([delete_dict]))
+
+ else:
+ if (
+ state == "overridden"
+ ): # want has no 'routes' key, so delete all routes under that afi
+ if "routes" in h.keys():
+ delete_dict = {
+ "vrf": obj_in_have["vrf"],
+ "address_families": [
+ {
+ "afi": h["afi"],
+ "routes": h["routes"],
+ },
+ ],
+ }
+ delete_commands.extend(self.del_commands([delete_dict]))
+ else:
+ if (
+ state == "overridden"
+ ): # want has 'vrf' key only. So delete all address families in it
+ delete_commands.extend(
+ self.del_commands(
+ [
+ {
+ "address_families": list(obj_in_have["address_families"]),
+ "vrf": obj_in_have["vrf"],
+ },
+ ],
+ ),
+ )
+ final_delete_commands = []
+ for d in delete_commands:
+ if d not in final_delete_commands:
+ final_delete_commands.append(d)
+ # if there are two afis, 'vrf context ..' is added twice from del_commands. The above code removes the redundant 'vrf context ..'
+ merged_commands = self.set_commands(want, have)
+ if merged_commands:
+ cmds = set(final_delete_commands).intersection(set(merged_commands))
+ for c in cmds:
+ merged_commands.remove(c)
+
+ # set_commands adds a 'vrf context..' line. The above code removes the redundant 'vrf context ..'
+ commands.extend(final_delete_commands)
+ commands.extend(merged_commands)
+ return commands
+
+ def _state_overridden(self, want, have):
+ """The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ want_vrfs = [w["vrf"] for w in want]
+ for h in have:
+ if h["vrf"] not in want_vrfs and h["vrf"] != "management":
+ commands.extend(self._state_deleted([h], have))
+ for w in want:
+ commands.extend(self._state_replaced(w, have))
+ return commands
+
+ def _state_merged(self, want, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ return self.set_commands(want, have)
+
+ def _state_deleted(self, want, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if want:
+ for w in want:
+ delete_dict = {}
+ obj_in_have = search_obj_in_list(w["vrf"], have, "vrf")
+ if obj_in_have:
+ if "address_families" in w.keys():
+ o1 = obj_in_have["address_families"]
+ afi_list = [o["afi"] for o in o1] # have's afi list
+ for w1 in w["address_families"]:
+ if w1["afi"] in afi_list:
+ o2 = search_obj_in_list(w1["afi"], o1, "afi")
+ state = self._module.params["state"]
+ if state != "deleted":
+ # Deleted scope extends only to the afi level. The part below is used by the overridden state.
+ if "routes" in w1.keys():
+ for w2 in w1["routes"]:
+ o3 = search_obj_in_list(
+ w2["dest"],
+ o2["routes"],
+ "dest",
+ )
+ hops = []
+ if "next_hops" in w2.keys():
+ for nh in w2["next_hops"]:
+ if nh in o3["next_hops"]:
+ hops.append(nh)
+ else:
+ # if next hops not given
+ hops = o3["next_hops"]
+
+ delete_dict = {
+ "vrf": obj_in_have["vrf"],
+ "address_families": [
+ {
+ "afi": w1["afi"],
+ "routes": [
+ {
+ "dest": w2["dest"],
+ "next_hops": hops,
+ },
+ ],
+ },
+ ],
+ }
+ commands.extend(self.del_commands([delete_dict]))
+ else:
+ # case when only afi given for delete
+ delete_dict = {
+ "vrf": obj_in_have["vrf"],
+ "address_families": [
+ {
+ "afi": o2["afi"],
+ "routes": o2["routes"],
+ },
+ ],
+ }
+ commands.extend(self.del_commands([delete_dict]))
+ else:
+ commands.extend(
+ self.del_commands(
+ [
+ {
+ "vrf": obj_in_have["vrf"],
+ "address_families": [o2],
+ },
+ ],
+ ),
+ )
+ else:
+ # only vrf given to delete
+ commands.extend(self.del_commands([obj_in_have]))
+ else:
+ if have:
+ # delete everything
+ del_have = []
+ for h in have:
+ if h["vrf"] != "management": # protect management vrf
+ del_have.append(h)
+ commands = self.del_commands(del_have)
+
+ final_delete_commands = []
+ # del_commands might add 'vrf context..' twice for two routes in the same vrf. This removes it
+ for c in commands:
+ if c not in final_delete_commands:
+ final_delete_commands.append(c)
+ return final_delete_commands
+
+ def del_commands(self, have):
+ commands = []
+ for h in have:
+ if h != {"vrf": "default"}:
+ vrf = h["vrf"]
+ if "default" not in vrf:
+ commands.append("vrf context " + vrf)
+ else:
+ # Default static routes are configured in global context.
+ # The "vrf context default" command throws an error from release 9.X onwards.
+ # Changing the context to global is achieved by "configure terminal"
+ commands.append("configure terminal")
+ for af in h["address_families"]:
+ for route in af["routes"]:
+ for next_hop in route["next_hops"]:
+ command = self.del_next_hop(af, route, next_hop)
+ commands.append(command.strip())
+ return commands
+
+ def del_next_hop(self, af, route, next_hop):
+ command = ""
+ if af["afi"] == "ipv4":
+ command = "no ip route " + route["dest"] + " " + self.add_commands(next_hop)
+ else:
+ command = "no ipv6 route " + route["dest"] + " " + self.add_commands(next_hop)
+ return command
+
+ def add_commands(self, want):
+ command = ""
+ params = want.keys()
+ pref = vrf = ip = intf = name = tag = track = ""
+ if "admin_distance" in params:
+ pref = str(want["admin_distance"]) + " "
+ if "track" in params:
+ track = "track " + str(want["track"]) + " "
+ if "dest_vrf" in params:
+ vrf = "vrf " + str(want["dest_vrf"]) + " "
+ if "forward_router_address" in params:
+ ip = want["forward_router_address"] + " "
+ if "interface" in params:
+ intf = normalize_interface(want["interface"]) + " "
+ if "null0" in intf:
+ ip = ""
+ intf = "null0 "
+ if "route_name" in params:
+ name = "name " + str(want["route_name"]) + " "
+ if "tag" in params:
+ tag = "tag " + str(want["tag"]) + " "
+ command = intf + ip + vrf + name + tag + track + pref
+ if intf != "Null0 " and ip == "":
+ self._module.fail_json(msg="forward_router_address error")
+ return command.strip()
+
+ def set_commands(self, want, have):
+ commands = []
+ h1 = h2 = h3 = {}
+ want = remove_empties(want)
+ vrf_list = []
+ if have:
+ vrf_list = [h["vrf"] for h in have]
+ if want["vrf"] in vrf_list and have != [{"vrf": "default"}]:
+ for x in have:
+ if x["vrf"] == want["vrf"]:
+ h1 = x # this has the 'have' dict with same vrf as want
+ if "address_families" in h1.keys():
+ afi_list = [h["afi"] for h in h1["address_families"]]
+ if "address_families" in want.keys():
+ for af in want["address_families"]:
+ if af["afi"] in afi_list:
+ for x in h1["address_families"]:
+ if x["afi"] == af["afi"]:
+ h2 = x # this has the have dict with same vrf and afi as want
+ dest_list = [h["dest"] for h in h2["routes"]]
+ if "routes" in af.keys():
+ for ro in af["routes"]:
+ if ro["dest"] in dest_list:
+ for x in h2["routes"]:
+ if x["dest"] == ro["dest"]:
+ h3 = x # this has the have dict with same vrf, afi and dest as want
+ next_hop_list = list(h3["next_hops"])
+ if "next_hops" in ro.keys():
+ for nh in ro["next_hops"]:
+ if "interface" in nh.keys():
+ nh["interface"] = normalize_interface(
+ nh["interface"],
+ )
+ if nh not in next_hop_list:
+ # no match for next hop in have
+ commands = self.set_next_hop(
+ want,
+ h2,
+ nh,
+ ro,
+ commands,
+ )
+ vrf_list.append(want["vrf"])
+ else:
+ # no match for dest
+ if "next_hops" in ro.keys():
+ for nh in ro["next_hops"]:
+ commands = self.set_next_hop(
+ want,
+ h2,
+ nh,
+ ro,
+ commands,
+ )
+ else:
+ # no match for afi
+ if "routes" in af.keys():
+ for ro in af["routes"]:
+ for nh in ro["next_hops"]:
+ commands = self.set_next_hop(want, af, nh, ro, commands)
+ else:
+ # no match for vrf
+ vrf_list.append(want["vrf"])
+ for af in want["address_families"]:
+ for ro in af["routes"]:
+ for nh in ro["next_hops"]:
+ commands = self.set_next_hop(want, af, nh, ro, commands)
+ return commands
+
+ def set_next_hop(self, want, h2, nh, ro, commands):
+ vrf = want["vrf"]
+ if h2["afi"] == "ipv4":
+ com = "ip route " + ro["dest"] + " " + self.add_commands(nh)
+ else:
+ com = "ipv6 route " + ro["dest"] + " " + self.add_commands(nh)
+ commands.append(com.strip())
+ if "default" not in vrf:
+ string = "vrf context " + str(vrf)
+ else:
+ # Default static routes are configured in global context.
+ # The "vrf context default" command throws an error from release 9.X onwards.
+ # Changing the context to global is achieved by "configure terminal"
+ string = "configure terminal"
+ if string not in commands:
+ commands.insert(0, string)
+ return commands
+
+ def add_default_vrf(self, dictionary):
+ """
+ This method adds the 'default' vrf to the facts collected, since the global/default
+ vrf is not shown in facts. The vrf key exists for all vrfs except the global one.
+ """
+ for d in dictionary:
+ if "vrf" not in d.keys():
+ d.update({"vrf": "default"})
+ return dictionary
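
To show how add_commands assembles a next-hop clause in the fixed interface/ip/vrf/name/tag/track/pref order, a trimmed standalone sketch that covers only a few of the supported keys, on hypothetical data:

    # Trimmed standalone sketch of the next-hop clause built by add_commands;
    # interface and track handling are omitted and the data is hypothetical.
    def next_hop_clause(next_hop):
        pref = vrf = ip = name = tag = ""
        if "admin_distance" in next_hop:
            pref = str(next_hop["admin_distance"]) + " "
        if "dest_vrf" in next_hop:
            vrf = "vrf " + str(next_hop["dest_vrf"]) + " "
        if "forward_router_address" in next_hop:
            ip = next_hop["forward_router_address"] + " "
        if "route_name" in next_hop:
            name = "name " + str(next_hop["route_name"]) + " "
        if "tag" in next_hop:
            tag = "tag " + str(next_hop["tag"]) + " "
        return (ip + vrf + name + tag + pref).strip()

    nh = {"forward_router_address": "192.0.2.1", "tag": 20, "admin_distance": 5}
    print("ip route 10.0.0.0/8 " + next_hop_clause(nh))
    # ip route 10.0.0.0/8 192.0.2.1 tag 20 5
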
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/telemetry/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/telemetry/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/telemetry/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/telemetry/telemetry.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/telemetry/telemetry.py
new file mode 100644
index 00000000..da3f7743
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/telemetry/telemetry.py
@@ -0,0 +1,593 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Cisco and/or its affiliates.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos_telemetry class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.cmdref.telemetry.telemetry import (
+ TMS_DESTGROUP,
+ TMS_GLOBAL,
+ TMS_SENSORGROUP,
+ TMS_SUBSCRIPTION,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import NxosCmdRef
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.telemetry.telemetry import (
+ get_module_params_subsection,
+ get_setval_path,
+ massage_data,
+ normalize_data,
+ remove_duplicate_commands,
+ remove_duplicate_context,
+ valiate_input,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ normalize_interface,
+)
+
+
+class Telemetry(ConfigBase):
+ """
+ The nxos_telemetry class
+ """
+
+ gather_subset = ["!all", "!min"]
+
+ gather_network_resources = ["telemetry"]
+
+ def __init__(self, module):
+ super(Telemetry, self).__init__(module)
+
+ def get_telemetry_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ telemetry_facts = facts["ansible_network_resources"].get("telemetry")
+ if not telemetry_facts:
+ return {}
+ return telemetry_facts
+
+ def edit_config(self, commands):
+ return self._connection.edit_config(commands)
+
+ def execute_module(self):
+ """Execute the module
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ commands = list()
+ warnings = list()
+
+ state = self._module.params["state"]
+ if "overridden" in state:
+ self._module.fail_json(msg="State <overridden> is invalid for this module.")
+ # When state is 'deleted', the module_params should not contain data
+ # under the 'config' key
+ if "deleted" in state and self._module.params.get("config"):
+ self._module.fail_json(msg="Remove config key from playbook when state is <deleted>")
+
+ if self._module.params["config"] is None:
+ self._module.params["config"] = {}
+ # Normalize interface name.
+ int = self._module.params["config"].get("source_interface")
+ if int:
+ self._module.params["config"]["source_interface"] = normalize_interface(int)
+
+ if self.state in self.ACTION_STATES:
+ existing_telemetry_facts = self.get_telemetry_facts()
+ else:
+ existing_telemetry_facts = []
+
+ if self.state in self.ACTION_STATES:
+ commands.extend(self.set_config(existing_telemetry_facts))
+
+ if commands and self.state in self.ACTION_STATES:
+ if not self._module.check_mode:
+ self.edit_config(commands)
+ result["changed"] = True
+
+ if self.state in self.ACTION_STATES:
+ result["commands"] = commands
+
+ if self.state in self.ACTION_STATES or self.state == "gathered":
+ changed_telemetry_facts = self.get_telemetry_facts()
+
+ if self.state in self.ACTION_STATES:
+ result["before"] = existing_telemetry_facts
+ if result["changed"]:
+ result["after"] = changed_telemetry_facts
+
+ elif self.state == "gathered":
+ result["gathered"] = changed_telemetry_facts
+
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_tms_global_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ config = self._module.params["config"]
+ want = dict((k, v) for k, v in config.items() if v is not None)
+ have = existing_tms_global_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params["state"]
+
+        # The deleted case is simple: we purge all telemetry config, so no
+        # processing with NxosCmdRef objects is required.
+ if state == "deleted":
+ return self._state_deleted(want, have)
+ elif state == "replaced":
+ if want == have:
+ return []
+ return self._state_replaced(want, have)
+
+ # Save off module params
+ ALL_MP = self._module.params["config"]
+
+ cmd_ref = {}
+ cmd_ref["TMS_GLOBAL"] = {}
+ cmd_ref["TMS_DESTGROUP"] = {}
+ cmd_ref["TMS_SENSORGROUP"] = {}
+ cmd_ref["TMS_SUBSCRIPTION"] = {}
+
+ # Build Telemetry Global NxosCmdRef Object
+ cmd_ref["TMS_GLOBAL"]["ref"] = []
+ self._module.params["config"] = get_module_params_subsection(ALL_MP, "TMS_GLOBAL")
+ cmd_ref["TMS_GLOBAL"]["ref"].append(NxosCmdRef(self._module, TMS_GLOBAL))
+ ref = cmd_ref["TMS_GLOBAL"]["ref"][0]
+ ref.set_context()
+ ref.get_existing()
+ ref.get_playvals()
+ device_cache = ref.cache_existing
+
+ def build_cmdref_objects(td):
+ cmd_ref[td["type"]]["ref"] = []
+ saved_ids = []
+ if want.get(td["name"]):
+ for playvals in want[td["name"]]:
+ valiate_input(playvals, td["name"], self._module)
+ if playvals["id"] in saved_ids:
+ continue
+ saved_ids.append(playvals["id"])
+ resource_key = td["cmd"].format(playvals["id"])
+ # Only build the NxosCmdRef object for the td['name'] module parameters.
+ self._module.params["config"] = get_module_params_subsection(
+ ALL_MP,
+ td["type"],
+ playvals["id"],
+ )
+ cmd_ref[td["type"]]["ref"].append(NxosCmdRef(self._module, td["obj"]))
+ ref = cmd_ref[td["type"]]["ref"][-1]
+ ref.set_context([resource_key])
+ if td["type"] == "TMS_SENSORGROUP" and get_setval_path(self._module):
+ # Sensor group path setting can contain optional values.
+ # Call get_setval_path helper function to process any
+ # optional setval keys.
+ ref._ref["path"]["setval"] = get_setval_path(self._module)
+ ref.get_existing(device_cache)
+ ref.get_playvals()
+ if td["type"] == "TMS_DESTGROUP":
+ normalize_data(ref)
+
+ # Build Telemetry Destination Group NxosCmdRef Objects
+ td = {
+ "name": "destination_groups",
+ "type": "TMS_DESTGROUP",
+ "obj": TMS_DESTGROUP,
+ "cmd": "destination-group {0}",
+ }
+ build_cmdref_objects(td)
+
+ # Build Telemetry Sensor Group NxosCmdRef Objects
+ td = {
+ "name": "sensor_groups",
+ "type": "TMS_SENSORGROUP",
+ "obj": TMS_SENSORGROUP,
+ "cmd": "sensor-group {0}",
+ }
+ build_cmdref_objects(td)
+
+ # Build Telemetry Subscription NxosCmdRef Objects
+ td = {
+ "name": "subscriptions",
+ "type": "TMS_SUBSCRIPTION",
+ "obj": TMS_SUBSCRIPTION,
+ "cmd": "subscription {0}",
+ }
+ build_cmdref_objects(td)
+
+ if state == "merged":
+ if want == have:
+ return []
+ commands = self._state_merged(cmd_ref)
+ return commands
+
+ @staticmethod
+ def _state_replaced(want, have):
+ """The command generator when state is replaced
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ massaged_have = massage_data(have)
+ massaged_want = massage_data(want)
+
+ ref = {}
+ ref["tms_global"] = NxosCmdRef([], TMS_GLOBAL, ref_only=True)
+ ref["tms_destgroup"] = NxosCmdRef([], TMS_DESTGROUP, ref_only=True)
+ ref["tms_sensorgroup"] = NxosCmdRef([], TMS_SENSORGROUP, ref_only=True)
+ ref["tms_subscription"] = NxosCmdRef([], TMS_SUBSCRIPTION, ref_only=True)
+
+        # Order matters for state replaced:
+        # first remove all subscriptions, then sensor-groups, then destination-groups;
+        # then add all destination-groups, then sensor-groups, then subscriptions.
+ add = {
+ "TMS_GLOBAL": [],
+ "TMS_DESTGROUP": [],
+ "TMS_SENSORGROUP": [],
+ "TMS_SUBSCRIPTION": [],
+ }
+ delete = {
+ "TMS_DESTGROUP": [],
+ "TMS_SENSORGROUP": [],
+ "TMS_SUBSCRIPTION": [],
+ }
+
+ # Process Telemetry Global Want and Have Values
+ # Possible states:
+ # - want and have are (set) (equal: no action, not equal: replace with want)
+ # - want (set) have (not set) (add want)
+ # - want (not set) have (set) (delete have)
+ # - want (not set) have (not set) (no action)
+ # global_ctx = ref['tms_global']._ref['_template']['context']
+ # property_ctx = ref['tms_global']._ref['certificate'].get('context')
+ # setval = ref['tms_global']._ref['certificate']['setval']
+ #
+ all_global_properties = [
+ "certificate",
+ "compression",
+ "source_interface",
+ "vrf",
+ ]
+ dest_profile_properties = ["compression", "source_interface", "vrf"]
+ dest_profile_remote_commands = []
+ for property in all_global_properties:
+ cmd = None
+ global_ctx = ref["tms_global"]._ref["_template"]["context"]
+ property_ctx = ref["tms_global"]._ref[property].get("context")
+ setval = ref["tms_global"]._ref[property]["setval"]
+ kind = ref["tms_global"]._ref[property]["kind"]
+ if want.get(property) is not None:
+ if have.get(property) is not None:
+ if want.get(property) != have.get(property):
+ if kind == "dict":
+ cmd = [setval.format(**want.get(property))]
+ else:
+ cmd = [setval.format(want.get(property))]
+ elif have.get(property) is None:
+ if kind == "dict":
+ cmd = [setval.format(**want.get(property))]
+ else:
+ cmd = [setval.format(want.get(property))]
+ elif want.get(property) is None:
+ if have.get(property) is not None:
+ if kind == "dict":
+ cmd = ["no " + setval.format(**have.get(property))]
+ else:
+ cmd = ["no " + setval.format(have.get(property))]
+ if property in dest_profile_properties:
+ dest_profile_remote_commands.extend(cmd)
+
+ if cmd is not None:
+ ctx = global_ctx
+ if property_ctx is not None:
+ ctx.extend(property_ctx)
+ add["TMS_GLOBAL"].extend(ctx)
+ add["TMS_GLOBAL"].extend(cmd)
+
+ add["TMS_GLOBAL"] = remove_duplicate_commands(add["TMS_GLOBAL"])
+ # If all destination profile commands are being removed then just
+ # remove the config context instead.
+ if len(dest_profile_remote_commands) == 3:
+ for item in dest_profile_remote_commands:
+ add["TMS_GLOBAL"].remove(item)
+ add["TMS_GLOBAL"].remove("destination-profile")
+ add["TMS_GLOBAL"].extend(["no destination-profile"])
+
+ # Process Telemetry destination_group, sensor_group and subscription Want and Have Values
+ # Possible states:
+ # - want (not set) have (set) (delete have)
+ # - want and have are (set) (equal: no action, not equal: replace with want)
+ # - want (set) have (not set) (add want)
+ # - want (not set) have (not set) (no action)
+ tms_resources = [
+ "TMS_DESTGROUP",
+ "TMS_SENSORGROUP",
+ "TMS_SUBSCRIPTION",
+ ]
+ for resource in tms_resources:
+ if resource == "TMS_DESTGROUP":
+ name = "destination-group"
+ cmd_property = "destination"
+ global_ctx = ref["tms_destgroup"]._ref["_template"]["context"]
+ setval = ref["tms_destgroup"]._ref["destination"]["setval"]
+ want_resources = massaged_want.get("destination_groups")
+ have_resources = massaged_have.get("destination_groups")
+ if resource == "TMS_SENSORGROUP":
+ name = "sensor-group"
+ global_ctx = ref["tms_sensorgroup"]._ref["_template"]["context"]
+ setval = {}
+ setval["data_source"] = ref["tms_sensorgroup"]._ref["data_source"]["setval"]
+ setval["path"] = ref["tms_sensorgroup"]._ref["path"]["setval"]
+ want_resources = massaged_want.get("sensor_groups")
+ have_resources = massaged_have.get("sensor_groups")
+ if resource == "TMS_SUBSCRIPTION":
+ name = "subscription"
+ global_ctx = ref["tms_subscription"]._ref["_template"]["context"]
+ setval = {}
+ setval["destination_group"] = ref["tms_subscription"]._ref["destination_group"][
+ "setval"
+ ]
+ setval["sensor_group"] = ref["tms_subscription"]._ref["sensor_group"]["setval"]
+ want_resources = massaged_want.get("subscriptions")
+ have_resources = massaged_have.get("subscriptions")
+
+ if not want_resources and have_resources:
+                # want not set but have is set, so delete have
+ for key in have_resources.keys():
+ remove_context = ["{0} {1} {2}".format("no", name, key)]
+ delete[resource].extend(global_ctx)
+ if remove_context[0] not in delete[resource]:
+ delete[resource].extend(remove_context)
+ else:
+ # want and have are set.
+ # process wants:
+ for want_key in want_resources.keys():
+ if want_key not in have_resources.keys():
+ # Want resource key not in have resource key so add it
+ property_ctx = ["{0} {1}".format(name, want_key)]
+ for item in want_resources[want_key]:
+ if resource == "TMS_DESTGROUP":
+ cmd = [setval.format(**item[cmd_property])]
+ add[resource].extend(global_ctx)
+ if property_ctx[0] not in add[resource]:
+ add[resource].extend(property_ctx)
+ add[resource].extend(cmd)
+ if resource == "TMS_SENSORGROUP":
+ cmd = {}
+ if item.get("data_source"):
+ cmd["data_source"] = [
+ setval["data_source"].format(item["data_source"]),
+ ]
+ if item.get("path"):
+ setval["path"] = get_setval_path(item.get("path"))
+ cmd["path"] = [setval["path"].format(**item["path"])]
+ add[resource].extend(global_ctx)
+ if property_ctx[0] not in add[resource]:
+ add[resource].extend(property_ctx)
+ if cmd.get("data_source"):
+ add[resource].extend(cmd["data_source"])
+ if cmd.get("path"):
+ add[resource].extend(cmd["path"])
+ if resource == "TMS_SUBSCRIPTION":
+ cmd = {}
+ if item.get("destination_group"):
+ cmd["destination_group"] = [
+ setval["destination_group"].format(
+ item["destination_group"],
+ ),
+ ]
+ if item.get("sensor_group"):
+ cmd["sensor_group"] = [
+ setval["sensor_group"].format(**item["sensor_group"]),
+ ]
+ add[resource].extend(global_ctx)
+ if property_ctx[0] not in add[resource]:
+ add[resource].extend(property_ctx)
+ if cmd.get("destination_group"):
+ add[resource].extend(cmd["destination_group"])
+ if cmd.get("sensor_group"):
+ add[resource].extend(cmd["sensor_group"])
+
+ elif want_key in have_resources.keys():
+ # Want resource key exists in have resource keys but we need to
+ # inspect the individual items under the resource key
+ # for differences
+ for item in want_resources[want_key]:
+ if item not in have_resources[want_key]:
+ if item is None:
+ continue
+ # item wanted but does not exist so add it
+ property_ctx = ["{0} {1}".format(name, want_key)]
+ if resource == "TMS_DESTGROUP":
+ cmd = [setval.format(**item[cmd_property])]
+ add[resource].extend(global_ctx)
+ if property_ctx[0] not in add[resource]:
+ add[resource].extend(property_ctx)
+ add[resource].extend(cmd)
+ if resource == "TMS_SENSORGROUP":
+ cmd = {}
+ if item.get("data_source"):
+ cmd["data_source"] = [
+ setval["data_source"].format(item["data_source"]),
+ ]
+ if item.get("path"):
+ setval["path"] = get_setval_path(item.get("path"))
+ cmd["path"] = [setval["path"].format(**item["path"])]
+ add[resource].extend(global_ctx)
+ if property_ctx[0] not in add[resource]:
+ add[resource].extend(property_ctx)
+ if cmd.get("data_source"):
+ add[resource].extend(cmd["data_source"])
+ if cmd.get("path"):
+ add[resource].extend(cmd["path"])
+ if resource == "TMS_SUBSCRIPTION":
+ cmd = {}
+ if item.get("destination_group"):
+ cmd["destination_group"] = [
+ setval["destination_group"].format(
+ item["destination_group"],
+ ),
+ ]
+ if item.get("sensor_group"):
+ cmd["sensor_group"] = [
+ setval["sensor_group"].format(**item["sensor_group"]),
+ ]
+ add[resource].extend(global_ctx)
+ if property_ctx[0] not in add[resource]:
+ add[resource].extend(property_ctx)
+ if cmd.get("destination_group"):
+ add[resource].extend(cmd["destination_group"])
+ if cmd.get("sensor_group"):
+ add[resource].extend(cmd["sensor_group"])
+
+ # process haves:
+ for have_key in have_resources.keys():
+ if have_key not in want_resources.keys():
+                        # Have resource key is not in want resource keys so remove it
+ cmd = ["no " + "{0} {1}".format(name, have_key)]
+ delete[resource].extend(global_ctx)
+ delete[resource].extend(cmd)
+ elif have_key in want_resources.keys():
+ # Have resource key exists in want resource keys but we need to
+ # inspect the individual items under the resource key
+ # for differences
+ for item in have_resources[have_key]:
+ if item not in want_resources[have_key]:
+ if item is None:
+ continue
+ # have item not wanted so remove it
+ property_ctx = ["{0} {1}".format(name, have_key)]
+ if resource == "TMS_DESTGROUP":
+ cmd = ["no " + setval.format(**item[cmd_property])]
+ delete[resource].extend(global_ctx)
+ if property_ctx[0] not in delete[resource]:
+ delete[resource].extend(property_ctx)
+ delete[resource].extend(cmd)
+ if resource == "TMS_SENSORGROUP":
+ cmd = {}
+ if item.get("data_source"):
+ cmd["data_source"] = [
+ "no "
+ + setval["data_source"].format(item["data_source"]),
+ ]
+ if item.get("path"):
+ setval["path"] = get_setval_path(item.get("path"))
+ cmd["path"] = [
+ "no " + setval["path"].format(**item["path"]),
+ ]
+ delete[resource].extend(global_ctx)
+ if property_ctx[0] not in delete[resource]:
+ delete[resource].extend(property_ctx)
+ if cmd.get("data_source"):
+ delete[resource].extend(cmd["data_source"])
+ if cmd.get("path"):
+ delete[resource].extend(cmd["path"])
+ if resource == "TMS_SUBSCRIPTION":
+ cmd = {}
+ if item.get("destination_group"):
+ cmd["destination_group"] = [
+ "no "
+ + setval["destination_group"].format(
+ item["destination_group"],
+ ),
+ ]
+ if item.get("sensor_group"):
+ cmd["sensor_group"] = [
+ "no "
+ + setval["sensor_group"].format(**item["sensor_group"]),
+ ]
+ delete[resource].extend(global_ctx)
+ if property_ctx[0] not in delete[resource]:
+ delete[resource].extend(property_ctx)
+ if cmd.get("destination_group"):
+ delete[resource].extend(cmd["destination_group"])
+ if cmd.get("sensor_group"):
+ delete[resource].extend(cmd["sensor_group"])
+
+ add[resource] = remove_duplicate_context(add[resource])
+ delete[resource] = remove_duplicate_context(delete[resource])
+
+ commands.extend(delete["TMS_SUBSCRIPTION"])
+ commands.extend(delete["TMS_SENSORGROUP"])
+ commands.extend(delete["TMS_DESTGROUP"])
+ commands.extend(add["TMS_DESTGROUP"])
+ commands.extend(add["TMS_SENSORGROUP"])
+ commands.extend(add["TMS_SUBSCRIPTION"])
+ commands.extend(add["TMS_GLOBAL"])
+ commands = remove_duplicate_context(commands)
+
+ return commands
+
+ @staticmethod
+ def _state_merged(cmd_ref):
+ """The command generator when state is merged
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = cmd_ref["TMS_GLOBAL"]["ref"][0].get_proposed()
+
+ if cmd_ref["TMS_DESTGROUP"].get("ref"):
+ for cr in cmd_ref["TMS_DESTGROUP"]["ref"]:
+ commands.extend(cr.get_proposed())
+
+ if cmd_ref["TMS_SENSORGROUP"].get("ref"):
+ for cr in cmd_ref["TMS_SENSORGROUP"]["ref"]:
+ commands.extend(cr.get_proposed())
+
+ if cmd_ref["TMS_SUBSCRIPTION"].get("ref"):
+ for cr in cmd_ref["TMS_SUBSCRIPTION"]["ref"]:
+ commands.extend(cr.get_proposed())
+
+ return remove_duplicate_context(commands)
+
+ @staticmethod
+ def _state_deleted(want, have):
+ """The command generator when state is deleted
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if want != have:
+ commands = ["no telemetry"]
+ return commands
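
Note on the replaced-state handler above: it reduces each scalar global telemetry property to a four-way want/have comparison (both set, only want set, only have set, neither set). The following is a minimal illustrative sketch of that decision table, not the module's implementation; the setval templates and property values are stand-ins, and dict-kind properties such as certificate are omitted.

# Illustrative sketch only: mirrors the want/have decision table used by
# _state_replaced for scalar global telemetry properties.
def global_property_commands(prop, setval, want, have):
    """Return the CLI change (if any) for a single global property."""
    w, h = want.get(prop), have.get(prop)
    if w is not None and w != h:
        # want is set and differs (or have is unset): configure the wanted value
        return [setval.format(w)]
    if w is None and h is not None:
        # want is unset but have is set: remove the existing value
        return ["no " + setval.format(h)]
    # values are equal, or neither side is set: no action
    return []


if __name__ == "__main__":
    want = {"source_interface": "loopback55"}
    have = {"source_interface": "loopback1", "vrf": "management"}
    print(global_property_commands("source_interface", "source-interface {0}", want, have))
    # -> ['source-interface loopback55']
    print(global_property_commands("vrf", "use-vrf {0}", want, have))
    # -> ['no use-vrf management']
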
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/vlans/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/vlans/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/vlans/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/vlans/vlans.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/vlans/vlans.py
new file mode 100644
index 00000000..515618ad
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/vlans/vlans.py
@@ -0,0 +1,334 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos_vlans class
+This file is where the current configuration (as a dict)
+is compared to the provided configuration (as a dict) and the command set
+necessary to bring the current configuration to its desired end state is
+created.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ dict_diff,
+ remove_empties,
+ to_list,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import Facts
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ search_obj_in_list,
+)
+
+
+class Vlans(ConfigBase):
+ """
+ The nxos_vlans class
+ """
+
+ gather_subset = ["min"]
+
+ gather_network_resources = ["vlans"]
+
+ def __init__(self, module):
+ super(Vlans, self).__init__(module)
+
+ def get_platform(self):
+ """Wrapper method for getting platform info
+ This method exists solely to allow the unit test framework to mock calls.
+ """
+ return self.facts.get("ansible_net_platform", "")
+
+ def get_vlans_facts(self, data=None):
+ """Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ self.facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset,
+ self.gather_network_resources,
+ data=data,
+ )
+ vlans_facts = self.facts["ansible_network_resources"].get("vlans")
+ if not vlans_facts:
+ return []
+
+ return vlans_facts
+
+ def edit_config(self, commands):
+ """Wrapper method for `_connection.edit_config()`
+ This exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return self._connection.edit_config(commands)
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ commands = []
+ warnings = []
+
+ if self.state in self.ACTION_STATES:
+ existing_vlans_facts = self.get_vlans_facts()
+ self._platform = self.get_platform()
+ else:
+ existing_vlans_facts = []
+ self._platform = ""
+
+ if self.state in self.ACTION_STATES or self.state == "rendered":
+ commands.extend(self.set_config(existing_vlans_facts))
+
+ if commands and self.state in self.ACTION_STATES:
+ if not self._module.check_mode:
+ self.edit_config(commands)
+ result["changed"] = True
+
+ if self.state in self.ACTION_STATES:
+ result["commands"] = commands
+
+ if self.state in self.ACTION_STATES or self.state == "gathered":
+ changed_vlans_facts = self.get_vlans_facts()
+
+ elif self.state == "rendered":
+ result["rendered"] = commands
+
+ elif self.state == "parsed":
+ running_config = self._module.params["running_config"]
+ if not running_config:
+ self._module.fail_json(
+ msg="value of running_config parameter must not be empty for state parsed",
+ )
+ result["parsed"] = self.get_vlans_facts(data=running_config)
+
+ if self.state in self.ACTION_STATES:
+ result["before"] = existing_vlans_facts
+ if result["changed"]:
+ result["after"] = changed_vlans_facts
+
+ elif self.state == "gathered":
+ result["gathered"] = changed_vlans_facts
+
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_vlans_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params.get("config") or []
+ have = existing_vlans_facts
+ resp = self.set_state(self._sanitize(want), self._sanitize(have))
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params["state"]
+ if state in ("overridden", "merged", "replaced", "rendered") and not want:
+ self._module.fail_json(
+ msg="value of config parameter must not be empty for state {0}".format(state),
+ )
+
+ commands = list()
+ if state == "overridden":
+ commands.extend(self._state_overridden(want, have))
+ elif state == "deleted":
+ commands.extend(self._state_deleted(want, have))
+ else:
+ for w in want:
+ if state in ["merged", "rendered"]:
+ commands.extend(self._state_merged(w, have))
+ elif state == "replaced":
+ commands.extend(self._state_replaced(w, have))
+
+ return commands
+
+ def remove_default_states(self, obj):
+        """Remove attributes that are set but equal to their default values."""
+ default_states = {"enabled": True, "state": "active", "mode": "ce"}
+ for k in default_states.keys():
+ if obj.get(k) == default_states[k]:
+ obj.pop(k, None)
+ return obj
+
+ def _state_replaced(self, want, have):
+ """The command generator when state is replaced.
+ Scope is limited to vlan objects defined in the playbook.
+ :rtype: A list
+ :returns: The minimum command set required to migrate the current
+ configuration to the desired configuration.
+ """
+ obj_in_have = search_obj_in_list(want["vlan_id"], have, "vlan_id")
+ if obj_in_have:
+ # Diff the want and have
+ diff = dict_diff(want, obj_in_have)
+ # Remove merge items from diff; what's left will be used to
+ # remove states not specified in the playbook
+ for k in dict(set(want.items()) - set(obj_in_have.items())).keys():
+ diff.pop(k, None)
+ else:
+ diff = want
+
+ # Remove default states from resulting diff
+ diff = self.remove_default_states(diff)
+
+ # merged_cmds: 'want' cmds to update 'have' states that don't match
+ # replaced_cmds: remaining 'have' cmds that need to be reset to default
+ merged_cmds = self.set_commands(want, have)
+ replaced_cmds = []
+ if obj_in_have:
+ # Remaining diff items are used to reset states to default
+ replaced_cmds = self.del_attribs(diff)
+ cmds = []
+ if replaced_cmds or merged_cmds:
+ cmds += ["vlan %s" % str(want["vlan_id"])]
+ cmds += merged_cmds + replaced_cmds
+ return cmds
+
+ def _state_overridden(self, want, have):
+ """The command generator when state is overridden.
+ Scope includes all vlan objects on the device.
+ :rtype: A list
+ :returns: the minimum command set required to migrate the current
+ configuration to the desired configuration.
+ """
+ # overridden behavior is the same as replaced except for scope.
+ cmds = []
+ existing_vlans = []
+ for h in have:
+ existing_vlans.append(h["vlan_id"])
+ obj_in_want = search_obj_in_list(h["vlan_id"], want, "vlan_id")
+ if obj_in_want:
+ if h != obj_in_want:
+ replaced_cmds = self._state_replaced(obj_in_want, [h])
+ if replaced_cmds:
+ cmds.extend(replaced_cmds)
+ else:
+ cmds.append("no vlan %s" % h["vlan_id"])
+
+ # Add wanted vlans that don't exist on the device yet
+ for w in want:
+ if w["vlan_id"] not in existing_vlans:
+ new_vlan = ["vlan %s" % w["vlan_id"]]
+ cmds.extend(new_vlan + self.add_commands(w))
+ return cmds
+
+ def _state_merged(self, w, have):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ cmds = self.set_commands(w, have)
+ if cmds:
+ cmds.insert(0, "vlan %s" % str(w["vlan_id"]))
+ return cmds
+
+ def _state_deleted(self, want, have):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if want:
+ for w in want:
+ obj_in_have = search_obj_in_list(w["vlan_id"], have, "vlan_id")
+ if obj_in_have:
+ commands.append("no vlan " + str(obj_in_have["vlan_id"]))
+ else:
+ if not have:
+ return commands
+ for h in have:
+ commands.append("no vlan " + str(h["vlan_id"]))
+ return commands
+
+ def del_attribs(self, obj):
+ """Returns a list of commands to reset states to default"""
+ commands = []
+ if not obj:
+ return commands
+ default_cmds = {
+ "name": "no name",
+ "state": "no state",
+ "enabled": "no shutdown",
+ "mode": "mode ce",
+ "mapped_vni": "no vn-segment",
+ }
+ for k in obj:
+ commands.append(default_cmds[k])
+ return commands
+
+ def diff_of_dicts(self, w, obj):
+ diff = set(w.items()) - set(obj.items())
+ diff = dict(diff)
+ if diff and w["vlan_id"] == obj["vlan_id"]:
+ diff.update({"vlan_id": w["vlan_id"]})
+ return diff
+
+ def add_commands(self, d):
+ commands = []
+ if not d:
+ return commands
+ if "name" in d:
+ commands.append("name " + d["name"])
+ if "state" in d:
+ commands.append("state " + d["state"])
+ if "enabled" in d:
+ if d["enabled"] is True:
+ commands.append("no shutdown")
+ else:
+ commands.append("shutdown")
+ if "mode" in d:
+ commands.append("mode " + d["mode"])
+ if "mapped_vni" in d:
+ commands.append("vn-segment %s" % d["mapped_vni"])
+
+ return commands
+
+ def set_commands(self, w, have):
+ commands = []
+ obj_in_have = search_obj_in_list(w["vlan_id"], have, "vlan_id")
+ if not obj_in_have:
+ commands = self.add_commands(w)
+ else:
+ diff = self.diff_of_dicts(w, obj_in_have)
+ commands = self.add_commands(diff)
+ return commands
+
+ def _sanitize(self, vlans):
+ sanitized_vlans = []
+ for vlan in vlans:
+ if not re.search("N[567][7K]", self._platform):
+ if "mode" in vlan:
+ del vlan["mode"]
+ sanitized_vlans.append(remove_empties(vlan))
+ return sanitized_vlans
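
In the vlans module above, add_commands is the single mapping from a desired-state dict to CLI lines; set_commands feeds it either the whole wanted VLAN (when it does not exist yet) or the dict diff (when it does). A stripped-down, standalone copy of that mapping, shown only for illustration with a made-up VLAN:

# Illustrative sketch: standalone copy of the add_commands mapping above.
def vlan_commands(d):
    commands = []
    if "name" in d:
        commands.append("name " + d["name"])
    if "state" in d:
        commands.append("state " + d["state"])
    if "enabled" in d:
        commands.append("no shutdown" if d["enabled"] else "shutdown")
    if "mode" in d:
        commands.append("mode " + d["mode"])
    if "mapped_vni" in d:
        commands.append("vn-segment %s" % d["mapped_vni"])
    return commands


if __name__ == "__main__":
    want = {"vlan_id": 10, "name": "blue", "enabled": False, "mapped_vni": 10010}
    print(["vlan %s" % want["vlan_id"]] + vlan_commands(want))
    # -> ['vlan 10', 'name blue', 'shutdown', 'vn-segment 10010']
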
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acl_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acl_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acl_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acl_interfaces/acl_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acl_interfaces/acl_interfaces.py
new file mode 100644
index 00000000..053b56a9
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acl_interfaces/acl_interfaces.py
@@ -0,0 +1,129 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+The nxos acl_interfaces fact class
+This file is where the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.acl_interfaces.acl_interfaces import (
+ Acl_interfacesArgs,
+)
+
+
+class Acl_interfacesFacts(object):
+ """The nxos acl_interfaces fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Acl_interfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def get_device_data(self, connection):
+ return connection.get("show running-config | section ^interface")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for acl_interfaces
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ data = self.get_device_data(connection)
+ data = data.split("interface")
+
+ resources = []
+ for i in range(len(data)):
+ intf = data[i].split("\n")
+            for idx in range(1, len(intf)):
+                if not re.search("ip(v6)?( port)? (access-group|traffic-filter)", intf[idx]):
+                    intf[idx] = ""
+ intf = list(filter(None, intf))
+ resources.append(intf)
+
+ objs = []
+ for resource in resources:
+ if resource:
+ obj = self.render_config(self.generated_spec, resource)
+ if obj:
+ objs.append(obj)
+
+ ansible_facts["ansible_network_resources"].pop("acl_interfaces", None)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ params = utils.remove_empties(params)
+ facts["acl_interfaces"] = params["config"]
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+ config["name"] = conf[0].strip()
+ config["access_groups"] = []
+ v4 = {"afi": "ipv4", "acls": []}
+ v6 = {"afi": "ipv6", "acls": []}
+ for c in conf[1:]:
+ if c:
+ acl4 = re.search(
+ r"ip(?P<port>\sport)?\saccess-group\s(?P<name>\S+)\s(?P<dir>in|out)",
+ c,
+ )
+ acl6 = re.search(
+ r"ipv6(?P<port>\sport)?\straffic-filter\s(?P<name>\S+)\s(?P<dir>in|out)",
+ c,
+ )
+ if acl4:
+ v4["acls"].append(self._parse(acl4))
+ elif acl6:
+ v6["acls"].append(self._parse(acl6))
+
+ if len(v4["acls"]) > 0:
+ config["access_groups"].append(v4)
+ if len(v6["acls"]) > 0:
+ config["access_groups"].append(v6)
+
+ return utils.remove_empties(config)
+
+ def _parse(self, data):
+ return {
+ "name": data.group("name").strip(),
+ "direction": data.group("dir").strip(),
+ "port": True if data.group("port") else None,
+ }
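
The acl_interfaces fact class above keys its parsing off two regexes, one for IPv4 "ip [port] access-group" lines and one for IPv6 "ipv6 [port] traffic-filter" lines, and _parse lifts the named groups into the facts structure. A self-contained sketch of that step; the sample interface lines are made up for illustration.

import re

# Illustrative sketch: the same patterns used by render_config/_parse above.
ACL4 = re.compile(r"ip(?P<port>\sport)?\saccess-group\s(?P<name>\S+)\s(?P<dir>in|out)")
ACL6 = re.compile(r"ipv6(?P<port>\sport)?\straffic-filter\s(?P<name>\S+)\s(?P<dir>in|out)")


def parse_acl_line(line):
    match = ACL4.search(line) or ACL6.search(line)
    if not match:
        return None
    return {
        "name": match.group("name"),
        "direction": match.group("dir"),
        "port": True if match.group("port") else None,
    }


if __name__ == "__main__":
    print(parse_acl_line("  ip access-group ACL1v4 out"))
    # -> {'name': 'ACL1v4', 'direction': 'out', 'port': None}
    print(parse_acl_line("  ipv6 port traffic-filter ACL1v6 in"))
    # -> {'name': 'ACL1v6', 'direction': 'in', 'port': True}
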
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acls/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acls/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acls/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acls/acls.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acls/acls.py
new file mode 100644
index 00000000..70ebfcdd
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/acls/acls.py
@@ -0,0 +1,327 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos acls fact class
+This file is where the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.acls.acls import (
+ AclsArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ validate_ipv4_addr,
+ validate_ipv6_addr,
+)
+
+
+class AclsFacts(object):
+ """The nxos acls fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = AclsArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def get_device_data(self, connection):
+ data = connection.get("show running-config | section 'ip(v6)* access-list'")
+ if data == "{}":
+ return ""
+ return data
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for acls
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ data = self.get_device_data(connection)
+ data = re.split("\nip", data)
+ v6 = []
+ v4 = []
+
+ for i in range(len(data)):
+ if str(data[i]):
+ if "v6" in str(data[i]).split()[0]:
+ v6.append(data[i])
+ else:
+ v4.append(data[i])
+
+ resources = []
+ resources.append(v6)
+ resources.append(v4)
+ objs = []
+ for resource in resources:
+ if resource:
+ obj = self.render_config(self.generated_spec, resource)
+ if obj:
+ objs.append(obj)
+
+ ansible_facts["ansible_network_resources"].pop("acls", None)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ params = utils.remove_empties(params)
+ facts["acls"] = params["config"]
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+    def get_endpoint(self, ace, pro):
+        """Parse one source/destination endpoint from an ACE string.
+        Returns the remaining (unconsumed) ACE text and the endpoint dict.
+        """
+        ret_dict = {}
+ option = ace.split()[0]
+ if option == "any":
+ ret_dict.update({"any": True})
+ else:
+ # it could be a.b.c.d or a.b.c.d/x or a.b.c.d/32
+ if "/" in option: # or 'host' in option:
+ prefix = re.search(r"(.*)/(\d+)", option)
+ ip = prefix.group(1)
+ cidr = prefix.group(2)
+ if (validate_ipv4_addr(option) and int(cidr) == 32) or (
+ validate_ipv6_addr(option) and int(cidr) == 128
+ ):
+ ret_dict.update({"host": ip})
+ else:
+ ret_dict.update({"prefix": option})
+ else:
+ ret_dict.update({"address": option})
+ wb = ace.split()[1]
+ ret_dict.update({"wildcard_bits": wb})
+ ace = re.sub("{0}".format(wb), "", ace, 1)
+ ace = re.sub(option, "", ace, 1)
+ if pro in ["tcp", "udp"]:
+ keywords = ["eq", "lt", "gt", "neq", "range"]
+ if len(ace.split()) and ace.split()[0] in keywords:
+ port_protocol = {}
+ port_pro = re.search(r"(eq|lt|gt|neq) (\S+)", ace)
+ if port_pro:
+ port_protocol.update({port_pro.group(1): port_pro.group(2)})
+ ace = re.sub(port_pro.group(1), "", ace, 1)
+ ace = re.sub(port_pro.group(2), "", ace, 1)
+ else:
+ limit = re.search(r"(range) (\w*) (\w*)", ace)
+ if limit:
+ port_protocol.update(
+ {
+ "range": {
+ "start": limit.group(2),
+ "end": limit.group(3),
+ },
+ },
+ )
+ ace = re.sub(limit.group(2), "", ace, 1)
+ ace = re.sub(limit.group(3), "", ace, 1)
+ if port_protocol:
+ ret_dict.update({"port_protocol": port_protocol})
+ return ace, ret_dict
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+ protocol_options = {
+ "tcp": ["fin", "established", "psh", "rst", "syn", "urg", "ack"],
+ "icmp": [
+ "administratively_prohibited",
+ "alternate_address",
+ "conversion_error",
+ "dod_host_prohibited",
+ "dod_net_prohibited",
+ "echo_request",
+ "echo_reply",
+ "echo",
+ "general_parameter_problem",
+ "host_isolated",
+ "host_precedence_unreachable",
+ "host_redirect",
+ "host_tos_redirect",
+ "host_tos_unreachable",
+ "host_unknown",
+ "host_unreachable",
+ "information_reply",
+ "information_request",
+ "mask_reply",
+ "mask_request",
+ "mobile_redirect",
+ "net_redirect",
+ "net_tos_redirect",
+ "net_tos_unreachable",
+ "net_unreachable",
+ "network_unknown",
+ "no_room_for_option",
+ "option_missing",
+ "packet_too_big",
+ "parameter_problem",
+ "port_unreachable",
+ "precedence_unreachable",
+ "protocol_unreachable",
+ "unreachable",
+ "reassembly_timeout",
+ "redirect",
+ "router_advertisement",
+ "router_solicitation",
+ "source_quench",
+ "source_route_failed",
+ "time_exceeded",
+ "timestamp_reply",
+ "timestamp_request",
+ "traceroute",
+ "ttl_exceeded",
+ ],
+ "icmpv6": [
+ "beyond_scope",
+ "destination_unreachable",
+ "echo_reply",
+ "echo_request",
+ "fragments",
+ "header",
+ "hop_limit",
+ "mld_query",
+ "mld_reduction",
+ "mld_report",
+ "mldv2",
+ "nd_na",
+ "nd_ns",
+ "next_header",
+ "no_admin",
+ "no_route",
+ "packet_too_big",
+ "parameter_option",
+ "parameter_problem",
+ "port_unreachable",
+ "reassembly_timeout",
+ "renum_command",
+ "renum_result",
+ "renum_seq_number",
+ "router_advertisement",
+ "router_renumbering",
+ "router_solicitation",
+ "time_exceeded",
+ "unreachable",
+ "telemetry_path",
+ "telemetry_queue",
+ ],
+ "igmp": ["dvmrp", "host_query", "host_report"],
+ }
+ if conf:
+ if "v6" in conf[0].split()[0]:
+ config["afi"] = "ipv6"
+ else:
+ config["afi"] = "ipv4"
+ config["acls"] = []
+ for acl in conf:
+ acls = {}
+ if "match-local-traffic" in acl:
+ config["match_local_traffic"] = True
+ continue
+ acl = acl.split("\n")
+ acl = [a.strip() for a in acl]
+ acl = list(filter(None, acl))
+ acls["name"] = re.match(r"(ip)?(v6)?\s?access-list (.*)", acl[0]).group(3)
+ acls["aces"] = []
+ for ace in list(filter(None, acl[1:])):
+ if re.search(r"^ip(.*)access-list.*", ace):
+ break
+ ace = ace.strip()
+ seq = re.match(r"(\d+)", ace)
+ rem = ""
+ entry = {}
+ if seq:
+ seq = seq.group(0)
+ entry.update({"sequence": seq})
+ ace = re.sub(seq, "", ace, 1)
+ grant = ace.split()[0]
+ if grant != "remark":
+ entry.update({"grant": grant})
+ else:
+ rem = re.match(".*remark (.*)", ace).group(1)
+ entry.update({"remark": rem})
+
+ if not rem and seq:
+ ace = re.sub(grant, "", ace, 1)
+
+ pro = ace.split()[0]
+ if pro == "icmp" and config["afi"] == "ipv6":
+ entry.update({"protocol": "icmpv6"})
+ else:
+ entry.update({"protocol": pro})
+
+ ace = re.sub(pro, "", ace, 1)
+ ace, source = self.get_endpoint(ace, pro)
+ entry.update({"source": source})
+ ace, dest = self.get_endpoint(ace, pro)
+ entry.update({"destination": dest})
+
+ dscp = re.search(r"dscp (\w*)", ace)
+ if dscp:
+ entry.update({"dscp": dscp.group(1)})
+
+ frag = re.search(r"fragments", ace)
+ if frag:
+ entry.update({"fragments": True})
+
+ prec = re.search(r"precedence (\w*)", ace)
+ if prec:
+ entry.update({"precedence": prec.group(1)})
+
+ log = re.search("log", ace)
+ if log:
+ entry.update({"log": True})
+
+ pro = entry.get("protocol", "")
+ if pro in ["tcp", "icmp", "icmpv6", "igmp"]:
+ pro_options = {}
+ options = {}
+ for option in protocol_options[pro]:
+ if option not in ["telemetry_path", "telemetry_queue"]:
+ option = re.sub("_", "-", option)
+ if option in ace:
+ if option == "echo" and (
+ "echo_request" in options or "echo_reply" in options
+ ):
+ continue
+ elif option == "unreachable" and "port_unreachable" in options:
+ continue
+ option = re.sub("-", "_", option)
+ options.update({option: True})
+ if options:
+ pro_options.update({pro: options})
+ if pro_options:
+ entry.update({"protocol_options": pro_options})
+ if entry:
+ acls["aces"].append(entry)
+ config["acls"].append(acls)
+ return utils.remove_empties(config)
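
get_endpoint above classifies each ACE endpoint the same way for IPv4 and IPv6: "any", a /32 or /128 prefix collapsed to "host", any other prefix kept as "prefix", or a bare address followed by wildcard bits. A simplified standalone sketch of that classification; it cheats by checking for ':' instead of calling the validate_ipv4_addr/validate_ipv6_addr helpers, and the sample inputs are invented.

import re

# Illustrative sketch of the endpoint classification performed by get_endpoint.
def classify_endpoint(token):
    if token == "any":
        return {"any": True}
    if "/" in token:
        ip, cidr = re.search(r"(.*)/(\d+)", token).groups()
        host_len = "128" if ":" in ip else "32"
        if cidr == host_len:
            return {"host": ip}
        return {"prefix": token}
    # a bare address; the real parser also consumes the wildcard bits that follow
    return {"address": token}


if __name__ == "__main__":
    print(classify_endpoint("192.0.2.1/32"))    # -> {'host': '192.0.2.1'}
    print(classify_endpoint("192.0.2.0/24"))    # -> {'prefix': '192.0.2.0/24'}
    print(classify_endpoint("2001:db8::/64"))   # -> {'prefix': '2001:db8::/64'}
    print(classify_endpoint("any"))             # -> {'any': True}
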
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bfd_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bfd_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bfd_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bfd_interfaces/bfd_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bfd_interfaces/bfd_interfaces.py
new file mode 100644
index 00000000..9d7cceac
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bfd_interfaces/bfd_interfaces.py
@@ -0,0 +1,104 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Cisco and/or its affiliates.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos bfd_interfaces fact class
+Populate the facts tree based on the current device configuration.
+"""
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.bfd_interfaces.bfd_interfaces import (
+ Bfd_interfacesArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ get_interface_type,
+)
+
+
+class Bfd_interfacesFacts(object):
+ """The nxos_bfd_interfaces fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Bfd_interfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for bfd_interfaces
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ objs = []
+
+ if not data:
+ data = connection.get("show running-config | section '^interface|^feature bfd'")
+
+ # Some of the bfd attributes
+ if "feature bfd" in data.split("\n"):
+ resources = data.split("interface ")
+ resources.pop(0)
+ else:
+ resources = []
+ for resource in resources:
+ if resource:
+ obj = self.render_config(self.generated_spec, resource)
+ if obj and len(obj.keys()) > 1:
+ objs.append(obj)
+
+ ansible_facts["ansible_network_resources"].pop("bfd_interfaces", None)
+ facts = {}
+ if objs:
+ facts["bfd_interfaces"] = []
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ for cfg in params["config"]:
+ facts["bfd_interfaces"].append(utils.remove_empties(cfg))
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+
+ match = re.search(r"^(\S+)", conf)
+ intf = match.group(1)
+ if get_interface_type(intf) == "unknown":
+ return {}
+ config["name"] = intf
+        # 'bfd'/'bfd echo' do not nvgen when enabled, so default to 'enable' when None.
+ # 'bfd' is not supported on some platforms
+ config["bfd"] = utils.parse_conf_cmd_arg(conf, "bfd", "enable", "disable") or "enable"
+ config["echo"] = utils.parse_conf_cmd_arg(conf, "bfd echo", "enable", "disable") or "enable"
+
+ return utils.remove_empties(config)
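
render_config above defaults both bfd and echo to "enable" because the device does not render those commands when they are at their default; only explicit disables appear in the running-config. The following is a minimal regex-based stand-in for that three-way outcome, not the real netcommon parse_conf_cmd_arg helper, and the sample config is invented.

import re

# Illustrative stand-in: 'enable' if the bare command is present, 'disable' if
# its 'no' form is present, None if neither appears (the caller defaults to enable).
def parse_cmd(conf, cmd, present="enable", absent="disable"):
    if re.search(r"^\s*no %s\s*$" % re.escape(cmd), conf, re.M):
        return absent
    if re.search(r"^\s*%s\s*$" % re.escape(cmd), conf, re.M):
        return present
    return None


if __name__ == "__main__":
    conf = "Ethernet1/1\n  no bfd echo\n"
    print(parse_cmd(conf, "bfd") or "enable")       # -> enable (defaulted)
    print(parse_cmd(conf, "bfd echo") or "enable")  # -> disable
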
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_address_family/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_address_family/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_address_family/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_address_family/bgp_address_family.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_address_family/bgp_address_family.py
new file mode 100644
index 00000000..a1b0bc22
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_address_family/bgp_address_family.py
@@ -0,0 +1,142 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos bgp_address_family fact class
+This file is where the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.bgp_address_family.bgp_address_family import (
+ Bgp_address_familyArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.bgp_address_family import (
+ Bgp_address_familyTemplate,
+)
+
+
+class Bgp_address_familyFacts(object):
+ """The nxos bgp_address_family facts class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Bgp_address_familyArgs.argument_spec
+
+ def get_config(self, connection):
+ """Wrapper method for `connection.get()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return connection.get("show running-config | section '^router bgp'")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for Bgp_address_family network resource
+
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+
+ :rtype: dictionary
+ :returns: facts
+ """
+ facts = {}
+ objs = []
+
+ if not data:
+ data = self.get_config(connection)
+
+ data = self._flatten_config(data)
+
+ # parse native config using the Bgp_address_family template
+ bgp_address_family_parser = Bgp_address_familyTemplate(lines=data.splitlines())
+ objs = bgp_address_family_parser.parse()
+ if objs:
+ nbr = []
+ if "address_family" in objs:
+ # remove neighbor AF entries
+ for k, v in iteritems(objs["address_family"]):
+ if not k.startswith("nbr_"):
+ nbr.append(k)
+ for x in nbr:
+ del objs["address_family"][x]
+
+ objs["address_family"] = list(objs["address_family"].values())
+ # sort list of dictionaries
+ for x in objs["address_family"]:
+ if "aggregate_address" in x:
+ x["aggregate_address"] = sorted(
+ x["aggregate_address"],
+ key=lambda k, s="prefix": k[s],
+ )
+ if "networks" in x:
+ x["networks"] = sorted(x["networks"], key=lambda k, s="prefix": k[s])
+ if "redistribute" in x:
+ x["redistribute"] = sorted(
+ x["redistribute"],
+ key=lambda k: (k.get("id", -1), k["protocol"]),
+ )
+ objs["address_family"] = sorted(
+ objs["address_family"],
+ key=lambda k: (
+ k.get("afi", ""),
+ k.get("safi", ""),
+ k.get("vrf", ""),
+ ),
+ )
+
+ ansible_facts["ansible_network_resources"].pop("bgp_address_family", None)
+
+ params = utils.remove_empties(utils.validate_config(self.argument_spec, {"config": objs}))
+
+ facts["bgp_address_family"] = params.get("config", {})
+ ansible_facts["ansible_network_resources"].update(facts)
+
+ return ansible_facts
+
+ def _flatten_config(self, data):
+ """Flatten contexts in the BGP
+ running-config for easier parsing.
+        :param data: the running-config text
+ :returns: flattened running config
+ """
+ data = data.split("\n")
+ in_vrf_cxt = False
+ in_nbr_cxt = False
+ cur_vrf = {}
+
+ for x in data:
+ cur_indent = len(x) - len(x.lstrip())
+ if x.strip().startswith("vrf"):
+ in_vrf_cxt = True
+ in_nbr_cxt = False
+ cur_vrf["vrf"] = x
+ cur_vrf["indent"] = cur_indent
+ elif cur_vrf and (cur_indent <= cur_vrf["indent"]):
+ in_vrf_cxt = False
+ elif x.strip().startswith("neighbor"):
+ # we entered a neighbor context which
+ # also has address-family lines
+ in_nbr_cxt = True
+ nbr = x
+ elif x.strip().startswith("address-family"):
+ if in_vrf_cxt or in_nbr_cxt:
+ prepend = ""
+ if in_vrf_cxt:
+ prepend += cur_vrf["vrf"]
+ if in_nbr_cxt:
+ if in_vrf_cxt:
+ nbr = " " + nbr.strip()
+ prepend += nbr
+ data[data.index(x)] = prepend + " " + x.strip()
+
+ return "\n".join(data)
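
_flatten_config above rewrites nested vrf/neighbor contexts so that every address-family line carries its full context on one line, which is what the resource-module template expects. A toy sketch of the vrf part of that transformation on a hypothetical config; neighbor handling and exact indentation bookkeeping are omitted.

# Illustrative sketch: prepend the enclosing vrf context onto address-family
# lines, similar in spirit to _flatten_config above (neighbor handling omitted).
def flatten_vrf_afs(text):
    out, cur_vrf, vrf_indent = [], None, 0
    for line in text.splitlines():
        indent = len(line) - len(line.lstrip())
        stripped = line.strip()
        if stripped.startswith("vrf "):
            cur_vrf, vrf_indent = stripped, indent
        elif cur_vrf and indent <= vrf_indent:
            cur_vrf = None  # left the vrf block
        if stripped.startswith("address-family") and cur_vrf:
            stripped = cur_vrf + " " + stripped
        out.append(stripped)
    return "\n".join(out)


if __name__ == "__main__":
    nested = (
        "router bgp 65535\n"
        "  address-family ipv4 unicast\n"
        "  vrf site-1\n"
        "    address-family ipv6 unicast\n"
    )
    print(flatten_vrf_afs(nested))
    # router bgp 65535
    # address-family ipv4 unicast
    # vrf site-1
    # vrf site-1 address-family ipv6 unicast
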
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_global/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_global/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_global/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_global/bgp_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_global/bgp_global.py
new file mode 100644
index 00000000..621e499f
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_global/bgp_global.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos bgp_global fact class
+This file is where the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.bgp_global.bgp_global import (
+ Bgp_globalArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.bgp_global import (
+ Bgp_globalTemplate,
+)
+
+
+class Bgp_globalFacts(object):
+ """The nxos bgp_global facts class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Bgp_globalArgs.argument_spec
+
+ def get_config(self, connection):
+ """Wrapper method for `connection.get()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return connection.get("show running-config | section '^router bgp'")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for Bgp_global network resource
+
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+
+ :rtype: dictionary
+ :returns: facts
+ """
+ facts = {}
+
+ if not data:
+ data = self.get_config(connection)
+
+ data = self._flatten_config(data)
+
+ # parse native config using the Bgp_global template
+ bgp_global_parser = Bgp_globalTemplate(lines=data.splitlines(), module=self._module)
+ obj = bgp_global_parser.parse()
+
+ vrfs = obj.get("vrfs", {})
+
+ # move global vals to their correct position in facts tree
+ # this is only needed for keys that are valid for both global
+ # and VRF contexts
+ global_vals = vrfs.pop("vrf_", {})
+ for key, value in iteritems(global_vals):
+ obj[key] = value
+
+ # transform vrfs into a list
+ if vrfs:
+ obj["vrfs"] = sorted(list(obj["vrfs"].values()), key=lambda k, sk="vrf": k[sk])
+ for vrf in obj["vrfs"]:
+ self._post_parse(vrf)
+
+ self._post_parse(obj)
+
+ obj = utils.remove_empties(obj)
+
+ ansible_facts["ansible_network_resources"].pop("bgp_global", None)
+ params = utils.remove_empties(
+ bgp_global_parser.validate_config(self.argument_spec, {"config": obj}, redact=True),
+ )
+
+ facts["bgp_global"] = params.get("config", {})
+ ansible_facts["ansible_network_resources"].update(facts)
+
+ return ansible_facts
+
+ def _flatten_config(self, data):
+ """Flatten neighbor contexts in
+ the running-config for easier parsing.
+        :param data: the running-config text
+ :returns: flattened running config
+ """
+ data = data.split("\n")
+ in_nbr_cxt = False
+ cur_nbr = {}
+
+ for x in data:
+ cur_indent = len(x) - len(x.lstrip())
+ if x.strip().startswith("neighbor"):
+ in_nbr_cxt = True
+ cur_nbr["nbr"] = x
+ cur_nbr["indent"] = cur_indent
+ elif cur_nbr and (cur_indent <= cur_nbr["indent"]):
+ in_nbr_cxt = False
+ elif in_nbr_cxt:
+ data[data.index(x)] = cur_nbr["nbr"] + " " + x.strip()
+
+ return "\n".join(data)
+
+ def _post_parse(self, obj):
+ """Converts the intermediate data structure
+ to valid format as per argspec.
+ :param obj: dict
+ """
+ conf_peers = obj.get("confederation", {}).get("peers")
+ if conf_peers:
+ obj["confederation"]["peers"] = conf_peers.split()
+ obj["confederation"]["peers"].sort()
+
+ neighbors = obj.get("neighbors", {})
+ if neighbors:
+ obj["neighbors"] = sorted(
+ list(neighbors.values()),
+ key=lambda k, sk="neighbor_address": k[sk],
+ )
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_neighbor_address_family/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_neighbor_address_family/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_neighbor_address_family/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_neighbor_address_family/bgp_neighbor_address_family.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_neighbor_address_family/bgp_neighbor_address_family.py
new file mode 100644
index 00000000..e26c1826
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/bgp_neighbor_address_family/bgp_neighbor_address_family.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos bgp_neighbor_address_family fact class
+This file is where the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.bgp_neighbor_address_family.bgp_neighbor_address_family import (
+ Bgp_neighbor_address_familyArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.bgp_neighbor_address_family import (
+ Bgp_neighbor_address_familyTemplate,
+)
+
+
+class Bgp_neighbor_address_familyFacts(object):
+ """The nxos bgp_neighbor_address_family facts class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Bgp_neighbor_address_familyArgs.argument_spec
+
+ def get_config(self, connection):
+ """Wrapper method for `connection.get()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return connection.get("show running-config | section '^router bgp'")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for Bgp_neighbor_address_family network resource
+
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+
+ :rtype: dictionary
+ :returns: facts
+ """
+ facts = {}
+ objs = {}
+
+ if not data:
+ data = self.get_config(connection)
+
+ data = self._flatten_config(data)
+
+ # parse native config using the Bgp_neighbor_address_family template
+ bgp_neighbor_address_family_parser = Bgp_neighbor_address_familyTemplate(lines=data)
+ objs = bgp_neighbor_address_family_parser.parse()
+
+ if objs:
+ top_lvl_nbrs = objs.get("vrfs", {}).pop("vrf_", {})
+ objs["neighbors"] = self._post_parse(top_lvl_nbrs).get("neighbors", [])
+
+ if "vrfs" in objs:
+ for vrf in objs["vrfs"].values():
+ vrf["neighbors"] = self._post_parse(vrf)["neighbors"]
+ objs["vrfs"] = list(objs["vrfs"].values())
+
+ ansible_facts["ansible_network_resources"].pop("bgp_neighbor_address_family", None)
+
+ params = utils.remove_empties(utils.validate_config(self.argument_spec, {"config": objs}))
+
+ facts["bgp_neighbor_address_family"] = params.get("config", {})
+ ansible_facts["ansible_network_resources"].update(facts)
+
+ return ansible_facts
+
+ def _post_parse(self, data):
+ if "neighbors" in data:
+ data["neighbors"] = sorted(
+ list(data["neighbors"].values()),
+ key=lambda k, s="neighbor_address": k[s],
+ )
+ for nbr in data["neighbors"]:
+ nbr["address_family"] = sorted(
+ list(nbr["address_family"].values()),
+ key=lambda k: (k["afi"], k.get("safi", "")),
+ )
+ return data
+
+ def _flatten_config(self, data):
+ """Flatten contexts in the BGP
+ running-config for easier parsing.
+ Only neighbor AF contexts are returned.
+ :param data: str
+ :returns: flattened running config
+ """
+ data = data.split("\n")
+ nbr_af_cxt = []
+ context = ""
+ cur_vrf = ""
+ cur_nbr_indent = None
+ in_nbr_cxt = False
+ in_af = False
+
+ # this is the "router bgp <asn>" line
+ nbr_af_cxt.append(data[0])
+ for x in data:
+ cur_indent = len(x) - len(x.lstrip())
+ x = x.strip()
+ if x.startswith("vrf"):
+ cur_vrf = x + " "
+ in_nbr_cxt = False
+ elif x.startswith("neighbor"):
+ in_nbr_cxt = True
+ in_af = False
+ cur_nbr_indent = cur_indent
+ context = x
+ if cur_vrf:
+ context = cur_vrf + context
+ elif in_nbr_cxt and cur_indent > cur_nbr_indent:
+ if x.startswith("address-family"):
+ in_af = True
+ x = context + " " + x
+ if in_af:
+ nbr_af_cxt.append(x)
+ else:
+ in_nbr_cxt = False
+
+ return nbr_af_cxt
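
To make the flattening above concrete, here is a small, self-contained sketch with a hypothetical running-config snippet. Each address-family line under a neighbor is re-emitted with its full vrf/neighbor prefix so the single-level template can match it; everything outside neighbor address-family contexts (except the opening router bgp line) is dropped.

    # Hypothetical input; indentation mirrors NX-OS nesting.
    raw = (
        "router bgp 65001\n"
        "  neighbor 192.0.2.1\n"
        "    address-family ipv4 unicast\n"
        "      next-hop-self\n"
        "  vrf site-1\n"
        "    neighbor 198.51.100.1\n"
        "      address-family ipv4 unicast\n"
        "        route-reflector-client"
    )

    # _flatten_config(raw) would return, line by line:
    #   router bgp 65001
    #   neighbor 192.0.2.1 address-family ipv4 unicast
    #   next-hop-self
    #   vrf site-1 neighbor 198.51.100.1 address-family ipv4 unicast
    #   route-reflector-client
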
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/facts.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/facts.py
new file mode 100644
index 00000000..b70cb590
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/facts.py
@@ -0,0 +1,188 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+"""
+The facts class for nxos
+This file validates each subset of facts and selectively
+calls the appropriate facts gathering function.
+"""
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts import (
+ FactsBase,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.acl_interfaces.acl_interfaces import (
+ Acl_interfacesFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.acls.acls import (
+ AclsFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.bfd_interfaces.bfd_interfaces import (
+ Bfd_interfacesFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.bgp_address_family.bgp_address_family import (
+ Bgp_address_familyFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.bgp_global.bgp_global import (
+ Bgp_globalFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.bgp_neighbor_address_family.bgp_neighbor_address_family import (
+ Bgp_neighbor_address_familyFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.hostname.hostname import (
+ HostnameFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.hsrp_interfaces.hsrp_interfaces import (
+ Hsrp_interfacesFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.interfaces.interfaces import (
+ InterfacesFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.l2_interfaces.l2_interfaces import (
+ L2_interfacesFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.l3_interfaces.l3_interfaces import (
+ L3_interfacesFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.lacp.lacp import (
+ LacpFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.lacp_interfaces.lacp_interfaces import (
+ Lacp_interfacesFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.lag_interfaces.lag_interfaces import (
+ Lag_interfacesFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.legacy.base import (
+ Config,
+ Default,
+ Features,
+ Hardware,
+ Interfaces,
+ Legacy,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.lldp_global.lldp_global import (
+ Lldp_globalFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.lldp_interfaces.lldp_interfaces import (
+ Lldp_interfacesFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.logging_global.logging_global import (
+ Logging_globalFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.ntp_global.ntp_global import (
+ Ntp_globalFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.ospf_interfaces.ospf_interfaces import (
+ Ospf_interfacesFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.ospfv2.ospfv2 import (
+ Ospfv2Facts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.ospfv3.ospfv3 import (
+ Ospfv3Facts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.prefix_lists.prefix_lists import (
+ Prefix_listsFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.route_maps.route_maps import (
+ Route_mapsFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.snmp_server.snmp_server import (
+ Snmp_serverFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.static_routes.static_routes import (
+ Static_routesFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.telemetry.telemetry import (
+ TelemetryFacts,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.vlans.vlans import (
+ VlansFacts,
+)
+
+
+FACT_LEGACY_SUBSETS = dict(
+ default=Default,
+ legacy=Legacy,
+ hardware=Hardware,
+ interfaces=Interfaces,
+ config=Config,
+ features=Features,
+)
+NX_FACT_RESOURCE_SUBSETS = dict(
+ bfd_interfaces=Bfd_interfacesFacts,
+ hsrp_interfaces=Hsrp_interfacesFacts,
+ lag_interfaces=Lag_interfacesFacts,
+ lldp_global=Lldp_globalFacts,
+ telemetry=TelemetryFacts,
+ vlans=VlansFacts,
+ lacp=LacpFacts,
+ lacp_interfaces=Lacp_interfacesFacts,
+ interfaces=InterfacesFacts,
+ l3_interfaces=L3_interfacesFacts,
+ l2_interfaces=L2_interfacesFacts,
+ lldp_interfaces=Lldp_interfacesFacts,
+ acl_interfaces=Acl_interfacesFacts,
+ acls=AclsFacts,
+ static_routes=Static_routesFacts,
+ ospfv2=Ospfv2Facts,
+ ospfv3=Ospfv3Facts,
+ ospf_interfaces=Ospf_interfacesFacts,
+ bgp_global=Bgp_globalFacts,
+ bgp_address_family=Bgp_address_familyFacts,
+ bgp_neighbor_address_family=Bgp_neighbor_address_familyFacts,
+ route_maps=Route_mapsFacts,
+ prefix_lists=Prefix_listsFacts,
+ logging_global=Logging_globalFacts,
+ ntp_global=Ntp_globalFacts,
+ snmp_server=Snmp_serverFacts,
+ hostname=HostnameFacts,
+)
+MDS_FACT_RESOURCE_SUBSETS = dict(
+ logging_global=Logging_globalFacts,
+ ntp_global=Ntp_globalFacts,
+ snmp_server=Snmp_serverFacts,
+)
+
+
+class Facts(FactsBase):
+ """The fact class for nxos"""
+
+ VALID_LEGACY_GATHER_SUBSETS = frozenset(FACT_LEGACY_SUBSETS.keys())
+
+ def __init__(self, module, chassis_type="nexus"):
+ super(Facts, self).__init__(module)
+ self.chassis_type = chassis_type
+
+ def get_resource_subsets(self):
+ """Return facts resource subsets based on
+ target device model.
+ """
+ facts_resource_subsets = NX_FACT_RESOURCE_SUBSETS
+ if self.chassis_type == "mds":
+ facts_resource_subsets = MDS_FACT_RESOURCE_SUBSETS
+ return facts_resource_subsets
+
+ def get_facts(self, legacy_facts_type=None, resource_facts_type=None, data=None):
+ """Collect the facts for nxos
+ :param legacy_facts_type: List of legacy facts types
+ :param resource_facts_type: List of resource fact types
+ :param data: previously collected conf
+ :rtype: dict
+ :return: the facts gathered
+ """
+ VALID_RESOURCE_SUBSETS = self.get_resource_subsets()
+
+ if frozenset(VALID_RESOURCE_SUBSETS.keys()):
+ self.get_network_resources_facts(VALID_RESOURCE_SUBSETS, resource_facts_type, data)
+
+ if self.VALID_LEGACY_GATHER_SUBSETS:
+ self.get_network_legacy_facts(FACT_LEGACY_SUBSETS, legacy_facts_type)
+
+ return self.ansible_facts, self._warnings
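
The Facts class above only chooses which resource map applies; the gathering loop itself comes from netcommon's FactsBase. A minimal sketch of the chassis-based selection, using placeholder subset maps rather than the real Facts classes:

    # Placeholder maps standing in for NX_FACT_RESOURCE_SUBSETS / MDS_FACT_RESOURCE_SUBSETS.
    NX_SUBSETS = {"bgp_global": object, "hostname": object}
    MDS_SUBSETS = {"ntp_global": object}

    def resource_subsets(chassis_type="nexus"):
        """Return the resource-subset map for the detected chassis type."""
        return MDS_SUBSETS if chassis_type == "mds" else NX_SUBSETS

    print(sorted(resource_subsets()))       # ['bgp_global', 'hostname']
    print(sorted(resource_subsets("mds")))  # ['ntp_global']
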
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hostname/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hostname/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hostname/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hostname/hostname.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hostname/hostname.py
new file mode 100644
index 00000000..981648c0
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hostname/hostname.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos hostname fact class
+It is in this file that the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.hostname.hostname import (
+ HostnameArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.hostname import (
+ HostnameTemplate,
+)
+
+
+class HostnameFacts(object):
+ """The nxos hostname facts class"""
+
+ def __init__(self, module):
+ self._module = module
+ self.argument_spec = HostnameArgs.argument_spec
+
+ def get_config(self, connection):
+ """Wrapper method for `connection.get()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return connection.get("show running-config | section ^hostname")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for Hostname network resource
+
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+
+ :rtype: dictionary
+ :returns: facts
+ """
+ facts = {}
+ objs = []
+
+ if not data:
+ data = self.get_config(connection)
+
+ # parse native config using the Hostname template
+ hostname_parser = HostnameTemplate(lines=data.splitlines(), module=self._module)
+ objs = hostname_parser.parse()
+
+ ansible_facts["ansible_network_resources"].pop("hostname", None)
+
+ params = utils.remove_empties(
+ hostname_parser.validate_config(self.argument_spec, {"config": objs}, redact=True),
+ )
+
+ facts["hostname"] = params.get("config", {})
+ ansible_facts["ansible_network_resources"].update(facts)
+
+ return ansible_facts
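
The hostname resource produces a very small facts payload. As a rough, hypothetical stand-in for what the HostnameTemplate parser extracts from "show running-config | section ^hostname" (the real parsing is template driven, not this regex):

    import re

    raw = "hostname my-switch\n"
    match = re.search(r"^hostname (\S+)", raw, re.M)
    config = {"hostname": match.group(1)} if match else {}
    # config -> {"hostname": "my-switch"}
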
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hsrp_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hsrp_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hsrp_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hsrp_interfaces/hsrp_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hsrp_interfaces/hsrp_interfaces.py
new file mode 100644
index 00000000..d12e3223
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/hsrp_interfaces/hsrp_interfaces.py
@@ -0,0 +1,96 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Cisco and/or its affiliates.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos hsrp_interfaces fact class
+Populate the facts tree based on the current device configuration.
+"""
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.hsrp_interfaces.hsrp_interfaces import (
+ Hsrp_interfacesArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ get_interface_type,
+)
+
+
+class Hsrp_interfacesFacts(object):
+ """The nxos hsrp_interfaces fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Hsrp_interfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for hsrp_interfaces
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ objs = []
+
+ if not data:
+ data = connection.get("show running-config | section ^interface")
+
+ resources = data.split("interface ")
+ for resource in resources:
+ if resource:
+ obj = self.render_config(self.generated_spec, resource)
+ if obj and len(obj.keys()) > 1:
+ objs.append(obj)
+
+ ansible_facts["ansible_network_resources"].pop("hsrp_interfaces", None)
+ facts = {}
+ if objs:
+ facts["hsrp_interfaces"] = []
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ for cfg in params["config"]:
+ facts["hsrp_interfaces"].append(utils.remove_empties(cfg))
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+
+ match = re.search(r"^(\S+)", conf)
+ intf = match.group(1)
+ if get_interface_type(intf) == "unknown":
+ return {}
+ config["name"] = intf
+ config["bfd"] = utils.parse_conf_cmd_arg(conf, "hsrp bfd", "enable", "disable")
+
+ return utils.remove_empties(config)
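
The bfd key above relies on utils.parse_conf_cmd_arg, which maps the presence of a command to one value and the presence of its "no" form to another. A rough, hypothetical stand-in (not the netcommon implementation) showing the intended behaviour:

    def parse_cmd_flag(conf, cmd, res1, res2=None):
        """Return res1 if cmd appears in the block, res2 if its 'no' form does."""
        for line in conf.splitlines():
            line = line.strip()
            if line == cmd:
                return res1
            if res2 is not None and line == "no " + cmd:
                return res2
        return None

    block = "Ethernet1/1\n  hsrp bfd\n"
    print(parse_cmd_flag(block, "hsrp bfd", "enable", "disable"))  # enable
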
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/interfaces/interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/interfaces/interfaces.py
new file mode 100644
index 00000000..92592b07
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/interfaces/interfaces.py
@@ -0,0 +1,110 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos interfaces fact class
+It is in this file that the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.interfaces.interfaces import (
+ InterfacesArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ get_interface_type,
+)
+
+
+class InterfacesFacts(object):
+ """The nxos interfaces fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = InterfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for interfaces
+ :param connection: the device connection
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ objs = []
+ if not data:
+ data = connection.get("show running-config | section ^interface")
+
+ config = ("\n" + data).split("\ninterface ")
+ for conf in config:
+ conf = conf.strip()
+ if conf:
+ obj = self.render_config(self.generated_spec, conf)
+ if obj:
+ objs.append(obj)
+
+ ansible_facts["ansible_network_resources"].pop("interfaces", None)
+ facts = {}
+ facts["interfaces"] = []
+ if objs:
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ for cfg in params["config"]:
+ facts["interfaces"].append(utils.remove_empties(cfg))
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+
+ match = re.search(r"^(\S+)", conf)
+ intf = match.group(1)
+ if get_interface_type(intf) == "unknown":
+ return {}
+ config["name"] = intf
+ config["description"] = utils.parse_conf_arg(conf, "description")
+ config["speed"] = utils.parse_conf_arg(conf, "speed")
+ config["mtu"] = utils.parse_conf_arg(conf, "mtu")
+ config["duplex"] = utils.parse_conf_arg(conf, "duplex")
+ config["mode"] = utils.parse_conf_cmd_arg(conf, "switchport", "layer2", "layer3")
+
+ config["enabled"] = utils.parse_conf_cmd_arg(conf, "shutdown", False, True)
+
+ config["fabric_forwarding_anycast_gateway"] = utils.parse_conf_cmd_arg(
+ conf,
+ "fabric forwarding mode anycast-gateway",
+ True,
+ )
+ config["ip_forward"] = utils.parse_conf_cmd_arg(conf, "ip forward", True)
+
+ interfaces_cfg = utils.remove_empties(config)
+ return interfaces_cfg
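
As a concrete illustration of render_config above, a hypothetical interface block and the entry it would reduce to once remove_empties drops the unset keys (values are kept as strings at this stage; argspec validation handles any casting later):

    # One block as produced by splitting "show running-config | section ^interface".
    conf = (
        "Ethernet1/1\n"
        "  description uplink to core\n"
        "  mtu 9216\n"
        "  no shutdown\n"
    )

    # Roughly the resulting facts entry:
    expected = {
        "name": "Ethernet1/1",
        "description": "uplink to core",
        "mtu": "9216",
        "enabled": True,
    }
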
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l2_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l2_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l2_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l2_interfaces/l2_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l2_interfaces/l2_interfaces.py
new file mode 100644
index 00000000..cf55552b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l2_interfaces/l2_interfaces.py
@@ -0,0 +1,104 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos l2_interfaces fact class
+It is in this file that the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.l2_interfaces.l2_interfaces import (
+ L2_interfacesArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ get_interface_type,
+)
+
+
+class L2_interfacesFacts(object):
+ """The nxos l2_interfaces fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = L2_interfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for l2_interfaces
+ :param connection: the device connection
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ objs = []
+ if not data:
+ data = connection.get("show running-config | section ^interface")
+
+ config = ("\n" + data).split("\ninterface ")
+ for conf in config:
+ conf = conf.strip()
+ if conf:
+ obj = self.render_config(self.generated_spec, conf)
+ if obj:
+ objs.append(obj)
+
+ ansible_facts["ansible_network_resources"].pop("l2_interfaces", None)
+ facts = {}
+ if objs:
+ facts["l2_interfaces"] = []
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ for cfg in params["config"]:
+ facts["l2_interfaces"].append(utils.remove_empties(cfg))
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+
+ match = re.search(r"^(\S+)", conf)
+ intf = match.group(1)
+ if get_interface_type(intf) == "unknown":
+ return {}
+
+ config["name"] = intf
+ config["mode"] = utils.parse_conf_arg(conf, "switchport mode")
+ config["ip_forward"] = utils.parse_conf_arg(conf, "ip forward")
+ config["access"]["vlan"] = utils.parse_conf_arg(conf, "switchport access vlan")
+ config["trunk"]["allowed_vlans"] = utils.parse_conf_arg(
+ conf,
+ "switchport trunk allowed vlan",
+ )
+ config["trunk"]["native_vlan"] = utils.parse_conf_arg(conf, "switchport trunk native vlan")
+
+ return utils.remove_empties(config)
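
A similar hypothetical walk-through for the L2 parser above: a trunk port block and the keys it would yield before argspec validation (again, raw string values straight from parse_conf_arg):

    conf = (
        "Ethernet1/2\n"
        "  switchport mode trunk\n"
        "  switchport trunk native vlan 10\n"
        "  switchport trunk allowed vlan 10,20,30\n"
    )

    # Roughly the rendered entry:
    expected = {
        "name": "Ethernet1/2",
        "mode": "trunk",
        "trunk": {"native_vlan": "10", "allowed_vlans": "10,20,30"},
    }
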
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l3_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l3_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l3_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l3_interfaces/l3_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l3_interfaces/l3_interfaces.py
new file mode 100644
index 00000000..ad36e17c
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/l3_interfaces/l3_interfaces.py
@@ -0,0 +1,135 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos l3_interfaces fact class
+It is in this file that the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.l3_interfaces.l3_interfaces import (
+ L3_interfacesArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ get_interface_type,
+)
+
+
+class L3_interfacesFacts(object):
+ """The nxos l3_interfaces fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = L3_interfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for l3_interfaces
+ :param connection: the device connection
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ objs = []
+ if not data:
+ data = connection.get("show running-config | section ^interface")
+
+ config = ("\n" + data).split("\ninterface ")
+ for conf in config:
+ conf = conf.strip()
+ if conf:
+ obj = self.render_config(self.generated_spec, conf)
+ if obj:
+ objs.append(obj)
+
+ ansible_facts["ansible_network_resources"].pop("l3_interfaces", None)
+ facts = {}
+ if objs:
+ facts["l3_interfaces"] = []
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ for cfg in params["config"]:
+ facts["l3_interfaces"].append(utils.remove_empties(cfg))
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+ match = re.search(r"^(\S+)", conf)
+ intf = match.group(1)
+ if get_interface_type(intf) == "unknown":
+ return {}
+ config["name"] = intf
+ config["dot1q"] = utils.parse_conf_arg(conf, "encapsulation dot1[qQ]")
+ config["redirects"] = utils.parse_conf_cmd_arg(conf, "no ip redirects", False, True)
+ config["ipv6_redirects"] = utils.parse_conf_cmd_arg(conf, "no ipv6 redirects", False, True)
+ config["unreachables"] = utils.parse_conf_cmd_arg(conf, "ip unreachables", True, False)
+ config["evpn_multisite_tracking"] = utils.parse_conf_arg(conf, "evpn multisite")
+ ipv4_match = re.compile(r"\n ip address (.*)")
+ matches = ipv4_match.findall(conf)
+ if matches:
+ if matches[0]:
+ config["ipv4"] = []
+ for m in matches:
+ ipv4_conf = m.split()
+ addr = ipv4_conf[0]
+ if addr:
+ config_dict = {"address": addr}
+ if len(ipv4_conf) > 1:
+ d = ipv4_conf[1]
+ if d == "secondary":
+ config_dict.update({"secondary": True})
+ if len(ipv4_conf) == 4:
+ if ipv4_conf[2] == "tag":
+ config_dict.update({"tag": int(ipv4_conf[-1])})
+ elif d == "tag":
+ config_dict.update({"tag": int(ipv4_conf[-1])})
+ config["ipv4"].append(config_dict)
+
+ ipv6_match = re.compile(r"\n ipv6 address (.*)")
+ matches = ipv6_match.findall(conf)
+ if matches:
+ if matches[0]:
+ config["ipv6"] = []
+ for m in matches:
+ ipv6_conf = m.split()
+ addr = ipv6_conf[0]
+ if addr:
+ config_dict = {"address": addr}
+ if len(ipv6_conf) > 1:
+ d = ipv6_conf[1]
+ if d == "tag":
+ config_dict.update({"tag": int(ipv6_conf[-1])})
+ config["ipv6"].append(config_dict)
+
+ return utils.remove_empties(config)
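
The per-address token handling above can be hard to follow inline; this hypothetical, standalone rendering of one parsed "ip address" line mirrors the same branching:

    ipv4_conf = "192.0.2.5/24 secondary tag 30".split()
    config_dict = {"address": ipv4_conf[0]}
    if len(ipv4_conf) > 1:
        if ipv4_conf[1] == "secondary":
            config_dict["secondary"] = True
            if len(ipv4_conf) == 4 and ipv4_conf[2] == "tag":
                config_dict["tag"] = int(ipv4_conf[-1])
        elif ipv4_conf[1] == "tag":
            config_dict["tag"] = int(ipv4_conf[-1])
    # config_dict -> {'address': '192.0.2.5/24', 'secondary': True, 'tag': 30}
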
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp/lacp.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp/lacp.py
new file mode 100644
index 00000000..51b1413d
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp/lacp.py
@@ -0,0 +1,89 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos lacp fact class
+It is in this file that the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.lacp.lacp import (
+ LacpArgs,
+)
+
+
+class LacpFacts(object):
+ """The nxos lacp fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = LacpArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for lacp
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ data = connection.get("show running-config | include lacp")
+ resources = data.strip()
+ objs = self.render_config(self.generated_spec, resources)
+ ansible_facts["ansible_network_resources"].pop("lacp", None)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ facts["lacp"] = utils.remove_empties(params["config"])
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+
+ p_match = re.search(r"lacp system-priority (\d+)", conf, re.M)
+ if p_match:
+ config["system"]["priority"] = p_match.group(1)
+
+ a_match = re.search(r"lacp system-mac (\S+)", conf, re.M)
+ if a_match:
+ address = a_match.group(1)
+ config["system"]["mac"]["address"] = address
+ r_match = re.search(r"lacp system-mac {0} role (\S+)".format(address), conf, re.M)
+ if r_match:
+ config["system"]["mac"]["role"] = r_match.group(1)
+
+ return utils.remove_empties(config)
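
To show how the three lacp regexes above compose, a hypothetical "show running-config | include lacp" blob and the nested dictionary it produces:

    import re

    conf = "lacp system-priority 100\nlacp system-mac 00ab.cdef.1234 role primary"

    system = {}
    p_match = re.search(r"lacp system-priority (\d+)", conf, re.M)
    if p_match:
        system["priority"] = p_match.group(1)
    a_match = re.search(r"lacp system-mac (\S+)", conf, re.M)
    if a_match:
        address = a_match.group(1)
        system["mac"] = {"address": address}
        r_match = re.search(r"lacp system-mac {0} role (\S+)".format(address), conf, re.M)
        if r_match:
            system["mac"]["role"] = r_match.group(1)
    # system -> {'priority': '100', 'mac': {'address': '00ab.cdef.1234', 'role': 'primary'}}
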
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp_interfaces/lacp_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp_interfaces/lacp_interfaces.py
new file mode 100644
index 00000000..c60d8abd
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lacp_interfaces/lacp_interfaces.py
@@ -0,0 +1,115 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos lacp_interfaces fact class
+It is in this file that the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.lacp_interfaces.lacp_interfaces import (
+ Lacp_interfacesArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ get_interface_type,
+)
+
+
+class Lacp_interfacesFacts(object):
+ """The nxos lacp_interfaces fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Lacp_interfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for lacp_interfaces
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ objs = []
+
+ if not data:
+ data = connection.get("show running-config | section ^interface")
+
+ resources = ("\n" + data).split("\ninterface ")
+ for resource in resources:
+ if resource and re.search(r"lacp", resource):
+ obj = self.render_config(self.generated_spec, resource)
+ if obj and len(obj.keys()) > 1:
+ objs.append(obj)
+
+ ansible_facts["ansible_network_resources"].pop("lacp_interfaces", None)
+ facts = {}
+ if objs:
+ facts["lacp_interfaces"] = []
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ for cfg in params["config"]:
+ facts["lacp_interfaces"].append(utils.remove_empties(cfg))
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+
+ match = re.search(r"^(\S+)", conf)
+ intf = match.group(1)
+ if get_interface_type(intf) == "unknown":
+ return {}
+ config["name"] = intf
+ config["port_priority"] = utils.parse_conf_arg(conf, "lacp port-priority")
+ config["rate"] = utils.parse_conf_arg(conf, "lacp rate")
+ config["mode"] = utils.parse_conf_arg(conf, "lacp mode")
+ suspend_individual = re.search(r"no lacp suspend-individual", conf)
+ if suspend_individual:
+ config["suspend_individual"] = False
+ max_links = utils.parse_conf_arg(conf, "lacp max-bundle")
+ if max_links:
+ config["links"]["max"] = max_links
+ min_links = utils.parse_conf_arg(conf, "lacp min-links")
+ if min_links:
+ config["links"]["min"] = min_links
+ graceful = re.search(r"no lacp graceful-convergence", conf)
+ if graceful:
+ config["convergence"]["graceful"] = False
+ vpc = re.search(r"lacp vpc-convergence", conf)
+ if vpc:
+ config["convergence"]["vpc"] = True
+
+ return utils.remove_empties(config)
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lag_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lag_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lag_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lag_interfaces/lag_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lag_interfaces/lag_interfaces.py
new file mode 100644
index 00000000..b94c57e2
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lag_interfaces/lag_interfaces.py
@@ -0,0 +1,104 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos lag_interfaces fact class
+It is in this file that the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.lag_interfaces.lag_interfaces import (
+ Lag_interfacesArgs,
+)
+
+
+class Lag_interfacesFacts(object):
+ """The nxos lag_interfaces fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Lag_interfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for lag_interfaces
+ :param connection: the device connection
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ objs = []
+ if not data:
+ data = connection.get("show running-config | section ^interface")
+
+ objs = self.render_config(self.generated_spec, data, connection)
+
+ ansible_facts["ansible_network_resources"].pop("lag_interfaces", None)
+ facts = {}
+ if objs:
+ facts["lag_interfaces"] = []
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ for cfg in params["config"]:
+ facts["lag_interfaces"].append(utils.remove_empties(cfg))
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf, connection):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ result = []
+ match = re.findall(r"interface (port-channel\d+)", conf)
+
+ for item in match:
+ result.append({"name": item, "members": []})
+
+ for intf in conf.split("interface "):
+ member = {}
+ match_intf = re.search(r"(port-channel|Ethernet)(\S+)", intf)
+ if match_intf:
+ member["member"] = match_intf.group(0)
+
+ match_line = re.search(
+ r"channel-group\s(?P<port_channel>\d+)(\smode\s(?P<mode>on|active|passive))?",
+ intf,
+ )
+ if match_line:
+ member.update(match_line.groupdict())
+
+ if member and member.get("port_channel", None):
+ port_channel = "port-channel{0}".format(member.pop("port_channel"))
+ for x in result:
+ if x["name"] == port_channel:
+ x["members"].append(utils.remove_empties(member))
+
+ return result
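
A hypothetical single-interface block run through the two regexes used above, showing how channel-group membership is captured with named groups before being folded into the matching port-channel entry:

    import re

    intf = "Ethernet1/10\n  channel-group 10 mode active\n"

    member = {}
    match_intf = re.search(r"(port-channel|Ethernet)(\S+)", intf)
    if match_intf:
        member["member"] = match_intf.group(0)

    match_line = re.search(
        r"channel-group\s(?P<port_channel>\d+)(\smode\s(?P<mode>on|active|passive))?",
        intf,
    )
    if match_line:
        member.update(match_line.groupdict())
    # member -> {'member': 'Ethernet1/10', 'port_channel': '10', 'mode': 'active'}
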
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/legacy/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/legacy/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/legacy/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/legacy/base.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/legacy/base.py
new file mode 100644
index 00000000..2244e222
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/legacy/base.py
@@ -0,0 +1,793 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import platform
+import re
+
+from ansible.module_utils.six import iteritems
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import (
+ get_capabilities,
+ get_config,
+ run_commands,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ get_interface_type,
+ normalize_interface,
+)
+
+
+g_config = None
+
+
+class FactsBase(object):
+ def __init__(self, module):
+ self.module = module
+ self.warnings = list()
+ self.facts = dict()
+ self.capabilities = get_capabilities(self.module)
+
+ def populate(self):
+ pass
+
+ def run(self, command, output="text"):
+ command_string = command
+ command = {"command": command, "output": output}
+ resp = run_commands(self.module, [command], check_rc="retry_json")
+ try:
+ return resp[0]
+ except IndexError:
+ self.warnings.append(
+ "command %s failed, facts for this command will not be populated" % command_string,
+ )
+ return None
+
+ def get_config(self):
+ global g_config
+ if not g_config:
+ g_config = get_config(self.module)
+ return g_config
+
+ def transform_dict(self, data, keymap):
+ transform = dict()
+ for key, fact in keymap:
+ if key in data:
+ transform[fact] = data[key]
+ return transform
+
+ def transform_iterable(self, iterable, keymap):
+ for item in iterable:
+ yield self.transform_dict(item, keymap)
+
+
+class Default(FactsBase):
+ def populate(self):
+ data = None
+ data = self.run("show version")
+
+ if data:
+ self.facts["serialnum"] = self.parse_serialnum(data)
+
+ data = self.run("show license host-id")
+ if data:
+ self.facts["license_hostid"] = self.parse_license_hostid(data)
+
+ self.facts.update(self.platform_facts())
+
+ def parse_serialnum(self, data):
+ match = re.search(r"Processor Board ID\s*(\S+)", data, re.M)
+ if match:
+ return match.group(1)
+
+ def platform_facts(self):
+ platform_facts = {}
+
+ resp = self.capabilities
+ device_info = resp["device_info"]
+
+ platform_facts["system"] = device_info["network_os"]
+
+ for item in ("model", "image", "version", "platform", "hostname"):
+ val = device_info.get("network_os_%s" % item)
+ if val:
+ platform_facts[item] = val
+
+ platform_facts["api"] = resp["network_api"]
+ platform_facts["python_version"] = platform.python_version()
+
+ return platform_facts
+
+ def parse_license_hostid(self, data):
+ match = re.search(r"License hostid: VDH=(.+)$", data, re.M)
+ if match:
+ return match.group(1)
+
+
+class Config(FactsBase):
+ def populate(self):
+ super(Config, self).populate()
+ self.facts["config"] = self.get_config()
+
+
+class Features(FactsBase):
+ def populate(self):
+ super(Features, self).populate()
+ data = self.get_config()
+
+ if data:
+ features = []
+ for line in data.splitlines():
+ if line.startswith("feature"):
+ features.append(line.replace("feature", "").strip())
+
+ self.facts["features_enabled"] = features
+
+
+class Hardware(FactsBase):
+ def populate(self):
+ data = self.run("dir")
+ if data:
+ self.facts["filesystems"] = self.parse_filesystems(data)
+
+ data = None
+ data = self.run("show system resources", output="json")
+
+ if data:
+ if isinstance(data, dict):
+ self.facts["memtotal_mb"] = int(data["memory_usage_total"]) / 1024
+ self.facts["memfree_mb"] = int(data["memory_usage_free"]) / 1024
+ else:
+ self.facts["memtotal_mb"] = self.parse_memtotal_mb(data)
+ self.facts["memfree_mb"] = self.parse_memfree_mb(data)
+
+ def parse_filesystems(self, data):
+ return re.findall(r"^Usage for (\S+)//", data, re.M)
+
+ def parse_memtotal_mb(self, data):
+ match = re.search(r"(\S+)K(\s+|)total", data, re.M)
+ if match:
+ memtotal = match.group(1)
+ return int(memtotal) / 1024
+
+ def parse_memfree_mb(self, data):
+ match = re.search(r"(\S+)K(\s+|)free", data, re.M)
+ if match:
+ memfree = match.group(1)
+ return int(memfree) / 1024
+
+
+class Interfaces(FactsBase):
+ INTERFACE_MAP = frozenset(
+ [
+ ("state", "state"),
+ ("desc", "description"),
+ ("eth_bw", "bandwidth"),
+ ("eth_duplex", "duplex"),
+ ("eth_speed", "speed"),
+ ("eth_mode", "mode"),
+ ("eth_hw_addr", "macaddress"),
+ ("eth_mtu", "mtu"),
+ ("eth_hw_desc", "type"),
+ ],
+ )
+
+ INTERFACE_SVI_MAP = frozenset(
+ [
+ ("svi_line_proto", "state"),
+ ("svi_bw", "bandwidth"),
+ ("svi_mac", "macaddress"),
+ ("svi_mtu", "mtu"),
+ ("type", "type"),
+ ],
+ )
+
+ INTERFACE_IPV4_MAP = frozenset([("eth_ip_addr", "address"), ("eth_ip_mask", "masklen")])
+
+ INTERFACE_SVI_IPV4_MAP = frozenset([("svi_ip_addr", "address"), ("svi_ip_mask", "masklen")])
+
+ INTERFACE_IPV6_MAP = frozenset([("addr", "address"), ("prefix", "subnet")])
+
+ def ipv6_structure_op_supported(self):
+ data = self.capabilities
+ if data:
+ nxos_os_version = data["device_info"]["network_os_version"]
+ unsupported_versions = ["I2", "F1", "A8"]
+ for ver in unsupported_versions:
+ if ver in nxos_os_version:
+ return False
+ return True
+
+ def populate(self):
+ self.facts["all_ipv4_addresses"] = list()
+ self.facts["all_ipv6_addresses"] = list()
+ self.facts["neighbors"] = {}
+ data = None
+
+ data = self.run("show interface", output="json")
+
+ if data:
+ if isinstance(data, dict):
+ self.facts["interfaces"] = self.populate_structured_interfaces(data)
+ else:
+ interfaces = self.parse_interfaces(data)
+ self.facts["interfaces"] = self.populate_interfaces(interfaces)
+
+ if self.ipv6_structure_op_supported():
+ data = self.run("show ipv6 interface", output="json")
+ else:
+ data = None
+ if data:
+ if isinstance(data, dict):
+ self.populate_structured_ipv6_interfaces(data)
+ else:
+ interfaces = self.parse_interfaces(data)
+ self.populate_ipv6_interfaces(interfaces)
+
+ data = self.run("show lldp neighbors", output="json")
+ if data:
+ if isinstance(data, dict):
+ self.facts["neighbors"].update(self.populate_structured_neighbors_lldp(data))
+ else:
+ self.facts["neighbors"].update(self.populate_neighbors(data))
+
+ data = self.run("show cdp neighbors detail", output="json")
+ if data:
+ if isinstance(data, dict):
+ self.facts["neighbors"].update(self.populate_structured_neighbors_cdp(data))
+ else:
+ self.facts["neighbors"].update(self.populate_neighbors_cdp(data))
+
+ self.facts["neighbors"].pop(None, None) # Remove null key
+
+ def populate_structured_interfaces(self, data):
+ interfaces = dict()
+ data = data["TABLE_interface"]["ROW_interface"]
+
+ if isinstance(data, dict):
+ data = [data]
+
+ for item in data:
+ name = item["interface"]
+
+ intf = dict()
+ if "type" in item:
+ intf.update(self.transform_dict(item, self.INTERFACE_SVI_MAP))
+ else:
+ intf.update(self.transform_dict(item, self.INTERFACE_MAP))
+
+ if "eth_ip_addr" in item:
+ intf["ipv4"] = self.transform_dict(item, self.INTERFACE_IPV4_MAP)
+ self.facts["all_ipv4_addresses"].append(item["eth_ip_addr"])
+
+ if "svi_ip_addr" in item:
+ intf["ipv4"] = self.transform_dict(item, self.INTERFACE_SVI_IPV4_MAP)
+ self.facts["all_ipv4_addresses"].append(item["svi_ip_addr"])
+
+ interfaces[name] = intf
+
+ return interfaces
+
+ def populate_structured_ipv6_interfaces(self, data):
+ try:
+ data = data["TABLE_intf"]
+ if data:
+ if isinstance(data, dict):
+ data = [data]
+ for item in data:
+ name = item["ROW_intf"]["intf-name"]
+ intf = self.facts["interfaces"][name]
+ intf["ipv6"] = self.transform_dict(item, self.INTERFACE_IPV6_MAP)
+ try:
+ addr = item["ROW_intf"]["addr"]
+ except KeyError:
+ addr = item["ROW_intf"]["TABLE_addr"]["ROW_addr"]["addr"]
+ self.facts["all_ipv6_addresses"].append(addr)
+ else:
+ return ""
+ except TypeError:
+ return ""
+
+ def populate_structured_neighbors_lldp(self, data):
+ objects = dict()
+ data = data["TABLE_nbor"]["ROW_nbor"]
+
+ if isinstance(data, dict):
+ data = [data]
+
+ for item in data:
+ local_intf = normalize_interface(item["l_port_id"])
+ objects[local_intf] = list()
+ nbor = dict()
+ nbor["port"] = item["port_id"]
+ nbor["host"] = nbor["sysname"] = item["chassis_id"]
+ objects[local_intf].append(nbor)
+
+ return objects
+
+ def populate_structured_neighbors_cdp(self, data):
+ objects = dict()
+ data = data["TABLE_cdp_neighbor_detail_info"]["ROW_cdp_neighbor_detail_info"]
+
+ if isinstance(data, dict):
+ data = [data]
+
+ for item in data:
+ if "intf_id" in item:
+ local_intf = item["intf_id"]
+ else:
+ # in some N7Ks the key has been renamed
+ local_intf = item["interface"]
+ objects[local_intf] = list()
+ nbor = dict()
+ nbor["port"] = item["port_id"]
+ nbor["host"] = nbor["sysname"] = item["device_id"]
+ objects[local_intf].append(nbor)
+
+ return objects
+
+ def parse_interfaces(self, data):
+ parsed = dict()
+ key = ""
+ for line in data.split("\n"):
+ if len(line) == 0:
+ continue
+ elif line.startswith("admin") or line[0] == " ":
+ parsed[key] += "\n%s" % line
+ else:
+ match = re.match(r"^(\S+)", line)
+ if match:
+ key = match.group(1)
+ if not key.startswith("admin") or not key.startswith("IPv6 Interface"):
+ parsed[key] = line
+ return parsed
+
+ def populate_interfaces(self, interfaces):
+ facts = dict()
+ for key, value in iteritems(interfaces):
+ intf = dict()
+ if get_interface_type(key) == "svi":
+ intf["state"] = self.parse_state(key, value, intf_type="svi")
+ intf["macaddress"] = self.parse_macaddress(value, intf_type="svi")
+ intf["mtu"] = self.parse_mtu(value, intf_type="svi")
+ intf["bandwidth"] = self.parse_bandwidth(value, intf_type="svi")
+ intf["type"] = self.parse_type(value, intf_type="svi")
+ if "Internet Address" in value:
+ intf["ipv4"] = self.parse_ipv4_address(value, intf_type="svi")
+ facts[key] = intf
+ else:
+ intf["state"] = self.parse_state(key, value)
+ intf["description"] = self.parse_description(value)
+ intf["macaddress"] = self.parse_macaddress(value)
+ intf["mode"] = self.parse_mode(value)
+ intf["mtu"] = self.parse_mtu(value)
+ intf["bandwidth"] = self.parse_bandwidth(value)
+ intf["duplex"] = self.parse_duplex(value)
+ intf["speed"] = self.parse_speed(value)
+ intf["type"] = self.parse_type(value)
+ if "Internet Address" in value:
+ intf["ipv4"] = self.parse_ipv4_address(value)
+ facts[key] = intf
+
+ return facts
+
+ def parse_state(self, key, value, intf_type="ethernet"):
+ match = None
+ if intf_type == "svi":
+ match = re.search(r"line protocol is\s*(\S+)", value, re.M)
+ else:
+ match = re.search(r"%s is\s*(\S+)" % key, value, re.M)
+
+ if match:
+ return match.group(1)
+
+ def parse_macaddress(self, value, intf_type="ethernet"):
+ match = None
+ if intf_type == "svi":
+ match = re.search(r"address is\s*(\S+)", value, re.M)
+ else:
+ match = re.search(r"address:\s*(\S+)", value, re.M)
+
+ if match:
+ return match.group(1)
+
+ def parse_mtu(self, value, intf_type="ethernet"):
+ match = re.search(r"MTU\s*(\S+)", value, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_bandwidth(self, value, intf_type="ethernet"):
+ match = re.search(r"BW\s*(\S+)", value, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_type(self, value, intf_type="ethernet"):
+ match = None
+ if intf_type == "svi":
+ match = re.search(r"Hardware is\s*(\S+)", value, re.M)
+ else:
+ match = re.search(r"Hardware:\s*(.+),", value, re.M)
+
+ if match:
+ return match.group(1)
+
+ def parse_description(self, value, intf_type="ethernet"):
+ match = re.search(r"Description: (.+)$", value, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_mode(self, value, intf_type="ethernet"):
+ match = re.search(r"Port mode is (\S+)", value, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_duplex(self, value, intf_type="ethernet"):
+ match = re.search(r"(\S+)-duplex", value, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_speed(self, value, intf_type="ethernet"):
+ match = re.search(r"duplex, (.+)$", value, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_ipv4_address(self, value, intf_type="ethernet"):
+ ipv4 = {}
+ match = re.search(r"Internet Address is (.+)$", value, re.M)
+ if match:
+ address = match.group(1)
+ addr = address.split("/")[0]
+ ipv4["address"] = address.split("/")[0]
+ ipv4["masklen"] = address.split("/")[1]
+ self.facts["all_ipv4_addresses"].append(addr)
+ return ipv4
+
+ def populate_neighbors(self, data):
+ objects = dict()
+ # if there are no neighbors the show command returns
+ # ERROR: No neighbour information
+ if data.startswith("ERROR"):
+ return dict()
+
+ regex = re.compile(r"(\S+)\s+(\S+)\s+\d+\s+\w+\s+(\S+)")
+
+ for item in data.split("\n")[4:-1]:
+ match = regex.match(item)
+ if match:
+ nbor = dict()
+ nbor["host"] = nbor["sysname"] = match.group(1)
+ nbor["port"] = match.group(3)
+ local_intf = normalize_interface(match.group(2))
+ if local_intf not in objects:
+ objects[local_intf] = []
+ objects[local_intf].append(nbor)
+
+ return objects
+
+ def populate_neighbors_cdp(self, data):
+ facts = dict()
+
+ for item in data.split("----------------------------------------"):
+ if item == "":
+ continue
+ local_intf = self.parse_lldp_intf(item)
+ if local_intf not in facts:
+ facts[local_intf] = list()
+
+ fact = dict()
+ fact["port"] = self.parse_lldp_port(item)
+ fact["sysname"] = self.parse_lldp_sysname(item)
+ facts[local_intf].append(fact)
+
+ return facts
+
+ def parse_lldp_intf(self, data):
+ match = re.search(r"Interface:\s*(\S+)", data, re.M)
+ if match:
+ return match.group(1).strip(",")
+
+ def parse_lldp_port(self, data):
+ match = re.search(r"Port ID \(outgoing port\):\s*(\S+)", data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_lldp_sysname(self, data):
+ match = re.search(r"Device ID:(.+)$", data, re.M)
+ if match:
+ return match.group(1)
+
+ def populate_ipv6_interfaces(self, interfaces):
+ facts = dict()
+ for key, value in iteritems(interfaces):
+ intf = dict()
+ intf["ipv6"] = self.parse_ipv6_address(value)
+ facts[key] = intf
+
+ def parse_ipv6_address(self, value):
+ ipv6 = {}
+ match_addr = re.search(r"IPv6 address:\s*(\S+)", value, re.M)
+ if match_addr:
+ addr = match_addr.group(1)
+ ipv6["address"] = addr
+ self.facts["all_ipv6_addresses"].append(addr)
+ match_subnet = re.search(r"IPv6 subnet:\s*(\S+)", value, re.M)
+ if match_subnet:
+ ipv6["subnet"] = match_subnet.group(1)
+
+ return ipv6
+
+
+class Legacy(FactsBase):
+ # facts from nxos_facts 2.1
+
+ VERSION_MAP = frozenset(
+ [
+ ("host_name", "_hostname"),
+ ("kickstart_ver_str", "_os"),
+ ("chassis_id", "_platform"),
+ ],
+ )
+
+ MODULE_MAP = frozenset(
+ [
+ ("model", "model"),
+ ("modtype", "type"),
+ ("ports", "ports"),
+ ("status", "status"),
+ ],
+ )
+
+ FAN_MAP = frozenset(
+ [
+ ("fanname", "name"),
+ ("fanmodel", "model"),
+ ("fanhwver", "hw_ver"),
+ ("fandir", "direction"),
+ ("fanstatus", "status"),
+ ],
+ )
+
+ POWERSUP_MAP = frozenset(
+ [
+ ("psmodel", "model"),
+ ("psnum", "number"),
+ ("ps_status", "status"),
+ ("ps_status_3k", "status"),
+ ("actual_out", "actual_output"),
+ ("actual_in", "actual_in"),
+ ("total_capa", "total_capacity"),
+ ("input_type", "input_type"),
+ ("watts", "watts"),
+ ("amps", "amps"),
+ ],
+ )
+
+ def populate(self):
+ data = None
+
+ data = self.run("show version", output="json")
+ if data:
+ if isinstance(data, dict):
+ self.facts.update(self.transform_dict(data, self.VERSION_MAP))
+ else:
+ self.facts["hostname"] = self.parse_hostname(data)
+ self.facts["os"] = self.parse_os(data)
+ self.facts["platform"] = self.parse_platform(data)
+
+ data = self.run("show interface", output="json")
+ if data:
+ if isinstance(data, dict):
+ self.facts["interfaces_list"] = self.parse_structured_interfaces(data)
+ else:
+ self.facts["interfaces_list"] = self.parse_interfaces(data)
+
+ data = self.run("show vlan brief", output="json")
+ if data:
+ if isinstance(data, dict):
+ self.facts["vlan_list"] = self.parse_structured_vlans(data)
+ else:
+ self.facts["vlan_list"] = self.parse_vlans(data)
+
+ data = self.run("show module", output="json")
+ if data:
+ if isinstance(data, dict):
+ self.facts["module"] = self.parse_structured_module(data)
+ else:
+ self.facts["module"] = self.parse_module(data)
+
+ data = self.run("show environment fan", output="json")
+ if data:
+ if isinstance(data, dict):
+ self.facts["fan_info"] = self.parse_structured_fan_info(data)
+ else:
+ self.facts["fan_info"] = self.parse_fan_info(data)
+
+ data = self.run("show environment power", output="json")
+ if data:
+ if isinstance(data, dict):
+ self.facts["power_supply_info"] = self.parse_structured_power_supply_info(data)
+ else:
+ self.facts["power_supply_info"] = self.parse_power_supply_info(data)
+
+ def parse_structured_interfaces(self, data):
+ objects = list()
+ data = data["TABLE_interface"]["ROW_interface"]
+ if isinstance(data, dict):
+ objects.append(data["interface"])
+ elif isinstance(data, list):
+ for item in data:
+ objects.append(item["interface"])
+ return objects
+
+ def parse_structured_vlans(self, data):
+ objects = list()
+ data = data["TABLE_vlanbriefxbrief"]["ROW_vlanbriefxbrief"]
+ if isinstance(data, dict):
+ objects.append(data["vlanshowbr-vlanid-utf"])
+ elif isinstance(data, list):
+ for item in data:
+ objects.append(item["vlanshowbr-vlanid-utf"])
+ return objects
+
+ def parse_structured_module(self, data):
+ modinfo = data["TABLE_modinfo"]
+ if isinstance(modinfo, dict):
+ modinfo = [modinfo]
+
+ objects = []
+ for entry in modinfo:
+ entry = entry["ROW_modinfo"]
+ if isinstance(entry, dict):
+ entry = [entry]
+ entry_objects = list(self.transform_iterable(entry, self.MODULE_MAP))
+ objects.extend(entry_objects)
+ return objects
+
+ def parse_structured_fan_info(self, data):
+ objects = list()
+
+ for key in ("fandetails", "fandetails_3k"):
+ if data.get(key):
+ try:
+ data = data[key]["TABLE_faninfo"]["ROW_faninfo"]
+ except KeyError:
+ # Some virtual images don't actually report faninfo. In this case, move on and
+ # just return an empty list.
+ pass
+ else:
+ objects = list(self.transform_iterable(data, self.FAN_MAP))
+ break
+
+ return objects
+
+ def parse_structured_power_supply_info(self, data):
+ ps_data = data.get("powersup", {})
+ if ps_data.get("TABLE_psinfo_n3k"):
+ fact = ps_data["TABLE_psinfo_n3k"]["ROW_psinfo_n3k"]
+ else:
+ # {TABLE,ROW}_psinfo keys have been renamed to
+ # {TABLE,ROW}_ps_info in later NX-OS releases
+ tab_key, row_key = "TABLE_psinfo", "ROW_psinfo"
+ if tab_key not in ps_data:
+ tab_key, row_key = "TABLE_ps_info", "ROW_ps_info"
+
+ ps_tab_data = ps_data[tab_key]
+
+ if isinstance(ps_tab_data, list):
+ fact = []
+ for i in ps_tab_data:
+ fact.append(i[row_key])
+ else:
+ fact = ps_tab_data[row_key]
+
+ objects = list(self.transform_iterable(fact, self.POWERSUP_MAP))
+ return objects
+
+ def parse_hostname(self, data):
+ match = re.search(r"\s+Device name:\s+(\S+)", data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_os(self, data):
+ match = re.search(r"\s+system:\s+version\s*(\S+)", data, re.M)
+ if match:
+ return match.group(1)
+ else:
+ match = re.search(r"\s+kickstart:\s+version\s*(\S+)", data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_platform(self, data):
+ match = re.search(r"Hardware\n\s+cisco\s+(\S+\s+\S+)", data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_interfaces(self, data):
+ objects = list()
+ for line in data.split("\n"):
+ if len(line) == 0:
+ continue
+ elif line.startswith("admin") or line[0] == " ":
+ continue
+ else:
+ match = re.match(r"^(\S+)", line)
+ if match:
+ intf = match.group(1)
+ if get_interface_type(intf) != "unknown":
+ objects.append(intf)
+ return objects
+
+ def parse_vlans(self, data):
+ objects = list()
+ for line in data.splitlines():
+ if line == "":
+ continue
+ if line[0].isdigit():
+ vlan = line.split()[0]
+ objects.append(vlan)
+ return objects
+
+ def parse_module(self, data):
+ objects = list()
+ for line in data.splitlines():
+ if line == "":
+ break
+ if line[0].isdigit():
+ obj = {}
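+                # The first column is the slot number and the second the port count;
+                # the remainder of the row holds type, model and status.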
+ match_port = re.search(r"\d\s*(\d*)", line, re.M)
+ if match_port:
+ obj["ports"] = match_port.group(1)
+
+ match = re.search(r"\d\s*\d*\s*(.+)$", line, re.M)
+ if match:
+ l = match.group(1).split(" ")
+ items = list()
+ for item in l:
+ if item == "":
+ continue
+ items.append(item.strip())
+
+ if items:
+ obj["type"] = items[0]
+ obj["model"] = items[1]
+ obj["status"] = items[2]
+
+ objects.append(obj)
+ return objects
+
+ def parse_fan_info(self, data):
+ objects = list()
+
+ for l in data.splitlines():
+ if "-----------------" in l or "Status" in l:
+ continue
+ line = l.split()
+ if len(line) > 1:
+ obj = {}
+ obj["name"] = line[0]
+ obj["model"] = line[1]
+ obj["hw_ver"] = line[-2]
+ obj["status"] = line[-1]
+ objects.append(obj)
+ return objects
+
+ def parse_power_supply_info(self, data):
+ objects = list()
+
+ for l in data.splitlines():
+ if l == "":
+ break
+ if l[0].isdigit():
+ obj = {}
+ line = l.split()
+ obj["model"] = line[1]
+ obj["number"] = line[0]
+ obj["status"] = line[-1]
+
+ objects.append(obj)
+ return objects
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_global/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_global/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_global/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_global/lldp_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_global/lldp_global.py
new file mode 100644
index 00000000..6e8aabde
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_global/lldp_global.py
@@ -0,0 +1,107 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos lldp_global fact class
+It is in this file that the configuration is collected from the
+device for a given resource, parsed, and the facts tree is
+populated based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.lldp_global.lldp_global import (
+ Lldp_globalArgs,
+)
+
+
+class Lldp_globalFacts(object):
+ """The nxos lldp_global fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Lldp_globalArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for lldp_global
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+
+ if not data:
+ data = connection.get("show running-config | include lldp")
+
+ objs = {}
+ objs = self.render_config(self.generated_spec, data)
+ ansible_facts["ansible_network_resources"].pop("lldp_global", None)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ facts["lldp_global"] = params["config"]
+ facts = utils.remove_empties(facts)
+ ansible_facts["ansible_network_resources"].update((facts))
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+ conf = re.split("\n", conf)
+ for command in conf:
+ param = re.search(r"(.*)lldp (\w+(-?)\w+)", command) # get the word after 'lldp'
+ if param:
+ # get the nested-dict/value for that param
+ key2 = re.search(r"%s(.*)" % param.group(2), command)
+ key2 = key2.group(1).strip()
+ key1 = param.group(2).replace("-", "_")
+
+ if key1 == "portid_subtype":
+ key1 = "port_id"
+ config[key1] = key2
+ elif key1 == "tlv_select":
+ key2 = key2.split()
+ key2[0] = key2[0].replace("-", "_")
+ if len(key2) == 1:
+ if "port" in key2[0] or "system" in key2[0]: # nested dicts
+ key2 = key2[0].split("_")
+ # config[tlv_select][system][name]=False
+ config[key1][key2[0]][key2[1]] = False
+ else:
+ # config[tlv_select][dcbxp]=False
+ config[key1][key2[0]] = False
+ else:
+ # config[tlv_select][management_address][v6]=False
+ config[key1][key2[0]][key2[1]] = False
+ else:
+ config[key1] = key2 # config[reinit]=4
+ return utils.remove_empties(config)
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_interfaces/lldp_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_interfaces/lldp_interfaces.py
new file mode 100644
index 00000000..4cd8b211
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/lldp_interfaces/lldp_interfaces.py
@@ -0,0 +1,128 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos lldp_interfaces fact class
+It is in this file that the configuration is collected from the
+device for a given resource, parsed, and the facts tree is
+populated based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.lldp_interfaces.lldp_interfaces import (
+ Lldp_interfacesArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ get_interface_type,
+)
+
+
+class Lldp_interfacesFacts(object):
+ """The nxos lldp_interfaces fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Lldp_interfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def get_device_data(self, connection):
+ return connection.get("show running-config | section ^interface")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for lldp_interfaces
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ data = self.get_device_data(connection)
+
+ objs = []
+
+ data = data.split("interface")
+ resources = []
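+        # Keep each interface heading plus only its lldp lines; everything else is
+        # blanked out and filtered away before rendering.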
+
+ for i in range(len(data)):
+ intf = data[i].split("\n")
+ for l in range(1, len(intf)):
+ if not re.search("lldp", intf[l]):
+ intf[l] = ""
+ intf = list(filter(None, intf))
+ intf = "".join(i for i in intf)
+ resources.append(intf)
+
+ for resource in resources:
+ if resource: # and re.search(r'lldp', resource):
+ obj = self.render_config(self.generated_spec, resource)
+ if obj and len(obj.keys()) >= 1:
+ objs.append(obj)
+
+ ansible_facts["ansible_network_resources"].pop("lldp_interfaces", None)
+ facts = {}
+ if objs:
+ facts["lldp_interfaces"] = []
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ for cfg in params["config"]:
+ facts["lldp_interfaces"].append(utils.remove_empties(cfg))
+
+ ansible_facts["ansible_network_resources"].update(facts)
+
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+ match = re.search(r"^ (\S+)", conf)
+ if match is None:
+ return {}
+ intf = match.group(1)
+ if get_interface_type(intf) not in ["management", "ethernet"]:
+ return {}
+ config["name"] = intf
+ if "lldp receive" in conf: # for parsed state only
+ config["receive"] = True
+ if "no lldp receive" in conf:
+ config["receive"] = False
+
+ if "lldp transmit" in conf: # for parsed state only
+ config["transmit"] = True
+ if "no lldp transmit" in conf:
+ config["transmit"] = False
+ if "management-address" in conf:
+ config["tlv_set"]["management_address"] = re.search(
+ r"management-address (\S*)",
+ conf,
+ ).group(1)
+ if "vlan" in conf:
+ config["tlv_set"]["vlan"] = re.search(r"vlan (\S*)", conf).group(1)
+ return utils.remove_empties(config)
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/logging_global/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/logging_global/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/logging_global/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/logging_global/logging_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/logging_global/logging_global.py
new file mode 100644
index 00000000..189db0e4
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/logging_global/logging_global.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos logging_global fact class
+It is in this file that the configuration is collected from the
+device for a given resource, parsed, and the facts tree is
+populated based on the configuration.
+"""
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.logging_global.logging_global import (
+ Logging_globalArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.logging_global import (
+ Logging_globalTemplate,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
+ get_logging_sevmap,
+)
+
+
+class Logging_globalFacts(object):
+ """The nxos logging_global facts class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Logging_globalArgs.argument_spec
+
+ def get_config(self, connection):
+ """Wrapper method for `connection.get()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return connection.get("show running-config | include logging")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for Logging_global network resource
+
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+
+ :rtype: dictionary
+ :returns: facts
+ """
+ facts = {}
+ objs = []
+ sev_map = get_logging_sevmap()
+
+ if not data:
+ data = self.get_config(connection)
+
+ # parse native config using the Logging_global template
+ logging_global_parser = Logging_globalTemplate(lines=data.splitlines(), module=self._module)
+ objs = logging_global_parser.parse()
+
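+        # Normalize severity values through the shared severity map before validation.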
+ if objs:
+ for k in ("console", "history", "logfile", "module", "monitor"):
+ if "severity" in objs.get(k, {}):
+ objs[k]["severity"] = sev_map[objs[k]["severity"]]
+ # pre-sort list of dictionaries
+ pkey = {"hosts": "host", "facilities": "facility"}
+ for x in ("hosts", "facilities"):
+ if x in objs:
+ for item in objs[x]:
+ if "severity" in item:
+ item["severity"] = sev_map[item["severity"]]
+ objs[x] = sorted(objs[x], key=lambda k: k[pkey[x]])
+
+ ansible_facts["ansible_network_resources"].pop("logging_global", None)
+
+ params = utils.remove_empties(
+ logging_global_parser.validate_config(
+ self.argument_spec,
+ {"config": objs},
+ redact=True,
+ ),
+ )
+
+ facts["logging_global"] = params.get("config", {})
+ ansible_facts["ansible_network_resources"].update(facts)
+
+ return ansible_facts
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ntp_global/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ntp_global/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ntp_global/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ntp_global/ntp_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ntp_global/ntp_global.py
new file mode 100644
index 00000000..258b68aa
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ntp_global/ntp_global.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos ntp_global fact class
+It is in this file that the configuration is collected from the
+device for a given resource, parsed, and the facts tree is
+populated based on the configuration.
+"""
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.ntp_global.ntp_global import (
+ Ntp_globalArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.ntp_global import (
+ Ntp_globalTemplate,
+)
+
+
+class Ntp_globalFacts(object):
+ """The nxos ntp_global facts class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Ntp_globalArgs.argument_spec
+
+ def get_config(self, connection):
+ """Wrapper method for `connection.get()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return connection.get("show running-config ntp")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for Ntp_global network resource
+
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+
+ :rtype: dictionary
+ :returns: facts
+ """
+ facts = {}
+ objs = []
+
+ if not data:
+ data = self.get_config(connection)
+
+ # parse native config using the Ntp_global template
+ ntp_global_parser = Ntp_globalTemplate(lines=data.splitlines(), module=self._module)
+ objs = ntp_global_parser.parse()
+
+ if "access_group" in objs:
+ for x in ["peer", "query_only", "serve", "serve_only"]:
+ if x in objs["access_group"]:
+ objs["access_group"][x] = sorted(
+ objs["access_group"][x],
+ key=lambda k: k["access_list"],
+ )
+
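+        # Sort each list of dictionaries on a stable key so the generated facts are
+        # deterministic.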
+ pkey = {
+ "authentication_keys": "id",
+ "peers": "peer",
+ "servers": "server",
+ "trusted_keys": "key_id",
+ }
+
+ for x in pkey.keys():
+ if x in objs:
+ objs[x] = sorted(objs[x], key=lambda k: k[pkey[x]])
+
+ ansible_facts["ansible_network_resources"].pop("ntp_global", None)
+
+ params = utils.remove_empties(
+ ntp_global_parser.validate_config(self.argument_spec, {"config": objs}, redact=True),
+ )
+
+ facts["ntp_global"] = params.get("config", {})
+ ansible_facts["ansible_network_resources"].update(facts)
+
+ return ansible_facts
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospf_interfaces/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospf_interfaces/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospf_interfaces/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospf_interfaces/ospf_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospf_interfaces/ospf_interfaces.py
new file mode 100644
index 00000000..745f373b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospf_interfaces/ospf_interfaces.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos ospf_interfaces fact class
+It is in this file that the configuration is collected from the
+device for a given resource, parsed, and the facts tree is
+populated based on the configuration.
+"""
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.ospf_interfaces.ospf_interfaces import (
+ Ospf_interfacesArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.ospf_interfaces import (
+ Ospf_interfacesTemplate,
+)
+
+
+class Ospf_interfacesFacts(object):
+ """The nxos ospf_interfaces facts class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Ospf_interfacesArgs.argument_spec
+
+ def get_config(self, connection):
+ """Wrapper method for `connection.get()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return connection.get("show running-config | section '^interface'")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for Ospf_interfaces network resource
+
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+
+ :rtype: dictionary
+ :returns: facts
+ """
+ facts = {}
+ objs = []
+
+ if not data:
+ data = self.get_config(connection)
+
+ # parse native config using the Ospf_interfaces template
+ ospf_interfaces_parser = Ospf_interfacesTemplate(
+ lines=data.splitlines(),
+ module=self._module,
+ )
+ objs = list(ospf_interfaces_parser.parse().values())
+ if objs:
+ for item in objs:
+ item["address_family"] = list(item["address_family"].values())
+ if "address_family" in item:
+ for af in item["address_family"]:
+ if af.get("processes"):
+ af["processes"] = list(af["processes"].values())
+ if af.get("multi_areas"):
+ af["multi_areas"].sort()
+ item["address_family"] = sorted(item["address_family"], key=lambda i: i["afi"])
+
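+            # Order interfaces naturally by the numeric parts of their names
+            # (e.g. Ethernet1/2 before Ethernet1/10).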
+ objs = sorted(
+ objs,
+ key=lambda i: [
+ int(k) if k.isdigit() else k for k in i["name"].replace(".", "/").split("/")
+ ],
+ )
+
+ ansible_facts["ansible_network_resources"].pop("ospf_interfaces", None)
+
+ params = utils.remove_empties(
+ ospf_interfaces_parser.validate_config(
+ self.argument_spec,
+ {"config": objs},
+ redact=True,
+ ),
+ )
+
+ facts["ospf_interfaces"] = params.get("config", [])
+ ansible_facts["ansible_network_resources"].update(facts)
+
+ return ansible_facts
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv2/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv2/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv2/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv2/ospfv2.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv2/ospfv2.py
new file mode 100644
index 00000000..248f3ed4
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv2/ospfv2.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos ospfv2 fact class
+It is in this file that the configuration is collected from the
+device for a given resource, parsed, and the facts tree is
+populated based on the configuration.
+"""
+from copy import deepcopy
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.ospfv2.ospfv2 import (
+ Ospfv2Args,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.ospfv2 import (
+ Ospfv2Template,
+)
+
+
+class Ospfv2Facts(object):
+ """The nxos snmp fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Ospfv2Args.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def get_config(self, connection):
+ """Wrapper method for `connection.get()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return connection.get("show running-config | section '^router ospf .*'")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for interfaces
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ data = self.get_config(connection)
+
+ ipv4 = {"processes": []}
+ for section in data.split("router "):
+ rmmod = Ospfv2Template(lines=section.splitlines())
+ entry = rmmod.parse()
+
+ if entry:
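+                # Config parsed under the empty "vrf_" key is process-level (non-VRF)
+                # config; merge it into the top level of the entry.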
+ global_vals = entry.get("vrfs", {}).pop("vrf_", {})
+ for key, value in iteritems(global_vals):
+ entry[key] = value
+
+ if "vrfs" in entry:
+ entry["vrfs"] = list(entry["vrfs"].values())
+
+ for vrf in entry["vrfs"]:
+ if "areas" in vrf:
+ vrf["areas"] = list(vrf["areas"].values())
+
+ if "areas" in entry:
+ entry["areas"] = list(entry["areas"].values())
+
+ ipv4["processes"].append(entry)
+
+ ansible_facts["ansible_network_resources"].pop("ospfv2", None)
+ facts = {}
+ params = utils.validate_config(self.argument_spec, {"config": ipv4})
+ params = utils.remove_empties(params)
+
+ facts["ospfv2"] = params.get("config", [])
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv3/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv3/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv3/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv3/ospfv3.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv3/ospfv3.py
new file mode 100644
index 00000000..796dacfb
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/ospfv3/ospfv3.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos ospfv3 fact class
+It is in this file that the configuration is collected from the
+device for a given resource, parsed, and the facts tree is
+populated based on the configuration.
+"""
+
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.ospfv3.ospfv3 import (
+ Ospfv3Args,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.ospfv3 import (
+ Ospfv3Template,
+)
+
+
+class Ospfv3Facts(object):
+ """The nxos ospfv3 facts class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Ospfv3Args.argument_spec
+
+ def get_config(self, connection):
+ """Wrapper method for `connection.get()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return connection.get("show running-config | section '^router ospfv3'")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for Ospfv3 network resource
+
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ data = self.get_config(connection)
+
+ ipv6 = {"processes": []}
+ for section in data.split("router "):
+ rmmod = Ospfv3Template(lines=section.splitlines())
+ entry = rmmod.parse()
+
+ if entry:
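+                # Config parsed under the empty "vrf_" key is process-level (non-VRF)
+                # config; merge it into the top level of the entry.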
+ global_vals = entry.get("vrfs", {}).pop("vrf_", {})
+ for key, value in iteritems(global_vals):
+ entry[key] = value
+
+ if "vrfs" in entry:
+ entry["vrfs"] = list(entry["vrfs"].values())
+
+ for vrf in entry["vrfs"]:
+ if "areas" in vrf:
+ vrf["areas"] = list(vrf["areas"].values())
+
+ if "areas" in entry:
+ entry["areas"] = list(entry["areas"].values())
+
+ if "address_family" in entry:
+ if "areas" in entry["address_family"]:
+ entry["address_family"]["areas"] = list(
+ entry["address_family"]["areas"].values(),
+ )
+
+ ipv6["processes"].append(entry)
+
+ ansible_facts["ansible_network_resources"].pop("ospfv3", None)
+ facts = {}
+ params = utils.validate_config(self.argument_spec, {"config": ipv6})
+ params = utils.remove_empties(params)
+
+ facts["ospfv3"] = params.get("config", [])
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/prefix_lists/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/prefix_lists/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/prefix_lists/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/prefix_lists/prefix_lists.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/prefix_lists/prefix_lists.py
new file mode 100644
index 00000000..7bb18631
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/prefix_lists/prefix_lists.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos prefix_lists fact class
+It is in this file that the configuration is collected from the
+device for a given resource, parsed, and the facts tree is
+populated based on the configuration.
+"""
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.prefix_lists.prefix_lists import (
+ Prefix_listsArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.prefix_lists import (
+ Prefix_listsTemplate,
+)
+
+
+class Prefix_listsFacts(object):
+ """The nxos prefix_lists facts class"""
+
+ def __init__(self, module):
+ self._module = module
+ self.argument_spec = Prefix_listsArgs.argument_spec
+
+ def get_config(self, connection):
+ """Wrapper method for `connection.get()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return connection.get("show running-config | section 'ip(.*) prefix-list'")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for Prefix_lists network resource
+
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+
+ :rtype: dictionary
+ :returns: facts
+ """
+ facts = {}
+ objs = []
+ if not data:
+ data = self.get_config(connection)
+
+ # parse native config using the Prefix_lists template
+ prefix_lists_parser = Prefix_listsTemplate(lines=data.splitlines(), module=self._module)
+
+ objs = list(prefix_lists_parser.parse().values())
+ if objs:
+ # pre-sort lists of dictionaries
+ for item in objs:
+ item["prefix_lists"] = sorted(
+ list(item["prefix_lists"].values()),
+ key=lambda k: k["name"],
+ )
+ for x in item["prefix_lists"]:
+ if "entries" in x:
+ x["entries"] = sorted(x["entries"], key=lambda k: k["sequence"])
+ objs = sorted(objs, key=lambda k: k["afi"])
+
+ ansible_facts["ansible_network_resources"].pop("prefix_lists", None)
+ params = utils.remove_empties(
+ prefix_lists_parser.validate_config(self.argument_spec, {"config": objs}, redact=True),
+ )
+ facts["prefix_lists"] = params.get("config", [])
+ ansible_facts["ansible_network_resources"].update(facts)
+
+ return ansible_facts
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/route_maps/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/route_maps/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/route_maps/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/route_maps/route_maps.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/route_maps/route_maps.py
new file mode 100644
index 00000000..d1bad913
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/route_maps/route_maps.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos route_maps fact class
+It is in this file that the configuration is collected from the
+device for a given resource, parsed, and the facts tree is
+populated based on the configuration.
+"""
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.route_maps.route_maps import (
+ Route_mapsArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.route_maps import (
+ Route_mapsTemplate,
+)
+
+
+class Route_mapsFacts(object):
+ """The nxos route_maps facts class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Route_mapsArgs.argument_spec
+
+ def get_config(self, connection):
+ """Wrapper method for `connection.get()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return connection.get("show running-config | section '^route-map'")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for Route_maps network resource
+
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+
+ :rtype: dictionary
+ :returns: facts
+ """
+ facts = {}
+ objs = []
+
+ if not data:
+ data = self.get_config(connection)
+
+ # parse native config using the Route_maps template
+ route_maps_parser = Route_mapsTemplate(lines=data.splitlines(), module=self._module)
+
+ objs = list(route_maps_parser.parse().values())
+
+ for item in objs:
+ item["entries"] = list(item["entries"].values())
+
+ ansible_facts["ansible_network_resources"].pop("route_maps", None)
+
+ params = utils.remove_empties(
+ route_maps_parser.validate_config(self.argument_spec, {"config": objs}, redact=True),
+ )
+
+ facts["route_maps"] = params.get("config", [])
+ ansible_facts["ansible_network_resources"].update(facts)
+
+ return ansible_facts
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/snmp_server/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/snmp_server/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/snmp_server/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/snmp_server/snmp_server.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/snmp_server/snmp_server.py
new file mode 100644
index 00000000..c46e1a78
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/snmp_server/snmp_server.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The nxos snmp_server fact class
+It is in this file that the configuration is collected from the
+device for a given resource, parsed, and the facts tree is
+populated based on the configuration.
+"""
+from ansible.module_utils._text import to_text
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.snmp_server.snmp_server import (
+ Snmp_serverArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.snmp_server import (
+ Snmp_serverTemplate,
+)
+
+
+class Snmp_serverFacts(object):
+ """The nxos snmp_server facts class"""
+
+ def __init__(self, module):
+ self._module = module
+ self.argument_spec = Snmp_serverArgs.argument_spec
+
+ def get_config(self, connection):
+ """Wrapper method for `connection.get()`
+ This method exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return connection.get("show running-config | section '^snmp-server'")
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for Snmp_server network resource
+
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+
+ :rtype: dictionary
+ :returns: facts
+ """
+ facts = {}
+ objs = []
+
+ if not data:
+ data = self.get_config(connection)
+
+ # parse native config using the Snmp_server template
+ snmp_server_parser = Snmp_serverTemplate(lines=data.splitlines(), module=self._module)
+ objs = snmp_server_parser.parse()
+
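+        # Sort communities and the per-user lists so the generated facts are stable
+        # across runs.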
+ if "communities" in objs:
+ objs["communities"] = sorted(objs["communities"], key=lambda k: to_text(k["name"]))
+
+ if "users" in objs:
+ if "auth" in objs["users"]:
+ objs["users"]["auth"] = sorted(
+ objs["users"]["auth"],
+ key=lambda k: to_text(k["user"]),
+ )
+ if "use_acls" in objs["users"]:
+ objs["users"]["use_acls"] = sorted(
+ objs["users"]["use_acls"],
+ key=lambda k: to_text(k["user"]),
+ )
+
+ ansible_facts["ansible_network_resources"].pop("snmp_server", None)
+
+ params = utils.remove_empties(
+ snmp_server_parser.validate_config(self.argument_spec, {"config": objs}, redact=True),
+ )
+
+ facts["snmp_server"] = params.get("config", {})
+ ansible_facts["ansible_network_resources"].update(facts)
+
+ return ansible_facts
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/static_routes/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/static_routes/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/static_routes/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/static_routes/static_routes.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/static_routes/static_routes.py
new file mode 100644
index 00000000..b62d2580
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/static_routes/static_routes.py
@@ -0,0 +1,230 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+The nxos static_routes fact class
+It is in this file that the configuration is collected from the
+device for a given resource, parsed, and the facts tree is
+populated based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.static_routes.static_routes import (
+ Static_routesArgs,
+)
+
+
+class Static_routesFacts(object):
+ """The nxos static_routes fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Static_routesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def get_device_data(self, connection, data):
+ vrf_data = []
+ non_vrf_data = []
+ if not data:
+ non_vrf_data = connection.get("show running-config | include '^ip(v6)* route'")
+ vrf_data = connection.get("show running-config | section '^vrf context'")
+ if non_vrf_data:
+ non_vrf_data = non_vrf_data.split("\n")
+ else:
+ non_vrf_data = []
+ vrf_data = vrf_data.split("\nvrf context")
+            # splitting on 'vrf context' strips that prefix from every block except the first
+ else:
+ # used for parsed state where data is from the 'running-config' key
+ data = data.split("\n")
+ i = 0
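+            # Group the lines that follow each 'vrf context' heading into one block;
+            # all other lines are treated as global configuration.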
+ while i <= (len(data) - 1):
+ if "vrf context " in data[i]:
+ vrf_conf = data[i]
+ j = i + 1
+ while j < len(data) and "vrf context " not in data[j]:
+ vrf_conf += "\n" + data[j]
+ j += 1
+ i = j
+ vrf_data.append(vrf_conf)
+ else:
+ non_vrf_data.append(data[i])
+ i += 1
+
+ new_vrf_data = []
+ for v in vrf_data:
+ if re.search(r"\n\s*ip(v6)? route", v):
+ new_vrf_data.append(v)
+            # don't consider a vrf if it does not have routes
+ for i in range(len(new_vrf_data)):
+ if not re.search("^vrf context", new_vrf_data[i]):
+ new_vrf_data[i] = "vrf context" + new_vrf_data[i]
+
+ resources = non_vrf_data + new_vrf_data
+ return resources
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for static_routes
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ objs = []
+ resources = self.get_device_data(connection, data)
+ objs = self.render_config(self.generated_spec, resources)
+ ansible_facts["ansible_network_resources"].pop("static_routes", None)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ params = utils.remove_empties(params)
+ for c in params["config"]:
+ if c == {"vrf": "default"}:
+ params["config"].remove(c)
+ facts["static_routes"] = params["config"]
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def get_inner_dict(self, conf, inner_dict):
+ """
+ This method parses the command to create the innermost dictionary of the config
+ """
+ conf = re.sub(r"\s*ip(v6)? route", "", conf)
+ # strip 'ip route'
+ inner_dict["dest"] = re.match(r"^\s*(\S+\/\d+) .*", conf).group(1)
+
+        # match an egress interface next hop, e.g. Ethernet1/2/23
+ iface = re.match(r".* (Ethernet|loopback|mgmt|port\-channel)(\S*) .*", conf)
+ i = ["Ethernet", "loopback", "mgmt", "port-channel"]
+ if iface and iface.group(1) in i:
+ inner_dict["interface"] = (iface.group(1)) + (iface.group(2))
+ conf = re.sub(inner_dict["interface"], "", conf)
+
+ if "." in inner_dict["dest"]:
+ conf = re.sub(inner_dict["dest"], "", conf)
+ inner_dict["afi"] = "ipv4"
+ ipv4 = re.match(r".* (\d+\.\d+\.\d+\.\d+\/?\d*).*", conf) # gets next hop ip
+ if ipv4:
+ inner_dict["forward_router_address"] = ipv4.group(1)
+ conf = re.sub(inner_dict["forward_router_address"], "", conf)
+ else:
+ inner_dict["afi"] = "ipv6"
+ conf = re.sub(inner_dict["dest"], "", conf)
+ ipv6 = re.match(r".* (\S*:\S*:\S*\/?\d*).*", conf)
+ if ipv6:
+ inner_dict["forward_router_address"] = ipv6.group(1)
+ conf = re.sub(inner_dict["forward_router_address"], "", conf)
+
+ nullif = re.search(r"null0", conf, re.IGNORECASE)
+ if nullif:
+ inner_dict["interface"] = "Null0"
+ inner_dict["forward_router_address"] = None
+            return inner_dict # a next-hop address is not needed for a Null0 route
+
+ keywords = ["vrf", "name", "tag", "track"]
+ for key in keywords:
+ pattern = re.match(r".* (?:%s) (\S+).*" % key, conf)
+ if pattern:
+ if key == "vrf":
+ key = "dest_vrf"
+ elif key == "name":
+ key = "route_name"
+ inner_dict[key] = pattern.group(1).strip()
+ conf = re.sub(key + " " + inner_dict[key], "", conf)
+
+ pref = re.match(r"(?:.*) (\d+)$", conf)
+ if pref:
+ # if something is left at the end without any key, it is the pref
+ inner_dict["admin_distance"] = pref.group(1)
+ return inner_dict
+
+ def get_command(self, conf, afi_list, dest_list, af):
+ inner_dict = {}
+ inner_dict = self.get_inner_dict(conf, inner_dict)
+ if inner_dict["afi"] not in afi_list:
+ af.append({"afi": inner_dict["afi"], "routes": []})
+ afi_list.append(inner_dict["afi"])
+
+ next_hop = {}
+ params = [
+ "forward_router_address",
+ "interface",
+ "admin_distance",
+ "route_name",
+ "tag",
+ "track",
+ "dest_vrf",
+ ]
+ for p in params:
+ if p in inner_dict.keys():
+ next_hop.update({p: inner_dict[p]})
+
+ if inner_dict["dest"] not in dest_list:
+ dest_list.append(inner_dict["dest"])
+ af[-1]["routes"].append({"dest": inner_dict["dest"], "next_hops": []})
+ # if 'dest' is new, create new list under 'routes'
+ af[-1]["routes"][-1]["next_hops"].append(next_hop)
+ else:
+ af[-1]["routes"][-1]["next_hops"].append(next_hop)
+ # just append if dest already exists
+ return af
+
+ def render_config(self, spec, con):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ # config=deepcopy(spec)
+ config = []
+ global_afi_list = []
+ global_af = []
+ global_dest_list = []
+ if con:
+ for conf in con:
+ if conf.startswith("vrf context"):
+ svrf = re.match(r"vrf context (\S+)\n", conf).group(1)
+ afi_list = []
+ af = []
+ dest_list = []
+ config_dict = {"vrf": svrf, "address_families": []}
+ conf = conf.split("\n")
+                    # skip the first line, which is the 'vrf context ...' header
+ conf = conf[1:]
+ for c in conf:
+ if ("ip route" in c or "ipv6 route" in c) and "bfd" not in c:
+ self.get_command(c, afi_list, dest_list, af)
+ config_dict["address_families"] = af
+ config.append(config_dict)
+ else:
+ if ("ip route" in conf or "ipv6 route" in conf) and "bfd" not in conf:
+ self.get_command(conf, global_afi_list, global_dest_list, global_af)
+ if global_af:
+ config.append(utils.remove_empties({"address_families": global_af}))
+ return config
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/telemetry/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/telemetry/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/telemetry/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/telemetry/telemetry.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/telemetry/telemetry.py
new file mode 100644
index 00000000..bdb28031
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/telemetry/telemetry.py
@@ -0,0 +1,185 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Cisco and/or its affiliates.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos telemetry fact class
+It is in this file that the configuration is collected from the
+device for a given resource, parsed, and the facts tree is
+populated based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.telemetry.telemetry import (
+ TelemetryArgs,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.cmdref.telemetry.telemetry import (
+ TMS_DESTGROUP,
+ TMS_GLOBAL,
+ TMS_SENSORGROUP,
+ TMS_SUBSCRIPTION,
+)
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import NxosCmdRef
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.telemetry.telemetry import (
+ cr_key_lookup,
+ get_instance_data,
+ normalize_data,
+)
+
+
+class TelemetryFacts(object):
+ """The nxos telemetry fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = TelemetryArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for telemetry
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if connection: # just for linting purposes, remove
+ pass
+
+ cmd_ref = {}
+ cmd_ref["TMS_GLOBAL"] = {}
+ cmd_ref["TMS_DESTGROUP"] = {}
+ cmd_ref["TMS_SENSORGROUP"] = {}
+ cmd_ref["TMS_SUBSCRIPTION"] = {}
+
+ # For fact gathering, module state should be 'present' when using
+ # NxosCmdRef to query state
+ if self._module.params.get("state"):
+ saved_module_state = self._module.params["state"]
+ self._module.params["state"] = "present"
+
+ # Get Telemetry Global Data
+ cmd_ref["TMS_GLOBAL"]["ref"] = []
+ cmd_ref["TMS_GLOBAL"]["ref"].append(NxosCmdRef(self._module, TMS_GLOBAL))
+ ref = cmd_ref["TMS_GLOBAL"]["ref"][0]
+ ref.set_context()
+ ref.get_existing()
+ device_cache = ref.cache_existing
+
+ if device_cache is None:
+ device_cache_lines = []
+ else:
+ device_cache_lines = device_cache.split("\n")
+
+ # Get Telemetry Destination Group Data
+ cmd_ref["TMS_DESTGROUP"]["ref"] = []
+ for line in device_cache_lines:
+ if re.search(r"destination-group", line):
+ resource_key = line.strip()
+ cmd_ref["TMS_DESTGROUP"]["ref"].append(NxosCmdRef(self._module, TMS_DESTGROUP))
+ ref = cmd_ref["TMS_DESTGROUP"]["ref"][-1]
+ ref.set_context([resource_key])
+ ref.get_existing(device_cache)
+ normalize_data(ref)
+
+ # Get Telemetry Sensorgroup Group Data
+ cmd_ref["TMS_SENSORGROUP"]["ref"] = []
+ for line in device_cache_lines:
+ if re.search(r"sensor-group", line):
+ resource_key = line.strip()
+ cmd_ref["TMS_SENSORGROUP"]["ref"].append(NxosCmdRef(self._module, TMS_SENSORGROUP))
+ ref = cmd_ref["TMS_SENSORGROUP"]["ref"][-1]
+ ref.set_context([resource_key])
+ ref.get_existing(device_cache)
+
+ # Get Telemetry Subscription Data
+ cmd_ref["TMS_SUBSCRIPTION"]["ref"] = []
+ for line in device_cache_lines:
+ if re.search(r"subscription", line):
+ resource_key = line.strip()
+ cmd_ref["TMS_SUBSCRIPTION"]["ref"].append(
+ NxosCmdRef(self._module, TMS_SUBSCRIPTION),
+ )
+ ref = cmd_ref["TMS_SUBSCRIPTION"]["ref"][-1]
+ ref.set_context([resource_key])
+ ref.get_existing(device_cache)
+
+ objs = []
+ objs = self.render_config(self.generated_spec, cmd_ref)
+ facts = {"telemetry": {}}
+ if objs:
+ # params = utils.validate_config(self.argument_spec, {'config': objs})
+ facts["telemetry"] = objs
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ if self._module.params.get("state"):
+ self._module.params["state"] = saved_module_state
+ return ansible_facts
+
+ def render_config(self, spec, cmd_ref):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+ config["destination_groups"] = []
+ config["sensor_groups"] = []
+ config["subscriptions"] = []
+ managed_objects = [
+ "TMS_GLOBAL",
+ "TMS_DESTGROUP",
+ "TMS_SENSORGROUP",
+ "TMS_SUBSCRIPTION",
+ ]
+
+ # Walk the argspec and cmd_ref objects and build out config dict.
+ for key in config.keys():
+ for mo in managed_objects:
+ for cr in cmd_ref[mo]["ref"]:
+ cr_keys = cr_key_lookup(key, mo)
+ for cr_key in cr_keys:
+ if cr._ref.get(cr_key) and cr._ref[cr_key].get("existing"):
+ if isinstance(config[key], dict):
+ for k in config[key].keys():
+ for existing_key in cr._ref[cr_key]["existing"].keys():
+ config[key][k] = cr._ref[cr_key]["existing"][existing_key][
+ k
+ ]
+ continue
+ if isinstance(config[key], list):
+ for existing_key in cr._ref[cr_key]["existing"].keys():
+ data = get_instance_data(key, cr_key, cr, existing_key)
+ config[key].append(data)
+ continue
+ for existing_key in cr._ref[cr_key]["existing"].keys():
+ config[key] = cr._ref[cr_key]["existing"][existing_key]
+ elif cr._ref.get(cr_key):
+ data = get_instance_data(key, cr_key, cr, None)
+ if isinstance(config[key], list) and data not in config[key]:
+ config[key].append(data)
+
+ return utils.remove_empties(config)
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/vlans/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/vlans/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/vlans/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/vlans/vlans.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/vlans/vlans.py
new file mode 100644
index 00000000..32968d2d
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/facts/vlans/vlans.py
@@ -0,0 +1,197 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+The nxos vlans fact class
+It is in this file that the configuration is collected from the
+device for a given resource, parsed, and the facts tree is
+populated based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import ast
+import re
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ parse_conf_arg,
+)
+
+from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.vlans.vlans import (
+ VlansArgs,
+)
+
+
+class VlansFacts(object):
+ """The nxos vlans fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = VlansArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def get_device_data(self, connection, show_cmd):
+ """Wrapper method for `connection.get()`
+ This exists solely to allow the unit test framework to mock device connection calls.
+ """
+ return connection.get(show_cmd)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for vlans
+ :param connection: the device connection
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ objs = []
+ # **TBD**
+ # N7K EOL/legacy image 6.2 does not support show vlan | json output.
+ # If support is still required for this image then:
+        #  - Wrap the json calls below in a try/except
+        #  - On exception, use a helper method to parse the run_cfg_output,
+ # using the run_cfg_output data to generate compatible json data that
+ # can be read by normalize_table_data.
+ if not data:
+ # Use structured for most of the vlan parameter states.
+ # This data is consistent across the supported nxos platforms.
+ try:
+                # Not all devices support '| json-pretty', but using it works around
+                # libssh issue https://github.com/ansible/pylibssh/issues/208
+ structured = self.get_device_data(connection, "show vlan | json-pretty")
+ except Exception:
+ # When json-pretty is not supported, we fall back to | json
+ structured = self.get_device_data(connection, "show vlan | json")
+
+ # Raw cli config is needed for mapped_vni, which is not included in structured.
+ run_cfg_output = self.get_device_data(connection, "show running-config | section ^vlan")
+ else:
+ running_config = data.split("\n\n")
+ structured, run_cfg_output = running_config[0], running_config[1]
+
+ # Create a single dictionary from all data sources
+ data = self.normalize_table_data(structured, run_cfg_output)
+
+ for vlan in data:
+ obj = self.render_config(self.generated_spec, vlan)
+ if obj:
+ objs.append(obj)
+
+ ansible_facts["ansible_network_resources"].pop("vlans", None)
+ facts = {}
+ if objs:
+ facts["vlans"] = []
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ for cfg in params["config"]:
+ facts["vlans"].append(utils.remove_empties(cfg))
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, vlan):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+ :param spec: The facts tree, generated from the argspec
+ :param vlan: structured data vlan settings (dict) and raw cfg from device
+ :rtype: dictionary
+ :returns: The generated config
+ Sample inputs: test/units/modules/network/nxos/fixtures/nxos_vlans/show_vlan
+ """
+ obj = deepcopy(spec)
+
+ obj["vlan_id"] = vlan["vlan_id"]
+
+ # name: 'VLAN000x' (default name) or custom name
+ name = vlan["vlanshowbr-vlanname"]
+ if name and re.match("VLAN%04d" % int(vlan["vlan_id"]), name):
+ name = None
+ obj["name"] = name
+
+ # mode: 'ce-vlan' or 'fabricpath-vlan'
+ obj["mode"] = vlan["vlanshowinfo-vlanmode"].replace("-vlan", "")
+
+ # enabled: shutdown, noshutdown
+ obj["enabled"] = True if "noshutdown" in vlan["vlanshowbr-shutstate"] else False
+
+ # state: active, suspend
+ obj["state"] = vlan["vlanshowbr-vlanstate"]
+
+ # non-structured data
+ obj["mapped_vni"] = parse_conf_arg(vlan["run_cfg"], "vn-segment")
+
+ return utils.remove_empties(obj)
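+    # Illustrative mapping for render_config (hypothetical values, shown only to
+    # clarify the shape of the data):
+    #   vlan = {"vlan_id": "5", "vlanshowbr-vlanname": "test",
+    #           "vlanshowbr-vlanstate": "active", "vlanshowbr-shutstate": "noshutdown",
+    #           "vlanshowinfo-vlanmode": "ce-vlan", "run_cfg": "5\n  vn-segment 942\n"}
+    #   render_config(spec, vlan) -> {"vlan_id": "5", "name": "test", "mode": "ce",
+    #                                 "enabled": True, "state": "active", "mapped_vni": "942"}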
+
+ def normalize_table_data(self, structured, run_cfg_output):
+ """Normalize structured output and raw running-config output into
+ a single dict to simplify render_config usage.
+ This is needed because:
+ - The NXOS devices report most of the vlan settings within two
+ structured data keys: 'vlanbrief' and 'mtuinfo', but the output is
+ incomplete and therefore raw running-config data is also needed.
+ - running-config by itself is insufficient because of major differences
+ in the cli config syntax across platforms.
+ - Thus a helper method combines settings from the separate top-level keys,
+ and adds a 'run_cfg' key containing raw cli from the device.
+ """
+        # device output may be a string-encoded dict; convert it back to a python object
+ structured = ast.literal_eval(str(structured))
+
+ vlanbrief = []
+ mtuinfo = []
+ if "TABLE_vlanbrief" in structured:
+ # SAMPLE: {"TABLE_vlanbriefid": {"ROW_vlanbriefid": {
+ # "vlanshowbr-vlanid": "4", "vlanshowbr-vlanid-utf": "4",
+ # "vlanshowbr-vlanname": "VLAN0004", "vlanshowbr-vlanstate": "active",
+ # "vlanshowbr-shutstate": "noshutdown"}},
+ vlanbrief = structured["TABLE_vlanbrief"]["ROW_vlanbrief"]
+
+ # SAMPLE: "TABLE_mtuinfoid": {"ROW_mtuinfoid": {
+ # "vlanshowinfo-vlanid": "4", "vlanshowinfo-media-type": "enet",
+ # "vlanshowinfo-vlanmode": "ce-vlan"}}
+ mtuinfo = structured["TABLE_mtuinfo"]["ROW_mtuinfo"]
+
+ if type(vlanbrief) is not list:
+ # vlanbrief is not a list when only one vlan is found.
+ vlanbrief = [vlanbrief]
+ mtuinfo = [mtuinfo]
+
+ # split out any per-vlan cli config
+ run_cfg_list = re.split(r"[\n^]vlan ", run_cfg_output)
+
+ # Create a list of vlan dicts where each dict contains vlanbrief,
+ # mtuinfo, and non-structured running-config data for one vlan.
+ vlans = []
+ for index, v in enumerate(vlanbrief):
+ v["vlan_id"] = v.get("vlanshowbr-vlanid-utf")
+ vlan = {}
+ vlan.update(v)
+ vlan.update(mtuinfo[index])
+
+ vlan["run_cfg"] = ""
+ for item in run_cfg_list:
+ # Sample match lines
+ # 202\n name Production-Segment-100101\n vn-segment 100101
+ # 5\n state suspend\n shutdown\n name test-changeme\n vn-segment 942
+ pattern = r"^{0}\s+\S.*vn-segment".format(v["vlan_id"])
+ if re.search(pattern, item, flags=re.DOTALL):
+ vlan["run_cfg"] = item
+ break
+
+ vlans.append(vlan)
+ return vlans
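+    # Illustrative shape of one normalized vlan dict returned above (hypothetical
+    # values): structured 'vlanshowbr-*'/'vlanshowinfo-*' keys merged per vlan,
+    # plus 'vlan_id' and the raw 'run_cfg' slice:
+    #   {"vlan_id": "5", "vlanshowbr-vlanid": "5", "vlanshowbr-vlanname": "test",
+    #    "vlanshowbr-vlanstate": "active", "vlanshowbr-shutstate": "noshutdown",
+    #    "vlanshowinfo-vlanid": "5", "vlanshowinfo-vlanmode": "ce-vlan",
+    #    "run_cfg": "5\n  name test\n  vn-segment 942\n"}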
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/nxos.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/nxos.py
new file mode 100644
index 00000000..f0e3843e
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/nxos.py
@@ -0,0 +1,1031 @@
+#
+# This code is part of Ansible, but is an independent component.
+#
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright: (c) 2017, Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+
+import json
+import re
+
+from copy import deepcopy
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils.connection import Connection, ConnectionError
+from ansible.module_utils.six import PY2, PY3
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
+ CustomNetworkConfig,
+ NetworkConfig,
+ dumps,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ ComplexList,
+ to_list,
+)
+
+
+try:
+ import yaml
+
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+
+_DEVICE_CONNECTION = None
+
+
+def get_connection(module):
+ global _DEVICE_CONNECTION
+ if not _DEVICE_CONNECTION:
+ connection_proxy = Connection(module._socket_path)
+ cap = json.loads(connection_proxy.get_capabilities())
+ if cap["network_api"] == "cliconf":
+ conn = Cli(module)
+ elif cap["network_api"] == "nxapi":
+ conn = HttpApi(module)
+ _DEVICE_CONNECTION = conn
+ return _DEVICE_CONNECTION
+
+
+class Cli:
+ def __init__(self, module):
+ self._module = module
+ self._device_configs = {}
+ self._connection = None
+
+ def _get_connection(self):
+ if self._connection:
+ return self._connection
+ self._connection = Connection(self._module._socket_path)
+
+ return self._connection
+
+ def get_config(self, flags=None):
+ """Retrieves the current config from the device or cache"""
+ flags = [] if flags is None else flags
+
+ cmd = "show running-config "
+ cmd += " ".join(flags)
+ cmd = cmd.strip()
+
+ try:
+ return self._device_configs[cmd]
+ except KeyError:
+ connection = self._get_connection()
+ try:
+ out = connection.get_config(flags=flags)
+ except ConnectionError as exc:
+ self._module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+
+ cfg = to_text(out, errors="surrogate_then_replace").strip() + "\n"
+ self._device_configs[cmd] = cfg
+ return cfg
+
+ def run_commands(self, commands, check_rc=True):
+ """Run list of commands on remote device and return results"""
+ connection = self._get_connection()
+
+ try:
+ out = connection.run_commands(commands, check_rc)
+ if check_rc == "retry_json":
+ capabilities = self.get_capabilities()
+ network_api = capabilities.get("network_api")
+
+ if network_api == "cliconf" and out:
+ for index, resp in enumerate(out):
+ if (
+ "Invalid command at" in resp or "Ambiguous command at" in resp
+ ) and "json" in resp:
+ if commands[index]["output"] == "json":
+ commands[index]["output"] = "text"
+ out = connection.run_commands(commands, check_rc)
+ return out
+ except ConnectionError as exc:
+ self._module.fail_json(msg=to_text(exc))
+
+ def load_config(self, config, return_error=False, opts=None, replace=None):
+ """Sends configuration commands to the remote device"""
+ if opts is None:
+ opts = {}
+
+ connection = self._get_connection()
+ responses = []
+ try:
+ resp = connection.edit_config(config, replace=replace)
+ if isinstance(resp, Mapping):
+ resp = resp["response"]
+ except ConnectionError as e:
+ code = getattr(e, "code", 1)
+ message = getattr(e, "err", e)
+ err = to_text(message, errors="surrogate_then_replace")
+ if opts.get("ignore_timeout") and code:
+ responses.append(err)
+ return responses
+ elif code and "no graceful-restart" in err:
+ if "ISSU/HA will be affected if Graceful Restart is disabled" in err:
+ msg = [""]
+ responses.extend(msg)
+ return responses
+ else:
+ self._module.fail_json(msg=err)
+ elif code:
+ self._module.fail_json(msg=err)
+
+ responses.extend(resp)
+ return responses
+
+ def get_diff(
+ self,
+ candidate=None,
+ running=None,
+ diff_match="line",
+ diff_ignore_lines=None,
+ path=None,
+ diff_replace="line",
+ ):
+ conn = self._get_connection()
+ try:
+ response = conn.get_diff(
+ candidate=candidate,
+ running=running,
+ diff_match=diff_match,
+ diff_ignore_lines=diff_ignore_lines,
+ path=path,
+ diff_replace=diff_replace,
+ )
+ except ConnectionError as exc:
+ self._module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+ return response
+
+ def get_capabilities(self):
+ """Returns platform info of the remove device"""
+ if hasattr(self._module, "_capabilities"):
+ return self._module._capabilities
+
+ connection = self._get_connection()
+ try:
+ capabilities = connection.get_capabilities()
+ except ConnectionError as exc:
+ self._module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+ self._module._capabilities = json.loads(capabilities)
+ return self._module._capabilities
+
+ def read_module_context(self, module_key):
+ connection = self._get_connection()
+ try:
+ module_context = connection.read_module_context(module_key)
+ except ConnectionError as exc:
+ self._module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+
+ return module_context
+
+ def save_module_context(self, module_key, module_context):
+ connection = self._get_connection()
+ try:
+ connection.save_module_context(module_key, module_context)
+ except ConnectionError as exc:
+ self._module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+
+ return None
+
+
+class HttpApi:
+ def __init__(self, module):
+ self._module = module
+ self._device_configs = {}
+ self._module_context = {}
+ self._connection_obj = None
+
+ @property
+ def _connection(self):
+ if not self._connection_obj:
+ self._connection_obj = Connection(self._module._socket_path)
+
+ return self._connection_obj
+
+ def run_commands(self, commands, check_rc=True):
+ """Runs list of commands on remote device and returns results"""
+ try:
+ out = self._connection.send_request(commands)
+ except ConnectionError as exc:
+ if check_rc is True:
+ raise
+ out = to_text(exc)
+
+ out = to_list(out)
+ if not out[0]:
+ return out
+
+ for index, response in enumerate(out):
+ if response[0] == "{":
+ out[index] = json.loads(response)
+
+ return out
+
+ def get_config(self, flags=None):
+ """Retrieves the current config from the device or cache"""
+ flags = [] if flags is None else flags
+
+ cmd = "show running-config "
+ cmd += " ".join(flags)
+ cmd = cmd.strip()
+
+ try:
+ return self._device_configs[cmd]
+ except KeyError:
+ try:
+ out = self._connection.send_request(cmd)
+ except ConnectionError as exc:
+ self._module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+
+ cfg = to_text(out).strip()
+ self._device_configs[cmd] = cfg
+ return cfg
+
+ def get_diff(
+ self,
+ candidate=None,
+ running=None,
+ diff_match="line",
+ diff_ignore_lines=None,
+ path=None,
+ diff_replace="line",
+ ):
+ diff = {}
+
+ # prepare candidate configuration
+ candidate_obj = NetworkConfig(indent=2)
+ candidate_obj.load(candidate)
+
+ if running and diff_match != "none" and diff_replace != "config":
+ # running configuration
+ running_obj = NetworkConfig(indent=2, contents=running, ignore_lines=diff_ignore_lines)
+ configdiffobjs = candidate_obj.difference(
+ running_obj,
+ path=path,
+ match=diff_match,
+ replace=diff_replace,
+ )
+
+ else:
+ configdiffobjs = candidate_obj.items
+
+ diff["config_diff"] = dumps(configdiffobjs, "commands") if configdiffobjs else ""
+ return diff
+
+ def load_config(self, commands, return_error=False, opts=None, replace=None):
+ """Sends the ordered set of commands to the device"""
+ if opts is None:
+ opts = {}
+
+ responses = []
+ try:
+ resp = self.edit_config(commands, replace=replace)
+ except ConnectionError as exc:
+ code = getattr(exc, "code", 1)
+ message = getattr(exc, "err", exc)
+ err = to_text(message, errors="surrogate_then_replace")
+ if opts.get("ignore_timeout") and code:
+ responses.append(code)
+ return responses
+ elif opts.get("catch_clierror") and "400" in code:
+ return [code, err]
+ elif code and "no graceful-restart" in err:
+ if "ISSU/HA will be affected if Graceful Restart is disabled" in err:
+ msg = [""]
+ responses.extend(msg)
+ return responses
+ else:
+ self._module.fail_json(msg=err)
+ elif code:
+ self._module.fail_json(msg=err)
+
+ responses.extend(resp)
+ return responses
+
+ def edit_config(self, candidate=None, commit=True, replace=None, comment=None):
+ resp = list()
+
+ self.check_edit_config_capability(candidate, commit, replace, comment)
+
+ if replace:
+ candidate = "config replace {0}".format(replace)
+
+ responses = self._connection.send_request(candidate, output="config")
+ for response in to_list(responses):
+ if response != "{}":
+ resp.append(response)
+ if not resp:
+ resp = [""]
+
+ return resp
+
+ def get_capabilities(self):
+ """Returns platform info of the remove device"""
+ try:
+ capabilities = self._connection.get_capabilities()
+ except ConnectionError as exc:
+ self._module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+
+ return json.loads(capabilities)
+
+ def check_edit_config_capability(self, candidate=None, commit=True, replace=None, comment=None):
+ operations = self._connection.get_device_operations()
+
+ if not candidate and not replace:
+ raise ValueError("must provide a candidate or replace to load configuration")
+
+ if commit not in (True, False):
+ raise ValueError("'commit' must be a bool, got %s" % commit)
+
+ if replace and not operations.get("supports_replace"):
+ raise ValueError("configuration replace is not supported")
+
+ if comment and not operations.get("supports_commit_comment", False):
+ raise ValueError("commit comment is not supported")
+
+ def read_module_context(self, module_key):
+ try:
+ module_context = self._connection.read_module_context(module_key)
+ except ConnectionError as exc:
+ self._module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+
+ return module_context
+
+ def save_module_context(self, module_key, module_context):
+ try:
+ self._connection.save_module_context(module_key, module_context)
+ except ConnectionError as exc:
+ self._module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+
+ return None
+
+
+class NxosCmdRef:
+ """NXOS Command Reference utilities.
+ The NxosCmdRef class takes a yaml-formatted string of nxos module commands
+    and converts it into a dict-formatted database of getters/setters/defaults
+ and associated common and platform-specific values. The utility methods
+ add additional data such as existing states, playbook states, and proposed cli.
+ The utilities also abstract away platform differences such as different
+ defaults and different command syntax.
+
+ Callers must provide a yaml formatted string that defines each command and
+ its properties; e.g. BFD global:
+ ---
+ _template: # _template holds common settings for all commands
+ # Enable feature bfd if disabled
+ feature: bfd
+ # Common getter syntax for BFD commands
+ get_command: show run bfd all | incl '^(no )*bfd'
+
+ interval:
+ kind: dict
+ getval: bfd interval (?P<tx>\\d+) min_rx (?P<min_rx>\\d+) multiplier (?P<multiplier>\\d+)
+ setval: bfd interval {tx} min_rx {min_rx} multiplier {multiplier}
+ default:
+ tx: 50
+ min_rx: 50
+ multiplier: 3
+ N3K:
+ # Platform overrides
+ default:
+ tx: 250
+ min_rx: 250
+ multiplier: 3
+ """
+
+ def __init__(self, module, cmd_ref_str, ref_only=False):
+ """Initialize cmd_ref from yaml data."""
+
+ self._module = module
+ self._check_imports()
+ self._yaml_load(cmd_ref_str)
+ self.cache_existing = None
+ self.present_states = ["present", "merged", "replaced"]
+ self.absent_states = ["absent", "deleted"]
+ ref = self._ref
+
+ # Create a list of supported commands based on ref keys
+ ref["commands"] = sorted([k for k in ref if not k.startswith("_")])
+ ref["_proposed"] = []
+ ref["_context"] = []
+ ref["_resource_key"] = None
+
+ if not ref_only:
+ ref["_state"] = module.params.get("state", "present")
+ self.feature_enable()
+ self.get_platform_defaults()
+ self.normalize_defaults()
+
+ def __getitem__(self, key=None):
+ if key is None:
+ return self._ref
+ return self._ref[key]
+
+ def _check_imports(self):
+ module = self._module
+ msg = nxosCmdRef_import_check()
+ if msg:
+ module.fail_json(msg=msg)
+
+ def _yaml_load(self, cmd_ref_str):
+ if PY2:
+ self._ref = yaml.load(cmd_ref_str)
+ elif PY3:
+ self._ref = yaml.load(cmd_ref_str, Loader=yaml.FullLoader)
+
+ def feature_enable(self):
+ """Add 'feature <foo>' to _proposed if ref includes a 'feature' key."""
+ ref = self._ref
+ feature = ref["_template"].get("feature")
+ if feature:
+ show_cmd = "show run | incl 'feature {0}'".format(feature)
+ output = self.execute_show_command(show_cmd, "text")
+ if not output or "CLI command error" in output:
+ msg = "** 'feature {0}' is not enabled. Module will auto-enable feature {0} ** ".format(
+ feature,
+ )
+ self._module.warn(msg)
+ ref["_proposed"].append("feature {0}".format(feature))
+ ref["_cli_is_feature_disabled"] = ref["_proposed"]
+
+ def get_platform_shortname(self):
+ """Query device for platform type, normalize to a shortname/nickname.
+ Returns platform shortname (e.g. 'N3K-3058P' returns 'N3K') or None.
+ """
+ # TBD: add this method logic to get_capabilities() after those methods
+ # are made consistent across transports
+ platform_info = self.execute_show_command("show inventory", "json")
+ if not platform_info or not isinstance(platform_info, dict):
+ return None
+ inventory_table = platform_info["TABLE_inv"]["ROW_inv"]
+ for info in inventory_table:
+ if "Chassis" in info["name"]:
+ network_os_platform = info["productid"]
+ break
+ else:
+ return None
+
+ # Supported Platforms: N3K,N5K,N6K,N7K,N9K,N3K-F,N9K-F
+ m = re.match("(?P<short>N[35679][K57])-(?P<N35>C35)*", network_os_platform)
+ if not m:
+ return None
+ shortname = m.group("short")
+
+ # Normalize
+ if m.groupdict().get("N35"):
+ shortname = "N35"
+ elif re.match("N77", shortname):
+ shortname = "N7K"
+ elif re.match(r"N3K|N9K", shortname):
+ for info in inventory_table:
+ if "-R" in info["productid"]:
+ # Fretta Platform
+ shortname += "-F"
+ break
+ return shortname
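+    # Illustrative shortname mappings (hypothetical product ids):
+    #   'N9K-C9396PX' -> 'N9K'      'N3K-C3548P' -> 'N35'
+    #   'N77-C7710'   -> 'N7K'      'N9K' chassis with '-R' line cards -> 'N9K-F'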
+
+ def get_platform_defaults(self):
+ """Update ref with platform specific defaults"""
+ plat = self.get_platform_shortname()
+ if not plat:
+ return
+
+ ref = self._ref
+ ref["_platform_shortname"] = plat
+ # Remove excluded commands (no platform support for command)
+ for k in ref["commands"]:
+ if plat in ref[k].get("_exclude", ""):
+ ref["commands"].remove(k)
+
+ # Update platform-specific settings for each item in ref
+ plat_spec_cmds = [k for k in ref["commands"] if plat in ref[k]]
+ for k in plat_spec_cmds:
+ for plat_key in ref[k][plat]:
+ ref[k][plat_key] = ref[k][plat][plat_key]
+
+ def normalize_defaults(self):
+ """Update ref defaults with normalized data"""
+ ref = self._ref
+ for k in ref["commands"]:
+ if "default" in ref[k] and ref[k]["default"]:
+ kind = ref[k]["kind"]
+ if "int" == kind:
+ ref[k]["default"] = int(ref[k]["default"])
+ elif "list" == kind:
+ ref[k]["default"] = [str(i) for i in ref[k]["default"]]
+ elif "dict" == kind:
+ for key, v in ref[k]["default"].items():
+ if v:
+ v = str(v)
+ ref[k]["default"][key] = v
+
+ def execute_show_command(self, command, format):
+ """Generic show command helper.
+        Warning: 'CLI command error' exceptions are caught and returned as output; the caller must handle them.
+ Return device output as a newline-separated string or None.
+ """
+ cmds = [{"command": command, "output": format}]
+ output = None
+ try:
+ output = run_commands(self._module, cmds)
+ if output:
+ output = output[0]
+ except ConnectionError as exc:
+ if "CLI command error" in repr(exc):
+ # CLI may be feature disabled
+ output = repr(exc)
+ else:
+ raise
+ return output
+
+ def pattern_match_existing(self, output, k):
+ """Pattern matching helper for `get_existing`.
+ `k` is the command name string. Use the pattern from cmd_ref to
+ find a matching string in the output.
+        Return a list of matches (match objects for 'dict' kind, group lists otherwise) or None.
+ """
+ ref = self._ref
+ pattern = re.compile(ref[k]["getval"])
+ multiple = "multiple" in ref[k].keys()
+ match_lines = [re.search(pattern, line) for line in output]
+ if "dict" == ref[k]["kind"]:
+ match = [m for m in match_lines if m]
+ if not match:
+ return None
+ if len(match) > 1 and not multiple:
+ raise ValueError("get_existing: multiple matches found for property {0}".format(k))
+ else:
+ match = [m.groups() for m in match_lines if m]
+ if not match:
+ return None
+ if len(match) > 1 and not multiple:
+ raise ValueError("get_existing: multiple matches found for property {0}".format(k))
+ for item in match:
+ index = match.index(item)
+ match[index] = list(item) # tuple to list
+
+ # Handle config strings that nvgen with the 'no' prefix.
+ # Example match behavior:
+ # When pattern is: '(no )*foo *(\S+)*$' AND
+ # When output is: 'no foo' -> match: ['no ', None]
+ # When output is: 'foo 50' -> match: [None, '50']
+ if None is match[index][0]:
+ match[index].pop(0)
+ elif "no" in match[index][0]:
+ match[index].pop(0)
+ if not match:
+ return None
+
+ return match
+
+ def set_context(self, context=None):
+ """Update ref with command context."""
+ if context is None:
+ context = []
+ ref = self._ref
+        # Process any additional context that this property might require.
+ # 1) Global context from NxosCmdRef _template.
+ # 2) Context passed in using context arg.
+ ref["_context"] = ref["_template"].get("context", [])
+ for cmd in context:
+ ref["_context"].append(cmd)
+ # Last key in context is the resource key
+ ref["_resource_key"] = context[-1] if context else ref["_resource_key"]
+
+ def get_existing(self, cache_output=None):
+ """Update ref with existing command states from the device.
+ Store these states in each command's 'existing' key.
+ """
+ ref = self._ref
+ if ref.get("_cli_is_feature_disabled"):
+ # Add context to proposed if state is present
+ if ref["_state"] in self.present_states:
+ [ref["_proposed"].append(ctx) for ctx in ref["_context"]]
+ return
+
+ show_cmd = ref["_template"]["get_command"]
+ if cache_output:
+ output = cache_output
+ else:
+ output = self.execute_show_command(show_cmd, "text") or []
+ self.cache_existing = output
+
+ # Add additional command context if needed.
+ if ref["_context"]:
+ output = CustomNetworkConfig(indent=2, contents=output)
+ output = output.get_section(ref["_context"])
+
+ if not output:
+ # Add context to proposed if state is present
+ if ref["_state"] in self.present_states:
+ [ref["_proposed"].append(ctx) for ctx in ref["_context"]]
+ return
+
+ # We need to remove the last item in context for state absent case.
+ if ref["_state"] in self.absent_states and ref["_context"]:
+ if ref["_resource_key"] and ref["_resource_key"] == ref["_context"][-1]:
+ if ref["_context"][-1] in output:
+ ref["_context"][-1] = "no " + ref["_context"][-1]
+ else:
+ del ref["_context"][-1]
+ return
+
+ # Walk each cmd in ref, use cmd pattern to discover existing cmds
+ output = output.split("\n")
+ for k in ref["commands"]:
+ match = self.pattern_match_existing(output, k)
+ if not match:
+ continue
+ ref[k]["existing"] = {}
+ for item in match:
+ index = match.index(item)
+ kind = ref[k]["kind"]
+ if "int" == kind:
+ ref[k]["existing"][index] = int(item[0])
+ elif "list" == kind:
+ ref[k]["existing"][index] = [str(i) for i in item[0]]
+ elif "dict" == kind:
+ # The getval pattern should contain regex named group keys that
+ # match up with the setval named placeholder keys; e.g.
+ # getval: my-cmd (?P<foo>\d+) bar (?P<baz>\d+)
+ # setval: my-cmd {foo} bar {baz}
+ ref[k]["existing"][index] = {}
+ for key in item.groupdict().keys():
+ ref[k]["existing"][index][key] = str(item.group(key))
+ elif "str" == kind:
+ ref[k]["existing"][index] = item[0]
+ else:
+ raise ValueError(
+ "get_existing: unknown 'kind' value specified for key '{0}'".format(k),
+ )
+
+ def get_playvals(self):
+ """Update ref with values from the playbook.
+ Store these values in each command's 'playval' key.
+ """
+ ref = self._ref
+ module = self._module
+ params = {}
+ if module.params.get("config"):
+ # Resource module builder packs playvals under 'config' key
+ param_data = module.params.get("config")
+ params["global"] = param_data
+ for key in param_data.keys():
+ if isinstance(param_data[key], list):
+ params[key] = param_data[key]
+ else:
+ params["global"] = module.params
+ for k in ref.keys():
+ for level in params.keys():
+ if isinstance(params[level], dict):
+ params[level] = [params[level]]
+ for item in params[level]:
+ if k in item and item[k] is not None:
+ if not ref[k].get("playval"):
+ ref[k]["playval"] = {}
+ playval = item[k]
+ index = params[level].index(item)
+ # Normalize each value
+ if "int" == ref[k]["kind"]:
+ playval = int(playval)
+ elif "list" == ref[k]["kind"]:
+ playval = [str(i) for i in playval]
+ elif "dict" == ref[k]["kind"]:
+ for key, v in playval.items():
+ playval[key] = str(v)
+ ref[k]["playval"][index] = playval
+
+ def build_cmd_set(self, playval, existing, k):
+ """Helper function to create list of commands to configure device
+ Return a list of commands
+ """
+ ref = self._ref
+ proposed = ref["_proposed"]
+ cmd = None
+ kind = ref[k]["kind"]
+ if "int" == kind:
+ cmd = ref[k]["setval"].format(playval)
+ elif "list" == kind:
+ cmd = ref[k]["setval"].format(*(playval))
+ elif "dict" == kind:
+ # The setval pattern should contain placeholder keys that
+ # match up with the getval regex named group keys; e.g.
+ # getval: my-cmd (?P<foo>\d+) bar (?P<baz>\d+)
+ # setval: my-cmd {foo} bar {baz}
+ if ref[k]["setval"].startswith("path"):
+ tmplt = "path {name}"
+ if "depth" in playval:
+ tmplt += " depth {depth}"
+ if "query_condition" in playval:
+ tmplt += " query-condition {query_condition}"
+ if "filter_condition" in playval:
+ tmplt += " filter-condition {filter_condition}"
+ cmd = tmplt.format(**playval)
+ else:
+ cmd = ref[k]["setval"].format(**playval)
+ elif "str" == kind:
+ if "deleted" in str(playval):
+ if existing:
+ cmd = "no " + ref[k]["setval"].format(existing)
+ else:
+ cmd = ref[k]["setval"].format(playval)
+ else:
+ raise ValueError("get_proposed: unknown 'kind' value specified for key '{0}'".format(k))
+ if cmd:
+ if ref["_state"] in self.absent_states and not re.search(r"^no", cmd):
+ cmd = "no " + cmd
+ # Commands may require parent commands for proper context.
+ # Global _template context is replaced by parameter context
+ [proposed.append(ctx) for ctx in ref["_context"]]
+ [proposed.append(ctx) for ctx in ref[k].get("context", [])]
+ proposed.append(cmd)
+
+ def get_proposed(self):
+ """Compare playbook values against existing states and create a list
+ of proposed commands.
+ Return a list of raw cli command strings.
+ """
+ ref = self._ref
+ # '_proposed' may be empty list or contain initializations; e.g. ['feature foo']
+ proposed = ref["_proposed"]
+
+ if ref["_context"] and ref["_context"][-1].startswith("no"):
+ [proposed.append(ctx) for ctx in ref["_context"]]
+ return proposed
+
+ # Create a list of commands that have playbook values
+ play_keys = [k for k in ref["commands"] if "playval" in ref[k]]
+
+ def compare(playval, existing):
+ if ref["_state"] in self.present_states:
+ if existing is None:
+ return False
+ elif str(playval) == str(existing):
+ return True
+ elif isinstance(existing, dict) and playval in existing.values():
+ return True
+
+ if ref["_state"] in self.absent_states:
+ if isinstance(existing, dict) and all(x is None for x in existing.values()):
+ existing = None
+ if existing is None or playval not in existing.values():
+ return True
+ return False
+
+ # Compare against current state
+ for k in play_keys:
+ playval = ref[k]["playval"]
+ # Create playval copy to avoid RuntimeError
+ # dictionary changed size during iteration error
+ playval_copy = deepcopy(playval)
+ existing = ref[k].get("existing", ref[k]["default"])
+ multiple = "multiple" in ref[k].keys()
+
+ # Multiple Instances:
+ if isinstance(existing, dict) and multiple:
+ for ekey, evalue in existing.items():
+ if isinstance(evalue, dict):
+                        # Remove values set to string 'None' from evalue
+ evalue = dict((k, v) for k, v in evalue.items() if v != "None")
+ for pkey, pvalue in playval.items():
+ if compare(pvalue, evalue):
+ if playval_copy.get(pkey):
+ del playval_copy[pkey]
+ if not playval_copy:
+ continue
+ # Single Instance:
+ else:
+ for pkey, pval in playval.items():
+ if compare(pval, existing):
+ if playval_copy.get(pkey):
+ del playval_copy[pkey]
+ if not playval_copy:
+ continue
+
+ playval = playval_copy
+ # Multiple Instances:
+ if isinstance(existing, dict):
+ for dkey, dvalue in existing.items():
+ for pval in playval.values():
+ self.build_cmd_set(pval, dvalue, k)
+ # Single Instance:
+ else:
+ for pval in playval.values():
+ self.build_cmd_set(pval, existing, k)
+
+ # Remove any duplicate commands before returning.
+ # pylint: disable=unnecessary-lambda
+ cmds = sorted(set(proposed), key=lambda x: proposed.index(x))
+ return cmds
+
+
+def nxosCmdRef_import_check():
+ """Return import error messages or empty string"""
+ msg = ""
+ if not HAS_YAML:
+ msg += "Mandatory python library 'PyYAML' is not present, try 'pip install PyYAML'\n"
+ return msg
+
+
+def is_json(cmd):
+ return to_text(cmd).endswith("| json")
+
+
+def is_text(cmd):
+ return not is_json(cmd)
+
+
+def to_command(module, commands):
+ transform = ComplexList(
+ dict(
+ command=dict(key=True),
+ output=dict(type="str", default="text"),
+ prompt=dict(type="list"),
+ answer=dict(type="list"),
+ newline=dict(type="bool", default=True),
+ sendonly=dict(type="bool", default=False),
+ check_all=dict(type="bool", default=False),
+ ),
+ module,
+ )
+
+ commands = transform(to_list(commands))
+
+ for item in commands:
+ if is_json(item["command"]):
+ item["output"] = "json"
+
+ return commands
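+# Illustrative transform, a sketch only (default keys come from the ComplexList
+# spec above; exact extras may vary):
+#   to_command(module, "show vlan | json")
+#     -> [{"command": "show vlan | json", "output": "json", "newline": True, ...}]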
+
+
+def get_config(module, flags=None):
+ flags = [] if flags is None else flags
+
+ conn = get_connection(module)
+ return conn.get_config(flags=flags)
+
+
+def run_commands(module, commands, check_rc=True):
+ conn = get_connection(module)
+ return conn.run_commands(to_command(module, commands), check_rc)
+
+
+def load_config(module, config, return_error=False, opts=None, replace=None):
+ conn = get_connection(module)
+ return conn.load_config(config, return_error, opts, replace=replace)
+
+
+def get_capabilities(module):
+ conn = get_connection(module)
+ return conn.get_capabilities()
+
+
+def get_diff(
+ self,
+ candidate=None,
+ running=None,
+ diff_match="line",
+ diff_ignore_lines=None,
+ path=None,
+ diff_replace="line",
+):
+ conn = self.get_connection()
+ return conn.get_diff(
+ candidate=candidate,
+ running=running,
+ diff_match=diff_match,
+ diff_ignore_lines=diff_ignore_lines,
+ path=path,
+ diff_replace=diff_replace,
+ )
+
+
+def normalize_interface(name):
+ """Return the normalized interface name"""
+ if not name:
+ return
+
+ def _get_number(name):
+ digits = ""
+ for char in name:
+ if char.isdigit() or char in "/.":
+ digits += char
+ return digits
+
+ if name.lower().startswith("et"):
+ if_type = "Ethernet"
+ elif name.lower().startswith("vl"):
+ if_type = "Vlan"
+ elif name.lower().startswith("lo"):
+ if_type = "loopback"
+ elif name.lower().startswith("po"):
+ if_type = "port-channel"
+ elif name.lower().startswith("nv"):
+ if_type = "nve"
+ else:
+ if_type = None
+
+ number_list = name.split(" ")
+ if len(number_list) == 2:
+ number = number_list[-1].strip()
+ else:
+ number = _get_number(name)
+
+ if if_type:
+ proper_interface = if_type + number
+ else:
+ proper_interface = name
+
+ return proper_interface
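+# Illustrative normalizations (hypothetical inputs):
+#   normalize_interface("Eth1/1")  -> "Ethernet1/1"
+#   normalize_interface("po10")    -> "port-channel10"
+#   normalize_interface("lo0")     -> "loopback0"
+#   normalize_interface("Vlan 20") -> "Vlan20"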
+
+
+def get_interface_type(interface):
+ """Gets the type of interface"""
+ if interface.upper().startswith("ET"):
+ return "ethernet"
+ elif interface.upper().startswith("VL"):
+ return "svi"
+ elif interface.upper().startswith("LO"):
+ return "loopback"
+ elif interface.upper().startswith("MG"):
+ return "management"
+ elif interface.upper().startswith("MA"):
+ return "management"
+ elif interface.upper().startswith("PO"):
+ return "portchannel"
+ elif interface.upper().startswith("NV"):
+ return "nve"
+ else:
+ return "unknown"
+
+
+def default_intf_enabled(name="", sysdefs=None, mode=None):
+ """Get device/version/interface-specific default 'enabled' state.
+ L3:
+ - Most L3 intfs default to 'shutdown'. Loopbacks default to 'no shutdown'.
+ - Some legacy platforms default L3 intfs to 'no shutdown'.
+ L2:
+ - User-System-Default 'system default switchport shutdown' defines the
+      enabled state for L2 intfs. USD defaults may differ on some platforms.
+ - An intf may be explicitly defined as L2 with 'switchport' or it may be
+ implicitly defined as L2 when USD 'system default switchport' is defined.
+ """
+ if not name:
+ return None
+ if sysdefs is None:
+ sysdefs = {}
+ default = False
+
+ if re.search("port-channel|loopback", name):
+ default = True
+ else:
+ if mode is None:
+ # intf 'switchport' cli is not present so use the user-system-default
+ mode = sysdefs.get("mode")
+
+ if mode == "layer3":
+ default = sysdefs.get("L3_enabled")
+ elif mode == "layer2":
+ default = sysdefs.get("L2_enabled")
+ return default
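+# Illustrative calls (the sysdefs dict is hypothetical; its keys mirror the
+# lookups above):
+#   default_intf_enabled("loopback0") -> True
+#   default_intf_enabled("Ethernet1/1",
+#                        sysdefs={"mode": "layer3", "L3_enabled": False,
+#                                 "L2_enabled": True}) -> False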
+
+
+def read_module_context(module):
+ conn = get_connection(module)
+ return conn.read_module_context(module._name)
+
+
+def save_module_context(module, module_context):
+ conn = get_connection(module)
+ return conn.save_module_context(module._name, module_context)
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/bgp_address_family.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/bgp_address_family.py
new file mode 100644
index 00000000..942e993e
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/bgp_address_family.py
@@ -0,0 +1,798 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The Bgp_address_family parser templates file. This contains
+a list of parser definitions and associated functions that
+facilitates both facts gathering and native command generation for
+the given network resource.
+"""
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.network_template import (
+ NetworkTemplate,
+)
+
+
+def _tmplt_aggregate_address(aggaddr):
+ cmd = "aggregate-address {prefix}"
+
+ if aggaddr.get("advertise_map"):
+ cmd += " advertise-map {advertise_map}"
+ if aggaddr.get("as_set"):
+ cmd += " as-set"
+ if aggaddr.get("attribute_map"):
+ cmd += " attribute-map {attribute_map}"
+ if aggaddr.get("summary_only"):
+ cmd += " summary-only"
+ if aggaddr.get("suppress_map"):
+ cmd += " suppress-map {suppress_map}"
+
+ return cmd.format(**aggaddr)
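+# Illustrative rendering (hypothetical values):
+#   _tmplt_aggregate_address({"prefix": "192.0.2.0/24", "as_set": True, "summary_only": True})
+#     -> "aggregate-address 192.0.2.0/24 as-set summary-only"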
+
+
+def _tmplt_dampening(proc):
+ damp = proc.get("dampening", {})
+ cmd = "dampening"
+
+ if damp.get("set") is False:
+ return "no {0}".format(cmd)
+ if damp.get("route_map"):
+ cmd += " route-map {route_map}".format(**damp)
+ for x in (
+ "decay_half_life",
+ "start_reuse_route",
+ "start_suppress_route",
+ "max_suppress_time",
+ ):
+ if x in damp:
+ cmd += " {0}".format(damp[x])
+ return cmd
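+# Illustrative renderings (hypothetical values):
+#   _tmplt_dampening({"dampening": {"set": False}}) -> "no dampening"
+#   _tmplt_dampening({"dampening": {"decay_half_life": 15, "start_reuse_route": 750,
+#                                   "start_suppress_route": 2000, "max_suppress_time": 60}})
+#     -> "dampening 15 750 2000 60"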
+
+
+def _tmplt_redistribute(redis):
+ command = "redistribute {protocol}".format(**redis)
+ if redis.get("id"):
+ command += " {id}".format(**redis)
+ command += " route-map {route_map}".format(**redis)
+ return command
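+# Illustrative rendering (hypothetical values; the template always appends route-map):
+#   _tmplt_redistribute({"protocol": "ospf", "id": "100", "route_map": "RMAP1"})
+#     -> "redistribute ospf 100 route-map RMAP1"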
+
+
+class Bgp_address_familyTemplate(NetworkTemplate):
+ def __init__(self, lines=None):
+ super(Bgp_address_familyTemplate, self).__init__(lines=lines, tmplt=self)
+
+ # fmt: off
+ PARSERS = [
+ {
+ "name": "as_number",
+ "getval": re.compile(
+ r"""
+ ^router\sbgp\s(?P<as_number>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "router bgp {{ as_number }}",
+ "result": {
+ "as_number": "{{ as_number }}",
+ },
+ "shared": True,
+ },
+ {
+ "name": "address_family",
+ "getval": re.compile(
+ r"""
+ (\s+vrf\s(?P<vrf>\S+))?
+ (\s+neighbor\s(?P<nbr>\S+))?
+ \s+address-family
+ \s(?P<afi>\S+)
+ (\s(?P<safi>\S+))?
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "address-family {{ afi }}{{ (' ' + safi) if safi is defined else ''}}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "vrf": "{{ vrf }}",
+ "afi": "{{ afi }}",
+ "safi": "{{ safi }}",
+ },
+ },
+ },
+ "shared": True,
+ },
+ {
+ "name": "additional_paths.install_backup",
+ "getval": re.compile(
+ r"""
+ \s+additional-paths
+ \sinstall\s(?P<backup>backup)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "additional-paths install backup",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "additional_paths": {
+ "install_backup": "{{ not not backup }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "additional_paths.receive",
+ "getval": re.compile(
+ r"""
+ \s+additional-paths
+ \s(?P<receive>receive)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "additional-paths receive",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "additional_paths": {
+ "receive": "{{ not not receive }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "additional_paths.selection.route_map",
+ "getval": re.compile(
+ r"""
+ \s+additional-paths
+ \sselection\sroute-map
+ \s(?P<route_map>\S+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "additional-paths selection route-map {{ additional_paths.selection.route_map }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "additional_paths": {
+ "selection": {
+ "route_map": "{{ route_map }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "additional_paths.send",
+ "getval": re.compile(
+ r"""
+ \s+additional-paths
+ \s(?P<send>send)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "additional-paths send",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "additional_paths": {
+ "send": "{{ not not send }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "advertise_l2vpn_evpn",
+ "getval": re.compile(
+ r"""
+ \s+(?P<advertise_l2vpn_evpn>advertise\sl2vpn\sevpn)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "advertise l2vpn evpn",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "advertise_l2vpn_evpn": "{{ not not advertise_l2vpn_evpn }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "advertise_pip",
+ "getval": re.compile(
+ r"""
+ \s+(?P<advertise_pip>advertise-pip)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "advertise-pip",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "advertise_pip": "{{ not not advertise_pip }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "advertise_system_mac",
+ "getval": re.compile(
+ r"""
+ \s+(?P<advertise_system_mac>advertise-system-mac)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "advertise-system-mac",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "advertise_system_mac": "{{ not not advertise_system_mac }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "allow_vni_in_ethertag",
+ "getval": re.compile(
+ r"""
+ \s+(?P<allow_vni_in_ethertag>allow-vni-in-ethertag)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "allow-vni-in-ethertag",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "allow_vni_in_ethertag": "{{ not not allow_vni_in_ethertag }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "aggregate_address",
+ "getval": re.compile(
+ r"""
+ \s+aggregate-address
+ \s(?P<prefix>\S+)
+ (\s(?P<as_set>as-set))?
+ (\s(?P<summary_only>summary-only))?
+ (\sadvertise-map\s(?P<advertise_map>\S+))?
+ (\sattribute-map\s(?P<attribute_map>\S+))?
+ (\ssuppress-map\s(?P<suppress_map>\S+))?
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_aggregate_address,
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "aggregate_address": [
+ {
+ "prefix": "{{ prefix }}",
+ "as_set": "{{ True if as_set is defined else None }}",
+ "summary_only": "{{ True if summary_only is defined else None }}",
+ "advertise_map": "{{ advertise_map }}",
+ "attribute_map": "{{ attribute_map }}",
+ "suppress_map": "{{ suppress_map }}",
+ },
+ ],
+ },
+ },
+ },
+ },
+ {
+ "name": "client_to_client.no_reflection",
+ "getval": re.compile(
+ r"""
+ \s+no\sclient-to-client
+ \s(?P<reflection>reflection)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "client-to-client reflection",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "client_to_client": {
+ "no_reflection": "{{ not not reflection }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "dampen_igp_metric",
+ "getval": re.compile(
+ r"""
+ \s+dampen-igp-metric
+ \s(?P<dampen_igp_metric>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "dampen-igp-metric {{ dampen_igp_metric }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "dampen_igp_metric": "{{ dampen_igp_metric }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "dampening",
+ "getval": re.compile(
+ r"""
+ \s+(?P<dampening>dampening)
+ (\s(?P<decay_half_life>\d+))?
+ (\s(?P<start_reuse_route>\d+))?
+ (\s(?P<start_suppress_route>\d+))?
+ (\s(?P<max_suppress_time>\d+))?
+ (\sroute-map\s(?P<route_map>\S+))?
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_dampening,
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "dampening": {
+ "set": "{{ True if dampening is defined"
+ " and ((not decay_half_life|d(False),"
+ " not start_reuse_route|d(False), "
+ " not start_suppress_route|d(False), not max_suppress_time|d(False), not route_map|d(""))|all) }}",
+ "decay_half_life": "{{ decay_half_life }}",
+ "start_reuse_route": "{{ start_reuse_route }}",
+ "start_suppress_route": "{{ start_suppress_route }}",
+ "max_suppress_time": "{{ max_suppress_time }}",
+ "route_map": "{{ route_map }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "default_information.originate",
+ "getval": re.compile(
+ r"""
+ \s+default-information
+ \s(?P<originate>originate)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "default-information originate",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "default_information": {
+ "originate": "{{ not not originate }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "default_metric",
+ "getval": re.compile(
+ r"""
+ \s+default-metric
+ \s(?P<default_metric>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "default-metric {{ default_metric }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "default_metric": "{{ default_metric }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "distance",
+ "getval": re.compile(
+ r"""
+ \s+distance
+ \s(?P<ebgp_routes>\d+)
+ \s(?P<ibgp_routes>\d+)
+ \s(?P<local_routes>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "distance {{ distance.ebgp_routes }} {{ distance.ibgp_routes }} {{ distance.local_routes }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "distance": {
+ "ebgp_routes": "{{ ebgp_routes }}",
+ "ibgp_routes": "{{ ibgp_routes }}",
+ "local_routes": "{{ local_routes }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "export_gateway_ip",
+ "getval": re.compile(
+ r"""
+ \s+(?P<export_gateway_ip>export-gateway-ip)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "export-gateway-ip",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "export_gateway_ip": "{{ not not export_gateway_ip }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "inject_map",
+ "getval": re.compile(
+ r"""
+ \s+inject-map
+ \s(?P<route_map>\S+)
+ \sexist-map\s(?P<exist_map>\S+)
+ (\s(?P<copy_attributes>copy-attributes))?
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "inject-map {{ route_map }} exist-map {{ exist_map }}{{ ' copy-attributes' if copy_attributes|d(False) else '' }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "inject_map": [
+ {
+ "route_map": "{{ route_map }}",
+ "exist_map": "{{ exist_map }}",
+ "copy_attributes": "{{ not not copy_attributes }}",
+ },
+ ],
+ },
+ },
+ },
+ },
+ {
+ "name": "maximum_paths.parallel_paths",
+ "getval": re.compile(
+ r"""
+ \s+maximum-paths
+ \s(?P<parallel_paths>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "maximum-paths {{ maximum_paths.parallel_paths }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "maximum_paths": {
+ "parallel_paths": "{{ parallel_paths }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "maximum_paths.ibgp.parallel_paths",
+ "getval": re.compile(
+ r"""
+ \s+maximum-paths
+ \sibgp\s(?P<parallel_paths>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "maximum-paths ibgp {{ maximum_paths.ibgp.parallel_paths }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "maximum_paths": {
+ "ibgp": {
+ "parallel_paths": "{{ parallel_paths }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "maximum_paths.eibgp.parallel_paths",
+ "getval": re.compile(
+ r"""
+ \s+maximum-paths
+ \seibgp\s(?P<parallel_paths>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "maximum-paths eibgp {{ maximum_paths.eibgp.parallel_paths }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "maximum_paths": {
+ "eibgp": {
+ "parallel_paths": "{{ parallel_paths }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "maximum_paths.local.parallel_paths",
+ "getval": re.compile(
+ r"""
+ \s+maximum-paths
+ \slocal\s(?P<parallel_paths>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "maximum-paths local {{ maximum_paths.local.parallel_paths }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "maximum_paths": {
+ "local": {
+ "parallel_paths": "{{ parallel_paths }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "maximum_paths.mixed.parallel_paths",
+ "getval": re.compile(
+ r"""
+ \s+maximum-paths
+ \smixed\s(?P<parallel_paths>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "maximum-paths mixed {{ maximum_paths.mixed.parallel_paths }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "maximum_paths": {
+ "mixed": {
+ "parallel_paths": "{{ parallel_paths }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "networks",
+ "getval": re.compile(
+ r"""
+ \s+network
+ \s(?P<prefix>\S+)
+ (\sroute-map\s(?P<route_map>\S+))?
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "network {{ prefix }}{{ (' route-map ' + route_map) if route_map is defined else '' }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "networks": [
+ {
+ "prefix": "{{ prefix }}",
+ "route_map": "{{ route_map }}",
+ },
+ ],
+ },
+ },
+ },
+ },
+ {
+ "name": "nexthop.route_map",
+ "getval": re.compile(
+ r"""
+ \s+nexthop
+ \sroute-map\s(?P<route_map>\S+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "nexthop route-map {{ nexthop.route_map }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "nexthop": {
+ "route_map": "{{ route_map }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "nexthop.trigger_delay",
+ "getval": re.compile(
+ r"""
+ \s+nexthop
+ \strigger-delay
+ \scritical\s(?P<critical_delay>\d+)
+ \snon-critical\s(?P<non_critical_delay>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "nexthop trigger-delay critical {{ nexthop.trigger_delay.critical_delay }} non-critical {{ nexthop.trigger_delay.non_critical_delay }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "nexthop": {
+ "trigger_delay": {
+ "critical_delay": "{{ critical_delay }}",
+ "non_critical_delay": "{{ non_critical_delay }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "redistribute",
+ "getval": re.compile(
+ r"""
+ \s+redistribute
+ \s(?P<protocol>\S+)
+ (\s(?P<id>\S+))?
+ \sroute-map\s(?P<rmap>\S+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_redistribute,
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "redistribute": [
+ {
+ "protocol": "{{ protocol }}",
+ "id": "{{ id }}",
+ "route_map": "{{ rmap }}",
+ },
+ ],
+ },
+ },
+ },
+ },
+ {
+ "name": "retain.route_target.retain_all",
+ "getval": re.compile(
+ r"""
+ \s+retain\sroute-target
+ \s(?P<retain_all>all)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "retain route-target all",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "retain": {
+ "route_target": {
+ "retain_all": "{{ not not retain_all }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "retain.route_target.route_map",
+ "getval": re.compile(
+ r"""
+ \s+retain\sroute-target
+ \sroute-map\s(?P<route_map>\S+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "retain route-target route-map {{ retain.route_target.route_map }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "retain": {
+ "route_target": {
+ "route_map": "{{ route_map }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "suppress_inactive",
+ "getval": re.compile(
+ r"""
+ \s+(?P<suppress_inactive>suppress-inactive)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "suppress-inactive",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "suppress_inactive": "{{ not not suppress_inactive }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "table_map",
+ "getval": re.compile(
+ r"""
+ \s+table-map
+ \s(?P<name>\S+)
+ (\s(?P<filter>filter))?
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "table-map {{ table_map.name }}{{ ' filter' if table_map.filter|d(False) }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "table_map": {
+ "name": "{{ name }}",
+ "filter": "{{ not not filter }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "timers.bestpath_defer",
+ "getval": re.compile(
+ r"""
+ \s+timers
+ \sbestpath-defer\s(?P<defer_time>\d+)
+ \smaximum\s(?P<maximum_defer_time>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "timers bestpath-defer {{ timers.bestpath_defer.defer_time }} maximum {{ timers.bestpath_defer.maximum_defer_time }}",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "timers": {
+ "bestpath_defer": {
+ "defer_time": "{{ defer_time }}",
+ "maximum_defer_time": "{{ maximum_defer_time }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "wait_igp_convergence",
+ "getval": re.compile(
+ r"""
+ \s+(?P<wait_igp_convergence>wait-igp-convergence)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "wait-igp-convergence",
+ "result": {
+ "address_family": {
+ '{{ nbr|d("nbr_") + afi + "_" + safi|d() + "_" + vrf|d() }}': {
+ "wait_igp_convergence": "{{ not not wait_igp_convergence }}",
+ },
+ },
+ },
+ },
+ ]
+ # fmt: on
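+    # Hedged usage sketch: these parsers are assumed to be consumed through the
+    # netcommon NetworkTemplate interface (facts side parses, config side renders):
+    #   tmplt = Bgp_address_familyTemplate(lines=running_config.splitlines())
+    #   parsed = tmplt.parse()                                  # facts dict keyed by parser 'result'
+    #   cmds = tmplt.render(data, "dampen_igp_metric", False)   # native command(s) from 'setval'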
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/bgp_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/bgp_global.py
new file mode 100644
index 00000000..fa99397a
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/bgp_global.py
@@ -0,0 +1,1536 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The Bgp_global parser templates file. This contains
+a list of parser definitions and associated functions that
+facilitates both facts gathering and native command generation for
+the given network resource.
+"""
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.network_template import (
+ NetworkTemplate,
+)
+
+
+def _tmplt_confederation_peers(proc):
+ cmd = "confederation peers"
+ for peer in proc.get("confederation", {})["peers"]:
+ cmd += " {0}".format(peer)
+ return cmd
+
+
+def _tmplt_path_attribute(proc):
+ cmd = "path-attribute {action}".format(**proc)
+
+ if "type" in proc:
+ cmd += " {type}".format(**proc)
+ elif "range" in proc:
+ cmd += " range {start} {end}".format(**proc["range"])
+ cmd += " in"
+
+ return cmd
+
+
+def _tmplt_bfd(proc):
+ bfd = proc.get("bfd", {})
+ cmd = None
+
+ if bfd.get("set"):
+ cmd = "bfd"
+ if bfd.get("singlehop"):
+ cmd = "bfd singlehop"
+ elif bfd.get("multihop", {}).get("set"):
+ cmd = "bfd multihop"
+
+ if cmd:
+ return cmd
+
+
+class Bgp_globalTemplate(NetworkTemplate):
+ def __init__(self, lines=None, module=None):
+ super(Bgp_globalTemplate, self).__init__(lines=lines, tmplt=self, module=module)
+
+ # fmt: off
+ PARSERS = [
+ {
+ "name": "as_number",
+ "getval": re.compile(
+ r"""
+ ^router\sbgp\s(?P<as_number>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "router bgp {{ as_number }}",
+ "result": {
+ "as_number": "{{ as_number }}",
+ },
+ "shared": True,
+ },
+ {
+ "name": "vrf",
+ "getval": re.compile(
+ r"""
+ \s+vrf
+ \s(?P<vrf>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "vrf {{ vrf }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "vrf": "{{ vrf }}",
+ },
+ },
+ },
+ "shared": True,
+ },
+ {
+ "name": "affinity_group.group_id",
+ "getval": re.compile(
+ r"""
+ \s+affinity-group
+ \sactivate\s(?P<group_id>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "affinity-group activate {{ affinity_group.group_id }}",
+ "result": {
+ "affinity_group": {
+ "group_id": "{{ group_id }}",
+ },
+ },
+ },
+ {
+ "name": "bestpath.always_compare_med",
+ "getval": re.compile(
+ r"""
+ \s+bestpath\s(?P<always_compare_med>always-compare-med)
+ $""", re.VERBOSE,
+ ),
+ "setval": "bestpath always-compare-med",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "bestpath": {
+ "always_compare_med": "{{ not not always_compare_med }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "bestpath.as_path.ignore",
+ "getval": re.compile(
+ r"""
+ \s+bestpath\sas-path\s(?P<ignore>ignore)
+ $""", re.VERBOSE,
+ ),
+ "setval": "bestpath as-path ignore",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "bestpath": {
+ "as_path": {
+ "ignore": "{{ not not ignore }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "bestpath.as_path.multipath_relax",
+ "getval": re.compile(
+ r"""
+ \s+bestpath\sas-path\s(?P<multipath_relax>multipath-relax)
+ $""", re.VERBOSE,
+ ),
+ "setval": "bestpath as-path multipath-relax",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "bestpath": {
+ "as_path": {
+ "multipath_relax": "{{ not not multipath_relax }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "bestpath.compare_neighborid",
+ "getval": re.compile(
+ r"""
+ \s+bestpath\s(?P<compare_neighborid>compare-neighborid)
+ $""", re.VERBOSE,
+ ),
+ "setval": "bestpath compare-neighborid",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "bestpath": {
+ "compare_neighborid": "{{ not not compare_neighborid }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "bestpath.compare_routerid",
+ "getval": re.compile(
+ r"""
+ \s+bestpath\s(?P<compare_routerid>compare-routerid)
+ $""", re.VERBOSE,
+ ),
+ "setval": "bestpath compare-routerid",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "bestpath": {
+ "compare_routerid": "{{ not not compare_routerid }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "bestpath.cost_community_ignore",
+ "getval": re.compile(
+ r"""
+ \s+bestpath\scost-community\s(?P<cost_community_ignore>ignore)
+ $""", re.VERBOSE,
+ ),
+ "setval": "bestpath cost-community ignore",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "bestpath": {
+ "cost_community_ignore": "{{ not not cost_community_ignore }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "bestpath.igp_metric_ignore",
+ "getval": re.compile(
+ r"""
+ \s+bestpath\sigp-metric\s(?P<igp_metric_ignore>ignore)
+ $""", re.VERBOSE,
+ ),
+ "setval": "bestpath igp-metric ignore",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "bestpath": {
+ "igp_metric_ignore": "{{ not not igp_metric_ignore }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "bestpath.med.confed",
+ "getval": re.compile(
+ r"""
+ \s+bestpath\smed\s(?P<confed>confed)
+ $""", re.VERBOSE,
+ ),
+ "setval": "bestpath med confed",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "bestpath": {
+ "med": {
+ "confed": "{{ not not confed }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "bestpath.med.missing_as_worst",
+ "getval": re.compile(
+ r"""
+ \s+bestpath\smed\s(?P<missing_as_worst>missing-as-worst)
+ $""", re.VERBOSE,
+ ),
+ "setval": "bestpath med missing-as-worst",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "bestpath": {
+ "med": {
+ "missing_as_worst": "{{ not not missing_as_worst }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "bestpath.med.non_deterministic",
+ "getval": re.compile(
+ r"""
+ \s+bestpath\smed\s(?P<non_deterministic>non-deterministic)
+ $""", re.VERBOSE,
+ ),
+ "setval": "bestpath med non-deterministic",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "bestpath": {
+ "med": {
+ "non_deterministic": "{{ not not non_deterministic }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "cluster_id",
+ "getval": re.compile(
+ r"""
+ \s+cluster-id\s(?P<cluster_id>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "cluster-id {{ cluster_id }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "cluster_id": "{{ cluster_id }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "confederation.identifier",
+ "getval": re.compile(
+ r"""
+ \s+confederation\sidentifier\s(?P<identifier>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "confederation identifier {{ confederation.identifier }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "confederation": {
+ "identifier": "{{ identifier }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "confederation.peers",
+ "getval": re.compile(
+ r"""
+ \s+confederation\speers\s(?P<peers>.*)
+ $""", re.VERBOSE,
+ ),
+ "setval": _tmplt_confederation_peers,
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "confederation": {
+ "peers": "{{ peers }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "disable_policy_batching",
+ "getval": re.compile(
+ r"""
+ \s+(?P<disable_policy_batching>disable-policy-batching)
+ $""", re.VERBOSE,
+ ),
+ "setval": "disable-policy-batching",
+ "result": {
+ "disable_policy_batching": {
+ "set": "{{ not not disable_policy_batching }}",
+ },
+ },
+ },
+ {
+ "name": "disable_policy_batching.ipv4.prefix_list",
+ "getval": re.compile(
+ r"""
+ \s+disable-policy-batching\sipv4
+ \sprefix-list\s(?P<ipv4_prefix_list>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "disable-policy-batching ipv4 prefix-list {{ disable_policy_batching.ipv4.prefix_list }}",
+ "result": {
+ "disable_policy_batching": {
+ "ipv4": {
+ "prefix_list": "{{ ipv4_prefix_list }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "disable_policy_batching.ipv6.prefix_list",
+ "getval": re.compile(
+ r"""
+ \s+disable-policy-batching\sipv6
+ \sprefix-list\s(?P<ipv6_prefix_list>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "disable-policy-batching ipv6 prefix-list {{ disable_policy_batching.ipv6.prefix_list }}",
+ "result": {
+ "disable_policy_batching": {
+ "ipv6": {
+ "prefix_list": "{{ ipv6_prefix_list }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "disable_policy_batching.nexthop",
+ "getval": re.compile(
+ r"""
+ \s+disable-policy-batching\s(?P<nexthop>nexthop)
+ $""", re.VERBOSE,
+ ),
+ "setval": "disable-policy-batching nexthop",
+ "result": {
+ "disable_policy_batching": {
+ "nexthop": "{{ not not nexthop }}",
+ },
+ },
+ },
+ {
+ "name": "dynamic_med_interval",
+ "getval": re.compile(
+ r"""
+ \s+dynamic-med-interval\s(?P<dynamic_med_interval>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "dynamic-med-interval {{ dynamic_med_interval }}",
+ "result": {
+ "dynamic_med_interval": "{{ dynamic_med_interval }}",
+ },
+ },
+ {
+ "name": "enforce_first_as",
+ "getval": re.compile(
+ r"""
+ \s+no\s(?P<enforce_first_as>enforce-first-as)
+ $""", re.VERBOSE,
+ ),
+ "setval": "enforce-first-as",
+ "result": {
+ "enforce_first_as": "{{ not enforce_first_as }}",
+ },
+ },
+ {
+ "name": "enhanced_error",
+ "getval": re.compile(
+ r"""
+ \s+no\s(?P<enhanced_error>enhanced-error)
+ $""", re.VERBOSE,
+ ),
+ "setval": "enhanced-error",
+ "result": {
+ "enhanced_error": "{{ not enhanced_error }}",
+ },
+ },
+ {
+ "name": "fast_external_fallover",
+ "getval": re.compile(
+ r"""
+ \s+no\s(?P<fast_external_fallover>fast-external-fallover)
+ $""", re.VERBOSE,
+ ),
+ "setval": "fast-external-fallover",
+ "result": {
+ "fast_external_fallover": "{{ not fast_external_fallover }}",
+ },
+ },
+ {
+ "name": "flush_routes",
+ "getval": re.compile(
+ r"""
+ \s+(?P<flush_routes>flush-routes)
+ $""", re.VERBOSE,
+ ),
+ "setval": "flush-routes",
+ "result": {
+ "flush_routes": "{{ not not flush_routes }}",
+ },
+ },
+ {
+ "name": "graceful_restart",
+ "getval": re.compile(
+ r"""
+ \s+no\s(?P<graceful_restart>graceful-restart)
+ $""", re.VERBOSE,
+ ),
+ "setval": "graceful-restart",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "graceful_restart": {
+ "set": "{{ not graceful_restart }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "graceful_restart.restart_time",
+ "getval": re.compile(
+ r"""
+ \s+graceful-restart\srestart-time\s(?P<restart_time>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "graceful-restart restart-time {{ graceful_restart.restart_time }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "graceful_restart": {
+ "restart_time": "{{ restart_time }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "graceful_restart.stalepath_time",
+ "getval": re.compile(
+ r"""
+ \s+graceful-restart\sstalepath-time\s(?P<stalepath_time>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "graceful-restart stalepath-time {{ graceful_restart.stalepath_time }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "graceful_restart": {
+ "stalepath_time": "{{ stalepath_time }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "graceful_restart.helper",
+ "getval": re.compile(
+ r"""
+ \s+(?P<helper>graceful-restart-helper)
+ $""", re.VERBOSE,
+ ),
+ "setval": "graceful-restart-helper",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "graceful_restart": {
+ "helper": "{{ not not helper }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "graceful_shutdown.activate",
+ "getval": re.compile(
+ r"""
+ \s+graceful-shutdown
+ \s(?P<activate>activate)
+ (\sroute-map
+ \s(?P<route_map>\S+))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "graceful-shutdown activate{{ ' route-map ' + graceful_shutdown.activate.route_map if graceful_shutdown.activate.route_map is defined }}",
+ "result": {
+ "graceful_shutdown": {
+ "activate": {
+ "set": "{{ True if activate is defined and route_map is undefined else None }}",
+ "route_map": "{{ route_map }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "graceful_shutdown.aware",
+ "getval": re.compile(
+ r"""
+ \s+no\sgraceful-shutdown
+ \s(?P<aware>aware)
+ $""", re.VERBOSE,
+ ),
+ "setval": "graceful-shutdown aware",
+ "result": {
+ "graceful_shutdown": {
+ "aware": "{{ not aware }}",
+ },
+ },
+ },
+ {
+ "name": "isolate",
+ "getval": re.compile(
+ r"""
+ \s+(?P<isolate>isolate)
+ (\s(?P<include_local>include-local))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "isolate{{ ' include-local' if isolate.include_local|d(False) is True }}",
+ "result": {
+ "isolate": {
+ "set": "{{ True if isolate is defined and include_local is not defined else None }}",
+ "include_local": "{{ not not include_local }}",
+ },
+ },
+ },
+ {
+ "name": "log_neighbor_changes",
+ "getval": re.compile(
+ r"""
+ \s+(?P<log_neighbor_changes>log-neighbor-changes)
+ $""", re.VERBOSE,
+ ),
+ "setval": "log-neighbor-changes",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "log_neighbor_changes": "{{ not not log_neighbor_changes }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "maxas_limit",
+ "getval": re.compile(
+ r"""
+ \s+maxas-limit\s(?P<maxas_limit>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "maxas-limit {{ maxas_limit }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "maxas_limit": "{{ maxas_limit }}",
+ },
+ },
+ },
+ },
+ # start neighbor parsers
+ {
+ "name": "neighbor_address",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "neighbor {{ neighbor_address }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "neighbor_address": "{{ neighbor_address }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "bfd",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \s(?P<bfd>bfd)
+ (\s(?P<singlehop>singlehop))?
+ (\s(?P<multihop>multihop))?
+ $""", re.VERBOSE,
+ ),
+ "setval": _tmplt_bfd,
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "bfd": {
+ "set": "{{ True if bfd is defined and singlehop is undefined and multihop is undefined else None }}",
+ "singlehop": "{{ not not singlehop }}",
+ "multihop": {
+ "set": "{{ not not multihop }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "bfd.multihop.interval",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \sbfd\smultihop\sinterval
+ \s(?P<tx_interval>\d+)
+ \smin_rx\s(?P<min_rx_interval>\d+)
+ \smultiplier\s(?P<multiplier>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "bfd multihop interval"
+ " {{ bfd.multihop.interval.tx_interval }}"
+ " min_rx {{ bfd.multihop.interval.min_rx_interval }}"
+ " multiplier {{ bfd.multihop.interval.multiplier }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "bfd": {
+ "multihop": {
+ "interval": {
+ "tx_interval": "{{ tx_interval }}",
+ "min_rx_interval": "{{ min_rx_interval }}",
+ "multiplier": "{{ multiplier }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "remote_as",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \sremote-as\s(?P<remote_as>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "remote-as {{ remote_as }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "remote_as": "{{ remote_as }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "neighbor_affinity_group.group_id",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \saffinity-group\s(?P<group_id>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "affinity-group {{ neighbor_affinity_group.group_id }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "neighbor_affinity_group": {
+ "group_id": "{{ group_id }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "bmp_activate_server",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \sbmp-activate-server\s(?P<bmp_activate_server>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "bmp-activate-server {{ bmp_activate_server }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "bmp_activate_server": "{{ bmp_activate_server }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "capability",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \scapability\ssuppress\s(?P<suppress_4_byte_as>4-byte-as)
+ $""", re.VERBOSE,
+ ),
+ "setval": "capability suppress 4-byte-as",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "capability": {
+ "suppress_4_byte_as": "{{ not not suppress_4_byte_as }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "description",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \sdescription\s(?P<description>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "description {{ description }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "description": "{{ description }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "disable_connected_check",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \s(?P<disable_connected_check>disable-connected-check)
+ $""", re.VERBOSE,
+ ),
+ "setval": "disable-connected-check",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "disable_connected_check": "{{ not not disable_connected_check }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "dont_capability_negotiate",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \s(?P<dont_capability_negotiate>dont-capability-negotiate)
+ $""", re.VERBOSE,
+ ),
+ "setval": "dont-capability-negotiate",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "dont_capability_negotiate": "{{ not not dont_capability_negotiate}}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "dscp",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \sdscp\s(?P<dscp>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "dscp {{ dscp }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "dscp": "{{ dscp }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "dynamic_capability",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \s(?P<dynamic_capability>dynamic-capability)
+ $""", re.VERBOSE,
+ ),
+ "setval": "dynamic-capability",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "dynamic_capability": "{{ not not dynamic_capability }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "ebgp_multihop",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \sebgp-multihop\s(?P<ebgp_multihop>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ebgp-multihop {{ ebgp_multihop }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "ebgp_multihop": "{{ ebgp_multihop }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "graceful_shutdown",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \sgraceful-shutdown
+ \s(?P<activate>activate)
+ (\sroute-map\s(?P<route_map>\S+))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "graceful-shutdown{{ (' route-map ' + graceful_shutdown.route_map) if graceful_shutdown.route_map is defined }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "graceful_shutdown": {
+ "activate": {
+ "set": "{{ True if activate is defined and route_map is undefined else None }}",
+ "route_map": "{{ route_map }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "inherit.peer",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \sinherit
+ \speer\s(?P<peer>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "inherit peer {{ inherit.peer }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "inherit": {
+ "peer": "{{ peer }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "inherit.peer_session",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \sinherit
+ \speer-session\s(?P<peer_session>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "inherit peer-session {{ inherit.peer_session }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "inherit": {
+ "peer_session": "{{ peer_session }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "local_as",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \slocal-as\s(?P<local_as>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "local-as {{ local_as }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "local_as": "{{ local_as }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "log_neighbor_changes",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \s(?P<log_neighbor_changes>log-neighbor-changes)
+ (\s(?P<disable>disable))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "log-neighbor-changes{{ ' disable' if log_neighbor_changes.disable is defined }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "log_neighbor_changes": {
+ "set": "{{ True if log_neighbor_changes is defined and disable is undefined }}",
+ "disable": "{{ not not disable }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "low_memory",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \slow-memory\s(?P<exempt>exempt)
+ $""", re.VERBOSE,
+ ),
+ "setval": "low-memory exempt",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "low_memory": {
+ "exempt": "{{ not not exempt }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "password",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \spassword\s(?P<encryption>\d+)
+ \s(?P<key>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "password{{ (' ' + password.encryption|string) if password.encryption is defined }} {{ password.key }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "password": {
+ "encryption": "{{ encryption }}",
+ "key": "{{ key }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "path_attribute",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \spath-attribute\s(?P<action>\S+)\s
+ (?P<type>\d+)?
+ (range\s(?P<start>\d+)\s(?P<end>\d+))?
+ \sin
+ $""", re.VERBOSE,
+ ),
+ "setval": _tmplt_path_attribute,
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "path_attribute": [
+ {
+ "action": "{{ action }}",
+ "type": "{{ type if type is defined else None }}",
+ "range": {
+ "start": "{{ start if start is defined }}",
+ "end": "{{ end if end is defined }}",
+ },
+ },
+ ],
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "peer_type",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \speer-type\s(?P<peer_type>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "peer-type {{ peer_type }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "peer_type": "{{ peer_type }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "remove_private_as",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \s(?P<remove_private_as>remove-private-as)
+ (\s(?P<all>all))?
+ (\s(?P<replace_as>replace-as))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "remove-private-as",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "remove_private_as": {
+ "set": "{{ True if remove_private_as is defined and replace_as is undefined and all is undefined else None }}",
+ "replace_as": "{{ not not replace_as }}",
+ "all": "{{ not not all }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "shutdown",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \s(?P<shutdown>shutdown)
+ $""", re.VERBOSE,
+ ),
+ "setval": "shutdown",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "shutdown": "{{ not not shutdown }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "timers",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \stimers\s(?P<keepalive>\d+)\s(?P<holdtime>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "timers {{ timers.keepalive }} {{ timers.holdtime }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "timers": {
+ "keepalive": "{{ keepalive }}",
+ "holdtime": "{{ holdtime }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "transport",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \stransport\sconnection-mode
+ \s(?P<passive>passive)
+ $""", re.VERBOSE,
+ ),
+ "setval": "transport connection-mode passive",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "transport": {
+ "connection_mode": {
+ "passive": "{{ not not passive }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "ttl_security",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \sttl-security\shops\s(?P<hops>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ttl-security hops {{ ttl_security.hops }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "ttl_security": {
+ "hops": "{{ hops }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "update_source",
+ "getval": re.compile(
+ r"""
+ \s+neighbor\s(?P<neighbor_address>\S+)
+ \supdate-source\s(?P<update_source>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "update-source {{ update_source }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbors": {
+ "{{ neighbor_address }}": {
+ "update_source": "{{ update_source }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ # end neighbor parsers
+ {
+ "name": "neighbor_down.fib_accelerate",
+ "getval": re.compile(
+ r"""
+ \s+neighbor-down\s(?P<fib_accelerate>fib-accelerate)
+ $""", re.VERBOSE,
+ ),
+ "setval": "neighbor-down fib-accelerate",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "neighbor_down": {
+ "fib_accelerate": "{{ not not fib_accelerate }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "nexthop.suppress_default_resolution",
+ "getval": re.compile(
+ r"""
+ \s+nexthop
+ \s(?P<suppress_default_resolution>suppress-default-resolution)
+ $""", re.VERBOSE,
+ ),
+ "setval": "nexthop suppress-default-resolution",
+ "result": {
+ "nexthop": {
+ "suppress_default_resolution": "{{ not not suppress_default_resolution }}",
+ },
+ },
+ },
+ {
+ "name": "reconnect_interval",
+ "getval": re.compile(
+ r"""
+ \s+reconnect-interval
+ \s(?P<reconnect_interval>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "reconnect-interval {{ reconnect_interval }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "reconnect_interval": "{{ reconnect_interval }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "router_id",
+ "getval": re.compile(
+ r"""
+ \s+router-id
+ \s(?P<router_id>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "router-id {{ router_id }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "router_id": "{{ router_id }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "shutdown",
+ "getval": re.compile(
+ r"""
+ \s+(?P<shutdown>shutdown)
+ $""", re.VERBOSE,
+ ),
+ "setval": "shutdown",
+ "result": {
+ "shutdown": "{{ not not shutdown }}",
+ },
+ },
+ {
+ "name": "suppress_fib_pending",
+ "getval": re.compile(
+ r"""
+ \s+no\s(?P<suppress_fib_pending>suppress-fib-pending)
+ $""", re.VERBOSE,
+ ),
+ "setval": "suppress-fib-pending",
+ "result": {
+ "suppress_fib_pending": "{{ not suppress_fib_pending }}",
+ },
+ },
+ {
+ "name": "timers.bestpath_limit",
+ "getval": re.compile(
+ r"""
+ \s+timers\sbestpath-limit
+ \s(?P<timeout>\d+)
+ (\s(?P<always>always))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "timers bestpath-limit {{ timers.bestpath_limit.timeout }}{{ ' always' if timers.bestpath_limit.timeout is defined }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "timers": {
+ "bestpath_limit": {
+ "timeout": "{{ timeout }}",
+ "always": "{{ not not always }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "timers.bgp",
+ "getval": re.compile(
+ r"""
+ \s+timers\sbgp
+ \s(?P<keepalive>\d+)
+ (\s(?P<holdtime>\d+))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "timers bgp {{ timers.bgp.keepalive }} {{ timers.bgp.holdtime }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "timers": {
+ "bgp": {
+ "keepalive": "{{ keepalive }}",
+ "holdtime": "{{ holdtime }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "timers.prefix_peer_timeout",
+ "getval": re.compile(
+ r"""
+ \s+timers
+ \sprefix-peer-timeout\s(?P<prefix_peer_timeout>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "timers prefix-peer-timeout {{ timers.prefix_peer_timeout }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "timers": {
+ "prefix_peer_timeout": "{{ prefix_peer_timeout }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "timers.prefix_peer_wait",
+ "getval": re.compile(
+ r"""
+ \s+timers
+ \sprefix-peer-wait\s(?P<prefix_peer_wait>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "timers prefix-peer-wait {{ timers.prefix_peer_wait }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "timers": {
+ "prefix_peer_wait": "{{ prefix_peer_wait }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "fabric_soo",
+ "getval": re.compile(
+ r"""
+ \s+fabric-soo
+ \s(?P<fabric_soo>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "fabric-soo {{ fabric_soo }}",
+ "result": {
+ "fabric_soo": "{{ fabric_soo }}",
+ },
+ },
+ {
+ "name": "rd",
+ "getval": re.compile(
+ r"""
+ \s+rd\s(?P<dual>dual)
+ (\sid\s(?P<id>\d+))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "rd dual{{' id ' + rd.id if rd.id is defined }}",
+ "result": {
+ "rd": {
+ "dual": "{{ not not dual }}",
+ "id": "{{ id }}",
+ },
+ },
+ },
+ # VRF only
+ {
+ "name": "allocate_index",
+ "getval": re.compile(
+ r"""
+ \s+allocate-index\s(?P<allocate_index>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "allocate-index {{ allocate_index }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "allocate_index": "{{ allocate_index }}",
+ },
+ },
+ },
+ },
+ # VRF only
+ {
+ "name": "local_as",
+ "getval": re.compile(
+ r"""
+ \s+local-as\s(?P<local_as>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "local-as {{ local_as }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "local_as": "{{ local_as }}",
+ },
+ },
+ },
+ },
+ ]
+ # fmt: on
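A rough sketch of how this template is typically driven from the facts and config sides, assuming the parse()/render() interface that NetworkTemplate provides to the other cisco.nxos resource modules (the output shown is approximate, not verbatim):

from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.bgp_global import (
    Bgp_globalTemplate,
)

lines = [
    "router bgp 65535",
    "  router-id 192.0.2.1",
]
tmplt = Bgp_globalTemplate(lines=lines)

facts = tmplt.parse()
# roughly {'as_number': '65535', 'vrfs': {'vrf_': {'router_id': '192.0.2.1'}}}

# turning structured data back into a command through the "router_id" setval
cmd = tmplt.render({"router_id": "192.0.2.1"}, "router_id", False)
# roughly "router-id 192.0.2.1"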
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/bgp_neighbor_address_family.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/bgp_neighbor_address_family.py
new file mode 100644
index 00000000..327ba4d2
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/bgp_neighbor_address_family.py
@@ -0,0 +1,894 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The Bgp_neighbor_address_family parser templates file. This contains
+a list of parser definitions and associated functions that
+facilitates both facts gathering and native command generation for
+the given network resource.
+"""
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.network_template import (
+ NetworkTemplate,
+)
+
+
+def _tmplt_maximum_prefix(data):
+ data = data["maximum_prefix"]
+ cmd = "maximum-prefix {max_prefix_limit}".format(**data)
+ if "generate_warning_threshold" in data:
+ cmd += " {generate_warning_threshold}".format(**data)
+ if "restart_interval" in data:
+ cmd += " restart {restart_interval}".format(**data)
+ if data.get("warning_only"):
+ cmd += " warning-only"
+ return cmd
+
+
+class Bgp_neighbor_address_familyTemplate(NetworkTemplate):
+ def __init__(self, lines=None):
+ super(Bgp_neighbor_address_familyTemplate, self).__init__(lines=lines, tmplt=self)
+
+ # fmt: off
+ PARSERS = [
+ {
+ "name": "as_number",
+ "getval": re.compile(
+ r"""
+ ^router\sbgp\s(?P<as_number>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "router bgp {{ as_number }}",
+ "result": {
+ "as_number": "{{ as_number }}",
+ },
+ "shared": True,
+ },
+ {
+ "name": "address_family",
+ "getval": re.compile(
+ r"""
+ (vrf\s(?P<vrf>\S+))?
+ \s*neighbor\s(?P<neighbor>\S+)
+ \saddress-family
+ \s(?P<afi>\S+)
+ (\s(?P<safi>\S+))?
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "address-family {{ afi }}{{ (' ' + safi) if safi is defined else '' }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "neighbor_address": "{{ neighbor }}",
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "afi": "{{ afi }}",
+ "safi": "{{ safi }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "shared": True,
+ },
+ {
+ "name": "advertise_map.exist_map",
+ "getval": re.compile(
+ r"""
+ advertise-map
+ \s(?P<route_map>\S+)
+ \sexist-map\s(?P<exist_map>\S+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "advertise-map {{ advertise_map.route_map }} exist-map {{ advertise_map.exist_map }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "advertise_map": {
+ "route_map": "{{ route_map }}",
+ "exist_map": "{{ exist_map }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "advertise_map.non_exist_map",
+ "getval": re.compile(
+ r"""
+ advertise-map
+ \s(?P<route_map>\S+)
+ \snon-exist-map\s(?P<non_exist_map>\S+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "advertise-map {{ advertise_map.route_map }} non-exist-map {{ advertise_map.non_exist_map }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "advertise_map": {
+ "route_map": "{{ route_map }}",
+ "non_exist_map": "{{ non_exist_map }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "advertisement_interval",
+ "getval": re.compile(
+ r"""
+ advertisement-interval
+ \s(?P<advertisement_interval>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "advertisement-interval {{ advertisement_interval }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "advertisement_interval": "{{ advertisement_interval }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "allowas_in",
+ "getval": re.compile(
+ r"""
+ (?P<allowas_in>allowas-in)
+ \s(?P<max_occurences>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "allowas-in{{ ' ' + allowas_in.max_occurences|string if allowas_in.max_occurences is defined else '' }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "allowas_in": {
+ "set": "{{ True if allowas_in is defined and max_occurences is undefined }}",
+ "max_occurences": "{{ max_occurences }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "as_override",
+ "getval": re.compile(
+ r"""
+ (?P<as_override>as-override)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "as-override",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "as_override": "{{ not not as_override }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "capability.additional_paths.receive",
+ "getval": re.compile(
+ r"""
+ capability\sadditional-paths
+ \s(?P<receive>receive)
+ (\s(?P<disable>disable))?
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "capability additional-paths receive{{ ' disable' if capability.additional_paths.receive == 'disable' else '' }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "capability": {
+ "additional_paths": {
+ "receive": "{{ 'disable' if disable is defined else 'enable' }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "capability.additional_paths.send",
+ "getval": re.compile(
+ r"""
+ capability\sadditional-paths
+ \s(?P<send>send)
+ (\s(?P<disable>disable))?
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "capability additional-paths send{{ ' disable' if capability.additional_paths.send == 'disable' else '' }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "capability": {
+ "additional_paths": {
+ "send": "{{ 'disable' if disable is defined else 'enable' }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "default_originate",
+ "getval": re.compile(
+ r"""
+ (?P<default_originate>default-originate)
+ (\sroute-map\s(?P<route_map>\S+))?
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "default-originate{{ ' route-map ' + default_originate.route_map if default_originate.route_map is defined else '' }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "default_originate": {
+ "set": "{{ True if default_originate is defined and route_map is not defined }}",
+ "route_map": "{{ route_map }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "disable_peer_as_check",
+ "getval": re.compile(
+ r"""
+ (?P<disable_peer_as_check>disable-peer-as-check)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "disable-peer-as-check",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "disable_peer_as_check": "{{ not not disable_peer_as_check }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "filter_list.inbound",
+ "getval": re.compile(
+ r"""
+ filter-list
+ \s(?P<in>\S+)\s(?:in)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "filter-list {{ filter_list.inbound }} in",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "filter_list": {
+ "inbound": "{{ in }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "filter_list.outbound",
+ "getval": re.compile(
+ r"""
+ filter-list
+ \s(?P<out>\S+)\s(?:out)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "filter-list {{ filter_list.outbound }} out",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "filter_list": {
+ "outbound": "{{ out }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "inherit",
+ "getval": re.compile(
+ r"""
+ inherit\speer-policy
+ \s(?P<template>\S+)
+ \s(?P<sequence>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "inherit peer-policy {{ inherit.template }} {{ inherit.sequence }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "inherit": {
+ "template": "{{ template }}",
+ "sequence": "{{ sequence }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "maximum_prefix",
+ "getval": re.compile(
+ r"""
+ maximum-prefix
+ \s(?P<max_prefix_limit>\d+)
+ (\s(?P<generate_warning_threshold>\d+))?
+ (\srestart\s(?P<restart_interval>\d+))?
+ (\s(?P<warning_only>warning-only))?
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_maximum_prefix,
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "maximum_prefix": {
+ "max_prefix_limit": "{{ max_prefix_limit }}",
+ "generate_warning_threshold": "{{ generate_warning_threshold }}",
+ "restart_interval": "{{ restart_interval }}",
+ "warning_only": "{{ not not warning_only }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "next_hop_self",
+ "getval": re.compile(
+ r"""
+ (?P<next_hop_self>next-hop-self)
+ (\s(?P<all_routes>all))?
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "next-hop-self{{ ' all' if next_hop_self.all_routes|d(False) else '' }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "next_hop_self": {
+ "set": "{{ True if next_hop_self is defined and all_routes is not defined }}",
+ "all_routes": "{{ not not all_routes }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "next_hop_third_party",
+ "getval": re.compile(
+ r"""
+ no\s(?P<next_hop_third_party>next-hop-third-party)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "next-hop-third-party",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "next_hop_third_party": "{{ not next_hop_third_party }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "prefix_list.inbound",
+ "getval": re.compile(
+ r"""
+ prefix-list
+ \s(?P<in>\S+)\s(?:in)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "prefix-list {{ prefix_list.inbound }} in",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "prefix_list": {
+ "inbound": "{{ in }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "prefix_list.outbound",
+ "getval": re.compile(
+ r"""
+ prefix-list
+ \s(?P<out>\S+)\s(?:out)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "prefix-list {{ prefix_list.outbound }} out",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "prefix_list": {
+ "outbound": "{{ out }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "rewrite_evpn_rt_asn",
+ "getval": re.compile(
+ r"""
+ (?P<rewrite_evpn_rt_asn>rewrite-evpn-rt-asn)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "rewrite-evpn-rt-asn",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "rewrite_evpn_rt_asn": "{{ not not rewrite_evpn_rt_asn }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "route_map.inbound",
+ "getval": re.compile(
+ r"""
+ route-map
+ \s(?P<in>\S+)\s(?:in)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "route-map {{ route_map.inbound }} in",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "route_map": {
+ "inbound": "{{ in }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "route_map.outbound",
+ "getval": re.compile(
+ r"""
+ route-map
+ \s(?P<out>\S+)\s(?:out)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "route-map {{ route_map.outbound }} out",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "route_map": {
+ "outbound": "{{ out }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "route_reflector_client",
+ "getval": re.compile(
+ r"""
+ (?P<route_reflector_client>route-reflector-client)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "route-reflector-client",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "route_reflector_client": "{{ not not route_reflector_client }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "send_community.standard",
+ "getval": re.compile(
+ r"""
+ (?P<send_community>send-community)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "send-community",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "send_community": {
+ "standard": "{{ True if send_community is defined }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "send_community.extended",
+ "getval": re.compile(
+ r"""
+ send-community
+ \s(?P<extended>extended)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "send-community extended",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "send_community": {
+ "extended": "{{ True if extended is defined }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "soft_reconfiguration_inbound",
+ "getval": re.compile(
+ r"""
+ (?P<soft_reconfiguration_inbound>soft-reconfiguration\sinbound)
+ (\s(?P<always>always))?
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "soft-reconfiguration inbound{{ ' always' if soft_reconfiguration_inbound.always|d(False) else '' }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "soft_reconfiguration_inbound": {
+ "set": "{{ True if soft_reconfiguration_inbound is defined and always is undefined }}",
+ "always": "{{ not not always }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "soo",
+ "getval": re.compile(
+ r"""
+ soo\s(?P<soo>\S+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "soo {{ soo }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "soo": "{{ soo }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "suppress_inactive",
+ "getval": re.compile(
+ r"""
+ (?P<suppress_inactive>suppress-inactive)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "suppress-inactive",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "suppress_inactive": "{{ not not suppress_inactive }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "unsuppress_map",
+ "getval": re.compile(
+ r"""
+ unsuppress-map\s(?P<unsuppress_map>\S+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "unsuppress-map {{ unsuppress_map }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "unsuppress_map": "{{ unsuppress_map }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "weight",
+ "getval": re.compile(
+ r"""
+ weight\s(?P<weight>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "weight {{ weight }}",
+ "result": {
+ "vrfs": {
+ "{{ 'vrf_' + vrf|d() }}": {
+ "vrf": "{{ vrf }}",
+ "neighbors": {
+ "{{ neighbor }}": {
+ "address_family": {
+ '{{ afi + "_" + safi|d() }}': {
+ "weight": "{{ weight }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ ]
+ # fmt: on
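The _tmplt_maximum_prefix helper above is plain Python, so its behaviour is easy to check in isolation; an illustrative call with made-up values:

from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.bgp_neighbor_address_family import (
    _tmplt_maximum_prefix,
)

data = {
    "maximum_prefix": {
        "max_prefix_limit": 20,
        "generate_warning_threshold": 75,
        "restart_interval": 60,
        "warning_only": False,
    },
}
print(_tmplt_maximum_prefix(data))
# -> maximum-prefix 20 75 restart 60   (the trailing " warning-only" is skipped because the flag is falsy)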
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/hostname.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/hostname.py
new file mode 100644
index 00000000..bf922fb4
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/hostname.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The Hostname parser templates file. This contains
+a list of parser definitions and associated functions that
+facilitates both facts gathering and native command generation for
+the given network resource.
+"""
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.network_template import (
+ NetworkTemplate,
+)
+
+
+class HostnameTemplate(NetworkTemplate):
+ def __init__(self, lines=None, module=None):
+ super(HostnameTemplate, self).__init__(lines=lines, tmplt=self, module=module)
+
+ # fmt: off
+ PARSERS = [
+ {
+ "name": "hostname",
+ "getval": re.compile(
+ r"""
+ ^hostname\s(?P<hostname>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "hostname {{ hostname }}",
+ "result": {
+ "hostname": "{{ hostname }}",
+ },
+ },
+ ]
+ # fmt: on
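With a single parser, the hostname template is the smallest end-to-end example; a rough sketch, again assuming the stock NetworkTemplate parse()/render() methods (exact return shapes may differ slightly):

from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.hostname import (
    HostnameTemplate,
)

tmplt = HostnameTemplate(lines=["hostname nxos101"])
print(tmplt.parse())                                             # roughly {'hostname': 'nxos101'}
print(tmplt.render({"hostname": "nxos101"}, "hostname", False))  # roughly "hostname nxos101"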
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/logging_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/logging_global.py
new file mode 100644
index 00000000..8287794e
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/logging_global.py
@@ -0,0 +1,480 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The Logging_global parser templates file. This contains
+a list of parser definitions and associated functions that
+facilitates both facts gathering and native command generation for
+the given network resource.
+"""
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.network_template import (
+ NetworkTemplate,
+)
+
+
+def _tmplt_hosts(data):
+ cmd = "logging server {host}"
+ data["client_identity"] = data.get("secure", {}).get("trustpoint", {}).get("client_identity")
+
+ if "severity" in data:
+ cmd += " {severity}"
+ if "port" in data:
+ cmd += " port {port}"
+ if data["client_identity"]:
+ cmd += " secure trustpoint client-identity {client_identity}"
+ if "facility" in data:
+ cmd += " facility {facility}"
+ if "use_vrf" in data:
+ cmd += " use-vrf {use_vrf}"
+
+ cmd = cmd.format(**data)
+
+ return cmd
+
+
+class Logging_globalTemplate(NetworkTemplate):
+ def __init__(self, lines=None, module=None):
+ super(Logging_globalTemplate, self).__init__(lines=lines, tmplt=self, module=module)
+
+ # fmt: off
+ PARSERS = [
+ {
+ "name": "console",
+ "getval": re.compile(
+ r"""
+ ^(?P<negated>no\s)?
+ logging\sconsole
+ (\s(?P<severity>\d))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "{{ 'no ' if console.state|d('') == 'disabled' else '' }}"
+ "logging console"
+ "{{ (' ' + console.severity|string) if console.severity is defined else '' }}",
+ "result": {
+ "console": {
+ "state": "{{ 'disabled' if negated is defined else None }}",
+ "severity": "{{ severity }}",
+ },
+ },
+ },
+ {
+ "name": "event.link_status.enable",
+ "getval": re.compile(
+ r"""
+ ^(?P<negated>no\s)?
+ logging\sevent\slink-status\senable
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging event link-status enable",
+ "result": {
+ "event": {
+ "link_status": {
+ "enable": "{{ False if negated is defined else True }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "event.link_status.default",
+ "getval": re.compile(
+ r"""
+ ^(?P<negated>no\s)?
+ logging\sevent\slink-status\sdefault
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging event link-status default",
+ "result": {
+ "event": {
+ "link_status": {
+ "default": "{{ False if negated is defined else True }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "event.trunk_status.enable",
+ "getval": re.compile(
+ r"""
+ ^(?P<negated>no\s)?
+ logging\sevent\strunk-status\senable
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging event trunk-status enable",
+ "result": {
+ "event": {
+ "trunk_status": {
+ "enable": "{{ False if negated is defined else True }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "event.trunk_status.default",
+ "getval": re.compile(
+ r"""
+ ^(?P<negated>no\s)?
+ logging\sevent\strunk-status\sdefault
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging event trunk-status default",
+ "result": {
+ "event": {
+ "trunk_status": {
+ "default": "{{ False if negated is defined else True }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "history.severity",
+ "getval": re.compile(
+ r"""
+ ^logging\shistory
+ \s(?P<severity>\d)
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging history {{ history.severity }}",
+ "result": {
+ "history": {
+ "severity": "{{ severity }}",
+ },
+ },
+ },
+ {
+ "name": "history.size",
+ "getval": re.compile(
+ r"""
+ ^logging\shistory\ssize
+ \s(?P<size>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging history size {{ history.size }}",
+ "result": {
+ "history": {
+ "size": "{{ size }}",
+ },
+ },
+ },
+ {
+ "name": "ip.access_list.cache.entries",
+ "getval": re.compile(
+ r"""
+ ^logging\sip\saccess-list\scache
+ \sentries\s(?P<entries>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging ip access-list cache entries {{ ip.access_list.cache.entries }}",
+ "result": {
+ "ip": {
+ "access_list": {
+ "cache": {
+ "entries": "{{ entries }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "ip.access_list.cache.interval",
+ "getval": re.compile(
+ r"""
+ ^logging\sip\saccess-list\scache
+ \sinterval\s(?P<interval>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging ip access-list cache interval {{ ip.access_list.cache.interval }}",
+ "result": {
+ "ip": {
+ "access_list": {
+ "cache": {
+ "interval": "{{ interval }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "ip.access_list.cache.threshold",
+ "getval": re.compile(
+ r"""
+ ^logging\sip\saccess-list\scache
+ \sthreshold\s(?P<threshold>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging ip access-list cache threshold {{ ip.access_list.cache.threshold }}",
+ "result": {
+ "ip": {
+ "access_list": {
+ "cache": {
+ "threshold": "{{ threshold }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "ip.access_list.detailed",
+ "getval": re.compile(
+ r"""
+ ^logging\sip\saccess-list
+ \s(?P<detailed>detailed)
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging ip access-list detailed",
+ "result": {
+ "ip": {
+ "access_list": {
+ "detailed": "{{ not not detailed }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "ip.access_list.include.sgt",
+ "getval": re.compile(
+ r"""
+ ^logging\sip\saccess-list\sinclude
+ \s(?P<sgt>sgt)
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging ip access-list include sgt",
+ "result": {
+ "ip": {
+ "access_list": {
+ "include": {
+ "sgt": "{{ not not sgt }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ # in some cases, the `logging level` command
+ # has an extra space at the end
+ "name": "facilities",
+ "getval": re.compile(
+ r"""
+ ^logging\slevel
+ \s(?P<facility>\S+)
+ \s(?P<severity>\d+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging level {{ facility }} {{ severity }}",
+ "result": {
+ "facilities": [
+ {
+ "facility": "{{ facility }}",
+ "severity": "{{ severity }}",
+ },
+ ],
+ },
+ },
+ {
+ "name": "logfile",
+ "getval": re.compile(
+ r"""
+ ^(?P<negated>no\s)?
+ logging\slogfile
+ (\s(?P<name>\S+))?
+ (\s(?P<severity>\d+))?
+ (\ssize\s(?P<size>\d+))?
+ (\spersistent\sthreshold\s(?P<persistent_threshold>\d+))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "{{ 'no ' if logfile.state|d('') == 'disabled' else '' }}"
+ "logging logfile"
+ "{{ ' ' + logfile.name if logfile.name|d('') else '' }}"
+ "{{ (' ' + logfile.severity|string) if logfile.severity is defined else '' }}"
+ "{{ (' size ' + logfile.size|string) if logfile.size is defined else '' }}"
+ "{{ (' persistent threshold ' + logfile.persistent_threshold|string) if logfile.persistent_threshold is defined else '' }}",
+ "result": {
+ "logfile": {
+ "state": "{{ 'disabled' if negated is defined else None }}",
+ "name": "{{ name }}",
+ "severity": "{{ severity }}",
+ "persistent_threshold": "{{ persistent_threshold }}",
+ "size": "{{ size }}",
+ },
+ },
+ },
+ {
+ "name": "module",
+ "getval": re.compile(
+ r"""
+ ^(?P<negated>no\s)?
+ logging\smodule
+ (\s(?P<severity>\d))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "{{ 'no ' if module.state|d('') == 'disabled' else '' }}"
+ "logging module"
+ "{{ (' ' + module.severity|string) if module.severity is defined else '' }}",
+ "result": {
+ "module": {
+ "state": "{{ 'disabled' if negated is defined else None }}",
+ "severity": "{{ severity }}",
+ },
+ },
+ },
+ {
+ "name": "monitor",
+ "getval": re.compile(
+ r"""
+ ^(?P<negated>no\s)?
+ logging\smonitor
+ (\s(?P<severity>\d))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "{{ 'no ' if monitor.state|d('') == 'disabled' else '' }}"
+ "logging monitor"
+ "{{ (' ' + monitor.severity|string) if monitor.severity is defined else '' }}",
+ "result": {
+ "monitor": {
+ "state": "{{ 'disabled' if negated is defined else None }}",
+ "severity": "{{ severity }}",
+ },
+ },
+ },
+ {
+ "name": "origin_id.hostname",
+ "getval": re.compile(
+ r"""
+ ^logging\sorigin-id
+ \s(?P<hostname>hostname)
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging origin-id hostname",
+ "result": {
+ "origin_id": {
+ "hostname": "{{ not not hostname }}",
+ },
+ },
+ },
+ {
+ "name": "origin_id.ip",
+ "getval": re.compile(
+ r"""
+ ^logging\sorigin-id
+ \sip\s(?P<ip>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging origin-id ip {{ origin_id.ip }}",
+ "result": {
+ "origin_id": {
+ "ip": "{{ ip }}",
+ },
+ },
+ },
+ {
+ "name": "origin_id.string",
+ "getval": re.compile(
+ r"""
+ ^logging\sorigin-id
+ \sstring\s(?P<string>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging origin-id string {{ origin_id.string }}",
+ "result": {
+ "origin_id": {
+ "string": "{{ string }}",
+ },
+ },
+ },
+ {
+ "name": "rate_limit",
+ "getval": re.compile(
+ r"""
+ ^(?P<negated>no\s)?
+ logging
+ \s(?P<rate_limit>rate-limit)
+ $""", re.VERBOSE,
+ ),
+ "setval": "{{ 'no ' if rate_limit|d('') == 'disabled' else '' }}"
+ "logging rate-limit",
+ "result": {
+ "rate_limit": "{{ 'disabled' if negated is defined else None }}",
+ },
+ },
+ {
+ "name": "rfc_strict",
+ "getval": re.compile(
+ r"""
+ logging\srfc-strict
+ \s(?P<rfc_strict>5424)
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging rfc-strict 5424",
+ "result": {
+ "rfc_strict": "{{ not not rfc_strict }}",
+ },
+ },
+ {
+ "name": "hosts",
+ "getval": re.compile(
+ r"""
+ ^logging\sserver
+ \s(?P<host>\S+)
+ (\s(?P<severity>\d))?
+ (\sport\s(?P<port>\d+))?
+ (\ssecure\strustpoint\sclient-identity\s(?P<client_identity>\S+))?
+ (\suse-vrf\s(?P<use_vrf>\S+))?
+ (\sfacility\s(?P<facility>\S+))?
+ $""", re.VERBOSE,
+ ),
+ "setval": _tmplt_hosts,
+ "result": {
+ "hosts": [
+ {
+ "host": "{{ host }}",
+ "severity": "{{ severity }}",
+ "secure": {
+ "trustpoint": {
+ "client_identity": "{{ client_identity }}",
+ },
+ },
+ "port": "{{ port }}",
+ "facility": "{{ facility }}",
+ "use_vrf": "{{ use_vrf }}",
+ },
+ ],
+ },
+ },
+ {
+ "name": "source_interface",
+ "getval": re.compile(
+ r"""
+ ^logging\ssource-interface
+ \s(?P<source_interface>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging source-interface {{ source_interface }}",
+ "result": {
+ "source_interface": "{{ source_interface }}",
+ },
+ },
+ {
+ "name": "timestamp",
+ "getval": re.compile(
+ r"""
+ ^logging\stimestamp
+ \s(?P<timestamp>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "logging timestamp {{ timestamp }}",
+ "result": {
+ "timestamp": "{{ timestamp }}",
+ },
+ },
+ ]
+ # fmt: on
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ntp_global.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ntp_global.py
new file mode 100644
index 00000000..8d5a354d
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ntp_global.py
@@ -0,0 +1,320 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The Ntp_global parser templates file. This contains
+a list of parser definitions and associated functions that
+facilitate both fact gathering and native command generation for
+the given network resource.
+"""
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.network_template import (
+ NetworkTemplate,
+)
+
+
+class Ntp_globalTemplate(NetworkTemplate):
+ def __init__(self, lines=None, module=None):
+ super(Ntp_globalTemplate, self).__init__(lines=lines, tmplt=self, module=module)
+
+ # fmt: off
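+    # Each entry pairs a 'getval' regex (named groups capture values from one
+    # line of NX-OS running config), a 'setval' Jinja2 template or callable
+    # that renders the command back from structured data, and a 'result'
+    # template that shapes the captures into facts. As a rough illustration,
+    # the "servers" parser below would turn
+    # "ntp server 192.0.2.1 prefer use-vrf management" into facts along the
+    # lines of {"servers": [{"server": "192.0.2.1", "prefer": True, "vrf": "management"}]}.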
+ PARSERS = [
+ {
+ "name": "access_group.match_all",
+ "getval": re.compile(
+ r"""
+ ^ntp\saccess-group\s(?P<match_all>match-all)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp access-group match-all",
+ "result": {
+ "access_group": {
+ "match_all": "{{ True if match_all is defined else None }}",
+ },
+ },
+ },
+ {
+ "name": "peer",
+ "getval": re.compile(
+ r"""
+ ^ntp\saccess-group\speer\s(?P<acl>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp access-group peer {{ access_list }}",
+ "result": {
+ "access_group": {
+ "peer": [
+ {
+ "access_list": "{{ acl }}",
+ },
+ ],
+ },
+ },
+ },
+ {
+ "name": "query_only",
+ "getval": re.compile(
+ r"""
+ ^ntp\saccess-group\squery-only\s(?P<acl>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp access-group query-only {{ access_list }}",
+ "result": {
+ "access_group": {
+ "query_only": [
+ {
+ "access_list": "{{ acl }}",
+ },
+ ],
+ },
+ },
+ },
+ {
+ "name": "serve",
+ "getval": re.compile(
+ r"""
+ ^ntp\saccess-group\sserve\s(?P<acl>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp access-group serve {{ access_list }}",
+ "result": {
+ "access_group": {
+ "serve": [
+ {
+ "access_list": "{{ acl }}",
+ },
+ ],
+ },
+ },
+ },
+ {
+ "name": "serve_only",
+ "getval": re.compile(
+ r"""
+ ^ntp\saccess-group\sserve-only\s(?P<acl>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp access-group serve-only {{ access_list }}",
+ "result": {
+ "access_group": {
+ "serve_only": [
+ {
+ "access_list": "{{ acl }}",
+ },
+ ],
+ },
+ },
+ },
+ {
+ "name": "allow.control.rate_limit",
+ "getval": re.compile(
+ r"""
+ ^ntp\sallow\scontrol\srate-limit\s(?P<rate_limit>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp allow control rate-limit {{ allow.control.rate_limit }}",
+ "result": {
+ "allow": {
+ "control": {
+ "rate_limit": "{{ rate_limit }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "allow.private",
+ "getval": re.compile(
+ r"""
+ ^ntp\sallow\s(?P<private>private)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp allow private",
+ "result": {
+ "allow": {
+ "private": "{{ not not private }}",
+ },
+ },
+ },
+ {
+ "name": "authenticate",
+ "getval": re.compile(
+ r"""
+ ^ntp\s(?P<authenticate>authenticate)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp authenticate",
+ "result": {
+ "authenticate": "{{ not not authenticate }}",
+ },
+ },
+ {
+ "name": "authentication_keys",
+ "getval": re.compile(
+ r"""
+ ^ntp\sauthentication-key\s(?P<id>\d+)\smd5\s(?P<key>\S+)\s(?P<encryption>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp authentication-key {{ id }} md5 {{ key }} {{ encryption }}",
+ "result": {
+ "authentication_keys": [
+ {
+ "id": "{{ id }}",
+ "key": "{{ key }}",
+ "encryption": "{{ encryption }}",
+ },
+ ],
+ },
+ },
+ {
+ "name": "logging",
+ "getval": re.compile(
+ r"""
+ ^ntp\s(?P<logging>logging)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp logging",
+ "result": {
+ "logging": "{{ not not logging }}",
+ },
+ },
+ {
+ "name": "master.stratum",
+ "getval": re.compile(
+ r"""
+ ^ntp\smaster\s(?P<stratum>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp master {{ master.stratum }}",
+ "result": {
+ "master": {
+ "stratum": "{{ stratum }}",
+ },
+ },
+ },
+ {
+ "name": "passive",
+ "getval": re.compile(
+ r"""
+ ^ntp\s(?P<passive>passive)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp passive",
+ "result": {
+ "passive": "{{ not not passive }}",
+ },
+ },
+ {
+ "name": "peers",
+ "getval": re.compile(
+ r"""
+ ^ntp\speer
+ \s(?P<peer>\S+)
+ (\s(?P<prefer>prefer))?
+ (\suse-vrf\s(?P<use_vrf>\S+))?
+ (\skey\s(?P<key>\d+))?
+ (\sminpoll\s(?P<minpoll>\d+))?
+ (\smaxpoll\s(?P<maxpoll>\d+))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp peer {{ peer }}"
+ "{{ ' prefer' if prefer is defined else ''}}"
+ "{{ (' use-vrf ' + vrf) if vrf is defined else '' }}"
+ "{{ (' key ' + key_id|string) if key_id is defined else '' }}"
+ "{{ (' minpoll ' + minpoll|string) if minpoll is defined else '' }}"
+ "{{ (' maxpoll ' + maxpoll|string) if maxpoll is defined else '' }}",
+ "result": {
+ "peers": [
+ {
+ "peer": "{{ peer }}",
+ "prefer": "{{ not not prefer }}",
+ "vrf": "{{ use_vrf }}",
+ "key_id": "{{ key }}",
+ "minpoll": "{{ minpoll }}",
+ "maxpoll": "{{ maxpoll }}",
+ },
+ ],
+ },
+ },
+ {
+ "name": "servers",
+ "getval": re.compile(
+ r"""
+ ^ntp\sserver
+ \s(?P<server>\S+)
+ (\s(?P<prefer>prefer))?
+ (\suse-vrf\s(?P<use_vrf>\S+))?
+ (\skey\s(?P<key>\d+))?
+ (\sminpoll\s(?P<minpoll>\d+))?
+ (\smaxpoll\s(?P<maxpoll>\d+))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp server {{ server }}"
+ "{{ ' prefer' if prefer is defined else ''}}"
+ "{{ (' use-vrf ' + vrf) if vrf is defined else '' }}"
+ "{{ (' key ' + key_id|string) if key_id is defined else '' }}"
+ "{{ (' minpoll ' + minpoll|string) if minpoll is defined else '' }}"
+ "{{ (' maxpoll ' + maxpoll|string) if maxpoll is defined else '' }}",
+ "result": {
+ "servers": [
+ {
+ "server": "{{ server }}",
+ "prefer": "{{ not not prefer }}",
+ "vrf": "{{ use_vrf }}",
+ "key_id": "{{ key }}",
+ "minpoll": "{{ minpoll }}",
+ "maxpoll": "{{ maxpoll }}",
+ },
+ ],
+ },
+ },
+ {
+ "name": "source",
+ "getval": re.compile(
+ r"""
+ ^ntp\ssource\s(?P<source>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp source {{ source }}",
+ "result": {
+ "source": "{{ source }}",
+ },
+ },
+ {
+ "name": "source_interface",
+ "getval": re.compile(
+ r"""
+                ^ntp\ssource-interface\s+(?P<source_interface>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp source-interface {{ source_interface }}",
+ "result": {
+ "source_interface": "{{ source_interface }}",
+ },
+ },
+ {
+ "name": "trusted_keys",
+ "getval": re.compile(
+ r"""
+ ^ntp\strusted-key\s(?P<key>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "ntp trusted-key {{ key_id|string }}",
+ "result": {
+ "trusted_keys": [
+ {
+ "key_id": "{{ key }}",
+ },
+ ],
+ },
+ },
+ ]
+ # fmt: on
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ospf_interfaces.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ospf_interfaces.py
new file mode 100644
index 00000000..2321fcaf
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ospf_interfaces.py
@@ -0,0 +1,510 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The Ospf_interfaces parser templates file. This contains
+a list of parser definitions and associated functions that
+facilitate both fact gathering and native command generation for
+the given network resource.
+"""
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.network_template import (
+ NetworkTemplate,
+)
+
+
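+# Builds the "ip ospf authentication" command: renders the negated form when
+# authentication.enable is False, otherwise appends the message-digest or
+# null variant when one of those options is set.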
+def _tmplt_authentication(data):
+ auth = data.get("authentication")
+ cmd = "ip ospf authentication"
+
+ if auth.get("enable") is False:
+ cmd = "no " + cmd
+ else:
+ if auth.get("message_digest"):
+ cmd += " message-digest"
+ elif auth.get("null_auth"):
+ cmd += " null"
+ return cmd
+
+
+class Ospf_interfacesTemplate(NetworkTemplate):
+ def __init__(self, lines=None, module=None):
+ super(Ospf_interfacesTemplate, self).__init__(lines=lines, tmplt=self, module=module)
+
+ # fmt: off
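+    # The "interface" parser is marked "shared", so its captured {{ name }}
+    # remains available to the parsers that follow and each subsequent match
+    # is filed under that interface's key in the facts. For example, an
+    # "  ip router ospf 100 area 0.0.0.0" line under "interface Ethernet1/1"
+    # ends up under the Ethernet1/1 entry's ipv4 address-family facts.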
+ PARSERS = [
+ {
+ "name": "interface",
+ "getval": re.compile(
+ r'''
+ ^interface
+ \s(?P<name>\S+)$''', re.VERBOSE,
+ ),
+ "setval": "interface {{ name }}",
+ "result": {
+ "{{ name }}": {
+ "name": "{{ name }}",
+ "address_family": {},
+ },
+ },
+ "shared": True,
+ },
+ {
+ "name": "area",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip|ipv6)
+ \srouter\s(ospf|ospfv3)
+ \s(?P<process_id>\S+)
+ \sarea\s(?P<area_id>\S+)
+ (\s(?P<secondaries>secondaries\snone))?$""",
+ re.VERBOSE,
+ ),
+ "setval": "{{ 'ip' if afi == 'ipv4' else 'ipv6' }} "
+ "router {{ 'ospf' if afi == 'ipv4' else 'ospfv3' }} "
+ "{{ process_id }} area {{ area.area_id }}{{ ' secondaries none' if area.secondaries|default('True') == False else '' }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi }}": {
+ "afi": "{{ 'ipv4' if afi == 'ip' else 'ipv6' }}",
+ "processes": {
+ "{{ process_id }}": {
+ "process_id": "{{ process_id }}",
+ "area": {
+ "area_id": "{{ area_id }}",
+ "secondaries": "{{ False if secondaries is defined else None }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "processes_multi_areas",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip|ipv6)
+ \srouter\s(ospf|ospfv3)
+ \s(?P<process_id>\S+)
+ \smulti-area\s(?P<area>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "{{ 'ip' if afi == 'ipv4' else 'ipv6' }} "
+ "router {{ 'ospf' if afi == 'ipv4' else 'ospfv3' }} "
+ "{{ process_id }} multi-area {{ area }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi }}": {
+ "afi": "{{ 'ipv4' if afi == 'ip' else 'ipv6' }}",
+ "processes": {
+ "{{ process_id }}": {
+ "process_id": "{{ process_id }}",
+ "multi_areas": [
+ "{{ area }}",
+ ],
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "multi_areas",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip|ipv6)
+ \srouter\s(ospf|ospfv3)
+ \smulti-area\s(?P<area>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "{{ 'ip' if afi == 'ipv4' else 'ipv6' }} "
+ "router {{ 'ospf' if afi == 'ipv4' else 'ospfv3' }} "
+ "multi-area {{ area }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi }}": {
+ "afi": "{{ 'ipv4' if afi == 'ip' else 'ipv6' }}",
+ "multi_areas": [
+ "{{ area }}",
+ ],
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "authentication",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip|ipv6)
+ \s(ospf|ospfv3)
+ \s(?P<authentication>authentication)
+ (\s(?P<opt>(message-digest|null)))?$""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_authentication,
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi }}": {
+ "afi": "{{ 'ipv4' if afi == 'ip' else 'ipv6' }}",
+ "authentication": {
+ "enable": "{{ True if authentication is defined and opt is undefined }}",
+ "message_digest": "{{ True if opt == 'message-digest' else None }}",
+ "null_auth": "{{ True if opt == 'null' else None }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "authentication.key_chain",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip)
+ \sospf
+ \s(?P<authentication>authentication)
+ \skey-chain\s(?P<key_chain>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "ip ospf authentication key-chain {{ authentication.key_chain }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi }}": {
+ "afi": "{{ afi|replace('ip', 'ipv4') }}",
+ "authentication": {
+ "key_chain": "{{ key_chain }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "authentication_key",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip)
+ \sospf
+ \sauthentication-key
+ \s(?P<encryption>\d)
+ \s(?P<key>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "ip ospf authentication-key "
+ "{{ authentication_key.encryption }} {{ authentication_key.key }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi }}": {
+ "afi": "{{ afi|replace('ip', 'ipv4') }}",
+ "authentication_key": {
+ "encryption": "{{ encryption }}",
+ "key": "{{ key }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "message_digest_key",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip)
+ \sospf
+ \smessage-digest-key
+ \s(?P<key_id>\d+)
+ \smd5
+ \s(?P<encryption>\d)
+ \s(?P<key>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "ip ospf "
+ "message-digest-key {{ message_digest_key.key_id }} "
+ "md5 {{ message_digest_key.encryption|default('') }} {{ message_digest_key.key }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi }}": {
+ "afi": "{{ afi|replace('ip', 'ipv4') }}",
+ "message_digest_key": {
+ "key_id": "{{ key_id }}",
+ "encryption": "{{ encryption }}",
+ "key": "{{ key }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "cost",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip)?
+ \s(ospf|ospfv3)
+ \scost\s(?P<cost>\d+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "{{ 'ip ' if afi == 'ipv4' else '' }}"
+ "{{ 'ospf' if afi == 'ipv4' else 'ospfv3' }} "
+ "cost {{ cost }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi|d('ipv6') }}": {
+ "afi": "{{ 'ipv4' if afi is defined else 'ipv6' }}",
+ "cost": "{{ cost }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "dead_interval",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip)?
+ \s(ospf|ospfv3)
+ \sdead-interval\s(?P<dead_interval>\d+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "{{ 'ip ' if afi == 'ipv4' else '' }}"
+ "{{ 'ospf' if afi == 'ipv4' else 'ospfv3' }} "
+ "dead-interval {{ dead_interval }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi|d('ipv6') }}": {
+ "afi": "{{ 'ipv4' if afi is defined else 'ipv6' }}",
+ "dead_interval": "{{ dead_interval }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "hello_interval",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip)?
+ \s(ospf|ospfv3)
+ \shello-interval\s(?P<hello_interval>\d+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "{{ 'ip ' if afi == 'ipv4' else '' }}"
+ "{{ 'ospf' if afi == 'ipv4' else 'ospfv3' }} "
+ "hello-interval {{ hello_interval }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi|d('ipv6') }}": {
+ "afi": "{{ 'ipv4' if afi is defined else 'ipv6' }}",
+ "hello_interval": "{{ hello_interval }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "instance",
+ "getval": re.compile(
+ r"""
+ \s+(ospf|ospfv3)
+ \sinstance\s(?P<instance>\d+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "ospfv3 instance {{ instance }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi|d('ipv6') }}": {
+ "afi": "ipv6",
+ "instance": "{{ instance }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "mtu_ignore",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip)?
+ \s(ospf|ospfv3)
+ \s(?P<mtu_ignore>mtu-ignore)$""",
+ re.VERBOSE,
+ ),
+ "setval": "{{ 'ip ' if afi == 'ipv4' else '' }}"
+ "{{ 'ospf' if afi == 'ipv4' else 'ospfv3' }} "
+ "mtu-ignore",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi|d('ipv6') }}": {
+ "afi": "{{ 'ipv4' if afi is defined else 'ipv6' }}",
+ "mtu_ignore": "{{ not not mtu_ignore }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "network",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip)?
+ \s(ospf|ospfv3)
+ \snetwork\s(?P<network>(broadcast|point-to-point))$""",
+ re.VERBOSE,
+ ),
+ "setval": "{{ 'ip ' if afi == 'ipv4' else '' }}"
+ "{{ 'ospf' if afi == 'ipv4' else 'ospfv3' }} "
+ "network {{ network }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi|d('ipv6') }}": {
+ "afi": "{{ 'ipv4' if afi is defined else 'ipv6' }}",
+ "network": "{{ network }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "passive_interface",
+ "getval": re.compile(
+ r"""
+ (\s+(?P<negated>no))?
+ (\s+(?P<afi>ip))?
+ \s*(ospf|ospfv3)
+ \s(?P<passive_interface>passive-interface)$""",
+ re.VERBOSE,
+ ),
+ "setval": "{{ 'ip ' if afi == 'ipv4' else '' }}"
+ "{{ 'ospf' if afi == 'ipv4' else 'ospfv3' }} "
+ "passive-interface",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi|d('ipv6') }}": {
+ "afi": "{{ 'ipv4' if afi is defined else 'ipv6' }}",
+ "passive_interface": "{{ False if negated is defined else (not not passive_interface) }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "priority",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip)?
+ \s(ospf|ospfv3)
+ \spriority\s(?P<priority>\d+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "{{ 'ip ' if afi == 'ipv4' else '' }}"
+ "{{ 'ospf' if afi == 'ipv4' else 'ospfv3' }} "
+ "priority {{ priority }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi|d('ipv6') }}": {
+ "afi": "{{ 'ipv4' if afi is defined else 'ipv6' }}",
+ "priority": "{{ priority }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "retransmit_interval",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip)?
+ \s(ospf|ospfv3)
+ \sretransmit-interval\s(?P<retransmit_interval>\d+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "{{ 'ip ' if afi == 'ipv4' else '' }}"
+ "{{ 'ospf' if afi == 'ipv4' else 'ospfv3' }} "
+ "retransmit-interval {{ retransmit_interval }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi|d('ipv6') }}": {
+ "afi": "{{ 'ipv4' if afi is defined else 'ipv6' }}",
+ "retransmit_interval": "{{ retransmit_interval }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "shutdown",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip)?
+ \s(ospf|ospfv3)
+ \s(?P<shutdown>shutdown)$""",
+ re.VERBOSE,
+ ),
+ "setval": "{{ 'ip ' if afi == 'ipv4' else '' }}"
+ "{{ 'ospf' if afi == 'ipv4' else 'ospfv3' }} "
+ "shutdown",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi|d('ipv6') }}": {
+ "afi": "{{ 'ipv4' if afi is defined else 'ipv6' }}",
+ "shutdown": "{{ not not shutdown }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "transmit_delay",
+ "getval": re.compile(
+ r"""
+ \s+(?P<afi>ip)?
+ \s(ospf|ospfv3)
+ \stransmit-delay\s(?P<transmit_delay>\d+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "{{ 'ip ' if afi == 'ipv4' else '' }}"
+ "{{ 'ospf' if afi == 'ipv4' else 'ospfv3' }} "
+ "transmit-delay {{ transmit_delay }}",
+ "result": {
+ "{{ name }}": {
+ "address_family": {
+ "{{ afi|d('ipv6') }}": {
+ "afi": "{{ 'ipv4' if afi is defined else 'ipv6' }}",
+ "transmit_delay": "{{ transmit_delay }}",
+ },
+ },
+ },
+ },
+ },
+ ]
+ # fmt: on
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ospfv2.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ospfv2.py
new file mode 100644
index 00000000..c8b518dd
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ospfv2.py
@@ -0,0 +1,1101 @@
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.network_template import (
+ NetworkTemplate,
+)
+
+
+def _tmplt_area_range(arange):
+ command = "area {area} range {range}".format(**arange)
+ if arange.get("not_advertise") is True:
+ command += " not-advertise"
+ if "cost" in arange:
+ command += " cost {cost}".format(**arange)
+ return command
+
+
+def _tmplt_log_adjacency_changes(proc):
+ command = "log-adjacency-changes"
+ if proc.get("log_adjacency_changes").get("detail", False) is True:
+ command += " detail"
+ return command
+
+
+def _tmplt_area_authentication(area):
+ command = "area {area_id} authentication".format(**area)
+ if area.get("authentication", {}).get("message_digest"):
+ command += " message-digest"
+ return command
+
+
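+# Builds "max-lsa <count>" and appends the optional threshold, warning-only,
+# ignore-time, ignore-count and reset-time arguments present in the data.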
+def _tmplt_max_lsa(proc):
+ max_lsa = proc["max_lsa"]
+ command = "max-lsa {max_non_self_generated_lsa}".format(**max_lsa)
+ if max_lsa.get("threshold"):
+ command += " {threshold}".format(**max_lsa)
+ if max_lsa.get("warning_only"):
+ command += " warning-only"
+ if max_lsa.get("ignore_time"):
+ command += " ignore-time {ignore_time}".format(**max_lsa)
+ if max_lsa.get("ignore_count"):
+ command += " ignore-count {ignore_count}".format(**max_lsa)
+ if max_lsa.get("reset_time"):
+ command += " reset-time {reset_time}".format(**max_lsa)
+ return command
+
+
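+# Renders "default-information originate": negated when originate.set is
+# False, otherwise extended with "always" and "route-map <name>" as requested.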
+def _tmplt_default_information(proc):
+ default_information = proc["default_information"]["originate"]
+ command = "default-information originate"
+
+ if default_information.get("set") is False:
+ command = "no {0}".format(command)
+ else:
+ if default_information.get("always"):
+ command += " always"
+ if default_information.get("route_map"):
+ command += " route-map {route_map}".format(**default_information)
+
+ return command
+
+
+def _tmplt_table_map(proc):
+ table_map = proc["table_map"]
+ command = "table-map"
+
+ if table_map.get("name"):
+ command += " {name}".format(**table_map)
+ if table_map.get("filter"):
+ command += " filter"
+
+ return command
+
+
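+# Assembles "max-metric router-lsa", appending the optional external-lsa,
+# include-stub, on-startup (with wait-period / wait-for bgp) and summary-lsa
+# arguments, or emits the negated form when router_lsa.set is False.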
+def _tmplt_max_metric(proc):
+ max_metric = proc["max_metric"]
+ command = "max-metric router-lsa"
+
+ if max_metric.get("router_lsa", {}).get("set") is False:
+ command = "no {0}".format(command)
+ else:
+ external_lsa = max_metric.get("router_lsa", {}).get("external_lsa", {})
+ include_stub = max_metric.get("router_lsa", {}).get("include_stub", {})
+ on_startup = max_metric.get("router_lsa", {}).get("on_startup", {})
+ summary_lsa = max_metric.get("router_lsa", {}).get("summary_lsa", {})
+ if external_lsa:
+ command += " external-lsa"
+ if external_lsa.get("max_metric_value"):
+ command += " {max_metric_value}".format(**external_lsa)
+ if include_stub:
+ command += " include-stub"
+ if on_startup:
+ command += " on-startup"
+ if on_startup.get("wait_period"):
+ command += " {wait_period}".format(**on_startup)
+ if on_startup.get("wait_for_bgp_asn"):
+ command += " wait-for bgp {wait_for_bgp_asn}".format(**on_startup)
+ if summary_lsa:
+ command += " summary-lsa"
+ if summary_lsa.get("max_metric_value"):
+ command += " {max_metric_value}".format(**summary_lsa)
+
+ return command
+
+
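+# Builds "area <area_id> nssa": negated when nssa.set is False, otherwise
+# extended with whichever of no-summary, no-redistribution and
+# default-information-originate are enabled in the data.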
+def _tmplt_area_nssa(area):
+ nssa = area["nssa"]
+ command = "area {area_id} nssa".format(**area)
+ if nssa.get("set") is False:
+ command = "no {0}".format(command)
+ else:
+ for attrib in [
+ "no_summary",
+ "no_redistribution",
+ "default_information_originate",
+ ]:
+ if nssa.get(attrib):
+ command += " {0}".format(attrib.replace("_", "-"))
+ return command
+
+
+def _tmplt_area_nssa_translate(area):
+ translate = area["nssa"]["translate"]["type7"]
+ command = "area {area_id} nssa translate type7".format(**area)
+ for attrib in ["always", "never", "supress_fa"]:
+ if translate.get(attrib):
+ command += " {0}".format(attrib.replace("_", "-"))
+ return command
+
+
+def _tmplt_area_ranges(arange):
+ command = "area {area_id} range {prefix}".format(**arange)
+ if arange.get("not_advertise") is True:
+ command += " not-advertise"
+ if "cost" in arange:
+ command += " cost {cost}".format(**arange)
+ return command
+
+
+def _tmplt_summary_address(proc):
+ command = "summary-address {prefix}".format(**proc)
+ if proc.get("tag"):
+ command += " tag {tag}".format(**proc)
+ elif proc.get("not_advertise"):
+ command += " not-advertise"
+ return command
+
+
+def _tmplt_area_stub(area):
+ stub = area["stub"]
+ command = "area {area_id} stub".format(**area)
+ if stub.get("set") is False:
+ command = "no {0}".format(command)
+ elif stub.get("no_summary"):
+ command += " no-summary"
+ return command
+
+
+def _tmplt_redistribute(redis):
+ command = "redistribute {protocol}".format(**redis)
+ if redis.get("id"):
+ command += " {id}".format(**redis)
+ if redis.get("route_map"):
+ command += " route-map {route_map}".format(**redis)
+ return command
+
+
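+# Renders "capability vrf-lite" (optionally with the evpn keyword), or the
+# negated command when capability.vrf_lite.set is False.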
+def _tmplt_capability_vrf_lite(proc):
+ command = "capability vrf-lite"
+ vrf_lite = proc["capability"]["vrf_lite"]
+ if vrf_lite.get("set") is False:
+ command = "no {0}".format(command)
+ else:
+ if vrf_lite.get("evpn"):
+ command += " evpn"
+ return command
+
+
+class Ospfv2Template(NetworkTemplate):
+ def __init__(self, lines=None):
+ super(Ospfv2Template, self).__init__(lines=lines, tmplt=self)
+
+ # fmt: off
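+    # The shared "vrf" parser scopes the facts that follow under the key
+    # '{{ "vrf_" + vrf|d() }}'; outside a vrf block the capture is undefined
+    # and |d() collapses the key to "vrf_", which holds the default VRF.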
+ PARSERS = [
+ {
+ "name": "vrf",
+ "getval": re.compile(
+ r"""
+ \s+vrf
+ \s(?P<vrf>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "vrf {{ vrf }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "vrf": "{{ vrf }}",
+ },
+ },
+ },
+ "shared": True,
+ },
+ {
+ "name": "bfd",
+ "getval": re.compile(
+ r"""
+ \s+(?P<bfd>bfd)$""",
+ re.VERBOSE,
+ ),
+ "setval": "bfd",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "bfd": "{{ not not bfd }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "process_id",
+ "getval": re.compile(
+ r"""
+ ospf(?:v3)*\s
+ (?P<process_id>\S+)""",
+ re.VERBOSE,
+ ),
+ "setval": "router ospf {{ process_id }}",
+ "result": {
+ "process_id": "{{ process_id }}",
+ },
+ "shared": True,
+ },
+ {
+ "name": "down_bit_ignore",
+ "getval": re.compile(
+ r"""
+ \s+(?P<down_bit_ignore>down-bit-ignore)$""",
+ re.VERBOSE,
+ ),
+ "setval": "down-bit-ignore",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "down_bit_ignore": "{{ not not down_bit_ignore }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "capability.vrf_lite",
+ "getval": re.compile(
+ r"""
+ \s+capability
+ \s(?P<vrf_lite>vrf-lite)
+ \s*(?P<evpn>evpn)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_capability_vrf_lite,
+ "remval": "capability vrf-lite",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "capability": {
+ "vrf_lite": {
+ "set": "{{ True if vrf_lite is defined and evpn is undefined else None }}",
+ "evpn": "{{ not not evpn }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "auto_cost",
+ "getval": re.compile(
+ r"""
+ \s+auto-cost\sreference-bandwidth\s
+ (?P<acrb>\d+)\s(?P<unit>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": (
+ "auto-cost reference-bandwidth"
+ " {{ auto_cost.reference_bandwidth }}"
+ " {{ auto_cost.unit }}"
+ ),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "auto_cost": {
+ "reference_bandwidth": "{{ acrb }}",
+ "unit": "{{ unit }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "flush_routes",
+ "getval": re.compile(
+ r"""
+ \s+(?P<flush_routes>flush-routes)$""",
+ re.VERBOSE,
+ ),
+ "setval": "flush-routes",
+ "result": {
+ "flush_routes": "{{ not not flush_routes }}",
+ },
+ },
+ {
+ "name": "graceful_restart.set",
+ "getval": re.compile(
+ r"""
+ \s+(?P<graceful_restart>no\sgraceful-restart)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "graceful-restart",
+ "remval": "no graceful-restart",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "graceful_restart": {
+ "set": "{{ not graceful_restart }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "graceful_restart.helper_disable",
+ "getval": re.compile(
+ r"""
+ \s+graceful-restart
+ \s+(?P<helper_disable>helper-disable)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "graceful-restart helper-disable",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "graceful_restart": {
+ "helper_disable": "{{ not not helper_disable }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "graceful_restart.grace_period",
+ "getval": re.compile(
+ r"""
+ \s+graceful-restart
+ \s+grace-period
+ \s+(?P<grace_period>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "graceful-restart helper-disable",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "graceful_restart": {
+ "grace_period": "{{ grace_period }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "isolate",
+ "getval": re.compile(
+ r"""
+ \s+(?P<isolate>isolate)$""",
+ re.VERBOSE,
+ ),
+ "setval": "isolate",
+ "result": {"isolate": "{{ not not isolate }}"},
+ },
+ {
+ "name": "log_adjacency_changes",
+ "getval": re.compile(
+ r"""
+ \s+(?P<log>log-adjacency-changes)
+ \s*(?P<detail>detail)*$""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_log_adjacency_changes,
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "log_adjacency_changes": {
+ "log": "{{ True if log is defined and detail is undefined else None }}",
+ "detail": "{{ True if detail is defined else None }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "max_lsa",
+ "getval": re.compile(
+ r"""
+ \s+max-lsa
+ \s(?P<max_gen_lsa>\d+)
+ \s*(?P<threshold>\d*)
+ \s*(?P<warning_only>warning-only)*
+ \s*(ignore-time)*\s*(?P<ig_time>\d*)
+ \s*(ignore-count)*\s*(?P<ig_count>\d*)
+ \s*(reset-time)*\s*(?P<rst_time>\d*)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_max_lsa,
+ "remval": "max-lsa {{ max_lsa.max_non_self_generated_lsa }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "max_lsa": {
+ "max_non_self_generated_lsa": "{{ max_gen_lsa }}",
+ "threshold": "{{ threshold }}",
+ "ignore_time": "{{ ig_time }}",
+ "ignore_count": "{{ ig_count }}",
+ "reset_time": "{{ rst_time }}",
+ "warning_only": "{{ not not warning_only }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "mpls.traffic_eng.areas",
+ "getval": re.compile(
+ r"""
+ \s+mpls\straffic-eng\sarea
+ \s(?P<area_id>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": ("mpls traffic-eng area {{ area_id }}"),
+ "result": {
+ "mpls": {
+ "traffic_eng": {
+ "areas": [
+ {
+ "area_id": "{{ area_id }}",
+ },
+ ],
+ },
+ },
+ },
+ },
+ {
+ "name": "mpls.traffic_eng.router_id",
+ "getval": re.compile(
+ r"""
+ \s+mpls\straffic-eng\srouter-id
+ \s(?P<router_id>\S+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": (
+ "mpls traffic-eng router-id" " {{ mpls.traffic_eng.router_id }}"
+ ),
+ "result": {"mpls": {"traffic_eng": {"router_id": "{{ router_id }}"}}},
+ },
+ {
+ "name": "mpls.traffic_eng.multicast_intact",
+ "getval": re.compile(
+ r"""
+ \s+mpls\straffic-eng
+ \s(?P<multicast_intact>multicast-intact)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": ("mpls traffic-eng multicast-intact"),
+ "result": {
+ "mpls": {
+ "traffic_eng": {
+ "multicast_intact": "{{ not not multicast_intact }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "name_lookup",
+ "getval": re.compile(
+ r"""
+ \s+(?P<name_lookup>name-lookup)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": ("name-lookup"),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "name_lookup": "{{ not not name_lookup }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "passive_interface.default",
+ "getval": re.compile(
+ r"""
+ \s+passive-interface
+ \s+(?P<default>default)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": ("passive-interface default"),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "passive_interface": {"default": "{{ not not default }}"},
+ },
+ },
+ },
+ },
+ {
+ "name": "rfc1583compatibility",
+ "getval": re.compile(
+ r"""
+ \s+(?P<rfc>rfc1583compatibility)$""",
+ re.VERBOSE,
+ ),
+ "setval": ("rfc1583compatibility"),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "rfc1583compatibility": "{{ not not rfc }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "router_id",
+ "getval": re.compile(
+ r"""
+ \s+router-id
+ \s(?P<router_id>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": ("router-id" " {{ router_id }}"),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "router_id": "{{ router_id }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "shutdown",
+ "getval": re.compile(
+ r"""
+ \s+(?P<shutdown>shutdown)$""",
+ re.VERBOSE,
+ ),
+ "setval": ("shutdown"),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "shutdown": "{{ not not shutdown }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "default_information.originate",
+ "getval": re.compile(
+ r"""
+ \s+default-information
+ \s(?P<originate>originate)
+ \s*(?P<always>always)*
+ \s*(route-map)*
+ \s*(?P<route_map>\S+)*$""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_default_information,
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "default_information": {
+ "originate": {
+ "set": "{{ True if originate is defined and always is undefined and route_map is undefined else None }}",
+ "always": "{{ not not always }}",
+ "route_map": "{{ route_map }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "default_metric",
+ "getval": re.compile(
+ r"""
+ \s+default-metric
+ \s(?P<default_metric>\d+)$""",
+ re.VERBOSE,
+ ),
+ "setval": ("default-metric {{ default_metric }}"),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "default_metric": "{{ default_metric }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "distance",
+ "getval": re.compile(
+ r"""
+ \s+distance
+ \s(?P<distance>\d+)$""",
+ re.VERBOSE,
+ ),
+ "setval": ("distance {{ distance }}"),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "distance": "{{ distance }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "table_map",
+ "getval": re.compile(
+ r"""
+ \s+table-map
+ \s(?P<rmap>\S+)
+ \s*(?P<filter>filter)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_table_map,
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "table_map": {
+ "name": "{{ rmap }}",
+ "filter": "{{ not not filter }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "timers.lsa_arrival",
+ "getval": re.compile(
+ r"""
+ \s+timers
+ \slsa-arrival
+ \s(?P<lsa_arrival_val>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": ("timers lsa-arrival {{ timers.lsa_arrival }}"),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "timers": {
+ "lsa_arrival": "{{ lsa_arrival_val }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "timers.lsa_group_pacing",
+ "getval": re.compile(
+ r"""
+ \s+timers
+ \slsa-group-pacing
+ \s(?P<lsa_group_pacing>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "timers lsa-group-pacing {{ timers.lsa_group_pacing }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "timers": {
+ "lsa_group_pacing": "{{ lsa_group_pacing }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "timers.throttle.lsa",
+ "getval": re.compile(
+ r"""
+ \s+timers\sthrottle\slsa
+ \s(?P<start>\d+)
+ \s(?P<hold>\d+)
+ \s(?P<max>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "timers throttle lsa {{ timers.throttle.lsa.start_interval }}"
+ " {{ timers.throttle.lsa.hold_interval }}"
+ " {{ timers.throttle.lsa.max_interval }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "timers": {
+ "throttle": {
+ "lsa": {
+ "start_interval": "{{ start }}",
+ "hold_interval": "{{ hold }}",
+ "max_interval": "{{ max }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "timers.throttle.spf",
+ "getval": re.compile(
+ r"""
+ \s+timers\sthrottle\sspf
+ \s(?P<initial>\d+)
+ \s(?P<min>\d+)
+ \s(?P<max>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "timers throttle spf {{ timers.throttle.spf.initial_spf_delay }}"
+ " {{ timers.throttle.spf.min_hold_time }}"
+ " {{ timers.throttle.spf.max_wait_time }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "timers": {
+ "throttle": {
+ "spf": {
+ "initial_spf_delay": "{{ initial }}",
+ "min_hold_time": "{{ min }}",
+ "max_wait_time": "{{ max }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "area.default_cost",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)\s
+ default-cost\s(?P<default_cost>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "area {{ area_id }} default-cost {{ default_cost }}",
+ "compval": "default_cost",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "default_cost": "{{ default_cost|int }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "area.authentication",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)
+ \s(?P<auth>authentication)
+ \s*(?P<md>message-digest)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_area_authentication,
+ "remval": "area {{ area_id }} authentication",
+ "compval": "authentication",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "authentication": {
+ "set": "{{ True if auth is defined and md is undefined }}",
+ "message_digest": "{{ True if md is defined else False }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "area.filter_list",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)
+ \sfilter-list
+ \sroute-map\s(?P<rmap>\S+)
+ \s(?P<dir>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "area {{ area_id }} filter-list route-map {{ route_map }} {{ direction }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "filter_list": [
+ {
+ "route_map": "{{ rmap }}",
+ "direction": "{{ dir }}",
+ },
+ ],
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "redistribute",
+ "getval": re.compile(
+ r"""
+ \s+redistribute
+ \s(?P<protocol>\S+)
+ \s*(?P<id>\S+)*
+ \sroute-map\s(?P<rmap>\S+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_redistribute,
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "redistribute": [
+ {
+ "protocol": "{{ protocol }}",
+ "id": "{{ id }}",
+ "route_map": "{{ rmap }}",
+ },
+ ],
+ },
+ },
+ },
+ },
+ {
+ "name": "area.nssa",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)
+ \s(?P<nssa>nssa)
+ \s*(?P<no_sum>no-summary)*
+ \s*(?P<no_redis>no-redistribution)*
+ \s*(?P<def_info>default-information-originate)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_area_nssa,
+ "remval": "area {{ area_id }} nssa",
+ "compval": "nssa",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "nssa": {
+ "set": "{{ True if nssa is defined and no_sum is undefined and no_redis is undefined and def_info is undefined }}",
+ "no_summary": "{{ not not no_sum }}",
+ "no_redistribution": "{{ not not no_redis }}",
+ "default_information_originate": "{{ not not def_info }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "area.nssa.translate",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)\snssa
+ \stranslate
+ \stype7
+ \s(?P<choice>always|never)
+ \s*(?P<supress_fa>supress-fa)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_area_nssa_translate,
+ "compval": "nssa.translate",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "nssa": {
+ "translate": {
+ "type7": {
+ "always": '{{ True if choice == "always" else None }}',
+ "never": '{{ True if choice == "never" else None }}',
+ "supress_fa": "{{ not not supress_fa }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "area.ranges",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)
+ \srange\s(?P<prefix>\S+)
+ \s*(cost)*\s*(?P<cost>\d+)*
+ \s*(?P<not_adver>not-advertise)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_area_ranges,
+ "remval": "area {{ area_id }} range {{ prefix }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "ranges": [
+ {
+ "prefix": "{{ prefix }}",
+ "cost": "{{ cost }}",
+ "not_advertise": "{{ not not not_adver }}",
+ },
+ ],
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "summary_address",
+ "getval": re.compile(
+ r"""
+ \s+summary-address
+ \s(?P<prefix>\S+)
+ \s*(?P<not_adver>not-advertise)*
+ \s*(tag)*\s*(?P<tag>\d+)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_summary_address,
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "summary_address": [
+ {
+ "prefix": "{{ prefix }}",
+ "not_advertise": "{{ not not not_adver }}",
+ "tag": "{{ tag }}",
+ },
+ ],
+ },
+ },
+ },
+ },
+ {
+ "name": "area.stub",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)
+ \s(?P<stub>stub)
+ \s*(?P<no_summary>no-summary)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_area_stub,
+ "remval": "area {{ area_id }} stub",
+ "compval": "stub",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "stub": {
+ "set": "{{ True if stub is defined and no_summary is undefined else None }}",
+ "no_summary": "{{ not not no_summary }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "maximum_paths",
+ "getval": re.compile(
+ r"""
+ \s+maximum-paths
+ \s(?P<maximum_paths>\d+)$""",
+ re.VERBOSE,
+ ),
+ "setval": ("maximum-paths {{ maximum_paths }}"),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {"maximum_paths": "{{ maximum_paths }}"},
+ },
+ },
+ },
+ {
+ "name": "max_metric",
+ "getval": re.compile(
+ r"""
+ \s+max-metric
+ \s+(?P<router_lsa>router-lsa)
+ \s*(?P<external_lsa>external-lsa)*
+ \s*(?P<max_metric_value>\d+)*
+ \s*(?P<include_stub>include-stub)*
+ \s*(?P<on_startup>on-startup)*
+ \s*(?P<wait_period>\d+)*
+ \s*(wait-for\sbgp)*
+ \s*(?P<bgp_asn>\d+)*
+ \s*(?P<summary_lsa>summary-lsa)*
+ \s*(?P<sum_lsa_max_metric_value>\d+)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_max_metric,
+ "remval": "max-metric router-lsa",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "max_metric": {
+ "router_lsa": {
+ "set": "{{ True if router_lsa is defined and external_lsa is undefined else None }}",
+ "external_lsa": {
+ "set": "{{ True if external_lsa is defined and max_metric_value is undefined else None }}",
+ "max_metric_value": "{{ max_metric_value }}",
+ },
+ "include_stub": "{{ not not include_stub }}",
+ "on_startup": {
+ "set": "{{ True if on_startup is defined and (wait_period and bgp_asn) is undefined else None }}",
+ "wait_period": "{{ wait_period }}",
+ "wait_for_bgp_asn": "{{ bgp_asn }}",
+ },
+ "summary_lsa": {
+ "set": "{{ True if summary_lsa is defined and sum_lsa_max_metric_value is undefined else None }}",
+ "max_metric_value": "{{ sum_lsa_max_metric_value }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ ]
+ # fmt: on
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ospfv3.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ospfv3.py
new file mode 100644
index 00000000..b46c3cf4
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/ospfv3.py
@@ -0,0 +1,945 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The Ospfv3 parser templates file. This contains
+a list of parser definitions and associated functions that
+facilitate both fact gathering and native command generation for
+the given network resource.
+"""
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.network_template import (
+ NetworkTemplate,
+)
+
+
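+# Like the OSPFv2 helper of the same name, but the OSPFv3 NSSA command also
+# accepts a "route-map <name>" suffix when one is configured.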
+def _tmplt_area_nssa(area):
+ nssa = area["nssa"]
+ command = "area {area_id} nssa".format(**area)
+ if nssa.get("set") is False:
+ command = "no {0}".format(command)
+ else:
+ for attrib in [
+ "no_summary",
+ "no_redistribution",
+ "default_information_originate",
+ ]:
+ if nssa.get(attrib):
+ command += " {0}".format(attrib.replace("_", "-"))
+ if nssa.get("route_map"):
+ command += " route-map {route_map}".format(**nssa)
+ return command
+
+
+def _tmplt_area_nssa_translate(area):
+ translate = area["nssa"]["translate"]["type7"]
+ command = "area {area_id} nssa translate type7".format(**area)
+ for attrib in ["always", "never", "supress_fa"]:
+ if translate.get(attrib):
+ command += " {0}".format(attrib.replace("_", "-"))
+ return command
+
+
+def _tmplt_area_stub(area):
+ stub = area["stub"]
+ command = "area {area_id} stub".format(**area)
+ if stub.get("set") is False:
+ command = "no {0}".format(command)
+ elif stub.get("no_summary"):
+ command += " no-summary"
+ return command
+
+
+def _tmplt_log_adjacency_changes(proc):
+ command = "log-adjacency-changes"
+ if proc.get("log_adjacency_changes").get("detail", False) is True:
+ command += " detail"
+ return command
+
+
+def _tmplt_max_lsa(proc):
+ max_lsa = proc["max_lsa"]
+ command = "max-lsa {max_non_self_generated_lsa}".format(**max_lsa)
+ if max_lsa.get("threshold"):
+ command += " {threshold}".format(**max_lsa)
+ if max_lsa.get("warning_only"):
+ command += " warning-only"
+ if max_lsa.get("ignore_time"):
+ command += " ignore-time {ignore_time}".format(**max_lsa)
+ if max_lsa.get("ignore_count"):
+ command += " ignore-count {ignore_count}".format(**max_lsa)
+ if max_lsa.get("reset_time"):
+ command += " reset-time {reset_time}".format(**max_lsa)
+ return command
+
+
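+# Same construction as the OSPFv2 template's helper, except that OSPFv3
+# appends stub-prefix-lsa and inter-area-prefix-lsa in place of the OSPFv2
+# include-stub and summary-lsa arguments.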
+def _tmplt_max_metric(proc):
+ max_metric = proc["max_metric"]
+ command = "max-metric router-lsa"
+
+ if max_metric.get("router_lsa", {}).get("set") is False:
+ command = "no {0}".format(command)
+ else:
+ external_lsa = max_metric.get("router_lsa", {}).get("external_lsa", {})
+ stub_prefix_lsa = max_metric.get("router_lsa", {}).get("stub_prefix_lsa", {})
+ on_startup = max_metric.get("router_lsa", {}).get("on_startup", {})
+ inter_area_prefix_lsa = max_metric.get("router_lsa", {}).get("inter_area_prefix_lsa", {})
+ if external_lsa:
+ command += " external-lsa"
+ if external_lsa.get("max_metric_value"):
+ command += " {max_metric_value}".format(**external_lsa)
+ if stub_prefix_lsa:
+ command += " stub-prefix-lsa"
+ if on_startup:
+ command += " on-startup"
+ if on_startup.get("wait_period"):
+ command += " {wait_period}".format(**on_startup)
+ if on_startup.get("wait_for_bgp_asn"):
+ command += " wait-for bgp {wait_for_bgp_asn}".format(**on_startup)
+ if inter_area_prefix_lsa:
+ command += " inter-area-prefix-lsa"
+ if inter_area_prefix_lsa.get("max_metric_value"):
+ command += " {max_metric_value}".format(**inter_area_prefix_lsa)
+
+ return command
+
+
+def _tmplt_area_ranges(arange):
+ command = "area {area_id} range {prefix}".format(**arange)
+ if arange.get("not_advertise") is True:
+ command += " not-advertise"
+ if "cost" in arange:
+ command += " cost {cost}".format(**arange)
+ return command
+
+
+def _tmplt_default_information(proc):
+ default_information = proc["default_information"]["originate"]
+ command = "default-information originate"
+
+ if default_information.get("set") is False:
+ command = "no {0}".format(command)
+ else:
+ if default_information.get("always"):
+ command += " always"
+ if default_information.get("route_map"):
+ command += " route-map {route_map}".format(**default_information)
+
+ return command
+
+
+def _tmplt_redistribute(redis):
+ command = "redistribute {protocol}".format(**redis)
+ if redis.get("id"):
+ command += " {id}".format(**redis)
+ command += " route-map {route_map}".format(**redis)
+ return command
+
+
+def _tmplt_summary_address(proc):
+ command = "summary-address {prefix}".format(**proc)
+ if proc.get("tag"):
+ command += " tag {tag}".format(**proc)
+ elif proc.get("not_advertise"):
+ command += " not-advertise"
+ return command
+
+
+def _tmplt_table_map(proc):
+ table_map = proc["table_map"]
+ command = "table-map {name}".format(**table_map)
+ if table_map.get("filter"):
+ command += " filter"
+
+ return command
+
+
+class Ospfv3Template(NetworkTemplate):
+ def __init__(self, lines=None):
+ super(Ospfv3Template, self).__init__(lines=lines, tmplt=self)
+
+ # fmt: off
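+    # As in the OSPFv2 template, the shared "vrf" parser keys most facts under
+    # '{{ "vrf_" + vrf|d() }}'; the "address_family" parser is also shared, so
+    # the per-address-family settings that follow collect under a single
+    # address_family block.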
+ PARSERS = [
+ {
+ "name": "vrf",
+ "getval": re.compile(
+ r"""
+ \s+vrf
+ \s(?P<vrf>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "vrf {{ vrf }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "vrf": "{{ vrf }}",
+ },
+ },
+ },
+ "shared": True,
+ },
+ {
+ "name": "process_id",
+ "getval": re.compile(
+ r"""
+ ospfv3
+ \s(?P<process_id>\S+)""",
+ re.VERBOSE,
+ ),
+ "setval": "router ospfv3 {{ process_id }}",
+ "result": {
+ "process_id": "{{ process_id }}",
+ },
+ "shared": True,
+ },
+ {
+ "name": "router_id",
+ "getval": re.compile(
+ r"""
+ \s+router-id
+ \s(?P<router_id>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "router-id {{ router_id }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "router_id": "{{ router_id }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "address_family",
+ "getval": re.compile(
+ r"""
+ \s+address-family
+ \s(?P<afi>\S+)
+ \s(?P<safi>\S+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "address-family {{ afi }} {{ safi }}",
+ "result": {
+ "address_family": {
+ "afi": "{{ afi }}",
+ "safi": "{{ safi }}",
+ },
+ },
+ 'shared': True,
+ },
+ {
+ "name": "area.default_cost",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)\s
+ default-cost\s(?P<default_cost>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "area {{ area_id }} default-cost {{ default_cost }}",
+ "compval": "default_cost",
+ "result": {
+ 'address_family': {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "default_cost": "{{ default_cost|int }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "area.filter_list",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)
+ \sfilter-list
+ \sroute-map\s(?P<rmap>\S+)
+ \s(?P<dir>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "area {{ area_id }} filter-list route-map {{ route_map }} {{ direction }}",
+ "result": {
+ 'address_family': {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "filter_list": [
+ {
+ "route_map": "{{ rmap }}",
+ "direction": "{{ dir }}",
+ },
+ ],
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "area.ranges",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)
+ \srange\s(?P<prefix>\S+)
+ \s*(cost)*\s*(?P<cost>\d+)*
+ \s*(?P<not_adver>not-advertise)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_area_ranges,
+ "remval": "area {{ area_id }} range {{ prefix }}",
+ "result": {
+ "address_family": {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "ranges": [
+ {
+ "prefix": "{{ prefix }}",
+ "cost": "{{ cost }}",
+ "not_advertise": "{{ not not not_adver }}",
+ },
+ ],
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "default_information.originate",
+ "getval": re.compile(
+ r"""
+ \s+default-information
+ \s(?P<originate>originate)
+ \s*(?P<always>always)*
+ \s*(route-map)*
+ \s*(?P<route_map>\S+)*$""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_default_information,
+ "result": {
+ "address_family": {
+ "default_information": {
+ "originate": {
+ "set": "{{ True if originate is defined and always is undefined and route_map is undefined else None }}",
+ "always": "{{ not not always }}",
+ "route_map": "{{ route_map }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "distance",
+ "getval": re.compile(
+ r"""
+ \s+distance
+ \s(?P<distance>\d+)$""",
+ re.VERBOSE,
+ ),
+ "setval": "distance {{ distance }}",
+ "result": {
+ "address_family": {
+ "distance": "{{ distance }}",
+ },
+ },
+ },
+ {
+ "name": "maximum_paths",
+ "getval": re.compile(
+ r"""
+ \s+maximum-paths
+ \s(?P<maximum_paths>\d+)$""",
+ re.VERBOSE,
+ ),
+ "setval": ("maximum-paths {{ maximum_paths }}"),
+ "result": {
+ "address_family": {
+ "maximum_paths": "{{ maximum_paths }}",
+ },
+ },
+ },
+ {
+ "name": "redistribute",
+ "getval": re.compile(
+ r"""
+ \s+redistribute
+ \s(?P<protocol>\S+)
+ \s*(?P<id>\S+)*
+ \sroute-map\s(?P<rmap>\S+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_redistribute,
+ "result": {
+ "address_family": {
+ "redistribute": [
+ {
+ "protocol": "{{ protocol }}",
+ "id": "{{ id }}",
+ "route_map": "{{ rmap }}",
+ },
+ ],
+ },
+ },
+ },
+ {
+ "name": "summary_address",
+ "getval": re.compile(
+ r"""
+ \s+summary-address
+ \s(?P<prefix>\S+)
+ \s*(?P<not_adver>not-advertise)*
+ \s*(tag)*\s*(?P<tag>\d+)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_summary_address,
+ "result": {
+ "address_family": {
+ "summary_address": [
+ {
+ "prefix": "{{ prefix }}",
+ "not_advertise": "{{ not not not_adver }}",
+ "tag": "{{ tag }}",
+ },
+ ],
+ },
+ },
+ },
+ {
+ "name": "table_map",
+ "getval": re.compile(
+ r"""
+ \s+table-map
+ \s(?P<rmap>\S+)
+ \s*(?P<filter>filter)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_table_map,
+ "result": {
+ "address_family": {
+ "table_map": {
+ "name": "{{ rmap }}",
+ "filter": "{{ not not filter }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "timers.throttle.spf",
+ "getval": re.compile(
+ r"""
+ \s+timers\sthrottle\sspf
+ \s(?P<initial>\d+)
+ \s(?P<min>\d+)
+ \s(?P<max>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "timers throttle spf {{ timers.throttle.spf.initial_spf_delay }}"
+ " {{ timers.throttle.spf.min_hold_time }}"
+ " {{ timers.throttle.spf.max_wait_time }}",
+ "result": {
+ "address_family": {
+ "timers": {
+ "throttle": {
+ "spf": {
+ "initial_spf_delay": "{{ initial }}",
+ "min_hold_time": "{{ min }}",
+ "max_wait_time": "{{ max }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "area.nssa",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)
+ \s(?P<nssa>nssa)
+ \s*(?P<no_sum>no-summary)*
+ \s*(?P<no_redis>no-redistribution)*
+ \s*(?P<def_info>default-information-originate)*
+ \s*(route-map)*\s*(?P<rmap>\S+)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_area_nssa,
+ "remval": "area {{ area_id }} nssa",
+ "compval": "nssa",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "nssa": {
+ "set": "{{ True if nssa is defined and no_sum is undefined and no_redis is undefined and \
+ def_info is undefined and rmap is undefined }}",
+ "no_summary": "{{ not not no_sum }}",
+ "no_redistribution": "{{ not not no_redis }}",
+ "default_information_originate": "{{ not not def_info }}",
+ "route_map": "{{ rmap }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "area.nssa.translate",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)\snssa
+ \stranslate
+ \stype7
+ \s(?P<choice>always|never)
+ \s*(?P<supress_fa>supress-fa)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_area_nssa_translate,
+ "compval": "nssa.translate",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "nssa": {
+ "translate": {
+ "type7": {
+ "always": '{{ True if choice == "always" else None }}',
+ "never": '{{ True if choice == "never" else None }}',
+ "supress_fa": "{{ not not supress_fa }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "area.stub",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)
+ \s(?P<stub>stub)
+ \s*(?P<no_summary>no-summary)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_area_stub,
+ "remval": "area {{ area_id }} stub",
+ "compval": "stub",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "stub": {
+ "set": "{{ True if stub is defined and no_summary is undefined else None }}",
+ "no_summary": "{{ not not no_summary }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "area.virtual_link",
+ "getval": re.compile(
+ r"""
+ \s+area\s(?P<area_id>\S+)
+ \svirtual-link
+ \s(?P<virtual_link>\S+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "area {{ area_id }} virtual-link {{ virtual_link }}",
+ "compval": "virtual_link",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "areas": {
+ "{{ area_id }}": {
+ "area_id": "{{ area_id }}",
+ "virtual_link": "{{ virtual_link }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "auto_cost",
+ "getval": re.compile(
+ r"""
+ \s+auto-cost\sreference-bandwidth\s
+ (?P<acrb>\d+)\s(?P<unit>\S+)$""",
+ re.VERBOSE,
+ ),
+ "setval": (
+ "auto-cost reference-bandwidth"
+ " {{ auto_cost.reference_bandwidth }}"
+ " {{ auto_cost.unit }}"
+ ),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "auto_cost": {
+ "reference_bandwidth": "{{ acrb }}",
+ "unit": "{{ unit }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "flush_routes",
+ "getval": re.compile(
+ r"""
+ \s+(?P<flush_routes>flush-routes)$""",
+ re.VERBOSE,
+ ),
+ "setval": "flush-routes",
+ "result": {
+ "flush_routes": "{{ not not flush_routes }}",
+ },
+ },
+ {
+ "name": "graceful_restart.set",
+ "getval": re.compile(
+ r"""
+ \s+(?P<graceful_restart>no\sgraceful-restart)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "graceful-restart",
+ "remval": "no graceful-restart",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "graceful_restart": {
+ "set": "{{ not graceful_restart }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "graceful_restart.helper_disable",
+ "getval": re.compile(
+ r"""
+ \s+graceful-restart
+ \s+(?P<helper_disable>helper-disable)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "graceful-restart helper-disable",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "graceful_restart": {
+ "helper_disable": "{{ not not helper_disable }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "graceful_restart.grace_period",
+ "getval": re.compile(
+ r"""
+ \s+graceful-restart
+ \s+grace-period
+ \s+(?P<grace_period>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "graceful-restart grace-period {{ graceful_restart.grace_period }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "graceful_restart": {
+ "grace_period": "{{ grace_period }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "graceful_restart.planned_only",
+ "getval": re.compile(
+ r"""
+ \s+no
+ \s+graceful-restart
+ \s+(?P<planned_only>planned-only)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "graceful-restart planned-only",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "graceful_restart": {
+ "planned_only": "{{ not planned_only }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "isolate",
+ "getval": re.compile(
+ r"""
+ \s+(?P<isolate>isolate)$""",
+ re.VERBOSE,
+ ),
+ "setval": "isolate",
+ "result": {"isolate": "{{ not not isolate }}"},
+ },
+ {
+ "name": "log_adjacency_changes",
+ "getval": re.compile(
+ r"""
+ \s+(?P<log>log-adjacency-changes)
+ \s*(?P<detail>detail)*$""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_log_adjacency_changes,
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "log_adjacency_changes": {
+ "log": "{{ True if log is defined and detail is undefined else None }}",
+ "detail": "{{ True if detail is defined else None }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "max_lsa",
+ "getval": re.compile(
+ r"""
+ \s+max-lsa
+ \s(?P<max_gen_lsa>\d+)
+ \s*(?P<threshold>\d*)
+ \s*(?P<warning_only>warning-only)*
+ \s*(ignore-time)*\s*(?P<ig_time>\d*)
+ \s*(ignore-count)*\s*(?P<ig_count>\d*)
+ \s*(reset-time)*\s*(?P<rst_time>\d*)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_max_lsa,
+ "remval": "max-lsa {{ max_lsa.max_non_self_generated_lsa }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "max_lsa": {
+ "max_non_self_generated_lsa": "{{ max_gen_lsa }}",
+ "threshold": "{{ threshold }}",
+ "ignore_time": "{{ ig_time }}",
+ "ignore_count": "{{ ig_count }}",
+ "reset_time": "{{ rst_time }}",
+ "warning_only": "{{ not not warning_only }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "max_metric",
+ "getval": re.compile(
+ r"""
+ \s+max-metric
+ \s+(?P<router_lsa>router-lsa)
+ \s*(?P<external_lsa>external-lsa)*
+ \s*(?P<max_metric_value>\d+)*
+ \s*(?P<stub_prefix_lsa>stub-prefix-lsa)*
+ \s*(?P<on_startup>on-startup)*
+ \s*(?P<wait_period>\d+)*
+ \s*(wait-for\sbgp)*
+ \s*(?P<bgp_asn>\d+)*
+ \s*(?P<inter_area_prefix_lsa>inter-area-prefix-lsa)*
+ \s*(?P<max_metric_summary_lsa>\d+)*
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": _tmplt_max_metric,
+ "remval": "max-metric router-lsa",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "max_metric": {
+ "router_lsa": {
+ "set": "{{ True if router_lsa is defined and (external_lsa is undefined) and (inter_area_prefix_lsa is undefined) and \
+ (stub_prefix_lsa is undefined) and (on_startup is undefined) else None }}",
+ "external_lsa": {
+ "set": "{{ True if external_lsa is defined and max_metric_value is undefined else None }}",
+ "max_metric_value": "{{ max_metric_value }}",
+ },
+ "stub_prefix_lsa": "{{ not not stub_prefix_lsa }}",
+ "on_startup": {
+ "set": "{{ True if on_startup is defined and (wait_period and bgp_asn) is undefined else None }}",
+ "wait_period": "{{ wait_period }}",
+ "wait_for_bgp_asn": "{{ bgp_asn }}",
+ },
+ "inter_area_prefix_lsa": {
+ "set": "{{ True if inter_area_prefix_lsa is defined and max_metric_summary_lsa is undefined else None }}",
+ "max_metric_value": "{{ max_metric_summary_lsa }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "name_lookup",
+ "getval": re.compile(
+ r"""
+ \s+(?P<name_lookup>name-lookup)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": ("name-lookup"),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "name_lookup": "{{ not not name_lookup }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "passive_interface.default",
+ "getval": re.compile(
+ r"""
+ \s+passive-interface
+ \s+(?P<default>default)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": ("passive-interface default"),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "passive_interface": {"default": "{{ not not default }}"},
+ },
+ },
+ },
+ },
+ {
+ "name": "shutdown",
+ "getval": re.compile(
+ r"""
+ \s+(?P<shutdown>shutdown)$""",
+ re.VERBOSE,
+ ),
+ "setval": ("shutdown"),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "shutdown": "{{ not not shutdown }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "timers.lsa_arrival",
+ "getval": re.compile(
+ r"""
+ \s+timers
+ \slsa-arrival
+ \s(?P<lsa_arrival_val>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": ("timers lsa-arrival {{ timers.lsa_arrival }}"),
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "timers": {
+ "lsa_arrival": "{{ lsa_arrival_val }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "timers.lsa_group_pacing",
+ "getval": re.compile(
+ r"""
+ \s+timers
+ \slsa-group-pacing
+ \s(?P<lsa_group_pacing>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "timers lsa-group-pacing {{ timers.lsa_group_pacing }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "timers": {
+ "lsa_group_pacing": "{{ lsa_group_pacing }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "timers.throttle.lsa",
+ "getval": re.compile(
+ r"""
+ \s+timers\sthrottle\slsa
+ \s(?P<start>\d+)
+ \s(?P<hold>\d+)
+ \s(?P<max>\d+)
+ $""",
+ re.VERBOSE,
+ ),
+ "setval": "timers throttle lsa {{ timers.throttle.lsa.start_interval }}"
+ " {{ timers.throttle.lsa.hold_interval }}"
+ " {{ timers.throttle.lsa.max_interval }}",
+ "result": {
+ "vrfs": {
+ '{{ "vrf_" + vrf|d() }}': {
+ "timers": {
+ "throttle": {
+ "lsa": {
+ "start_interval": "{{ start }}",
+ "hold_interval": "{{ hold }}",
+ "max_interval": "{{ max }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ ]
+ # fmt: on
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/prefix_lists.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/prefix_lists.py
new file mode 100644
index 00000000..0bb66ead
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/prefix_lists.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The Prefix_lists parser templates file. This contains
+a list of parser definitions and associated functions that
+facilitates both facts gathering and native command generation for
+the given network resource.
+"""
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.network_template import (
+ NetworkTemplate,
+)
+
+
+class Prefix_listsTemplate(NetworkTemplate):
+ def __init__(self, lines=None, module=None):
+ super(Prefix_listsTemplate, self).__init__(lines=lines, tmplt=self, module=module)
+
+ # fmt: off
+ PARSERS = [
+ {
+ "name": "entry",
+ "getval": re.compile(
+ r"""
+ ^(?P<afi>ip|ipv6)
+ \sprefix-list
+ \s(?P<name>\S+)
+ \sseq\s(?P<sequence>\d+)
+ \s(?P<action>permit|deny)
+ \s(?P<prefix>\S+)
+ (\seq\s(?P<eq>\d+))?
+ (\sge\s(?P<ge>\d+))?
+ (\sle\s(?P<le>\d+))?
+ (\smask\s(?P<mask>\S+))?
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "{{ 'ip' if afi == 'ipv4' else afi }} prefix-list {{ name }}"
+ "{{ (' seq ' + sequence|string) if sequence|d('') else '' }}"
+ " {{ action }}"
+ " {{ prefix }}"
+ "{{ (' eq ' + eq|string) if eq|d('') else '' }}"
+ "{{ (' ge ' + ge|string) if ge|d('') else '' }}"
+ "{{ (' le ' + le|string) if le|d('') else '' }}"
+ "{{ (' mask ' + mask) if mask|d('') else '' }}",
+ "result": {
+ "{{ 'ipv4' if afi == 'ip' else 'ipv6' }}": {
+ "afi": "{{ 'ipv4' if afi == 'ip' else 'ipv6' }}",
+ "prefix_lists": {
+ "{{ name }}": {
+ "name": "{{ name }}",
+ "entries": [
+ {
+ "sequence": "{{ sequence|d(None) }}",
+ "action": "{{ action }}",
+ "prefix": "{{ prefix }}",
+ "eq": "{{ eq }}",
+ "ge": "{{ ge }}",
+ "le": "{{ le }}",
+ "mask": "{{ mask }}",
+ },
+ ],
+ },
+ },
+ },
+ },
+ },
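+        # Illustrative example with made-up values: the line
+        #   "ip prefix-list plist1 seq 10 permit 192.0.2.0/24 le 28"
+        # is stored under the "ipv4" key as
+        #   {"name": "plist1", "entries": [{"sequence": "10", "action": "permit",
+        #    "prefix": "192.0.2.0/24", "le": "28"}]} (unmatched groups stay empty).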
+ {
+ "name": "description",
+ "getval": re.compile(
+ r"""
+ ^(?P<afi>ip|ipv6)
+ \sprefix-list
+ \s(?P<name>\S+)
+ \sdescription\s(?P<description>.+)\s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "{{ 'ip' if afi == 'ipv4' else afi }} prefix-list {{ name }} description {{ description }}",
+ "result": {
+ "{{ 'ipv4' if afi == 'ip' else 'ipv6' }}": {
+ "afi": "{{ 'ipv4' if afi == 'ip' else 'ipv6' }}",
+ "prefix_lists": {
+ "{{ name }}": {
+ "name": "{{ name }}",
+ "description": "{{ description }}",
+ },
+ },
+ },
+ },
+ },
+ ]
+ # fmt: on
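+    # Rough usage sketch (assumes the parse()/render() helpers inherited from
+    # ansible.netcommon's NetworkTemplate base class):
+    #   tmplt = Prefix_listsTemplate(lines=["ip prefix-list plist1 seq 10 permit 192.0.2.0/24"])
+    #   facts = tmplt.parse()  # nested dict keyed by "ipv4"/"ipv6"
+    #   cmd = tmplt.render({"afi": "ipv4", "name": "plist1", "sequence": 10,
+    #                       "action": "permit", "prefix": "192.0.2.0/24"}, "entry", False)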
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/route_maps.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/route_maps.py
new file mode 100644
index 00000000..8267a905
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/route_maps.py
@@ -0,0 +1,1367 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The Route_maps parser templates file. This contains
+a list of parser definitions and associated functions that
+facilitates both facts gathering and native command generation for
+the given network resource.
+"""
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.network_template import (
+ NetworkTemplate,
+)
+
+
+def _tmplt_match_ip_multicast(data):
+ cmd = "match ip multicast"
+ multicast = data["match"]["ip"]["multicast"]
+
+ if "source" in multicast:
+ cmd += " source {source}".format(**multicast)
+
+ if "prefix" in multicast.get("group", {}):
+ cmd += " group {prefix}".format(**multicast["group"])
+ else:
+ if "first" in multicast.get("group_range", {}):
+ cmd += " group-range {first}".format(**multicast["group_range"])
+ if "last" in multicast.get("group_range", {}):
+ cmd += " to {last}".format(**multicast["group_range"])
+
+ if "rp" in multicast:
+ cmd += " rp {prefix}".format(**multicast["rp"])
+ if "rp_type" in multicast["rp"]:
+ cmd += " rp-type {rp_type}".format(**multicast["rp"])
+
+ return cmd
+
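+# Illustrative example with made-up values:
+#   _tmplt_match_ip_multicast({"match": {"ip": {"multicast": {
+#       "source": "192.1.2.0/24",
+#       "group_range": {"first": "239.0.0.1", "last": "239.255.255.255"},
+#       "rp": {"prefix": "209.165.201.0/27", "rp_type": "Bidir"}}}}})
+#   -> "match ip multicast source 192.1.2.0/24 group-range 239.0.0.1 to 239.255.255.255 rp 209.165.201.0/27 rp-type Bidir"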
+
+def _tmplt_match_ipv6_multicast(data):
+ cmd = "match ipv6 multicast"
+ multicast = data["match"]["ipv6"]["multicast"]
+
+ if "source" in multicast:
+ cmd += " source {source}".format(**multicast)
+
+ if "prefix" in multicast.get("group", {}):
+ cmd += " group {prefix}".format(**multicast["group"])
+ else:
+ if "first" in multicast.get("group_range", {}):
+ cmd += " group-range {first}".format(**multicast["group_range"])
+ if "last" in multicast.get("group_range", {}):
+ cmd += " to {last}".format(**multicast["group_range"])
+
+ if "rp" in multicast:
+ cmd += " rp {prefix}".format(**multicast["rp"])
+ if "rp_type" in multicast["rp"]:
+ cmd += " rp-type {rp_type}".format(**multicast["rp"])
+
+ return cmd
+
+
+def _tmplt_set_metric(data):
+ cmd = "set metric"
+ metric = data["set"]["metric"]
+
+ for x in [
+ "bandwidth",
+ "igrp_delay_metric",
+ "igrp_reliability_metric",
+ "igrp_effective_bandwidth_metric",
+ "igrp_mtu",
+ ]:
+ if x in metric:
+ cmd += " {0}".format(metric[x])
+
+ return cmd
+
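+# Illustrative example with made-up values:
+#   _tmplt_set_metric({"set": {"metric": {"bandwidth": 100, "igrp_delay_metric": 200,
+#       "igrp_reliability_metric": 250, "igrp_effective_bandwidth_metric": 255,
+#       "igrp_mtu": 1500}}})
+#   -> "set metric 100 200 250 255 1500"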
+
+class Route_mapsTemplate(NetworkTemplate):
+ def __init__(self, lines=None, module=None):
+ super(Route_mapsTemplate, self).__init__(lines=lines, tmplt=self, module=module)
+
+ # fmt: off
+ PARSERS = [
+ {
+ "name": "route_map",
+ "getval": re.compile(
+ r"""
+ ^route-map\s(?P<route_map>\S+)\s(?P<action>\S+)\s(?P<sequence>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "route-map {{ route_map }}"
+ "{{ ' ' + action if action is defined else '' }}"
+ "{{ ' ' + sequence|string if sequence is defined else '' }}",
+ "result": {
+ "{{ route_map }}": {
+ "route_map": "{{ route_map }}",
+ "entries": {
+ "{{ sequence }}": {
+ "sequence": "{{ sequence }}",
+ "action": "{{ action }}",
+ },
+ },
+ },
+ },
+ "shared": True,
+ },
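+        # "shared": True keeps the values captured here (route_map, action,
+        # sequence) available while the indented lines that follow are parsed,
+        # which is why the per-entry parsers below reference {{ route_map }}
+        # and {{ sequence }} in their "result" templates without capturing them.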
+ {
+ "name": "continue_sequence",
+ "getval": re.compile(
+ r"""
+ \s+continue\s(?P<continue_sequence>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "continue {{ continue_sequence }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "continue_sequence": "{{ continue_sequence }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "description",
+ "getval": re.compile(
+ r"""
+                \s+description\s(?P<description>.+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "description {{ description }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "description": "{{ description }}",
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.as_number.asn",
+ "getval": re.compile(
+ r"""
+ \s+match\sas-number
+ (?!\sas-path-list)
+ \s(?P<asn>.+)\s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match as-number {{ match.as_number.asn|join(', ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "route_map": "{{ route_map }}",
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "as_number": {
+ "asn": "{{ asn.rstrip().split(', ') }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.as_number.as_path_list",
+ "getval": re.compile(
+ r"""
+ \s+match\sas-number
+ \sas-path-list\s(?P<as_path_list>.+)\s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match as-number as-path-list {{ match.as_number.as_path_list|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "as_number": {
+ "as_path_list": "{{ as_path_list.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.as_path",
+ "getval": re.compile(
+ r"""
+ \s+match\sas-path\s(?P<as_path>.+)\s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match as-path {{ match.as_path|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "as_path": "{{ as_path.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.community.community_list",
+ "getval": re.compile(
+ r"""
+ \s+match\scommunity
+ \s(?P<community_list>.+)
+ (\s(?P<exact_match>exact-match))?
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match community {{ match.community.community_list|join(' ') }}{{ ' exact-match' if match.community.exact_match|d(False) else '' }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "community": {
+ "community_list": "{{ community_list.split() }}",
+ "exact_match": "{{ not not exact_match }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.evpn.route_types",
+ "getval": re.compile(
+ r"""
+ \s+match\sevpn
+ \sroute-type
+ \s(?P<route_types>.+)\s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match evpn route-type {{ match.evpn.route_types|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "evpn": {
+ "route_types": "{{ route_types.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.extcommunity.extcommunity_list",
+ "getval": re.compile(
+ r"""
+ \s+match\sextcommunity
+ \s(?P<extcommunity_list>.+)
+                (\s(?P<exact_match>exact-match))?
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match extcommunity {{ match.extcommunity.extcommunity_list|join(' ') }}"
+ "{{ ' exact-match' if match.extcommunity.exact_match|d(False) else '' }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "extcommunity": {
+ "extcommunity_list": "{{ extcommunity_list.split() }}",
+ "exact_match": "{{ not not exact_match }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.interfaces",
+ "getval": re.compile(
+ r"""
+ \s+match\sinterface
+ \s(?P<interfaces>.+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match interface {{ match.interfaces|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "interfaces": "{{ interfaces.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.ip.address.access_list",
+ "getval": re.compile(
+ r"""
+ \s+match\sip\saddress
+ \s(?P<access_list>\S+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match ip address {{ match.ip.address.access_list }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "ip": {
+ "address": {
+ "access_list": "{{ access_list }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.ip.address.prefix_lists",
+ "getval": re.compile(
+ r"""
+ \s+match\sip\saddress
+ \sprefix-list
+ \s(?P<prefix_lists>.+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match ip address prefix-list {{ match.ip.address.prefix_lists|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "ip": {
+ "address": {
+ "prefix_lists": "{{ prefix_lists.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ # match ip multicast source 192.1.2.0/24 group-range 239.0.0.1 to 239.255.255.255 rp 209.165.201.0/27 rp-type Bidir
+ {
+ "name": "match.ip.multicast",
+ "getval": re.compile(
+ r"""
+ \s+match\sip\smulticast
+ (\ssource\s(?P<source>\S+))?
+ (\sgroup\s(?P<prefix>\S+))?
+ (\sgroup-range
+ (\s(?P<first>\S+))?
+ (\sto)?
+ (\s(?P<last>\S+)))?
+ (\srp\s(?P<rp>\S+))?
+ (\srp-type\s(?P<rp_type>\S+))?
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": _tmplt_match_ip_multicast,
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "ip": {
+ "multicast": {
+ "group": {
+ "prefix": "{{ prefix }}",
+ },
+ "group_range": {
+ "first": "{{ first }}",
+ "last": "{{ last }}",
+ },
+ "rp": {
+ "prefix": "{{ rp }}",
+ "rp_type": "{{ rp_type }}",
+ },
+ "source": "{{ source }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.ip.next_hop.prefix_lists",
+ "getval": re.compile(
+ r"""
+ \s+match\sip\snext-hop
+ \sprefix-list\s(?P<prefix_lists>.+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match ip next-hop prefix-list {{ match.ip.next_hop.prefix_lists|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "ip": {
+ "next_hop": {
+ "prefix_lists": "{{ prefix_lists.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.ip.route_source.prefix_lists",
+ "getval": re.compile(
+ r"""
+ \s+match\sip\sroute-source
+ \sprefix-list\s(?P<prefix_lists>.+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match ip route-source prefix-list {{ match.ip.route_source.prefix_lists|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "ip": {
+ "route_source": {
+ "prefix_lists": "{{ prefix_lists.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.ipv6.address.access_list",
+ "getval": re.compile(
+ r"""
+ \s+match\sipv6\saddress
+ \s(?P<access_list>\S+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match ipv6 address {{ match.ipv6.address.access_list }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "ipv6": {
+ "address": {
+ "access_list": "{{ access_list }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.ipv6.address.prefix_lists",
+ "getval": re.compile(
+ r"""
+ \s+match\sipv6\saddress
+ \sprefix-list
+ \s(?P<prefix_lists>.+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match ipv6 address prefix-list {{ match.ipv6.address.prefix_lists|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "ipv6": {
+ "address": {
+ "prefix_lists": "{{ prefix_lists.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.ipv6.multicast",
+ "getval": re.compile(
+ r"""
+ \s+match\sipv6\smulticast
+ (\ssource\s(?P<source>\S+))?
+ (\sgroup\s(?P<prefix>\S+))?
+ (\sgroup-range
+ (\s(?P<first>\S+))?
+ (\sto)?
+ (\s(?P<last>\S+)))?
+ (\srp\s(?P<rp>\S+))?
+ (\srp-type\s(?P<rp_type>\S+))?
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": _tmplt_match_ipv6_multicast,
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "ipv6": {
+ "multicast": {
+ "group": {
+ "prefix": "{{ prefix }}",
+ },
+ "group_range": {
+ "first": "{{ first }}",
+ "last": "{{ last }}",
+ },
+ "rp": {
+ "prefix": "{{ rp }}",
+ "rp_type": "{{ rp_type }}",
+ },
+ "source": "{{ source }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.ipv6.next_hop.prefix_lists",
+ "getval": re.compile(
+ r"""
+ \s+match\sipv6\snext-hop
+ \sprefix-list\s(?P<prefix_lists>.+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match ipv6 next-hop prefix-list {{ match.ipv6.next_hop.prefix_lists|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "ipv6": {
+ "next_hop": {
+ "prefix_lists": "{{ prefix_lists.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.ipv6.route_source.prefix_lists",
+ "getval": re.compile(
+ r"""
+ \s+match\sipv6\sroute-source
+ \sprefix-list\s(?P<prefix_lists>.+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match ipv6 route-source prefix-list {{ match.ipv6.route_source.prefix_lists|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "ipv6": {
+ "route_source": {
+ "prefix_lists": "{{ prefix_lists.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.mac_list",
+ "getval": re.compile(
+ r"""
+ \s+match\smac-list
+ \s(?P<mac_list>.+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match mac-list {{ match.mac_list|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "mac_list": "{{ mac_list.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.metric",
+ "getval": re.compile(
+ r"""
+ \s+match\smetric
+ \s(?P<metric>.+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match metric {{ match.metric|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "metric": "{{ metric.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.ospf_area",
+ "getval": re.compile(
+ r"""
+ \s+match\sospf-area
+ \s(?P<ospf_area>.+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match ospf-area {{ match.ospf_area|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "ospf_area": "{{ ospf_area.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.route_types",
+ "getval": re.compile(
+ r"""
+ \s+match\sroute-type
+ \s(?P<route_types>.+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match route-type {{ match.route_types|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "route_types": "{{ route_types.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.source_protocol",
+ "getval": re.compile(
+ r"""
+ \s+match\ssource-protocol
+                \s(?P<source_protocol>.+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match source-protocol {{ match.source_protocol|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "source_protocol": "{{ source_protocol.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "match.tags",
+ "getval": re.compile(
+ r"""
+ \s+match\stag
+ \s(?P<tags>.+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "match tag {{ match.tags|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "match": {
+ "tags": "{{ tags.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.as_path.prepend.as_number",
+ "getval": re.compile(
+ r"""
+ \s+set\sas-path\sprepend
+ \s(?P<as_number>(?!last-as).+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "set as-path prepend {{ set.as_path.prepend.as_number|join(' ') }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "as_path": {
+ "prepend": {
+ "as_number": "{{ as_number.split() }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.as_path.prepend.last_as",
+ "getval": re.compile(
+ r"""
+ \s+set\sas-path\sprepend
+ \slast-as\s(?P<last_as>\d+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "set as-path prepend last-as {{ set.as_path.prepend.last_as|string }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "as_path": {
+ "prepend": {
+ "last_as": "{{ last_as }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.as_path.tag",
+ "getval": re.compile(
+ r"""
+ \s+set\sas-path
+ \s(?P<tag>tag)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "set as-path tag",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "as_path": {
+ "tag": "{{ not not tag }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.comm_list",
+ "getval": re.compile(
+ r"""
+ \s+set\scomm-list
+ \s(?P<comm_list>\S+)
+ \s*delete
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set comm-list {{ set.comm_list }} delete",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "comm_list": "{{ comm_list }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.community",
+ "getval": re.compile(
+ r"""
+ \s+set\scommunity
+ (\s(?P<internet>internet))?
+ (?P<number>(\s\d+:\d+)*)
+ (\s(?P<no_export>no-export))?
+ (\s(?P<no_advertise>no-advertise))?
+ (\s(?P<local_as>local-AS))?
+ (\s(?P<graceful_shutdown>graceful-shutdown))?
+ (\s(?P<additive>additive))?\s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "set community"
+ "{{ ' internet' if set.community.internet|d(False) else '' }}"
+ "{{ ' ' + set.community.number|join(' ') if set.community.number|d(False) else '' }}"
+ "{{ ' no-export' if set.community.no_export|d(False) else '' }}"
+ "{{ ' no-advertise' if set.community.no_advertise|d(False) else '' }}"
+ "{{ ' local-AS' if set.community.local_as|d(False) else '' }}"
+ "{{ ' graceful-shutdown' if set.community.graceful_shutdown|d(False) else '' }}"
+ "{{ ' additive' if set.community.additive|d(False) else '' }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "community": {
+ "internet": "{{ not not internet }}",
+ "number": "{{ number.split() }}",
+ "no_export": "{{ not not no_export }}",
+ "no_advertise": "{{ not not no_advertise }}",
+ "local_as": "{{ not not local_as }}",
+ "graceful_shutdown": "{{ not not graceful_shutdown }}",
+ "additive": "{{ not not additive }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
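+        # Illustrative example with made-up values: the line
+        #   "    set community 65001:10 65001:20 no-export additive"
+        # yields number=["65001:10", "65001:20"] with no_export and additive set
+        # to True, and "setval" rebuilds the same command from that data.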
+ {
+ "name": "set.dampening",
+ "getval": re.compile(
+ r"""
+ \s+set\sdampening
+ \s(?P<half_life>\d+)
+ \s(?P<start_reuse_route>\d+)
+ \s(?P<start_suppress_route>\d+)
+ \s(?P<max_suppress_time>\d+)
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "set dampening {{ set.dampening.half_life }}"
+ " {{ set.dampening.start_reuse_route }}"
+ " {{ set.dampening.start_suppress_route }}"
+ " {{ set.dampening.max_suppress_time }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "dampening": {
+ "half_life": "{{ half_life }}",
+ "start_reuse_route": "{{ start_reuse_route }}",
+ "start_suppress_route": "{{ start_suppress_route }}",
+ "max_suppress_time": "{{ max_suppress_time }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.distance",
+ "getval": re.compile(
+ r"""
+ \s+set\sdistance
+ \s(?P<igp_ebgp_routes>\d+)
+ (\s(?P<internal_routes>\d+))?
+ (\s(?P<local_routes>\d+))?
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "set distance {{ set.distance.igp_ebgp_routes }}"
+ "{{ ' ' + set.distance.internal_routes|string if set.distance.internal_routes|d(False) else '' }}"
+                      "{{ ' ' + set.distance.local_routes|string if set.distance.local_routes|d(False) else '' }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "distance": {
+ "igp_ebgp_routes": "{{ igp_ebgp_routes }}",
+ "internal_routes": "{{ internal_routes }}",
+ "local_routes": "{{ local_routes }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.evpn.gateway_ip",
+ "getval": re.compile(
+ r"""
+ \s+set\sevpn
+ \sgateway-ip
+ (\s(?P<ip>(?!use-nexthop)\S+))?
+ (\s(?P<use_nexthop>use-nexthop))?
+ \s*
+ $""", re.VERBOSE,
+ ),
+ "setval": "set evpn gateway-ip"
+ "{{ ' ' + set.evpn.gateway_ip.ip if set.evpn.gateway_ip.ip|d(False) else ''}}"
+ "{{ ' use-nexthop' if set.evpn.gateway_ip.use_nexthop|d(False) else '' }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "evpn": {
+ "gateway_ip": {
+ "ip": "{{ ip }}",
+ "use_nexthop": "{{ not not use_nexthop }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.extcomm_list",
+ "getval": re.compile(
+ r"""
+ \s+set\sextcomm-list
+ \s(?P<extcomm_list>\S+)
+ \s*delete
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set extcomm-list {{ set.extcomm_list }} delete",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "extcomm_list": "{{ extcomm_list }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.forwarding_address",
+ "getval": re.compile(
+ r"""
+ \s+set
+ \s(?P<forwarding_address>forwarding-address)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set forwarding-address",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "forwarding_address": "{{ not not forwarding_address }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.null_interface",
+ "getval": re.compile(
+ r"""
+ \s+set\sinterface
+ \s(?P<interface>\S+)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set interface {{ set.null_interface }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "null_interface": "{{ interface }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.ip.address.prefix_list",
+ "getval": re.compile(
+ r"""
+ \s+set\sip\saddress
+ \sprefix-list\s(?P<prefix_list>\S+)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set ip address prefix-list {{ set.ip.address.prefix_list }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "ip": {
+ "address": {
+ "prefix_list": "{{ prefix_list }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.ip.precedence",
+ "getval": re.compile(
+ r"""
+ \s+set\sip
+ \sprecedence\s(?P<precedence>\S+)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set ip precedence {{ set.ip.precedence }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "ip": {
+ "precedence": "{{ precedence }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.ipv6.address.prefix_list",
+ "getval": re.compile(
+ r"""
+ \s+set\sipv6\saddress
+ \sprefix-list\s(?P<prefix_list>\S+)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set ipv6 address prefix-list {{ set.ipv6.address.prefix_list }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "ipv6": {
+ "address": {
+ "prefix_list": "{{ prefix_list }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.ipv6.precedence",
+ "getval": re.compile(
+ r"""
+ \s+set\sipv6
+ \sprecedence\s(?P<precedence>\S+)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set ipv6 precedence {{ set.ipv6.precedence }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "ipv6": {
+ "precedence": "{{ precedence }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.label_index",
+ "getval": re.compile(
+ r"""
+ \s+set\slabel-index
+ \s(?P<label_index>\d+)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set label-index {{ set.label_index }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "label_index": "{{ label_index }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.level",
+ "getval": re.compile(
+ r"""
+ \s+set\slevel
+ \s(?P<level>\S+)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set level {{ set.level }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "level": "{{ level }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.local_preference",
+ "getval": re.compile(
+ r"""
+ \s+set\slocal-preference
+ \s(?P<local_preference>\d+)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set local-preference {{ set.local_preference }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "local_preference": "{{ local_preference }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.metric",
+ "getval": re.compile(
+ r"""
+ \s+set\smetric
+ \s(?P<bandwidth>\d+)
+ (\s(?P<igrp_delay_metric>\d+))?
+ (\s(?P<igrp_reliability_metric>\d+))?
+ (\s(?P<igrp_effective_bandwidth_metric>\d+))?
+ (\s(?P<igrp_mtu>\d+))?
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": _tmplt_set_metric,
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "metric": {
+ "bandwidth": "{{ bandwidth }}",
+ "igrp_delay_metric": "{{ igrp_delay_metric }}",
+ "igrp_reliability_metric": "{{ igrp_reliability_metric }}",
+ "igrp_effective_bandwidth_metric": "{{ igrp_effective_bandwidth_metric }}",
+ "igrp_mtu": "{{ igrp_mtu }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.metric_type",
+ "getval": re.compile(
+ r"""
+ \s+set\smetric-type
+ \s(?P<metric_type>\S+)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set metric-type {{ set.metric_type }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "metric_type": "{{ metric_type }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.nssa_only",
+ "getval": re.compile(
+ r"""
+ \s+set
+ \s(?P<nssa_only>nssa-only)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set nssa-only",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "nssa_only": "{{ not not nssa_only }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.origin",
+ "getval": re.compile(
+ r"""
+ \s+set\sorigin
+ \s(?P<origin>\S+)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set origin {{ set.origin }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "origin": "{{ origin }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.path_selection",
+ "getval": re.compile(
+ r"""
+ \s+set\spath-selection
+ \s(?P<path_selection>\S+)
+ \sadvertise
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set path-selection {{ set.path_selection }} advertise",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "path_selection": "{{ path_selection }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.tag",
+ "getval": re.compile(
+ r"""
+ \s+set\stag
+ \s(?P<tag>\d+)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set tag {{ set.tag }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "tag": "{{ tag }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "name": "set.weight",
+ "getval": re.compile(
+ r"""
+ \s+set\sweight
+ \s(?P<weight>\d+)
+ \s*$""", re.VERBOSE,
+ ),
+ "setval": "set weight {{ set.weight }}",
+ "result": {
+ "{{ route_map }}": {
+ "entries": {
+ "{{ sequence }}": {
+ "set": {
+ "weight": "{{ weight }}",
+ },
+ },
+ },
+ },
+ },
+ },
+ ]
+ # fmt: on
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/snmp_server.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/snmp_server.py
new file mode 100644
index 00000000..8615bb2c
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/rm_templates/snmp_server.py
@@ -0,0 +1,1550 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+"""
+The Snmp_server parser templates file. This contains
+a list of parser definitions and associated functions that
+facilitates both facts gathering and native command generation for
+the given network resource.
+"""
+
+import re
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.network_template import (
+ NetworkTemplate,
+)
+
+
+def _template_hosts(data):
+ cmd = "snmp-server host {0}".format(data["host"])
+ if data.get("traps"):
+ cmd += " traps"
+ if data.get("informs"):
+ cmd += " informs"
+ if data.get("use_vrf"):
+ cmd += " use-vrf {0}".format(data["use_vrf"])
+ if data.get("filter_vrf"):
+ cmd += " filter-vrf {0}".format(data["filter_vrf"])
+ if data.get("source_interface"):
+ cmd += " source-interface {0}".format(data["source_interface"])
+ if data.get("version"):
+ cmd += " version {0}".format(data["version"])
+ if data.get("community"):
+ cmd += " " + data["community"]
+ elif data.get("auth"):
+ cmd += " auth {0}".format(data["auth"])
+ elif data.get("priv"):
+ cmd += " priv {0}".format(data["priv"])
+ if data.get("udp_port"):
+ cmd += " udp-port {0}".format(data["udp_port"])
+
+ return cmd
+
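+# Illustrative example with made-up values:
+#   _template_hosts({"host": "192.0.2.1", "traps": True, "version": "2c",
+#       "community": "public", "udp_port": 1163})
+#   -> "snmp-server host 192.0.2.1 traps version 2c public udp-port 1163"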
+
+def _tmplt_users_auth(data):
+ cmd = "snmp-server user {0}".format(data["user"])
+
+ if "group" in data:
+ cmd += " {0}".format(data["group"])
+ if "authentication" in data:
+ auth = data["authentication"]
+ if "algorithm" in auth:
+ cmd += " auth {0}".format(auth["algorithm"])
+ if "password" in auth:
+ cmd += " {0}".format(auth["password"])
+ priv = auth.get("priv", {})
+ if priv:
+ cmd += " priv"
+ if priv.get("aes_128", False):
+ cmd += " aes-128"
+ if "privacy_password" in priv:
+ cmd += " {0}".format(priv["privacy_password"])
+ if auth.get("localized_key", False):
+ cmd += " localizedkey"
+ elif auth.get("localizedv2_key", False):
+ cmd += " localizedV2key"
+ if "engine_id" in auth:
+ cmd += " engineID {0}".format(auth["engine_id"])
+
+ return cmd
+
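+# Illustrative example with made-up values:
+#   _tmplt_users_auth({"user": "snmp_user_1", "group": "network-operator",
+#       "authentication": {"algorithm": "md5",
+#                          "password": "0x5632724fb8ac3699296af26281e1d0f1",
+#                          "localized_key": True}})
+#   -> "snmp-server user snmp_user_1 network-operator auth md5 0x5632724fb8ac3699296af26281e1d0f1 localizedkey"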
+
+def _template_communities(data):
+ cmd = "snmp-server community {0}".format(data["name"])
+
+ if "group" in data:
+ cmd += " group {0}".format(data["group"])
+ elif "use_ipv4acl" in data:
+ cmd += " use-ipv4acl {0}".format(data["use_ipv4acl"])
+ elif "use_ipv6acl" in data:
+ cmd += " use-ipv6acl {0}".format(data["use_ipv6acl"])
+ elif data.get("ro", False):
+ cmd += " ro"
+ elif data.get("rw", False):
+ cmd += " rw"
+
+ return cmd
+
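+# Illustrative example with made-up values:
+#   _template_communities({"name": "public", "group": "network-operator"})
+#   -> "snmp-server community public group network-operator"
+# Only the first applicable of group / use-ipv4acl / use-ipv6acl / ro / rw is
+# rendered, since the function is an if/elif chain.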
+
+class Snmp_serverTemplate(NetworkTemplate):
+ def __init__(self, lines=None, module=None):
+ super(Snmp_serverTemplate, self).__init__(lines=lines, tmplt=self, module=module)
+
+ # fmt: off
+ PARSERS = [
+ {
+ "name": "aaa_user.cache_timeout",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\saaa-user
+ \scache-timeout\s(?P<cache_timeout>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server aaa-user cache-timeout {{ aaa_user.cache_timeout }}",
+ "result": {
+ "aaa_user": {
+ "cache_timeout": "{{ cache_timeout }}",
+ },
+ },
+ },
+ {
+ "name": "communities",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \scommunity\s(?P<community>\S+)
+ (\sgroup\s(?P<group>\S+))?
+ (\suse-ipv4acl\s(?P<use_ipv4acl>\S+))?
+ (\suse-ipv6acl\s(?P<use_ipv6acl>\S+))?
+ $""", re.VERBOSE,
+ ),
+ "setval": _template_communities,
+ "result": {
+ "communities": [
+ {
+ "name": "{{ community }}",
+ "group": "{{ group }}",
+ "use_ipv4acl": "{{ use_ipv4acl }}",
+ "use_ipv6acl": "{{ use_ipv6acl }}",
+ },
+ ],
+ },
+ },
+ {
+ "name": "contact",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \scontact\s(?P<contact>.+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server contact {{ contact }}",
+ "result": {
+ "contact": "{{ contact }}",
+ },
+ },
+ {
+ "name": "context",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \scontext\s(?P<name>\S+)
+ (\sinstance\s(?P<instance>\S+))?
+ (\svrf\s(?P<vrf>\S+))?
+ (\stopology\s(?P<topology>\S+))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server context {{ context.name }}"
+ "{{ ' instance ' + context.instance if context.instance is defined else '' }}"
+ "{{ ' topology ' + context.topology if context.topology is defined else '' }}"
+ "{{ ' vrf ' + context.vrf if context.vrf is defined else '' }}",
+ "result": {
+ "context": {
+ "name": "{{ name }}",
+ "instance": "{{ instance }}",
+ "vrf": "{{ vrf }}",
+ "topology": "{{ topology }}",
+ },
+
+ },
+ },
+ {
+ "name": "counter.enable",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \scounter
+ \scache\s(?P<enable>enable)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server counter cache enable",
+ "result": {
+ "counter": {
+ "cache": {
+ "enable": "{{ True if enable is defined else None }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "counter.cache.timeout",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \scounter
+ \scache\stimeout\s(?P<timeout>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server counter cache timeout {{ counter.cache.timeout }}",
+ "result": {
+ "counter": {
+ "cache": {
+ "timeout": "{{ timeout }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "drop.unknown_engine_id",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\sdrop
+ \s(?P<unknown_engine_id>unknown-engine-id)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server drop unknown-engine-id",
+ "result": {
+ "drop": {
+ "unknown_engine_id": "{{ not not unknown_engine_id }}",
+ },
+ },
+ },
+ {
+ "name": "drop.unknown_user",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\sdrop
+ \s(?P<unknown_user>unknown-user)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server drop unknown-user",
+ "result": {
+ "drop": {
+ "unknown_user": "{{ not not unknown_user }}",
+ },
+ },
+ },
+ {
+ "name": "traps.aaa",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps\saaa\s(?P<server_state_change>server-state-change)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps aaa"
+ "{{ ' server-state-change' if traps.aaa.server_state_change|d(False) else ''}}",
+ "result": {
+ "traps": {
+ "aaa": {
+ "server_state_change": "{{ not not server_state_change }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.bgp",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps\s(?P<enable>bgp)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps bgp",
+ "result": {
+ "traps": {
+ "bgp": {
+ "enable": "{{ not not enable }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.bridge.newroot",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sbridge\s(?P<newroot>newroot)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps bridge newroot",
+ "result": {
+ "traps": {
+ "bridge": {
+ "newroot": "{{ not not newroot }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.bridge.topologychange",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sbridge\s(?P<topologychange>topologychange)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps bridge topologychange",
+ "result": {
+ "traps": {
+ "bridge": {
+ "topologychange": "{{ not not topologychange }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.callhome.event_notify",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \scallhome\s(?P<event_notify>event-notify)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps callhome event-notify",
+ "result": {
+ "traps": {
+ "callhome": {
+ "event_notify": "{{ not not event_notify }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.callhome.smtp_send_fail",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \scallhome\s(?P<smtp_send_fail>smtp-send-fail)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps callhome smtp-send-fail",
+ "result": {
+ "traps": {
+ "callhome": {
+ "smtp_send_fail": "{{ not not smtp_send_fail }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.cfs.merge_failure",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \scfs\s(?P<merge_failure>merge-failure)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps cfs merge-failure",
+ "result": {
+ "traps": {
+ "cfs": {
+ "merge_failure": "{{ not not merge_failure }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.cfs.state_change_notif",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \scfs\s(?P<state_change_notif>state-change-notif)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps cfs state-change-notif",
+ "result": {
+ "traps": {
+ "cfs": {
+ "state_change_notif": "{{ not not state_change_notif }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.config.ccmCLIRunningConfigChanged",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sconfig\s(?P<ccmCLIRunningConfigChanged>ccmCLIRunningConfigChanged)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps config ccmCLIRunningConfigChanged",
+ "result": {
+ "traps": {
+ "config": {
+ "ccmCLIRunningConfigChanged": "{{ not not ccmCLIRunningConfigChanged }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.entity.cefcMIBEnableStatusNotification",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sentity\s(?P<cefcMIBEnableStatusNotification>cefcMIBEnableStatusNotification)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps entity cefcMIBEnableStatusNotification",
+ "result": {
+ "traps": {
+ "entity": {
+ "cefcMIBEnableStatusNotification": "{{ not not cefcMIBEnableStatusNotification }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.entity.entity_fan_status_change",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sentity\s(?P<entity_fan_status_change>entity-fan-status-change)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps entity entity-fan-status-change",
+ "result": {
+ "traps": {
+ "entity": {
+ "entity_fan_status_change": "{{ not not entity_fan_status_change }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.entity.entity_mib_change",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sentity\s(?P<entity_mib_change>entity-mib-change)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps entity entity-mib-change",
+ "result": {
+ "traps": {
+ "entity": {
+ "entity_mib_change": "{{ not not entity_mib_change }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.entity.entity_module_inserted",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sentity\s(?P<entity_module_inserted>entity-module-inserted)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps entity entity-module-inserted",
+ "result": {
+ "traps": {
+ "entity": {
+ "entity_module_inserted": "{{ not not entity_module_inserted }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.entity.entity_module_status_change",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sentity\s(?P<entity_module_status_change>entity-module-status-change)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps entity entity-module-status-change",
+ "result": {
+ "traps": {
+ "entity": {
+ "entity_module_status_change": "{{ not not entity_module_status_change }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.entity.entity_power_out_change",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sentity\s(?P<entity_power_out_change>entity-power-out-change)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps entity entity-power-out-change",
+ "result": {
+ "traps": {
+ "entity": {
+ "entity_power_out_change": "{{ not not entity_power_out_change }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.entity.entity_power_status_change",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+                \sentity\s(?P<entity_power_status_change>entity-power-status-change)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps entity entity-power-status-change",
+ "result": {
+ "traps": {
+ "entity": {
+ "entity_power_status_change": "{{ not not entity_power_status_change }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.entity.entity_sensor",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sentity\s(?P<entity_sensor>entity-sensor)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps entity entity-sensor",
+ "result": {
+ "traps": {
+ "entity": {
+ "entity_sensor": "{{ not not entity_sensor }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.entity.entity_unrecognised_module",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sentity\s(?P<entity_unrecognised_module>entity-unrecognised-module)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps entity entity-unrecognised-module",
+ "result": {
+ "traps": {
+ "entity": {
+ "entity_unrecognised_module": "{{ not not entity_unrecognised_module }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.feature_control.featureOpStatusChange",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sfeature-control\s(?P<featureOpStatusChange>featureOpStatusChange)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps feature-control featureOpStatusChange",
+ "result": {
+ "traps": {
+ "feature_control": {
+ "featureOpStatusChange": "{{ not not featureOpStatusChange }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.feature_control.ciscoFeatOpStatusChange",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sfeature-control\s(?P<ciscoFeatOpStatusChange>ciscoFeatOpStatusChange)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps feature-control ciscoFeatOpStatusChange",
+ "result": {
+ "traps": {
+ "feature_control": {
+ "ciscoFeatOpStatusChange": "{{ not not ciscoFeatOpStatusChange }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.generic.coldStart",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sgeneric\s(?P<coldStart>coldStart)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps generic coldStart",
+ "result": {
+ "traps": {
+ "generic": {
+ "coldStart": "{{ not not coldStart }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.generic.warmStart",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sgeneric\s(?P<warmStart>warmStart)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps generic warmStart",
+ "result": {
+ "traps": {
+ "generic": {
+ "warmStart": "{{ not not warmStart }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.license.notify_license_expiry",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+                \slicense\s(?P<notify_license_expiry>notify-license-expiry)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps license notify-license-expiry",
+ "result": {
+ "traps": {
+ "license": {
+ "notify_license_expiry": "{{ not not notify_license_expiry }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.license.notify_license_expiry_warning",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \slicense\s(?P<notify_license_expiry_warning>notify-license-expiry-warning)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps license notify-license-expiry-warning",
+ "result": {
+ "traps": {
+ "license": {
+ "notify_license_expiry_warning": "{{ not not notify_license_expiry_warning }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.license.notify_licensefile_missing",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \slicense\s(?P<notify_licensefile_missing>notify-licensefile-missing)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps license notify-licensefile-missing",
+ "result": {
+ "traps": {
+ "license": {
+ "notify_licensefile_missing": "{{ not not notify_licensefile_missing }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.license.notify_no_license_for_feature",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \slicense\s(?P<notify_no_license_for_feature>notify-no-license-for-feature)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps license notify-no-license-for-feature",
+ "result": {
+ "traps": {
+ "license": {
+ "notify_no_license_for_feature": "{{ not not notify_no_license_for_feature }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.link.cErrDisableInterfaceEventRev1",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \slink\s(?P<cErrDisableInterfaceEventRev1>cErrDisableInterfaceEventRev1)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps link cErrDisableInterfaceEventRev1",
+ "result": {
+ "traps": {
+ "link": {
+ "cErrDisableInterfaceEventRev1": "{{ not not cErrDisableInterfaceEventRev1 }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.link.cieLinkDown",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \slink\s(?P<cieLinkDown>cieLinkDown)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps link cieLinkDown",
+ "result": {
+ "traps": {
+ "link": {
+ "cieLinkDown": "{{ not not cieLinkDown }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.link.cieLinkUp",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \slink\s(?P<cieLinkUp>cieLinkUp)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps link cieLinkUp",
+ "result": {
+ "traps": {
+ "link": {
+ "cieLinkUp": "{{ not not cieLinkUp }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.link.cisco_xcvr_mon_status_chg",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \slink\s(?P<cisco_xcvr_mon_status_chg>cisco-xcvr-mon-status-chg)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps link cisco-xcvr-mon-status-chg",
+ "result": {
+ "traps": {
+ "link": {
+ "cisco_xcvr_mon_status_chg": "{{ not not cisco_xcvr_mon_status_chg }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.link.cmn_mac_move_notification",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \slink\s(?P<cmn_mac_move_notification>cmn-mac-move-notification)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps link cmn-mac-move-notification",
+ "result": {
+ "traps": {
+ "link": {
+ "cmn_mac_move_notification": "{{ not not cmn_mac_move_notification }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.link.delayed_link_state_change",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \slink\s(?P<delayed_link_state_change>delayed-link-state-change)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps link delayed-link-state-change",
+ "result": {
+ "traps": {
+ "link": {
+ "delayed_link_state_change": "{{ not not delayed_link_state_change }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.link.extended_linkDown",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \slink\s(?P<extended_linkDown>extended-linkDown)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps link extended-linkDown",
+ "result": {
+ "traps": {
+ "link": {
+ "extended_linkDown": "{{ not not extended_linkDown }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.link.extended_linkUp",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \slink\s(?P<extended_linkUp>extended-linkUp)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps link extended-linkUp",
+ "result": {
+ "traps": {
+ "link": {
+ "extended_linkUp": "{{ not not extended_linkUp }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.link.linkDown",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \slink\s(?P<linkDown>linkDown)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps link linkDown",
+ "result": {
+ "traps": {
+ "link": {
+ "linkDown": "{{ not not linkDown }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.link.linkUp",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \slink\s(?P<linkUp>linkUp)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps link linkUp",
+ "result": {
+ "traps": {
+ "link": {
+ "linkUp": "{{ not not linkUp }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.mmode.cseMaintModeChangeNotify",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \smmode\s(?P<cseMaintModeChangeNotify>cseMaintModeChangeNotify)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps mmode cseMaintModeChangeNotify",
+ "result": {
+ "traps": {
+ "mmode": {
+ "cseMaintModeChangeNotify": "{{ not not cseMaintModeChangeNotify }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.mmode.cseNormalModeChangeNotify",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \smmode\s(?P<cseNormalModeChangeNotify>cseNormalModeChangeNotify)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps mmode cseNormalModeChangeNotify",
+ "result": {
+ "traps": {
+ "mmode": {
+ "cseNormalModeChangeNotify": "{{ not not cseNormalModeChangeNotify }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.ospf",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps\s(?P<enable>ospf)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps ospf",
+ "result": {
+ "traps": {
+ "ospf": {
+ "enable": "{{ not not enable }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.ospfv3",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps\s(?P<enable>ospfv3)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps ospfv3",
+ "result": {
+ "traps": {
+ "ospfv3": {
+ "enable": "{{ not not enable }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.rf.redundancy_framework",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \srf\s(?P<redundancy_framework>redundancy-framework)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps rf redundancy-framework",
+ "result": {
+ "traps": {
+ "rf": {
+ "redundancy_framework": "{{ not not redundancy_framework }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.rmon.fallingAlarm",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \srmon\s(?P<fallingAlarm>fallingAlarm)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps rmon fallingAlarm",
+ "result": {
+ "traps": {
+ "rmon": {
+ "fallingAlarm": "{{ not not fallingAlarm }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.rmon.hcFallingAlarm",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \srmon\s(?P<hcFallingAlarm>hcFallingAlarm)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps rmon hcFallingAlarm",
+ "result": {
+ "traps": {
+ "rmon": {
+ "hcFallingAlarm": "{{ not not hcFallingAlarm }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.rmon.hcRisingAlarm",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \srmon\s(?P<hcRisingAlarm>hcRisingAlarm)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps rmon hcRisingAlarm",
+ "result": {
+ "traps": {
+ "rmon": {
+ "hcRisingAlarm": "{{ not not hcRisingAlarm }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.rmon.risingAlarm",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \srmon\s(?P<risingAlarm>risingAlarm)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps rmon risingAlarm",
+ "result": {
+ "traps": {
+ "rmon": {
+ "risingAlarm": "{{ not not risingAlarm }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.snmp.authentication",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \ssnmp\s(?P<authentication>authentication)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps snmp authentication",
+ "result": {
+ "traps": {
+ "snmp": {
+ "authentication": "{{ not not authentication }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.storm_control.cpscEventRev1",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sstorm-control\s(?P<cpscEventRev1>cpscEventRev1)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps storm-control cpscEventRev1",
+ "result": {
+ "traps": {
+ "storm_control": {
+                        "cpscEventRev1": "{{ not not cpscEventRev1 }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.storm_control.trap_rate",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sstorm-control\s(?P<trap_rate>trap-rate)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps storm-control trap-rate",
+ "result": {
+ "traps": {
+ "storm_control": {
+ "trap_rate": "{{ not not trap_rate }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.stpx.inconsistency",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sstpx\s(?P<inconsistency>inconsistency)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps stpx inconsistency",
+ "result": {
+ "traps": {
+ "stpx": {
+ "inconsistency": "{{ not not inconsistency }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.stpx.root_inconsistency",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sstpx\s(?P<root_inconsistency>root-inconsistency)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps stpx root-inconsistency",
+ "result": {
+ "traps": {
+ "stpx": {
+ "root_inconsistency": "{{ not not root_inconsistency }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.stpx.loop_inconsistency",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \sstpx\s(?P<loop_inconsistency>loop-inconsistency)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps stpx loop-inconsistency",
+ "result": {
+ "traps": {
+ "stpx": {
+ "loop_inconsistency": "{{ not not loop_inconsistency }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.syslog.message_generated",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \ssyslog\s(?P<message_generated>message-generated)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps syslog message-generated",
+ "result": {
+ "traps": {
+ "syslog": {
+ "message_generated": "{{ not not message_generated }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.sysmgr.cseFailSwCoreNotifyExtended",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \ssysmgr\s(?P<cseFailSwCoreNotifyExtended>cseFailSwCoreNotifyExtended)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps sysmgr cseFailSwCoreNotifyExtended",
+ "result": {
+ "traps": {
+ "sysmgr": {
+ "cseFailSwCoreNotifyExtended": "{{ not not cseFailSwCoreNotifyExtended }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.system.clock_change_notification",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \ssystem\s(?P<clock_change_notification>Clock-change-notification)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps system Clock-change-notification",
+ "result": {
+ "traps": {
+ "system": {
+ "clock_change_notification": "{{ not not clock_change_notification }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.upgrade.upgradeJobStatusNotify",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \supgrade\s(?P<upgradeJobStatusNotify>upgradeJobStatusNotify)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps upgrade upgradeJobStatusNotify",
+ "result": {
+ "traps": {
+ "upgrade": {
+ "upgradeJobStatusNotify": "{{ not not upgradeJobStatusNotify }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.upgrade.upgradeOpNotifyOnCompletion",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \supgrade\s(?P<upgradeOpNotifyOnCompletion>upgradeOpNotifyOnCompletion)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps upgrade upgradeOpNotifyOnCompletion",
+ "result": {
+ "traps": {
+ "upgrade": {
+ "upgradeOpNotifyOnCompletion": "{{ not not upgradeOpNotifyOnCompletion }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.vtp.notifs",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \svtp\s(?P<notifs>notifs)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps vtp notifs",
+ "result": {
+ "traps": {
+ "vtp": {
+ "notifs": "{{ not not notifs }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.vtp.vlancreate",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \svtp\s(?P<vlancreate>vlancreate)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps vtp vlancreate",
+ "result": {
+ "traps": {
+ "vtp": {
+ "vlancreate": "{{ not not vlancreate }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "traps.vtp.vlandelete",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\senable
+ \straps
+ \svtp\s(?P<vlandelete>vlandelete)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server enable traps vtp vlandelete",
+ "result": {
+ "traps": {
+ "vtp": {
+ "vlandelete": "{{ not not vlandelete }}",
+ },
+ },
+ },
+ },
+
+ {
+ "name": "engine_id.local",
+ "getval": re.compile(
+ r"""
+ ^snmp-server\sengineID
+ \slocal\s(?P<local>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server engineID local {{ engine_id.local }}",
+ "result": {
+ "engine_id": {
+ "local": "{{ local }}",
+ },
+ },
+ },
+ {
+ "name": "global_enforce_priv",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \s(?P<global_enforce_priv>globalEnforcePriv)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server globalEnforcePriv",
+ "result": {
+ "global_enforce_priv": "{{ not not global_enforce_priv }}",
+ },
+ },
+ {
+ "name": "hosts",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \shost\s(?P<host>\S+)
+ (\s((?P<traps>traps)|(?P<informs>informs)|(use-vrf\s(?P<use_vrf>\S+)|(filter-vrf\s(?P<filter_vrf>\S+))|(source-interface\s(?P<source_interface>\S+)))))
+ (\sversion\s(?P<version>\S+))?
+ (\s((auth\s(?P<auth>\S+))|(priv\s(?P<priv>\S+))|((?P<community>\S+))))?
+ (\sudp-port\s(?P<udp_port>\S+))?
+ $""", re.VERBOSE,
+ ),
+ "setval": _template_hosts,
+ "result": {
+ "hosts": [
+ {
+ "host": "{{ host }}",
+ "community": "{{ community }}",
+ "filter_vrf": "{{ filter_vrf }}",
+ "informs": "{{ not not informs }}",
+ "source_interface": "{{ source_interface }}",
+ "traps": "{{ not not traps }}",
+ "use_vrf": "{{ use_vrf }}",
+ "version": "{{ version }}",
+ "udp_port": "{{ udp_port }}",
+ "auth": "{{ auth }}",
+ "priv": "{{ priv }}",
+ },
+ ],
+ },
+ },
+ {
+ "name": "location",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \slocation\s(?P<location>.+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server location {{ location }}",
+ "result": {
+ "location": "{{ location }}",
+ },
+ },
+ {
+ "name": "mib.community_map",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \smib
+ \scommunity-map\s(?P<community>\S+)
+ \scontext\s(?P<context>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server mib community-map {{ mib.community_map.community }} context {{ mib.community_map.context }}",
+ "result": {
+ "mib": {
+ "community_map": {
+ "community": "{{ community }}",
+ "context": "{{ context }}",
+ },
+ },
+ },
+ },
+ {
+ "name": "packetsize",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \spacketsize\s(?P<packetsize>\d+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server packetsize {{ packetsize }}",
+ "result": {
+ "packetsize": "{{ packetsize }}",
+ },
+ },
+ {
+ "name": "protocol.enable",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \sprotocol\s(?P<enable>enable)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server protocol enable",
+ "result": {
+ "protocol": {
+ "enable": "{{ not not enable }}",
+ },
+ },
+ },
+ {
+ "name": "source_interface.informs",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \ssource-interface\sinforms\s(?P<informs>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server source-interface informs {{ source_interface.informs }}",
+ "result": {
+ "source_interface": {
+ "informs": "{{ informs }}",
+ },
+ },
+ },
+ {
+ "name": "source_interface.traps",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \ssource-interface\straps\s(?P<traps>\S+)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server source-interface traps {{ source_interface.traps }}",
+ "result": {
+ "source_interface": {
+ "traps": "{{ traps }}",
+ },
+ },
+ },
+ {
+ "name": "system_shutdown",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \s(?P<system_shutdown>system-shutdown)
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server system-shutdown",
+ "result": {
+ "system_shutdown": "{{ not not system_shutdown }}",
+ },
+ },
+ {
+ "name": "tcp_session",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \s(?P<tcp_session>tcp-session)
+ (\s(?P<auth>auth))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server tcp-session"
+ "{{ ' auth' if tcp_session.auth|d(False) else '' }}",
+ "result": {
+ "tcp_session": {
+ "enable": "{{ True if tcp_session is defined and auth is not defined else None }}",
+ "auth": "{{ not not auth }}",
+ },
+ },
+ },
+ {
+ "name": "users.auth",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \suser\s(?P<user>\S+)
+ (\s(?P<group>[^auth]\S+))?
+ (\sauth\s(?P<algorithm>md5|sha|sha-256)\s(?P<password>\S+))?
+ (\spriv(\s(?P<aes_128>aes-128))?\s(?P<privacy_password>\S+))?
+ (\s(?P<localized_key>localizedkey))?
+ (\s(?P<localizedv2_key>localizedV2key))?
+ (\sengineID\s(?P<engine_id>\S+))?
+ $""", re.VERBOSE,
+ ),
+ "setval": _tmplt_users_auth,
+ "result": {
+ "users": {
+ "auth": [
+ {
+ "user": "{{ user }}",
+ "group": "{{ group }}",
+ "authentication": {
+ "algorithm": "{{ algorithm }}",
+ "password": "'{{ password }}'",
+ "engine_id": "'{{ engine_id }}'",
+ "localized_key": "{{ not not localized_key }}",
+ "localizedv2_key": "{{ not not localizedv2_key }}",
+ "priv": {
+ "privacy_password": "'{{ privacy_password }}'",
+ "aes_128": "{{ not not aes_128 }}",
+ },
+ },
+ },
+ ],
+ },
+ },
+ },
+ {
+ "name": "users.use_acls",
+ "getval": re.compile(
+ r"""
+ ^snmp-server
+ \suser\s(?P<user>\S+)
+ (\suse-ipv4acl\s(?P<ipv4>\S+))?
+ (\suse-ipv6acl\s(?P<ipv6>\S+))?
+ $""", re.VERBOSE,
+ ),
+ "setval": "snmp-server user {{ user }}"
+ "{{ (' use-ipv4acl ' + ipv4) if ipv4 is defined else '' }}"
+ "{{ (' use-ipv6acl ' + ipv6) if ipv6 is defined else '' }}",
+ "result": {
+ "users": {
+ "use_acls": [
+ {
+ "user": "{{ user }}",
+ "ipv4": "{{ ipv4 }}",
+ "ipv6": "{{ ipv6 }}",
+ },
+ ],
+ },
+ },
+ },
+ ]
+ # fmt: on
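+    # Example (illustrative): each parser pairs a verbose "getval" regex with a
+    # Jinja "result" template. The "traps.ospf" entry above matches the
+    # running-config line
+    #     snmp-server enable traps ospf
+    # yielding groupdict() == {"enable": "ospf"}; "{{ not not enable }}" then
+    # coerces that truthy capture into the boolean fact traps.ospf.enable,
+    # while "setval" renders the command back out when the option is configured.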
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/telemetry/__init__.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/telemetry/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/telemetry/__init__.py
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/telemetry/telemetry.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/telemetry/telemetry.py
new file mode 100644
index 00000000..aa540ade
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/telemetry/telemetry.py
@@ -0,0 +1,264 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Cisco and/or its affiliates.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The nxos telemetry utility library
+"""
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+import re
+
+from copy import deepcopy
+
+
+def get_module_params_subsection(module_params, tms_config, resource_key=None):
+ """
+ Helper method to get a specific module_params subsection
+ """
+ mp = {}
+ if tms_config == "TMS_GLOBAL":
+ relevant_keys = [
+ "certificate",
+ "compression",
+ "source_interface",
+ "vrf",
+ ]
+ for key in relevant_keys:
+ mp[key] = module_params[key]
+
+ if tms_config == "TMS_DESTGROUP":
+ mp["destination_groups"] = []
+ for destgrp in module_params["destination_groups"]:
+ if destgrp["id"] == resource_key:
+ mp["destination_groups"].append(destgrp)
+
+ if tms_config == "TMS_SENSORGROUP":
+ mp["sensor_groups"] = []
+ for sensor in module_params["sensor_groups"]:
+ if sensor["id"] == resource_key:
+ mp["sensor_groups"].append(sensor)
+
+ if tms_config == "TMS_SUBSCRIPTION":
+ mp["subscriptions"] = []
+ for sensor in module_params["subscriptions"]:
+ if sensor["id"] == resource_key:
+ mp["subscriptions"].append(sensor)
+
+ return mp
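+# Example (illustrative):
+# >>> params = {"destination_groups": [{"id": 2}, {"id": 5}]}
+# >>> get_module_params_subsection(params, "TMS_DESTGROUP", resource_key=2)
+# {'destination_groups': [{'id': 2}]}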
+
+
+def valiate_input(playvals, type, module):
+ """
+    Helper method to validate playbook values for destination groups and sensor groups
+ """
+ if type == "destination_groups":
+ if not playvals.get("id"):
+ msg = "Invalid playbook value: {0}.".format(playvals)
+ msg += " Parameter <id> under <destination_groups> is required"
+ module.fail_json(msg=msg)
+ if playvals.get("destination") and not isinstance(playvals["destination"], dict):
+ msg = "Invalid playbook value: {0}.".format(playvals)
+ msg += " Parameter <destination> under <destination_groups> must be a dict"
+ module.fail_json(msg=msg)
+ if not playvals.get("destination") and len(playvals) > 1:
+ msg = "Invalid playbook value: {0}.".format(playvals)
+ msg += " Playbook entry contains unrecongnized parameters."
+ msg += (
+ " Make sure <destination> keys under <destination_groups> are specified as follows:"
+ )
+ msg += " destination: {ip: <ip>, port: <port>, protocol: <prot>, encoding: <enc>}}"
+ module.fail_json(msg=msg)
+
+ if type == "sensor_groups":
+ if not playvals.get("id"):
+ msg = "Invalid playbook value: {0}.".format(playvals)
+ msg += " Parameter <id> under <sensor_groups> is required"
+ module.fail_json(msg=msg)
+ if playvals.get("path") and "name" not in playvals["path"].keys():
+ msg = "Invalid playbook value: {0}.".format(playvals)
+ msg += " Parameter <path> under <sensor_groups> requires <name> key"
+ module.fail_json(msg=msg)
+
+
+def get_instance_data(key, cr_key, cr, existing_key):
+ """
+ Helper method to get instance data used to populate list structure in config
+ fact dictionary
+ """
+ data = {}
+ if existing_key is None:
+ instance = None
+ else:
+ instance = cr._ref[cr_key]["existing"][existing_key]
+
+ patterns = {
+ "destination_groups": r"destination-group (\S+)",
+ "sensor_groups": r"sensor-group (\S+)",
+ "subscriptions": r"subscription (\S+)",
+ }
+ if key in patterns.keys():
+ m = re.search(patterns[key], cr._ref["_resource_key"])
+ instance_key = m.group(1)
+ data = {"id": instance_key, cr_key: instance}
+
+ # Remove None values
+ data = dict((k, v) for k, v in data.items() if v is not None)
+ return data
+
+
+def cr_key_lookup(key, mo):
+ """
+ Helper method to get instance key value for Managed Object (mo)
+ """
+ cr_keys = [key]
+ if key == "destination_groups" and mo == "TMS_DESTGROUP":
+ cr_keys = ["destination"]
+ elif key == "sensor_groups" and mo == "TMS_SENSORGROUP":
+ cr_keys = ["data_source", "path"]
+ elif key == "subscriptions" and mo == "TMS_SUBSCRIPTION":
+ cr_keys = ["destination_group", "sensor_group"]
+
+ return cr_keys
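+# Example (illustrative): the config keys carrying instance data differ per
+# managed object.
+# >>> cr_key_lookup("sensor_groups", "TMS_SENSORGROUP")
+# ['data_source', 'path']
+# >>> cr_key_lookup("vrf", "TMS_GLOBAL")
+# ['vrf']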
+
+
+def normalize_data(cmd_ref):
+ """Normalize playbook values and get_existing data"""
+
+ playval = cmd_ref._ref.get("destination").get("playval")
+ existing = cmd_ref._ref.get("destination").get("existing")
+
+ dest_props = ["protocol", "encoding"]
+ if playval:
+ for prop in dest_props:
+ for key in playval.keys():
+ playval[key][prop] = playval[key][prop].lower()
+ if existing:
+ for key in existing.keys():
+ for prop in dest_props:
+ existing[key][prop] = existing[key][prop].lower()
+
+
+def remove_duplicate_context(cmds):
+ """Helper method to remove duplicate telemetry context commands"""
+ if not cmds:
+ return cmds
+ feature_indices = [i for i, x in enumerate(cmds) if x == "feature telemetry"]
+ telemetry_indices = [i for i, x in enumerate(cmds) if x == "telemetry"]
+ if len(feature_indices) == 1 and len(telemetry_indices) == 1:
+ return cmds
+ if len(feature_indices) == 1 and not telemetry_indices:
+ return cmds
+ if len(telemetry_indices) == 1 and not feature_indices:
+ return cmds
+ if feature_indices and feature_indices[-1] > 1:
+ cmds.pop(feature_indices[-1])
+ return remove_duplicate_context(cmds)
+ if telemetry_indices and telemetry_indices[-1] > 1:
+ cmds.pop(telemetry_indices[-1])
+ return remove_duplicate_context(cmds)
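+# Example (illustrative): repeated context lines are collapsed so only the
+# first "feature telemetry" / "telemetry" pair survives.
+# >>> remove_duplicate_context(
+# ...     ["feature telemetry", "telemetry", "destination-group 2",
+# ...      "telemetry", "sensor-group 55"]
+# ... )
+# ['feature telemetry', 'telemetry', 'destination-group 2', 'sensor-group 55']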
+
+
+def get_setval_path(module_or_path_data):
+ """Build setval for path parameter based on playbook inputs
+ Full Command:
+ - path {name} depth {depth} query-condition {query_condition} filter-condition {filter_condition}
+ Required:
+ - path {name}
+ Optional:
+ - depth {depth}
+ - query-condition {query_condition},
+ - filter-condition {filter_condition}
+ """
+ if isinstance(module_or_path_data, dict):
+ path = module_or_path_data
+ else:
+ path = module_or_path_data.params["config"]["sensor_groups"][0].get("path")
+ if path is None:
+ return path
+
+ setval = "path {name}"
+ if "depth" in path.keys():
+ if path.get("depth") != "None":
+ setval = setval + " depth {depth}"
+ if "query_condition" in path.keys():
+ if path.get("query_condition") != "None":
+ setval = setval + " query-condition {query_condition}"
+ if "filter_condition" in path.keys():
+ if path.get("filter_condition") != "None":
+ setval = setval + " filter-condition {filter_condition}"
+
+ return setval
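+# Example (illustrative): optional attributes extend the template only when the
+# playbook supplies a real value (the literal string "None" is skipped).
+# >>> get_setval_path({"name": "sys/bgp", "depth": 0, "query_condition": "None"})
+# 'path {name} depth {depth}'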
+
+
+def remove_duplicate_commands(commands_list):
+ # Remove any duplicate commands.
+ # pylint: disable=unnecessary-lambda
+ return sorted(set(commands_list), key=lambda x: commands_list.index(x))
+
+
+def massage_data(have_or_want):
+ # Massage non global into a data structure that is indexed by id and
+ # normalized for destination_groups, sensor_groups and subscriptions.
+ data = deepcopy(have_or_want)
+ massaged = {}
+ massaged["destination_groups"] = {}
+ massaged["sensor_groups"] = {}
+ massaged["subscriptions"] = {}
+
+ for subgroup in ["destination_groups", "sensor_groups", "subscriptions"]:
+ for item in data.get(subgroup, []):
+ id = str(item.get("id"))
+ if id not in massaged[subgroup].keys():
+ massaged[subgroup][id] = []
+ item.pop("id")
+ if not item:
+ item = None
+ else:
+ if item.get("destination"):
+ if item.get("destination").get("port"):
+ item["destination"]["port"] = str(item["destination"]["port"])
+ if item.get("destination").get("protocol"):
+ item["destination"]["protocol"] = item["destination"]["protocol"].lower()
+ if item.get("destination").get("encoding"):
+ item["destination"]["encoding"] = item["destination"]["encoding"].lower()
+ if item.get("path"):
+ for key in [
+ "filter_condition",
+ "query_condition",
+ "depth",
+ ]:
+ if item.get("path").get(key) == "None":
+ del item["path"][key]
+ if item.get("path").get("depth") is not None:
+ item["path"]["depth"] = str(item["path"]["depth"])
+ if item.get("destination_group"):
+ item["destination_group"] = str(item["destination_group"])
+ if item.get("sensor_group"):
+ if item.get("sensor_group").get("id"):
+ item["sensor_group"]["id"] = str(item["sensor_group"]["id"])
+ if item.get("sensor_group").get("sample_interval"):
+ item["sensor_group"]["sample_interval"] = str(
+ item["sensor_group"]["sample_interval"],
+ )
+ if item.get("destination_group") and item.get("sensor_group"):
+ item_copy = deepcopy(item)
+ del item_copy["sensor_group"]
+ del item["destination_group"]
+ massaged[subgroup][id].append(item_copy)
+ massaged[subgroup][id].append(item)
+ continue
+ if item.get("path") and item.get("data_source"):
+ item_copy = deepcopy(item)
+ del item_copy["data_source"]
+ del item["path"]
+ massaged[subgroup][id].append(item_copy)
+ massaged[subgroup][id].append(item)
+ continue
+ massaged[subgroup][id].append(item)
+ return massaged
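+# Example (illustrative, output wrapped for readability): ports and group ids
+# are stringified and protocol/encoding lower-cased so "have" and "want"
+# structures compare cleanly.
+# >>> massage_data({"destination_groups": [{"id": 2, "destination": {
+# ...     "ip": "192.168.1.1", "port": 50001, "protocol": "gRPC", "encoding": "GPB"}}]})
+# {'destination_groups': {'2': [{'destination': {'ip': '192.168.1.1',
+#  'port': '50001', 'protocol': 'grpc', 'encoding': 'gpb'}}]},
+#  'sensor_groups': {}, 'subscriptions': {}}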
diff --git a/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/utils.py b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/utils.py
new file mode 100644
index 00000000..01468edd
--- /dev/null
+++ b/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/utils/utils.py
@@ -0,0 +1,214 @@
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+import socket
+
+from functools import total_ordering
+from itertools import count, groupby
+
+from ansible.module_utils.six import iteritems
+
+
+LOGGING_SEVMAP = {
+ 0: "emergency",
+ 1: "alert",
+ 2: "critical",
+ 3: "error",
+ 4: "warning",
+ 5: "notification",
+ 6: "informational",
+ 7: "debugging",
+}
+
+
+def search_obj_in_list(name, lst, identifier):
+ for o in lst:
+ if o[identifier] == name:
+ return o
+ return None
+
+
+def flatten_dict(x):
+ result = {}
+ if not isinstance(x, dict):
+ return result
+
+ for key, value in iteritems(x):
+ if isinstance(value, dict):
+ result.update(flatten_dict(value))
+ else:
+ result[key] = value
+
+ return result
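+# Example (illustrative): nested option dicts collapse into a single level.
+# >>> flatten_dict({"access": {"vlan": 10}, "mode": "access"})
+# {'vlan': 10, 'mode': 'access'}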
+
+
+def validate_ipv4_addr(address):
+ address = address.split("/")[0]
+ try:
+ socket.inet_aton(address)
+ except socket.error:
+ return False
+ return address.count(".") == 3
+
+
+def validate_ipv6_addr(address):
+ address = address.split("/")[0]
+ try:
+ socket.inet_pton(socket.AF_INET6, address)
+ except socket.error:
+ return False
+ return True
+
+
+def normalize_interface(name):
+ """Return the normalized interface name"""
+ if not name:
+ return
+
+ def _get_number(name):
+ digits = ""
+ for char in name:
+ if char.isdigit() or char in "/.":
+ digits += char
+ return digits
+
+ if name.lower().startswith("et"):
+ if_type = "Ethernet"
+ elif name.lower().startswith("vl"):
+ if_type = "Vlan"
+ elif name.lower().startswith("lo"):
+ if_type = "loopback"
+ elif name.lower().startswith("po"):
+ if_type = "port-channel"
+ elif name.lower().startswith("nv"):
+ if_type = "nve"
+ else:
+ if_type = None
+
+ number_list = name.split(" ")
+ if len(number_list) == 2:
+ number = number_list[-1].strip()
+ else:
+ number = _get_number(name)
+
+ if if_type:
+ proper_interface = if_type + number
+ else:
+ proper_interface = name
+
+ return proper_interface
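+# Example (illustrative): abbreviated names expand to the canonical NX-OS form.
+# >>> normalize_interface("eth1/1")
+# 'Ethernet1/1'
+# >>> normalize_interface("po10")
+# 'port-channel10'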
+
+
+def get_interface_type(interface):
+ """Gets the type of interface"""
+ if interface.upper().startswith("ET"):
+ return "ethernet"
+ elif interface.upper().startswith("VL"):
+ return "svi"
+ elif interface.upper().startswith("LO"):
+ return "loopback"
+ elif interface.upper().startswith("MG"):
+ return "management"
+ elif interface.upper().startswith("MA"):
+ return "management"
+ elif interface.upper().startswith("PO"):
+ return "portchannel"
+ elif interface.upper().startswith("NV"):
+ return "nve"
+ else:
+ return "unknown"
+
+
+def remove_rsvd_interfaces(interfaces):
+ """Exclude reserved interfaces from user management"""
+ if not interfaces:
+ return []
+ return [i for i in interfaces if get_interface_type(i["name"]) != "management"]
+
+
+def vlan_range_to_list(vlans):
+ result = []
+ if vlans:
+ for part in vlans.split(","):
+ if part == "none":
+ break
+ if "-" in part:
+ a, b = part.split("-")
+ a, b = int(a), int(b)
+ result.extend(range(a, b + 1))
+ else:
+ a = int(part)
+ result.append(a)
+ return numerical_sort(result)
+ return result
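+# Example (illustrative): a CLI range string becomes a sorted list of VLAN IDs.
+# >>> vlan_range_to_list("1,10-12,20")
+# [1, 10, 11, 12, 20]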
+
+
+def numerical_sort(string_int_list):
+ """Sorts list of integers that are digits in numerical order."""
+
+ as_int_list = []
+
+ for vlan in string_int_list:
+ as_int_list.append(int(vlan))
+ as_int_list.sort()
+ return as_int_list
+
+
+def get_logging_sevmap(invert=False):
+ x = LOGGING_SEVMAP
+ if invert:
+ # cannot use dict comprehension yet
+ # since we still test with Python 2.6
+ x = dict(map(reversed, iteritems(x)))
+ return x
+
+
+def get_ranges(data):
+ """
+ Returns a generator object that yields lists of
+    consecutive integers from a list of integers.
+ """
+ for _k, group in groupby(data, lambda t, c=count(): int(t) - next(c)):
+ yield list(group)
+
+
+def vlan_list_to_range(cmd):
+ """
+ Converts a comma separated list of vlan IDs
+ into ranges.
+ """
+ ranges = []
+ for v in get_ranges(cmd):
+ ranges.append("-".join(map(str, (v[0], v[-1])[: len(v)])))
+ return ",".join(ranges)
+
+
+@total_ordering
+class Version:
+ """Simple class to compare arbitrary versions"""
+
+ def __init__(self, version_string):
+ self.components = version_string.split(".")
+
+ def __eq__(self, other):
+ other = _coerce(other)
+ if not isinstance(other, Version):
+ return NotImplemented
+
+ return self.components == other.components
+
+ def __lt__(self, other):
+ other = _coerce(other)
+ if not isinstance(other, Version):
+ return NotImplemented
+
+ return self.components < other.components
+
+
+def _coerce(other):
+ if isinstance(other, str):
+ other = Version(other)
+ if isinstance(other, (int, float)):
+ other = Version(str(other))
+ return other
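+# Example (illustrative): strings, ints and floats are coerced to Version before
+# comparison; components are compared element-wise as strings.
+# >>> Version("2.1") < "2.3"
+# True
+# >>> Version("2.1") == 2.1
+# True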