author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-09 13:16:35 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-09 13:16:35 +0000
commit     e2bbf175a2184bd76f6c54ccf8456babeb1a46fc (patch)
tree       f0b76550d6e6f500ada964a3a4ee933a45e5a6f1 /tests/topotests/lib
parent     Initial commit. (diff)
download   frr-e2bbf175a2184bd76f6c54ccf8456babeb1a46fc.tar.xz
           frr-e2bbf175a2184bd76f6c54ccf8456babeb1a46fc.zip

Adding upstream version 9.1. (upstream/9.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tests/topotests/lib')
-rw-r--r--  tests/topotests/lib/__init__.py  0
-rw-r--r--  tests/topotests/lib/bgp.py  5596
-rw-r--r--  tests/topotests/lib/bgprib.py  165
-rw-r--r--  tests/topotests/lib/bmp_collector/bgp/__init__.py  0
-rw-r--r--  tests/topotests/lib/bmp_collector/bgp/open/__init__.py  34
-rw-r--r--  tests/topotests/lib/bmp_collector/bgp/update/__init__.py  54
-rw-r--r--  tests/topotests/lib/bmp_collector/bgp/update/af.py  53
-rw-r--r--  tests/topotests/lib/bmp_collector/bgp/update/nlri.py  140
-rw-r--r--  tests/topotests/lib/bmp_collector/bgp/update/path_attributes.py  304
-rw-r--r--  tests/topotests/lib/bmp_collector/bgp/update/rd.py  59
-rw-r--r--  tests/topotests/lib/bmp_collector/bmp.py  420
-rwxr-xr-x  tests/topotests/lib/bmp_collector/bmpserver  45
-rw-r--r--  tests/topotests/lib/checkping.py  33
-rw-r--r--  tests/topotests/lib/common_config.py  4960
-rwxr-xr-x  tests/topotests/lib/exa-receive.py  43
-rw-r--r--  tests/topotests/lib/fixtures.py  30
-rwxr-xr-x  tests/topotests/lib/grpc-query.py  128
-rw-r--r--  tests/topotests/lib/ltemplate.py  307
-rw-r--r--  tests/topotests/lib/lutil.py  499
-rwxr-xr-x  tests/topotests/lib/mcast-tester.py  142
-rw-r--r--  tests/topotests/lib/micronet.py  24
-rw-r--r--  tests/topotests/lib/micronet_compat.py  370
-rw-r--r--  tests/topotests/lib/ospf.py  3028
-rw-r--r--  tests/topotests/lib/pim.py  5203
-rwxr-xr-x  tests/topotests/lib/scapy_sendpkt.py  44
-rwxr-xr-x  tests/topotests/lib/send_bsr_packet.py  45
-rw-r--r--  tests/topotests/lib/snmptest.py  145
-rw-r--r--  tests/topotests/lib/test/__init__.py  0
-rwxr-xr-x  tests/topotests/lib/test/test_json.py  627
-rwxr-xr-x  tests/topotests/lib/test/test_run_and_expect.py  66
-rwxr-xr-x  tests/topotests/lib/test/test_version.py  77
-rw-r--r--  tests/topotests/lib/topogen.py  1390
-rw-r--r--  tests/topotests/lib/topojson.py  405
-rw-r--r--  tests/topotests/lib/topolog.py  161
-rw-r--r--  tests/topotests/lib/topotest.py  2397
35 files changed, 26994 insertions, 0 deletions
diff --git a/tests/topotests/lib/__init__.py b/tests/topotests/lib/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/topotests/lib/__init__.py
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
new file mode 100644
index 0000000..3a16ed5
--- /dev/null
+++ b/tests/topotests/lib/bgp.py
@@ -0,0 +1,5596 @@
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+
+import ipaddress
+import sys
+import traceback
+from copy import deepcopy
+from time import sleep
+
+# Import common_config to use commonly used APIs
+from lib.common_config import (
+ create_common_configurations,
+ FRRCFG_FILE,
+ InvalidCLIError,
+ apply_raw_config,
+ check_address_types,
+ find_interface_with_greater_ip,
+ generate_ips,
+ get_frr_ipv6_linklocal,
+ retry,
+ run_frr_cmd,
+ validate_ip_address,
+)
+from lib.topogen import get_topogen
+from lib.topolog import logger
+from lib.topotest import frr_unicode
+
+from lib import topotest
+
+
+def create_router_bgp(tgen, topo=None, input_dict=None, build=False, load_config=True):
+ """
+ API to configure bgp on router
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+    * `build` : Only for initial setup phase this is set as True.
+    * `load_config` : load the generated config to the router when True (default).
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "local_as": "200",
+ "router_id": "22.22.22.22",
+ "bgp_always_compare_med": True,
+ "graceful-restart": {
+ "graceful-restart": True,
+ "preserve-fw-state": True,
+ "timer": {
+ "restart-time": 30,
+ "rib-stale-time": 30,
+ "select-defer-time": 30,
+ }
+ },
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "default_originate":{
+ "neighbor":"R2",
+ "add_type":"lo"
+ "route_map":"rm"
+
+ },
+ "redistribute": [{
+ "redist_type": "static",
+ "attribute": {
+ "metric" : 123
+ }
+ },
+ {"redist_type": "connected"}
+ ],
+ "advertise_networks": [
+ {
+ "network": "20.0.0.0/32",
+ "no_of_network": 10
+ },
+ {
+ "network": "30.0.0.0/32",
+ "no_of_network": 10
+ }
+ ],
+ "neighbor": {
+ "r3": {
+ "keepalivetimer": 60,
+ "holddowntimer": 180,
+ "dest_link": {
+ "r4": {
+ "allowas-in": {
+ "number_occurences": 2
+ },
+ "prefix_lists": [
+ {
+ "name": "pf_list_1",
+ "direction": "in"
+ }
+ ],
+ "route_maps": [{
+ "name": "RMAP_MED_R3",
+ "direction": "in"
+ }],
+ "next_hop_self": True
+ },
+ "r1": {"graceful-restart-helper": True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+
+ Returns
+ -------
+ True or False
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+
+ if topo is None:
+ topo = tgen.json_topo
+
+ # Flag is used when testing ipv6 over ipv4 or vice-versa
+ afi_test = False
+
+ if not input_dict:
+ input_dict = deepcopy(topo)
+ else:
+ topo = topo["routers"]
+ input_dict = deepcopy(input_dict)
+
+ config_data_dict = {}
+
+ for router in input_dict.keys():
+ if "bgp" not in input_dict[router]:
+ logger.debug("Router %s: 'bgp' not present in input_dict", router)
+ continue
+
+ bgp_data_list = input_dict[router]["bgp"]
+
+ if type(bgp_data_list) is not list:
+ bgp_data_list = [bgp_data_list]
+
+ config_data = []
+
+ for bgp_data in bgp_data_list:
+ data_all_bgp = __create_bgp_global(tgen, bgp_data, router, build)
+ if data_all_bgp:
+ bgp_addr_data = bgp_data.setdefault("address_family", {})
+
+ if not bgp_addr_data:
+ logger.debug(
+ "Router %s: 'address_family' not present in "
+ "input_dict for BGP",
+ router,
+ )
+ else:
+ ipv4_data = bgp_addr_data.setdefault("ipv4", {})
+ ipv6_data = bgp_addr_data.setdefault("ipv6", {})
+ l2vpn_data = bgp_addr_data.setdefault("l2vpn", {})
+
+ neigh_unicast = (
+ True
+ if ipv4_data.setdefault("unicast", {})
+ or ipv6_data.setdefault("unicast", {})
+ else False
+ )
+
+ l2vpn_evpn = True if l2vpn_data.setdefault("evpn", {}) else False
+
+ if neigh_unicast:
+ data_all_bgp = __create_bgp_unicast_neighbor(
+ tgen,
+ topo,
+ bgp_data,
+ router,
+ afi_test,
+ config_data=data_all_bgp,
+ )
+
+ if l2vpn_evpn:
+ data_all_bgp = __create_l2vpn_evpn_address_family(
+ tgen, topo, bgp_data, router, config_data=data_all_bgp
+ )
+ if data_all_bgp:
+ config_data.extend(data_all_bgp)
+
+ if config_data:
+ config_data_dict[router] = config_data
+
+ try:
+ result = create_common_configurations(
+ tgen, config_data_dict, "bgp", build, load_config
+ )
+ except InvalidCLIError:
+ logger.error("create_router_bgp", exc_info=True)
+ result = False
+
+ logger.debug("Exiting lib API: create_router_bgp()")
+ return result
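+
+# Example (hedged sketch): a test case would typically drive create_router_bgp()
+# as below. The router name "r1" and the AS number are hypothetical values used
+# only for illustration:
+#
+#     input_dict = {
+#         "r1": {
+#             "bgp": {
+#                 "local_as": "100",
+#                 "router_id": "1.1.1.1",
+#             }
+#         }
+#     }
+#     result = create_router_bgp(tgen, topo, input_dict)
+#     assert result is True, "BGP config failed: {}".format(result)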
+
+
+def __create_bgp_global(tgen, input_dict, router, build=False):
+ """
+ Helper API to create bgp global configuration.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured.
+ * `build` : Only for initial setup phase this is set as True.
+
+ Returns
+ -------
+ list of config commands
+ """
+
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ bgp_data = input_dict
+ del_bgp_action = bgp_data.setdefault("delete", False)
+
+ config_data = []
+
+ if "local_as" not in bgp_data and build:
+ logger.debug(
+ "Router %s: 'local_as' not present in input_dict" "for BGP", router
+ )
+ return config_data
+
+ local_as = bgp_data.setdefault("local_as", "")
+ cmd = "router bgp {}".format(local_as)
+ vrf_id = bgp_data.setdefault("vrf", None)
+ if vrf_id:
+ cmd = "{} vrf {}".format(cmd, vrf_id)
+
+ if del_bgp_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ return config_data
+
+ config_data.append(cmd)
+ config_data.append("no bgp ebgp-requires-policy")
+
+ router_id = bgp_data.setdefault("router_id", None)
+ del_router_id = bgp_data.setdefault("del_router_id", False)
+ if del_router_id:
+ config_data.append("no bgp router-id")
+ if router_id:
+ config_data.append("bgp router-id {}".format(router_id))
+
+ config_data.append("bgp log-neighbor-changes")
+ config_data.append("no bgp network import-check")
+ bgp_peer_grp_data = bgp_data.setdefault("peer-group", {})
+
+ if "peer-group" in bgp_data and bgp_peer_grp_data:
+ peer_grp_data = __create_bgp_peer_group(tgen, bgp_peer_grp_data, router)
+ config_data.extend(peer_grp_data)
+
+ bst_path = bgp_data.setdefault("bestpath", None)
+ if bst_path:
+ if "aspath" in bst_path:
+ if "delete" in bst_path:
+ config_data.append(
+ "no bgp bestpath as-path {}".format(bst_path["aspath"])
+ )
+ else:
+ config_data.append("bgp bestpath as-path {}".format(bst_path["aspath"]))
+
+ if "graceful-restart" in bgp_data:
+ graceful_config = bgp_data["graceful-restart"]
+
+ graceful_restart = graceful_config.setdefault("graceful-restart", None)
+
+ graceful_restart_disable = graceful_config.setdefault(
+ "graceful-restart-disable", None
+ )
+
+ preserve_fw_state = graceful_config.setdefault("preserve-fw-state", None)
+
+ disable_eor = graceful_config.setdefault("disable-eor", None)
+
+ if graceful_restart == False:
+ cmd = "no bgp graceful-restart"
+ if graceful_restart:
+ cmd = "bgp graceful-restart"
+
+ if graceful_restart is not None:
+ config_data.append(cmd)
+
+ if graceful_restart_disable == False:
+ cmd = "no bgp graceful-restart-disable"
+ if graceful_restart_disable:
+ cmd = "bgp graceful-restart-disable"
+
+ if graceful_restart_disable is not None:
+ config_data.append(cmd)
+
+ if preserve_fw_state == False:
+ cmd = "no bgp graceful-restart preserve-fw-state"
+ if preserve_fw_state:
+ cmd = "bgp graceful-restart preserve-fw-state"
+
+ if preserve_fw_state is not None:
+ config_data.append(cmd)
+
+ if disable_eor == False:
+ cmd = "no bgp graceful-restart disable-eor"
+ if disable_eor:
+ cmd = "bgp graceful-restart disable-eor"
+
+ if disable_eor is not None:
+ config_data.append(cmd)
+
+ if "timer" in bgp_data["graceful-restart"]:
+ timer = bgp_data["graceful-restart"]["timer"]
+
+ if "delete" in timer:
+ del_action = timer["delete"]
+ else:
+ del_action = False
+
+ for rs_timer, value in timer.items():
+ rs_timer_value = timer.setdefault(rs_timer, None)
+
+ if rs_timer_value and rs_timer != "delete":
+ cmd = "bgp graceful-restart {} {}".format(rs_timer, rs_timer_value)
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+
+ if "bgp_always_compare_med" in bgp_data:
+ bgp_always_compare_med = bgp_data["bgp_always_compare_med"]
+ if bgp_always_compare_med == True:
+ config_data.append("bgp always-compare-med")
+ elif bgp_always_compare_med == False:
+ config_data.append("no bgp always-compare-med")
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return config_data
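+
+# Example (hedged sketch): for a hypothetical input such as
+#     {"local_as": "100", "graceful-restart": {"graceful-restart": True,
+#      "timer": {"restart-time": 30}}}
+# the helper above emits, alongside the fixed defaults, commands like:
+#     router bgp 100
+#     bgp graceful-restart
+#     bgp graceful-restart restart-time 30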
+
+
+def __create_bgp_unicast_neighbor(
+ tgen, topo, input_dict, router, afi_test, config_data=None
+):
+ """
+ Helper API to create configuration for address-family unicast
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured.
+    * `afi_test` : use when ipv6 needs to be tested over ipv4 or vice-versa
+    * `config_data` : list of already-generated config commands to extend
+ """
+
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ add_neigh = True
+ bgp_data = input_dict
+ if "router bgp" in config_data:
+ add_neigh = False
+
+ bgp_data = input_dict["address_family"]
+
+ for addr_type, addr_dict in bgp_data.items():
+ if not addr_dict:
+ continue
+
+ if not check_address_types(addr_type) and not afi_test:
+ continue
+
+ addr_data = addr_dict["unicast"]
+ if addr_data:
+ config_data.append("address-family {} unicast".format(addr_type))
+
+ advertise_network = addr_data.setdefault("advertise_networks", [])
+ for advertise_network_dict in advertise_network:
+ network = advertise_network_dict["network"]
+ if type(network) is not list:
+ network = [network]
+
+ if "no_of_network" in advertise_network_dict:
+ no_of_network = advertise_network_dict["no_of_network"]
+ else:
+ no_of_network = 1
+
+ del_action = advertise_network_dict.setdefault("delete", False)
+
+ # Generating IPs for verification
+ network_list = generate_ips(network, no_of_network)
+ for ip in network_list:
+ ip = str(ipaddress.ip_network(frr_unicode(ip)))
+
+ cmd = "network {}".format(ip)
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+
+ import_cmd = addr_data.setdefault("import", {})
+ if import_cmd:
+ try:
+ if import_cmd["delete"]:
+ config_data.append("no import vrf {}".format(import_cmd["vrf"]))
+ except KeyError:
+ config_data.append("import vrf {}".format(import_cmd["vrf"]))
+
+ max_paths = addr_data.setdefault("maximum_paths", {})
+ if max_paths:
+ ibgp = max_paths.setdefault("ibgp", None)
+ ebgp = max_paths.setdefault("ebgp", None)
+ del_cmd = max_paths.setdefault("delete", False)
+ if ibgp:
+ if del_cmd:
+ config_data.append("no maximum-paths ibgp {}".format(ibgp))
+ else:
+ config_data.append("maximum-paths ibgp {}".format(ibgp))
+ if ebgp:
+ if del_cmd:
+ config_data.append("no maximum-paths {}".format(ebgp))
+ else:
+ config_data.append("maximum-paths {}".format(ebgp))
+
+ aggregate_addresses = addr_data.setdefault("aggregate_address", [])
+ for aggregate_address in aggregate_addresses:
+ network = aggregate_address.setdefault("network", None)
+ if not network:
+ logger.debug(
+ "Router %s: 'network' not present in " "input_dict for BGP", router
+ )
+ else:
+ cmd = "aggregate-address {}".format(network)
+
+ as_set = aggregate_address.setdefault("as_set", False)
+ summary = aggregate_address.setdefault("summary", False)
+ del_action = aggregate_address.setdefault("delete", False)
+ if as_set:
+ cmd = "{} as-set".format(cmd)
+ if summary:
+ cmd = "{} summary".format(cmd)
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+
+ redistribute_data = addr_data.setdefault("redistribute", {})
+ if redistribute_data:
+ for redistribute in redistribute_data:
+ if "redist_type" not in redistribute:
+ logger.debug(
+ "Router %s: 'redist_type' not present in " "input_dict", router
+ )
+ else:
+ cmd = "redistribute {}".format(redistribute["redist_type"])
+ redist_attr = redistribute.setdefault("attribute", None)
+ if redist_attr:
+ if type(redist_attr) is dict:
+ for key, value in redist_attr.items():
+ cmd = "{} {} {}".format(cmd, key, value)
+ else:
+ cmd = "{} {}".format(cmd, redist_attr)
+
+ del_action = redistribute.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ admin_dist_data = addr_data.setdefault("distance", {})
+ if admin_dist_data:
+ if len(admin_dist_data) < 2:
+ logger.debug(
+ "Router %s: pass the admin distance values for "
+ "ebgp, ibgp and local routes",
+ router,
+ )
+ cmd = "distance bgp {} {} {}".format(
+ admin_dist_data["ebgp"],
+ admin_dist_data["ibgp"],
+ admin_dist_data["local"],
+ )
+
+ del_action = admin_dist_data.setdefault("delete", False)
+ if del_action:
+ cmd = "no distance bgp"
+ config_data.append(cmd)
+
+ import_vrf_data = addr_data.setdefault("import", {})
+ if import_vrf_data:
+ cmd = "import vrf {}".format(import_vrf_data["vrf"])
+
+ del_action = import_vrf_data.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if "neighbor" in addr_data:
+ neigh_data = __create_bgp_neighbor(
+ topo, input_dict, router, addr_type, add_neigh
+ )
+ config_data.extend(neigh_data)
+ # configure default originate
+ if "default_originate" in addr_data:
+ default_originate_config = __create_bgp_default_originate_neighbor(
+ topo, input_dict, router, addr_type, add_neigh
+ )
+ config_data.extend(default_originate_config)
+
+ for addr_type, addr_dict in bgp_data.items():
+ if not addr_dict or not check_address_types(addr_type):
+ continue
+
+ addr_data = addr_dict["unicast"]
+ if "neighbor" in addr_data:
+ neigh_addr_data = __create_bgp_unicast_address_family(
+ topo, input_dict, router, addr_type, add_neigh
+ )
+
+ config_data.extend(neigh_addr_data)
+
+ config_data.append("exit")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return config_data
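+
+# Example (hedged sketch): a hypothetical "address_family" input such as
+#     {"ipv4": {"unicast": {"redistribute": [
+#         {"redist_type": "static", "attribute": {"metric": 123}}]}}}
+# makes the helper above append:
+#     address-family ipv4 unicast
+#     redistribute static metric 123
+#     exit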
+
+
+def __create_bgp_default_originate_neighbor(
+ topo, input_dict, router, addr_type, add_neigh=True
+):
+ """
+ Helper API to create neighbor default - originate configuration
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured
+ """
+ tgen = get_topogen()
+ config_data = []
+ logger.debug("Entering lib API: __create_bgp_default_originate_neighbor()")
+
+ bgp_data = input_dict["address_family"]
+ neigh_data = bgp_data[addr_type]["unicast"]["default_originate"]
+ for name, peer_dict in neigh_data.items():
+ nh_details = topo[name]
+
+ neighbor_ip = None
+ if "dest-link" in neigh_data[name]:
+ dest_link = neigh_data[name]["dest-link"]
+ neighbor_ip = nh_details["links"][dest_link][addr_type].split("/")[0]
+ elif "add_type" in neigh_data[name]:
+ add_type = neigh_data[name]["add_type"]
+ neighbor_ip = nh_details["links"][add_type][addr_type].split("/")[0]
+ else:
+ neighbor_ip = nh_details["links"][router][addr_type].split("/")[0]
+
+ config_data.append("address-family {} unicast".format(addr_type))
+ if "route_map" in peer_dict:
+ route_map = peer_dict["route_map"]
+ if "delete" in peer_dict:
+ if peer_dict["delete"]:
+ config_data.append(
+ "no neighbor {} default-originate route-map {}".format(
+ neighbor_ip, route_map
+ )
+ )
+ else:
+ config_data.append(
+ " neighbor {} default-originate route-map {}".format(
+ neighbor_ip, route_map
+ )
+ )
+ else:
+ config_data.append(
+ " neighbor {} default-originate route-map {}".format(
+ neighbor_ip, route_map
+ )
+ )
+
+ else:
+ if "delete" in peer_dict:
+ if peer_dict["delete"]:
+ config_data.append(
+ "no neighbor {} default-originate".format(neighbor_ip)
+ )
+ else:
+ config_data.append(
+ "neighbor {} default-originate".format(neighbor_ip)
+ )
+ else:
+ config_data.append("neighbor {} default-originate".format(neighbor_ip))
+
+ logger.debug("Exiting lib API: __create_bgp_default_originate_neighbor()")
+ return config_data
+
+
+def __create_l2vpn_evpn_address_family(
+ tgen, topo, input_dict, router, config_data=None
+):
+ """
+ Helper API to create configuration for l2vpn evpn address-family
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring
+ from testcase
+ * `router` : router id to be configured.
+ * `build` : Only for initial setup phase this is set as True.
+ """
+
+ result = False
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ bgp_data = input_dict["address_family"]
+
+ for family_type, family_dict in bgp_data.items():
+ if family_type != "l2vpn":
+ continue
+
+ family_data = family_dict["evpn"]
+ if family_data:
+ config_data.append("address-family l2vpn evpn")
+
+ advertise_data = family_data.setdefault("advertise", {})
+ neighbor_data = family_data.setdefault("neighbor", {})
+ advertise_all_vni_data = family_data.setdefault("advertise-all-vni", None)
+ rd_data = family_data.setdefault("rd", None)
+ no_rd_data = family_data.setdefault("no rd", False)
+ route_target_data = family_data.setdefault("route-target", {})
+
+ if advertise_data:
+ for address_type, unicast_type in advertise_data.items():
+ if type(unicast_type) is dict:
+ for key, value in unicast_type.items():
+ cmd = "advertise {} {}".format(address_type, key)
+
+ if value:
+ route_map = value.setdefault("route-map", {})
+ advertise_del_action = value.setdefault("delete", None)
+
+ if route_map:
+ cmd = "{} route-map {}".format(cmd, route_map)
+
+ if advertise_del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+
+ if neighbor_data:
+ for neighbor, neighbor_data in neighbor_data.items():
+ ipv4_neighbor = neighbor_data.setdefault("ipv4", {})
+ ipv6_neighbor = neighbor_data.setdefault("ipv6", {})
+
+ if ipv4_neighbor:
+ for neighbor_name, action in ipv4_neighbor.items():
+ neighbor_ip = topo[neighbor]["links"][neighbor_name][
+ "ipv4"
+ ].split("/")[0]
+
+ if type(action) is dict:
+ next_hop_self = action.setdefault("next_hop_self", None)
+ route_maps = action.setdefault("route_maps", {})
+
+ if next_hop_self is not None:
+ if next_hop_self is True:
+ config_data.append(
+ "neighbor {} "
+ "next-hop-self".format(neighbor_ip)
+ )
+ elif next_hop_self is False:
+ config_data.append(
+ "no neighbor {} "
+ "next-hop-self".format(neighbor_ip)
+ )
+
+ if route_maps:
+ for route_map in route_maps:
+ name = route_map.setdefault("name", {})
+ direction = route_map.setdefault("direction", "in")
+ del_action = route_map.setdefault("delete", False)
+
+ if not name:
+ logger.info(
+ "Router %s: 'name' "
+ "not present in "
+ "input_dict for BGP "
+ "neighbor route name",
+ router,
+ )
+ else:
+ cmd = "neighbor {} route-map {} " "{}".format(
+ neighbor_ip, name, direction
+ )
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+
+ else:
+ if action == "activate":
+ cmd = "neighbor {} activate".format(neighbor_ip)
+ elif action == "deactivate":
+ cmd = "no neighbor {} activate".format(neighbor_ip)
+
+ config_data.append(cmd)
+
+ if ipv6_neighbor:
+                    for neighbor_name, action in ipv6_neighbor.items():
+ neighbor_ip = topo[neighbor]["links"][neighbor_name][
+ "ipv6"
+ ].split("/")[0]
+ if action == "activate":
+ cmd = "neighbor {} activate".format(neighbor_ip)
+ elif action == "deactivate":
+ cmd = "no neighbor {} activate".format(neighbor_ip)
+
+ config_data.append(cmd)
+
+ if advertise_all_vni_data == True:
+ cmd = "advertise-all-vni"
+ config_data.append(cmd)
+ elif advertise_all_vni_data == False:
+ cmd = "no advertise-all-vni"
+ config_data.append(cmd)
+
+ if rd_data:
+ cmd = "rd {}".format(rd_data)
+ config_data.append(cmd)
+
+ if no_rd_data:
+ cmd = "no rd {}".format(no_rd_data)
+ config_data.append(cmd)
+
+ if route_target_data:
+ for rt_type, rt_dict in route_target_data.items():
+ for _rt_dict in rt_dict:
+ rt_value = _rt_dict.setdefault("value", None)
+ del_rt = _rt_dict.setdefault("delete", None)
+
+ if rt_value:
+ cmd = "route-target {} {}".format(rt_type, rt_value)
+ if del_rt:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return config_data
+
+
+def __create_bgp_peer_group(topo, input_dict, router):
+ """
+ Helper API to create neighbor specific configuration
+
+ Parameters
+ ----------
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured
+ """
+ config_data = []
+ logger.debug("Entering lib API: __create_bgp_peer_group()")
+
+ for grp, grp_dict in input_dict.items():
+ config_data.append("neighbor {} peer-group".format(grp))
+ neigh_cxt = "neighbor {} ".format(grp)
+ update_source = grp_dict.setdefault("update-source", None)
+ remote_as = grp_dict.setdefault("remote-as", None)
+ capability = grp_dict.setdefault("capability", None)
+ if update_source:
+ config_data.append("{} update-source {}".format(neigh_cxt, update_source))
+
+ if remote_as:
+ config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
+
+ if capability:
+ config_data.append("{} capability {}".format(neigh_cxt, capability))
+
+ logger.debug("Exiting lib API: __create_bgp_peer_group()")
+ return config_data
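+
+# Example (hedged sketch): a hypothetical "peer-group" input such as
+#     {"PG1": {"remote-as": "200", "update-source": "lo"}}
+# makes the helper above emit:
+#     neighbor PG1 peer-group
+#     neighbor PG1 update-source lo
+#     neighbor PG1 remote-as 200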
+
+
+def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
+ """
+ Helper API to create neighbor specific configuration
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured
+ """
+ config_data = []
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+ bgp_data = input_dict["address_family"]
+ neigh_data = bgp_data[addr_type]["unicast"]["neighbor"]
+ global_connect = input_dict.get("connecttimer", 5)
+
+ for name, peer_dict in neigh_data.items():
+ remote_as = 0
+ for dest_link, peer in peer_dict["dest_link"].items():
+ local_asn = peer.setdefault("local_asn", {})
+ if local_asn:
+ local_as = local_asn.setdefault("local_as", 0)
+ remote_as = local_asn.setdefault("remote_as", 0)
+ no_prepend = local_asn.setdefault("no_prepend", False)
+ replace_as = local_asn.setdefault("replace_as", False)
+                if local_as == remote_as:
+                    assert False, (
+                        "Configuration Error: local_as must not be the "
+                        "same as the neighbor's AS number"
+                    )
+ nh_details = topo[name]
+
+ if "vrfs" in topo[router] or type(nh_details["bgp"]) is list:
+ for vrf_data in nh_details["bgp"]:
+ if "vrf" in nh_details["links"][dest_link] and "vrf" in vrf_data:
+ if nh_details["links"][dest_link]["vrf"] == vrf_data["vrf"]:
+ if not remote_as:
+ remote_as = vrf_data["local_as"]
+ break
+ else:
+ if "vrf" not in vrf_data:
+ if not remote_as:
+ remote_as = vrf_data["local_as"]
+ break
+ else:
+ if not remote_as:
+ remote_as = nh_details["bgp"]["local_as"]
+
+ update_source = None
+
+ if "neighbor_type" in peer and peer["neighbor_type"] == "unnumbered":
+ ip_addr = nh_details["links"][dest_link]["peer-interface"]
+ elif "neighbor_type" in peer and peer["neighbor_type"] == "link-local":
+ intf = topo[name]["links"][dest_link]["interface"]
+ ip_addr = get_frr_ipv6_linklocal(tgen, name, intf)
+ elif dest_link in nh_details["links"].keys():
+ try:
+ ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0]
+ except KeyError:
+ intf = topo[name]["links"][dest_link]["interface"]
+ ip_addr = get_frr_ipv6_linklocal(tgen, name, intf)
+ if "delete" in peer and peer["delete"]:
+ neigh_cxt = "no neighbor {}".format(ip_addr)
+ config_data.append("{}".format(neigh_cxt))
+ return config_data
+ else:
+ neigh_cxt = "neighbor {}".format(ip_addr)
+
+ if "peer-group" in peer:
+ config_data.append(
+ "neighbor {} interface peer-group {}".format(
+ ip_addr, peer["peer-group"]
+ )
+ )
+
+ # Loopback interface
+ if "source_link" in peer:
+ if peer["source_link"] == "lo":
+ update_source = topo[router]["links"]["lo"][addr_type].split("/")[0]
+ else:
+ update_source = topo[router]["links"][peer["source_link"]][
+ "interface"
+ ]
+ if "peer-group" not in peer:
+ if "neighbor_type" in peer and peer["neighbor_type"] == "unnumbered":
+ config_data.append(
+ "{} interface remote-as {}".format(neigh_cxt, remote_as)
+ )
+ elif add_neigh:
+ config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
+
+ if local_asn and local_as:
+ cmd = "{} local-as {}".format(neigh_cxt, local_as)
+ if no_prepend:
+ cmd = "{} no-prepend".format(cmd)
+ if replace_as:
+ cmd = "{} replace-as".format(cmd)
+ config_data.append("{}".format(cmd))
+
+ if addr_type == "ipv6":
+ config_data.append("address-family ipv6 unicast")
+ config_data.append("{} activate".format(neigh_cxt))
+
+ if "neighbor_type" in peer and peer["neighbor_type"] == "link-local":
+ config_data.append(
+ "{} update-source {}".format(
+ neigh_cxt, nh_details["links"][dest_link]["peer-interface"]
+ )
+ )
+ config_data.append(
+ "{} interface {}".format(
+ neigh_cxt, nh_details["links"][dest_link]["peer-interface"]
+ )
+ )
+
+ disable_connected = peer.setdefault("disable_connected_check", False)
+ connect = peer.get("connecttimer", global_connect)
+ keep_alive = peer.setdefault("keepalivetimer", 3)
+ hold_down = peer.setdefault("holddowntimer", 10)
+ password = peer.setdefault("password", None)
+ no_password = peer.setdefault("no_password", None)
+ capability = peer.setdefault("capability", None)
+ max_hop_limit = peer.setdefault("ebgp_multihop", 1)
+
+ graceful_restart = peer.setdefault("graceful-restart", None)
+ graceful_restart_helper = peer.setdefault("graceful-restart-helper", None)
+ graceful_restart_disable = peer.setdefault("graceful-restart-disable", None)
+ if capability:
+ config_data.append("{} capability {}".format(neigh_cxt, capability))
+
+ if update_source:
+ config_data.append(
+ "{} update-source {}".format(neigh_cxt, update_source)
+ )
+ if disable_connected:
+ config_data.append(
+ "{} disable-connected-check".format(disable_connected)
+ )
+ if int(keep_alive) != 60 and int(hold_down) != 180:
+ config_data.append(
+ "{} timers {} {}".format(neigh_cxt, keep_alive, hold_down)
+ )
+ if int(connect) != 120:
+ config_data.append("{} timers connect {}".format(neigh_cxt, connect))
+
+ if graceful_restart:
+ config_data.append("{} graceful-restart".format(neigh_cxt))
+ elif graceful_restart == False:
+ config_data.append("no {} graceful-restart".format(neigh_cxt))
+
+ if graceful_restart_helper:
+ config_data.append("{} graceful-restart-helper".format(neigh_cxt))
+ elif graceful_restart_helper == False:
+ config_data.append("no {} graceful-restart-helper".format(neigh_cxt))
+
+ if graceful_restart_disable:
+ config_data.append("{} graceful-restart-disable".format(neigh_cxt))
+ elif graceful_restart_disable == False:
+ config_data.append("no {} graceful-restart-disable".format(neigh_cxt))
+
+ if password:
+ config_data.append("{} password {}".format(neigh_cxt, password))
+
+ if no_password:
+ config_data.append("no {} password {}".format(neigh_cxt, no_password))
+
+ if max_hop_limit > 1:
+ config_data.append(
+ "{} ebgp-multihop {}".format(neigh_cxt, max_hop_limit)
+ )
+ config_data.append("{} enforce-multihop".format(neigh_cxt))
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return config_data
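+
+# Example (hedged sketch): with hypothetical router names and addresses, a
+# neighbor entry such as
+#     {"r2": {"dest_link": {"r1": {"keepalivetimer": 3, "holddowntimer": 10}}}}
+# is turned by the helper above into commands like (the peer IP is read from
+# the topology json):
+#     neighbor 10.0.0.2 remote-as 200
+#     neighbor 10.0.0.2 timers 3 10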
+
+
+def __create_bgp_unicast_address_family(
+ topo, input_dict, router, addr_type, add_neigh=True
+):
+ """
+ API prints bgp global config to bgp_json file.
+
+ Parameters
+ ----------
+ * `bgp_cfg` : BGP class variables have BGP config saved in it for
+ particular router,
+ * `local_as_no` : Local as number
+ * `router_id` : Router-id
+ * `ecmp_path` : ECMP max path
+ * `gr_enable` : BGP global gracefull restart config
+ """
+
+ config_data = []
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+ bgp_data = input_dict["address_family"]
+ neigh_data = bgp_data[addr_type]["unicast"]["neighbor"]
+
+ for peer_name, peer_dict in deepcopy(neigh_data).items():
+ for dest_link, peer in peer_dict["dest_link"].items():
+ deactivate = None
+ activate = None
+ nh_details = topo[peer_name]
+ activate_addr_family = peer.setdefault("activate", None)
+ deactivate_addr_family = peer.setdefault("deactivate", None)
+ # Loopback interface
+ if "source_link" in peer and peer["source_link"] == "lo":
+ for destRouterLink, data in sorted(nh_details["links"].items()):
+ if "type" in data and data["type"] == "loopback":
+ if dest_link == destRouterLink:
+ ip_addr = (
+ nh_details["links"][destRouterLink][addr_type]
+ .split("/")[0]
+ .lower()
+ )
+
+ # Physical interface
+ else:
+            # Check the neighbor type; for an unnumbered neighbor, use the interface.
+ if "neighbor_type" in peer and peer["neighbor_type"] == "unnumbered":
+ ip_addr = nh_details["links"][dest_link]["peer-interface"]
+ elif "neighbor_type" in peer and peer["neighbor_type"] == "link-local":
+ intf = topo[peer_name]["links"][dest_link]["interface"]
+ ip_addr = get_frr_ipv6_linklocal(tgen, peer_name, intf)
+ elif dest_link in nh_details["links"].keys():
+ try:
+ ip_addr = nh_details["links"][dest_link][addr_type].split("/")[
+ 0
+ ]
+ except KeyError:
+ intf = topo[peer_name]["links"][dest_link]["interface"]
+ ip_addr = get_frr_ipv6_linklocal(tgen, peer_name, intf)
+ if (
+ addr_type == "ipv4"
+ and bgp_data["ipv6"]
+ and check_address_types("ipv6")
+ and "ipv6" in nh_details["links"][dest_link]
+ ):
+ deactivate = nh_details["links"][dest_link]["ipv6"].split("/")[
+ 0
+ ]
+
+ neigh_cxt = "neighbor {}".format(ip_addr)
+ config_data.append("address-family {} unicast".format(addr_type))
+
+ if activate_addr_family is not None:
+ config_data.append(
+ "address-family {} unicast".format(activate_addr_family)
+ )
+
+ config_data.append("{} activate".format(neigh_cxt))
+
+ if deactivate and activate_addr_family is None:
+ config_data.append("no neighbor {} activate".format(deactivate))
+
+ if deactivate_addr_family is not None:
+ config_data.append(
+ "address-family {} unicast".format(deactivate_addr_family)
+ )
+ config_data.append("no {} activate".format(neigh_cxt))
+
+ next_hop_self = peer.setdefault("next_hop_self", None)
+ send_community = peer.setdefault("send_community", None)
+ prefix_lists = peer.setdefault("prefix_lists", {})
+ route_maps = peer.setdefault("route_maps", {})
+ no_send_community = peer.setdefault("no_send_community", None)
+ capability = peer.setdefault("capability", None)
+ allowas_in = peer.setdefault("allowas-in", None)
+
+ # next-hop-self
+ if next_hop_self is not None:
+ if next_hop_self is True:
+ config_data.append("{} next-hop-self".format(neigh_cxt))
+ else:
+ config_data.append("no {} next-hop-self".format(neigh_cxt))
+
+ # send_community
+ if send_community:
+ config_data.append("{} send-community".format(neigh_cxt))
+
+ # no_send_community
+ if no_send_community:
+ config_data.append(
+ "no {} send-community {}".format(neigh_cxt, no_send_community)
+ )
+
+ # capability_ext_nh
+ if capability and addr_type == "ipv6":
+ config_data.append("address-family ipv4 unicast")
+ config_data.append("{} activate".format(neigh_cxt))
+
+ if "allowas_in" in peer:
+ allow_as_in = peer["allowas_in"]
+ config_data.append("{} allowas-in {}".format(neigh_cxt, allow_as_in))
+
+ if "no_allowas_in" in peer:
+ allow_as_in = peer["no_allowas_in"]
+ config_data.append("no {} allowas-in {}".format(neigh_cxt, allow_as_in))
+
+ if "shutdown" in peer:
+ config_data.append(
+ "{} {} shutdown".format(
+ "no" if not peer["shutdown"] else "", neigh_cxt
+ )
+ )
+
+ if prefix_lists:
+ for prefix_list in prefix_lists:
+ name = prefix_list.setdefault("name", {})
+ direction = prefix_list.setdefault("direction", "in")
+ del_action = prefix_list.setdefault("delete", False)
+ if not name:
+ logger.info(
+ "Router %s: 'name' not present in "
+ "input_dict for BGP neighbor prefix lists",
+ router,
+ )
+ else:
+ cmd = "{} prefix-list {} {}".format(neigh_cxt, name, direction)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if route_maps:
+ for route_map in route_maps:
+ name = route_map.setdefault("name", {})
+ direction = route_map.setdefault("direction", "in")
+ del_action = route_map.setdefault("delete", False)
+ if not name:
+ logger.info(
+ "Router %s: 'name' not present in "
+ "input_dict for BGP neighbor route name",
+ router,
+ )
+ else:
+ cmd = "{} route-map {} {}".format(neigh_cxt, name, direction)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if allowas_in:
+ number_occurences = allowas_in.setdefault("number_occurences", {})
+ del_action = allowas_in.setdefault("delete", False)
+
+ cmd = "{} allowas-in {}".format(neigh_cxt, number_occurences)
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+
+ return config_data
+
+
+def modify_bgp_config_when_bgpd_down(tgen, topo, input_dict):
+ """
+    API will save the current config to the router's /etc/frr/ for the
+    bgpd daemon (bgpd.conf file)
+
+    Parameters
+    ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : defines for which router, and which config
+ needs to be modified
+
+ Usage:
+ ------
+ # Modify graceful-restart config not to set f-bit
+ # and write to /etc/frr
+
+ # Api call to delete advertised networks
+ input_dict_2 = {
+ "r5": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "101.0.20.1/32",
+ "no_of_network": 5,
+ "delete": True
+ }
+ ],
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "5::1/128",
+ "no_of_network": 5,
+ "delete": True
+ }
+ ],
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = modify_bgp_config_when_bgpd_down(tgen, topo, input_dict)
+
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ try:
+ result = create_router_bgp(
+ tgen, topo, input_dict, build=False, load_config=False
+ )
+ if result is not True:
+ return result
+
+ # Copy bgp config file to /etc/frr
+ for dut in input_dict.keys():
+ router_list = tgen.routers()
+ for router, rnode in router_list.items():
+ if router != dut:
+ continue
+
+ logger.info("Delete BGP config when BGPd is down in {}".format(router))
+ # Reading the config from "rundir" and copy to /etc/frr/bgpd.conf
+ cmd = "cat {}/{}/{} >> /etc/frr/bgpd.conf".format(
+ tgen.logdir, router, FRRCFG_FILE
+ )
+ router_list[router].run(cmd)
+
+ except Exception as e:
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+#############################################
+# Verification APIs
+#############################################
+@retry(retry_timeout=8)
+def verify_router_id(tgen, topo, input_dict, expected=True):
+ """
+ Running command "show ip bgp json" for DUT and reading router-id
+ from input_dict and verifying with command output.
+    1. Statically modified router-id should take effect
+    2. When the static router-id is deleted, the highest loopback address
+       should become the router-id
+    3. When the loopback interface is down, the highest physical interface
+       address should become the router-id
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `input_dict`: input dictionary, have details of Device Under Test, for
+ which user wants to test the data
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ # Verify if router-id for r1 is 12.12.12.12
+ input_dict = {
+ "r1":{
+ "router_id": "12.12.12.12"
+ }
+ # Verify that router-id for r1 is highest interface ip
+ input_dict = {
+ "routers": ["r1"]
+ }
+ result = verify_router_id(tgen, topo, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ for router in input_dict.keys():
+ if router not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[router]
+
+ del_router_id = input_dict[router]["bgp"].setdefault("del_router_id", False)
+
+ logger.info("Checking router %s router-id", router)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True)
+ router_id_out = show_bgp_json["ipv4Unicast"]["routerId"]
+ router_id_out = ipaddress.IPv4Address(frr_unicode(router_id_out))
+
+ # Once router-id is deleted, highest interface ip should become
+ # router-id
+ if del_router_id:
+ router_id = find_interface_with_greater_ip(topo, router)
+ else:
+ router_id = input_dict[router]["bgp"]["router_id"]
+ router_id = ipaddress.IPv4Address(frr_unicode(router_id))
+
+ if router_id == router_id_out:
+ logger.info("Found expected router-id %s for router %s", router_id, router)
+ else:
+ errormsg = (
+ "Router-id for router:{} mismatch, expected:"
+ " {} but found:{}".format(router, router_id, router_id_out)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=150)
+def verify_bgp_convergence(tgen, topo=None, dut=None, expected=True, addr_type=None):
+ """
+    API will verify if BGP is converged within the given time frame, by
+    running the "show bgp summary json" command and verifying that the
+    BGP neighbor state is Established.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `dut`: device under test
+ * `addr_type` : address type for which verification to be done, by-default both v4 and v6
+
+ Usage
+ -----
+    # To verify if BGP is converged for all the routers used in the
+    # topology
+ results = verify_bgp_convergence(tgen, topo, dut="r1")
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ if topo is None:
+ topo = tgen.json_topo
+
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+ for router, rnode in tgen.routers().items():
+ if "bgp" not in topo["routers"][router]:
+ continue
+
+ if dut is not None and dut != router:
+ continue
+
+ logger.info("Verifying BGP Convergence on router %s:", router)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp vrf all summary json", isjson=True)
+ # Verifying output dictionary show_bgp_json is empty or not
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
+
+ # To find neighbor ip type
+ bgp_data_list = topo["routers"][router]["bgp"]
+
+ if type(bgp_data_list) is not list:
+ bgp_data_list = [bgp_data_list]
+
+ for bgp_data in bgp_data_list:
+ if "vrf" in bgp_data:
+ vrf = bgp_data["vrf"]
+ if vrf is None:
+ vrf = "default"
+ else:
+ vrf = "default"
+
+ # To find neighbor ip type
+ bgp_addr_type = bgp_data["address_family"]
+ if "l2vpn" in bgp_addr_type:
+ total_evpn_peer = 0
+
+ if "neighbor" not in bgp_addr_type["l2vpn"]["evpn"]:
+ continue
+
+ bgp_neighbors = bgp_addr_type["l2vpn"]["evpn"]["neighbor"]
+ total_evpn_peer += len(bgp_neighbors)
+
+ no_of_evpn_peer = 0
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ for _addr_type, dest_link_dict in peer_data.items():
+ data = topo["routers"][bgp_neighbor]["links"]
+ for dest_link in dest_link_dict.keys():
+ if dest_link in data:
+ peer_details = peer_data[_addr_type][dest_link]
+
+ neighbor_ip = data[dest_link][_addr_type].split("/")[0]
+ nh_state = None
+
+ if (
+ "ipv4Unicast" in show_bgp_json[vrf]
+ or "ipv6Unicast" in show_bgp_json[vrf]
+ ):
+ errormsg = (
+ "[DUT: %s] VRF: %s, "
+ "ipv4Unicast/ipv6Unicast"
+ " address-family present"
+ " under l2vpn" % (router, vrf)
+ )
+ return errormsg
+
+ l2VpnEvpn_data = show_bgp_json[vrf]["l2VpnEvpn"][
+ "peers"
+ ]
+ nh_state = l2VpnEvpn_data[neighbor_ip]["state"]
+
+ if nh_state == "Established":
+ no_of_evpn_peer += 1
+
+ if no_of_evpn_peer == total_evpn_peer:
+ logger.info(
+ "[DUT: %s] VRF: %s, BGP is Converged for " "epvn peers",
+ router,
+ vrf,
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: %s] VRF: %s, BGP is not converged "
+ "for evpn peers" % (router, vrf)
+ )
+ return errormsg
+ else:
+ total_peer = 0
+ for _addr_type in bgp_addr_type.keys():
+ if not check_address_types(_addr_type):
+ continue
+
+ if addr_type and addr_type != _addr_type:
+ continue
+
+ bgp_neighbors = bgp_addr_type[_addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ no_of_peer = 0
+ for _addr_type in bgp_addr_type.keys():
+                    if not check_address_types(_addr_type):
+ continue
+
+ if addr_type and addr_type != _addr_type:
+ continue
+
+ bgp_neighbors = bgp_addr_type[_addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ for dest_link in peer_data["dest_link"].keys():
+ data = topo["routers"][bgp_neighbor]["links"]
+ if dest_link in data:
+ peer_details = peer_data["dest_link"][dest_link]
+ # for link local neighbors
+ if (
+ "neighbor_type" in peer_details
+ and peer_details["neighbor_type"] == "link-local"
+ ):
+ intf = topo["routers"][bgp_neighbor]["links"][
+ dest_link
+ ]["interface"]
+ neighbor_ip = get_frr_ipv6_linklocal(
+ tgen, bgp_neighbor, intf
+ )
+ elif "source_link" in peer_details:
+ neighbor_ip = topo["routers"][bgp_neighbor][
+ "links"
+ ][peer_details["source_link"]][_addr_type].split(
+ "/"
+ )[
+ 0
+ ]
+ elif (
+ "neighbor_type" in peer_details
+ and peer_details["neighbor_type"] == "unnumbered"
+ ):
+ neighbor_ip = data[dest_link]["peer-interface"]
+ else:
+ neighbor_ip = data[dest_link][_addr_type].split(
+ "/"
+ )[0]
+ nh_state = None
+ neighbor_ip = neighbor_ip.lower()
+ if _addr_type == "ipv4":
+ ipv4_data = show_bgp_json[vrf]["ipv4Unicast"][
+ "peers"
+ ]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json[vrf]["ipv6Unicast"][
+ "peers"
+ ]
+ if neighbor_ip in ipv6_data:
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+
+ if no_of_peer == total_peer and no_of_peer > 0:
+ logger.info("[DUT: %s] VRF: %s, BGP is Converged", router, vrf)
+ result = True
+ else:
+ errormsg = "[DUT: %s] VRF: %s, BGP is not converged" % (router, vrf)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
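+
+# Example (hedged sketch): test cases typically assert on the return value;
+# the API returns True on convergence, otherwise an error string:
+#     result = verify_bgp_convergence(tgen, topo, dut="r1")
+#     assert result is True, "BGP not converged: {}".format(result)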
+
+
+@retry(retry_timeout=16)
+def verify_bgp_community(
+ tgen,
+ addr_type,
+ router,
+ network,
+ input_dict=None,
+ vrf=None,
+ bestpath=False,
+ expected=True,
+):
+ """
+    API to verify that a BGP large community is attached to a route for
+    any given DUT by running "show bgp ipv4/6 {route address} json" command.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `addr_type` : ip type, ipv4/ipv6
+    * `router`: Device Under Test
+ * `network`: network for which set criteria needs to be verified
+ * `input_dict`: having details like - for which router, community and
+ values needs to be verified
+ * `vrf`: VRF name
+ * `bestpath`: To check best path cli
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ networks = ["200.50.2.0/32"]
+ input_dict = {
+ "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
+ }
+    result = verify_bgp_community(tgen, "ipv4", dut, networks, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: verify_bgp_community()")
+ if router not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[router]
+
+ logger.info(
+ "Verifying BGP community attributes on dut %s: for %s " "network %s",
+ router,
+ addr_type,
+ network,
+ )
+
+ command = "show bgp"
+
+ for net in network:
+ if vrf:
+ cmd = "{} vrf {} {} {} json".format(command, vrf, addr_type, net)
+ elif bestpath:
+ cmd = "{} {} {} bestpath json".format(command, addr_type, net)
+ else:
+ cmd = "{} {} {} json".format(command, addr_type, net)
+
+ show_bgp_json = run_frr_cmd(rnode, cmd, isjson=True)
+ if "paths" not in show_bgp_json:
+ return "Prefix {} not found in BGP table of router: {}".format(net, router)
+
+ as_paths = show_bgp_json["paths"]
+ found = False
+ for i in range(len(as_paths)):
+ if (
+ "largeCommunity" in show_bgp_json["paths"][i]
+ or "community" in show_bgp_json["paths"][i]
+ ):
+ found = True
+ logger.info(
+ "Large Community attribute is found for route:" " %s in router: %s",
+ net,
+ router,
+ )
+ if input_dict is not None:
+ for criteria, comm_val in input_dict.items():
+ show_val = show_bgp_json["paths"][i][criteria]["string"]
+ if comm_val == show_val:
+ logger.info(
+ "Verifying BGP %s for prefix: %s"
+ " in router: %s, found expected"
+ " value: %s",
+ criteria,
+ net,
+ router,
+ comm_val,
+ )
+ else:
+ errormsg = (
+ "Failed: Verifying BGP attribute"
+ " {} for route: {} in router: {}"
+ ", expected value: {} but found"
+ ": {}".format(criteria, net, router, comm_val, show_val)
+ )
+ return errormsg
+
+ if not found:
+ errormsg = (
+ "Large Community attribute is not found for route: "
+ "{} in router: {} ".format(net, router)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: verify_bgp_community()")
+ return True
+
+
+def modify_as_number(tgen, topo, input_dict):
+ """
+    API reads local_as and remote_as from the user-defined input_dict and
+    modifies the router's ASNs accordingly. The router's config is modified
+    and the changed config is loaded to the router.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : defines for which router ASNs needs to be modified
+
+ Usage
+ -----
+ To modify ASNs for router r1
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "local_as": 131079
+ }
+        }
+    }
+ result = modify_as_number(tgen, topo, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ try:
+ new_topo = deepcopy(topo["routers"])
+ router_dict = {}
+ for router in input_dict.keys():
+ # Remove bgp configuration
+
+ router_dict.update({router: {"bgp": {"delete": True}}})
+ try:
+ new_topo[router]["bgp"]["local_as"] = input_dict[router]["bgp"][
+ "local_as"
+ ]
+ except TypeError:
+ new_topo[router]["bgp"][0]["local_as"] = input_dict[router]["bgp"][
+ "local_as"
+ ]
+ logger.info("Removing bgp configuration")
+ create_router_bgp(tgen, topo, router_dict)
+
+ logger.info("Applying modified bgp configuration")
+ result = create_router_bgp(tgen, new_topo)
+ if result is not True:
+ result = "Error applying new AS number config"
+ except Exception as e:
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+@retry(retry_timeout=8)
+def verify_as_numbers(tgen, topo, input_dict, expected=True):
+ """
+ This API is to verify AS numbers for given DUT by running
+ "show ip bgp neighbor json" command. Local AS and Remote AS
+ will ve verified with input_dict data and command output.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `input_dict`: defines - for which router, AS numbers needs to be verified
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "local_as": 131079
+ }
+ }
+ }
+    result = verify_as_numbers(tgen, topo, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ for router in input_dict.keys():
+ if router not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[router]
+
+ logger.info("Verifying AS numbers for dut %s:", router)
+
+ show_ip_bgp_neighbor_json = run_frr_cmd(
+ rnode, "show ip bgp neighbor json", isjson=True
+ )
+ local_as = input_dict[router]["bgp"]["local_as"]
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+
+ for addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ remote_as = input_dict[bgp_neighbor]["bgp"]["local_as"]
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ neighbor_ip = None
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+ neigh_data = show_ip_bgp_neighbor_json[neighbor_ip]
+ # Verify Local AS for router
+ if neigh_data["localAs"] != local_as:
+ errormsg = (
+ "Failed: Verify local_as for dut {},"
+ " found: {} but expected: {}".format(
+ router, neigh_data["localAs"], local_as
+ )
+ )
+ return errormsg
+ else:
+ logger.info(
+ "Verified local_as for dut %s, found" " expected: %s",
+ router,
+ local_as,
+ )
+
+ # Verify Remote AS for neighbor
+ if neigh_data["remoteAs"] != remote_as:
+ errormsg = (
+ "Failed: Verify remote_as for dut "
+ "{}'s neighbor {}, found: {} but "
+ "expected: {}".format(
+ router, bgp_neighbor, neigh_data["remoteAs"], remote_as
+ )
+ )
+ return errormsg
+ else:
+ logger.info(
+ "Verified remote_as for dut %s's "
+ "neighbor %s, found expected: %s",
+ router,
+ bgp_neighbor,
+ remote_as,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=150)
+def verify_bgp_convergence_from_running_config(tgen, dut=None, expected=True):
+ """
+    API to verify BGP convergence between loopback and physical interfaces.
+    This API would be used when routers have a BGP neighborship from a
+    loopback to a physical interface or vice-versa.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+    results = verify_bgp_convergence_from_running_config(tgen, dut="r1")
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router, rnode in tgen.routers().items():
+ if dut is not None and dut != router:
+ continue
+
+ logger.info("Verifying BGP Convergence on router %s:", router)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp vrf all summary json", isjson=True)
+ # Verifying output dictionary show_bgp_json is empty or not
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
+
+ for vrf, addr_family_data in show_bgp_json.items():
+ for address_family, neighborship_data in addr_family_data.items():
+ total_peer = 0
+ no_of_peer = 0
+
+ total_peer = len(neighborship_data["peers"].keys())
+
+ for peer, peer_data in neighborship_data["peers"].items():
+ if peer_data["state"] == "Established":
+ no_of_peer += 1
+
+ if total_peer != no_of_peer:
+ errormsg = (
+ "[DUT: %s] VRF: %s, BGP is not converged"
+ " for peer: %s" % (router, vrf, peer)
+ )
+ return errormsg
+
+ logger.info("[DUT: %s]: vrf: %s, BGP is Converged", router, vrf)
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return True
+
+
+def clear_bgp(tgen, addr_type, router, vrf=None, neighbor=None):
+ """
+    This API is to clear bgp neighborship by running the
+    "clear ip bgp *" / "clear bgp ipv6 *" commands.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `addr_type`: ip type ipv4/ipv6
+ * `router`: device under test
+ * `vrf`: vrf name
+ * `neighbor`: Neighbor for which bgp needs to be cleared
+
+ Usage
+ -----
+ clear_bgp(tgen, addr_type, "r1")
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if router not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[router]
+
+ if vrf:
+ if type(vrf) is not list:
+ vrf = [vrf]
+
+ # Clearing BGP
+ logger.info("Clearing BGP neighborship for router %s..", router)
+ if addr_type == "ipv4":
+ if vrf:
+ for _vrf in vrf:
+ run_frr_cmd(rnode, "clear ip bgp vrf {} *".format(_vrf))
+ elif neighbor:
+ run_frr_cmd(rnode, "clear bgp ipv4 {}".format(neighbor))
+ else:
+ run_frr_cmd(rnode, "clear ip bgp *")
+ elif addr_type == "ipv6":
+ if vrf:
+ for _vrf in vrf:
+ run_frr_cmd(rnode, "clear bgp vrf {} ipv6 *".format(_vrf))
+ elif neighbor:
+ run_frr_cmd(rnode, "clear bgp ipv6 {}".format(neighbor))
+ else:
+ run_frr_cmd(rnode, "clear bgp ipv6 *")
+ else:
+ run_frr_cmd(rnode, "clear bgp *")
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+
+def clear_bgp_and_verify(tgen, topo, router, rid=None):
+ """
+    This API is to clear bgp neighborship and verify that the neighborship
+    comes back up (BGP is converged) using the "show bgp summary json"
+    command. It also verifies that every BGP neighbor's uptime differs
+    before and after the clear, as the uptime must change once BGP sessions
+    are cleared using the "clear ip bgp *" / "clear bgp ipv6 *" commands.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `router`: device under test
+
+ Usage
+ -----
+    result = clear_bgp_and_verify(tgen, topo, router)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if router not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[router]
+
+ peer_uptime_before_clear_bgp = {}
+ sleeptime = 3
+
+ # Verifying BGP convergence before bgp clear command
+ for retry in range(50):
+ # Waiting for BGP to converge
+ logger.info(
+ "Waiting for %s sec for BGP to converge on router" " %s...",
+ sleeptime,
+ router,
+ )
+ sleep(sleeptime)
+
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True)
+ # Verifying output dictionary show_bgp_json is empty or not
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
+
+ # To find neighbor ip type
+ try:
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ except TypeError:
+ bgp_addr_type = topo["routers"][router]["bgp"][0]["address_family"]
+
+ total_peer = 0
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ no_of_peer = 0
+ for addr_type in bgp_addr_type:
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"]["peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+
+ # Peer up time dictionary
+ peer_uptime_before_clear_bgp[bgp_neighbor] = ipv4_data[
+ neighbor_ip
+ ]["peerUptimeEstablishedEpoch"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"]["peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ # Peer up time dictionary
+ peer_uptime_before_clear_bgp[bgp_neighbor] = ipv6_data[
+ neighbor_ip
+ ]["peerUptimeEstablishedEpoch"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+
+ if no_of_peer == total_peer:
+ logger.info("BGP is Converged for router %s before bgp" " clear", router)
+ break
+ else:
+ logger.info(
+ "BGP is not yet Converged for router %s " "before bgp clear", router
+ )
+ else:
+ errormsg = (
+ "TIMEOUT!! BGP is not converged in {} seconds for"
+ " router {}".format(retry * sleeptime, router)
+ )
+ return errormsg
+
+ # Clearing BGP
+ logger.info("Clearing BGP neighborship for router %s..", router)
+ for addr_type in bgp_addr_type.keys():
+ if addr_type == "ipv4":
+ if rid:
+ run_frr_cmd(rnode, "clear bgp ipv4 {}".format(rid))
+ else:
+ run_frr_cmd(rnode, "clear bgp ipv4 *")
+ elif addr_type == "ipv6":
+ if rid:
+ run_frr_cmd(rnode, "clear bgp ipv6 {}".format(rid))
+ else:
+ run_frr_cmd(rnode, "clear bgp ipv6 *")
+ peer_uptime_after_clear_bgp = {}
+ # Verifying BGP convergence after bgp clear command
+ for retry in range(50):
+ # Waiting for BGP to converge
+ logger.info(
+ "Waiting for %s sec for BGP to converge on router" " %s...",
+ sleeptime,
+ router,
+ )
+ sleep(sleeptime)
+
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True)
+ # Verifying output dictionary show_bgp_json is empty or not
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
+
+ # To find neighbor ip type
+ try:
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ except TypeError:
+ bgp_addr_type = topo["routers"][router]["bgp"][0]["address_family"]
+
+ total_peer = 0
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ no_of_peer = 0
+ for addr_type in bgp_addr_type:
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"]["peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ peer_uptime_after_clear_bgp[bgp_neighbor] = ipv4_data[
+ neighbor_ip
+ ]["peerUptimeEstablishedEpoch"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"]["peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+ # Peer up time dictionary
+ peer_uptime_after_clear_bgp[bgp_neighbor] = ipv6_data[
+ neighbor_ip
+ ]["peerUptimeEstablishedEpoch"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+
+ if no_of_peer == total_peer:
+ logger.info("BGP is Converged for router %s after bgp clear", router)
+ break
+ else:
+ logger.info(
+ "BGP is not yet Converged for router %s after" " bgp clear", router
+ )
+ else:
+ errormsg = (
+ "TIMEOUT!! BGP is not converged in {} seconds for"
+ " router {}".format(retry * sleeptime, router)
+ )
+ return errormsg
+
+ # Comparing peerUptimeEstablishedEpoch dictionaries
+ if peer_uptime_before_clear_bgp != peer_uptime_after_clear_bgp:
+ logger.info("BGP neighborship is reset after clear BGP on router %s", router)
+ else:
+ errormsg = (
+ "BGP neighborship is not reset after clear bgp on router"
+ " {}".format(router)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
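+# Illustrative sketch for clear_bgp_and_verify() (router name and
+# router-id below are examples); rid limits the clear to one peer:
+#
+#     result = clear_bgp_and_verify(tgen, topo, "r1")
+#     assert result is True, "clear/re-converge failed: {}".format(result)
+#     result = clear_bgp_and_verify(tgen, topo, "r1", rid="10.0.0.2")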
+
+def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
+ """
+ To verify BGP timer config, execute "show ip bgp neighbor json" command
+ and verify bgp timers with input_dict data.
+    To verify bgp timers functionality, shut down the peer interface
+    and verify BGP neighborship status.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `input_dict`: defines for which router, bgp timers needs to be verified
+
+ Usage:
+ # To verify BGP timers for neighbor r2 of router r1
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "bgp_neighbors":{
+ "r2":{
+ "keepalivetimer": 5,
+ "holddowntimer": 15,
+ }}}}}
+    result = verify_bgp_timers_and_functionality(tgen, topo, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ sleep(5)
+ router_list = tgen.routers()
+ for router in input_dict.keys():
+ if router not in router_list:
+ continue
+
+ rnode = router_list[router]
+
+ logger.info("Verifying bgp timers functionality, DUT is %s:", router)
+
+ show_ip_bgp_neighbor_json = run_frr_cmd(
+ rnode, "show ip bgp neighbor json", isjson=True
+ )
+
+ bgp_addr_type = input_dict[router]["bgp"]["address_family"]
+
+ for addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ keepalivetimer = peer_dict["keepalivetimer"]
+ holddowntimer = peer_dict["holddowntimer"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+ neighbor_intf = data[dest_link]["interface"]
+
+ # Verify HoldDownTimer for neighbor
+ bgpHoldTimeMsecs = show_ip_bgp_neighbor_json[neighbor_ip][
+ "bgpTimerHoldTimeMsecs"
+ ]
+ if bgpHoldTimeMsecs != holddowntimer * 1000:
+ errormsg = (
+ "Verifying holddowntimer for bgp "
+ "neighbor {} under dut {}, found: {} "
+ "but expected: {}".format(
+ neighbor_ip,
+ router,
+ bgpHoldTimeMsecs,
+ holddowntimer * 1000,
+ )
+ )
+ return errormsg
+
+ # Verify KeepAliveTimer for neighbor
+ bgpKeepAliveTimeMsecs = show_ip_bgp_neighbor_json[neighbor_ip][
+ "bgpTimerKeepAliveIntervalMsecs"
+ ]
+ if bgpKeepAliveTimeMsecs != keepalivetimer * 1000:
+ errormsg = (
+ "Verifying keepalivetimer for bgp "
+ "neighbor {} under dut {}, found: {} "
+ "but expected: {}".format(
+ neighbor_ip,
+ router,
+ bgpKeepAliveTimeMsecs,
+ keepalivetimer * 1000,
+ )
+ )
+ return errormsg
+
+ ####################
+ # Shutting down peer interface after keepalive time and
+ # after some time bringing up peer interface.
+ # verifying BGP neighborship in (hold down-keep alive)
+ # time, it should not go down
+ ####################
+
+ # Wait till keep alive time
+ logger.info("=" * 20)
+ logger.info("Scenario 1:")
+ logger.info(
+ "Shutdown and bring up peer interface: %s "
+ "in keep alive time : %s sec and verify "
+ " BGP neighborship is intact in %s sec ",
+ neighbor_intf,
+ keepalivetimer,
+ (holddowntimer - keepalivetimer),
+ )
+ logger.info("=" * 20)
+ logger.info("Waiting for %s sec..", keepalivetimer)
+ sleep(keepalivetimer)
+
+                    # Shutting down peer interface
+ logger.info(
+ "Shutting down interface %s on router %s",
+ neighbor_intf,
+ bgp_neighbor,
+ )
+ topotest.interface_set_status(
+ router_list[bgp_neighbor], neighbor_intf, ifaceaction=False
+ )
+
+ # Bringing up peer interface
+ sleep(5)
+ logger.info(
+ "Bringing up interface %s on router %s..",
+ neighbor_intf,
+ bgp_neighbor,
+ )
+ topotest.interface_set_status(
+ router_list[bgp_neighbor], neighbor_intf, ifaceaction=True
+ )
+
+ # Verifying BGP neighborship is intact in
+ # (holddown - keepalive) time
+ for timer in range(
+ keepalivetimer, holddowntimer, int(holddowntimer / 3)
+ ):
+ logger.info("Waiting for %s sec..", keepalivetimer)
+ sleep(keepalivetimer)
+ sleep(2)
+ show_bgp_json = run_frr_cmd(
+ rnode, "show bgp summary json", isjson=True
+ )
+
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"]["peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"]["peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ if timer == (holddowntimer - keepalivetimer):
+ if nh_state != "Established":
+ errormsg = (
+ "BGP neighborship has not gone "
+ "down in {} sec for neighbor {}".format(
+ timer, bgp_neighbor
+ )
+ )
+ return errormsg
+ else:
+ logger.info(
+ "BGP neighborship is intact in %s"
+ " sec for neighbor %s",
+ timer,
+ bgp_neighbor,
+ )
+
+ ####################
+ # Shutting down peer interface and verifying that BGP
+ # neighborship is going down in holddown time
+ ####################
+ logger.info("=" * 20)
+ logger.info("Scenario 2:")
+ logger.info(
+ "Shutdown peer interface: %s and verify BGP"
+ " neighborship has gone down in hold down "
+ "time %s sec",
+ neighbor_intf,
+ holddowntimer,
+ )
+ logger.info("=" * 20)
+
+ logger.info(
+ "Shutting down interface %s on router %s..",
+ neighbor_intf,
+ bgp_neighbor,
+ )
+ topotest.interface_set_status(
+ router_list[bgp_neighbor], neighbor_intf, ifaceaction=False
+ )
+
+ # Verifying BGP neighborship is going down in holddown time
+ for timer in range(
+ keepalivetimer,
+ (holddowntimer + keepalivetimer),
+ int(holddowntimer / 3),
+ ):
+ logger.info("Waiting for %s sec..", keepalivetimer)
+ sleep(keepalivetimer)
+ sleep(2)
+ show_bgp_json = run_frr_cmd(
+ rnode, "show bgp summary json", isjson=True
+ )
+
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"]["peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"]["peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ if timer == holddowntimer:
+ if nh_state == "Established":
+ errormsg = (
+ "BGP neighborship has not gone "
+ "down in {} sec for neighbor {}".format(
+ timer, bgp_neighbor
+ )
+ )
+ return errormsg
+ else:
+ logger.info(
+ "BGP neighborship has gone down in"
+ " %s sec for neighbor %s",
+ timer,
+ bgp_neighbor,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
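+# Timing note for verify_bgp_timers_and_functionality(), using the
+# docstring's keepalivetimer=5 / holddowntimer=15: scenario 1 flaps the
+# peer link and expects the session to stay Established through the
+# (holddown - keepalive) = 10 sec window; scenario 2 keeps the link down
+# and expects the session to drop once the 15 sec holddown expires.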
+
+@retry(retry_timeout=16)
+def verify_bgp_attributes(
+ tgen,
+ addr_type,
+ dut,
+ static_routes,
+ rmap_name=None,
+ input_dict=None,
+ seq_id=None,
+ vrf=None,
+ nexthop=None,
+ expected=True,
+):
+ """
+ API will verify BGP attributes set by Route-map for given prefix and
+    DUT. It will run "show bgp ipv4/ipv6 {prefix_address} json" command
+    on DUT to verify BGP attributes set by route-map. Set attribute
+    values will be read from input_dict and verified against command output.
+
+ * `tgen`: topogen object
+ * `addr_type` : ip type, ipv4/ipv6
+ * `dut`: Device Under Test
+ * `static_routes`: Static Routes for which BGP set attributes needs to be
+ verified
+ * `rmap_name`: route map name for which set criteria needs to be verified
+    * `input_dict`: defines which router's route-map set values need to be verified
+ * `seq_id`: sequence number of rmap, default is None
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ # To verify BGP attribute "localpref" set to 150 and "med" set to 30
+ for prefix 10.0.20.1/32 in router r3.
+ input_dict = {
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_list1": [
+ {
+ "action": "PERMIT",
+ "match": {"prefix_list": "pf_list_1"},
+ "set": {"localpref": 150, "med": 30}
+ }
+ ],
+ },
+ "as_path": "500 400"
+ }
+ }
+ static_routes (list) = ["10.0.20.1/32"]
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ for router, rnode in tgen.routers().items():
+ if router != dut:
+ continue
+
+ logger.info("Verifying BGP set attributes for dut {}:".format(router))
+
+ for static_route in static_routes:
+ if vrf:
+ cmd = "show bgp vrf {} {} {} json".format(vrf, addr_type, static_route)
+ else:
+ cmd = "show bgp {} {} json".format(addr_type, static_route)
+ show_bgp_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ dict_to_test = []
+ tmp_list = []
+
+ dict_list = list(input_dict.values())[0]
+
+ if "route_maps" in dict_list:
+ for rmap_router in input_dict.keys():
+ for rmap, values in input_dict[rmap_router]["route_maps"].items():
+ if rmap == rmap_name:
+ dict_to_test = values
+ for rmap_dict in values:
+ if seq_id is not None:
+ if type(seq_id) is not list:
+ seq_id = [seq_id]
+
+ if "seq_id" in rmap_dict:
+ rmap_seq_id = rmap_dict["seq_id"]
+ for _seq_id in seq_id:
+ if _seq_id == rmap_seq_id:
+ tmp_list.append(rmap_dict)
+ if tmp_list:
+ dict_to_test = tmp_list
+
+ value = None
+ for rmap_dict in dict_to_test:
+ if "set" in rmap_dict:
+ for criteria in rmap_dict["set"].keys():
+ found = False
+ for path in show_bgp_json["paths"]:
+ if criteria not in path:
+ continue
+
+ if criteria == "aspath":
+ value = path[criteria]["string"]
+ else:
+ value = path[criteria]
+
+ if rmap_dict["set"][criteria] == value:
+ found = True
+ logger.info(
+ "Verifying BGP "
+ "attribute {} for"
+ " route: {} in "
+ "router: {}, found"
+ " expected value:"
+ " {}".format(
+ criteria,
+ static_route,
+ dut,
+ value,
+ )
+ )
+ break
+
+ if not found:
+ errormsg = (
+ "Failed: Verifying BGP "
+ "attribute {} for route:"
+ " {} in router: {}, "
+ " expected value: {} but"
+ " found: {}".format(
+ criteria,
+ static_route,
+ dut,
+ rmap_dict["set"][criteria],
+ value,
+ )
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
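+# Illustrative call for verify_bgp_attributes(), pairing the docstring's
+# input_dict with its route-map name (all names are examples):
+#
+#     result = verify_bgp_attributes(
+#         tgen, "ipv4", "r3", ["10.0.20.1/32"],
+#         rmap_name="rmap_match_pf_list1", input_dict=input_dict,
+#     )
+#     assert result is True, result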
+
+@retry(retry_timeout=8)
+def verify_best_path_as_per_bgp_attribute(
+ tgen, addr_type, router, input_dict, attribute, expected=True
+):
+ """
+ API is to verify best path according to BGP attributes for given routes.
+ "show bgp ipv4/6 json" command will be run and verify best path according
+ to shortest as-path, highest local-preference and med, lowest weight and
+ route origin IGP>EGP>INCOMPLETE.
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `addr_type` : ip type, ipv4/ipv6
+    * `router` : device under test
+ * `attribute` : calculate best path using this attribute
+ * `input_dict`: defines different routes to calculate for which route
+ best path is selected
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ # To verify best path for routes 200.50.2.0/32 and 200.60.2.0/32 from
+ router r7 to router r1(DUT) as per shortest as-path attribute
+ input_dict = {
+ "r7": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": "200.50.2.0/32"
+ },
+ {
+ "network": "200.60.2.0/32"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ attribute = "locPrf"
+ result = verify_best_path_as_per_bgp_attribute(tgen, "ipv4", dut, \
+ input_dict, attribute)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if router not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[router]
+
+ # Verifying show bgp json
+ command = "show bgp"
+
+ sleep(2)
+ logger.info("Verifying router %s RIB for best path:", router)
+
+ static_route = False
+ advertise_network = False
+ for route_val in input_dict.values():
+ if "static_routes" in route_val:
+ static_route = True
+ networks = route_val["static_routes"]
+ else:
+ advertise_network = True
+ net_data = route_val["bgp"]["address_family"][addr_type]["unicast"]
+ networks = net_data["advertise_networks"]
+
+ for network in networks:
+ _network = network["network"]
+ no_of_ip = network.setdefault("no_of_ip", 1)
+ vrf = network.setdefault("vrf", None)
+
+ if vrf:
+ cmd = "{} vrf {}".format(command, vrf)
+ else:
+ cmd = command
+
+ cmd = "{} {}".format(cmd, addr_type)
+ cmd = "{} json".format(cmd)
+ sh_ip_bgp_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ routes = generate_ips(_network, no_of_ip)
+ for route in routes:
+ route = str(ipaddress.ip_network(frr_unicode(route)))
+
+ if route in sh_ip_bgp_json["routes"]:
+ route_attributes = sh_ip_bgp_json["routes"][route]
+ _next_hop = None
+ compare = None
+ attribute_dict = {}
+ for route_attribute in route_attributes:
+ next_hops = route_attribute["nexthops"]
+ for next_hop in next_hops:
+ next_hop_ip = next_hop["ip"]
+ attribute_dict[next_hop_ip] = route_attribute[attribute]
+
+ # AS_PATH attribute
+ if attribute == "path":
+ # Find next_hop for the route have minimum as_path
+ _next_hop = min(
+ attribute_dict, key=lambda x: len(set(attribute_dict[x]))
+ )
+ compare = "SHORTEST"
+
+ # LOCAL_PREF attribute
+ elif attribute == "locPrf":
+ # Find next_hop for the route have highest local preference
+ _next_hop = max(
+ attribute_dict, key=(lambda k: attribute_dict[k])
+ )
+ compare = "HIGHEST"
+
+ # WEIGHT attribute
+ elif attribute == "weight":
+ # Find next_hop for the route have highest weight
+ _next_hop = max(
+ attribute_dict, key=(lambda k: attribute_dict[k])
+ )
+ compare = "HIGHEST"
+
+ # ORIGIN attribute
+ elif attribute == "origin":
+ # Find next_hop for the route have IGP as origin, -
+ # - rule is IGP>EGP>INCOMPLETE
+ _next_hop = [
+ key
+ for (key, value) in attribute_dict.items()
+ if value == "IGP"
+ ][0]
+ compare = ""
+
+ # MED attribute
+ elif attribute == "metric":
+ # Find next_hop for the route have LOWEST MED
+ _next_hop = min(
+ attribute_dict, key=(lambda k: attribute_dict[k])
+ )
+ compare = "LOWEST"
+
+ # Show ip route
+ if addr_type == "ipv4":
+ command_1 = "show ip route"
+ else:
+ command_1 = "show ipv6 route"
+
+ if vrf:
+ cmd = "{} vrf {} json".format(command_1, vrf)
+ else:
+ cmd = "{} json".format(command_1)
+
+ rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+ if not bool(rib_routes_json):
+ errormsg = "No route found in RIB of router {}..".format(router)
+ return errormsg
+
+ st_found = False
+ nh_found = False
+ # Find best is installed in RIB
+ if route in rib_routes_json:
+ st_found = True
+ # Verify next_hop in rib_routes_json
+ if (
+ rib_routes_json[route][0]["nexthops"][0]["ip"]
+ in attribute_dict
+ ):
+ nh_found = True
+ else:
+ errormsg = (
+ "Incorrect Nexthop for BGP route {} in "
+ "RIB of router {}, Expected: {}, Found:"
+ " {}\n".format(
+ route,
+ router,
+ rib_routes_json[route][0]["nexthops"][0]["ip"],
+ _next_hop,
+ )
+ )
+ return errormsg
+
+ if st_found and nh_found:
+ logger.info(
+ "Best path for prefix: %s with next_hop: %s is "
+ "installed according to %s %s: (%s) in RIB of "
+ "router %s",
+ route,
+ _next_hop,
+ compare,
+ attribute,
+ attribute_dict[_next_hop],
+ router,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
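+# The attribute argument of verify_best_path_as_per_bgp_attribute() must
+# be one of the "show bgp ... json" path keys handled above; a minimal
+# sketch (router name is an example):
+#
+#     for attr in ("path", "locPrf", "weight", "origin", "metric"):
+#         result = verify_best_path_as_per_bgp_attribute(
+#             tgen, "ipv4", "r1", input_dict, attr
+#         )
+#         assert result is True, result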
+
+@retry(retry_timeout=10)
+def verify_best_path_as_per_admin_distance(
+ tgen, addr_type, router, input_dict, attribute, expected=True, vrf=None
+):
+ """
+ API is to verify best path according to admin distance for given
+ route. "show ip/ipv6 route json" command will be run and verify
+    best path according to the lowest admin distance.
+
+ Parameters
+ ----------
+ * `addr_type` : ip type, ipv4/ipv6
+ * `dut`: Device Under Test
+ * `tgen` : topogen object
+ * `attribute` : calculate best path using admin distance
+ * `input_dict`: defines different routes with different admin distance
+ to calculate for which route best path is selected
+ * `expected` : expected results from API, by-default True
+    * `vrf`: Pass vrf name to check for a particular vrf.
+
+ Usage
+ -----
+ # To verify best path for route 200.50.2.0/32 from router r2 to
+    router r1(DUT) as per lowest admin distance, which is 60.
+ input_dict = {
+ "r2": {
+ "static_routes": [{"network": "200.50.2.0/32", \
+ "admin_distance": 80, "next_hop": "10.0.0.14"},
+ {"network": "200.50.2.0/32", \
+ "admin_distance": 60, "next_hop": "10.0.0.18"}]
+ }}
+ attribute = "locPrf"
+ result = verify_best_path_as_per_admin_distance(tgen, "ipv4", dut, \
+ input_dict, attribute):
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ router_list = tgen.routers()
+ if router not in router_list:
+ return False
+
+ rnode = tgen.routers()[router]
+
+ sleep(5)
+ logger.info("Verifying router %s RIB for best path:", router)
+
+ # Show ip route cmd
+ if addr_type == "ipv4":
+ command = "show ip route"
+ else:
+ command = "show ipv6 route"
+
+ if vrf:
+ command = "{} vrf {} json".format(command, vrf)
+ else:
+ command = "{} json".format(command)
+
+ for routes_from_router in input_dict.keys():
+ sh_ip_route_json = router_list[routes_from_router].vtysh_cmd(
+ command, isjson=True
+ )
+ networks = input_dict[routes_from_router]["static_routes"]
+ for network in networks:
+ route = network["network"]
+
+ route_attributes = sh_ip_route_json[route]
+ _next_hop = None
+ compare = None
+ attribute_dict = {}
+ for route_attribute in route_attributes:
+ next_hops = route_attribute["nexthops"]
+ for next_hop in next_hops:
+ next_hop_ip = next_hop["ip"]
+ attribute_dict[next_hop_ip] = route_attribute["distance"]
+
+ # Find next_hop for the route have LOWEST Admin Distance
+ _next_hop = min(attribute_dict, key=(lambda k: attribute_dict[k]))
+ compare = "LOWEST"
+
+ # Show ip route
+ rib_routes_json = run_frr_cmd(rnode, command, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+ if not bool(rib_routes_json):
+ errormsg = "No route found in RIB of router {}..".format(router)
+ return errormsg
+
+ st_found = False
+ nh_found = False
+ # Find best is installed in RIB
+ if route in rib_routes_json:
+ st_found = True
+ # Verify next_hop in rib_routes_json
+ if [
+ nh
+ for nh in rib_routes_json[route][0]["nexthops"]
+ if nh["ip"] == _next_hop
+ ]:
+ nh_found = True
+ else:
+ errormsg = (
+ "Nexthop {} is Missing for BGP route {}"
+ " in RIB of router {}\n".format(_next_hop, route, router)
+ )
+ return errormsg
+
+ if st_found and nh_found:
+ logger.info(
+ "Best path for prefix: %s is installed according"
+ " to %s %s: (%s) in RIB of router %s",
+ route,
+ compare,
+ attribute,
+ attribute_dict[_next_hop],
+ router,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
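+# Illustrative sketch for verify_best_path_as_per_admin_distance(),
+# reusing the docstring's two static routes toward different next hops
+# (the attribute string is only used in log messages):
+#
+#     result = verify_best_path_as_per_admin_distance(
+#         tgen, "ipv4", "r1", input_dict, "admin distance"
+#     )
+#     assert result is True, result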
+
+@retry(retry_timeout=30)
+def verify_bgp_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=None,
+ aspath=None,
+ multi_nh=None,
+ expected=True,
+):
+ """
+ This API is to verify whether bgp rib has any
+ matching route for a nexthop.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: input dut router name
+ * `addr_type` : ip type ipv4/ipv6
+ * `input_dict` : input dict, has details of static routes
+ * `next_hop`[optional]: next_hop which needs to be verified,
+ default = static
+ * 'aspath'[optional]: aspath which needs to be verified
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ dut = 'r1'
+ next_hop = "192.168.1.10"
+ input_dict = topo['routers']
+ aspath = "100 200 300"
+    result = verify_bgp_rib(tgen, addr_type, dut, input_dict,
+                            next_hop, aspath)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: verify_bgp_rib()")
+
+ router_list = tgen.routers()
+ additional_nexthops_in_required_nhs = []
+ list1 = []
+ list2 = []
+ found_hops = []
+ for routerInput in input_dict.keys():
+ for router, rnode in router_list.items():
+ if router != dut:
+ continue
+
+ # Verifying RIB routes
+ command = "show bgp"
+
+ # Static routes
+ sleep(2)
+ logger.info("Checking router {} BGP RIB:".format(dut))
+
+ if "static_routes" in input_dict[routerInput]:
+ static_routes = input_dict[routerInput]["static_routes"]
+
+ for static_route in static_routes:
+ found_routes = []
+ missing_routes = []
+ st_found = False
+ nh_found = False
+
+ vrf = static_route.setdefault("vrf", None)
+ community = static_route.setdefault("community", None)
+ largeCommunity = static_route.setdefault("largeCommunity", None)
+
+ if vrf:
+ cmd = "{} vrf {} {}".format(command, vrf, addr_type)
+
+ if community:
+ cmd = "{} community {}".format(cmd, community)
+
+ if largeCommunity:
+ cmd = "{} large-community {}".format(cmd, largeCommunity)
+ else:
+ cmd = "{} {}".format(command, addr_type)
+
+ cmd = "{} json".format(cmd)
+
+ rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+                    if not bool(rib_routes_json):
+ errormsg = "No route found in rib of router {}..".format(router)
+ return errormsg
+
+ network = static_route["network"]
+
+ if "no_of_ip" in static_route:
+ no_of_ip = static_route["no_of_ip"]
+ else:
+ no_of_ip = 1
+
+ # Generating IPs for verification
+ ip_list = generate_ips(network, no_of_ip)
+
+ for st_rt in ip_list:
+ st_rt = str(ipaddress.ip_network(frr_unicode(st_rt)))
+
+ _addr_type = validate_ip_address(st_rt)
+ if _addr_type != addr_type:
+ continue
+
+ if st_rt in rib_routes_json["routes"]:
+ st_found = True
+ found_routes.append(st_rt)
+
+ if next_hop and multi_nh and st_found:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+ list1 = next_hop
+
+ for mnh in range(
+ 0, len(rib_routes_json["routes"][st_rt])
+ ):
+ found_hops.append(
+ [
+ rib_r["ip"]
+ for rib_r in rib_routes_json["routes"][
+ st_rt
+ ][mnh]["nexthops"]
+ ]
+ )
+ for mnh in found_hops:
+ for each_nh_in_multipath in mnh:
+ list2.append(each_nh_in_multipath)
+ if found_hops[0]:
+ missing_list_of_nexthops = set(list2).difference(
+ list1
+ )
+ additional_nexthops_in_required_nhs = set(
+ list1
+ ).difference(list2)
+
+ if list2:
+ if additional_nexthops_in_required_nhs:
+ logger.info(
+ "Missing nexthop %s for route"
+ " %s in RIB of router %s\n",
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ else:
+ nh_found = True
+
+ elif next_hop and multi_nh is None:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+ list1 = next_hop
+ found_hops = [
+ rib_r["ip"]
+ for rib_r in rib_routes_json["routes"][st_rt][0][
+ "nexthops"
+ ]
+ ]
+ list2 = found_hops
+ missing_list_of_nexthops = set(list2).difference(list1)
+ additional_nexthops_in_required_nhs = set(
+ list1
+ ).difference(list2)
+
+ if list2:
+ if additional_nexthops_in_required_nhs:
+ logger.info(
+ "Missing nexthop %s for route"
+ " %s in RIB of router %s\n",
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ errormsg = (
+ "Nexthop {} is Missing for "
+ "route {} in RIB of router {}\n".format(
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ )
+ return errormsg
+ else:
+ nh_found = True
+
+ if aspath:
+ found_paths = rib_routes_json["routes"][st_rt][0][
+ "path"
+ ]
+ if aspath == found_paths:
+ aspath_found = True
+ logger.info(
+ "Found AS path {} for route"
+ " {} in RIB of router "
+ "{}\n".format(aspath, st_rt, dut)
+ )
+ else:
+ errormsg = (
+ "AS Path {} is missing for route"
+ "for route {} in RIB of router {}\n".format(
+ aspath, st_rt, dut
+ )
+ )
+ return errormsg
+
+ else:
+ missing_routes.append(st_rt)
+
+ if nh_found:
+ logger.info(
+ "Found next_hop {} for all bgp"
+ " routes in RIB of"
+ " router {}\n".format(next_hop, router)
+ )
+
+ if len(missing_routes) > 0:
+ errormsg = (
+ "Missing route in RIB of router {}, "
+ "routes: {}\n".format(dut, missing_routes)
+ )
+ return errormsg
+
+ if found_routes:
+ logger.info(
+ "Verified routes in router {} BGP RIB, "
+ "found routes are: {} \n".format(dut, found_routes)
+ )
+ continue
+
+ if "bgp" not in input_dict[routerInput]:
+ continue
+
+ # Advertise networks
+ bgp_data_list = input_dict[routerInput]["bgp"]
+
+ if type(bgp_data_list) is not list:
+ bgp_data_list = [bgp_data_list]
+
+ for bgp_data in bgp_data_list:
+ vrf_id = bgp_data.setdefault("vrf", None)
+ if vrf_id:
+ cmd = "{} vrf {} {}".format(command, vrf_id, addr_type)
+ else:
+ cmd = "{} {}".format(command, addr_type)
+
+ cmd = "{} json".format(cmd)
+
+ rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+            if not bool(rib_routes_json):
+ errormsg = "No route found in rib of router {}..".format(router)
+ return errormsg
+
+ bgp_net_advertise = bgp_data["address_family"][addr_type]["unicast"]
+ advertise_network = bgp_net_advertise.setdefault(
+ "advertise_networks", []
+ )
+
+ for advertise_network_dict in advertise_network:
+ found_routes = []
+ missing_routes = []
+ found = False
+
+ network = advertise_network_dict["network"]
+
+ if "no_of_network" in advertise_network_dict:
+ no_of_network = advertise_network_dict["no_of_network"]
+ else:
+ no_of_network = 1
+
+ # Generating IPs for verification
+ ip_list = generate_ips(network, no_of_network)
+
+ for st_rt in ip_list:
+ st_rt = str(ipaddress.ip_network(frr_unicode(st_rt)))
+
+ _addr_type = validate_ip_address(st_rt)
+ if _addr_type != addr_type:
+ continue
+
+ if st_rt in rib_routes_json["routes"]:
+ found = True
+ found_routes.append(st_rt)
+ else:
+ found = False
+ missing_routes.append(st_rt)
+
+ if len(missing_routes) > 0:
+ errormsg = (
+ "Missing route in BGP RIB of router {},"
+ " are: {}\n".format(dut, missing_routes)
+ )
+ return errormsg
+
+ if found_routes:
+ logger.info(
+ "Verified routes in router {} BGP RIB, found "
+ "routes are: {}\n".format(dut, found_routes)
+ )
+
+ logger.debug("Exiting lib API: verify_bgp_rib()")
+ return True
+
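+# Illustrative ECMP check with verify_bgp_rib() (addresses are
+# examples): with multi_nh=True the expected next hops are matched
+# across all paths of each prefix instead of only the first path.
+#
+#     result = verify_bgp_rib(
+#         tgen, "ipv4", "r1", input_dict,
+#         next_hop=["10.0.0.2", "10.0.1.2"], multi_nh=True,
+#     )
+#     assert result is True, result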
+
+@retry(retry_timeout=10)
+def verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut, peer, expected=True
+):
+ """
+    This API is to verify graceful-restart configuration of DUT and
+    cross verify the same from the peer bgp router.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type` : ip type ipv4/ipv6
+ * `input_dict`: input dictionary, have details of Device Under Test, for
+ which user wants to test the data
+ * `dut`: input dut router name
+ * `peer`: input peer router name
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = verify_graceful_restart(tgen, topo, addr_type, input_dict,
+ dut = "r1", peer = 'r2')
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router, rnode in tgen.routers().items():
+ if router != dut:
+ continue
+
+ try:
+ bgp_addr_type = topo["routers"][dut]["bgp"]["address_family"]
+ except TypeError:
+ bgp_addr_type = topo["routers"][dut]["bgp"][0]["address_family"]
+
+ if addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ if bgp_neighbor != peer:
+ continue
+
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+
+ logger.info(
+ "[DUT: {}]: Checking bgp graceful-restart show"
+ " o/p {}".format(dut, neighbor_ip)
+ )
+
+ show_bgp_graceful_json = None
+
+ show_bgp_graceful_json = run_frr_cmd(
+ rnode,
+ "show bgp {} neighbor {} graceful-restart json".format(
+ addr_type, neighbor_ip
+ ),
+ isjson=True,
+ )
+
+ show_bgp_graceful_json_out = show_bgp_graceful_json[neighbor_ip]
+
+ if show_bgp_graceful_json_out["neighborAddr"] == neighbor_ip:
+ logger.info(
+ "[DUT: {}]: Neighbor ip matched {}".format(dut, neighbor_ip)
+ )
+ else:
+ errormsg = "[DUT: {}]: Neighbor ip NOT a matched {}".format(
+ dut, neighbor_ip
+ )
+ return errormsg
+
+ lmode = None
+ rmode = None
+ # Local GR mode
+ if "address_family" in input_dict[dut]["bgp"]:
+ bgp_neighbors = input_dict[dut]["bgp"]["address_family"][addr_type][
+ "unicast"
+ ]["neighbor"][peer]["dest_link"]
+
+ for dest_link, data in bgp_neighbors.items():
+ if (
+ "graceful-restart-helper" in data
+ and data["graceful-restart-helper"]
+ ):
+ lmode = "Helper"
+ elif "graceful-restart" in data and data["graceful-restart"]:
+ lmode = "Restart"
+ elif (
+ "graceful-restart-disable" in data
+ and data["graceful-restart-disable"]
+ ):
+ lmode = "Disable"
+ else:
+ lmode = None
+
+ if lmode is None:
+ if "graceful-restart" in input_dict[dut]["bgp"]:
+ if (
+ "graceful-restart" in input_dict[dut]["bgp"]["graceful-restart"]
+ and input_dict[dut]["bgp"]["graceful-restart"][
+ "graceful-restart"
+ ]
+ ):
+ lmode = "Restart*"
+ elif (
+ "graceful-restart-disable"
+ in input_dict[dut]["bgp"]["graceful-restart"]
+ and input_dict[dut]["bgp"]["graceful-restart"][
+ "graceful-restart-disable"
+ ]
+ ):
+ lmode = "Disable*"
+ else:
+ lmode = "Helper*"
+ else:
+ lmode = "Helper*"
+
+ if lmode == "Disable" or lmode == "Disable*":
+ return True
+
+ # Remote GR mode
+ if "address_family" in input_dict[peer]["bgp"]:
+ bgp_neighbors = input_dict[peer]["bgp"]["address_family"][addr_type][
+ "unicast"
+ ]["neighbor"][dut]["dest_link"]
+
+ for dest_link, data in bgp_neighbors.items():
+ if (
+ "graceful-restart-helper" in data
+ and data["graceful-restart-helper"]
+ ):
+ rmode = "Helper"
+ elif "graceful-restart" in data and data["graceful-restart"]:
+ rmode = "Restart"
+ elif (
+ "graceful-restart-disable" in data
+ and data["graceful-restart-disable"]
+ ):
+ rmode = "Disable"
+ else:
+ rmode = None
+
+ if rmode is None:
+ if "graceful-restart" in input_dict[peer]["bgp"]:
+ if (
+ "graceful-restart"
+ in input_dict[peer]["bgp"]["graceful-restart"]
+ and input_dict[peer]["bgp"]["graceful-restart"][
+ "graceful-restart"
+ ]
+ ):
+ rmode = "Restart"
+ elif (
+ "graceful-restart-disable"
+ in input_dict[peer]["bgp"]["graceful-restart"]
+ and input_dict[peer]["bgp"]["graceful-restart"][
+ "graceful-restart-disable"
+ ]
+ ):
+ rmode = "Disable"
+ else:
+ rmode = "Helper"
+ else:
+ rmode = "Helper"
+
+ if show_bgp_graceful_json_out["localGrMode"] == lmode:
+ logger.info(
+ "[DUT: {}]: localGrMode : {} ".format(
+ dut, show_bgp_graceful_json_out["localGrMode"]
+ )
+ )
+ else:
+ errormsg = (
+ "[DUT: {}]: localGrMode is not correct"
+ " Expected: {}, Found: {}".format(
+ dut, lmode, show_bgp_graceful_json_out["localGrMode"]
+ )
+ )
+ return errormsg
+
+ if show_bgp_graceful_json_out["remoteGrMode"] == rmode:
+ logger.info(
+ "[DUT: {}]: remoteGrMode : {} ".format(
+ dut, show_bgp_graceful_json_out["remoteGrMode"]
+ )
+ )
+ elif (
+ show_bgp_graceful_json_out["remoteGrMode"] == "NotApplicable"
+ and rmode == "Disable"
+ ):
+ logger.info(
+ "[DUT: {}]: remoteGrMode : {} ".format(
+ dut, show_bgp_graceful_json_out["remoteGrMode"]
+ )
+ )
+ else:
+ errormsg = (
+ "[DUT: {}]: remoteGrMode is not correct"
+ " Expected: {}, Found: {}".format(
+ dut, rmode, show_bgp_graceful_json_out["remoteGrMode"]
+ )
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
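+# GR mode resolution in verify_graceful_restart(): per-neighbor flags in
+# input_dict win over the router-global "graceful-restart" block, and a
+# trailing "*" marks a globally inherited local mode:
+#   per-neighbor "graceful-restart": True  -> local mode "Restart"
+#   global "graceful-restart": True        -> local mode "Restart*"
+#   nothing configured                     -> local mode "Helper*" (default)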
+
+@retry(retry_timeout=10)
+def verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
+ """
+ This API is to verify r_bit in the BGP gr capability advertised
+ by the neighbor router
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type` : ip type ipv4/ipv6
+ * `input_dict`: input dictionary, have details of Device Under Test, for
+ which user wants to test the data
+ * `dut`: input dut router name
+ * `peer`: peer name
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router, rnode in tgen.routers().items():
+ if router != dut:
+ continue
+
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+
+ if addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ if bgp_neighbor != peer:
+ continue
+
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+
+ logger.info(
+ "[DUT: {}]: Checking bgp graceful-restart show"
+ " o/p {}".format(dut, neighbor_ip)
+ )
+
+ show_bgp_graceful_json = run_frr_cmd(
+ rnode,
+ "show bgp {} neighbor {} graceful-restart json".format(
+ addr_type, neighbor_ip
+ ),
+ isjson=True,
+ )
+
+ show_bgp_graceful_json_out = show_bgp_graceful_json[neighbor_ip]
+
+ if show_bgp_graceful_json_out["neighborAddr"] == neighbor_ip:
+ logger.info(
+ "[DUT: {}]: Neighbor ip matched {}".format(dut, neighbor_ip)
+ )
+ else:
+ errormsg = "[DUT: {}]: Neighbor ip NOT a matched {}".format(
+ dut, neighbor_ip
+ )
+ return errormsg
+
+ if "rBit" in show_bgp_graceful_json_out:
+ if show_bgp_graceful_json_out["rBit"]:
+ logger.info("[DUT: {}]: Rbit true {}".format(dut, neighbor_ip))
+ else:
+ errormsg = "[DUT: {}]: Rbit false {}".format(dut, neighbor_ip)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=10)
+def verify_eor(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
+ """
+ This API is to verify EOR
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type` : ip type ipv4/ipv6
+ * `input_dict`: input dictionary, have details of DUT, for
+ which user wants to test the data
+ * `dut`: input dut router name
+ * `peer`: peer name
+ Usage
+ -----
+    input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = verify_eor(tgen, topo, addr_type, input_dict, dut, peer)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router, rnode in tgen.routers().items():
+ if router != dut:
+ continue
+
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+
+ if addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ if bgp_neighbor != peer:
+ continue
+
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+
+ logger.info(
+ "[DUT: %s]: Checking bgp graceful-restart" " show o/p %s",
+ dut,
+ neighbor_ip,
+ )
+
+ show_bgp_graceful_json = run_frr_cmd(
+ rnode,
+ "show bgp {} neighbor {} graceful-restart json".format(
+ addr_type, neighbor_ip
+ ),
+ isjson=True,
+ )
+
+ show_bgp_graceful_json_out = show_bgp_graceful_json[neighbor_ip]
+
+ if show_bgp_graceful_json_out["neighborAddr"] == neighbor_ip:
+ logger.info("[DUT: %s]: Neighbor ip matched %s", dut, neighbor_ip)
+ else:
+ errormsg = "[DUT: %s]: Neighbor ip is NOT matched %s" % (
+ dut,
+ neighbor_ip,
+ )
+ return errormsg
+
+ if addr_type == "ipv4":
+ afi = "ipv4Unicast"
+ elif addr_type == "ipv6":
+ afi = "ipv6Unicast"
+ else:
+ errormsg = "Address type %s is not supported" % (addr_type)
+ return errormsg
+
+ eor_json = show_bgp_graceful_json_out[afi]["endOfRibStatus"]
+ if "endOfRibSend" in eor_json:
+ if eor_json["endOfRibSend"]:
+ logger.info(
+ "[DUT: %s]: EOR Send true for %s " "%s", dut, neighbor_ip, afi
+ )
+ else:
+ errormsg = "[DUT: %s]: EOR Send false for %s" " %s" % (
+ dut,
+ neighbor_ip,
+ afi,
+ )
+ return errormsg
+
+ if "endOfRibRecv" in eor_json:
+ if eor_json["endOfRibRecv"]:
+ logger.info(
+ "[DUT: %s]: EOR Recv true %s " "%s", dut, neighbor_ip, afi
+ )
+ else:
+ errormsg = "[DUT: %s]: EOR Recv false %s " "%s" % (
+ dut,
+ neighbor_ip,
+ afi,
+ )
+ return errormsg
+
+ if "endOfRibSentAfterUpdate" in eor_json:
+ if eor_json["endOfRibSentAfterUpdate"]:
+ logger.info(
+ "[DUT: %s]: EOR SendTime true for %s" " %s",
+ dut,
+ neighbor_ip,
+ afi,
+ )
+ else:
+ errormsg = "[DUT: %s]: EOR SendTime false for " "%s %s" % (
+ dut,
+ neighbor_ip,
+ afi,
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=8)
+def verify_f_bit(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
+ """
+ This API is to verify f_bit in the BGP gr capability advertised
+ by the neighbor router
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type` : ip type ipv4/ipv6
+ * `input_dict`: input dictionary, have details of Device Under Test, for
+ which user wants to test the data
+ * `dut`: input dut router name
+ * `peer`: peer name
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link":{
+ "r1": {
+ "graceful-restart": True
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = verify_f_bit(tgen, topo, 'ipv4', input_dict, dut, peer)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router, rnode in tgen.routers().items():
+ if router != dut:
+ continue
+
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+
+ if addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ if bgp_neighbor != peer:
+ continue
+
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+
+ logger.info(
+ "[DUT: {}]: Checking bgp graceful-restart show"
+ " o/p {}".format(dut, neighbor_ip)
+ )
+
+ show_bgp_graceful_json = run_frr_cmd(
+ rnode,
+ "show bgp {} neighbor {} graceful-restart json".format(
+ addr_type, neighbor_ip
+ ),
+ isjson=True,
+ )
+
+ show_bgp_graceful_json_out = show_bgp_graceful_json[neighbor_ip]
+
+ if show_bgp_graceful_json_out["neighborAddr"] == neighbor_ip:
+ logger.info(
+ "[DUT: {}]: Neighbor ip matched {}".format(dut, neighbor_ip)
+ )
+ else:
+ errormsg = "[DUT: {}]: Neighbor ip NOT a match {}".format(
+ dut, neighbor_ip
+ )
+ return errormsg
+
+ if "ipv4Unicast" in show_bgp_graceful_json_out:
+ if show_bgp_graceful_json_out["ipv4Unicast"]["fBit"]:
+ logger.info(
+ "[DUT: {}]: Fbit True for {} IPv4"
+ " Unicast".format(dut, neighbor_ip)
+ )
+ else:
+ errormsg = "[DUT: {}]: Fbit False for {} IPv4" " Unicast".format(
+ dut, neighbor_ip
+ )
+ return errormsg
+
+ elif "ipv6Unicast" in show_bgp_graceful_json_out:
+ if show_bgp_graceful_json_out["ipv6Unicast"]["fBit"]:
+ logger.info(
+ "[DUT: {}]: Fbit True for {} IPv6"
+ " Unicast".format(dut, neighbor_ip)
+ )
+ else:
+ errormsg = "[DUT: {}]: Fbit False for {} IPv6" " Unicast".format(
+ dut, neighbor_ip
+ )
+ return errormsg
+ else:
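+                        # Neither AFI is present in the output; indexing
+                        # the keys below raises KeyError so the @retry
+                        # wrapper re-runs this verification.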
+ show_bgp_graceful_json_out["ipv4Unicast"]
+ show_bgp_graceful_json_out["ipv6Unicast"]
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
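+# The r-bit, EOR and f-bit checks above share one signature and are
+# typically chained after a graceful-restart event; an illustrative
+# sequence (router names are examples):
+#
+#     for check in (verify_r_bit, verify_eor, verify_f_bit):
+#         result = check(tgen, topo, "ipv4", input_dict, dut="r1", peer="r3")
+#         assert result is True, result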
+
+@retry(retry_timeout=10)
+def verify_graceful_restart_timers(tgen, topo, addr_type, input_dict, dut, peer):
+ """
+ This API is to verify graceful restart timers, configured and received
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type` : ip type ipv4/ipv6
+ * `input_dict`: input dictionary, have details of Device Under Test,
+ for which user wants to test the data
+ * `dut`: input dut router name
+ * `peer`: peer name
+
+ Usage
+ -----
+ # Configure graceful-restart
+ input_dict_1 = {
+ "r1": {
+ "bgp": {
+ "bgp_neighbors": {
+ "r3": {
+ "graceful-restart": "graceful-restart-helper"
+ }
+ },
+ "gracefulrestart": ["restart-time 150"]
+ }
+ },
+ "r3": {
+ "bgp": {
+ "bgp_neighbors": {
+ "r1": {
+ "graceful-restart": "graceful-restart"
+ }
+ }
+ }
+ }
+ }
+
+    result = verify_graceful_restart_timers(tgen, topo, 'ipv4',
+        input_dict_1, dut='r1', peer='r3')
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router, rnode in tgen.routers().items():
+ if router != dut:
+ continue
+
+ bgp_addr_type = topo["routers"][dut]["bgp"]["address_family"]
+
+ if addr_type in bgp_addr_type:
+ if not check_address_types(addr_type):
+ continue
+
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor, peer_data in bgp_neighbors.items():
+ if bgp_neighbor != peer:
+ continue
+
+ for dest_link, peer_dict in peer_data["dest_link"].items():
+ data = topo["routers"][bgp_neighbor]["links"]
+
+ if dest_link in data:
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
+
+ logger.info(
+ "[DUT: {}]: Checking bgp graceful-restart show"
+ " o/p {}".format(dut, neighbor_ip)
+ )
+
+ show_bgp_graceful_json = run_frr_cmd(
+ rnode,
+ "show bgp {} neighbor {} graceful-restart json".format(
+ addr_type, neighbor_ip
+ ),
+ isjson=True,
+ )
+
+ show_bgp_graceful_json_out = show_bgp_graceful_json[neighbor_ip]
+ if show_bgp_graceful_json_out["neighborAddr"] == neighbor_ip:
+ logger.info(
+ "[DUT: {}]: Neighbor ip matched {}".format(dut, neighbor_ip)
+ )
+ else:
+ errormsg = "[DUT: {}]: Neighbor ip is NOT matched {}".format(
+ dut, neighbor_ip
+ )
+ return errormsg
+
+ # Graceful-restart timer
+ if "graceful-restart" in input_dict[peer]["bgp"]:
+ if "timer" in input_dict[peer]["bgp"]["graceful-restart"]:
+ for rs_timer, value in input_dict[peer]["bgp"]["graceful-restart"][
+ "timer"
+ ].items():
+ if rs_timer == "restart-time":
+ receivedTimer = value
+ if (
+ show_bgp_graceful_json_out["timers"][
+ "receivedRestartTimer"
+ ]
+ == receivedTimer
+ ):
+ logger.info(
+ "receivedRestartTimer is {}"
+ " on {} from peer {}".format(
+ receivedTimer, router, peer
+ )
+ )
+ else:
+ errormsg = (
+ "receivedRestartTimer is not"
+ " as expected {}".format(receivedTimer)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
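+# Illustrative sketch for verify_graceful_restart_timers(): the expected
+# "restart-time" is read from the peer's entry in input_dict and compared
+# with the value the DUT received (names/values are examples):
+#
+#     input_dict_2 = {
+#         "r3": {"bgp": {"graceful-restart": {"timer": {"restart-time": 150}}}}
+#     }
+#     result = verify_graceful_restart_timers(
+#         tgen, topo, "ipv4", input_dict_2, dut="r1", peer="r3"
+#     )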
+
+@retry(retry_timeout=8)
+def verify_gr_address_family(
+ tgen, topo, addr_type, addr_family, dut, peer, expected=True
+):
+ """
+ This API is to verify gr_address_family in the BGP gr capability advertised
+ by the neighbor router
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: input json file data
+ * `addr_type` : ip type ipv4/ipv6
+    * `addr_family` : address family, ipv4Unicast/ipv6Unicast
+ * `dut`: input dut router name
+ * `peer`: input peer router to check
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+
+ result = verify_gr_address_family(tgen, topo, "ipv4", "ipv4Unicast", "r1", "r3")
+
+ Returns
+ -------
+    errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if not check_address_types(addr_type):
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return
+
+ routers = tgen.routers()
+ if dut not in routers:
+ return "{} not in routers".format(dut)
+
+ rnode = routers[dut]
+ bgp_addr_type = topo["routers"][dut]["bgp"]["address_family"]
+
+ if addr_type not in bgp_addr_type:
+ return "{} not in bgp_addr_types".format(addr_type)
+
+ if peer not in bgp_addr_type[addr_type]["unicast"]["neighbor"]:
+ return "{} not a peer of {} over {}".format(peer, dut, addr_type)
+
+ nbr_links = topo["routers"][peer]["links"]
+ if dut not in nbr_links or addr_type not in nbr_links[dut]:
+ return "peer {} missing back link to {} over {}".format(peer, dut, addr_type)
+
+ neighbor_ip = nbr_links[dut][addr_type].split("/")[0]
+
+ logger.info(
+ "[DUT: {}]: Checking bgp graceful-restart show o/p {} for {}".format(
+ dut, neighbor_ip, addr_family
+ )
+ )
+
+ show_bgp_graceful_json = run_frr_cmd(
+ rnode,
+ "show bgp {} neighbor {} graceful-restart json".format(addr_type, neighbor_ip),
+ isjson=True,
+ )
+
+ show_bgp_graceful_json_out = show_bgp_graceful_json[neighbor_ip]
+
+ if show_bgp_graceful_json_out["neighborAddr"] == neighbor_ip:
+ logger.info("Neighbor ip matched {}".format(neighbor_ip))
+ else:
+ errormsg = "Neighbor ip NOT a match {}".format(neighbor_ip)
+ return errormsg
+
+ if addr_family == "ipv4Unicast":
+ if "ipv4Unicast" in show_bgp_graceful_json_out:
+ logger.info("ipv4Unicast present for {} ".format(neighbor_ip))
+ return True
+ else:
+ errormsg = "ipv4Unicast NOT present for {} ".format(neighbor_ip)
+ return errormsg
+
+ elif addr_family == "ipv6Unicast":
+ if "ipv6Unicast" in show_bgp_graceful_json_out:
+ logger.info("ipv6Unicast present for {} ".format(neighbor_ip))
+ return True
+ else:
+ errormsg = "ipv6Unicast NOT present for {} ".format(neighbor_ip)
+ return errormsg
+ else:
+ errormsg = "Aaddress family: {} present for {} ".format(
+ addr_family, neighbor_ip
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
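+# Illustrative sketch for verify_gr_address_family(), checking both AFIs
+# with matching addr_type/addr_family pairs (router names are examples):
+#
+#     for at, af in (("ipv4", "ipv4Unicast"), ("ipv6", "ipv6Unicast")):
+#         result = verify_gr_address_family(tgen, topo, at, af, "r1", "r3")
+#         assert result is True, result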
+
+@retry(retry_timeout=12)
+def verify_attributes_for_evpn_routes(
+ tgen,
+ topo,
+ dut,
+ input_dict,
+ rd=None,
+ rt=None,
+ ethTag=None,
+ ipLen=None,
+ rd_peer=None,
+ rt_peer=None,
+ expected=True,
+):
+ """
+ API to verify rd and rt value using "sh bgp l2vpn evpn 10.1.1.1"
+ command.
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo` : json file data
+ * `dut` : device under test
+ * `input_dict`: having details like - for which route, rd value
+ needs to be verified
+ * `rd` : route distinguisher
+ * `rt` : route target
+ * `ethTag` : Ethernet Tag
+ * `ipLen` : IP prefix length
+ * `rd_peer` : Peer name from which RD will be auto-generated
+ * `rt_peer` : Peer name from which RT will be auto-generated
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ input_dict_1 = {
+ "r1": {
+ "static_routes": [{
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "RED"
+ }]
+ }
+ }
+
+    result = verify_attributes_for_evpn_routes(tgen, topo, dut,
+                 input_dict_1, rd="10.0.0.33:1")
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ for router in input_dict.keys():
+ rnode = tgen.routers()[dut]
+
+ if "static_routes" in input_dict[router]:
+ for static_route in input_dict[router]["static_routes"]:
+ network = static_route["network"]
+
+ if "vrf" in static_route:
+ vrf = static_route["vrf"]
+
+ if type(network) is not list:
+ network = [network]
+
+ for route in network:
+ route = route.split("/")[0]
+ _addr_type = validate_ip_address(route)
+ if "v4" in _addr_type:
+ input_afi = "v4"
+ elif "v6" in _addr_type:
+ input_afi = "v6"
+
+ cmd = "show bgp l2vpn evpn {} json".format(route)
+ evpn_rd_value_json = run_frr_cmd(rnode, cmd, isjson=True)
+ if not bool(evpn_rd_value_json):
+ errormsg = "No output for '{}' cli".format(cmd)
+ return errormsg
+
+ if rd is not None and rd != "auto":
+ logger.info(
+ "[DUT: %s]: Verifying rd value for " "evpn route %s:",
+ dut,
+ route,
+ )
+
+ if rd in evpn_rd_value_json:
+ rd_value_json = evpn_rd_value_json[rd]
+ if rd_value_json["rd"] != rd:
+ errormsg = (
+ "[DUT: %s] Failed: Verifying"
+ " RD value for EVPN route: %s"
+ "[FAILED]!!, EXPECTED : %s "
+ " FOUND : %s"
+ % (dut, route, rd, rd_value_json["rd"])
+ )
+ return errormsg
+
+ else:
+ logger.info(
+ "[DUT %s]: Verifying RD value for"
+ " EVPN route: %s [PASSED]|| "
+ "Found Expected: %s",
+ dut,
+ route,
+ rd,
+ )
+ return True
+
+ else:
+ errormsg = (
+ "[DUT: %s] RD : %s is not present"
+ " in cli json output" % (dut, rd)
+ )
+ return errormsg
+
+ if rd == "auto":
+ logger.info(
+ "[DUT: %s]: Verifying auto-rd value for " "evpn route %s:",
+ dut,
+ route,
+ )
+
+ if rd_peer:
+ index = 1
+ vni_dict = {}
+
+ rnode = tgen.routers()[rd_peer]
+ vrfs = topo["routers"][rd_peer]["vrfs"]
+ for vrf_dict in vrfs:
+ vni_dict[vrf_dict["name"]] = index
+ index += 1
+
+ show_bgp_json = run_frr_cmd(
+ rnode, "show bgp vrf all summary json", isjson=True
+ )
+
+ # Verifying output dictionary show_bgp_json is empty
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
+
+ show_bgp_json_vrf = show_bgp_json[vrf]
+ for afi, afi_data in show_bgp_json_vrf.items():
+ if input_afi not in afi:
+ continue
+ router_id = afi_data["routerId"]
+
+ found = False
+ rd = "{}:{}".format(router_id, vni_dict[vrf])
+ for _rd, rd_value_json in evpn_rd_value_json.items():
+ if (
+ str(rd_value_json["rd"].split(":")[0])
+ != rd.split(":")[0]
+ ):
+ continue
+
+ if int(rd_value_json["rd"].split(":")[1]) > 0:
+ found = True
+
+ if found:
+ logger.info(
+ "[DUT %s]: Verifying RD value for"
+ " EVPN route: %s "
+ "Found Expected: %s",
+ dut,
+ route,
+ rd_value_json["rd"],
+ )
+ return True
+ else:
+ errormsg = (
+ "[DUT: %s] Failed: Verifying"
+ " RD value for EVPN route: %s"
+ " FOUND : %s" % (dut, route, rd_value_json["rd"])
+ )
+ return errormsg
+
+ if rt == "auto":
+ vni_dict = {}
+ logger.info(
+ "[DUT: %s]: Verifying auto-rt value for " "evpn route %s:",
+ dut,
+ route,
+ )
+
+ if rt_peer:
+ rnode = tgen.routers()[rt_peer]
+ show_bgp_json = run_frr_cmd(
+ rnode, "show bgp vrf all summary json", isjson=True
+ )
+
+ # Verifying output dictionary show_bgp_json is empty
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
+
+ show_bgp_json_vrf = show_bgp_json[vrf]
+ for afi, afi_data in show_bgp_json_vrf.items():
+ if input_afi not in afi:
+ continue
+ as_num = afi_data["as"]
+
+ show_vrf_vni_json = run_frr_cmd(
+ rnode, "show vrf vni json", isjson=True
+ )
+
+ vrfs = show_vrf_vni_json["vrfs"]
+ for vrf_dict in vrfs:
+ if vrf_dict["vrf"] == vrf:
+ vni_dict[vrf_dict["vrf"]] = str(vrf_dict["vni"])
+
+ # If AS is 4 byte, FRR uses only the lower 2 bytes of ASN+VNI
+ # for auto derived RT value.
+ if as_num > 65535:
+ as_bin = bin(as_num)
+ as_bin = as_bin[-16:]
+ as_num = int(as_bin, 2)
+
+ rt = "{}:{}".format(str(as_num), vni_dict[vrf])
+ for _rd, route_data in evpn_rd_value_json.items():
+ if route_data["ip"] == route:
+ for rt_data in route_data["paths"]:
+ if vni_dict[vrf] == rt_data["vni"]:
+ rt_string = rt_data["extendedCommunity"][
+ "string"
+ ]
+ rt_input = "RT:{}".format(rt)
+ if rt_input not in rt_string:
+ errormsg = (
+ "[DUT: %s] Failed:"
+ " Verifying RT "
+ "value for EVPN "
+ " route: %s"
+ "[FAILED]!!,"
+ " EXPECTED : %s "
+ " FOUND : %s"
+ % (dut, route, rt_input, rt_string)
+ )
+ return errormsg
+
+ else:
+ logger.info(
+ "[DUT %s]: Verifying "
+ "RT value for EVPN "
+ "route: %s [PASSED]||"
+ "Found Expected: %s",
+ dut,
+ route,
+ rt_input,
+ )
+ return True
+
+ else:
+ errormsg = (
+ "[DUT: %s] Route : %s is not"
+ " present in cli json output" % (dut, route)
+ )
+ return errormsg
+
+ if rt is not None and rt != "auto":
+ logger.info(
+ "[DUT: %s]: Verifying rt value for " "evpn route %s:",
+ dut,
+ route,
+ )
+
+ if type(rt) is not list:
+ rt = [rt]
+
+ for _rt in rt:
+ for _rd, route_data in evpn_rd_value_json.items():
+ if route_data["ip"] == route:
+ for rt_data in route_data["paths"]:
+ rt_string = rt_data["extendedCommunity"][
+ "string"
+ ]
+ rt_input = "RT:{}".format(_rt)
+ if rt_input not in rt_string:
+ errormsg = (
+ "[DUT: %s] Failed: "
+ "Verifying RT value "
+ "for EVPN route: %s"
+ "[FAILED]!!,"
+ " EXPECTED : %s "
+ " FOUND : %s"
+ % (dut, route, rt_input, rt_string)
+ )
+ return errormsg
+
+ else:
+ logger.info(
+ "[DUT %s]: Verifying RT"
+ " value for EVPN route:"
+ " %s [PASSED]|| "
+ "Found Expected: %s",
+ dut,
+ route,
+ rt_input,
+ )
+ return True
+
+ else:
+ errormsg = (
+ "[DUT: %s] Route : %s is not"
+ " present in cli json output" % (dut, route)
+ )
+ return errormsg
+
+ if ethTag is not None:
+ logger.info(
+ "[DUT: %s]: Verifying ethTag value for " "evpn route :", dut
+ )
+
+ for _rd, route_data in evpn_rd_value_json.items():
+ if route_data["ip"] == route:
+ if route_data["ethTag"] != ethTag:
+ errormsg = (
+ "[DUT: %s] RD: %s, Failed: "
+ "Verifying ethTag value "
+ "for EVPN route: %s"
+ "[FAILED]!!,"
+ " EXPECTED : %s "
+ " FOUND : %s"
+ % (
+ dut,
+ _rd,
+ route,
+ ethTag,
+ route_data["ethTag"],
+ )
+ )
+ return errormsg
+
+ else:
+ logger.info(
+ "[DUT %s]: RD: %s, Verifying "
+ "ethTag value for EVPN route:"
+ " %s [PASSED]|| "
+ "Found Expected: %s",
+ dut,
+ _rd,
+ route,
+ ethTag,
+ )
+ return True
+
+ else:
+ errormsg = (
+ "[DUT: %s] RD: %s, Route : %s "
+ "is not present in cli json "
+ "output" % (dut, _rd, route)
+ )
+ return errormsg
+
+ if ipLen is not None:
+ logger.info(
+ "[DUT: %s]: Verifying ipLen value for " "evpn route :", dut
+ )
+
+ for _rd, route_data in evpn_rd_value_json.items():
+ if route_data["ip"] == route:
+ if route_data["ipLen"] != int(ipLen):
+ errormsg = (
+ "[DUT: %s] RD: %s, Failed: "
+ "Verifying ipLen value "
+ "for EVPN route: %s"
+ "[FAILED]!!,"
+ " EXPECTED : %s "
+ " FOUND : %s"
+ % (dut, _rd, route, ipLen, route_data["ipLen"])
+ )
+ return errormsg
+
+ else:
+ logger.info(
+ "[DUT %s]: RD: %s, Verifying "
+ "ipLen value for EVPN route:"
+ " %s [PASSED]|| "
+ "Found Expected: %s",
+ dut,
+ _rd,
+ route,
+ ipLen,
+ )
+ return True
+
+ else:
+ errormsg = (
+ "[DUT: %s] RD: %s, Route : %s "
+ "is not present in cli json "
+ "output " % (dut, _rd, route)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return False
+
+
+@retry(retry_timeout=10)
+def verify_evpn_routes(
+ tgen, topo, dut, input_dict, routeType=5, EthTag=0, next_hop=None, expected=True
+):
+ """
+ API to verify evpn routes using "sh bgp l2vpn evpn"
+ command.
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo` : json file data
+ * `dut` : device under test
+ * `input_dict`: having details like - for which route, rd value
+ needs to be verified
+ * `routeType` : Route type, only type 5 is supported as of now
+ * `EthTag` : Ethernet tag, by default is 0
+ * `next_hop` : Preferred nexthop for the evpn routes
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ input_dict_1 = {
+ "r1": {
+ "static_routes": [{
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "RED"
+ }]
+ }
+ }
+ result = verify_evpn_routes(tgen, topo, dut, input_dict_1)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router in input_dict.keys():
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying evpn routes: ", dut)
+
+ if "static_routes" in input_dict[router]:
+ for static_route in input_dict[router]["static_routes"]:
+ network = static_route["network"]
+
+ if type(network) is not list:
+ network = [network]
+
+ missing_routes = {}
+ for route in network:
+ rd_keys = 0
+ ip_len = route.split("/")[1]
+ route = route.split("/")[0]
+
+ prefix = "[{}]:[{}]:[{}]:[{}]".format(
+ routeType, EthTag, ip_len, route
+ )
+
+ cmd = "show bgp l2vpn evpn route json"
+ evpn_value_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ if not bool(evpn_value_json):
+ errormsg = "No output for '{}' cli".format(cmd)
+ return errormsg
+
+ if evpn_value_json["numPrefix"] == 0:
+ errormsg = "[DUT: %s]: No EVPN prefixes exist" % (dut)
+ return errormsg
+
+ for key, route_data_json in evpn_value_json.items():
+ if type(route_data_json) is dict:
+ rd_keys += 1
+ if prefix not in route_data_json:
+ missing_routes[key] = prefix
+
+ if rd_keys == len(missing_routes.keys()):
+ errormsg = (
+ "[DUT: %s]: "
+ "Missing EVPN routes: "
+ "%s [FAILED]!!" % (dut, list(set(missing_routes.values())))
+ )
+ return errormsg
+
+ for key, route_data_json in evpn_value_json.items():
+ if type(route_data_json) is dict:
+ if prefix not in route_data_json:
+ continue
+
+ for paths in route_data_json[prefix]["paths"]:
+ for path in paths:
+ if path["routeType"] != routeType:
+ errormsg = (
+ "[DUT: %s]: "
+ "Verifying routeType "
+ "for EVPN route: %s "
+ "[FAILED]!! "
+ "Expected: %s, "
+ "Found: %s"
+ % (
+ dut,
+ prefix,
+ routeType,
+ path["routeType"],
+ )
+ )
+ return errormsg
+
+ elif next_hop:
+ for nh_dict in path["nexthops"]:
+ if nh_dict["ip"] != next_hop:
+ errormsg = (
+ "[DUT: %s]: "
+ "Verifying "
+ "nexthop for "
+ "EVPN route: %s"
+ "[FAILED]!! "
+ "Expected: %s,"
+ " Found: %s"
+ % (
+ dut,
+ prefix,
+ next_hop,
+ nh_dict["ip"],
+ )
+ )
+ return errormsg
+
+ else:
+ logger.info(
+ "[DUT %s]: Verifying "
+ "EVPN route : %s, "
+ "routeType: %s is "
+ "installed "
+ "[PASSED]|| ",
+ dut,
+ prefix,
+ routeType,
+ )
+ return True
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return False
+
+
+@retry(retry_timeout=10)
+def verify_bgp_bestpath(tgen, addr_type, input_dict):
+ """
+ Verifies bgp next hop values in best-path output
+
+ * `tgen` : topogen object
+ * `addr_type` : Address type ipv4/ipv6
+ * `input_dict`: having details like multipath and bestpath
+
+ Usage
+ -----
+ input_dict_1 = {
+ "r1": {
+ "ipv4" : [{
+ "bestpath": "50.0.0.1",
+ "multipath": ["50.0.0.1", "50.0.0.2"],
+ "network": "100.0.0.0/24"
+ }],
+ "ipv6" : [{
+ "bestpath": "1000::1",
+ "multipath": ["1000::1", "1000::2"],
+ "network": "2000::1/128"
+ }]
+ }
+ }
+
+ result = verify_bgp_bestpath(tgen, addr_type, input_dict_1)
+
+ """
+
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ for dut in input_dict.keys():
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying bgp bestpath and multipath " "routes:", dut)
+ result = False
+ for network_dict in input_dict[dut][addr_type]:
+ nw_addr = network_dict.setdefault("network", None)
+ vrf = network_dict.setdefault("vrf", None)
+ bestpath = network_dict.setdefault("bestpath", None)
+
+ if vrf:
+ cmd = "show bgp vrf {} {} {} bestpath json".format(
+ vrf, addr_type, nw_addr
+ )
+ else:
+ cmd = "show bgp {} {} bestpath json".format(addr_type, nw_addr)
+
+ data = run_frr_cmd(rnode, cmd, isjson=True)
+ route = data["paths"][0]
+
+ if "bestpath" in route:
+ if route["bestpath"]["overall"] is True:
+ _bestpath = route["nexthops"][0]["ip"]
+
+ if _bestpath != bestpath:
+ return (
+ "DUT:[{}] Bestpath do not match for"
+ " network: {}, Expected "
+ " {} as bgp bestpath found {}".format(
+ dut, nw_addr, bestpath, _bestpath
+ )
+ )
+
+ logger.info(
+ "DUT:[{}] Found expected bestpath: "
+ " {} for network: {}".format(dut, _bestpath, nw_addr)
+ )
+ result = True
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+def verify_tcp_mss(tgen, dut, neighbour, configured_tcp_mss, vrf=None):
+ """
+ This API is used to verify the TCP MSS value assigned to a neighbour of the DUT
+
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `dut`: device under test
+ * `neighbour`: neighbour IP address
+ * `configured_tcp_mss`: the TCP MSS value to be verified
+ * `vrf`: vrf name
+
+ Usage
+ -----
+ result = verify_tcp_mss(tgen, dut, neighbour, configured_tcp_mss)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ rnode = tgen.routers()[dut]
+ if vrf:
+ cmd = "show bgp vrf {} neighbors {} json".format(vrf, neighbour)
+ else:
+ cmd = "show bgp neighbors {} json".format(neighbour)
+
+ # Execute the command
+ show_vrf_stats = run_frr_cmd(rnode, cmd, isjson=True)
+
+ # Verify TCP-MSS on router
+ logger.info("Verify that no core is observed")
+ if tgen.routers_have_failure():
+ errormsg = "Core observed while running CLI: %s" % (cmd)
+ return errormsg
+ else:
+ if configured_tcp_mss == show_vrf_stats.get(neighbour).get(
+ "bgpTcpMssConfigured"
+ ):
+ logger.debug(
+ "Configured TCP-MSS value {} found on {}".format(configured_tcp_mss, dut)
+ )
+ return True
+ else:
+ logger.debug(
+ "TCP-MSS Mismatch ,configured {} expecting {}".format(
+ show_vrf_stats.get(neighbour).get("bgpTcpMssConfigured"),
+ configured_tcp_mss,
+ )
+ )
+ return "TCP-MSS Mismatch"
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return False
+
+
+def get_dut_as_number(tgen, dut):
+ """
+ API to get the Autonomous System (AS) number of the given DUT
+
+ params:
+ =======
+ dut : Device Under test
+
+ returns :
+ =======
+ Success : DUT Autonomous number
+ Fail : Error message with Boolean False
+ """
+ tgen = get_topogen()
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+ show_bgp_json = run_frr_cmd(rnode, "sh ip bgp summary json ", isjson=True)
+ as_number = show_bgp_json["ipv4Unicast"]["as"]
+ if as_number:
+ logger.info(
+ "[dut {}] DUT contains Automnomous number :: {} ".format(
+ dut, as_number
+ )
+ )
+ return as_number
+ else:
+ logger.error(
+ "[dut {}] ERROR....! DUT doesnot contain any Automnomous number ".format(
+ dut
+ )
+ )
+ return False
+
+
+def get_prefix_count_route(
+ tgen, topo, dut, peer, vrf=None, link=None, sent=None, received=None
+):
+ """
+ API to return the prefix count of default-originate routes for the given DUT
+ dut : Device under test
+ peer : neighbor on which you are expecting the route to be received
+
+ returns :
+ prefix_count as dict with ipv4 and ipv6 value
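+
+ Usage (illustrative, with hypothetical router names)
+ =====
+ prefix_count = get_prefix_count_route(tgen, topo, dut="r1", peer="r2")
+ # e.g. returns {"ipv4_count": 5, "ipv6_count": 5}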
+ """
+ # the neighbor IP address can be found via the neighborship (and vice-versa)
+
+ if link:
+ neighbor_ipv4_address = topo["routers"][peer]["links"][link]["ipv4"]
+ neighbor_ipv6_address = topo["routers"][peer]["links"][link]["ipv6"]
+ else:
+ neighbor_ipv4_address = topo["routers"][peer]["links"][dut]["ipv4"]
+ neighbor_ipv6_address = topo["routers"][peer]["links"][dut]["ipv6"]
+
+ neighbor_ipv4_address = neighbor_ipv4_address.split("/")[0]
+ neighbor_ipv6_address = neighbor_ipv6_address.split("/")[0]
+ prefix_count = {}
+ tgen = get_topogen()
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+ if vrf:
+ ipv4_cmd = "sh ip bgp vrf {} summary json".format(vrf)
+ show_bgp_json_ipv4 = run_frr_cmd(rnode, ipv4_cmd, isjson=True)
+ ipv6_cmd = "sh ip bgp vrf {} ipv6 unicast summary json".format(vrf)
+ show_bgp_json_ipv6 = run_frr_cmd(rnode, ipv6_cmd, isjson=True)
+
+ prefix_count["ipv4_count"] = show_bgp_json_ipv4["ipv4Unicast"]["peers"][
+ neighbor_ipv4_address
+ ]["pfxRcd"]
+ prefix_count["ipv6_count"] = show_bgp_json_ipv6["peers"][
+ neighbor_ipv6_address
+ ]["pfxRcd"]
+
+ logger.info(
+ "The Prefix Count of the [DUT:{} : vrf [{}] ] towards neighbor ipv4 : {} and ipv6 : {} is : {}".format(
+ dut,
+ vrf,
+ neighbor_ipv4_address,
+ neighbor_ipv6_address,
+ prefix_count,
+ )
+ )
+ return prefix_count
+
+ else:
+ show_bgp_json_ipv4 = run_frr_cmd(
+ rnode, "sh ip bgp summary json ", isjson=True
+ )
+ show_bgp_json_ipv6 = run_frr_cmd(
+ rnode, "sh ip bgp ipv6 unicast summary json ", isjson=True
+ )
+ if received:
+ prefix_count["ipv4_count"] = show_bgp_json_ipv4["ipv4Unicast"][
+ "peers"
+ ][neighbor_ipv4_address]["pfxRcd"]
+ prefix_count["ipv6_count"] = show_bgp_json_ipv6["peers"][
+ neighbor_ipv6_address
+ ]["pfxRcd"]
+
+ elif sent:
+ prefix_count["ipv4_count"] = show_bgp_json_ipv4["ipv4Unicast"][
+ "peers"
+ ][neighbor_ipv4_address]["pfxSnt"]
+ prefix_count["ipv6_count"] = show_bgp_json_ipv6["peers"][
+ neighbor_ipv6_address
+ ]["pfxSnt"]
+
+ else:
+ prefix_count["ipv4_count"] = show_bgp_json_ipv4["ipv4Unicast"][
+ "peers"
+ ][neighbor_ipv4_address]["pfxRcd"]
+ prefix_count["ipv6_count"] = show_bgp_json_ipv6["peers"][
+ neighbor_ipv6_address
+ ]["pfxRcd"]
+
+ logger.info(
+ "The Prefix Count of the DUT:{} towards neighbor ipv4 : {} and ipv6 : {} is : {}".format(
+ dut, neighbor_ipv4_address, neighbor_ipv6_address, prefix_count
+ )
+ )
+ return prefix_count
+ else:
+ logger.error("ERROR...! Unknown dut {} in topolgy".format(dut))
+
+
+@retry(retry_timeout=5)
+def verify_rib_default_route(
+ tgen,
+ topo,
+ dut,
+ routes,
+ expected_nexthop,
+ metric=None,
+ origin=None,
+ locPrf=None,
+ expected_aspath=None,
+):
+ """
+ API to verify the 'Default route' in the BGP RIB, along with the attributes the route carries (metric, local preference, origin, AS path)
+
+ param
+ =====
+ dut : device under test
+ routes : default route with expected nexthop
+ expected_nexthop : the nexthop that is expected for the default route
+
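+ Usage (illustrative, with hypothetical addresses)
+ =====
+ routes = {"ipv4": "0.0.0.0/0", "ipv6": "::/0"}
+ expected_nexthop = {"ipv4": "10.0.0.1", "ipv6": "fd00::1"}
+ result = verify_rib_default_route(tgen, topo, "r1", routes, expected_nexthop)
+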
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+ connected_routes = {}
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+ ipv4_routes = run_frr_cmd(rnode, "sh ip bgp json", isjson=True)
+ ipv6_routes = run_frr_cmd(rnode, "sh ip bgp ipv6 unicast json", isjson=True)
+ is_ipv4_default_attrib_found = False
+ is_ipv6_default_attrib_found = False
+
+ default_ipv4_route = routes["ipv4"]
+ default_ipv6_route = "::/0"
+ ipv4_route_Origin = False
+ ipv4_route_local_pref = False
+ ipv4_route_metric = False
+
+ if default_ipv4_route in ipv4_routes["routes"].keys():
+ nxt_hop_count = len(ipv4_routes["routes"][default_ipv4_route])
+ rib_next_hops = []
+ for index in range(nxt_hop_count):
+ rib_next_hops.append(
+ ipv4_routes["routes"][default_ipv4_route][index]["nexthops"][0]["ip"]
+ )
+
+ for nxt_hop in expected_nexthop.items():
+ if nxt_hop[0] == "ipv4":
+ if nxt_hop[1] in rib_next_hops:
+ logger.info(
+ "Default routes [{}] obtained from {} .....PASSED".format(
+ default_ipv4_route, nxt_hop[1]
+ )
+ )
+ else:
+ logger.error(
+ "ERROR ...! Default routes [{}] expected is missing {}".format(
+ default_ipv4_route, nxt_hop[1]
+ )
+ )
+ return False
+
+ else:
+ pass
+
+ if "origin" in ipv4_routes["routes"][default_ipv4_route][0].keys():
+ ipv4_route_Origin = ipv4_routes["routes"][default_ipv4_route][0]["origin"]
+ if "locPrf" in ipv4_routes["routes"][default_ipv4_route][0].keys():
+ ipv4_route_local_pref = ipv4_routes["routes"][default_ipv4_route][0][
+ "locPrf"
+ ]
+ if "metric" in ipv4_routes["routes"][default_ipv4_route][0].keys():
+ ipv4_route_metric = ipv4_routes["routes"][default_ipv4_route][0]["metric"]
+ else:
+ logger.error("ERROR [ DUT {}] : The Default Route Not found in RIB".format(dut))
+ return False
+
+ origin_found = False
+ locPrf_found = False
+ metric_found = False
+ as_path_found = False
+
+ if origin:
+ if origin == ipv4_route_Origin:
+ logger.info(
+ "Dafault Route {} expected origin {} Found in RIB....PASSED".format(
+ default_ipv4_route, origin
+ )
+ )
+ origin_found = True
+ else:
+ logger.error(
+ "ERROR... IPV4::! Expected Origin is {} obtained {}".format(
+ origin, ipv4_route_Origin
+ )
+ )
+ return False
+ else:
+ origin_found = True
+
+ if locPrf:
+ if locPrf == ipv4_route_local_pref:
+ logger.info(
+ "Dafault Route {} expected local preference {} Found in RIB....PASSED".format(
+ default_ipv4_route, locPrf
+ )
+ )
+ locPrf_found = True
+ else:
+ logger.error(
+ "ERROR... IPV4::! Expected Local preference is {} obtained {}".format(
+ locPrf, ipv4_route_local_pref
+ )
+ )
+ return False
+ else:
+ locPrf_found = True
+
+ if metric:
+ if metric == ipv4_route_metric:
+ logger.info(
+ "Dafault Route {} expected metric {} Found in RIB....PASSED".format(
+ default_ipv4_route, metric
+ )
+ )
+
+ metric_found = True
+ else:
+ logger.error(
+ "ERROR... IPV4::! Expected metric is {} obtained {}".format(
+ metric, ipv4_route_metric
+ )
+ )
+ return False
+ else:
+ metric_found = True
+
+ if expected_aspath:
+ obtained_aspath = ipv4_routes["routes"][default_ipv4_route][0]["path"]
+ if expected_aspath in obtained_aspath:
+ as_path_found = True
+ logger.info(
+ "Dafault Route {} expected AS path {} Found in RIB....PASSED".format(
+ default_ipv4_route, expected_aspath
+ )
+ )
+ else:
+ logger.error(
+ "ERROR.....! Expected AS path {} obtained {}..... FAILED ".format(
+ expected_aspath, obtained_aspath
+ )
+ )
+ return False
+ else:
+ as_path_found = True
+
+ if origin_found and locPrf_found and metric_found and as_path_found:
+ is_ipv4_default_attrib_found = True
+ logger.info(
+ "IPV4:: Expected origin ['{}'] , Local Preference ['{}'] , Metric ['{}'] and AS path [{}] is found in RIB".format(
+ origin, locPrf, metric, expected_aspath
+ )
+ )
+ else:
+ is_ipv4_default_attrib_found = False
+ logger.error(
+ "IPV4:: Expected origin ['{}'] Obtained [{}]".format(
+ origin, ipv4_route_Origin
+ )
+ )
+ logger.error(
+ "IPV4:: Expected locPrf ['{}'] Obtained [{}]".format(
+ locPrf, ipv4_route_local_pref
+ )
+ )
+ logger.error(
+ "IPV4:: Expected metric ['{}'] Obtained [{}]".format(
+ metric, ipv4_route_metric
+ )
+ )
+ logger.error(
+ "IPV4:: Expected metric ['{}'] Obtained [{}]".format(
+ expected_aspath, obtained_aspath
+ )
+ )
+
+ route_Origin = False
+ route_local_pref = False
+ route_local_metric = False
+ default_ipv6_route = ""
+ try:
+ ipv6_routes["routes"]["0::0/0"]
+ default_ipv6_route = "0::0/0"
+ except KeyError:
+ ipv6_routes["routes"]["::/0"]
+ default_ipv6_route = "::/0"
+ if default_ipv6_route in ipv6_routes["routes"].keys():
+ nxt_hop_count = len(ipv6_routes["routes"][default_ipv6_route])
+ rib_next_hops = []
+ for index in range(nxt_hop_count):
+ rib_next_hops.append(
+ ipv6_routes["routes"][default_ipv6_route][index]["nexthops"][0]["ip"]
+ )
+ try:
+ rib_next_hops.append(
+ ipv6_routes["routes"][default_ipv6_route][index]["nexthops"][1][
+ "ip"
+ ]
+ )
+ except (KeyError, IndexError) as e:
+ logger.error("NO impact ..! Global IPV6 Address not found ")
+
+ for nxt_hop in expected_nexthop.items():
+ if nxt_hop[0] == "ipv6":
+ if nxt_hop[1] in rib_next_hops:
+ logger.info(
+ "Default routes [{}] obtained from {} .....PASSED".format(
+ default_ipv6_route, nxt_hop[1]
+ )
+ )
+ else:
+ logger.error(
+ "ERROR ...! Default routes [{}] expected from {} obtained {}".format(
+ default_ipv6_route, nxt_hop[1], rib_next_hops
+ )
+ )
+ return False
+
+ else:
+ pass
+ if "origin" in ipv6_routes["routes"][default_ipv6_route][0].keys():
+ route_Origin = ipv6_routes["routes"][default_ipv6_route][0]["origin"]
+ if "locPrf" in ipv6_routes["routes"][default_ipv6_route][0].keys():
+ route_local_pref = ipv6_routes["routes"][default_ipv6_route][0]["locPrf"]
+ if "metric" in ipv6_routes["routes"][default_ipv6_route][0].keys():
+ route_local_metric = ipv6_routes["routes"][default_ipv6_route][0]["metric"]
+
+ origin_found = False
+ locPrf_found = False
+ metric_found = False
+ as_path_found = False
+
+ if origin:
+ if origin == route_Origin:
+ logger.info(
+ "Dafault Route {} expected origin {} Found in RIB....PASSED".format(
+ default_ipv6_route, route_Origin
+ )
+ )
+ origin_found = True
+ else:
+ logger.error(
+ "ERROR... IPV6::! Expected Origin is {} obtained {}".format(
+ origin, route_Origin
+ )
+ )
+ return False
+ else:
+ origin_found = True
+
+ if locPrf:
+ if locPrf == route_local_pref:
+ logger.info(
+ "Dafault Route {} expected Local Preference {} Found in RIB....PASSED".format(
+ default_ipv6_route, route_local_pref
+ )
+ )
+ locPrf_found = True
+ else:
+ logger.error(
+ "ERROR... IPV6::! Expected Local Preference is {} obtained {}".format(
+ locPrf, route_local_pref
+ )
+ )
+ return False
+ else:
+ locPrf_found = True
+
+ if metric:
+ if metric == route_local_metric:
+ logger.info(
+ "Dafault Route {} expected metric {} Found in RIB....PASSED".format(
+ default_ipv4_route, metric
+ )
+ )
+
+ metric_found = True
+ else:
+ logger.error(
+ "ERROR... IPV6::! Expected metric is {} obtained {}".format(
+ metric, route_local_metric
+ )
+ )
+ return False
+ else:
+ metric_found = True
+
+ if expected_aspath:
+ obtained_aspath = ipv6_routes["routes"][default_ipv6_route][0]["path"]
+ if expected_aspath in obtained_aspath:
+ as_path_found = True
+ logger.info(
+ "Dafault Route {} expected AS path {} Found in RIB....PASSED".format(
+ default_ipv4_route, expected_aspath
+ )
+ )
+ else:
+ logger.error(
+ "ERROR.....! Expected AS path {} obtained {}..... FAILED ".format(
+ expected_aspath, obtained_aspath
+ )
+ )
+ return False
+ else:
+ as_path_found = True
+
+ if origin_found and locPrf_found and metric_found and as_path_found:
+ is_ipv6_default_attrib_found = True
+ logger.info(
+ "IPV6:: Expected origin ['{}'] , Local Preference ['{}'] , Metric ['{}'] and AS path [{}] is found in RIB".format(
+ origin, locPrf, metric, expected_aspath
+ )
+ )
+ else:
+ is_ipv6_default_attrib_found = False
+ logger.error(
+ "IPV6:: Expected origin ['{}'] Obtained [{}]".format(origin, route_Origin)
+ )
+ logger.error(
+ "IPV6:: Expected locPrf ['{}'] Obtained [{}]".format(
+ locPrf, route_local_pref
+ )
+ )
+ logger.error(
+ "IPV6:: Expected metric ['{}'] Obtained [{}]".format(
+ metric, route_local_metric
+ )
+ )
+ logger.error(
+ "IPV6:: Expected metric ['{}'] Obtained [{}]".format(
+ expected_aspath, obtained_aspath
+ )
+ )
+
+ if is_ipv4_default_attrib_found and is_ipv6_default_attrib_found:
+ logger.info("The attributes are found for default route in RIB ")
+ return True
+ else:
+ return False
+
+
+@retry(retry_timeout=5)
+def verify_fib_default_route(tgen, topo, dut, routes, expected_nexthop):
+ """
+ API to verify the 'Default route' in the FIB
+
+ param
+ =====
+ dut : device under test
+ routes : default route with expected nexthop
+ expected_nexthop : the nexthop that is expected for the default route
+
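+ Usage (illustrative, with hypothetical addresses)
+ =====
+ routes = {"ipv4": "0.0.0.0/0", "ipv6": "::/0"}
+ expected_nexthop = {"ipv4": "10.0.0.1", "ipv6": "fd00::1"}
+ result = verify_fib_default_route(tgen, topo, "r1", routes, expected_nexthop)
+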
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+ connected_routes = {}
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+ ipv4_routes = run_frr_cmd(rnode, "sh ip route json", isjson=True)
+ ipv6_routes = run_frr_cmd(rnode, "sh ipv6 route json", isjson=True)
+
+ is_ipv4_default_route_found = False
+ is_ipv6_default_route_found = False
+ if routes["ipv4"] in ipv4_routes.keys():
+ rib_ipv4_nxt_hops = []
+ ipv4_default_route = routes["ipv4"]
+ nxt_hop_count = len(ipv4_routes[ipv4_default_route][0]["nexthops"])
+ for index in range(nxt_hop_count):
+ rib_ipv4_nxt_hops.append(
+ ipv4_routes[ipv4_default_route][0]["nexthops"][index]["ip"]
+ )
+
+ if expected_nexthop["ipv4"] in rib_ipv4_nxt_hops:
+ is_ipv4_default_route_found = True
+ logger.info(
+ "{} default route with next hop {} is found in FIB ".format(
+ ipv4_default_route, expected_nexthop
+ )
+ )
+ else:
+ logger.error(
+ "ERROR .. ! {} default route with next hop {} is not found in FIB ".format(
+ ipv4_default_route, expected_nexthop
+ )
+ )
+ return False
+
+ if routes["ipv6"] in ipv6_routes.keys() or "::/0" in ipv6_routes.keys():
+ rib_ipv6_nxt_hops = []
+ if "::/0" in ipv6_routes.keys():
+ ipv6_default_route = "::/0"
+ elif routes["ipv6"] in ipv6_routes.keys():
+ ipv6_default_route = routes["ipv6"]
+
+ nxt_hop_count = len(ipv6_routes[ipv6_default_route][0]["nexthops"])
+ for index in range(nxt_hop_count):
+ rib_ipv6_nxt_hops.append(
+ ipv6_routes[ipv6_default_route][0]["nexthops"][index]["ip"]
+ )
+
+ if expected_nexthop["ipv6"] in rib_ipv6_nxt_hops:
+ is_ipv6_default_route_found = True
+ logger.info(
+ "{} default route with next hop {} is found in FIB ".format(
+ ipv6_default_route, expected_nexthop
+ )
+ )
+ else:
+ logger.error(
+ "ERROR .. ! {} default route with next hop {} is not found in FIB ".format(
+ ipv6_default_route, expected_nexthop
+ )
+ )
+ return False
+
+ if is_ipv4_default_route_found and is_ipv6_default_route_found:
+ return True
+ else:
+ logger.error(
+ "Default Route for ipv4 and ipv6 address family is not found in FIB "
+ )
+ return False
+
+
+@retry(retry_timeout=5)
+def verify_bgp_advertised_routes_from_neighbor(tgen, topo, dut, peer, expected_routes):
+ """
+ API that verifies the routes advertised from the DUT to the peer
+
+ command used :
+ "sh ip bgp neighbor <x.x.x.x> advertised-routes" and
+ "sh ip bgp ipv6 unicast neighbor<x::x> advertised-routes"
+
+ dut : Device Under Test
+ peer : Peer on which the routes are expected
+ expected_routes : dual-stack IPv4 and IPv6 routes to be verified
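+
+ Usage (illustrative, with hypothetical names and prefixes)
+ =====
+ expected_routes = {
+ "ipv4": [{"network": "10.1.1.0/24"}],
+ "ipv6": [{"network": "fd00:1::/64"}],
+ }
+ result = verify_bgp_advertised_routes_from_neighbor(
+ tgen, topo, "r1", "r2", expected_routes
+ )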
+
+ returns: True / False
+
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+
+ peer_ipv4_neighbor_ip = topo["routers"][peer]["links"][dut]["ipv4"].split("/")[0]
+ peer_ipv6_neighbor_ip = topo["routers"][peer]["links"][dut]["ipv6"].split("/")[0]
+
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+ ipv4_receieved_routes = run_frr_cmd(
+ rnode,
+ "sh ip bgp neighbor {} advertised-routes json".format(
+ peer_ipv4_neighbor_ip
+ ),
+ isjson=True,
+ )
+ ipv6_receieved_routes = run_frr_cmd(
+ rnode,
+ "sh ip bgp ipv6 unicast neighbor {} advertised-routes json".format(
+ peer_ipv6_neighbor_ip
+ ),
+ isjson=True,
+ )
+ ipv4_route_count = 0
+ ipv6_route_count = 0
+ if ipv4_receieved_routes:
+ for index in range(len(expected_routes["ipv4"])):
+ if (
+ expected_routes["ipv4"][index]["network"]
+ in ipv4_receieved_routes["advertisedRoutes"].keys()
+ ):
+ ipv4_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is advertised to {} ".format(
+ dut, expected_routes["ipv4"][index]["network"], peer
+ )
+ )
+
+ elif (
+ expected_routes["ipv4"][index]["network"]
+ in ipv4_receieved_routes["bgpOriginatingDefaultNetwork"]
+ ):
+ ipv4_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is advertised to {} ".format(
+ dut, expected_routes["ipv4"][index]["network"], peer
+ )
+ )
+
+ else:
+ logger.error(
+ "ERROR....![DUT : {}] The Expected Route {} is not advertised to {} ".format(
+ dut, expected_routes["ipv4"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(ipv4_receieved_routes)
+ logger.error(
+ "ERROR...! [DUT : {}] No IPV4 Routes are advertised to the peer {}".format(
+ dut, peer
+ )
+ )
+ return False
+
+ if ipv6_receieved_routes:
+ for index in range(len(expected_routes["ipv6"])):
+ if (
+ expected_routes["ipv6"][index]["network"]
+ in ipv6_receieved_routes["advertisedRoutes"].keys()
+ ):
+ ipv6_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is advertised to {} ".format(
+ dut, expected_routes["ipv6"][index]["network"], peer
+ )
+ )
+ elif (
+ expected_routes["ipv6"][index]["network"]
+ in ipv6_receieved_routes["bgpOriginatingDefaultNetwork"]
+ ):
+ ipv6_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is advertised to {} ".format(
+ dut, expected_routes["ipv6"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(
+ "ERROR....![DUT : {}] The Expected Route {} is not advertised to {} ".format(
+ dut, expected_routes["ipv6"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(ipv6_receieved_routes)
+ logger.error(
+ "ERROR...! [DUT : {}] No IPV6 Routes are advertised to the peer {}".format(
+ dut, peer
+ )
+ )
+ return False
+
+ if ipv4_route_count == len(expected_routes["ipv4"]) and ipv6_route_count == len(
+ expected_routes["ipv6"]
+ ):
+ return True
+ else:
+ logger.error(
+ "ERROR ....! IPV4 : Expected Routes -> {} obtained ->{} ".format(
+ expected_routes["ipv4"], ipv4_receieved_routes["advertisedRoutes"]
+ )
+ )
+ logger.error(
+ "ERROR ....! IPV6 : Expected Routes -> {} obtained ->{} ".format(
+ expected_routes["ipv6"], ipv6_receieved_routes["advertisedRoutes"]
+ )
+ )
+ return False
+
+
+@retry(retry_timeout=5)
+def verify_bgp_received_routes_from_neighbor(tgen, topo, dut, peer, expected_routes):
+ """
+ API to verify the bgp received routes
+
+ command used :
+ =============
+ show ip bgp neighbor <x.x.x.x> received-routes
+ show ip bgp ipv6 unicast neighbor <x::x> received-routes
+
+ params
+ =======
+ dut : Device Under Test
+ peer : Peer on which the routes are expected
+ expected_routes : dual-stack IPv4 and IPv6 routes to be verified
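+
+ Usage (illustrative, with hypothetical names and prefixes)
+ =====
+ expected_routes = {
+ "ipv4": [{"network": "10.1.1.0/24"}],
+ "ipv6": [{"network": "fd00:1::/64"}],
+ }
+ result = verify_bgp_received_routes_from_neighbor(
+ tgen, topo, "r1", "r2", expected_routes
+ )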
+
+ returns:
+ ========
+ True / False
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ tgen = get_topogen()
+
+ peer_ipv4_neighbor_ip = topo["routers"][peer]["links"][dut]["ipv4"].split("/")[0]
+ peer_ipv6_neighbor_ip = topo["routers"][peer]["links"][dut]["ipv6"].split("/")[0]
+
+ logger.info("Enabling Soft configuration to neighbor INBOUND ")
+ neigbor_dict = {"ipv4": peer_ipv4_neighbor_ip, "ipv6": peer_ipv6_neighbor_ip}
+ result = configure_bgp_soft_configuration(
+ tgen, dut, neigbor_dict, direction="inbound"
+ )
+ assert (
+ result is True
+ ), " Failed to configure the soft configuration \n Error: {}".format(result)
+
+ """sleep of 10 sec is required to get the routes on peer after soft configuration"""
+ sleep(10)
+ for router, rnode in tgen.routers().items():
+ if router == dut:
+ ipv4_receieved_routes = run_frr_cmd(
+ rnode,
+ "sh ip bgp neighbor {} received-routes json".format(
+ peer_ipv4_neighbor_ip
+ ),
+ isjson=True,
+ )
+ ipv6_receieved_routes = run_frr_cmd(
+ rnode,
+ "sh ip bgp ipv6 unicast neighbor {} received-routes json".format(
+ peer_ipv6_neighbor_ip
+ ),
+ isjson=True,
+ )
+ ipv4_route_count = 0
+ ipv6_route_count = 0
+ if ipv4_receieved_routes:
+ for index in range(len(expected_routes["ipv4"])):
+ if (
+ expected_routes["ipv4"][index]["network"]
+ in ipv4_receieved_routes["receivedRoutes"].keys()
+ ):
+ ipv4_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is received from {} ".format(
+ dut, expected_routes["ipv4"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(
+ "ERROR....![DUT : {}] The Expected Route {} is not received from {} ".format(
+ dut, expected_routes["ipv4"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(ipv4_receieved_routes)
+ logger.error(
+ "ERROR...! [DUT : {}] No IPV4 Routes are received from the peer {}".format(
+ dut, peer
+ )
+ )
+ return False
+
+ if ipv6_receieved_routes:
+ for index in range(len(expected_routes["ipv6"])):
+ if (
+ expected_routes["ipv6"][index]["network"]
+ in ipv6_receieved_routes["receivedRoutes"].keys()
+ ):
+ ipv6_route_count += 1
+ logger.info(
+ "Success [DUT : {}] The Expected Route {} is received from {} ".format(
+ dut, expected_routes["ipv6"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(
+ "ERROR....![DUT : {}] The Expected Route {} is not received from {} ".format(
+ dut, expected_routes["ipv6"][index]["network"], peer
+ )
+ )
+ else:
+ logger.error(ipv6_receieved_routes)
+ logger.error(
+ "ERROR...! [DUT : {}] No IPV6 Routes are received from the peer {}".format(
+ dut, peer
+ )
+ )
+ return False
+
+ if ipv4_route_count == len(expected_routes["ipv4"]) and ipv6_route_count == len(
+ expected_routes["ipv6"]
+ ):
+ return True
+ else:
+ logger.error(
+ "ERROR ....! IPV4 : Expected Routes -> {} obtained ->{} ".format(
+ expected_routes["ipv4"], ipv4_receieved_routes["advertisedRoutes"]
+ )
+ )
+ logger.error(
+ "ERROR ....! IPV6 : Expected Routes -> {} obtained ->{} ".format(
+ expected_routes["ipv6"], ipv6_receieved_routes["advertisedRoutes"]
+ )
+ )
+ return False
+
+
+def configure_bgp_soft_configuration(tgen, dut, neighbor_dict, direction):
+ """
+ API to configure BGP soft reconfiguration, so that routes received from a peer can be displayed
+ params
+ ======
+ dut : device under test on which the configuration is to be applied
+ neighbor_dict : dict containing the ipv4 and ipv6 neighbor IPs
+ direction : direction in which it should be applied, inbound/outbound
+
+ returns:
+ ========
+ boolean
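+
+ Usage (illustrative, with hypothetical addresses)
+ =====
+ neighbor_dict = {"ipv4": "10.0.0.2", "ipv6": "fd00::2"}
+ result = configure_bgp_soft_configuration(
+ tgen, "r1", neighbor_dict, direction="inbound"
+ )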
+ """
+ logger.info("Enabling Soft configuration to neighbor INBOUND ")
+ local_as = get_dut_as_number(tgen, dut)
+ ipv4_neighbor = neighbor_dict["ipv4"]
+ ipv6_neighbor = neighbor_dict["ipv6"]
+ direction = direction.lower()
+ if ipv4_neighbor and ipv6_neighbor:
+ raw_config = {
+ dut: {
+ "raw_config": [
+ "router bgp {}".format(local_as),
+ "address-family ipv4 unicast",
+ "neighbor {} soft-reconfiguration {} ".format(
+ ipv4_neighbor, direction
+ ),
+ "exit-address-family",
+ "address-family ipv6 unicast",
+ "neighbor {} soft-reconfiguration {} ".format(
+ ipv6_neighbor, direction
+ ),
+ "exit-address-family",
+ ]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ logger.info(
+ "Success... [DUT : {}] The soft configuration onis applied on neighbors {} ".format(
+ dut, neighbor_dict
+ )
+ )
+ return True
diff --git a/tests/topotests/lib/bgprib.py b/tests/topotests/lib/bgprib.py
new file mode 100644
index 0000000..699c7a4
--- /dev/null
+++ b/tests/topotests/lib/bgprib.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# Copyright 2018, LabN Consulting, L.L.C.
+
+#
+# want_rd_routes = [
+# {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1', 'bp': True},
+# {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1', 'bp': False},
+#
+# {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'},
+# ]
+#
+# ribRequireVpnRoutes('r2','Customer routes',want_rd_routes)
+#
+# want_unicast_routes = [
+# {'p':'5.1.0.0/24', 'n':'1.1.1.1'},
+# ]
+#
+# ribRequireUnicastRoutes('r1','ipv4','r1-cust1','Customer routes in vrf',want_unicast_routes)
+# ribRequireUnicastRoutes('r1','ipv4','','Customer routes in default',want_unicast_routes)
+#
+
+from lib.lutil import luCommand, luResult, LUtil
+import json
+import re
+
+
+# gpz: get rib in json form and compare against desired routes
+class BgpRib:
+ def log(self, str):
+ LUtil.log("BgpRib: " + str)
+
+ def routes_include_wanted(self, pfxtbl, want, debug):
+ # helper function to RequireVpnRoutes
+ for pfx in pfxtbl.keys():
+ if debug:
+ self.log("trying pfx %s" % pfx)
+ if pfx != want["p"]:
+ if debug:
+ self.log("want pfx=" + want["p"] + ", not " + pfx)
+ continue
+ if debug:
+ self.log("have pfx=%s" % pfx)
+ for r in pfxtbl[pfx]:
+ bp = r.get("bestpath", False)
+ if debug:
+ self.log("trying route %s bp=%s" % (r, bp))
+ nexthops = r["nexthops"]
+ for nh in nexthops:
+ if debug:
+ self.log("trying nh %s" % nh["ip"])
+ if nh["ip"] == want["n"]:
+ if debug:
+ self.log("found %s" % want["n"])
+ if bp == want.get("bp", bp):
+ return 1
+ elif debug:
+ self.log("bestpath mismatch %s != %s" % (bp, want["bp"]))
+ else:
+ if debug:
+ self.log("want nh=" + want["n"] + ", not " + nh["ip"])
+ if debug:
+ self.log("missing route: pfx=" + want["p"] + ", nh=" + want["n"])
+ return 0
+
+ def RequireVpnRoutes(self, target, title, wantroutes, debug=0):
+ logstr = "RequireVpnRoutes " + str(wantroutes)
+ # non json form for humans
+ luCommand(
+ target,
+ 'vtysh -c "show bgp ipv4 vpn"',
+ ".",
+ "None",
+ "Get VPN RIB (non-json)",
+ )
+ ret = luCommand(
+ target,
+ 'vtysh -c "show bgp ipv4 vpn json"',
+ ".*",
+ "None",
+ "Get VPN RIB (json)",
+ )
+ if re.search(r"^\s*$", ret):
+ # degenerate case: empty json means no routes
+ if len(wantroutes) > 0:
+ luResult(target, False, title, logstr)
+ return
+ luResult(target, True, title, logstr)
+ rib = json.loads(ret)
+ rds = rib["routes"]["routeDistinguishers"]
+ for want in wantroutes:
+ found = 0
+ if debug:
+ self.log("want rd %s" % want["rd"])
+ for rd in rds.keys():
+ if rd != want["rd"]:
+ continue
+ if debug:
+ self.log("found rd %s" % rd)
+ table = rds[rd]
+ if self.routes_include_wanted(table, want, debug):
+ found = 1
+ break
+ if not found:
+ luResult(target, False, title, logstr)
+ return
+ luResult(target, True, title, logstr)
+
+ def RequireUnicastRoutes(self, target, afi, vrf, title, wantroutes, debug=0):
+ logstr = "RequireUnicastRoutes %s" % str(wantroutes)
+ vrfstr = ""
+ if vrf != "":
+ vrfstr = "vrf %s" % (vrf)
+
+ if (afi != "ipv4") and (afi != "ipv6"):
+ self.log("ERROR invalid afi")
+
+ cmdstr = "show bgp %s %s unicast" % (vrfstr, afi)
+ # non json form for humans
+ cmd = 'vtysh -c "%s"' % cmdstr
+ luCommand(target, cmd, ".", "None", "Get %s %s RIB (non-json)" % (vrfstr, afi))
+ cmd = 'vtysh -c "%s json"' % cmdstr
+ ret = luCommand(
+ target, cmd, ".*", "None", "Get %s %s RIB (json)" % (vrfstr, afi)
+ )
+ if re.search(r"^\s*$", ret):
+ # degenerate case: empty json means no routes
+ if len(wantroutes) > 0:
+ luResult(target, False, title, logstr)
+ return
+ luResult(target, True, title, logstr)
+ rib = json.loads(ret)
+ try:
+ table = rib["routes"]
+ # KeyError: 'routes' probably means missing/bad VRF
+ except KeyError as err:
+ if vrf != "":
+ errstr = "-script ERROR: check if wrong vrf (%s)" % (vrf)
+ else:
+ errstr = "-script ERROR: check if vrf missing"
+ luResult(target, False, title + errstr, logstr)
+ return
+ # if debug:
+ # self.log("table=%s" % table)
+ for want in wantroutes:
+ if debug:
+ self.log("want=%s" % want)
+ if not self.routes_include_wanted(table, want, debug):
+ luResult(target, False, title, logstr)
+ return
+ luResult(target, True, title, logstr)
+
+
+BgpRib = BgpRib()
+
+
+def bgpribRequireVpnRoutes(target, title, wantroutes, debug=0):
+ BgpRib.RequireVpnRoutes(target, title, wantroutes, debug)
+
+
+def bgpribRequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug=0):
+ BgpRib.RequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug)
diff --git a/tests/topotests/lib/bmp_collector/bgp/__init__.py b/tests/topotests/lib/bmp_collector/bgp/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/topotests/lib/bmp_collector/bgp/__init__.py
diff --git a/tests/topotests/lib/bmp_collector/bgp/open/__init__.py b/tests/topotests/lib/bmp_collector/bgp/open/__init__.py
new file mode 100644
index 0000000..6c814ee
--- /dev/null
+++ b/tests/topotests/lib/bmp_collector/bgp/open/__init__.py
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: ISC
+
+# Copyright 2023 6WIND S.A.
+# Authored by Farid Mihoub <farid.mihoub@6wind.com>
+#
+import ipaddress
+import struct
+
+
+class BGPOpen:
+ UNPACK_STR = '!16sHBBHH4sB'
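+ # Field layout per the unpack below: 16s marker, H length, B type,
+ # B version, H my_as, H hold_time, 4s bgp_id, B optional_params_len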
+
+ @classmethod
+ def dissect(cls, data):
+ (marker,
+ length,
+ open_type,
+ version,
+ my_as,
+ hold_time,
+ bgp_id,
+ optional_params_len) = struct.unpack_from(cls.UNPACK_STR, data)
+
+ data = data[struct.calcsize(cls.UNPACK_STR) + optional_params_len:]
+
+ # XXX: parse optional parameters
+
+ return data, {
+ 'version': version,
+ 'my_as': my_as,
+ 'hold_time': hold_time,
+ 'bgp_id': ipaddress.ip_address(bgp_id),
+ 'optional_params_len': optional_params_len,
+ }
diff --git a/tests/topotests/lib/bmp_collector/bgp/update/__init__.py b/tests/topotests/lib/bmp_collector/bgp/update/__init__.py
new file mode 100644
index 0000000..d079b35
--- /dev/null
+++ b/tests/topotests/lib/bmp_collector/bgp/update/__init__.py
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: ISC
+
+# Copyright 2023 6WIND S.A.
+# Authored by Farid Mihoub <farid.mihoub@6wind.com>
+#
+import ipaddress
+import struct
+
+from .nlri import NlriIPv4Unicast
+from .path_attributes import PathAttribute
+
+
+#------------------------------------------------------------------------------
+class BGPUpdate:
+ UNPACK_STR = '!16sHBH'
+ STATIC_SIZE = 23
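+ # STATIC_SIZE: 16 (marker) + 2 (length) + 1 (type) + 2 (withdrawn routes
+ # length) + 2 (total path attributes length) = 23 bytes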
+
+ @classmethod
+ def dissect(cls, data):
+ msg = {'bmp_log_type': 'update'}
+ common_size = struct.calcsize(cls.UNPACK_STR)
+ (marker,
+ length,
+ update_type,
+ withdrawn_routes_len) = struct.unpack_from(cls.UNPACK_STR, data)
+
+ # get withdrawn routes
+ withdrawn_routes = ''
+ if withdrawn_routes_len:
+ withdrawn_routes = NlriIPv4Unicast.parse(
+ data[common_size:common_size + withdrawn_routes_len]
+ )
+ msg['bmp_log_type'] = 'withdraw'
+ msg.update(withdrawn_routes)
+
+ # get path attributes
+ (total_path_attrs_len,) = struct.unpack_from(
+ '!H', data[common_size+withdrawn_routes_len:])
+
+ if total_path_attrs_len:
+ offset = cls.STATIC_SIZE + withdrawn_routes_len
+ path_attrs_data = data[offset:offset + total_path_attrs_len]
+ while path_attrs_data:
+ path_attrs_data, pattr = PathAttribute.dissect(path_attrs_data)
+ if pattr:
+ msg = {**msg, **pattr}
+
+ # get nlri
+ nlri_len = length - cls.STATIC_SIZE - withdrawn_routes_len - total_path_attrs_len
+ if nlri_len > 0:
+ nlri = NlriIPv4Unicast.parse(data[length - nlri_len:length])
+ msg.update(nlri)
+
+ return data[length:], msg
diff --git a/tests/topotests/lib/bmp_collector/bgp/update/af.py b/tests/topotests/lib/bmp_collector/bgp/update/af.py
new file mode 100644
index 0000000..01af1ae
--- /dev/null
+++ b/tests/topotests/lib/bmp_collector/bgp/update/af.py
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: ISC
+
+# Copyright 2023 6WIND S.A.
+# Authored by Farid Mihoub <farid.mihoub@6wind.com>
+#
+
+# IANA Address Family Identifier
+AFI_IP = 1
+AFI_IP6 = 2
+AFI_L2VPN = 25
+
+ # IANA Subsequent Address Family Identifier
+SAFI_UNICAST = 1
+SAFI_MULTICAST = 2
+SAFI_MPLS_LABEL = 4
+SAFI_EVPN = 70
+SAFI_MPLS_VPN = 128
+SAFI_IP_FLOWSPEC = 133
+SAFI_VPN_FLOWSPEC = 134
+
+
+#------------------------------------------------------------------------------
+class AddressFamily:
+ def __init__(self, afi, safi):
+ self.afi = afi
+ self.safi = safi
+
+ def __eq__(self, other):
+ if not isinstance(other, type(self)):
+ return False
+ return (self.afi, self.safi) == (other.afi, other.safi)
+
+ def __str__(self):
+ return f'afi: {self.afi}, safi: {self.safi}'
+
+ def __hash__(self):
+ return hash((self.afi, self.safi))
+
+
+#------------------------------------------------------------------------------
+class AF:
+ IPv4_UNICAST = AddressFamily(AFI_IP, SAFI_UNICAST)
+ IPv6_UNICAST = AddressFamily(AFI_IP6, SAFI_UNICAST)
+ IPv4_VPN = AddressFamily(AFI_IP, SAFI_MPLS_VPN)
+ IPv6_VPN = AddressFamily(AFI_IP6, SAFI_MPLS_VPN)
+ IPv4_MPLS = AddressFamily(AFI_IP, SAFI_MPLS_LABEL)
+ IPv6_MPLS = AddressFamily(AFI_IP6, SAFI_MPLS_LABEL)
+ IPv4_FLOWSPEC = AddressFamily(AFI_IP, SAFI_IP_FLOWSPEC)
+ IPv6_FLOWSPEC = AddressFamily(AFI_IP6, SAFI_IP_FLOWSPEC)
+ VPNv4_FLOWSPEC = AddressFamily(AFI_IP, SAFI_VPN_FLOWSPEC)
+ VPNv6_FLOWSPEC = AddressFamily(AFI_IP6, SAFI_VPN_FLOWSPEC)
+ L2EVPN = AddressFamily(AFI_L2VPN, SAFI_EVPN)
+ L2VPN_FLOWSPEC = AddressFamily(AFI_L2VPN, SAFI_VPN_FLOWSPEC)
diff --git a/tests/topotests/lib/bmp_collector/bgp/update/nlri.py b/tests/topotests/lib/bmp_collector/bgp/update/nlri.py
new file mode 100644
index 0000000..c1720f1
--- /dev/null
+++ b/tests/topotests/lib/bmp_collector/bgp/update/nlri.py
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: ISC
+
+# Copyright 2023 6WIND S.A.
+# Authored by Farid Mihoub <farid.mihoub@6wind.com>
+#
+import ipaddress
+import struct
+
+from .af import AddressFamily, AF
+from .rd import RouteDistinguisher
+
+
+def decode_label(label):
+ # from FRR
+ # FRR encodes just one label
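+ # Worked example (hypothetical bytes): the 3-byte field b'\x00\x01\x01'
+ # carries label 16 with the bottom-of-stack bit set:
+ # (0x00 << 12) | (0x01 << 4) | ((0x01 & 0xf0) >> 4) == 16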
+ return (label[0] << 12) | (label[1] << 4) | (label[2] & 0xf0) >> 4
+
+def padding(databin, len_):
+ """
+ Assumption:
+ One nlri per update/withdraw message, so we can add
+ a padding to the prefix without worrying about its length
+ """
+ if len(databin) >= len_:
+ return databin
+ return databin + b'\0' * (len_ - len(databin))
+
+def dissect_nlri(nlri_data, afi, safi):
+ """
+ Extract nlri information based on the address family
+ """
+ addr_family = AddressFamily(afi, safi)
+ if addr_family == AF.IPv6_VPN:
+ return NlriIPv6Vpn.parse(nlri_data)
+ elif addr_family == AF.IPv4_VPN:
+ return NlriIPv4Vpn.parse(nlri_data)
+ elif addr_family == AF.IPv6_UNICAST:
+ return NlriIPv6Unicast.parse(nlri_data)
+
+ return {'ip_prefix': 'Unknown'}
+
+
+#------------------------------------------------------------------------------
+class NlriIPv4Unicast:
+
+ @staticmethod
+ def parse(data):
+ """parses prefixes from withdrawn_routes or nrli data"""
+ (prefix_len,) = struct.unpack_from('!B', data)
+ prefix = padding(data[1:], 4)
+
+ return {'ip_prefix': f'{ipaddress.IPv4Address(prefix)}/{prefix_len}'}
+
+
+#------------------------------------------------------------------------------
+class NlriIPv6Unicast:
+ @staticmethod
+ def parse(data):
+ """parses prefixes from withdrawn_routes or nrli data"""
+ (prefix_len,) = struct.unpack_from('!B', data)
+ prefix = padding(data[1:], 16)
+
+ return {'ip_prefix': f'{ipaddress.IPv6Address(prefix)}/{prefix_len}'}
+
+
+#------------------------------------------------------------------------------
+class NlriIPv4Vpn:
+ UNPACK_STR = '!B3s8s'
+
+ @classmethod
+ def parse(cls, data):
+ (bit_len, label, rd) = struct.unpack_from(cls.UNPACK_STR, data)
+ offset = struct.calcsize(cls.UNPACK_STR)
+
+ ipv4 = padding(data[offset:], 4)
+ # prefix_len = total_bits_len - label_bits_len - rd_bits_len
+ prefix_len = bit_len - 3*8 - 8*8
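+ # e.g. for a VPNv4 /24 (hypothetical): bit_len = 24 (label) + 64 (RD)
+ # + 24 (prefix) = 112, so prefix_len = 112 - 24 - 64 = 24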
+ return {
+ 'label': decode_label(label),
+ 'rd': str(RouteDistinguisher(rd)),
+ 'ip_prefix': f'{ipaddress.IPv4Address(ipv4)}/{prefix_len}',
+ }
+
+
+#------------------------------------------------------------------------------
+class NlriIPv6Vpn:
+ UNPACK_STR = '!B3s8s'
+
+ @classmethod
+ def parse(cls, data):
+ # rfc 3107, 8277
+ (bit_len, label, rd) = struct.unpack_from(cls.UNPACK_STR, data)
+ offset = struct.calcsize(cls.UNPACK_STR)
+
+ ipv6 = padding(data[offset:], 16)
+ prefix_len = bit_len - 3*8 - 8*8
+ return {
+ 'label': decode_label(label),
+ 'rd': str(RouteDistinguisher(rd)),
+ 'ip_prefix': f'{ipaddress.IPv6Address(ipv6)}/{prefix_len}',
+ }
+
+
+#------------------------------------------------------------------------------
+class NlriIPv4Mpls:
+ pass
+
+
+#------------------------------------------------------------------------------
+class NlriIPv6Mpls:
+ pass
+
+
+#------------------------------------------------------------------------------
+class NlriIPv4FlowSpec:
+ pass
+
+
+#------------------------------------------------------------------------------
+class NlriIPv6FlowSpec:
+ pass
+
+
+#------------------------------------------------------------------------------
+class NlriVpn4FlowSpec:
+ pass
+
+
+#------------------------------------------------------------------------------
+class NlriVpn6FlowSpec:
+ pass
+
+
+#------------------------------------------------------------------------------
+class NlriL2EVPN:
+ pass
+
+#------------------------------------------------------------------------------
+class NlriL2VPNFlowSpec:
+ pass
diff --git a/tests/topotests/lib/bmp_collector/bgp/update/path_attributes.py b/tests/topotests/lib/bmp_collector/bgp/update/path_attributes.py
new file mode 100644
index 0000000..6e82e9c
--- /dev/null
+++ b/tests/topotests/lib/bmp_collector/bgp/update/path_attributes.py
@@ -0,0 +1,304 @@
+# SPDX-License-Identifier: ISC
+
+# Copyright 2023 6WIND S.A.
+# Authored by Farid Mihoub <farid.mihoub@6wind.com>
+#
+import struct
+import ipaddress
+
+from . import nlri as NLRI
+from .af import AddressFamily, AF
+from .rd import RouteDistinguisher
+
+
+PATH_ATTR_FLAG_OPTIONAL = 1 << 7
+PATH_ATTR_FLAG_TRANSITIVE = 1 << 6
+PATH_ATTR_FLAG_PARTIAL = 1 << 5
+PATH_ATTR_FLAG_EXTENDED_LENGTH = 1 << 4
+
+PATH_ATTR_TYPE_ORIGIN = 1
+PATH_ATTR_TYPE_AS_PATH = 2
+PATH_ATTR_TYPE_NEXT_HOP = 3
+PATH_ATTR_TYPE_MULTI_EXIT_DISC = 4
+PATH_ATTR_TYPE_LOCAL_PREF = 5
+PATH_ATTR_TYPE_ATOMIC_AGGREGATE = 6
+PATH_ATTR_TYPE_AGGREGATOR = 7
+PATH_ATTR_TYPE_COMMUNITIES = 8
+PATH_ATTR_TYPE_ORIGINATOR_ID = 9
+PATH_ATTR_TYPE_CLUSTER_LIST = 10
+PATH_ATTR_TYPE_MP_REACH_NLRI = 14
+PATH_ATTR_TYPE_MP_UNREACH_NLRI = 15
+PATH_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
+PATH_ATTR_TYPE_AS4_PATH = 17
+PATH_ATTR_TYPE_AS4_AGGREGATOR = 18
+ PATH_ATTR_TYPE_PMSI_TUNNEL_ATTRIBUTE = 22
+
+ORIGIN_IGP = 0x00
+ORIGIN_EGP = 0x01
+ORIGIN_INCOMPLETE = 0x02
+
+
+#------------------------------------------------------------------------------
+class PathAttribute:
+ PATH_ATTRS = {}
+ UNKNOWN_ATTR = None
+ UNPACK_STR = '!BB'
+
+ @classmethod
+ def register_path_attr(cls, path_attr):
+ def _register_path_attr(subcls):
+ cls.PATH_ATTRS[path_attr] = subcls
+ return subcls
+ return _register_path_attr
+
+ @classmethod
+ def lookup_path_attr(cls, type_code):
+ return cls.PATH_ATTRS.get(type_code, cls.UNKNOWN_ATTR)
+
+ @classmethod
+ def dissect(cls, data):
+ flags, type_code = struct.unpack_from(cls.UNPACK_STR, data)
+ offset = struct.calcsize(cls.UNPACK_STR)
+
+ # get attribute length
+ attr_len_str = '!H' if (flags & PATH_ATTR_FLAG_EXTENDED_LENGTH) else '!B'
+
+ (attr_len,) = struct.unpack_from(attr_len_str, data[offset:])
+
+ offset += struct.calcsize(attr_len_str)
+
+ path_attr_cls = cls.lookup_path_attr(type_code)
+ if path_attr_cls == cls.UNKNOWN_ATTR:
+ return data[offset + attr_len:], None
+
+ return data[offset+attr_len:], path_attr_cls.dissect(data[offset:offset+attr_len])
+
+
+#------------------------------------------------------------------------------
+@PathAttribute.register_path_attr(PATH_ATTR_TYPE_ORIGIN)
+class PathAttrOrigin:
+ ORIGIN_STR = {
+ ORIGIN_IGP: 'IGP',
+ ORIGIN_EGP: 'EGP',
+ ORIGIN_INCOMPLETE: 'INCOMPLETE',
+ }
+
+ @classmethod
+ def dissect(cls, data):
+ (origin,) = struct.unpack_from('!B', data)
+
+ return {'origin': cls.ORIGIN_STR.get(origin, 'UNKNOWN')}
+
+
+#------------------------------------------------------------------------------
+@PathAttribute.register_path_attr(PATH_ATTR_TYPE_AS_PATH)
+class PathAttrAsPath:
+ AS_PATH_TYPE_SET = 0x01
+ AS_PATH_TYPE_SEQUENCE = 0x02
+
+ @staticmethod
+ def get_asn_len(asns):
+ """XXX: Add this nightmare to determine the ASN length"""
+ pass
+
+ @classmethod
+ def dissect(cls, data):
+ (_type, _len) = struct.unpack_from('!BB', data)
+ data = data[2:]
+
+ _type_str = 'Ordered' if _type == cls.AS_PATH_TYPE_SEQUENCE else 'Raw'
+ segment = []
+ while data:
+ (asn,) = struct.unpack_from('!I', data)
+ segment.append(asn)
+ data = data[4:]
+
+ return {'as_path': ' '.join(str(a) for a in segment)}
+
+
+#------------------------------------------------------------------------------
+@PathAttribute.register_path_attr(PATH_ATTR_TYPE_NEXT_HOP)
+class PathAttrNextHop:
+ @classmethod
+ def dissect(cls, data):
+ (nexthop,) = struct.unpack_from('!4s', data)
+ return {'bgp_nexthop': str(ipaddress.IPv4Address(nexthop))}
+
+
+#------------------------------------------------------------------------------
+class PathAttrMultiExitDisc:
+ pass
+
+
+#------------------------------------------------------------------------------
+@PathAttribute.register_path_attr(PATH_ATTR_TYPE_MP_REACH_NLRI)
+class PathAttrMpReachNLRI:
+ """
+ +---------------------------------------------------------+
+ | Address Family Identifier (2 octets) |
+ +---------------------------------------------------------+
+ | Subsequent Address Family Identifier (1 octet) |
+ +---------------------------------------------------------+
+ | Length of Next Hop Network Address (1 octet) |
+ +---------------------------------------------------------+
+ | Network Address of Next Hop (variable) |
+ +---------------------------------------------------------+
+ | Number of SNPAs (1 octet) |
+ +---------------------------------------------------------+
+ | Length of first SNPA(1 octet) |
+ +---------------------------------------------------------+
+ | First SNPA (variable) |
+ +---------------------------------------------------------+
+ | Length of second SNPA (1 octet) |
+ +---------------------------------------------------------+
+ | Second SNPA (variable) |
+ +---------------------------------------------------------+
+ | ... |
+ +---------------------------------------------------------+
+ | Length of Last SNPA (1 octet) |
+ +---------------------------------------------------------+
+ | Last SNPA (variable) |
+ +---------------------------------------------------------+
+ | Network Layer Reachability Information (variable) |
+ +---------------------------------------------------------+
+ """
+ UNPACK_STR = '!HBB'
+ NLRI_RESERVED_LEN = 1
+
+ @staticmethod
+ def dissect_nexthop(nexthop_data, nexthop_len):
+ msg = {}
+ if nexthop_len == 4:
+ # IPv4
+ (ipv4,) = struct.unpack_from('!4s', nexthop_data)
+ msg['nxhp_ip'] = str(ipaddress.IPv4Address(ipv4))
+ elif nexthop_len == 12:
+ # RD + IPv4
+ (rd, ipv4) = struct.unpack_from('!8s4s', nexthop_data)
+ msg['nxhp_ip'] = str(ipaddress.IPv4Address(ipv4))
+ msg['nxhp_rd'] = str(RouteDistinguisher(rd))
+ elif nexthop_len == 16:
+ # IPv6
+ (ipv6,) = struct.unpack_from('!16s', nexthop_data)
+ msg['nxhp_ip'] = str(ipaddress.IPv6Address(ipv6))
+ elif nexthop_len == 24:
+ # RD + IPv6
+ (rd, ipv6) = struct.unpack_from('!8s16s', nexthop_data)
+ msg['nxhp_ip'] = str(ipaddress.IPv6Address(ipv6))
+ msg['nxhp_rd'] = str(RouteDistinguisher(rd))
+ elif nexthop_len == 32:
+ # IPv6 + IPv6 link-local
+ (ipv6, link_local) = struct.unpack_from('!16s16s', nexthop_data)
+ msg['nxhp_ip'] = str(ipaddress.IPv6Address(ipv6))
+ msg['nxhp_link-local'] = str(ipaddress.IPv6Address(link_local))
+ elif nexthop_len == 48:
+ # RD + IPv6 + RD + IPv6 link-local
+ u_str = '!8s16s8s16s'
+ (rd1, ipv6, rd2, link_local) = struct.unpack_from(u_str, nexthop_data)
+ msg['nxhp_rd1'] = str(RouteDistinguisher(rd1))
+ msg['nxhp_ip'] = str(ipaddress.IPv6Address(ipv6))
+ msg['nxhp_rd2'] = str(RouteDistinguisher(rd2))
+ msg['nxhp_link-local'] = str(ipaddress.IPv6Address(link_local))
+
+ return msg
+
+ @staticmethod
+ def dissect_snpa(snpa_data):
+ pass
+
+ @classmethod
+ def dissect(cls, data):
+ (afi, safi, nexthop_len) = struct.unpack_from(cls.UNPACK_STR, data)
+ offset = struct.calcsize(cls.UNPACK_STR)
+ msg = {'afi': afi, 'safi': safi}
+
+ # dissect nexthop
+ nexthop_data = data[offset: offset + nexthop_len]
+ nexthop = cls.dissect_nexthop(nexthop_data, nexthop_len)
+ msg.update(nexthop)
+
+ offset += nexthop_len
+ # dissect snpa or just reserved
+ offset += 1
+ # dissect nlri
+ nlri = NLRI.dissect_nlri(data[offset:], afi, safi)
+ msg.update(nlri)
+
+ return msg
+
+
+#------------------------------------------------------------------------------
+@PathAttribute.register_path_attr(PATH_ATTR_TYPE_MP_UNREACH_NLRI)
+class PathAttrMpUnReachNLRI:
+ """
+ +---------------------------------------------------------+
+ | Address Family Identifier (2 bytes) |
+ +---------------------------------------------------------+
+ | Subsequent Address Family Identifier (1 byte) |
+ +---------------------------------------------------------+
+ | Withdrawn Routes (variable) |
+ +---------------------------------------------------------+
+ """
+ UNPACK_STR = '!HB'
+
+ @classmethod
+ def dissect(cls, data):
+ (afi, safi) = struct.unpack_from(cls.UNPACK_STR, data)
+ offset = struct.calcsize(cls.UNPACK_STR)
+ msg = {'bmp_log_type': 'withdraw', 'afi': afi, 'safi': safi}
+
+ if data[offset:]:
+ # dissect withdrawn_routes
+ msg.update(NLRI.dissect_nlri(data[offset:], afi, safi))
+
+ return msg
+
+
+#------------------------------------------------------------------------------
+class PathAttrLocalPref:
+ pass
+
+
+#------------------------------------------------------------------------------
+class PathAttrAtomicAgregate:
+ pass
+
+
+#------------------------------------------------------------------------------
+class PathAttrAggregator:
+ pass
+
+
+#------------------------------------------------------------------------------
+class PathAttrCommunities:
+ pass
+
+
+#------------------------------------------------------------------------------
+class PathAttrOriginatorID:
+ pass
+
+
+#------------------------------------------------------------------------------
+class PathAttrClusterList:
+ pass
+
+
+#------------------------------------------------------------------------------
+class PathAttrExtendedCommunities:
+ pass
+
+
+#------------------------------------------------------------------------------
+class PathAttrPMSITunnel:
+ pass
+
+
+#------------------------------------------------------------------------------
+class PathAttrLinkState:
+ pass
+
+
+#------------------------------------------------------------------------------
+class PathAttrLargeCommunities:
+ pass
diff --git a/tests/topotests/lib/bmp_collector/bgp/update/rd.py b/tests/topotests/lib/bmp_collector/bgp/update/rd.py
new file mode 100644
index 0000000..c382fa8
--- /dev/null
+++ b/tests/topotests/lib/bmp_collector/bgp/update/rd.py
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: ISC
+
+# Copyright 2023 6WIND S.A.
+# Authored by Farid Mihoub <farid.mihoub@6wind.com>
+#
+import ipaddress
+import struct
+
+
+#------------------------------------------------------------------------------
+class RouteDistinguisher:
+ """
+ type 0:
+ +---------------------------------------------------------------------+
+ + type=0 (2 bytes)| Administrator subfield | Assigned number subfield |
+ + | AS number (2 bytes) | Service Provider (4 bytes)|
+ +---------------------------------------------------------------------+
+
+ type 1:
+ +---------------------------------------------------------------------+
+ + type=1 (2 bytes)| Administrator subfield | Assigned number subfield |
+ + | IPv4 (4 bytes) | Service Provider (2 bytes)|
+ +---------------------------------------------------------------------+
+
+ type 2:
+ +-------------------------------------------------------------------------+
+ + type=2 (2 bytes)| Administrator subfield | Assigned number subfield |
+ + | 4-bytes AS number (4 bytes)| Service Provider (2 bytes)|
+ +-------------------------------------------------------------------------+
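+
+ Example (illustrative bytes): a type-0 RD encoded as
+ b'\x00\x00\xfd\xe8\x00\x00\x00\x64' renders as '65000:100'.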
+ """
+ def __init__(self, rd):
+ self.rd = rd
+ self.as_number = None
+ self.admin_ipv4 = None
+ self.four_bytes_as = None
+ self.assigned_sp = None
+ self.repr_str = ''
+ self.dissect()
+
+ def dissect(self):
+ (rd_type,) = struct.unpack_from('!H', self.rd)
+ if rd_type == 0:
+ (self.as_number,
+ self.assigned_sp) = struct.unpack_from('!HI', self.rd[2:])
+ self.repr_str = f'{self.as_number}:{self.assigned_sp}'
+
+ elif rd_type == 1:
+ (self.admin_ipv4,
+ self.assigned_sp) = struct.unpack_from('!IH', self.rd[2:])
+ ipv4 = str(ipaddress.IPv4Address(self.admin_ipv4))
+ self.repr_str = f'{ipv4}:{self.assigned_sp}'
+
+ elif rd_type == 2:
+ (self.four_bytes_as,
+ self.assigned_sp) = struct.unpack_from('!IH', self.rd[2:])
+ self.repr_str = f'{self.four_bytes_as}:{self.assigned_sp}'
+
+ def __str__(self):
+ return self.repr_str
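+
+# Example (illustrative): a type-0 RD is a 2-byte type, a 2-byte ASN and a
+# 4-byte assigned number, rendered as "ASN:number"; the values are made up.
+#
+#   rd = struct.pack('!HHI', 0, 65000, 100)
+#   str(RouteDistinguisher(rd))  # -> '65000:100'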
diff --git a/tests/topotests/lib/bmp_collector/bmp.py b/tests/topotests/lib/bmp_collector/bmp.py
new file mode 100644
index 0000000..b07329c
--- /dev/null
+++ b/tests/topotests/lib/bmp_collector/bmp.py
@@ -0,0 +1,420 @@
+# SPDX-License-Identifier: ISC
+
+# Copyright 2023 6WIND S.A.
+# Authored by Farid Mihoub <farid.mihoub@6wind.com>
+#
+"""
+BMP main module:
+  - dissect monitoring messages to extract updated/withdrawn prefixes
+  - XXX: missing RFC references
+  - XXX: more bmp message types to dissect
+  - XXX: complete bgp message dissection
+"""
+import datetime
+import ipaddress
+import json
+import os
+import struct
+
+from bgp.update import BGPUpdate
+from bgp.update.rd import RouteDistinguisher
+
+
+SEQ = 0
+LOG_DIR = "/var/log/"
+LOG_FILE = "/var/log/bmp.log"
+
+IS_ADJ_RIB_OUT = 1 << 4
+IS_AS_PATH = 1 << 5
+IS_POST_POLICY = 1 << 6
+IS_IPV6 = 1 << 7
+IS_FILTERED = 1 << 7
+
+if not os.path.exists(LOG_DIR):
+ os.makedirs(LOG_DIR)
+
+def bin2str_ipaddress(ip_bytes, is_ipv6=False):
+ if is_ipv6:
+ return str(ipaddress.IPv6Address(ip_bytes))
+ return str(ipaddress.IPv4Address(ip_bytes[-4:]))
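+
+# Example: BMP carries peer addresses as 16 bytes; an IPv4 peer sits in the
+# low-order 4 bytes, which is why the v4 branch keeps only ip_bytes[-4:].
+#   bin2str_ipaddress(b'\x00' * 12 + b'\x0a\x00\x00\x01')  # -> '10.0.0.1'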
+
+def log2file(logs):
+ """
+    XXX: extract the useful information and save it in a flat dictionary
+ """
+ with open(LOG_FILE, 'a') as f:
+ f.write(json.dumps(logs) + "\n")
+
+
+#------------------------------------------------------------------------------
+class BMPCodes:
+ """
+ XXX: complete the list, provide RFCs.
+ """
+ VERSION = 0x3
+
+ BMP_MSG_TYPE_ROUTE_MONITORING = 0x00
+ BMP_MSG_TYPE_STATISTICS_REPORT = 0x01
+ BMP_MSG_TYPE_PEER_DOWN_NOTIFICATION = 0x02
+ BMP_MSG_TYPE_PEER_UP_NOTIFICATION = 0x03
+ BMP_MSG_TYPE_INITIATION = 0x04
+ BMP_MSG_TYPE_TERMINATION = 0x05
+ BMP_MSG_TYPE_ROUTE_MIRRORING = 0x06
+ BMP_MSG_TYPE_ROUTE_POLICY = 0x64
+
+ # initiation message types
+ BMP_INIT_INFO_STRING = 0x00
+ BMP_INIT_SYSTEM_DESCRIPTION = 0x01
+ BMP_INIT_SYSTEM_NAME = 0x02
+ BMP_INIT_VRF_TABLE_NAME = 0x03
+ BMP_INIT_ADMIN_LABEL = 0x04
+
+ # peer types
+ BMP_PEER_GLOBAL_INSTANCE = 0x00
+ BMP_PEER_RD_INSTANCE = 0x01
+ BMP_PEER_LOCAL_INSTANCE = 0x02
+ BMP_PEER_LOC_RIB_INSTANCE = 0x03
+
+ # peer header flags
+ BMP_PEER_FLAG_IPV6 = 0x80
+ BMP_PEER_FLAG_POST_POLICY = 0x40
+ BMP_PEER_FLAG_AS_PATH = 0x20
+ BMP_PEER_FLAG_ADJ_RIB_OUT = 0x10
+
+ # peer loc-rib flag
+ BMP_PEER_FLAG_LOC_RIB = 0x80
+ BMP_PEER_FLAG_LOC_RIB_RES = 0x7F
+
+ # statistics type
+ BMP_STAT_PREFIX_REJ = 0x00
+ BMP_STAT_PREFIX_DUP = 0x01
+ BMP_STAT_WITHDRAW_DUP = 0x02
+ BMP_STAT_CLUSTER_LOOP = 0x03
+ BMP_STAT_AS_LOOP = 0x04
+ BMP_STAT_INV_ORIGINATOR = 0x05
+ BMP_STAT_AS_CONFED_LOOP = 0x06
+ BMP_STAT_ROUTES_ADJ_RIB_IN = 0x07
+ BMP_STAT_ROUTES_LOC_RIB = 0x08
+ BMP_STAT_ROUTES_PER_ADJ_RIB_IN = 0x09
+ BMP_STAT_ROUTES_PER_LOC_RIB = 0x0A
+ BMP_STAT_UPDATE_TREAT = 0x0B
+ BMP_STAT_PREFIXES_TREAT = 0x0C
+ BMP_STAT_DUPLICATE_UPDATE = 0x0D
+ BMP_STAT_ROUTES_PRE_ADJ_RIB_OUT = 0x0E
+ BMP_STAT_ROUTES_POST_ADJ_RIB_OUT = 0x0F
+ BMP_STAT_ROUTES_PRE_PER_ADJ_RIB_OUT = 0x10
+ BMP_STAT_ROUTES_POST_PER_ADJ_RIB_OUT = 0x11
+
+    # peer down reason code
+    BMP_PEER_DOWN_LOCAL_NOTIFY = 0x01
+    BMP_PEER_DOWN_LOCAL_NO_NOTIFY = 0x02
+    BMP_PEER_DOWN_REMOTE_NOTIFY = 0x03
+    BMP_PEER_DOWN_REMOTE_NO_NOTIFY = 0x04
+    BMP_PEER_DOWN_INFO_NO_LONGER = 0x05
+    BMP_PEER_DOWN_SYSTEM_CLOSED = 0x06
+
+    # termination message types
+    BMP_TERM_TYPE_STRING = 0x00
+    BMP_TERM_TYPE_REASON = 0x01
+
+ # termination reason code
+ BMP_TERM_REASON_ADMIN_CLOSE = 0x00
+ BMP_TERM_REASON_UNSPECIFIED = 0x01
+ BMP_TERM_REASON_RESOURCES = 0x02
+ BMP_TERM_REASON_REDUNDANT = 0x03
+ BMP_TERM_REASON_PERM_CLOSE = 0x04
+
+ # policy route tlv
+ BMP_ROUTE_POLICY_TLV_VRF = 0x00
+    BMP_ROUTE_POLICY_TLV_POLICY = 0x01
+ BMP_ROUTE_POLICY_TLV_PRE_POLICY = 0x02
+ BMP_ROUTE_POLICY_TLV_POST_POLICY = 0x03
+ BMP_ROUTE_POLICY_TLV_STRING = 0x04
+
+
+#------------------------------------------------------------------------------
+class BMPMsg:
+ """
+    XXX: should we move register_msg_type and lookup_msg_type
+    to a generic Type class?
+ """
+ TYPES = {}
+ UNKNOWN_TYPE = None
+ HDR_STR = '!BIB'
+ MIN_LEN = struct.calcsize(HDR_STR)
+ TYPES_STR = {
+ BMPCodes.BMP_MSG_TYPE_INITIATION: 'initiation',
+ BMPCodes.BMP_MSG_TYPE_PEER_DOWN_NOTIFICATION: 'peer down notification',
+ BMPCodes.BMP_MSG_TYPE_PEER_UP_NOTIFICATION: 'peer up notification',
+ BMPCodes.BMP_MSG_TYPE_ROUTE_MONITORING: 'route monitoring',
+ BMPCodes.BMP_MSG_TYPE_STATISTICS_REPORT: 'statistics report',
+ BMPCodes.BMP_MSG_TYPE_TERMINATION: 'termination',
+ BMPCodes.BMP_MSG_TYPE_ROUTE_MIRRORING: 'route mirroring',
+ BMPCodes.BMP_MSG_TYPE_ROUTE_POLICY: 'route policy',
+ }
+
+ @classmethod
+ def register_msg_type(cls, msgtype):
+ def _register_type(subcls):
+ cls.TYPES[msgtype] = subcls
+ return subcls
+ return _register_type
+
+ @classmethod
+ def lookup_msg_type(cls, msgtype):
+ return cls.TYPES.get(msgtype, cls.UNKNOWN_TYPE)
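+
+    # Example (illustrative): message classes self-register by BMP type:
+    #   @BMPMsg.register_msg_type(BMPCodes.BMP_MSG_TYPE_INITIATION)
+    #   class BMPInitiation: ...
+    # after which lookup_msg_type(0x04) resolves to BMPInitiation.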
+
+ @classmethod
+ def dissect_header(cls, data):
+ """
+ 0 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Version |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Message Length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Message Type |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ """
+        if len(data) < cls.MIN_LEN:
+            return None
+        _version, _len, _type = struct.unpack(cls.HDR_STR, data[0:cls.MIN_LEN])
+        return _version, _len, _type
+
+ @classmethod
+ def dissect(cls, data):
+ global SEQ
+ version, msglen, msgtype = cls.dissect_header(data)
+
+ msg_data = data[cls.MIN_LEN:msglen]
+ data = data[msglen:]
+
+ if version != BMPCodes.VERSION:
+ # XXX: log something
+ return data
+
+ msg_cls = cls.lookup_msg_type(msgtype)
+ if msg_cls == cls.UNKNOWN_TYPE:
+ # XXX: log something
+ return data
+
+ msg_cls.MSG_LEN = msglen - cls.MIN_LEN
+ logs = msg_cls.dissect(msg_data)
+ logs["seq"] = SEQ
+ log2file(logs)
+ SEQ += 1
+
+ return data
+
+
+#------------------------------------------------------------------------------
+class BMPPerPeerMessage:
+ """
+ 0 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Peer Type | Peer Flags |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Peer Address (16 bytes) |
+ ~ ~
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Peer AS |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Peer BGP ID |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Timestamp (seconds) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Timestamp (microseconds) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ """
+ PEER_UNPACK_STR = '!BB8s16sI4sII'
+ PEER_TYPE_STR = {
+ BMPCodes.BMP_PEER_GLOBAL_INSTANCE: 'global instance',
+ BMPCodes.BMP_PEER_RD_INSTANCE: 'route distinguisher instance',
+ BMPCodes.BMP_PEER_LOCAL_INSTANCE: 'local instance',
+ BMPCodes.BMP_PEER_LOC_RIB_INSTANCE: 'loc-rib instance',
+ }
+
+ @classmethod
+ def dissect(cls, data):
+ (peer_type,
+ peer_flags,
+ peer_distinguisher,
+ peer_address,
+ peer_asn,
+ peer_bgp_id,
+ timestamp_secs,
+ timestamp_microsecs) = struct.unpack_from(cls.PEER_UNPACK_STR, data)
+
+ msg = {'peer_type': cls.PEER_TYPE_STR[peer_type]}
+
+ if peer_type == 0x03:
+ msg['is_filtered'] = bool(peer_flags & IS_FILTERED)
+ else:
+            # peer_flags bit layout (MSB first):
+            # ipv6, post-policy, as-path, adj-rib-out, reserved x4
+ is_adj_rib_out = bool(peer_flags & IS_ADJ_RIB_OUT)
+ is_as_path = bool(peer_flags & IS_AS_PATH)
+ is_post_policy = bool(peer_flags & IS_POST_POLICY)
+ is_ipv6 = bool(peer_flags & IS_IPV6)
+ msg['post_policy'] = is_post_policy
+ msg['ipv6'] = is_ipv6
+ msg['peer_ip'] = bin2str_ipaddress(peer_address, is_ipv6)
+
+ peer_bgp_id = bin2str_ipaddress(peer_bgp_id)
+ timestamp = float(timestamp_secs) + timestamp_microsecs * (10 ** -6)
+
+ data = data[struct.calcsize(cls.PEER_UNPACK_STR):]
+ msg.update({
+ 'peer_distinguisher': str(RouteDistinguisher(peer_distinguisher)),
+ 'peer_asn': peer_asn,
+ 'peer_bgp_id': peer_bgp_id,
+ 'timestamp': str(datetime.datetime.fromtimestamp(timestamp)),
+ })
+
+ return data, msg
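+
+    # Example: peer_flags 0xC0 has IS_IPV6 (0x80) and IS_POST_POLICY (0x40)
+    # set, so the dissected msg reports ipv6=True and post_policy=True.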
+
+
+#------------------------------------------------------------------------------
+@BMPMsg.register_msg_type(BMPCodes.BMP_MSG_TYPE_ROUTE_MONITORING)
+class BMPRouteMonitoring(BMPPerPeerMessage):
+
+ @classmethod
+ def dissect(cls, data):
+ data, peer_msg = super().dissect(data)
+ data, update_msg = BGPUpdate.dissect(data)
+ return {**peer_msg, **update_msg}
+
+
+#------------------------------------------------------------------------------
+class BMPStatisticsReport:
+ """
+ 0 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Stats Count |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Stat Type | Stat Len |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Stat Data |
+ ~ ~
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ """
+ pass
+
+
+#------------------------------------------------------------------------------
+class BMPPeerDownNotification:
+ """
+ 0 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Reason |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Data (present if Reason = 1, 2 or 3) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ """
+ pass
+
+
+#------------------------------------------------------------------------------
+@BMPMsg.register_msg_type(BMPCodes.BMP_MSG_TYPE_PEER_UP_NOTIFICATION)
+class BMPPeerUpNotification(BMPPerPeerMessage):
+ """
+ 0 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Local Address (16 bytes) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Local Port | Remote Port |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    |                      Sent OPEN Message                        |
+ ~ ~
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Received OPEN Message |
+ ~ ~
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ """
+ UNPACK_STR = '!16sHH'
+ MIN_LEN = struct.calcsize(UNPACK_STR)
+ MSG_LEN = None
+
+ @classmethod
+ def dissect(cls, data):
+ data, peer_msg = super().dissect(data)
+
+ (local_addr,
+ local_port,
+ remote_port) = struct.unpack_from(cls.UNPACK_STR, data)
+
+ msg = {
+ **peer_msg,
+ **{
+ 'local_ip': bin2str_ipaddress(local_addr, peer_msg.get('ipv6')),
+ 'local_port': int(local_port),
+ 'remote_port': int(remote_port),
+ },
+ }
+
+ # XXX: dissect the bgp open message
+
+ return msg
+
+
+#------------------------------------------------------------------------------
+@BMPMsg.register_msg_type(BMPCodes.BMP_MSG_TYPE_INITIATION)
+class BMPInitiation:
+ """
+ 0 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Information Type | Information Length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Information (variable) |
+ ~ ~
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ """
+ TLV_STR = '!HH'
+ MIN_LEN = struct.calcsize(TLV_STR)
+ FIELD_TO_STR = {
+ BMPCodes.BMP_INIT_INFO_STRING: 'information',
+ BMPCodes.BMP_INIT_ADMIN_LABEL: 'admin_label',
+ BMPCodes.BMP_INIT_SYSTEM_DESCRIPTION: 'system_description',
+ BMPCodes.BMP_INIT_SYSTEM_NAME: 'system_name',
+ BMPCodes.BMP_INIT_VRF_TABLE_NAME: 'vrf_table_name',
+ }
+
+ @classmethod
+ def dissect(cls, data):
+ msg = {}
+ while len(data) > cls.MIN_LEN:
+ _type, _len = struct.unpack_from(cls.TLV_STR, data[0:cls.MIN_LEN])
+ _value = data[cls.MIN_LEN: cls.MIN_LEN + _len].decode()
+
+ msg[cls.FIELD_TO_STR[_type]] = _value
+ data = data[cls.MIN_LEN + _len:]
+
+ return msg
+
+
+#------------------------------------------------------------------------------
+class BMPTermination:
+ """
+ 0 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Information Type | Information Length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Information (variable) |
+ ~ ~
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ """
+ pass
+
+
+#------------------------------------------------------------------------------
+class BMPRouteMirroring:
+ pass
+
+
+#------------------------------------------------------------------------------
+class BMPRoutePolicy:
+ pass
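+
+
+# Example (illustrative sketch): feeding a hand-built initiation message
+# through BMPMsg.dissect; the TLV carries a "system name" (type 0x02) and the
+# values are made up. The dissected message is appended to LOG_FILE.
+#
+#   tlv = struct.pack('!HH', BMPCodes.BMP_INIT_SYSTEM_NAME, 3) + b'frr'
+#   hdr = struct.pack(BMPMsg.HDR_STR, BMPCodes.VERSION,
+#                     BMPMsg.MIN_LEN + len(tlv),
+#                     BMPCodes.BMP_MSG_TYPE_INITIATION)
+#   BMPMsg.dissect(hdr + tlv)  # -> logs {"system_name": "frr", "seq": 0}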
diff --git a/tests/topotests/lib/bmp_collector/bmpserver b/tests/topotests/lib/bmp_collector/bmpserver
new file mode 100755
index 0000000..25b4a52
--- /dev/null
+++ b/tests/topotests/lib/bmp_collector/bmpserver
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: ISC
+
+# Copyright 2023 6WIND S.A.
+# Authored by Farid Mihoub <farid.mihoub@6wind.com>
+#
+import argparse
+# XXX: something more reliable should be used; "Twisted" would be a great choice.
+import socket
+import sys
+
+from bmp import BMPMsg
+
+BGP_MAX_SIZE = 4096
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-a", "--address", type=str, default="0.0.0.0")
+parser.add_argument("-p", "--port", type=int, default=1789)
+
+def main():
+ args = parser.parse_args()
+ ADDRESS, PORT = args.address, args.port
+
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ s.bind((ADDRESS, PORT))
+ s.listen()
+ connection, _ = s.accept()
+
+ try:
+ while True:
+ data = connection.recv(BGP_MAX_SIZE)
+ while len(data) > BMPMsg.MIN_LEN:
+ data = BMPMsg.dissect(data)
+ except Exception as e:
+ # XXX: do something
+ pass
+ except KeyboardInterrupt:
+ # XXX: do something
+ pass
+ finally:
+ connection.close()
+
+if __name__ == "__main__":
+ sys.exit(main())
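+
+# Usage sketch: ./bmpserver -a 192.0.2.1 -p 1789
+# The server accepts a single BMP session and appends each dissected message
+# as a JSON line to /var/log/bmp.log (see bmp.LOG_FILE).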
diff --git a/tests/topotests/lib/checkping.py b/tests/topotests/lib/checkping.py
new file mode 100644
index 0000000..aaa6164
--- /dev/null
+++ b/tests/topotests/lib/checkping.py
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright 2023 Quentin Young
+
+import functools
+from lib.topogen import get_topogen
+from lib.topolog import logger
+from lib import topotest
+
+
+def check_ping(name, dest_addr, expect_connected, count, wait):
+ """
+    Assert that a ping from router `name` to dest_addr succeeds or fails as expected
+ * 'name': the router to set the ping from
+ * 'dest_addr': The destination ip address to ping
+ * 'expect_connected': True if ping is expected to pass
+ * 'count': how many echos to send
+ * 'wait': how long ping should wait to receive all replies
+ """
+
+ def _check(name, dest_addr, match):
+ tgen = get_topogen()
+ output = tgen.gears[name].run("ping {} -c 1 -w 1".format(dest_addr))
+ logger.info(output)
+ if match not in output:
+ return "ping fail"
+
+ match = ", {} packet loss".format("0%" if expect_connected else "100%")
+ logger.info("[+] check {} {} {}".format(name, dest_addr, match))
+ tgen = get_topogen()
+ func = functools.partial(_check, name, dest_addr, match)
+ success, result = topotest.run_and_expect(func, None, count=count, wait=wait)
+    assert result is None, "Ping from {} to {} did not behave as expected".format(
+        name, dest_addr
+    )
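+
+
+# Example (sketch): expect r1 to reach 192.0.2.1, retrying up to 5 times with
+# 1 second between attempts:
+#   check_ping("r1", "192.0.2.1", True, 5, 1)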
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
new file mode 100644
index 0000000..e19d96f
--- /dev/null
+++ b/tests/topotests/lib/common_config.py
@@ -0,0 +1,4960 @@
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+
+import functools
+import ipaddress
+import json
+import os
+import platform
+import socket
+import subprocess
+import sys
+import traceback
+from collections import OrderedDict
+from copy import deepcopy
+from datetime import datetime, timedelta
+from functools import wraps
+from re import search as re_search
+from time import sleep
+
+try:
+ # Imports from python2
+ import ConfigParser as configparser
+except ImportError:
+ # Imports from python3
+ import configparser
+
+from lib.micronet import comm_error
+from lib.topogen import TopoRouter, get_topogen
+from lib.topolog import get_logger, logger
+from lib.topotest import frr_unicode, interface_set_status, version_cmp
+from munet.testing.util import pause_test
+
+from lib import topotest
+
+FRRCFG_FILE = "frr_json.conf"
+FRRCFG_BKUP_FILE = "frr_json_initial.conf"
+
+ERROR_LIST = ["Malformed", "Failure", "Unknown", "Incomplete"]
+
+####
+CD = os.path.dirname(os.path.realpath(__file__))
+PYTESTINI_PATH = os.path.join(CD, "../pytest.ini")
+
+# NOTE: to save execution logs to log file frrtest_log_dir must be configured
+# in `pytest.ini`.
+config = configparser.ConfigParser()
+config.read(PYTESTINI_PATH)
+
+config_section = "topogen"
+
+# Debug logs for daemons
+DEBUG_LOGS = {
+ "pimd": [
+ "debug msdp events",
+ "debug msdp packets",
+ "debug igmp events",
+ "debug igmp trace",
+ "debug mroute",
+ "debug mroute detail",
+ "debug pim events",
+ "debug pim packets",
+ "debug pim trace",
+ "debug pim zebra",
+ "debug pim bsm",
+ "debug pim packets joins",
+ "debug pim packets register",
+ "debug pim nht",
+ ],
+ "pim6d": [
+ "debug pimv6 events",
+ "debug pimv6 packets",
+ "debug pimv6 packet-dump send",
+ "debug pimv6 packet-dump receive",
+ "debug pimv6 trace",
+ "debug pimv6 trace detail",
+ "debug pimv6 zebra",
+ "debug pimv6 bsm",
+ "debug pimv6 packets hello",
+ "debug pimv6 packets joins",
+ "debug pimv6 packets register",
+ "debug pimv6 nht",
+ "debug pimv6 nht detail",
+ "debug mroute6",
+ "debug mroute6 detail",
+ "debug mld events",
+ "debug mld packets",
+ "debug mld trace",
+    ],
+    "bgpd": [
+        "debug bgp neighbor-events",
+        "debug bgp updates",
+        "debug bgp zebra",
+        "debug bgp nht",
+        "debug bgp graceful-restart",
+        "debug bgp update-groups",
+        "debug bgp vpn leak-from-vrf",
+        "debug bgp vpn leak-to-vrf",
+        "debug vrf",
+    ],
+ "zebra": [
+ "debug zebra events",
+ "debug zebra rib",
+ "debug zebra vxlan",
+ "debug zebra nht",
+ ],
+ "mgmt": [],
+ "ospf": [
+ "debug ospf event",
+ "debug ospf ism",
+ "debug ospf lsa",
+ "debug ospf nsm",
+ "debug ospf nssa",
+ "debug ospf packet all",
+ "debug ospf sr",
+ "debug ospf te",
+ "debug ospf zebra",
+ ],
+ "ospf6": [
+ "debug ospf6 event",
+ "debug ospf6 ism",
+ "debug ospf6 lsa",
+ "debug ospf6 nsm",
+ "debug ospf6 nssa",
+ "debug ospf6 packet all",
+ "debug ospf6 sr",
+ "debug ospf6 te",
+ "debug ospf6 zebra",
+ ],
+}
+
+g_iperf_client_procs = {}
+g_iperf_server_procs = {}
+
+
+def is_string(value):
+ try:
+ return isinstance(value, basestring)
+ except NameError:
+ return isinstance(value, str)
+
+
+if config.has_option("topogen", "verbosity"):
+ loglevel = config.get("topogen", "verbosity")
+ loglevel = loglevel.lower()
+else:
+ loglevel = "info"
+
+if config.has_option("topogen", "frrtest_log_dir"):
+ frrtest_log_dir = config.get("topogen", "frrtest_log_dir")
+ time_stamp = datetime.time(datetime.now())
+ logfile_name = "frr_test_bgp_"
+ frrtest_log_file = frrtest_log_dir + logfile_name + str(time_stamp)
+ print("frrtest_log_file..", frrtest_log_file)
+
+ logger = get_logger(
+ "test_execution_logs", log_level=loglevel, target=frrtest_log_file
+ )
+ print("Logs will be sent to logfile: {}".format(frrtest_log_file))
+
+if config.has_option("topogen", "show_router_config"):
+ show_router_config = config.get("topogen", "show_router_config")
+else:
+ show_router_config = False
+
+# env variable for setting what address type to test
+ADDRESS_TYPES = os.environ.get("ADDRESS_TYPES")
+
+
+# Saves sequence id numbers
+SEQ_ID = {"prefix_lists": {}, "route_maps": {}}
+
+
+def get_seq_id(obj_type, router, obj_name):
+ """
+    Generates and saves sequence numbers in intervals of 10
+    Parameters
+    ----------
+    * `obj_type`: prefix_lists or route_maps
+    * `router`: router name
+    * `obj_name`: name of the prefix-list or route-map
+ Returns
+ --------
+ Sequence number generated
+ """
+
+ router_data = SEQ_ID[obj_type].setdefault(router, {})
+ obj_data = router_data.setdefault(obj_name, {})
+ seq_id = obj_data.setdefault("seq_id", 0)
+
+ seq_id = int(seq_id) + 10
+ obj_data["seq_id"] = seq_id
+
+ return seq_id
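+
+# Example: successive calls for the same object step by 10:
+#   get_seq_id("route_maps", "r1", "rmap_1")  # -> 10
+#   get_seq_id("route_maps", "r1", "rmap_1")  # -> 20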
+
+
+def set_seq_id(obj_type, router, id, obj_name):
+ """
+    Saves a user-provided sequence number (when it is not auto-generated)
+    Parameters
+    ----------
+    * `obj_type`: prefix_lists or route_maps
+    * `router`: router name
+    * `id`: sequence number provided by the user
+    * `obj_name`: name of the prefix-list or route-map
+ """
+ router_data = SEQ_ID[obj_type].setdefault(router, {})
+ obj_data = router_data.setdefault(obj_name, {})
+ seq_id = obj_data.setdefault("seq_id", 0)
+
+ seq_id = int(seq_id) + int(id)
+ obj_data["seq_id"] = seq_id
+
+
+class InvalidCLIError(Exception):
+ """Raise when the CLI command is wrong"""
+
+
+def run_frr_cmd(rnode, cmd, isjson=False):
+ """
+ Execute frr show commands in privileged mode
+ * `rnode`: router node on which command needs to be executed
+ * `cmd`: Command to be executed on frr
+ * `isjson`: If command is to get json data or not
+ :return str:
+ """
+
+ if cmd:
+ ret_data = rnode.vtysh_cmd(cmd, isjson=isjson)
+
+ if isjson:
+ rnode.vtysh_cmd(cmd.rstrip("json"), isjson=False)
+
+ return ret_data
+
+ else:
+ raise InvalidCLIError("No actual cmd passed")
+
+
+def apply_raw_config(tgen, input_dict):
+ """
+    API to configure raw configuration on a device. This can be used for any CLI
+    that has not been implemented in JSON.
+
+ Parameters
+ ----------
+ * `tgen`: tgen object
+ * `input_dict`: configuration that needs to be applied
+
+ Usage
+ -----
+ input_dict = {
+ "r2": {
+ "raw_config": [
+ "router bgp",
+ "no bgp update-group-split-horizon"
+ ]
+ }
+ }
+ Returns
+ -------
+ True or errormsg
+ """
+
+ rlist = []
+
+ for router_name in input_dict.keys():
+ config_cmd = input_dict[router_name]["raw_config"]
+
+ if not isinstance(config_cmd, list):
+ config_cmd = [config_cmd]
+
+ frr_cfg_file = "{}/{}/{}".format(tgen.logdir, router_name, FRRCFG_FILE)
+ with open(frr_cfg_file, "w") as cfg:
+ for cmd in config_cmd:
+ cfg.write("{}\n".format(cmd))
+
+ rlist.append(router_name)
+
+ # Load config on all routers
+ return load_config_to_routers(tgen, rlist)
+
+
+def create_common_configurations(
+ tgen, config_dict, config_type=None, build=False, load_config=True
+):
+ """
+ API to create object of class FRRConfig and also create frr_json.conf
+ file. It will create interface and common configurations and save it to
+ frr_json.conf and load to router
+ Parameters
+ ----------
+ * `tgen`: tgen object
+ * `config_dict`: Configuration data saved in a dict of { router: config-list }
+    * `config_type` : Syntactic information while writing configuration. Should
+       be one of the values mentioned in the config_map below.
+    * `build` : Only for initial setup phase this is set as True
+    * `load_config` : if False, only write the configuration file; do not load it
+ Returns
+ -------
+ True or False
+ """
+
+ config_map = OrderedDict(
+ {
+ "general_config": "! FRR General Config\n",
+ "debug_log_config": "! Debug log Config\n",
+ "interface_config": "! Interfaces Config\n",
+ "static_route": "! Static Route Config\n",
+ "prefix_list": "! Prefix List Config\n",
+ "bgp_community_list": "! Community List Config\n",
+ "route_maps": "! Route Maps Config\n",
+ "bgp": "! BGP Config\n",
+ "vrf": "! VRF Config\n",
+ "ospf": "! OSPF Config\n",
+ "ospf6": "! OSPF Config\n",
+ "pim": "! PIM Config\n",
+ }
+ )
+
+ if build:
+ mode = "a"
+ elif not load_config:
+ mode = "a"
+ else:
+ mode = "w"
+
+ routers = config_dict.keys()
+ for router in routers:
+ fname = "{}/{}/{}".format(tgen.logdir, router, FRRCFG_FILE)
+ try:
+ frr_cfg_fd = open(fname, mode)
+ if config_type:
+ frr_cfg_fd.write(config_map[config_type])
+ for line in config_dict[router]:
+ frr_cfg_fd.write("{} \n".format(str(line)))
+ frr_cfg_fd.write("\n")
+
+ except IOError as err:
+ logger.error("Unable to open FRR Config '%s': %s" % (fname, str(err)))
+ return False
+ finally:
+ frr_cfg_fd.close()
+
+    # If configuration comes from the build phase, loading is done at the end
+ result = True
+ if not build and load_config:
+ result = load_config_to_routers(tgen, routers)
+
+ return result
+
+
+def create_common_configuration(
+ tgen, router, data, config_type=None, build=False, load_config=True
+):
+ """
+ API to create object of class FRRConfig and also create frr_json.conf
+ file. It will create interface and common configurations and save it to
+ frr_json.conf and load to router
+ Parameters
+ ----------
+ * `tgen`: tgen object
+ * `data`: Configuration data saved in a list.
+ * `router` : router id to be configured.
+    * `config_type` : Syntactic information while writing configuration. Should
+       be one of the values mentioned in the config_map in
+       create_common_configurations.
+ * `build` : Only for initial setup phase this is set as True
+ Returns
+ -------
+ True or False
+ """
+ return create_common_configurations(
+ tgen, {router: data}, config_type, build, load_config
+ )
+
+
+def kill_router_daemons(tgen, router, daemons, save_config=True):
+ """
+ Router's current config would be saved to /etc/frr/ for each daemon
+ and daemon would be killed forcefully using SIGKILL.
+ * `tgen` : topogen object
+ * `router`: Device under test
+ * `daemons`: list of daemons to be killed
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ try:
+ router_list = tgen.routers()
+
+ if save_config:
+ # Saving router config to /etc/frr, which will be loaded to router
+ # when it starts
+ router_list[router].vtysh_cmd("write memory")
+
+ # Kill Daemons
+ result = router_list[router].killDaemons(daemons)
+ if len(result) > 0:
+            assert False, "Errors found post shutdown - details follow: {}".format(
+                result
+            )
+ return result
+
+ except Exception as e:
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+
+def start_router_daemons(tgen, router, daemons):
+ """
+ Daemons defined by user would be started
+ * `tgen` : topogen object
+ * `router`: Device under test
+    * `daemons`: list of daemons to be started
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ try:
+ router_list = tgen.routers()
+
+ # Start daemons
+ res = router_list[router].startDaemons(daemons)
+
+ except Exception as e:
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ res = errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return res
+
+
+def check_router_status(tgen):
+ """
+ Check if all daemons are running for all routers in topology
+ * `tgen` : topogen object
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ try:
+ router_list = tgen.routers()
+ for router, rnode in router_list.items():
+ result = rnode.check_router_running()
+ if result != "":
+ daemons = []
+ if "mgmtd" in result:
+ daemons.append("mgmtd")
+ if "bgpd" in result:
+ daemons.append("bgpd")
+ if "zebra" in result:
+ daemons.append("zebra")
+ if "pimd" in result:
+ daemons.append("pimd")
+ if "pim6d" in result:
+ daemons.append("pim6d")
+ if "ospfd" in result:
+ daemons.append("ospfd")
+ if "ospf6d" in result:
+ daemons.append("ospf6d")
+ rnode.startDaemons(daemons)
+
+ except Exception as e:
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+def save_initial_config_on_routers(tgen):
+ """Save current configuration on routers to FRRCFG_BKUP_FILE.
+
+ FRRCFG_BKUP_FILE is the file that will be restored when `reset_config_on_routers()`
+ is called.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ """
+ router_list = tgen.routers()
+ target_cfg_fmt = tgen.logdir + "/{}/frr_json_initial.conf"
+
+ # Get all running configs in parallel
+ procs = {}
+ for rname in router_list:
+ logger.debug("Fetching running config for router %s", rname)
+ procs[rname] = router_list[rname].popen(
+ ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
+ stdin=None,
+ stdout=open(target_cfg_fmt.format(rname), "w"),
+ stderr=subprocess.PIPE,
+ )
+ for rname, p in procs.items():
+ _, error = p.communicate()
+ if p.returncode:
+ logger.error(
+ "Get running config for %s failed %d: %s", rname, p.returncode, error
+ )
+ raise InvalidCLIError(
+ "vtysh show running error on {}: {}".format(rname, error)
+ )
+
+
+def reset_config_on_routers(tgen, routerName=None):
+ """
+ Resets configuration on routers to the snapshot created using input JSON
+ file. It replaces existing router configuration with FRRCFG_BKUP_FILE
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+    * `routerName` : router whose config is to be reset
+ """
+
+ logger.debug("Entering API: reset_config_on_routers")
+
+ tgen.cfg_gen += 1
+ gen = tgen.cfg_gen
+
+ # Trim the router list if needed
+ router_list = tgen.routers()
+ if routerName:
+ if routerName not in router_list:
+ logger.warning(
+ "Exiting API: reset_config_on_routers: no router %s",
+ routerName,
+ exc_info=True,
+ )
+ return True
+ router_list = {routerName: router_list[routerName]}
+
+ delta_fmt = tgen.logdir + "/{}/delta-{}.conf"
+ # FRRCFG_BKUP_FILE
+ target_cfg_fmt = tgen.logdir + "/{}/frr_json_initial.conf"
+ run_cfg_fmt = tgen.logdir + "/{}/frr-{}.sav"
+
+ #
+ # Get all running configs in parallel
+ #
+ procs = {}
+ for rname in router_list:
+ logger.debug("Fetching running config for router %s", rname)
+ procs[rname] = router_list[rname].popen(
+ ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
+ stdin=None,
+ stdout=open(run_cfg_fmt.format(rname, gen), "w"),
+ stderr=subprocess.PIPE,
+ )
+ for rname, p in procs.items():
+ _, error = p.communicate()
+ if p.returncode:
+ logger.error(
+ "Get running config for %s failed %d: %s", rname, p.returncode, error
+ )
+ raise InvalidCLIError(
+ "vtysh show running error on {}: {}".format(rname, error)
+ )
+
+ #
+ # Get all delta's in parallel
+ #
+ procs = {}
+ for rname in router_list:
+ logger.debug(
+ "Generating delta for router %s to new configuration (gen %d)", rname, gen
+ )
+ procs[rname] = tgen.net.popen(
+ [
+ "/usr/lib/frr/frr-reload.py",
+ "--test-reset",
+ "--input",
+ run_cfg_fmt.format(rname, gen),
+ "--test",
+ target_cfg_fmt.format(rname),
+ ],
+ stdin=None,
+ stdout=open(delta_fmt.format(rname, gen), "w"),
+ stderr=subprocess.PIPE,
+ )
+ for rname, p in procs.items():
+ _, error = p.communicate()
+ if p.returncode:
+ logger.error(
+ "Delta file creation for %s failed %d: %s", rname, p.returncode, error
+ )
+ raise InvalidCLIError("frr-reload error for {}: {}".format(rname, error))
+
+ #
+ # Apply all the deltas in parallel
+ #
+ procs = {}
+ for rname in router_list:
+ logger.debug("Applying delta config on router %s", rname)
+
+ procs[rname] = router_list[rname].popen(
+ ["/usr/bin/env", "vtysh", "-f", delta_fmt.format(rname, gen)],
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ for rname, p in procs.items():
+ output, _ = p.communicate()
+ vtysh_command = "vtysh -f {}".format(delta_fmt.format(rname, gen))
+ if not p.returncode:
+ router_list[rname].logger.debug(
+ '\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(
+ vtysh_command, output
+ )
+ )
+ else:
+ router_list[rname].logger.warning(
+ '\nvtysh config apply failed => "{}"\nvtysh output <= "{}"'.format(
+ vtysh_command, output
+ )
+ )
+ logger.error(
+ "Delta file apply for %s failed %d: %s", rname, p.returncode, output
+ )
+
+    # We really need to enable this failure; however, frr-reload.py currently
+    # produces invalid "no" commands, as it just prepends "no", but some of the
+    # command forms lack matching values (e.g., final values). Until frr-reload
+ # is fixed to handle this (or all the CLI no forms are adjusted) we can't
+ # fail tests.
+ # raise InvalidCLIError("frr-reload error for {}: {}".format(rname, output))
+
+ #
+ # Optionally log all new running config if "show_router_config" is defined in
+ # "pytest.ini"
+ #
+ if show_router_config:
+ procs = {}
+ for rname in router_list:
+ logger.debug("Fetching running config for router %s", rname)
+ procs[rname] = router_list[rname].popen(
+ ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ for rname, p in procs.items():
+ output, _ = p.communicate()
+ if p.returncode:
+ logger.warning(
+ "Get running config for %s failed %d: %s",
+ rname,
+ p.returncode,
+ output,
+ )
+ else:
+ logger.debug(
+ "Configuration on router %s after reset:\n%s", rname, output
+ )
+
+ logger.debug("Exiting API: reset_config_on_routers")
+ return True
+
+
+def prep_load_config_to_routers(tgen, *config_name_list):
+ """Create common config for `load_config_to_routers`.
+
+ The common config file is constructed from the list of sub-config files passed as
+ position arguments to this function. Each entry in `config_name_list` is looked for
+ under the router sub-directory in the test directory and those files are
+ concatenated together to create the common config. e.g.,
+
+ # Routers are "r1" and "r2", test file is `example/test_example_foo.py`
+    prep_load_config_to_routers(tgen, "bgpd.conf", "ospfd.conf")
+
+ When the above call is made the files in
+
+ example/r1/bgpd.conf
+ example/r1/ospfd.conf
+
+ Are concat'd together into a single config file that will be loaded on r1, and
+
+ example/r2/bgpd.conf
+ example/r2/ospfd.conf
+
+ Are concat'd together into a single config file that will be loaded on r2 when
+ the call to `load_config_to_routers` is made.
+ """
+
+ routers = tgen.routers()
+ for rname, router in routers.items():
+ destname = "{}/{}/{}".format(tgen.logdir, rname, FRRCFG_FILE)
+ wmode = "w"
+ for cfbase in config_name_list:
+ script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
+ confname = os.path.join(script_dir, "{}/{}".format(rname, cfbase))
+ with open(confname, "r") as cf:
+ with open(destname, wmode) as df:
+ df.write(cf.read())
+ wmode = "a"
+
+
+def load_config_to_routers(tgen, routers, save_bkup=False):
+ """
+ Loads configuration on routers from the file FRRCFG_FILE.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `routers` : routers for which configuration is to be loaded
+ * `save_bkup` : If True, Saves snapshot of FRRCFG_FILE to FRRCFG_BKUP_FILE
+ Returns
+ -------
+ True or False
+ """
+
+ logger.debug("Entering API: load_config_to_routers")
+
+ tgen.cfg_gen += 1
+ gen = tgen.cfg_gen
+
+ base_router_list = tgen.routers()
+ router_list = {}
+ for router in routers:
+ if router not in base_router_list:
+ continue
+ router_list[router] = base_router_list[router]
+
+ frr_cfg_file_fmt = tgen.logdir + "/{}/" + FRRCFG_FILE
+ frr_cfg_save_file_fmt = tgen.logdir + "/{}/{}-" + FRRCFG_FILE
+ frr_cfg_bkup_fmt = tgen.logdir + "/{}/" + FRRCFG_BKUP_FILE
+
+ procs = {}
+ for rname in router_list:
+ router = router_list[rname]
+ try:
+ frr_cfg_file = frr_cfg_file_fmt.format(rname)
+ frr_cfg_save_file = frr_cfg_save_file_fmt.format(rname, gen)
+ frr_cfg_bkup = frr_cfg_bkup_fmt.format(rname)
+ with open(frr_cfg_file, "r+") as cfg:
+ data = cfg.read()
+ logger.debug(
+ "Applying following configuration on router %s (gen: %d):\n%s",
+ rname,
+ gen,
+ data,
+ )
+ # Always save a copy of what we just did
+ with open(frr_cfg_save_file, "w") as bkup:
+ bkup.write(data)
+ if save_bkup:
+ with open(frr_cfg_bkup, "w") as bkup:
+ bkup.write(data)
+ procs[rname] = router_list[rname].popen(
+ ["/usr/bin/env", "vtysh", "-f", frr_cfg_file],
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ except IOError as err:
+ logger.error(
+ "Unable to open config File. error(%s): %s", err.errno, err.strerror
+ )
+ return False
+ except Exception as error:
+ logger.error("Unable to apply config on %s: %s", rname, str(error))
+ return False
+
+ errors = []
+ for rname, p in procs.items():
+ output, _ = p.communicate()
+ frr_cfg_file = frr_cfg_file_fmt.format(rname)
+ vtysh_command = "vtysh -f " + frr_cfg_file
+ if not p.returncode:
+ router_list[rname].logger.debug(
+ '\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(
+ vtysh_command, output
+ )
+ )
+ else:
+ router_list[rname].logger.error(
+ '\nvtysh config apply failed => "{}"\nvtysh output <= "{}"'.format(
+ vtysh_command, output
+ )
+ )
+ logger.error(
+ "Config apply for %s failed %d: %s", rname, p.returncode, output
+ )
+            # We can't throw an exception here, or the config file would never be cleared.
+ errors.append(
+ InvalidCLIError(
+ "load_config_to_routers error for {}: {}".format(rname, output)
+ )
+ )
+
+ # Empty the config file or we append to it next time through.
+ with open(frr_cfg_file, "r+") as cfg:
+ cfg.truncate(0)
+
+ # Router current configuration to log file or console if
+ # "show_router_config" is defined in "pytest.ini"
+ if show_router_config:
+ procs = {}
+ for rname in router_list:
+ procs[rname] = router_list[rname].popen(
+ ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ for rname, p in procs.items():
+ output, _ = p.communicate()
+ if p.returncode:
+ logger.warning(
+ "Get running config for %s failed %d: %s",
+ rname,
+ p.returncode,
+ output,
+ )
+ else:
+ logger.debug("New configuration for router %s:\n%s", rname, output)
+
+ logger.debug("Exiting API: load_config_to_routers")
+ return not errors
+
+
+def load_config_to_router(tgen, routerName, save_bkup=False):
+ """
+ Loads configuration on router from the file FRRCFG_FILE.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `routerName` : router for which configuration to be loaded
+ * `save_bkup` : If True, Saves snapshot of FRRCFG_FILE to FRRCFG_BKUP_FILE
+ """
+ return load_config_to_routers(tgen, [routerName], save_bkup)
+
+
+def reset_with_new_configs(tgen, *cflist):
+ """Reset the router to initial config, then load new configs.
+
+    Resets routers to the initial config state (see `save_initial_config_on_routers()`
+    and `reset_config_on_routers()`), then concat list of router sub-configs together
+ and load onto the routers (see `prep_load_config_to_routers()` and
+ `load_config_to_routers()`)
+ """
+ routers = tgen.routers()
+
+ reset_config_on_routers(tgen)
+ prep_load_config_to_routers(tgen, *cflist)
+    load_config_to_routers(tgen, routers, save_bkup=False)
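+
+# Example (sketch): reset to the initial snapshot, then load each router's
+# bgpd.conf and ospfd.conf from the test directory:
+#   reset_with_new_configs(tgen, "bgpd.conf", "ospfd.conf")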
+
+
+def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None):
+ """
+ API to get the link local ipv6 address of a particular interface using
+ FRR command 'show interface'
+
+ * `tgen`: tgen object
+    * `router` : router whose link-local addresses are fetched
+ * `intf` : interface for which link-local address needs to be taken
+ * `vrf` : VRF name
+
+ Usage
+ -----
+    linklocal = get_frr_ipv6_linklocal(tgen, router, "intf1", "RED_A")
+
+ Returns
+ -------
+    A single link-local address string when `intf` is given, otherwise a
+    list of [interface, link-local] pairs.
+ """
+
+ router_list = tgen.routers()
+ for rname, rnode in router_list.items():
+ if rname != router:
+ continue
+
+ linklocal = []
+
+ if vrf:
+ cmd = "show interface vrf {}".format(vrf)
+ else:
+ cmd = "show interface"
+
+ for chk_ll in range(0, 60):
+ sleep(1 / 4)
+ ifaces = router_list[router].run('vtysh -c "{}"'.format(cmd))
+ # Fix newlines (make them all the same)
+ ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
+
+ interface = None
+ ll_per_if_count = 0
+ for line in ifaces:
+ # Interface name
+ m = re_search("Interface ([a-zA-Z0-9-]+) is", line)
+ if m:
+ interface = m.group(1).split(" ")[0]
+ ll_per_if_count = 0
+
+ # Interface ip
+ m1 = re_search("inet6 (fe80[:a-fA-F0-9]+/[0-9]+)", line)
+ if m1:
+ local = m1.group(1)
+ ll_per_if_count += 1
+ if ll_per_if_count > 1:
+ linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
+ else:
+ linklocal += [[interface, local]]
+
+ try:
+ if linklocal:
+ if intf:
+ return [
+ _linklocal[1]
+ for _linklocal in linklocal
+ if _linklocal[0] == intf
+ ][0].split("/")[0]
+ return linklocal
+ except IndexError:
+ continue
+
+ errormsg = "Link local ip missing on router {}".format(router)
+ return errormsg
+
+
+def generate_support_bundle():
+ """
+    API to generate a support bundle on any verification step failure.
+    It runs a python utility, /usr/lib/frr/generate_support_bundle.py,
+    which basically runs defined CLIs and dumps the data to the specified location
+ """
+
+ tgen = get_topogen()
+ router_list = tgen.routers()
+ test_name = os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0]
+
+ bundle_procs = {}
+ for rname, rnode in router_list.items():
+ logger.info("Spawn collection of support bundle for %s", rname)
+ dst_bundle = "{}/{}/support_bundles/{}".format(tgen.logdir, rname, test_name)
+ rnode.run("mkdir -p " + dst_bundle)
+
+ gen_sup_cmd = [
+ "/usr/lib/frr/generate_support_bundle.py",
+ "--log-dir=" + dst_bundle,
+ ]
+ bundle_procs[rname] = tgen.net[rname].popen(gen_sup_cmd, stdin=None)
+
+ for rname, rnode in router_list.items():
+ logger.debug("Waiting on support bundle for %s", rname)
+ output, error = bundle_procs[rname].communicate()
+ if output:
+ logger.debug(
+ "Output from collecting support bundle for %s:\n%s", rname, output
+ )
+ if error:
+ logger.warning(
+ "Error from collecting support bundle for %s:\n%s", rname, error
+ )
+
+ return True
+
+
+def start_topology(tgen):
+ """
+ Starting topology, create tmp files which are loaded to routers
+ to start daemons and then start routers
+ * `tgen` : topogen object
+ """
+
+ # Starting topology
+ tgen.start_topology()
+
+ # Starting daemons
+
+ router_list = tgen.routers()
+ routers_sorted = sorted(
+ router_list.keys(), key=lambda x: int(re_search("[0-9]+", x).group(0))
+ )
+
+ linux_ver = ""
+ for rname in routers_sorted:
+ router = router_list[rname]
+
+ # It will help in debugging the failures, will give more details on which
+ # specific kernel version tests are failing
+ if linux_ver == "":
+ linux_ver = router.run("uname -a")
+ logger.info("Logging platform related details: \n %s \n", linux_ver)
+
+ try:
+ os.chdir(tgen.logdir)
+
+ except IOError as err:
+ logger.error("I/O error({0}): {1}".format(err.errno, err.strerror))
+
+ topo = tgen.json_topo
+ feature = set()
+
+ if "feature" in topo:
+ feature.update(topo["feature"])
+
+ if rname in topo["routers"]:
+ for key in topo["routers"][rname].keys():
+ feature.add(key)
+
+ for val in topo["routers"][rname]["links"].values():
+ if "pim" in val:
+ feature.add("pim")
+ break
+ for val in topo["routers"][rname]["links"].values():
+ if "pim6" in val:
+ feature.add("pim6")
+ break
+ for val in topo["routers"][rname]["links"].values():
+ if "ospf6" in val:
+ feature.add("ospf6")
+ break
+ if "switches" in topo and rname in topo["switches"]:
+ for val in topo["switches"][rname]["links"].values():
+ if "ospf" in val:
+ feature.add("ospf")
+ break
+ if "ospf6" in val:
+ feature.add("ospf6")
+ break
+
+ # Loading empty mgmtd.conf file to router, to start the mgmtd daemon
+ router.load_config(
+ TopoRouter.RD_MGMTD, "{}/{}/mgmtd.conf".format(tgen.logdir, rname)
+ )
+
+        # Loading empty zebra.conf file to router, to start the zebra daemon
+ router.load_config(
+ TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(tgen.logdir, rname)
+ )
+
+        # Loading empty bgpd.conf file to router, to start the bgp daemon
+ if "bgp" in feature:
+ router.load_config(
+ TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(tgen.logdir, rname)
+ )
+
+        # Loading empty pimd.conf file to router, to start the pim daemon
+ if "pim" in feature:
+ router.load_config(
+ TopoRouter.RD_PIM, "{}/{}/pimd.conf".format(tgen.logdir, rname)
+ )
+
+        # Loading empty pim6d.conf file to router, to start the pim6 daemon
+ if "pim6" in feature:
+ router.load_config(
+ TopoRouter.RD_PIM6, "{}/{}/pim6d.conf".format(tgen.logdir, rname)
+ )
+
+ if "ospf" in feature:
+            # Loading empty ospfd.conf file to router, to start the ospf daemon
+ router.load_config(
+ TopoRouter.RD_OSPF, "{}/{}/ospfd.conf".format(tgen.logdir, rname)
+ )
+
+ if "ospf6" in feature:
+            # Loading empty ospf6d.conf file to router, to start the ospf6 daemon
+ router.load_config(
+ TopoRouter.RD_OSPF6, "{}/{}/ospf6d.conf".format(tgen.logdir, rname)
+ )
+
+ # Starting routers
+ logger.info("Starting all routers once topology is created")
+ tgen.start_router()
+
+
+def stop_router(tgen, router):
+ """
+    Router's current config would be saved to /tmp/topotest/<suite>/<router> for
+    each daemon, and the router and its daemons would be stopped.
+
+ * `tgen` : topogen object
+ * `router`: Device under test
+ """
+
+ router_list = tgen.routers()
+
+ # Saving router config to /etc/frr, which will be loaded to router
+ # when it starts
+ router_list[router].vtysh_cmd("write memory")
+
+ # Stop router
+ router_list[router].stop()
+
+
+def start_router(tgen, router):
+ """
+ Router will be started and config would be loaded from /tmp/topotest/<suite>/<router> for each
+ daemon
+
+ * `tgen` : topogen object
+ * `router`: Device under test
+ """
+
+ logger.debug("Entering lib API: start_router")
+
+ try:
+ router_list = tgen.routers()
+
+ # Router and its daemons would be started and config would
+ # be loaded to router for each daemon from /etc/frr
+ router_list[router].start()
+
+ # Waiting for router to come up
+ sleep(5)
+
+ except Exception as e:
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: start_router()")
+ return True
+
+
+def number_to_row(routerName):
+ """
+    Returns the row number for the router.
+    Calculation based on name: a0 = row 0, a1 = row 1, b2 = row 2, z23 = row 23,
+    etc.
+ """
+ return int(routerName[1:])
+
+
+def number_to_column(routerName):
+ """
+    Returns the column number for the router.
+    Calculation based on name: a0 = column 0, a1 = column 0, b2 = column 1,
+    z23 = column 25, etc.
+ """
+ return ord(routerName[0]) - 97
+
+
+def topo_daemons(tgen, topo=None):
+ """
+ Returns daemon list required for the suite based on topojson.
+ """
+ daemon_list = []
+
+ if topo is None:
+ topo = tgen.json_topo
+
+ router_list = tgen.routers()
+ routers_sorted = sorted(
+ router_list.keys(), key=lambda x: int(re_search("[0-9]+", x).group(0))
+ )
+
+ for rtr in routers_sorted:
+ if "ospf" in topo["routers"][rtr] and "ospfd" not in daemon_list:
+ daemon_list.append("ospfd")
+
+ if "ospf6" in topo["routers"][rtr] and "ospf6d" not in daemon_list:
+ daemon_list.append("ospf6d")
+
+ for val in topo["routers"][rtr]["links"].values():
+ if "pim" in val and "pimd" not in daemon_list:
+ daemon_list.append("pimd")
+ if "pim6" in val and "pim6d" not in daemon_list:
+ daemon_list.append("pim6d")
+ if "ospf" in val and "ospfd" not in daemon_list:
+ daemon_list.append("ospfd")
+ if "ospf6" in val and "ospf6d" not in daemon_list:
+ daemon_list.append("ospf6d")
+ break
+
+ return daemon_list
+
+
+def add_interfaces_to_vlan(tgen, input_dict):
+ """
+    Add interfaces to VLAN; the vlan package must be installed on the machine
+
+    * `tgen`: tgen object
+ * `input_dict` : interfaces to be added to vlans
+
+ input_dict= {
+ "r1":{
+ "vlan":{
+ VLAN_1: [{
+                intf_r1_s1: {
+                    "ip": "10.1.1.1",
+                    "subnet": "255.255.255.0"
+ }
+ }]
+ }
+ }
+ }
+
+ add_interfaces_to_vlan(tgen, input_dict)
+
+ """
+
+ router_list = tgen.routers()
+ for dut in input_dict.keys():
+ rnode = router_list[dut]
+
+ if "vlan" in input_dict[dut]:
+ for vlan, interfaces in input_dict[dut]["vlan"].items():
+ for intf_dict in interfaces:
+ for interface, data in intf_dict.items():
+ # Adding interface to VLAN
+ vlan_intf = "{}.{}".format(interface, vlan)
+ cmd = "ip link add link {} name {} type vlan id {}".format(
+ interface, vlan_intf, vlan
+ )
+ logger.debug("[DUT: %s]: Running command: %s", dut, cmd)
+ result = rnode.run(cmd)
+ logger.debug("result %s", result)
+
+ # Bringing interface up
+ cmd = "ip link set {} up".format(vlan_intf)
+ logger.debug("[DUT: %s]: Running command: %s", dut, cmd)
+ result = rnode.run(cmd)
+ logger.debug("result %s", result)
+
+ # Assigning IP address
+ ifaddr = ipaddress.ip_interface(
+ "{}/{}".format(
+ frr_unicode(data["ip"]), frr_unicode(data["subnet"])
+ )
+ )
+
+ cmd = "ip -{0} a flush {1} scope global && ip a add {2} dev {1} && ip l set {1} up".format(
+ ifaddr.version, vlan_intf, ifaddr
+ )
+ logger.debug("[DUT: %s]: Running command: %s", dut, cmd)
+ result = rnode.run(cmd)
+ logger.debug("result %s", result)
+
+
+def create_debug_log_config(tgen, input_dict, build=False):
+ """
+ Enable/disable debug logs for any protocol with defined debug
+ options and logs would be saved to created log file
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : details to enable debug logs for protocols
+ * `build` : Only for initial setup phase this is set as True.
+
+
+ Usage:
+ ------
+ input_dict = {
+ "r2": {
+ "debug":{
+ "log_file" : "debug.log",
+ "enable": ["pimd", "zebra"],
+ "disable": {
+ "bgpd":[
+ 'debug bgp neighbor-events',
+ 'debug bgp updates',
+ 'debug bgp zebra',
+ ]
+ }
+ }
+ }
+ }
+
+ result = create_debug_log_config(tgen, input_dict)
+
+ Returns
+ -------
+ True or False
+ """
+
+ result = False
+ try:
+ debug_config_dict = {}
+
+ for router in input_dict.keys():
+ debug_config = []
+ if "debug" in input_dict[router]:
+ debug_dict = input_dict[router]["debug"]
+
+ disable_logs = debug_dict.setdefault("disable", None)
+ enable_logs = debug_dict.setdefault("enable", None)
+ log_file = debug_dict.setdefault("log_file", None)
+
+ if log_file:
+ _log_file = os.path.join(tgen.logdir, log_file)
+ debug_config.append("log file {} \n".format(_log_file))
+
+ if type(enable_logs) is list:
+ for daemon in enable_logs:
+ for debug_log in DEBUG_LOGS[daemon]:
+ debug_config.append("{}".format(debug_log))
+ elif type(enable_logs) is dict:
+ for daemon, debug_logs in enable_logs.items():
+ for debug_log in debug_logs:
+ debug_config.append("{}".format(debug_log))
+
+ if type(disable_logs) is list:
+ for daemon in disable_logs:
+ for debug_log in DEBUG_LOGS[daemon]:
+ debug_config.append("no {}".format(debug_log))
+ elif type(disable_logs) is dict:
+ for daemon, debug_logs in disable_logs.items():
+ for debug_log in debug_logs:
+ debug_config.append("no {}".format(debug_log))
+ if debug_config:
+ debug_config_dict[router] = debug_config
+
+ result = create_common_configurations(
+ tgen, debug_config_dict, "debug_log_config", build=build
+ )
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+#############################################
+# Common APIs, will be used by all protocols
+#############################################
+
+
+def create_vrf_cfg(tgen, topo, input_dict=None, build=False):
+ """
+ Create vrf configuration for created topology. VRF
+ configuration is provided in input json file.
+
+ VRF config is done in Linux Kernel:
+ * Create VRF
+ * Attach interface to VRF
+ * Bring up VRF
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring
+ from testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ input_dict={
+ "r3": {
+ "links": {
+ "r2-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED_A"},
+ "r2-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED_B"},
+ "r2-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE_A"},
+ "r2-link4": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE_B"},
+ },
+ "vrfs":[
+ {
+ "name": "RED_A",
+ "id": "1"
+ },
+ {
+ "name": "RED_B",
+ "id": "2"
+ },
+ {
+ "name": "BLUE_A",
+ "id": "3",
+ "delete": True
+ },
+ {
+ "name": "BLUE_B",
+ "id": "4"
+ }
+ ]
+ }
+ }
+ result = create_vrf_cfg(tgen, topo, input_dict)
+
+ Returns
+ -------
+ True or False
+ """
+ result = True
+ if not input_dict:
+ input_dict = deepcopy(topo)
+ else:
+ input_dict = deepcopy(input_dict)
+
+ try:
+ config_data_dict = {}
+
+ for c_router, c_data in input_dict.items():
+ rnode = tgen.gears[c_router]
+ config_data = []
+ if "vrfs" in c_data:
+ for vrf in c_data["vrfs"]:
+ name = vrf.setdefault("name", None)
+ table_id = vrf.setdefault("id", None)
+ del_action = vrf.setdefault("delete", False)
+
+ if del_action:
+ # Kernel cmd- Add VRF and table
+ cmd = "ip link del {} type vrf table {}".format(
+ vrf["name"], vrf["id"]
+ )
+
+ logger.debug(
+ "[DUT: %s]: Running kernel cmd [%s]", c_router, cmd
+ )
+ rnode.run(cmd)
+
+ # Kernel cmd - Bring down VRF
+ cmd = "ip link set dev {} down".format(name)
+ logger.debug(
+ "[DUT: %s]: Running kernel cmd [%s]", c_router, cmd
+ )
+ rnode.run(cmd)
+
+ else:
+ if name and table_id:
+ # Kernel cmd- Add VRF and table
+ cmd = "ip link add {} type vrf table {}".format(
+ name, table_id
+ )
+ logger.debug(
+ "[DUT: %s]: Running kernel cmd " "[%s]", c_router, cmd
+ )
+ rnode.run(cmd)
+
+ # Kernel cmd - Bring up VRF
+ cmd = "ip link set dev {} up".format(name)
+ logger.debug(
+ "[DUT: %s]: Running kernel " "cmd [%s]", c_router, cmd
+ )
+ rnode.run(cmd)
+
+ for vrf in c_data["vrfs"]:
+ vni = vrf.setdefault("vni", None)
+ del_vni = vrf.setdefault("no_vni", None)
+
+ if "links" in c_data:
+ for destRouterLink, data in sorted(c_data["links"].items()):
+ # Loopback interfaces
+ if "type" in data and data["type"] == "loopback":
+ interface_name = destRouterLink
+ else:
+ interface_name = data["interface"]
+
+ if "vrf" in data:
+ vrf_list = data["vrf"]
+
+ if type(vrf_list) is not list:
+ vrf_list = [vrf_list]
+
+ for _vrf in vrf_list:
+ cmd = "ip link set {} master {}".format(
+ interface_name, _vrf
+ )
+
+ logger.debug(
+ "[DUT: %s]: Running" " kernel cmd [%s]",
+ c_router,
+ cmd,
+ )
+ rnode.run(cmd)
+
+ if vni:
+ config_data.append("vrf {}".format(vrf["name"]))
+ cmd = "vni {}".format(vni)
+ config_data.append(cmd)
+
+ if del_vni:
+ config_data.append("vrf {}".format(vrf["name"]))
+ cmd = "no vni {}".format(del_vni)
+ config_data.append(cmd)
+
+ if config_data:
+ config_data_dict[c_router] = config_data
+
+ result = create_common_configurations(
+ tgen, config_data_dict, "vrf", build=build
+ )
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ return result
+
+
+def create_interface_in_kernel(
+ tgen, dut, name, ip_addr, vrf=None, netmask=None, create=True
+):
+ """
+    Create an interface in the kernel for ipv4/ipv6
+    Config is done in the Linux kernel:
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `dut` : Device for which interfaces to be added
+ * `name` : interface name
+ * `ip_addr` : ip address for interface
+ * `vrf` : VRF name, to which interface will be associated
+ * `netmask` : netmask value, default is None
+    * `create`: Create the interface in the kernel; pass False if it
+       already exists
+ """
+
+ rnode = tgen.gears[dut]
+
+ if create:
+ cmd = "ip link show {0} >/dev/null || ip link add {0} type dummy".format(name)
+ rnode.run(cmd)
+
+ if not netmask:
+ ifaddr = ipaddress.ip_interface(frr_unicode(ip_addr))
+ else:
+ ifaddr = ipaddress.ip_interface(
+ "{}/{}".format(frr_unicode(ip_addr), frr_unicode(netmask))
+ )
+ cmd = "ip -{0} a flush {1} scope global && ip a add {2} dev {1} && ip l set {1} up".format(
+ ifaddr.version, name, ifaddr
+ )
+ logger.debug("[DUT: %s]: Running command: %s", dut, cmd)
+ rnode.run(cmd)
+
+ if vrf:
+ cmd = "ip link set {} master {}".format(name, vrf)
+ rnode.run(cmd)
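+
+# Example (sketch, names are illustrative): create a dummy interface on r1
+# and place it in VRF RED_A:
+#   create_interface_in_kernel(tgen, "r1", "lo1", "10.0.0.1", vrf="RED_A",
+#                              netmask="255.255.255.0")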
+
+
+def shutdown_bringup_interface_in_kernel(tgen, dut, intf_name, ifaceaction=False):
+ """
+    Shut down or bring up an interface in the kernel
+    Config is done in the Linux kernel:
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `dut` : Device for which interfaces to be added
+ * `intf_name` : interface name
+    * `ifaceaction` : False to shut down and True to bring up the
+       interface
+ """
+
+ rnode = tgen.gears[dut]
+
+    action = "up" if ifaceaction else "down"
+    cmd = "ip link set dev {} {}".format(intf_name, action)
+
+ logger.debug("[DUT: %s]: Running command: %s", dut, cmd)
+ rnode.run(cmd)
+
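+# Illustrative usage sketch (hypothetical router/interface names):
+#
+#     # Shut the interface down (default), then bring it back up
+#     shutdown_bringup_interface_in_kernel(tgen, "r1", "r1-r2-eth0")
+#     shutdown_bringup_interface_in_kernel(tgen, "r1", "r1-r2-eth0", True)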
+
+def validate_ip_address(ip_address):
+ """
+    Validates an IP address and determines its address family
+ Parameters
+ ----------
+ * `ip_address`: IPv4/IPv6 address
+ Returns
+ -------
+ Type of address as string
+ """
+
+ if "/" in ip_address:
+ ip_address = ip_address.split("/")[0]
+
+ v4 = True
+ v6 = True
+ try:
+ socket.inet_aton(ip_address)
+ except socket.error as error:
+ logger.debug("Not a valid IPv4 address")
+ v4 = False
+ else:
+ return "ipv4"
+
+ try:
+ socket.inet_pton(socket.AF_INET6, ip_address)
+ except socket.error as error:
+ logger.debug("Not a valid IPv6 address")
+ v6 = False
+ else:
+ return "ipv6"
+
+    if not v4 and not v6:
+        raise Exception(
+            "InvalidIpAddr", "%s is neither a valid IPv4 nor IPv6 address" % ip_address
+        )
+
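+# Illustrative behaviour sketch:
+#
+#     validate_ip_address("10.0.20.1/32")  # -> "ipv4" (prefix is stripped)
+#     validate_ip_address("2001:db8::1")   # -> "ipv6"
+#     validate_ip_address("not-an-ip")     # raises Exception("InvalidIpAddr", ...)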
+
+def check_address_types(addr_type=None):
+ """
+    Checks the ADDRESS_TYPES environment variable and compares it with the given address type
+ """
+
+ addr_types_env = os.environ.get("ADDRESS_TYPES")
+ if not addr_types_env:
+ addr_types_env = "dual"
+
+ if addr_types_env == "dual":
+ addr_types = ["ipv4", "ipv6"]
+ elif addr_types_env == "ipv4":
+ addr_types = ["ipv4"]
+ elif addr_types_env == "ipv6":
+ addr_types = ["ipv6"]
+
+ if addr_type is None:
+ return addr_types
+
+ if addr_type not in addr_types:
+ logger.debug(
+ "{} not in supported/configured address types {}".format(
+ addr_type, addr_types
+ )
+ )
+ return False
+
+ return True
+
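+# Illustrative behaviour sketch, assuming ADDRESS_TYPES is unset (which
+# defaults to "dual"):
+#
+#     check_address_types()        # -> ["ipv4", "ipv6"]
+#     check_address_types("ipv4")  # -> True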
+
+def generate_ips(network, no_of_ips):
+ """
+    Returns a list of IPs generated from the given start prefix(es).
+
+    * `network` : start prefix (or list of prefixes) from which the IPs
+      will be generated
+    * `no_of_ips` : number of IPs to generate per prefix
+ """
+ ipaddress_list = []
+ if type(network) is not list:
+ network = [network]
+
+ for start_ipaddr in network:
+ if "/" in start_ipaddr:
+ start_ip = start_ipaddr.split("/")[0]
+ mask = int(start_ipaddr.split("/")[1])
+ else:
+ logger.debug("start_ipaddr {} must have a / in it".format(start_ipaddr))
+ assert 0
+
+ addr_type = validate_ip_address(start_ip)
+ if addr_type == "ipv4":
+ if start_ip == "0.0.0.0" and mask == 0 and no_of_ips == 1:
+ ipaddress_list.append("{}/{}".format(start_ip, mask))
+ return ipaddress_list
+ start_ip = ipaddress.IPv4Address(frr_unicode(start_ip))
+ step = 2 ** (32 - mask)
+ elif addr_type == "ipv6":
+ if start_ip == "0::0" and mask == 0 and no_of_ips == 1:
+ ipaddress_list.append("{}/{}".format(start_ip, mask))
+ return ipaddress_list
+ start_ip = ipaddress.IPv6Address(frr_unicode(start_ip))
+ step = 2 ** (128 - mask)
+ else:
+ return []
+
+ next_ip = start_ip
+ count = 0
+ while count < no_of_ips:
+ ipaddress_list.append("{}/{}".format(next_ip, mask))
+ if addr_type == "ipv6":
+ next_ip = ipaddress.IPv6Address(int(next_ip) + step)
+ else:
+ next_ip += step
+ count += 1
+
+ return ipaddress_list
+
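+# Illustrative behaviour sketch: with a /32 start prefix the step between
+# generated addresses is 2**(32-32) == 1, so consecutive host routes are
+# produced:
+#
+#     generate_ips("10.0.20.1/32", 3)
+#     # -> ["10.0.20.1/32", "10.0.20.2/32", "10.0.20.3/32"]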
+
+def find_interface_with_greater_ip(topo, router, loopback=True, interface=True):
+ """
+    Returns the highest interface IP. If loopback interfaces exist, the
+    highest IP among them is returned; otherwise the highest
+    physical-interface IP is returned.
+ * `topo` : json file data
+ * `router` : router for which highest interface should be calculated
+ """
+
+ link_data = topo["routers"][router]["links"]
+ lo_list = []
+ interfaces_list = []
+ lo_exists = False
+ for destRouterLink, data in sorted(link_data.items()):
+ if loopback:
+ if "type" in data and data["type"] == "loopback":
+ lo_exists = True
+ ip_address = topo["routers"][router]["links"][destRouterLink][
+ "ipv4"
+ ].split("/")[0]
+ lo_list.append(ip_address)
+ if interface:
+ ip_address = topo["routers"][router]["links"][destRouterLink]["ipv4"].split(
+ "/"
+ )[0]
+ interfaces_list.append(ip_address)
+
+ if lo_exists:
+ return sorted(lo_list)[-1]
+
+ return sorted(interfaces_list)[-1]
+
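+# Illustrative usage sketch ("r1" is a hypothetical router present in the
+# topology JSON); note the lookup reads each link's "ipv4" address:
+#
+#     highest_ip = find_interface_with_greater_ip(topo, "r1")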
+
+def write_test_header(tc_name):
+ """Display message at beginning of test case"""
+ count = 20
+ logger.info("*" * (len(tc_name) + count))
+ step("START -> Testcase : %s" % tc_name, reset=True)
+ logger.info("*" * (len(tc_name) + count))
+
+
+def write_test_footer(tc_name):
+ """Display message at end of test case"""
+ count = 21
+ logger.info("=" * (len(tc_name) + count))
+ logger.info("Testcase : %s -> PASSED", tc_name)
+ logger.info("=" * (len(tc_name) + count))
+
+
+def interface_status(tgen, topo, input_dict):
+ """
+    Set the status (up/down) of interfaces on a device
+    * `tgen` : Topogen object
+    * `topo` : json file data
+    * `input_dict` : routers and interface lists whose status has to be set
+ Usage
+ -----
+ input_dict = {
+ "r3": {
+ "interface_list": ['eth1-r1-r2', 'eth2-r1-r3'],
+ "status": "down"
+ }
+ }
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ try:
+ rlist = []
+
+ for router in input_dict.keys():
+ interface_list = input_dict[router]["interface_list"]
+ status = input_dict[router].setdefault("status", "up")
+ for intf in interface_list:
+ rnode = tgen.gears[router]
+ interface_set_status(rnode, intf, status)
+
+ rlist.append(router)
+
+ # Load config to routers
+ load_config_to_routers(tgen, rlist)
+
+ except Exception as e:
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+def retry(retry_timeout, initial_wait=0, expected=True, diag_pct=0.75):
+ """
+    Decorator: Retries the function while its return value is an errormsg (str), False, or it raises an exception.
+
+ * `retry_timeout`: Retry for at least this many seconds; after waiting initial_wait seconds
+ * `initial_wait`: Sleeps for this many seconds before first executing function
+    * `expected`: if False then the return logic is inverted, except for exceptions
+      (i.e., a False or errormsg (str) function return ends the retry loop,
+      and returns that False or str value)
+ * `diag_pct`: Percentage of `retry_timeout` to keep testing after negative result would have
+ been returned in order to see if a positive result comes after. This is an
+ important diagnostic tool, and normally should not be disabled. Calls to wrapped
+ functions though, can override the `diag_pct` value to make it larger in case more
+ diagnostic retrying is appropriate.
+ """
+
+ def _retry(func):
+ @wraps(func)
+ def func_retry(*args, **kwargs):
+ # We will continue to retry diag_pct of the timeout value to see if test would have passed with a
+ # longer retry timeout value.
+ saved_failure = None
+
+ retry_sleep = 2
+
+ # Allow the wrapped function's args to override the fixtures
+ _retry_timeout = kwargs.pop("retry_timeout", retry_timeout)
+ _expected = kwargs.pop("expected", expected)
+ _initial_wait = kwargs.pop("initial_wait", initial_wait)
+ _diag_pct = kwargs.pop("diag_pct", diag_pct)
+
+ start_time = datetime.now()
+ retry_until = datetime.now() + timedelta(
+ seconds=_retry_timeout + _initial_wait
+ )
+
+            if _initial_wait > 0:
+                logger.debug("Waiting for [%s]s as initial delay", _initial_wait)
+                sleep(_initial_wait)
+
+ invert_logic = not _expected
+ while True:
+ seconds_left = (retry_until - datetime.now()).total_seconds()
+ try:
+ ret = func(*args, **kwargs)
+ logger.debug("Function returned %s", ret)
+
+ negative_result = ret is False or is_string(ret)
+ if negative_result == invert_logic:
+ # Simple case, successful result in time
+ if not saved_failure:
+ return ret
+
+ # Positive result, but happened after timeout failure, very important to
+ # note for fixing tests.
+ logger.warning(
+ "RETRY DIAGNOSTIC: SUCCEED after FAILED with requested timeout of %.1fs; however, succeeded in %.1fs, investigate timeout timing",
+ _retry_timeout,
+ (datetime.now() - start_time).total_seconds(),
+ )
+ if isinstance(saved_failure, Exception):
+ raise saved_failure # pylint: disable=E0702
+ return saved_failure
+
+ except Exception as error:
+ logger.info("Function raised exception: %s", str(error))
+ ret = error
+
+ if seconds_left < 0 and saved_failure:
+ logger.info(
+ "RETRY DIAGNOSTIC: Retry timeout reached, still failing"
+ )
+ if isinstance(saved_failure, Exception):
+ raise saved_failure # pylint: disable=E0702
+ return saved_failure
+
+ if seconds_left < 0:
+ logger.info("Retry timeout of %ds reached", _retry_timeout)
+
+ saved_failure = ret
+ retry_extra_delta = timedelta(
+ seconds=seconds_left + _retry_timeout * _diag_pct
+ )
+ retry_until = datetime.now() + retry_extra_delta
+ seconds_left = retry_extra_delta.total_seconds()
+
+ # Generate bundle after setting remaining diagnostic retry time
+ generate_support_bundle()
+
+ # If user has disabled diagnostic retries return now
+ if not _diag_pct:
+ if isinstance(saved_failure, Exception):
+ raise saved_failure
+ return saved_failure
+
+ if saved_failure:
+ logger.debug(
+ "RETRY DIAG: [failure] Sleeping %ds until next retry with %.1f retry time left - too see if timeout was too short",
+ retry_sleep,
+ seconds_left,
+ )
+ else:
+ logger.debug(
+ "Sleeping %ds until next retry with %.1f retry time left",
+ retry_sleep,
+ seconds_left,
+ )
+ sleep(retry_sleep)
+
+ func_retry._original = func
+ return func_retry
+
+ return _retry
+
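+# Illustrative usage sketch for the @retry decorator; verify_something()
+# below is hypothetical. A decorated function is retried until it stops
+# returning False/an errormsg string (or raising an exception), and
+# callers may override the decorator arguments per call via kwargs:
+#
+#     @retry(retry_timeout=30, initial_wait=2)
+#     def verify_something(tgen):
+#         ...
+#
+#     result = verify_something(tgen, retry_timeout=60)  # per-call override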
+
+class Stepper:
+ """
+ Prints step number for the test case step being executed
+ """
+
+ count = 1
+
+ def __call__(self, msg, reset):
+ if reset:
+ Stepper.count = 1
+ logger.info(msg)
+ else:
+ logger.info("STEP %s: '%s'", Stepper.count, msg)
+ Stepper.count += 1
+
+
+def step(msg, reset=False):
+ """
+ Call Stepper to print test steps. Need to reset at the beginning of test.
+ * ` msg` : Step message body.
+ * `reset` : Reset step count to 1 when set to True.
+ """
+ if bool(topotest.g_pytest_config.get_option("--pause")):
+ pause_test("before :" + msg)
+ _step = Stepper()
+ _step(msg, reset)
+
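+# Illustrative usage sketch: a reset call logs the message verbatim and
+# restarts the counter; subsequent calls log auto-numbered steps:
+#
+#     step("START -> Testcase : test_bgp", reset=True)
+#     step("Configure BGP on r1")      # logged as STEP 1
+#     step("Verify BGP neighborship")  # logged as STEP 2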
+
+def do_countdown(secs):
+ """
+ Countdown timer display
+ """
+ for i in range(secs, 0, -1):
+ sys.stdout.write("{} ".format(str(i)))
+ sys.stdout.flush()
+ sleep(1)
+ return
+
+
+#############################################
+# These APIs will be used by testcases
+#############################################
+def create_interfaces_cfg(tgen, topo, build=False):
+ """
+ Create interface configuration for created topology. Basic Interface
+ configuration is provided in input json file.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `build` : Only for initial setup phase this is set as True.
+
+ Returns
+ -------
+ True or False
+ """
+
+ def _create_interfaces_ospf_cfg(ospf, c_data, data, ospf_keywords):
+ interface_data = []
+ ip_ospf = "ipv6 ospf6" if ospf == "ospf6" else "ip ospf"
+ for keyword in ospf_keywords:
+ if keyword in data[ospf]:
+ intf_ospf_value = c_data["links"][destRouterLink][ospf][keyword]
+ if "delete" in data and data["delete"]:
+ interface_data.append(
+ "no {} {}".format(ip_ospf, keyword.replace("_", "-"))
+ )
+ else:
+ interface_data.append(
+ "{} {} {}".format(
+ ip_ospf, keyword.replace("_", "-"), intf_ospf_value
+ )
+ )
+ return interface_data
+
+ result = False
+ topo = deepcopy(topo)
+
+ try:
+ interface_data_dict = {}
+
+ for c_router, c_data in topo.items():
+ interface_data = []
+ for destRouterLink, data in sorted(c_data["links"].items()):
+ # Loopback interfaces
+ if "type" in data and data["type"] == "loopback":
+ interface_name = destRouterLink
+ else:
+ interface_name = data["interface"]
+
+ interface_data.append("interface {}".format(str(interface_name)))
+
+ if "ipv4" in data:
+ intf_addr = c_data["links"][destRouterLink]["ipv4"]
+
+ if "delete" in data and data["delete"]:
+ interface_data.append("no ip address {}".format(intf_addr))
+ else:
+ interface_data.append("ip address {}".format(intf_addr))
+ if "ipv6" in data:
+ intf_addr = c_data["links"][destRouterLink]["ipv6"]
+
+ if "delete" in data and data["delete"]:
+ interface_data.append("no ipv6 address {}".format(intf_addr))
+ else:
+ interface_data.append("ipv6 address {}".format(intf_addr))
+
+ # Wait for vrf interfaces to get link local address once they are up
+ if (
+ not destRouterLink == "lo"
+ and "vrf" in topo[c_router]["links"][destRouterLink]
+ ):
+ vrf = topo[c_router]["links"][destRouterLink]["vrf"]
+ intf = topo[c_router]["links"][destRouterLink]["interface"]
+ ll = get_frr_ipv6_linklocal(tgen, c_router, intf=intf, vrf=vrf)
+
+ if "ipv6-link-local" in data:
+ intf_addr = c_data["links"][destRouterLink]["ipv6-link-local"]
+
+ if "delete" in data and data["delete"]:
+ interface_data.append("no ipv6 address {}".format(intf_addr))
+ else:
+ interface_data.append("ipv6 address {}\n".format(intf_addr))
+
+ ospf_keywords = [
+ "hello_interval",
+ "dead_interval",
+ "network",
+ "priority",
+ "cost",
+ "mtu_ignore",
+ ]
+ if "ospf" in data:
+ interface_data += _create_interfaces_ospf_cfg(
+ "ospf", c_data, data, ospf_keywords + ["area"]
+ )
+ if "ospf6" in data:
+ interface_data += _create_interfaces_ospf_cfg(
+ "ospf6", c_data, data, ospf_keywords + ["area"]
+ )
+ interface_data.append("exit")
+ if interface_data:
+ interface_data_dict[c_router] = interface_data
+
+ result = create_common_configurations(
+ tgen, interface_data_dict, "interface_config", build=build
+ )
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ return result
+
+
+def create_static_routes(tgen, input_dict, build=False):
+ """
+ Create static routes for given router as defined in input_dict
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ input_dict should be in the format below:
+ # static_routes: list of all routes
+ # network: network address
+    # no_of_ip: number of network prefixes to generate and configure,
+    #           starting from `network`
+    # admin_distance: admin distance for route/routes.
+    # next_hop: starting next-hop address
+    # tag: tag id for static routes
+    # vrf: VRF name in which static routes need to be created
+    # delete: True if config is to be removed. Default False.
+
+ Example:
+ "routers": {
+ "r1": {
+ "static_routes": [
+ {
+ "network": "100.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.1",
+ "tag": 4001,
+ "vrf": "RED_A"
+ "delete": true
+ }
+ ]
+ }
+ }
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ result = False
+ logger.debug("Entering lib API: create_static_routes()")
+ input_dict = deepcopy(input_dict)
+
+ try:
+ static_routes_list_dict = {}
+
+ for router in input_dict.keys():
+ if "static_routes" not in input_dict[router]:
+ errormsg = "static_routes not present in input_dict"
+ logger.info(errormsg)
+ continue
+
+ static_routes_list = []
+
+ static_routes = input_dict[router]["static_routes"]
+ for static_route in static_routes:
+ del_action = static_route.setdefault("delete", False)
+ no_of_ip = static_route.setdefault("no_of_ip", 1)
+ network = static_route.setdefault("network", [])
+ if type(network) is not list:
+ network = [network]
+
+ admin_distance = static_route.setdefault("admin_distance", None)
+ tag = static_route.setdefault("tag", None)
+ vrf = static_route.setdefault("vrf", None)
+ interface = static_route.setdefault("interface", None)
+ next_hop = static_route.setdefault("next_hop", None)
+ nexthop_vrf = static_route.setdefault("nexthop_vrf", None)
+
+ ip_list = generate_ips(network, no_of_ip)
+ for ip in ip_list:
+ addr_type = validate_ip_address(ip)
+
+ if addr_type == "ipv4":
+ cmd = "ip route {}".format(ip)
+ else:
+ cmd = "ipv6 route {}".format(ip)
+
+ if interface:
+ cmd = "{} {}".format(cmd, interface)
+
+ if next_hop:
+ cmd = "{} {}".format(cmd, next_hop)
+
+ if nexthop_vrf:
+ cmd = "{} nexthop-vrf {}".format(cmd, nexthop_vrf)
+
+ if vrf:
+ cmd = "{} vrf {}".format(cmd, vrf)
+
+ if tag:
+ cmd = "{} tag {}".format(cmd, str(tag))
+
+ if admin_distance:
+ cmd = "{} {}".format(cmd, admin_distance)
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ static_routes_list.append(cmd)
+
+ if static_routes_list:
+ static_routes_list_dict[router] = static_routes_list
+
+ result = create_common_configurations(
+ tgen, static_routes_list_dict, "static_route", build=build
+ )
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: create_static_routes()")
+ return result
+
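+# Illustrative usage sketch (router "r1" and the addresses below are
+# hypothetical); configures two consecutive /32 routes via the same next
+# hop:
+#
+#     input_dict = {
+#         "r1": {
+#             "static_routes": [
+#                 {"network": "10.0.20.1/32", "no_of_ip": 2, "next_hop": "10.0.0.2"}
+#             ]
+#         }
+#     }
+#     result = create_static_routes(tgen, input_dict)
+#     assert result is True, "Failed to create static routes: {}".format(result)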
+
+def create_prefix_lists(tgen, input_dict, build=False):
+ """
+ Create ip prefix lists as per the config provided in input
+ JSON or input_dict
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+ Usage
+ -----
+    # pf_list_1: name of prefix-list, user defined
+ # seqid: prefix-list seqid, auto-generated if not given by user
+ # network: criteria for applying prefix-list
+ # action: permit/deny
+ # le: less than or equal number of bits
+ # ge: greater than or equal number of bits
+ Example
+ -------
+ input_dict = {
+ "r1": {
+ "prefix_lists":{
+ "ipv4": {
+ "pf_list_1": [
+ {
+ "seqid": 10,
+ "network": "any",
+ "action": "permit",
+ "le": "32",
+ "ge": "30",
+ "delete": True
+ }
+ ]
+ }
+ }
+ }
+ }
+ Returns
+ -------
+ errormsg or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+ try:
+ config_data_dict = {}
+
+ for router in input_dict.keys():
+ if "prefix_lists" not in input_dict[router]:
+ errormsg = "prefix_lists not present in input_dict"
+ logger.debug(errormsg)
+ continue
+
+ config_data = []
+ prefix_lists = input_dict[router]["prefix_lists"]
+ for addr_type, prefix_data in prefix_lists.items():
+ if not check_address_types(addr_type):
+ continue
+
+ for prefix_name, prefix_list in prefix_data.items():
+ for prefix_dict in prefix_list:
+ if "action" not in prefix_dict or "network" not in prefix_dict:
+ errormsg = "'action' or network' missing in" " input_dict"
+ return errormsg
+
+ network_addr = prefix_dict["network"]
+ action = prefix_dict["action"]
+ le = prefix_dict.setdefault("le", None)
+ ge = prefix_dict.setdefault("ge", None)
+ seqid = prefix_dict.setdefault("seqid", None)
+ del_action = prefix_dict.setdefault("delete", False)
+ if seqid is None:
+ seqid = get_seq_id("prefix_lists", router, prefix_name)
+ else:
+ set_seq_id("prefix_lists", router, seqid, prefix_name)
+
+ if addr_type == "ipv4":
+ protocol = "ip"
+ else:
+ protocol = "ipv6"
+
+ cmd = "{} prefix-list {} seq {} {} {}".format(
+ protocol, prefix_name, seqid, action, network_addr
+ )
+ if le:
+ cmd = "{} le {}".format(cmd, le)
+ if ge:
+ cmd = "{} ge {}".format(cmd, ge)
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+ if config_data:
+ config_data_dict[router] = config_data
+
+ result = create_common_configurations(
+ tgen, config_data_dict, "prefix_list", build=build
+ )
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
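+# Illustrative usage sketch (names are hypothetical); creates one ipv4
+# prefix-list entry permitting 10.0.0.0/8 with prefixes up to /32:
+#
+#     input_dict = {
+#         "r1": {
+#             "prefix_lists": {
+#                 "ipv4": {
+#                     "pf_list_1": [
+#                         {"seqid": 10, "network": "10.0.0.0/8",
+#                          "action": "permit", "le": "32"}
+#                     ]
+#                 }
+#             }
+#         }
+#     }
+#     result = create_prefix_lists(tgen, input_dict)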
+
+def create_route_maps(tgen, input_dict, build=False):
+ """
+ Create route-map on the devices as per the arguments passed
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+ Usage
+ -----
+ # route_maps: key, value pair for route-map name and its attribute
+ # rmap_match_prefix_list_1: user given name for route-map
+ # action: PERMIT/DENY
+ # match: key,value pair for match criteria. prefix_list, community-list,
+ large-community-list or tag. Only one option at a time.
+ # prefix_list: name of prefix list
+ # large-community-list: name of large community list
+    # community_list: name of community list
+ # tag: tag id for static routes
+ # set: key, value pair for modifying route attributes
+ # localpref: preference value for the network
+ # med: metric value advertised for AS
+ # aspath: set AS path value
+ # weight: weight for the route
+ # community: standard community value to be attached
+ # large_community: large community value to be attached
+ # community_additive: if set to "additive", adds community/large-community
+ value to the existing values of the network prefix
+ Example:
+ --------
+ input_dict = {
+ "r1": {
+ "route_maps": {
+ "rmap_match_prefix_list_1": [
+ {
+ "action": "PERMIT",
+ "match": {
+ "ipv4": {
+ "prefix_list": "pf_list_1"
+ }
+ "ipv6": {
+ "prefix_list": "pf_list_1"
+ }
+ "large-community-list": {
+ "id": "community_1",
+ "exact_match": True
+ }
+ "community_list": {
+ "id": "community_2",
+ "exact_match": True
+ }
+ "tag": "tag_id"
+ },
+ "set": {
+ "locPrf": 150,
+ "metric": 30,
+ "path": {
+ "num": 20000,
+ "action": "prepend",
+ },
+ "weight": 500,
+ "community": {
+ "num": "1:2 2:3",
+ "action": additive
+ }
+ "large_community": {
+ "num": "1:2:3 4:5;6",
+ "action": additive
+ },
+ }
+ }
+ ]
+ }
+ }
+ }
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ input_dict = deepcopy(input_dict)
+ try:
+ rmap_data_dict = {}
+
+ for router in input_dict.keys():
+ if "route_maps" not in input_dict[router]:
+ logger.debug("route_maps not present in input_dict")
+ continue
+ rmap_data = []
+ for rmap_name, rmap_value in input_dict[router]["route_maps"].items():
+ for rmap_dict in rmap_value:
+ del_action = rmap_dict.setdefault("delete", False)
+
+ if del_action:
+ rmap_data.append("no route-map {}".format(rmap_name))
+ continue
+
+ if "action" not in rmap_dict:
+ errormsg = "action not present in input_dict"
+ logger.error(errormsg)
+ return False
+
+ rmap_action = rmap_dict.setdefault("action", "deny")
+
+ seq_id = rmap_dict.setdefault("seq_id", None)
+ if seq_id is None:
+ seq_id = get_seq_id("route_maps", router, rmap_name)
+ else:
+ set_seq_id("route_maps", router, seq_id, rmap_name)
+
+ rmap_data.append(
+ "route-map {} {} {}".format(rmap_name, rmap_action, seq_id)
+ )
+
+ if "continue" in rmap_dict:
+ continue_to = rmap_dict["continue"]
+ if continue_to:
+ rmap_data.append("on-match goto {}".format(continue_to))
+ else:
+ logger.error(
+ "In continue, 'route-map entry "
+ "sequence number' is not provided"
+ )
+ return False
+
+ if "goto" in rmap_dict:
+ go_to = rmap_dict["goto"]
+ if go_to:
+ rmap_data.append("on-match goto {}".format(go_to))
+ else:
+ logger.error(
+ "In goto, 'Goto Clause number' is not" " provided"
+ )
+ return False
+
+ if "call" in rmap_dict:
+ call_rmap = rmap_dict["call"]
+ if call_rmap:
+ rmap_data.append("call {}".format(call_rmap))
+ else:
+ logger.error(
+ "In call, 'destination Route-Map' is" " not provided"
+ )
+ return False
+
+ # Verifying if SET criteria is defined
+ if "set" in rmap_dict:
+ set_data = rmap_dict["set"]
+ ipv4_data = set_data.setdefault("ipv4", {})
+ ipv6_data = set_data.setdefault("ipv6", {})
+ local_preference = set_data.setdefault("locPrf", None)
+ metric = set_data.setdefault("metric", None)
+ metric_type = set_data.setdefault("metric-type", None)
+ as_path = set_data.setdefault("path", {})
+ weight = set_data.setdefault("weight", None)
+ community = set_data.setdefault("community", {})
+ large_community = set_data.setdefault("large_community", {})
+ large_comm_list = set_data.setdefault("large_comm_list", {})
+ set_action = set_data.setdefault("set_action", None)
+ nexthop = set_data.setdefault("nexthop", None)
+ origin = set_data.setdefault("origin", None)
+ ext_comm_list = set_data.setdefault("extcommunity", {})
+ metrictype = set_data.setdefault("metric-type", None)
+
+ # Local Preference
+ if local_preference:
+ rmap_data.append(
+ "set local-preference {}".format(local_preference)
+ )
+
+ # Metric-Type
+ if metrictype:
+ rmap_data.append("set metric-type {}\n".format(metrictype))
+
+ # Metric
+ if metric:
+ del_comm = set_data.setdefault("delete", None)
+ if del_comm:
+ rmap_data.append("no set metric {}".format(metric))
+ else:
+ rmap_data.append("set metric {}".format(metric))
+
+ # Origin
+ if origin:
+ rmap_data.append("set origin {} \n".format(origin))
+
+ # AS Path Prepend
+ if as_path:
+ as_num = as_path.setdefault("as_num", None)
+ as_action = as_path.setdefault("as_action", None)
+ if as_action and as_num:
+ rmap_data.append(
+ "set as-path {} {}".format(as_action, as_num)
+ )
+
+ # Community
+ if community:
+ num = community.setdefault("num", None)
+ comm_action = community.setdefault("action", None)
+ if num:
+ cmd = "set community {}".format(num)
+ if comm_action:
+ cmd = "{} {}".format(cmd, comm_action)
+ rmap_data.append(cmd)
+ else:
+ logger.error("In community, AS Num not" " provided")
+ return False
+
+ if large_community:
+ num = large_community.setdefault("num", None)
+ comm_action = large_community.setdefault("action", None)
+ if num:
+ cmd = "set large-community {}".format(num)
+ if comm_action:
+ cmd = "{} {}".format(cmd, comm_action)
+
+ rmap_data.append(cmd)
+ else:
+ logger.error(
+ "In large_community, AS Num not" " provided"
+ )
+ return False
+ if large_comm_list:
+ id = large_comm_list.setdefault("id", None)
+ del_comm = large_comm_list.setdefault("delete", None)
+ if id:
+ cmd = "set large-comm-list {}".format(id)
+ if del_comm:
+ cmd = "{} delete".format(cmd)
+
+ rmap_data.append(cmd)
+ else:
+ logger.error("In large_comm_list 'id' not" " provided")
+ return False
+
+ if ext_comm_list:
+ rt = ext_comm_list.setdefault("rt", None)
+ del_comm = ext_comm_list.setdefault("delete", None)
+ if rt:
+ cmd = "set extcommunity rt {}".format(rt)
+ if del_comm:
+ cmd = "{} delete".format(cmd)
+
+ rmap_data.append(cmd)
+ else:
+ logger.debug("In ext_comm_list 'rt' not" " provided")
+ return False
+
+ # Weight
+ if weight:
+ rmap_data.append("set weight {}".format(weight))
+ if ipv6_data:
+ nexthop = ipv6_data.setdefault("nexthop", None)
+ if nexthop:
+ rmap_data.append("set ipv6 next-hop {}".format(nexthop))
+
+ # Adding MATCH and SET sequence to RMAP if defined
+ if "match" in rmap_dict:
+ match_data = rmap_dict["match"]
+ ipv4_data = match_data.setdefault("ipv4", {})
+ ipv6_data = match_data.setdefault("ipv6", {})
+ community = match_data.setdefault("community_list", {})
+ large_community = match_data.setdefault("large_community", {})
+ large_community_list = match_data.setdefault(
+ "large_community_list", {}
+ )
+
+ metric = match_data.setdefault("metric", None)
+ source_vrf = match_data.setdefault("source-vrf", None)
+
+ if ipv4_data:
+ # fetch prefix list data from rmap
+ prefix_name = ipv4_data.setdefault("prefix_lists", None)
+ if prefix_name:
+ rmap_data.append(
+ "match ip address"
+ " prefix-list {}".format(prefix_name)
+ )
+
+ # fetch tag data from rmap
+ tag = ipv4_data.setdefault("tag", None)
+ if tag:
+ rmap_data.append("match tag {}".format(tag))
+
+ # fetch large community data from rmap
+ large_community_list = ipv4_data.setdefault(
+ "large_community_list", {}
+ )
+ large_community = match_data.setdefault(
+ "large_community", {}
+ )
+
+ if ipv6_data:
+ prefix_name = ipv6_data.setdefault("prefix_lists", None)
+ if prefix_name:
+ rmap_data.append(
+ "match ipv6 address"
+ " prefix-list {}".format(prefix_name)
+ )
+
+ # fetch tag data from rmap
+ tag = ipv6_data.setdefault("tag", None)
+ if tag:
+ rmap_data.append("match tag {}".format(tag))
+
+ # fetch large community data from rmap
+ large_community_list = ipv6_data.setdefault(
+ "large_community_list", {}
+ )
+ large_community = match_data.setdefault(
+ "large_community", {}
+ )
+
+ if community:
+ if "id" not in community:
+ logger.error(
+ "'id' is mandatory for "
+ "community-list in match"
+ " criteria"
+ )
+ return False
+ cmd = "match community {}".format(community["id"])
+ exact_match = community.setdefault("exact_match", False)
+ if exact_match:
+ cmd = "{} exact-match".format(cmd)
+
+ rmap_data.append(cmd)
+ if large_community:
+ if "id" not in large_community:
+ logger.error(
+ "'id' is mandatory for "
+ "large-community-list in match "
+ "criteria"
+ )
+ return False
+ cmd = "match large-community {}".format(
+ large_community["id"]
+ )
+ exact_match = large_community.setdefault(
+ "exact_match", False
+ )
+ if exact_match:
+ cmd = "{} exact-match".format(cmd)
+ rmap_data.append(cmd)
+ if large_community_list:
+ if "id" not in large_community_list:
+ logger.error(
+ "'id' is mandatory for "
+ "large-community-list in match "
+ "criteria"
+ )
+ return False
+ cmd = "match large-community {}".format(
+ large_community_list["id"]
+ )
+ exact_match = large_community_list.setdefault(
+ "exact_match", False
+ )
+ if exact_match:
+ cmd = "{} exact-match".format(cmd)
+ rmap_data.append(cmd)
+
+ if source_vrf:
+ cmd = "match source-vrf {}".format(source_vrf)
+ rmap_data.append(cmd)
+
+ if metric:
+ cmd = "match metric {}".format(metric)
+ rmap_data.append(cmd)
+
+ if rmap_data:
+ rmap_data_dict[router] = rmap_data
+
+ result = create_common_configurations(
+ tgen, rmap_data_dict, "route_maps", build=build
+ )
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
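+# Illustrative usage sketch (names are hypothetical); a permit route-map
+# entry that matches an ipv4 prefix-list and sets local-preference:
+#
+#     input_dict = {
+#         "r1": {
+#             "route_maps": {
+#                 "rmap_pf_1": [
+#                     {
+#                         "action": "permit",
+#                         "seq_id": 10,
+#                         "match": {"ipv4": {"prefix_lists": "pf_list_1"}},
+#                         "set": {"locPrf": 150},
+#                     }
+#                 ]
+#             }
+#         }
+#     }
+#     result = create_route_maps(tgen, input_dict)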
+
+def delete_route_maps(tgen, input_dict):
+ """
+ Delete ip route maps from device
+ * `tgen` : Topogen object
+ * `input_dict` : for which router,
+ route map has to be deleted
+ Usage
+ -----
+ # Delete route-map rmap_1 and rmap_2 from router r1
+ input_dict = {
+ "r1": {
+ "route_maps": ["rmap_1", "rmap__2"]
+ }
+ }
+ result = delete_route_maps("ipv4", input_dict)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router in input_dict.keys():
+ route_maps = input_dict[router]["route_maps"][:]
+ rmap_data = input_dict[router]
+ rmap_data["route_maps"] = {}
+ for route_map_name in route_maps:
+ rmap_data["route_maps"].update({route_map_name: [{"delete": True}]})
+
+ return create_route_maps(tgen, input_dict)
+
+
+def create_bgp_community_lists(tgen, input_dict, build=False):
+ """
+ Create bgp community-list or large-community-list on the devices as per
+ the arguments passed. Takes list of communities in input.
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+ Usage
+ -----
+ input_dict_1 = {
+ "r3": {
+ "bgp_community_lists": [
+ {
+ "community_type": "standard",
+ "action": "permit",
+ "name": "rmap_lcomm_{}".format(addr_type),
+ "value": "1:1:1 1:2:3 2:1:1 2:2:2",
+ "large": True
+ }
+ ]
+ }
+ }
+ result = create_bgp_community_lists(tgen, input_dict_1)
+ """
+
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ input_dict = deepcopy(input_dict)
+ try:
+ config_data_dict = {}
+
+ for router in input_dict.keys():
+ if "bgp_community_lists" not in input_dict[router]:
+ errormsg = "bgp_community_lists not present in input_dict"
+ logger.debug(errormsg)
+ continue
+
+ config_data = []
+
+ community_list = input_dict[router]["bgp_community_lists"]
+ for community_dict in community_list:
+ del_action = community_dict.setdefault("delete", False)
+ community_type = community_dict.setdefault("community_type", None)
+ action = community_dict.setdefault("action", None)
+ value = community_dict.setdefault("value", "")
+ large = community_dict.setdefault("large", None)
+ name = community_dict.setdefault("name", None)
+ if large:
+ cmd = "bgp large-community-list"
+ else:
+ cmd = "bgp community-list"
+
+ if not large and not (community_type and action and value):
+ errormsg = (
+ "community_type, action and value are "
+ "required in bgp_community_list"
+ )
+ logger.error(errormsg)
+ return False
+
+ cmd = "{} {} {} {} {}".format(cmd, community_type, name, action, value)
+
+ if del_action:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+
+ if config_data:
+ config_data_dict[router] = config_data
+
+ result = create_common_configurations(
+ tgen, config_data_dict, "bgp_community_list", build=build
+ )
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+def shutdown_bringup_interface(tgen, dut, intf_name, ifaceaction=False):
+ """
+    Shut down or bring up a router's interface
+ * `tgen` : Topogen object
+ * `dut` : Device under test
+ * `intf_name` : Interface name to be shut/no shut
+ * `ifaceaction` : Action, to shut/no shut interface,
+ by default is False
+ Usage
+ -----
+ dut = "r3"
+ intf = "r3-r1-eth0"
+ # Shut down interface
+ shutdown_bringup_interface(tgen, dut, intf, False)
+ # Bring up interface
+ shutdown_bringup_interface(tgen, dut, intf, True)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ router_list = tgen.routers()
+ if ifaceaction:
+ logger.info("Bringing up interface {} : {}".format(dut, intf_name))
+ else:
+ logger.info("Shutting down interface {} : {}".format(dut, intf_name))
+
+ interface_set_status(router_list[dut], intf_name, ifaceaction)
+
+
+def addKernelRoute(
+ tgen, router, intf, group_addr_range, next_hop=None, src=None, del_action=None
+):
+ """
+ Add route to kernel
+
+ Parameters:
+ -----------
+ * `tgen` : Topogen object
+ * `router`: router for which kernel routes needs to be added
+    * `intf`: interface name via which kernel routes need to be added
+    * `group_addr_range`: destination prefix or list of prefixes to add
+    * `next_hop`: optional next-hop address for the route(s)
+    * `src`: optional source address (ipv6 only)
+    * `del_action`: when set, delete the route(s) instead of adding them
+
+ returns:
+ --------
+ errormsg or True
+ """
+
+ logger.debug("Entering lib API: addKernelRoute()")
+
+ rnode = tgen.gears[router]
+
+ if type(group_addr_range) is not list:
+ group_addr_range = [group_addr_range]
+
+ for grp_addr in group_addr_range:
+ addr_type = validate_ip_address(grp_addr)
+ if addr_type == "ipv4":
+ if next_hop is not None:
+ cmd = "ip route add {} via {}".format(grp_addr, next_hop)
+ else:
+ cmd = "ip route add {} dev {}".format(grp_addr, intf)
+ if del_action:
+ cmd = "ip route del {}".format(grp_addr)
+ verify_cmd = "ip route"
+ elif addr_type == "ipv6":
+ if intf and src:
+ cmd = "ip -6 route add {} dev {} src {}".format(grp_addr, intf, src)
+ else:
+ cmd = "ip -6 route add {} via {}".format(grp_addr, next_hop)
+ verify_cmd = "ip -6 route"
+ if del_action:
+ cmd = "ip -6 route del {}".format(grp_addr)
+
+ logger.info("[DUT: {}]: Running command: [{}]".format(router, cmd))
+ output = rnode.run(cmd)
+
+ def check_in_kernel(rnode, verify_cmd, grp_addr, router):
+ # Verifying if ip route added to kernel
+ errormsg = None
+ result = rnode.run(verify_cmd)
+ logger.debug("{}\n{}".format(verify_cmd, result))
+ if "/" in grp_addr:
+ ip, mask = grp_addr.split("/")
+ if mask == "32" or mask == "128":
+ grp_addr = ip
+ else:
+ mask = "32" if addr_type == "ipv4" else "128"
+
+ if not re_search(r"{}".format(grp_addr), result) and mask != "0":
+ errormsg = (
+ "[DUT: {}]: Kernal route is not added for group"
+ " address {} Config output: {}".format(
+ router, grp_addr, output
+ )
+ )
+
+ return errormsg
+
+ test_func = functools.partial(
+ check_in_kernel, rnode, verify_cmd, grp_addr, router
+ )
+ (result, out) = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result, out
+
+ logger.debug("Exiting lib API: addKernelRoute()")
+ return True
+
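+# Illustrative usage sketch (router/interface names and addresses are
+# hypothetical); adds a kernel route for a multicast group range via an
+# interface, and a unicast route via a next hop:
+#
+#     addKernelRoute(tgen, "r1", "r1-r2-eth0", "225.1.1.0/24")
+#     addKernelRoute(tgen, "r1", "r1-r2-eth0", "192.168.100.0/24",
+#                    next_hop="10.0.0.2")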
+
+def configure_vxlan(tgen, input_dict):
+ """
+ Add and configure vxlan
+
+ * `tgen`: tgen object
+ * `input_dict` : data for vxlan config
+
+ Usage:
+ ------
+ input_dict= {
+ "dcg2":{
+ "vxlan":[{
+ "vxlan_name": "vxlan75100",
+ "vxlan_id": "75100",
+ "dstport": 4789,
+ "local_addr": "120.0.0.1",
+ "learning": "no",
+ "delete": True
+ }]
+ }
+ }
+
+ configure_vxlan(tgen, input_dict)
+
+ Returns:
+ -------
+ True or errormsg
+
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ router_list = tgen.routers()
+ for dut in input_dict.keys():
+ rnode = router_list[dut]
+
+ if "vxlan" in input_dict[dut]:
+ for vxlan_dict in input_dict[dut]["vxlan"]:
+ cmd = "ip link "
+
+ del_vxlan = vxlan_dict.setdefault("delete", None)
+ vxlan_names = vxlan_dict.setdefault("vxlan_name", [])
+ vxlan_ids = vxlan_dict.setdefault("vxlan_id", [])
+ dstport = vxlan_dict.setdefault("dstport", None)
+ local_addr = vxlan_dict.setdefault("local_addr", None)
+ learning = vxlan_dict.setdefault("learning", None)
+
+ config_data = []
+ if vxlan_names and vxlan_ids:
+ for vxlan_name, vxlan_id in zip(vxlan_names, vxlan_ids):
+ cmd = "ip link"
+
+ if del_vxlan:
+ cmd = "{} del {} type vxlan id {}".format(
+ cmd, vxlan_name, vxlan_id
+ )
+ else:
+ cmd = "{} add {} type vxlan id {}".format(
+ cmd, vxlan_name, vxlan_id
+ )
+
+ if dstport:
+ cmd = "{} dstport {}".format(cmd, dstport)
+
+ if local_addr:
+ ip_cmd = "ip addr add {} dev {}".format(
+ local_addr, vxlan_name
+ )
+ if del_vxlan:
+ ip_cmd = "ip addr del {} dev {}".format(
+ local_addr, vxlan_name
+ )
+
+ config_data.append(ip_cmd)
+
+ cmd = "{} local {}".format(cmd, local_addr)
+
+ if learning == "no":
+ cmd = "{} nolearning".format(cmd)
+
+ elif learning == "yes":
+ cmd = "{} learning".format(cmd)
+
+ config_data.append(cmd)
+
+ try:
+ for _cmd in config_data:
+ logger.info("[DUT: %s]: Running command: %s", dut, _cmd)
+ rnode.run(_cmd)
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return True
+
+
+def configure_brctl(tgen, topo, input_dict):
+ """
+ Add and configure brctl
+
+ * `tgen`: tgen object
+ * `input_dict` : data for brctl config
+
+ Usage:
+ ------
+ input_dict= {
+ dut:{
+ "brctl": [{
+ "brctl_name": "br100",
+ "addvxlan": "vxlan75100",
+ "vrf": "RED",
+ "stp": "off"
+ }]
+ }
+ }
+
+ configure_brctl(tgen, topo, input_dict)
+
+ Returns:
+ -------
+ True or errormsg
+
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ router_list = tgen.routers()
+ for dut in input_dict.keys():
+ rnode = router_list[dut]
+
+ if "brctl" in input_dict[dut]:
+ for brctl_dict in input_dict[dut]["brctl"]:
+ brctl_names = brctl_dict.setdefault("brctl_name", [])
+ addvxlans = brctl_dict.setdefault("addvxlan", [])
+ stp_values = brctl_dict.setdefault("stp", [])
+ vrfs = brctl_dict.setdefault("vrf", [])
+
+ ip_cmd = "ip link set"
+ for brctl_name, vxlan, vrf, stp in zip(
+ brctl_names, addvxlans, vrfs, stp_values
+ ):
+ ip_cmd_list = []
+ cmd = "ip link add name {} type bridge stp_state {}".format(
+ brctl_name, stp
+ )
+
+ logger.info("[DUT: %s]: Running command: %s", dut, cmd)
+ rnode.run(cmd)
+
+ ip_cmd_list.append("{} up dev {}".format(ip_cmd, brctl_name))
+
+ if vxlan:
+ cmd = "{} dev {} master {}".format(ip_cmd, vxlan, brctl_name)
+
+ logger.info("[DUT: %s]: Running command: %s", dut, cmd)
+ rnode.run(cmd)
+
+ ip_cmd_list.append("{} up dev {}".format(ip_cmd, vxlan))
+
+ if vrf:
+ ip_cmd_list.append(
+ "{} dev {} master {}".format(ip_cmd, brctl_name, vrf)
+ )
+
+ for intf_name, data in topo["routers"][dut]["links"].items():
+ if "vrf" not in data:
+ continue
+
+ if data["vrf"] == vrf:
+ ip_cmd_list.append(
+ "{} up dev {}".format(ip_cmd, data["interface"])
+ )
+
+ try:
+ for _ip_cmd in ip_cmd_list:
+ logger.info("[DUT: %s]: Running command: %s", dut, _ip_cmd)
+ rnode.run(_ip_cmd)
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+def configure_interface_mac(tgen, input_dict):
+ """
+    Configure MAC addresses on interfaces
+
+ * `tgen`: tgen object
+ * `input_dict` : data for mac config
+
+ input_mac= {
+ "edge1":{
+ "br75100": "00:80:48:BA:d1:00,
+ "br75200": "00:80:48:BA:d1:00
+ }
+ }
+
+ configure_interface_mac(tgen, input_mac)
+
+ Returns:
+ -------
+ True or errormsg
+
+ """
+
+ router_list = tgen.routers()
+ for dut in input_dict.keys():
+ rnode = router_list[dut]
+
+ for intf, mac in input_dict[dut].items():
+ cmd = "ip link set {} address {}".format(intf, mac)
+ logger.info("[DUT: %s]: Running command: %s", dut, cmd)
+
+ try:
+ result = rnode.run(cmd)
+ if len(result) != 0:
+ return result
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ return True
+
+
+#############################################
+# Verification APIs
+#############################################
+@retry(retry_timeout=40)
+def verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=None,
+ protocol=None,
+ tag=None,
+ metric=None,
+ fib=None,
+ count_only=False,
+ admin_distance=None,
+):
+ """
+ Data will be read from input_dict or input JSON file, API will generate
+ same prefixes, which were redistributed by either create_static_routes() or
+    advertise_networks_using_network_command() and will verify that next_hop and
+    each prefix/route is present in "show ip/ipv6 route {bgp/static} json"
+ command o/p.
+
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `addr_type` : ip type, ipv4/ipv6
+ * `dut`: Device Under Test, for which user wants to test the data
+ * `input_dict` : input dict, has details of static routes
+ * `next_hop`[optional]: next_hop which needs to be verified,
+ default: static
+ * `protocol`[optional]: protocol, default = None
+ * `count_only`[optional]: count of nexthops only, not specific addresses,
+ default = False
+
+ Usage
+ -----
+ # RIB can be verified for static routes OR network advertised using
+ network command. Following are input_dicts to create static routes
+ and advertise networks using network command. Any one of the input_dict
+    can be passed to verify_rib() to verify routes in DUT's RIB.
+
+ # Creating static routes for r1
+ input_dict = {
+ "r1": {
+ "static_routes": [{"network": "10.0.20.1/32", "no_of_ip": 9, \
+ "admin_distance": 100, "next_hop": "10.0.0.2", "tag": 4001}]
+ }}
+ # Advertising networks using network command in router r1
+ input_dict = {
+ "r1": {
+ "advertise_networks": [{"start_ip": "20.0.0.0/32",
+ "no_of_network": 10},
+ {"start_ip": "30.0.0.0/32"}]
+ }}
+ # Verifying ipv4 routes in router r1 learned via BGP
+ dut = "r2"
+ protocol = "bgp"
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol = protocol)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ router_list = tgen.routers()
+ additional_nexthops_in_required_nhs = []
+ found_hops = []
+ for routerInput in input_dict.keys():
+ for router, rnode in router_list.items():
+ if router != dut:
+ continue
+
+ logger.info("Checking router %s RIB:", router)
+
+ # Verifying RIB routes
+ if addr_type == "ipv4":
+ command = "show ip route"
+ else:
+ command = "show ipv6 route"
+
+ found_routes = []
+ missing_routes = []
+
+ if "static_routes" in input_dict[routerInput]:
+ static_routes = input_dict[routerInput]["static_routes"]
+
+ for static_route in static_routes:
+ if "vrf" in static_route and static_route["vrf"] is not None:
+ logger.info(
+ "[DUT: {}]: Verifying routes for VRF:"
+ " {}".format(router, static_route["vrf"])
+ )
+
+ cmd = "{} vrf {}".format(command, static_route["vrf"])
+
+ else:
+ cmd = "{}".format(command)
+
+ if protocol:
+ cmd = "{} {}".format(cmd, protocol)
+
+ cmd = "{} json".format(cmd)
+
+ rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+ if bool(rib_routes_json) is False:
+ errormsg = "No route found in rib of router {}..".format(router)
+ return errormsg
+
+ network = static_route["network"]
+ if "no_of_ip" in static_route:
+ no_of_ip = static_route["no_of_ip"]
+ else:
+ no_of_ip = 1
+
+ if "tag" in static_route:
+ _tag = static_route["tag"]
+ else:
+ _tag = None
+
+ # Generating IPs for verification
+ ip_list = generate_ips(network, no_of_ip)
+ st_found = False
+ nh_found = False
+
+ for st_rt in ip_list:
+ st_rt = str(
+ ipaddress.ip_network(frr_unicode(st_rt), strict=False)
+ )
+ _addr_type = validate_ip_address(st_rt)
+ if _addr_type != addr_type:
+ continue
+
+ if st_rt in rib_routes_json:
+ st_found = True
+ found_routes.append(st_rt)
+
+ if "queued" in rib_routes_json[st_rt][0]:
+ errormsg = "Route {} is queued\n".format(st_rt)
+ return errormsg
+
+ if fib and next_hop:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+
+ for mnh in range(0, len(rib_routes_json[st_rt])):
+ if not "selected" in rib_routes_json[st_rt][mnh]:
+ continue
+
+ if (
+ "fib"
+ in rib_routes_json[st_rt][mnh]["nexthops"][0]
+ ):
+ found_hops.append(
+ [
+ rib_r["ip"]
+ for rib_r in rib_routes_json[st_rt][
+ mnh
+ ]["nexthops"]
+ ]
+ )
+
+ if found_hops[0]:
+ missing_list_of_nexthops = set(
+ found_hops[0]
+ ).difference(next_hop)
+ additional_nexthops_in_required_nhs = set(
+ next_hop
+ ).difference(found_hops[0])
+
+ if additional_nexthops_in_required_nhs:
+ logger.info(
+ "Nexthop "
+ "%s is not active for route %s in "
+ "RIB of router %s\n",
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ errormsg = (
+ "Nexthop {} is not active"
+ " for route {} in RIB of router"
+ " {}\n".format(
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ )
+ return errormsg
+ else:
+ nh_found = True
+
+ elif next_hop and fib is None:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+ found_hops = [
+ rib_r["ip"]
+ for rib_r in rib_routes_json[st_rt][0]["nexthops"]
+ if "ip" in rib_r
+ ]
+
+ # If somehow key "ip" is not found in nexthops JSON
+                                # then found_hops would be empty; this particular
+ # situation will be handled here
+ if not len(found_hops):
+ errormsg = (
+ "Nexthop {} is Missing for "
+ "route {} in RIB of router {}\n".format(
+ next_hop,
+ st_rt,
+ dut,
+ )
+ )
+ return errormsg
+
+ # Check only the count of nexthops
+ if count_only:
+ if len(next_hop) == len(found_hops):
+ nh_found = True
+ else:
+ errormsg = (
+ "Nexthops are missing for "
+ "route {} in RIB of router {}: "
+ "expected {}, found {}\n".format(
+ st_rt,
+ dut,
+ len(next_hop),
+ len(found_hops),
+ )
+ )
+ return errormsg
+
+ # Check the actual nexthops
+ elif found_hops:
+ missing_list_of_nexthops = set(
+ found_hops
+ ).difference(next_hop)
+ additional_nexthops_in_required_nhs = set(
+ next_hop
+ ).difference(found_hops)
+
+ if additional_nexthops_in_required_nhs:
+ logger.info(
+ "Missing nexthop %s for route"
+ " %s in RIB of router %s\n",
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ errormsg = (
+ "Nexthop {} is Missing for "
+ "route {} in RIB of router {}\n".format(
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ )
+ return errormsg
+ else:
+ nh_found = True
+
+ if tag:
+ if "tag" not in rib_routes_json[st_rt][0]:
+ errormsg = (
+ "[DUT: {}]: tag is not"
+ " present for"
+ " route {} in RIB \n".format(dut, st_rt)
+ )
+ return errormsg
+
+ if _tag != rib_routes_json[st_rt][0]["tag"]:
+ errormsg = (
+ "[DUT: {}]: tag value {}"
+ " is not matched for"
+ " route {} in RIB \n".format(
+ dut,
+ _tag,
+ st_rt,
+ )
+ )
+ return errormsg
+
+ if admin_distance is not None:
+ if "distance" not in rib_routes_json[st_rt][0]:
+ errormsg = (
+ "[DUT: {}]: admin distance is"
+ " not present for"
+ " route {} in RIB \n".format(dut, st_rt)
+ )
+ return errormsg
+
+ if (
+ admin_distance
+ != rib_routes_json[st_rt][0]["distance"]
+ ):
+ errormsg = (
+ "[DUT: {}]: admin distance value "
+ "{} is not matched for "
+ "route {} in RIB \n".format(
+ dut,
+ admin_distance,
+ st_rt,
+ )
+ )
+ return errormsg
+
+ if metric is not None:
+ if "metric" not in rib_routes_json[st_rt][0]:
+ errormsg = (
+ "[DUT: {}]: metric is"
+ " not present for"
+ " route {} in RIB \n".format(dut, st_rt)
+ )
+ return errormsg
+
+ if metric != rib_routes_json[st_rt][0]["metric"]:
+ errormsg = (
+ "[DUT: {}]: metric value "
+ "{} is not matched for "
+ "route {} in RIB \n".format(
+ dut,
+ metric,
+ st_rt,
+ )
+ )
+ return errormsg
+
+ else:
+ missing_routes.append(st_rt)
+
+ if nh_found:
+ logger.info(
+ "[DUT: {}]: Found next_hop {} for"
+ " RIB routes: {}".format(router, next_hop, found_routes)
+ )
+
+ if len(missing_routes) > 0:
+ errormsg = "[DUT: {}]: Missing route in RIB, " "routes: {}".format(
+ dut, missing_routes
+ )
+ return errormsg
+
+ if found_routes:
+ logger.info(
+ "[DUT: %s]: Verified routes in RIB, found" " routes are: %s\n",
+ dut,
+ found_routes,
+ )
+
+ continue
+
+ if "bgp" in input_dict[routerInput]:
+ if (
+ "advertise_networks"
+ not in input_dict[routerInput]["bgp"]["address_family"][addr_type][
+ "unicast"
+ ]
+ ):
+ continue
+
+ found_routes = []
+ missing_routes = []
+ advertise_network = input_dict[routerInput]["bgp"]["address_family"][
+ addr_type
+ ]["unicast"]["advertise_networks"]
+
+ # Continue if there are no network advertise
+ if len(advertise_network) == 0:
+ continue
+
+ for advertise_network_dict in advertise_network:
+ if "vrf" in advertise_network_dict:
+ cmd = "{} vrf {} json".format(
+ command, advertise_network_dict["vrf"]
+ )
+ else:
+ cmd = "{} json".format(command)
+
+ rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+ if bool(rib_routes_json) is False:
+ errormsg = "No route found in rib of router {}..".format(router)
+ return errormsg
+
+ start_ip = advertise_network_dict["network"]
+ if "no_of_network" in advertise_network_dict:
+ no_of_network = advertise_network_dict["no_of_network"]
+ else:
+ no_of_network = 1
+
+ # Generating IPs for verification
+ ip_list = generate_ips(start_ip, no_of_network)
+ st_found = False
+ nh_found = False
+
+ for st_rt in ip_list:
+ st_rt = str(ipaddress.ip_network(frr_unicode(st_rt), strict=False))
+
+ _addr_type = validate_ip_address(st_rt)
+ if _addr_type != addr_type:
+ continue
+
+ if st_rt in rib_routes_json:
+ st_found = True
+ found_routes.append(st_rt)
+
+ if "queued" in rib_routes_json[st_rt][0]:
+ errormsg = "Route {} is queued\n".format(st_rt)
+ return errormsg
+
+ if next_hop:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+
+ count = 0
+ for nh in next_hop:
+ for nh_dict in rib_routes_json[st_rt][0]["nexthops"]:
+ if nh_dict["ip"] != nh:
+ continue
+ else:
+ count += 1
+
+ if count == len(next_hop):
+ nh_found = True
+ else:
+ errormsg = (
+ "Nexthop {} is Missing"
+ " for route {} in "
+ "RIB of router {}\n".format(next_hop, st_rt, dut)
+ )
+ return errormsg
+ else:
+ missing_routes.append(st_rt)
+
+ if nh_found:
+ logger.info(
+ "Found next_hop {} for all routes in RIB"
+ " of router {}\n".format(next_hop, dut)
+ )
+
+ if len(missing_routes) > 0:
+ errormsg = (
+ "Missing {} route in RIB of router {}, "
+ "routes: {} \n".format(addr_type, dut, missing_routes)
+ )
+ return errormsg
+
+ if found_routes:
+ logger.info(
+ "Verified {} routes in router {} RIB, found"
+ " routes are: {}\n".format(addr_type, dut, found_routes)
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=12)
+def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
+ """
+ Data will be read from input_dict or input JSON file, API will generate
+ same prefixes, which were redistributed by either create_static_routes() or
+ advertise_networks_using_network_command() and will verify next_hop and
+ each prefix/routes is present in "show ip/ipv6 fib json"
+ command o/p.
+
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `addr_type` : ip type, ipv4/ipv6
+ * `dut`: Device Under Test, for which user wants to test the data
+ * `input_dict` : input dict, has details of static routes
+ * `next_hop`[optional]: next_hop which needs to be verified,
+ default: static
+
+ Usage
+ -----
+ input_routes_r1 = {
+ "r1": {
+ "static_routes": [{
+ "network": ["1.1.1.1/32],
+ "next_hop": "Null0",
+ "vrf": "RED"
+ }]
+ }
+ }
+    result = verify_fib_routes(tgen, "ipv4", "r1", input_routes_r1)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ router_list = tgen.routers()
+ if dut not in router_list:
+ return
+
+ for routerInput in input_dict.keys():
+ # XXX replace with router = dut; rnode = router_list[dut]
+ for router, rnode in router_list.items():
+ if router != dut:
+ continue
+
+ logger.info("Checking router %s FIB routes:", router)
+
+ # Verifying RIB routes
+ if addr_type == "ipv4":
+ command = "show ip fib"
+ else:
+ command = "show ipv6 fib"
+
+ found_routes = []
+ missing_routes = []
+
+ if protocol:
+ command = "{} {}".format(command, protocol)
+
+ if "static_routes" in input_dict[routerInput]:
+ static_routes = input_dict[routerInput]["static_routes"]
+
+ for static_route in static_routes:
+ if "vrf" in static_route and static_route["vrf"] is not None:
+ logger.info(
+ "[DUT: {}]: Verifying routes for VRF:"
+ " {}".format(router, static_route["vrf"])
+ )
+
+ cmd = "{} vrf {}".format(command, static_route["vrf"])
+
+ else:
+ cmd = "{}".format(command)
+
+ cmd = "{} json".format(cmd)
+
+ rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+ if bool(rib_routes_json) is False:
+ errormsg = "[DUT: {}]: No route found in fib".format(router)
+ return errormsg
+
+ network = static_route["network"]
+ if "no_of_ip" in static_route:
+ no_of_ip = static_route["no_of_ip"]
+ else:
+ no_of_ip = 1
+
+ # Generating IPs for verification
+ ip_list = generate_ips(network, no_of_ip)
+ st_found = False
+ nh_found = False
+
+ for st_rt in ip_list:
+ st_rt = str(
+ ipaddress.ip_network(frr_unicode(st_rt), strict=False)
+ )
+ _addr_type = validate_ip_address(st_rt)
+ if _addr_type != addr_type:
+ continue
+
+ if st_rt in rib_routes_json:
+ st_found = True
+ found_routes.append(st_rt)
+
+ if next_hop:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+
+ count = 0
+ for nh in next_hop:
+ for nh_dict in rib_routes_json[st_rt][0][
+ "nexthops"
+ ]:
+ if nh_dict["ip"] != nh:
+ continue
+ else:
+ count += 1
+
+ if count == len(next_hop):
+ nh_found = True
+ else:
+ missing_routes.append(st_rt)
+ errormsg = (
+ "Nexthop {} is Missing"
+ " for route {} in "
+ "RIB of router {}\n".format(
+ next_hop, st_rt, dut
+ )
+ )
+ return errormsg
+
+ else:
+ missing_routes.append(st_rt)
+
+ if len(missing_routes) > 0:
+ errormsg = "[DUT: {}]: Missing route in FIB:" " {}".format(
+ dut, missing_routes
+ )
+ return errormsg
+
+ if nh_found:
+ logger.info(
+ "Found next_hop {} for all routes in RIB"
+ " of router {}\n".format(next_hop, dut)
+ )
+
+ if found_routes:
+ logger.info(
+ "[DUT: %s]: Verified routes in FIB, found" " routes are: %s\n",
+ dut,
+ found_routes,
+ )
+
+ continue
+
+ if "bgp" in input_dict[routerInput]:
+ if (
+ "advertise_networks"
+ not in input_dict[routerInput]["bgp"]["address_family"][addr_type][
+ "unicast"
+ ]
+ ):
+ continue
+
+ found_routes = []
+ missing_routes = []
+ advertise_network = input_dict[routerInput]["bgp"]["address_family"][
+ addr_type
+ ]["unicast"]["advertise_networks"]
+
+ # Continue if there are no network advertise
+ if len(advertise_network) == 0:
+ continue
+
+ for advertise_network_dict in advertise_network:
+ if "vrf" in advertise_network_dict:
+ cmd = "{} vrf {} json".format(command, static_route["vrf"])
+ else:
+ cmd = "{} json".format(command)
+
+ rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ # Verifying output dictionary rib_routes_json is not empty
+ if bool(rib_routes_json) is False:
+ errormsg = "No route found in rib of router {}..".format(router)
+ return errormsg
+
+ start_ip = advertise_network_dict["network"]
+ if "no_of_network" in advertise_network_dict:
+ no_of_network = advertise_network_dict["no_of_network"]
+ else:
+ no_of_network = 1
+
+ # Generating IPs for verification
+ ip_list = generate_ips(start_ip, no_of_network)
+ st_found = False
+ nh_found = False
+
+ for st_rt in ip_list:
+ st_rt = str(ipaddress.ip_network(frr_unicode(st_rt), strict=False))
+
+ _addr_type = validate_ip_address(st_rt)
+ if _addr_type != addr_type:
+ continue
+
+ if st_rt in rib_routes_json:
+ st_found = True
+ found_routes.append(st_rt)
+
+ if next_hop:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+
+ count = 0
+ for nh in next_hop:
+ for nh_dict in rib_routes_json[st_rt][0]["nexthops"]:
+ if nh_dict["ip"] != nh:
+ continue
+ else:
+ count += 1
+
+ if count == len(next_hop):
+ nh_found = True
+ else:
+ missing_routes.append(st_rt)
+ errormsg = (
+ "Nexthop {} is Missing"
+ " for route {} in "
+ "RIB of router {}\n".format(next_hop, st_rt, dut)
+ )
+ return errormsg
+ else:
+ missing_routes.append(st_rt)
+
+ if len(missing_routes) > 0:
+ errormsg = "[DUT: {}]: Missing route in FIB: " "{} \n".format(
+ dut, missing_routes
+ )
+ return errormsg
+
+ if nh_found:
+ logger.info(
+ "Found next_hop {} for all routes in RIB"
+ " of router {}\n".format(next_hop, dut)
+ )
+
+ if found_routes:
+ logger.info(
+ "[DUT: {}]: Verified routes FIB"
+ ", found routes are: {}\n".format(dut, found_routes)
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+def verify_admin_distance_for_static_routes(tgen, input_dict):
+ """
+ API to verify admin distance for static routes as defined in input_dict/
+ input JSON by running show ip/ipv6 route json command.
+ Parameter
+ ---------
+ * `tgen` : topogen object
+ * `input_dict`: having details like - for which router and static routes
+ admin distance needs to be verified
+ Usage
+ -----
+ # To verify admin distance is 10 for prefix 10.0.20.1/32 having next_hop
+ 10.0.0.2 in router r1
+ input_dict = {
+ "r1": {
+ "static_routes": [{
+ "network": "10.0.20.1/32",
+ "admin_distance": 10,
+ "next_hop": "10.0.0.2"
+ }]
+ }
+ }
+ result = verify_admin_distance_for_static_routes(tgen, input_dict)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ router_list = tgen.routers()
+ for router in input_dict.keys():
+ if router not in router_list:
+ continue
+ rnode = router_list[router]
+
+ for static_route in input_dict[router]["static_routes"]:
+ addr_type = validate_ip_address(static_route["network"])
+ # Command to execute
+ if addr_type == "ipv4":
+ command = "show ip route json"
+ else:
+ command = "show ipv6 route json"
+ show_ip_route_json = run_frr_cmd(rnode, command, isjson=True)
+
+ logger.info(
+ "Verifying admin distance for static route %s" " under dut %s:",
+ static_route,
+ router,
+ )
+ network = static_route["network"]
+ next_hop = static_route["next_hop"]
+ admin_distance = static_route["admin_distance"]
+            if network in show_ip_route_json:
+                route_data = show_ip_route_json[network][0]
+ if route_data["nexthops"][0]["ip"] == next_hop:
+ if route_data["distance"] != admin_distance:
+ errormsg = (
+ "Verification failed: admin distance"
+ " for static route {} under dut {},"
+ " found:{} but expected:{}".format(
+ static_route,
+ router,
+ route_data["distance"],
+ admin_distance,
+ )
+ )
+ return errormsg
+ else:
+ logger.info(
+ "Verification successful: admin"
+ " distance for static route %s under"
+ " dut %s, found:%s",
+ static_route,
+ router,
+ route_data["distance"],
+ )
+
+ else:
+ errormsg = (
+ "Static route {} not found in "
+ "show_ip_route_json for dut {}".format(network, router)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+def verify_prefix_lists(tgen, input_dict):
+ """
+ Running "show ip prefix-list" command and verifying given prefix-list
+ is present in router.
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `input_dict`: data to verify prefix lists
+ Usage
+ -----
+    # To verify pf_list_1 has been removed from router r1
+    input_dict = {
+        "r1": {
+            "prefix_lists": {
+                "ipv4": {
+                    "pf_list_1": []
+                }
+            }
+        }}
+    result = verify_prefix_lists(tgen, input_dict)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ router_list = tgen.routers()
+ for router in input_dict.keys():
+ if router not in router_list:
+ continue
+
+ rnode = router_list[router]
+
+ # Show ip prefix list
+ show_prefix_list = run_frr_cmd(rnode, "show ip prefix-list")
+
+ # Verify Prefix list is deleted
+ prefix_lists_addr = input_dict[router]["prefix_lists"]
+ for addr_type in prefix_lists_addr:
+ if not check_address_types(addr_type):
+ continue
+ # show ip prefix list
+ if addr_type == "ipv4":
+ cmd = "show ip prefix-list"
+ else:
+ cmd = "show {} prefix-list".format(addr_type)
+ show_prefix_list = run_frr_cmd(rnode, cmd)
+ for prefix_list in prefix_lists_addr[addr_type].keys():
+ if prefix_list in show_prefix_list:
+ errormsg = (
+ "Prefix list {} is/are present in the router"
+ " {}".format(prefix_list, router)
+ )
+ return errormsg
+
+ logger.info(
+ "Prefix list %s is/are not present in the router" " from router %s",
+ prefix_list,
+ router,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=12)
+def verify_route_maps(tgen, input_dict):
+ """
+ Running "show route-map" command and verifying given route-map
+ is present in router.
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `input_dict`: data to verify prefix lists
+ Usage
+ -----
+    # To verify rmap_1 and rmap_2 are deleted from router r1
+ input_dict = {
+ "r1": {
+ "route_maps": ["rmap_1", "rmap_2"]
+ }
+ }
+ result = verify_route_maps(tgen, input_dict)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ router_list = tgen.routers()
+ for router in input_dict.keys():
+ if router not in router_list:
+ continue
+
+ rnode = router_list[router]
+ # Show ip route-map
+ show_route_maps = rnode.vtysh_cmd("show route-map")
+
+ # Verify route-map is deleted
+ route_maps = input_dict[router]["route_maps"]
+ for route_map in route_maps:
+ if route_map in show_route_maps:
+ errormsg = "Route map {} is not deleted from router" " {}".format(
+ route_map, router
+ )
+ return errormsg
+
+ logger.info(
+ "Route map %s is/are deleted successfully from" " router %s",
+ route_maps,
+ router,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=16)
+def verify_bgp_community(tgen, addr_type, router, network, input_dict=None):
+ """
+    API to verify BGP large community is attached in route for any given
+ DUT by running "show bgp ipv4/6 {route address} json" command.
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `addr_type` : ip type, ipv4/ipv6
+    * `router`: Device Under Test
+ * `network`: network for which set criteria needs to be verified
+ * `input_dict`: having details like - for which router, community and
+ values needs to be verified
+ Usage
+ -----
+ networks = ["200.50.2.0/32"]
+ input_dict = {
+ "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
+ }
+ result = verify_bgp_community(tgen, "ipv4", dut, network, input_dict=None)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ router_list = tgen.routers()
+ if router not in router_list:
+ return False
+
+ rnode = router_list[router]
+
+ logger.debug(
+ "Verifying BGP community attributes on dut %s: for %s " "network %s",
+ router,
+ addr_type,
+ network,
+ )
+
+ for net in network:
+ cmd = "show bgp {} {} json".format(addr_type, net)
+ show_bgp_json = rnode.vtysh_cmd(cmd, isjson=True)
+ logger.info(show_bgp_json)
+ if "paths" not in show_bgp_json:
+ return "Prefix {} not found in BGP table of router: {}".format(net, router)
+
+ as_paths = show_bgp_json["paths"]
+ found = False
+ for i in range(len(as_paths)):
+ if (
+ "largeCommunity" in show_bgp_json["paths"][i]
+ or "community" in show_bgp_json["paths"][i]
+ ):
+ found = True
+ logger.info(
+ "Large Community attribute is found for route:" " %s in router: %s",
+ net,
+ router,
+ )
+ if input_dict is not None:
+ for criteria, comm_val in input_dict.items():
+ show_val = show_bgp_json["paths"][i][criteria]["string"]
+ if comm_val == show_val:
+ logger.info(
+ "Verifying BGP %s for prefix: %s"
+ " in router: %s, found expected"
+ " value: %s",
+ criteria,
+ net,
+ router,
+ comm_val,
+ )
+ else:
+ errormsg = (
+ "Failed: Verifying BGP attribute"
+ " {} for route: {} in router: {}"
+ ", expected value: {} but found"
+ ": {}".format(criteria, net, router, comm_val, show_val)
+ )
+ return errormsg
+
+ if not found:
+ errormsg = (
+ "Large Community attribute is not found for route: "
+ "{} in router: {} ".format(net, router)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+def get_ipv6_linklocal_address(topo, node, intf):
+ """
+ API to get the link local ipv6 address of a particular interface
+
+ Parameters
+ ----------
+ * `node`: node on which link local ip to be fetched.
+ * `intf` : interface for which link local ip needs to be returned.
+ * `topo` : base topo
+
+ Usage
+ -----
+ result = get_ipv6_linklocal_address(topo, 'r1', 'r2')
+
+ Returns link local ip of interface between r1 and r2.
+
+ Returns
+ -------
+ 1) link local ipv6 address from the interface
+ 2) errormsg - when link local ip not found
+ """
+ tgen = get_topogen()
+ ext_nh = tgen.net[node].get_ipv6_linklocal()
+ req_nh = topo[node]["links"][intf]["interface"]
+ llip = None
+ for llips in ext_nh:
+ if llips[0] == req_nh:
+ llip = llips[1]
+ logger.info("Link local ip found = %s", llip)
+ return llip
+
+ errormsg = "Failed: Link local ip not found on router {}, " "interface {}".format(
+ node, intf
+ )
+
+ return errormsg
+
+
+def verify_create_community_list(tgen, input_dict):
+ """
+ API is to verify if large community list is created for any given DUT in
+ input_dict by running "sh bgp large-community-list {"comm_name"} detail"
+ command.
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict`: having details like - for which router, large community
+ needs to be verified
+ Usage
+ -----
+    input_dict = {
+        "r1": {
+            "bgp_community_lists": [
+                {
+                    "community_type": "standard",
+                    "name": "Test1",
+                }
+            ]
+        }
+    }
+ result = verify_create_community_list(tgen, input_dict)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ router_list = tgen.routers()
+ for router in input_dict.keys():
+ if router not in router_list:
+ continue
+
+ rnode = router_list[router]
+
+ logger.info("Verifying large-community is created for dut %s:", router)
+
+ for comm_data in input_dict[router]["bgp_community_lists"]:
+ comm_name = comm_data["name"]
+ comm_type = comm_data["community_type"]
+ show_bgp_community = run_frr_cmd(
+ rnode, "show bgp large-community-list {} detail".format(comm_name)
+ )
+
+ # Verify community list and type
+ if comm_name in show_bgp_community and comm_type in show_bgp_community:
+ logger.info(
+ "BGP %s large-community-list %s is" " created", comm_type, comm_name
+ )
+ else:
+ errormsg = "BGP {} large-community-list {} is not" " created".format(
+ comm_type, comm_name
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+def verify_cli_json(tgen, input_dict):
+ """
+ API to verify if JSON is available for clis
+ command.
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict`: CLIs for which JSON needs to be verified
+ Usage
+ -----
+ input_dict = {
+ "edge1":{
+ "cli": ["show evpn vni detail", show evpn rmac vni all]
+ }
+ }
+
+ result = verify_cli_json(tgen, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ for dut in input_dict.keys():
+ rnode = tgen.gears[dut]
+
+ for cli in input_dict[dut]["cli"]:
+ logger.info(
+ "[DUT: %s]: Verifying JSON is available for " "CLI %s :", dut, cli
+ )
+
+ test_cli = "{} json".format(cli)
+ ret_json = rnode.vtysh_cmd(test_cli, isjson=True)
+ if not bool(ret_json):
+ errormsg = "CLI: %s, JSON format is not available" % (cli)
+ return errormsg
+ elif "unknown" in ret_json or "Unknown" in ret_json:
+ errormsg = "CLI: %s, JSON format is not available" % (cli)
+ return errormsg
+ else:
+ logger.info(
+ "CLI : %s JSON format is available: " "\n %s", cli, ret_json
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return True
+
+
+@retry(retry_timeout=12)
+def verify_evpn_vni(tgen, input_dict):
+ """
+ API to verify evpn vni details using "show evpn vni detail json"
+ command.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict`: having details like - for which router, evpn details
+ needs to be verified
+ Usage
+ -----
+    input_dict = {
+        "edge1":{
+            "vni": [
+                {
+                    "name": "75100",
+                    "vrf": "RED",
+                    "vxlanIntf": "vxlan75100",
+                    "localVtepIp": "120.1.1.1",
+                    "sviIntf": "br100"
+                }
+            ]
+        }
+    }
+
+ result = verify_evpn_vni(tgen, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ for dut in input_dict.keys():
+ rnode = tgen.gears[dut]
+
+ logger.info("[DUT: %s]: Verifying evpn vni details :", dut)
+
+ cmd = "show evpn vni detail json"
+ evpn_all_vni_json = run_frr_cmd(rnode, cmd, isjson=True)
+ if not bool(evpn_all_vni_json):
+ errormsg = "No output for '{}' cli".format(cmd)
+ return errormsg
+
+ if "vni" in input_dict[dut]:
+ for vni_dict in input_dict[dut]["vni"]:
+ found = False
+ vni = vni_dict["name"]
+ for evpn_vni_json in evpn_all_vni_json:
+ if "vni" in evpn_vni_json:
+ if evpn_vni_json["vni"] != int(vni):
+ continue
+
+ for attribute in vni_dict.keys():
+ if vni_dict[attribute] != evpn_vni_json[attribute]:
+ errormsg = (
+ "[DUT: %s] Verifying "
+ "%s for VNI: %s [FAILED]||"
+ ", EXPECTED : %s "
+ " FOUND : %s"
+ % (
+ dut,
+ attribute,
+ vni,
+ vni_dict[attribute],
+ evpn_vni_json[attribute],
+ )
+ )
+ return errormsg
+
+ else:
+ found = True
+ logger.info(
+ "[DUT: %s] Verifying"
+ " %s for VNI: %s , "
+ "Found Expected : %s ",
+ dut,
+ attribute,
+ vni,
+ evpn_vni_json[attribute],
+ )
+
+ if evpn_vni_json["state"] != "Up":
+ errormsg = (
+ "[DUT: %s] Failed: Verifying"
+ " State for VNI: %s is not Up" % (dut, vni)
+ )
+ return errormsg
+
+ else:
+ errormsg = (
+ "[DUT: %s] Failed:"
+ " VNI: %s is not present in JSON" % (dut, vni)
+ )
+ return errormsg
+
+ if found:
+ logger.info(
+ "[DUT %s]: Verifying VNI : %s "
+ "details and state is Up [PASSED]!!",
+ dut,
+ vni,
+ )
+ return True
+
+ else:
+ errormsg = (
+ "[DUT: %s] Failed:" " vni details are not present in input data" % (dut)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return False
+
+
+@retry(retry_timeout=12)
+def verify_vrf_vni(tgen, input_dict):
+ """
+ API to verify vrf vni details using "show vrf vni json"
+ command.
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict`: having details like - for which router, evpn details
+ needs to be verified
+ Usage
+ -----
+ input_dict = {
+ "edge1":{
+ "vrfs": [
+ {
+ "RED":{
+ "vni": 75000,
+ "vxlanIntf": "vxlan75100",
+ "sviIntf": "br100",
+ "routerMac": "00:80:48:ba:d1:00",
+ "state": "Up"
+ }
+ }
+ ]
+ }
+ }
+
+ result = verify_vrf_vni(tgen, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ for dut in input_dict.keys():
+ rnode = tgen.gears[dut]
+
+ logger.info("[DUT: %s]: Verifying vrf vni details :", dut)
+
+ cmd = "show vrf vni json"
+ vrf_all_vni_json = run_frr_cmd(rnode, cmd, isjson=True)
+ if not bool(vrf_all_vni_json):
+ errormsg = "No output for '{}' cli".format(cmd)
+ return errormsg
+
+ if "vrfs" in input_dict[dut]:
+ for vrfs in input_dict[dut]["vrfs"]:
+ for vrf, vrf_dict in vrfs.items():
+ found = False
+ for vrf_vni_json in vrf_all_vni_json["vrfs"]:
+ if "vrf" in vrf_vni_json:
+ if vrf_vni_json["vrf"] != vrf:
+ continue
+
+ for attribute in vrf_dict.keys():
+ if vrf_dict[attribute] == vrf_vni_json[attribute]:
+ found = True
+ logger.info(
+ "[DUT %s]: VRF: %s, "
+ "verifying %s "
+ ", Found Expected: %s "
+ "[PASSED]!!",
+ dut,
+ vrf,
+ attribute,
+ vrf_vni_json[attribute],
+ )
+ else:
+ errormsg = (
+ "[DUT: %s] VRF: %s, "
+ "verifying %s [FAILED!!] "
+ ", EXPECTED : %s "
+ ", FOUND : %s"
+ % (
+ dut,
+ vrf,
+ attribute,
+ vrf_dict[attribute],
+ vrf_vni_json[attribute],
+ )
+ )
+ return errormsg
+
+ else:
+ errormsg = "[DUT: %s] VRF: %s " "is not present in JSON" % (
+ dut,
+ vrf,
+ )
+ return errormsg
+
+ if found:
+ logger.info(
+ "[DUT %s] Verifying VRF: %s " " details [PASSED]!!",
+ dut,
+ vrf,
+ )
+ return True
+
+ else:
+ errormsg = (
+ "[DUT: %s] Failed:" " vrf details are not present in input data" % (dut)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return False
+
+
+def required_linux_kernel_version(required_version):
+ """
+ This API is used to check linux version compatibility of the test suite.
+ If version mentioned in required_version is higher than the linux kernel
+ of the system, test suite will be skipped. This API returns true or errormsg.
+
+ Parameters
+ ----------
+ * `required_version` : Kernel version required for the suites to run.
+
+ Usage
+ -----
+    result = required_linux_kernel_version('4.15')
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ system_kernel = platform.release()
+ if version_cmp(system_kernel, required_version) < 0:
+ error_msg = (
+ 'These tests will not run on kernel "{}", '
+ "they require kernel >= {})".format(system_kernel, required_version)
+ )
+
+ logger.info(error_msg)
+
+ return error_msg
+ return True
+
+
+class HostApplicationHelper(object):
+ """Helper to track and cleanup per-host based test processes."""
+
+ def __init__(self, tgen=None, base_cmd=None):
+ self.base_cmd_str = ""
+ self.host_procs = {}
+ self.tgen = None
+ self.set_base_cmd(base_cmd if base_cmd else [])
+ if tgen is not None:
+ self.init(tgen)
+
+ def __enter__(self):
+ self.init()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.cleanup()
+
+ def __str__(self):
+ return "HostApplicationHelper({})".format(self.base_cmd_str)
+
+ def set_base_cmd(self, base_cmd):
+ assert isinstance(base_cmd, list) or isinstance(base_cmd, tuple)
+ self.base_cmd = base_cmd
+ if base_cmd:
+ self.base_cmd_str = " ".join(base_cmd)
+ else:
+ self.base_cmd_str = ""
+
+ def init(self, tgen=None):
+ """Initialize the helper with tgen if needed.
+
+ If overridden, need to handle multiple entries but one init. Will be called on
+ object creation if tgen is supplied. Will be called again on __enter__ so should
+ not re-init if already inited.
+ """
+ if self.tgen:
+ assert tgen is None or self.tgen == tgen
+ else:
+ self.tgen = tgen
+
+ def started_proc(self, host, p):
+ """Called after process started on host.
+
+ Return value is passed to `stopping_proc` method."""
+ logger.debug("%s: Doing nothing after starting process", self)
+ return False
+
+ def stopping_proc(self, host, p, info):
+ """Called after process started on host."""
+ logger.debug("%s: Doing nothing before stopping process", self)
+
+ def _add_host_proc(self, host, p):
+ v = self.started_proc(host, p)
+
+ if host not in self.host_procs:
+ self.host_procs[host] = []
+ logger.debug("%s: %s: tracking process %s", self, host, p)
+ self.host_procs[host].append((p, v))
+
+ def stop_host(self, host):
+ """Stop the process on the host.
+
+ Override to do additional cleanup."""
+ if host in self.host_procs:
+ hlogger = self.tgen.net[host].logger
+ for p, v in self.host_procs[host]:
+ self.stopping_proc(host, p, v)
+ logger.debug("%s: %s: terminating process %s", self, host, p.pid)
+ hlogger.debug("%s: %s: terminating process %s", self, host, p.pid)
+ rc = p.poll()
+ if rc is not None:
+ logger.error(
+ "%s: %s: process early exit %s: %s",
+ self,
+ host,
+ p.pid,
+ comm_error(p),
+ )
+ hlogger.error(
+ "%s: %s: process early exit %s: %s",
+ self,
+ host,
+ p.pid,
+ comm_error(p),
+ )
+ else:
+ p.terminate()
+ p.wait()
+ logger.debug(
+ "%s: %s: terminated process %s: %s",
+ self,
+ host,
+ p.pid,
+ comm_error(p),
+ )
+ hlogger.debug(
+ "%s: %s: terminated process %s: %s",
+ self,
+ host,
+ p.pid,
+ comm_error(p),
+ )
+
+ del self.host_procs[host]
+
+ def stop_all_hosts(self):
+ hosts = set(self.host_procs)
+ for host in hosts:
+ self.stop_host(host)
+
+ def cleanup(self):
+ self.stop_all_hosts()
+
+ def run(self, host, cmd_args, **kwargs):
+ cmd = list(self.base_cmd)
+ cmd.extend(cmd_args)
+ p = self.tgen.gears[host].popen(cmd, **kwargs)
+ assert p.poll() is None
+ self._add_host_proc(host, p)
+ return p
+
+ def check_procs(self):
+ """Check that all current processes are running, log errors if not.
+
+ Returns: List of stopped processes."""
+ procs = []
+
+ logger.debug("%s: checking procs on hosts %s", self, self.host_procs.keys())
+
+ for host in self.host_procs:
+ hlogger = self.tgen.net[host].logger
+ for p, _ in self.host_procs[host]:
+ logger.debug("%s: checking %s proc %s", self, host, p)
+ rc = p.poll()
+ if rc is None:
+ continue
+ logger.error(
+ "%s: %s proc exited: %s", self, host, comm_error(p), exc_info=True
+ )
+ hlogger.error(
+ "%s: %s proc exited: %s", self, host, comm_error(p), exc_info=True
+ )
+ procs.append(p)
+ return procs
+
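+# A minimal usage sketch (the router name "r1", the target address and the
+# ping command are hypothetical; any long-running per-host tool works). The
+# context-manager form guarantees cleanup() runs and terminates every
+# tracked process:
+#
+#     with HostApplicationHelper(tgen, ["ping", "-i", "1"]) as helper:
+#         helper.run("r1", ["192.0.2.1"])   # base_cmd + args, tracked per host
+#         assert not helper.check_procs()   # empty list => nothing exited early
+#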
+
+class IPerfHelper(HostApplicationHelper):
+ def __str__(self):
+ return "IPerfHelper()"
+
+ def run_join(
+ self,
+ host,
+ join_addr,
+ l4Type="UDP",
+ join_interval=1,
+ join_intf=None,
+ join_towards=None,
+ ):
+ """
+ Use iperf to send IGMP join and listen to traffic
+
+ Parameters:
+ -----------
+ * `host`: iperf host from where IGMP join would be sent
+ * `l4Type`: string, one of [ TCP, UDP ]
+ * `join_addr`: multicast address (or addresses) to join to
+ * `join_interval`: seconds between periodic bandwidth reports
+ * `join_intf`: the interface to bind the join to
+        * `join_towards`: router whose interface to bind the join to
+
+ returns: Success (bool)
+ """
+
+ iperf_path = self.tgen.net.get_exec_path("iperf")
+
+ assert join_addr
+ if not isinstance(join_addr, list) and not isinstance(join_addr, tuple):
+ join_addr = [ipaddress.IPv4Address(frr_unicode(join_addr))]
+
+ for bindTo in join_addr:
+ iperf_args = [iperf_path, "-s"]
+
+ if l4Type == "UDP":
+ iperf_args.append("-u")
+
+ iperf_args.append("-B")
+ if join_towards:
+ to_intf = frr_unicode(
+ self.tgen.json_topo["routers"][host]["links"][join_towards][
+ "interface"
+ ]
+ )
+ iperf_args.append("{}%{}".format(str(bindTo), to_intf))
+ elif join_intf:
+ iperf_args.append("{}%{}".format(str(bindTo), join_intf))
+ else:
+ iperf_args.append(str(bindTo))
+
+ if join_interval:
+ iperf_args.append("-i")
+ iperf_args.append(str(join_interval))
+
+ p = self.run(host, iperf_args)
+ if p.poll() is not None:
+ logger.error("IGMP join failed on %s: %s", bindTo, comm_error(p))
+ return False
+ return True
+
+ def run_traffic(
+ self, host, sentToAddress, ttl, time=0, l4Type="UDP", bind_towards=None
+ ):
+ """
+        Use iperf to send multicast traffic
+
+ Parameters:
+ -----------
+ * `host`: iperf host to send traffic from
+ * `l4Type`: string, one of [ TCP, UDP ]
+ * `sentToAddress`: multicast address to send traffic to
+ * `ttl`: time to live
+ * `time`: time in seconds to transmit for
+        * `bind_towards`: router whose interface supplies the source IP address
+
+ returns: Success (bool)
+ """
+
+ iperf_path = self.tgen.net.get_exec_path("iperf")
+
+ if sentToAddress and not isinstance(sentToAddress, list):
+ sentToAddress = [ipaddress.IPv4Address(frr_unicode(sentToAddress))]
+
+ for sendTo in sentToAddress:
+ iperf_args = [iperf_path, "-c", sendTo]
+
+ # Bind to Interface IP
+ if bind_towards:
+ ifaddr = frr_unicode(
+ self.tgen.json_topo["routers"][host]["links"][bind_towards]["ipv4"]
+ )
+ ipaddr = ipaddress.IPv4Interface(ifaddr).ip
+ iperf_args.append("-B")
+ iperf_args.append(str(ipaddr))
+
+ # UDP/TCP
+ if l4Type == "UDP":
+ iperf_args.append("-u")
+ iperf_args.append("-b")
+ iperf_args.append("0.012m")
+
+ # TTL
+ if ttl:
+ iperf_args.append("-T")
+ iperf_args.append(str(ttl))
+
+ # Time
+ if time:
+ iperf_args.append("-t")
+ iperf_args.append(str(time))
+
+ p = self.run(host, iperf_args)
+ if p.poll() is not None:
+ logger.error(
+ "mcast traffic send failed for %s: %s", sendTo, comm_error(p)
+ )
+ return False
+
+ return True
+
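+# Example sketch (router and group names are hypothetical): join the group on
+# r2, stream UDP traffic toward it from r1, then stop both iperf processes:
+#
+#     iperf = IPerfHelper(tgen)
+#     iperf.run_join("r2", "225.1.1.1", join_towards="r1")
+#     iperf.run_traffic("r1", "225.1.1.1", ttl=5, time=30, bind_towards="r2")
+#     iperf.cleanup()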
+
+def verify_ip_nht(tgen, input_dict):
+ """
+ Running "show ip nht" command and verifying given nexthop resolution
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `input_dict`: data to verify nexthop
+ Usage
+ -----
+ input_dict_4 = {
+ "r1": {
+ nh: {
+ "Address": nh,
+ "resolvedVia": "connected",
+ "nexthops": {
+ "nexthop1": {
+ "Interface": intf
+ }
+ }
+ }
+ }
+ }
+ result = verify_ip_nht(tgen, input_dict_4)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: verify_ip_nht()")
+
+ router_list = tgen.routers()
+ for router in input_dict.keys():
+ if router not in router_list:
+ continue
+
+ rnode = router_list[router]
+ nh_list = input_dict[router]
+
+ if validate_ip_address(next(iter(nh_list))) == "ipv6":
+ show_ip_nht = run_frr_cmd(rnode, "show ipv6 nht")
+ else:
+ show_ip_nht = run_frr_cmd(rnode, "show ip nht")
+
+ for nh in nh_list:
+ if nh in show_ip_nht:
+ nht = run_frr_cmd(rnode, "show ip nht {}".format(nh))
+ if "unresolved" in nht:
+ errormsg = "Nexthop {} became unresolved on {}".format(nh, router)
+ return errormsg
+                else:
+                    logger.info("Nexthop %s is resolved on %s", nh, router)
+            else:
+                errormsg = "Nexthop {} is not resolved on {}".format(nh, router)
+                return errormsg
+
+ logger.debug("Exiting lib API: verify_ip_nht()")
+    return True
+
+
+def scapy_send_raw_packet(tgen, topo, senderRouter, intf, packet=None):
+ """
+    Using scapy's Raw() method to send a raw BSR packet from one FRR
+    router to another
+
+ Parameters:
+ -----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+    * `senderRouter` : Sender router
+    * `intf` : interface from which the packet will be sent
+ * `packet` : packet in raw format
+
+ returns:
+ --------
+ errormsg or True
+ """
+
+ global CD
+ result = ""
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ sender_interface = intf
+ rnode = tgen.routers()[senderRouter]
+
+ for destLink, data in topo["routers"][senderRouter]["links"].items():
+ if "type" in data and data["type"] == "loopback":
+ continue
+
+ if not packet:
+ packet = topo["routers"][senderRouter]["pkt"]["test_packets"][packet][
+ "data"
+ ]
+
+ python3_path = tgen.net.get_exec_path(["python3", "python"])
+ script_path = os.path.join(CD, "send_bsr_packet.py")
+ cmd = "{} {} '{}' '{}' --interval=1 --count=1".format(
+ python3_path, script_path, packet, sender_interface
+ )
+
+ logger.info("Scapy cmd: \n %s", cmd)
+ result = rnode.run(cmd)
+
+ if result == "":
+ return result
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
diff --git a/tests/topotests/lib/exa-receive.py b/tests/topotests/lib/exa-receive.py
new file mode 100755
index 0000000..2ea3a75
--- /dev/null
+++ b/tests/topotests/lib/exa-receive.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+
+"""
+exa-receive.py: Save received routes from ExaBGP into file
+"""
+
+import argparse
+import os
+from sys import stdin
+from datetime import datetime
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+ "--no-timestamp", dest="timestamp", action="store_false", help="Disable timestamps"
+)
+parser.add_argument(
+ "--logdir", default="/tmp/gearlogdir", help="The directory to store the peer log in"
+)
+parser.add_argument("peer", type=int, help="The peer number")
+args = parser.parse_args()
+
+savepath = os.path.join(args.logdir, "peer{}-received.log".format(args.peer))
+routesavefile = open(savepath, "w")
+
+while True:
+ try:
+ line = stdin.readline()
+ if not line:
+ break
+
+ if not args.timestamp:
+ routesavefile.write(line)
+ else:
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
+ routesavefile.write(timestamp + line)
+ routesavefile.flush()
+ except KeyboardInterrupt:
+ pass
+ except IOError:
+ # most likely a signal during readline
+ pass
+
+routesavefile.close()
diff --git a/tests/topotests/lib/fixtures.py b/tests/topotests/lib/fixtures.py
new file mode 100644
index 0000000..042ed09
--- /dev/null
+++ b/tests/topotests/lib/fixtures.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 eval: (yapf-mode 1) -*-
+# SPDX-License-Identifier: MIT
+#
+# August 27 2021, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C. ("LabN")
+
+import lib.topojson as topojson
+import lib.topogen as topogen
+from lib.topolog import logger
+
+
+def tgen_json(request):
+ logger.info("Creating/starting topogen topology for %s", request.module.__name__)
+
+ tgen = topojson.setup_module_from_json(request.module.__file__)
+ yield tgen
+
+ logger.info("Stopping topogen topology for %s", request.module.__name__)
+ tgen.stop_topology()
+
+
+def topo(tgen):
+ """Make tgen json object available as test argument."""
+ return tgen.json_topo
+
+
+def tgen():
+ """Make global topogen object available as test argument."""
+ return topogen.get_topogen()
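+
+
+# These are plain functions/generators meant to be wrapped as pytest fixtures
+# by the importing conftest or test module, e.g. (a sketch; the fixture scope
+# is the test author's choice):
+#
+#     import pytest
+#     from lib import fixtures
+#
+#     tgen = pytest.fixture(scope="module")(fixtures.tgen_json)
+#     topo = pytest.fixture(fixtures.topo)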
diff --git a/tests/topotests/lib/grpc-query.py b/tests/topotests/lib/grpc-query.py
new file mode 100755
index 0000000..8c4701c
--- /dev/null
+++ b/tests/topotests/lib/grpc-query.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: MIT
+#
+# February 22 2022, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2022, LabN Consulting, L.L.C.
+
+import argparse
+import logging
+import os
+import sys
+
+import pytest
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+
+# This is painful but works if you have installed grpc and grpc_tools. It would
+# be *way* better if we actually built and installed these, but ... python packaging.
+try:
+ import grpc
+ import grpc_tools
+
+ sys.path.append(os.path.dirname(CWD))
+ from munet.base import commander
+
+ commander.cmd_raises(f"cp {CWD}/../../../grpc/frr-northbound.proto .")
+ commander.cmd_raises(
+ f"python3 -m grpc_tools.protoc --python_out=. --grpc_python_out=. -I . frr-northbound.proto"
+ )
+except Exception as error:
+ logging.error("can't create proto definition modules %s", error)
+ raise
+
+try:
+ sys.path[0:0] = "."
+ import frr_northbound_pb2
+ import frr_northbound_pb2_grpc
+except Exception as error:
+ logging.error("can't import proto definition modules %s", error)
+ raise
+
+
+class GRPCClient:
+ def __init__(self, server, port):
+ self.channel = grpc.insecure_channel("{}:{}".format(server, port))
+ self.stub = frr_northbound_pb2_grpc.NorthboundStub(self.channel)
+
+ def get_capabilities(self):
+ request = frr_northbound_pb2.GetCapabilitiesRequest()
+ response = "NONE"
+ try:
+ response = self.stub.GetCapabilities(request)
+ except Exception as error:
+ logging.error("Got exception from stub: %s", error)
+
+ logging.debug("GRPC Capabilities: %s", response)
+ return response
+
+ def get(self, xpath):
+ request = frr_northbound_pb2.GetRequest()
+ request.path.append(xpath)
+ request.type = frr_northbound_pb2.GetRequest.ALL
+ request.encoding = frr_northbound_pb2.XML
+ xml = ""
+ for r in self.stub.Get(request):
+ logging.info('GRPC Get path: "%s" value: %s', request.path, r)
+ xml += str(r.data.data)
+ return xml
+
+
+def next_action(action_list=None):
+ "Get next action from list or STDIN"
+ if action_list:
+ for action in action_list:
+ yield action
+ else:
+ while True:
+ try:
+ action = input("")
+ if not action:
+ break
+ yield action.strip()
+ except EOFError:
+ break
+
+
+def main(*args):
+ parser = argparse.ArgumentParser(description="gRPC Client")
+ parser.add_argument(
+ "-s", "--server", default="localhost", help="gRPC Server Address"
+ )
+ parser.add_argument(
+ "-p", "--port", type=int, default=50051, help="gRPC Server TCP Port"
+ )
+ parser.add_argument("-v", "--verbose", action="store_true", help="be verbose")
+ parser.add_argument("--check", action="store_true", help="check runable")
+ parser.add_argument("actions", nargs="*", help="GETCAP|GET,xpath")
+ args = parser.parse_args(*args)
+
+ level = logging.DEBUG if args.verbose else logging.INFO
+ logging.basicConfig(
+ level=level,
+ format="%(asctime)s %(levelname)s: GRPC-CLI-CLIENT: %(name)s %(message)s",
+ )
+
+ if args.check:
+ sys.exit(0)
+
+ c = GRPCClient(args.server, args.port)
+
+ for action in next_action(args.actions):
+ action = action.casefold()
+ logging.info("GOT ACTION: %s", action)
+ if action == "getcap":
+ caps = c.get_capabilities()
+ print("Capabilities:", caps)
+ elif action.startswith("get,"):
+ # Print Interface State and Config
+ _, xpath = action.split(",", 1)
+ print("Get XPath: ", xpath)
+ xml = c.get(xpath)
+ print("{}: {}".format(xpath, xml))
+ # for _ in range(0, 1):
+
+
+if __name__ == "__main__":
+ main()
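+
+# Example invocation (address, port and xpath are illustrative; assumes an FRR
+# daemon is running with the grpc module listening on that port):
+#
+#   ./grpc-query.py --server 192.168.0.1 --port 50051 getcap "get,/frr-interface:lib"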
diff --git a/tests/topotests/lib/ltemplate.py b/tests/topotests/lib/ltemplate.py
new file mode 100644
index 0000000..e897a81
--- /dev/null
+++ b/tests/topotests/lib/ltemplate.py
@@ -0,0 +1,307 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2017 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+
+"""
+ltemplate.py: LabN template for FRR tests.
+"""
+
+import os
+import sys
+import platform
+
+import pytest
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.lutil import *
+
+# Required to instantiate the topology builder class.
+
+customize = None
+
+
+class LTemplate:
+ test = None
+ testdir = None
+ scriptdir = None
+ logdir = None
+ prestarthooksuccess = True
+ poststarthooksuccess = True
+ iproute2Ver = None
+
+ def __init__(self, test, testdir):
+ pathname = os.path.join(testdir, "customize.py")
+ global customize
+ if sys.version_info >= (3, 5):
+ import importlib.util
+
+ spec = importlib.util.spec_from_file_location("customize", pathname)
+ customize = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(customize)
+ else:
+ import imp
+
+ customize = imp.load_source("customize", pathname)
+ self.test = test
+ self.testdir = testdir
+ self.scriptdir = testdir
+ self.logdir = ""
+ logger.info("LTemplate: " + test)
+
+ def setup_module(self, mod):
+ "Sets up the pytest environment"
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(customize.build_topo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+ tgen.start_topology()
+
+ self.logdir = tgen.logdir
+
+ logger.info("Topology started")
+ try:
+ self.prestarthooksuccess = customize.ltemplatePreRouterStartHook()
+ except AttributeError:
+ # not defined
+ logger.debug("ltemplatePreRouterStartHook() not defined")
+ if self.prestarthooksuccess != True:
+ logger.info("ltemplatePreRouterStartHook() failed, skipping test")
+ return
+
+ # This is a sample of configuration loading.
+ router_list = tgen.routers()
+
+ # For all registered routers, load the zebra configuration file
+ for rname, router in router_list.items():
+ logger.info("Setting up %s" % rname)
+ for rd_val in TopoRouter.RD:
+ config = os.path.join(
+ self.testdir, "{}/{}.conf".format(rname, TopoRouter.RD[rd_val])
+ )
+ prog = os.path.join(tgen.net[rname].daemondir, TopoRouter.RD[rd_val])
+ if os.path.exists(config):
+ if os.path.exists(prog):
+ router.load_config(rd_val, config)
+ else:
+ logger.warning(
+ "{} not found, but have {}.conf file".format(
+ prog, TopoRouter.RD[rd_val]
+ )
+ )
+
+ # After loading the configurations, this function loads configured daemons.
+ logger.info("Starting routers")
+ tgen.start_router()
+ try:
+ self.poststarthooksuccess = customize.ltemplatePostRouterStartHook()
+ except AttributeError:
+ # not defined
+ logger.debug("ltemplatePostRouterStartHook() not defined")
+ luStart(baseScriptDir=self.scriptdir, baseLogDir=self.logdir, net=tgen.net)
+
+
+# initialized by ltemplate_start
+_lt = None
+
+
+def setup_module(mod):
+ global _lt
+ root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ test = mod.__name__[: mod.__name__.rfind(".")]
+ testdir = os.path.join(root, test)
+
+ # don't do this for now as reload didn't work as expected
+ # fixup sys.path, want test dir there only once
+ # try:
+ # sys.path.remove(testdir)
+ # except ValueError:
+ # logger.debug(testdir+" not found in original sys.path")
+ # add testdir
+ # sys.path.append(testdir)
+
+ # init class
+ _lt = LTemplate(test, testdir)
+ _lt.setup_module(mod)
+
+ # drop testdir
+ # sys.path.remove(testdir)
+
+
+def teardown_module(mod):
+    "Teardown the pytest environment"
+    global _lt
+ tgen = get_topogen()
+
+ if _lt != None and _lt.scriptdir != None and _lt.prestarthooksuccess == True:
+ luShowResults(logger.info)
+ print(luFinish())
+
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+ _lt = None
+
+
+def ltemplateTest(
+ script, SkipIfFailed=True, CallOnFail=None, CheckFuncStr=None, KeepGoing=False
+):
+ global _lt
+ if _lt == None or _lt.prestarthooksuccess != True:
+ return
+
+ tgen = get_topogen()
+ if not os.path.isfile(script):
+ if not os.path.isfile(os.path.join(_lt.scriptdir, script)):
+ logger.error("Could not find script file: " + script)
+ assert "Could not find script file: " + script
+ logger.info("Starting template test: " + script)
+ numEntry = luNumFail()
+
+ if SkipIfFailed and tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ if numEntry > 0:
+ if not KeepGoing:
+ pytest.skip("Have %d errors" % numEntry)
+
+ if CheckFuncStr != None:
+ check = eval(CheckFuncStr)
+ if check != True:
+ pytest.skip("Check function '" + CheckFuncStr + "' returned: " + check)
+
+ if CallOnFail != None:
+ CallOnFail = eval(CallOnFail)
+ luInclude(script, CallOnFail)
+ numFail = luNumFail() - numEntry
+ if numFail > 0:
+ luShowFail()
+ fatal_error = "%d tests failed" % numFail
+ if not KeepGoing:
+ assert (
+ "scripts/cleanup_all.py failed" == "See summary output above"
+ ), fatal_error
+
+
+# Memory leak test template
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+class ltemplateRtrCmd:
+ def __init__(self):
+ self.resetCounts()
+
+ def doCmd(self, tgen, rtr, cmd, checkstr=None):
+ logger.info("doCmd: {} {}".format(rtr, cmd))
+ output = tgen.net[rtr].cmd(cmd).strip()
+ if len(output):
+ self.output += 1
+ if checkstr != None:
+ ret = re.search(checkstr, output)
+ if ret == None:
+ self.nomatch += 1
+ else:
+ self.match += 1
+ return ret
+ logger.info("output: " + output)
+ else:
+ logger.info("No output")
+ self.none += 1
+ return None
+
+ def resetCounts(self):
+ self.match = 0
+ self.nomatch = 0
+ self.output = 0
+ self.none = 0
+
+ def getMatch(self):
+ return self.match
+
+ def getNoMatch(self):
+ return self.nomatch
+
+ def getOutput(self):
+ return self.output
+
+ def getNone(self):
+ return self.none
+
+
+def ltemplateVersionCheck(
+ vstr, rname="r1", compstr="<", cli=False, kernel="4.9", iproute2=None, mpls=True
+):
+ tgen = get_topogen()
+ router = tgen.gears[rname]
+
+ if cli:
+ logger.info("calling mininet CLI")
+ tgen.mininet_cli()
+ logger.info("exited mininet CLI")
+
+ if _lt == None:
+ ret = "Template not initialized"
+ return ret
+
+ if _lt.prestarthooksuccess != True:
+ ret = "ltemplatePreRouterStartHook failed"
+ return ret
+
+ if _lt.poststarthooksuccess != True:
+ ret = "ltemplatePostRouterStartHook failed"
+ return ret
+
+ if mpls == True and tgen.hasmpls != True:
+ ret = "MPLS not initialized"
+ return ret
+
+ if kernel != None:
+ krel = platform.release()
+ if topotest.version_cmp(krel, kernel) < 0:
+ ret = "Skipping tests, old kernel ({} < {})".format(krel, kernel)
+ return ret
+
+ if iproute2 != None:
+ if _lt.iproute2Ver == None:
+ # collect/log info on iproute2
+ cc = ltemplateRtrCmd()
+ found = cc.doCmd(
+ tgen, rname, "apt-cache policy iproute2", r"Installed: ([\d\.]*)"
+ )
+            if found != None:
+                _lt.iproute2Ver = found.group(1)
+            else:
+                _lt.iproute2Ver = "0-unknown"
+            logger.info("Have iproute2 version=" + _lt.iproute2Ver)
+
+        if topotest.version_cmp(_lt.iproute2Ver, iproute2) < 0:
+            ret = "Skipping tests, old iproute2 ({} < {})".format(
+                _lt.iproute2Ver, iproute2
+            )
+ return ret
+
+ ret = True
+ try:
+ if router.has_version(compstr, vstr):
+ ret = "Skipping tests, old FRR version {} {}".format(compstr, vstr)
+ return ret
+ except:
+ ret = True
+
+ return ret
+
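+# Typical use from a test script (script name and versions are illustrative):
+# pass the check as a string so ltemplateTest() can eval() it and skip the
+# test when anything other than True is returned:
+#
+#     ltemplateTest("scripts/check_routes.py",
+#                   CheckFuncStr="ltemplateVersionCheck('4.1', mpls=True)")
+#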
+
+# for testing
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/lib/lutil.py b/tests/topotests/lib/lutil.py
new file mode 100644
index 0000000..1eb88f2
--- /dev/null
+++ b/tests/topotests/lib/lutil.py
@@ -0,0 +1,499 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# Copyright 2017, LabN Consulting, L.L.C.
+
+import os
+import re
+import sys
+import time
+import json
+import math
+import time
+from lib.topolog import logger
+from lib.topotest import json_cmp
+
+
+# L utility functions
+#
+# These functions are intended to provide support for CI testing within MiniNet
+# environments.
+
+
+class lUtil:
+ # to be made configurable in the future
+ base_script_dir = "."
+ base_log_dir = "."
+ fout_name = "output.log"
+ fsum_name = "summary.txt"
+ l_level = 6
+ CallOnFail = False
+
+ l_total = 0
+ l_pass = 0
+ l_fail = 0
+ l_filename = ""
+ l_last = None
+ l_line = 0
+ l_dotall_experiment = False
+ l_last_nl = None
+ l_wait_strict = 1
+
+ fout = ""
+ fsum = ""
+ net = ""
+
+ def log(self, str, level=6):
+ if self.l_level > 0:
+ if self.fout == "":
+ self.fout = open(self.fout_name, "w")
+ self.fout.write(str + "\n")
+ if level <= self.l_level:
+ print(str)
+
+ def summary(self, str):
+ if self.fsum == "":
+ self.fsum = open(self.fsum_name, "w")
+ self.fsum.write(
+ "\
+******************************************************************************\n"
+ )
+ self.fsum.write(
+ "\
+Test Target Summary Pass Fail\n"
+ )
+ self.fsum.write(
+ "\
+******************************************************************************\n"
+ )
+ self.fsum.write(str + "\n")
+
+ def result(self, target, success, str, logstr=None):
+ if success:
+ p = 1
+ f = 0
+ self.l_pass += 1
+ sstr = "PASS"
+ else:
+ f = 1
+ p = 0
+ self.l_fail += 1
+ sstr = "FAIL"
+ self.l_total += 1
+ if logstr != None:
+ self.log("R:%d %s: %s" % (self.l_total, sstr, logstr))
+ res = "%-4d %-6s %-56s %-4d %d" % (self.l_total, target, str, p, f)
+ self.log("R:" + res)
+ self.summary(res)
+ if f == 1 and self.CallOnFail != False:
+ self.CallOnFail()
+
+ def closeFiles(self):
+ ret = (
+ "\
+******************************************************************************\n\
+Total %-4d %-4d %d\n\
+******************************************************************************"
+ % (self.l_total, self.l_pass, self.l_fail)
+ )
+ if self.fsum != "":
+ self.fsum.write(ret + "\n")
+ self.fsum.close()
+ self.fsum = ""
+ if self.fout != "":
+ if os.path.isfile(self.fsum_name):
+ r = open(self.fsum_name, "r")
+ self.fout.write(r.read())
+ r.close()
+ self.fout.close()
+ self.fout = ""
+ return ret
+
+ def setFilename(self, name):
+ str = "FILE: " + name
+ self.log(str)
+ self.summary(str)
+ self.l_filename = name
+        self.l_line = 0
+
+ def getCallOnFail(self):
+ return self.CallOnFail
+
+ def setCallOnFail(self, CallOnFail):
+ self.CallOnFail = CallOnFail
+
+ def strToArray(self, string):
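+        # Tokenize a test-script line into an argument array: split on
+        # whitespace, but keep double-quoted strings and backslash-continued
+        # words together as one entry; comment lines yield an empty list.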
+ a = []
+ c = 0
+ end = ""
+ words = string.split()
+ if len(words) < 1 or words[0].startswith("#"):
+ return a
+ for word in words:
+ if len(end) == 0:
+ a.append(word)
+ else:
+ a[c] += str(" " + word)
+ if end == "\\":
+ end = ""
+ if not word.endswith("\\"):
+ if end != '"':
+ if word.startswith('"'):
+ end = '"'
+ else:
+ c += 1
+ else:
+ if word.endswith('"'):
+ end = ""
+ c += 1
+ else:
+ c += 1
+ else:
+ end = "\\"
+ # if len(end) == 0:
+ # print('%d:%s:' % (c, a[c-1]))
+
+ return a
+
+ def execTestFile(self, tstFile):
+ if os.path.isfile(tstFile):
+ f = open(tstFile)
+ for line in f:
+ if len(line) > 1:
+ a = self.strToArray(line)
+ if len(a) >= 6:
+ luCommand(a[1], a[2], a[3], a[4], a[5])
+ else:
+ self.l_line += 1
+ self.log("%s:%s %s" % (self.l_filename, self.l_line, line))
+ if len(a) >= 2:
+ if a[0] == "sleep":
+ time.sleep(int(a[1]))
+ elif a[0] == "include":
+ self.execTestFile(a[1])
+ f.close()
+ else:
+ self.log("unable to read: " + tstFile)
+ sys.exit(1)
+
+ def command(
+ self,
+ target,
+ command,
+ regexp,
+ op,
+ result,
+ returnJson,
+ startt=None,
+ force_result=False,
+ ):
+ global net
+ if op == "jsoncmp_pass" or op == "jsoncmp_fail":
+ returnJson = True
+
+ self.log(
+ "%s (#%d) %s:%s COMMAND:%s:%s:%s:%s:%s:"
+ % (
+ time.asctime(),
+ self.l_total + 1,
+ self.l_filename,
+ self.l_line,
+ target,
+ command,
+ regexp,
+ op,
+ result,
+ )
+ )
+ if self.net == "":
+ return False
+ # self.log("Running %s %s" % (target, command))
+ js = None
+ out = self.net[target].cmd(command).rstrip()
+ if len(out) == 0:
+ report = "<no output>"
+ else:
+ report = out
+ if returnJson == True:
+ try:
+ js = json.loads(out)
+ except:
+ js = None
+ self.log(
+ "WARNING: JSON load failed -- confirm command output is in JSON format."
+ )
+ self.log("COMMAND OUTPUT:%s:" % report)
+
+ # JSON comparison
+ if op == "jsoncmp_pass" or op == "jsoncmp_fail":
+ try:
+ expect = json.loads(regexp)
+ except:
+ expect = None
+ self.log(
+ "WARNING: JSON load failed -- confirm regex input is in JSON format."
+ )
+ json_diff = json_cmp(js, expect)
+ if json_diff != None:
+ if op == "jsoncmp_fail":
+ success = True
+ else:
+ success = False
+ self.log("JSON DIFF:%s:" % json_diff)
+ ret = success
+            else:
+                if op == "jsoncmp_fail":
+                    success = False
+                else:
+                    success = True
+                ret = success
+ self.result(target, success, result)
+ if js != None:
+ return js
+ return ret
+
+ # Experiment: can we achieve the same match behavior via DOTALL
+ # without converting newlines to spaces?
+ out_nl = out
+ search_nl = re.search(regexp, out_nl, re.DOTALL)
+ self.l_last_nl = search_nl
+ # Set up for comparison
+ if search_nl != None:
+ group_nl = search_nl.group()
+ group_nl_converted = " ".join(group_nl.splitlines())
+ else:
+ group_nl_converted = None
+
+ out = " ".join(out.splitlines())
+ search = re.search(regexp, out)
+ self.l_last = search
+ if search == None:
+ if op == "fail":
+ success = True
+ else:
+ success = False
+ ret = success
+ else:
+ ret = search.group()
+ if op != "fail":
+ success = True
+ level = 7
+ else:
+ success = False
+ level = 5
+ self.log("found:%s:" % ret, level)
+ # Experiment: compare matched strings obtained each way
+ if self.l_dotall_experiment and (group_nl_converted != ret):
+ self.log(
+ "DOTALL experiment: strings differ dotall=[%s] orig=[%s]"
+ % (group_nl_converted, ret),
+ 9,
+ )
+ if startt != None:
+ if js != None or ret is not False or force_result is not False:
+ delta = time.time() - startt
+ self.result(target, success, "%s +%4.2f secs" % (result, delta))
+ elif op == "pass" or op == "fail":
+ self.result(target, success, result)
+ if js != None:
+ return js
+ return ret
+
+ def wait(
+ self, target, command, regexp, op, result, wait, returnJson, wait_time=0.5
+ ):
+ self.log(
+ "%s:%s WAIT:%s:%s:%s:%s:%s:%s:%s:"
+ % (
+ self.l_filename,
+ self.l_line,
+ target,
+ command,
+ regexp,
+ op,
+ result,
+ wait,
+ wait_time,
+ )
+ )
+ found = False
+ n = 0
+ startt = time.time()
+
+ if (op == "wait-strict") or ((op == "wait") and self.l_wait_strict):
+ strict = True
+ else:
+ strict = False
+
+        # Calculate the number of `sleep`s we are going to perform.
+ wait_count = int(math.ceil(wait / wait_time)) + 1
+
+ force_result = False
+ while wait_count > 0:
+ n += 1
+
+ # log a failure on last iteration if we don't get desired regexp
+ if strict and (wait_count == 1):
+ force_result = True
+
+ found = self.command(
+ target, command, regexp, op, result, returnJson, startt, force_result
+ )
+ if found is not False:
+ break
+
+ wait_count -= 1
+ if wait_count > 0:
+ time.sleep(wait_time)
+
+ delta = time.time() - startt
+ self.log("Done after %d loops, time=%s, Found=%s" % (n, delta, found))
+ return found
+
+
+# initialized by luStart
+LUtil = None
+
+
+# entry calls
+def luStart(
+ baseScriptDir=".",
+ baseLogDir=".",
+ net="",
+ fout="output.log",
+ fsum="summary.txt",
+ level=None,
+):
+ global LUtil
+ # init class
+ LUtil = lUtil()
+ LUtil.base_script_dir = baseScriptDir
+ LUtil.base_log_dir = baseLogDir
+ LUtil.net = net
+ if fout != "":
+ LUtil.fout_name = baseLogDir + "/" + fout
+ if fsum != None:
+ LUtil.fsum_name = baseLogDir + "/" + fsum
+ if level != None:
+ LUtil.l_level = level
+    LUtil.l_dotall_experiment = True
+
+
+def luCommand(
+ target,
+ command,
+ regexp=".",
+ op="none",
+ result="",
+ time=10,
+ returnJson=False,
+ wait_time=0.5,
+):
+ waitops = ["wait", "wait-strict", "wait-nostrict"]
+
+ if op in waitops:
+ return LUtil.wait(
+ target, command, regexp, op, result, time, returnJson, wait_time
+ )
+ else:
+ return LUtil.command(target, command, regexp, op, result, returnJson)
+
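+
+# Typical call (router name, command and prefix are illustrative): poll for up
+# to 30 seconds until the regexp matches the command output, recording a
+# pass/fail result labeled "Route present":
+#
+#     luCommand("r1", 'vtysh -c "show ip route"', "10.0.0.0/24", "wait",
+#               "Route present", 30)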
+
+def luLast(usenl=False):
+ if usenl:
+ if LUtil.l_last_nl != None:
+ LUtil.log("luLast:%s:" % LUtil.l_last_nl.group(), 7)
+ return LUtil.l_last_nl
+ else:
+ if LUtil.l_last != None:
+ LUtil.log("luLast:%s:" % LUtil.l_last.group(), 7)
+ return LUtil.l_last
+
+
+def luInclude(filename, CallOnFail=None):
+ tstFile = LUtil.base_script_dir + "/" + filename
+ LUtil.setFilename(filename)
+ if CallOnFail != None:
+ oldCallOnFail = LUtil.getCallOnFail()
+ LUtil.setCallOnFail(CallOnFail)
+ if filename.endswith(".py"):
+ LUtil.log("luInclude: execfile " + tstFile)
+ with open(tstFile) as infile:
+ exec(infile.read())
+ else:
+ LUtil.log("luInclude: execTestFile " + tstFile)
+ LUtil.execTestFile(tstFile)
+ if CallOnFail != None:
+ LUtil.setCallOnFail(oldCallOnFail)
+
+
+def luFinish():
+ global LUtil
+ ret = LUtil.closeFiles()
+ # done
+ LUtil = None
+ return ret
+
+
+def luNumFail():
+ return LUtil.l_fail
+
+
+def luNumPass():
+ return LUtil.l_pass
+
+
+def luResult(target, success, str, logstr=None):
+ return LUtil.result(target, success, str, logstr)
+
+
+def luShowResults(prFunction):
+ printed = 0
+ sf = open(LUtil.fsum_name, "r")
+ for line in sf:
+ printed += 1
+ prFunction(line.rstrip())
+ sf.close()
+
+
+def luShowFail():
+ printed = 0
+ sf = open(LUtil.fsum_name, "r")
+ for line in sf:
+ if line[-2] != "0":
+ printed += 1
+ logger.error(line.rstrip())
+ sf.close()
+ if printed > 0:
+ logger.error("See %s for details of errors" % LUtil.fout_name)
+
+
+#
+# Sets default wait type for luCommand(op="wait) (may be overridden by
+# specifying luCommand(op="wait-strict") or luCommand(op="wait-nostrict")).
+#
+# "nostrict" is the historical default behavior, which is to ignore
+# failures to match the specified regexp in the specified time.
+#
+# "strict" means that failure to match the specified regexp in the
+# specified time yields an explicit, logged failure result
+#
+def luSetWaitType(waittype):
+ if waittype == "strict":
+ LUtil.l_wait_strict = 1
+ else:
+ if waittype == "nostrict":
+ LUtil.l_wait_strict = 0
+ else:
+ raise ValueError('waittype must be one of "strict" or "nostrict"')
+
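+
+# For example, to make every plain op="wait" log an explicit failure when the
+# regexp never matches within the allotted time:
+#
+#     luSetWaitType("strict")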
+
+# for testing
+if __name__ == "__main__":
+ print(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/lib")
+ luStart()
+ for arg in sys.argv[1:]:
+ luInclude(arg)
+ luFinish()
+ sys.exit(0)
diff --git a/tests/topotests/lib/mcast-tester.py b/tests/topotests/lib/mcast-tester.py
new file mode 100755
index 0000000..5efbecd
--- /dev/null
+++ b/tests/topotests/lib/mcast-tester.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: ISC
+#
+# Copyright (C) 2021 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+
+"""
+Subscribe to a multicast group so that the kernel sends an IGMP JOIN
+for the multicast group we subscribed to.
+"""
+
+import argparse
+import json
+import ipaddress
+import os
+import socket
+import struct
+import subprocess
+import sys
+import time
+
+
+#
+# Functions
+#
+def interface_name_to_index(name):
+ "Gets the interface index using its name. Returns None on failure."
+ interfaces = json.loads(subprocess.check_output("ip -j link show", shell=True))
+
+ for interface in interfaces:
+ if interface["ifname"] == name:
+ return interface["ifindex"]
+
+ return None
+
+
+def multicast_join(sock, ifindex, group, port):
+ "Joins a multicast group."
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
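+    # The membership request mirrors the kernel structures: IPv4 uses
+    # struct ip_mreqn (group, imr_address=INADDR_ANY, imr_ifindex), while
+    # IPv6 uses struct ipv6_mreq (group, interface index).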
+ if ip_version == 4:
+ mreq = group.packed + struct.pack("@II", socket.INADDR_ANY, ifindex)
+ opt = socket.IP_ADD_MEMBERSHIP
+ else:
+ mreq = group.packed + struct.pack("@I", ifindex)
+ opt = socket.IPV6_JOIN_GROUP
+ sock.bind((str(group), port))
+ sock.setsockopt(ip_proto, opt, mreq)
+
+
+#
+# Main code.
+#
+parser = argparse.ArgumentParser(description="Multicast RX utility")
+parser.add_argument("group", help="Multicast IP")
+parser.add_argument("interface", help="Interface name")
+parser.add_argument("--port", type=int, default=1000, help="port to send to")
+parser.add_argument("--ttl", type=int, default=16, help="TTL/hops for sending packets")
+parser.add_argument("--socket", help="Point to topotest UNIX socket")
+parser.add_argument(
+ "--send", help="Transmit instead of join with interval", type=float, default=0
+)
+args = parser.parse_args()
+
+# Get interface index/validate.
+ifindex = interface_name_to_index(args.interface)
+if ifindex is None:
+ sys.stderr.write("Interface {} does not exists\n".format(args.interface))
+ sys.exit(1)
+
+# We need root privileges to set up multicast.
+if os.geteuid() != 0:
+ sys.stderr.write("ERROR: You must have root privileges\n")
+ sys.exit(1)
+
+# Wait for topotest to synchronize with us.
+if not args.socket:
+ toposock = None
+else:
+ toposock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
+ while True:
+ try:
+ toposock.connect(args.socket)
+ break
+ except ConnectionRefusedError:
+ time.sleep(1)
+ continue
+ # Set topotest socket non blocking so we can multiplex the main loop.
+ toposock.setblocking(False)
+
+args.group = ipaddress.ip_address(args.group)
+ip_version = args.group.version
+ip_family = socket.AF_INET if ip_version == 4 else socket.AF_INET6
+ip_proto = socket.IPPROTO_IP if ip_version == 4 else socket.IPPROTO_IPV6
+
+msock = socket.socket(ip_family, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
+if args.send > 0:
+    # Bind outgoing multicast packets to the given interface
+    # (socket option 25 is SO_BINDTODEVICE on Linux).
+ msock.setsockopt(
+ socket.SOL_SOCKET,
+ 25,
+ struct.pack("%ds" % len(args.interface), args.interface.encode("utf-8")),
+ )
+
+ # Set packets TTL/hops.
+ ttlopt = socket.IP_MULTICAST_TTL if ip_version == 4 else socket.IPV6_MULTICAST_HOPS
+ if ip_version == 4:
+ msock.setsockopt(ip_proto, ttlopt, struct.pack("B", args.ttl))
+ else:
+ msock.setsockopt(ip_proto, ttlopt, struct.pack("I", args.ttl))
+
+ # Block to ensure packet send.
+ msock.setblocking(True)
+else:
+ multicast_join(msock, ifindex, args.group, args.port)
+
+
+def should_exit():
+ if not toposock:
+        # Senders sleep in the main loop; receivers sleep here.
+ if not args.send:
+ time.sleep(100)
+ return False
+ else:
+ try:
+ data = toposock.recv(1)
+ if data == b"":
+ print(" -> Connection closed")
+ return True
+ except BlockingIOError:
+ return False
+
+
+counter = 0
+while not should_exit():
+ if args.send > 0:
+ msock.sendto(b"test %d" % counter, (str(args.group), args.port))
+ counter += 1
+ time.sleep(args.send)
+
+msock.close()
+sys.exit(0)
diff --git a/tests/topotests/lib/micronet.py b/tests/topotests/lib/micronet.py
new file mode 100644
index 0000000..f4aa827
--- /dev/null
+++ b/tests/topotests/lib/micronet.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# July 9 2021, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2021-2023, LabN Consulting, L.L.C.
+#
+# flake8: noqa
+
+from munet.base import BaseMunet as Micronet
+from munet.base import (
+ Bridge,
+ Commander,
+ LinuxNamespace,
+ SharedNamespace,
+ Timeout,
+ cmd_error,
+ comm_error,
+ commander,
+ get_exec_path,
+ proc_error,
+ root_hostname,
+ shell_quote,
+)
diff --git a/tests/topotests/lib/micronet_compat.py b/tests/topotests/lib/micronet_compat.py
new file mode 100644
index 0000000..b348c85
--- /dev/null
+++ b/tests/topotests/lib/micronet_compat.py
@@ -0,0 +1,370 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# July 11 2021, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2021-2023, LabN Consulting, L.L.C
+#
+import ipaddress
+import os
+
+from munet import cli
+from munet.base import BaseMunet, LinuxNamespace
+
+
+class Node(LinuxNamespace):
+ """Node (mininet compat)."""
+
+ def __init__(self, name, rundir=None, **kwargs):
+ nkwargs = {}
+
+ if "unet" in kwargs:
+ nkwargs["unet"] = kwargs["unet"]
+ if "private_mounts" in kwargs:
+ nkwargs["private_mounts"] = kwargs["private_mounts"]
+ if "logger" in kwargs:
+ nkwargs["logger"] = kwargs["logger"]
+
+ # This is expected by newer munet CLI code
+ self.config_dirname = ""
+ self.config = {"kind": "frr"}
+ self.mgmt_ip = None
+ self.mgmt_ip6 = None
+
+ super().__init__(name, **nkwargs)
+
+ self.rundir = self.unet.rundir.joinpath(self.name)
+
+ def cmd(self, cmd, **kwargs):
+ """Execute a command, joins stdout, stderr, ignores exit status."""
+
+ return super(Node, self).cmd_legacy(cmd, **kwargs)
+
+ def config_host(self, lo="up", **params):
+ """Called by Micronet when topology is built (but not started)."""
+ # mininet brings up loopback here.
+ del params
+ del lo
+
+ def intfNames(self):
+ return self.intfs
+
+ def terminate(self):
+ return
+
+ def add_vlan(self, vlanname, linkiface, vlanid):
+ self.logger.debug("Adding VLAN interface: %s (%s)", vlanname, vlanid)
+ ip_path = self.get_exec_path("ip")
+ assert ip_path, "XXX missing ip command!"
+ self.cmd_raises(
+ [
+ ip_path,
+ "link",
+ "add",
+ "link",
+ linkiface,
+ "name",
+ vlanname,
+ "type",
+ "vlan",
+ "id",
+ vlanid,
+ ]
+ )
+ self.cmd_raises([ip_path, "link", "set", "dev", vlanname, "up"])
+
+ def add_loop(self, loopname):
+ self.logger.debug("Adding Linux iface: %s", loopname)
+ ip_path = self.get_exec_path("ip")
+ assert ip_path, "XXX missing ip command!"
+ self.cmd_raises([ip_path, "link", "add", loopname, "type", "dummy"])
+ self.cmd_raises([ip_path, "link", "set", "dev", loopname, "up"])
+
+ def add_l3vrf(self, vrfname, tableid):
+ self.logger.debug("Adding Linux VRF: %s", vrfname)
+ ip_path = self.get_exec_path("ip")
+ assert ip_path, "XXX missing ip command!"
+ self.cmd_raises(
+ [ip_path, "link", "add", vrfname, "type", "vrf", "table", tableid]
+ )
+ self.cmd_raises([ip_path, "link", "set", "dev", vrfname, "up"])
+
+ def del_iface(self, iface):
+ self.logger.debug("Removing Linux Iface: %s", iface)
+ ip_path = self.get_exec_path("ip")
+ assert ip_path, "XXX missing ip command!"
+ self.cmd_raises([ip_path, "link", "del", iface])
+
+ def attach_iface_to_l3vrf(self, ifacename, vrfname):
+ self.logger.debug("Attaching Iface %s to Linux VRF %s", ifacename, vrfname)
+ ip_path = self.get_exec_path("ip")
+ assert ip_path, "XXX missing ip command!"
+ if vrfname:
+ self.cmd_raises(
+ [ip_path, "link", "set", "dev", ifacename, "master", vrfname]
+ )
+ else:
+ self.cmd_raises([ip_path, "link", "set", "dev", ifacename, "nomaster"])
+
+ set_cwd = LinuxNamespace.set_ns_cwd
+
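+
+# Editorial sketch (hypothetical names and ids): the interface helpers above
+# are typically combined to place a VLAN sub-interface into a VRF.
+def _example_vlan_in_vrf(node):
+    """Create r1-eth0.100 inside VRF blue on the given Node (sketch)."""
+    node.add_vlan("r1-eth0.100", "r1-eth0", "100")
+    node.add_l3vrf("blue", "1001")
+    node.attach_iface_to_l3vrf("r1-eth0.100", "blue")
+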
+
+class Topo(object): # pylint: disable=R0205
+ def __init__(self, *args, **kwargs):
+ raise Exception("Remove Me")
+
+
+class Mininet(BaseMunet):
+ """
+ Mininet using Micronet.
+ """
+
+ g_mnet_inst = None
+
+ def __init__(self, rundir=None, pytestconfig=None, logger=None):
+ """
+ Create a Micronet.
+ """
+ if Mininet.g_mnet_inst is not None:
+ Mininet.g_mnet_inst.stop()
+ Mininet.g_mnet_inst = self
+
+ self.configured_hosts = set()
+ self.host_params = {}
+ self.prefix_len = 8
+
+        # SNMPd used to require this, which was set in the mininet shell
+        # that all commands executed from. This is a goofy default, so let's
+        # not do it if we don't have to. The snmpd.conf files have been updated
+        # to set permissions to root:frr 770, making this unneeded in that case.
+ # os.umask(0)
+
+ super(Mininet, self).__init__(
+ pid=False, rundir=rundir, pytestconfig=pytestconfig, logger=logger
+ )
+
+ # From munet/munet/native.py
+ with open(os.path.join(self.rundir, "nspid"), "w", encoding="ascii") as f:
+ f.write(f"{self.pid}\n")
+
+ with open(os.path.join(self.rundir, "nspids"), "w", encoding="ascii") as f:
+ f.write(f'{" ".join([str(x) for x in self.pids])}\n')
+
+ hosts_file = os.path.join(self.rundir, "hosts.txt")
+ with open(hosts_file, "w", encoding="ascii") as hf:
+ hf.write(
+ f"""127.0.0.1\tlocalhost {self.name}
+::1\tip6-localhost ip6-loopback
+fe00::0\tip6-localnet
+ff00::0\tip6-mcastprefix
+ff02::1\tip6-allnodes
+ff02::2\tip6-allrouters
+"""
+ )
+ self.bind_mount(hosts_file, "/etc/hosts")
+
+ # Common CLI commands for any topology
+ cdict = {
+ "commands": [
+ #
+ # Window commands.
+ #
+ {
+ "name": "pcap",
+ "format": "pcap NETWORK",
+ "help": (
+ "capture packets from NETWORK into file capture-NETWORK.pcap"
+ " the command is run within a new window which also shows"
+ " packet summaries. NETWORK can also be an interface specified"
+ " as HOST:INTF. To capture inside the host namespace."
+ ),
+ "exec": "tshark -s 9200 -i {0} -P -w capture-{0}.pcap",
+ "top-level": True,
+ "new-window": {"background": True},
+ },
+ {
+ "name": "term",
+ "format": "term HOST [HOST ...]",
+ "help": "open terminal[s] (TMUX or XTerm) on HOST[S], * for all",
+ "exec": "bash",
+ "new-window": True,
+ },
+ {
+ "name": "vtysh",
+ "exec": "/usr/bin/vtysh",
+ "format": "vtysh ROUTER [ROUTER ...]",
+ "new-window": True,
+ "kinds": ["frr"],
+ },
+ {
+ "name": "xterm",
+ "format": "xterm HOST [HOST ...]",
+ "help": "open XTerm[s] on HOST[S], * for all",
+ "exec": "bash",
+ "new-window": {
+ "forcex": True,
+ },
+ },
+ {
+ "name": "logd",
+ "exec": "tail -F %RUNDIR%/{}.log",
+ "format": "logd HOST [HOST ...] DAEMON",
+ "help": (
+ "tail -f on the logfile of the given "
+ "DAEMON for the given HOST[S]"
+ ),
+ "new-window": True,
+ },
+ {
+ "name": "stdlog",
+ "exec": (
+ "[ -e %RUNDIR%/frr.log ] && tail -F %RUNDIR%/frr.log "
+ "|| tail -F /var/log/frr.log"
+ ),
+ "format": "stdlog HOST [HOST ...]",
+ "help": "tail -f on the `frr.log` for the given HOST[S]",
+ "new-window": True,
+ },
+ {
+ "name": "stdout",
+ "exec": "tail -F %RUNDIR%/{0}.err",
+ "format": "stdout HOST [HOST ...] DAEMON",
+ "help": (
+ "tail -f on the stdout of the given DAEMON for the given HOST[S]"
+ ),
+ "new-window": True,
+ },
+ {
+ "name": "stderr",
+ "exec": "tail -F %RUNDIR%/{0}.out",
+ "format": "stderr HOST [HOST ...] DAEMON",
+ "help": (
+ "tail -f on the stderr of the given DAEMON for the given HOST[S]"
+ ),
+ "new-window": True,
+ },
+ #
+ # Non-window commands.
+ #
+ {
+ "name": "",
+ "exec": "vtysh -c '{}'",
+ "format": "[ROUTER ...] COMMAND",
+ "help": "execute vtysh COMMAND on the router[s]",
+ "kinds": ["frr"],
+ },
+ {
+ "name": "sh",
+ "format": "[HOST ...] sh <SHELL-COMMAND>",
+ "help": "execute <SHELL-COMMAND> on hosts",
+ "exec": "{}",
+ },
+ {
+ "name": "shi",
+ "format": "[HOST ...] shi <INTERACTIVE-COMMAND>",
+ "help": "execute <INTERACTIVE-COMMAND> on HOST[s]",
+ "exec": "{}",
+ "interactive": True,
+ },
+ ]
+ }
+
+ cli.add_cli_config(self, cdict)
+
+ shellopt = self.cfgopt.get_option_list("--shell")
+ if "all" in shellopt or "." in shellopt:
+ self.run_in_window("bash", title="munet")
+
+ # This is expected by newer munet CLI code
+ self.config_dirname = ""
+ self.config = {}
+
+ self.logger.debug("%s: Creating", self)
+
+ def __str__(self):
+ return "Mininet()"
+
+ def configure_hosts(self):
+ """
+ Configure hosts once the topology has been built.
+
+ This function can be called multiple times if routers are added to the topology
+ later.
+ """
+ if not self.hosts:
+ return
+
+ self.logger.debug("Configuring hosts: %s", self.hosts.keys())
+
+ for name in sorted(self.hosts.keys()):
+ if name in self.configured_hosts:
+ continue
+
+ host = self.hosts[name]
+ first_intf = host.intfs[0] if host.intfs else None
+ params = self.host_params[name]
+
+ if first_intf and "ip" in params:
+ ip = params["ip"]
+ i = ip.find("/")
+ if i == -1:
+ plen = self.prefix_len
+ else:
+ plen = int(ip[i + 1 :])
+ ip = ip[:i]
+
+ host.cmd_raises("ip addr add {}/{} dev {}".format(ip, plen, first_intf))
+
+ # can be used by munet cli
+ host.mgmt_ip = ipaddress.ip_address(ip)
+
+ if "defaultRoute" in params:
+ host.cmd_raises(
+ "ip route add default {}".format(params["defaultRoute"])
+ )
+
+ host.config_host()
+
+ self.configured_hosts.add(name)
+
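+    # Editorial sketch (illustrative values): the "ip" and "defaultRoute"
+    # params consumed by configure_hosts() above normally arrive through
+    # add_host(name, **kwargs).
+    @staticmethod
+    def _example_host_params():
+        """Return a params dict in the shape configure_hosts() expects (sketch)."""
+        return {"ip": "10.0.0.1/24", "defaultRoute": "via 10.0.0.254"}
+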
+ def add_host(self, name, cls=Node, **kwargs):
+ """Add a host to micronet."""
+
+ self.host_params[name] = kwargs
+ super(Mininet, self).add_host(name, cls=cls, **kwargs)
+
+ def start(self):
+ """Start the micronet topology."""
+ pcapopt = self.cfgopt.get_option_list("--pcap")
+ if "all" in pcapopt:
+ pcapopt = self.switches.keys()
+ for pcap in pcapopt:
+ if ":" in pcap:
+ host, intf = pcap.split(":")
+ pcap = f"{host}-{intf}"
+ host = self.hosts[host]
+ else:
+ host = self
+ intf = pcap
+ pcapfile = f"{self.rundir}/capture-{pcap}.pcap"
+ host.run_in_window(
+ f"tshark -s 9200 -i {intf} -P -w {pcapfile}",
+ background=True,
+ title=f"cap:{pcap}",
+ )
+
+ self.logger.debug("%s: Starting (no-op).", self)
+
+ def stop(self):
+ """Stop the mininet topology (deletes)."""
+ self.logger.debug("%s: Stopping (deleting).", self)
+
+ self.delete()
+
+ self.logger.debug("%s: Stopped (deleted).", self)
+
+ if Mininet.g_mnet_inst == self:
+ Mininet.g_mnet_inst = None
+
+ def cli(self):
+ cli.cli(self)
diff --git a/tests/topotests/lib/ospf.py b/tests/topotests/lib/ospf.py
new file mode 100644
index 0000000..5b18f8b
--- /dev/null
+++ b/tests/topotests/lib/ospf.py
@@ -0,0 +1,3028 @@
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2020 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+
+import ipaddress
+import sys
+from copy import deepcopy
+from time import sleep
+
+# Import common_config to use commonly used APIs
+from lib.common_config import (
+ create_common_configurations,
+ InvalidCLIError,
+ generate_ips,
+ retry,
+ run_frr_cmd,
+ validate_ip_address,
+)
+from lib.topolog import logger
+from lib.topotest import frr_unicode
+
+################################
+# Configure procs
+################################
+
+
+def create_router_ospf(tgen, topo=None, input_dict=None, build=False, load_config=True):
+ """
+ API to configure ospf on router.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+ * `load_config` : Loading the config to router this is set as True.
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "ospf": {
+ "router_id": "22.22.22.22",
+ "area": [{ "id": "0.0.0.0", "type": "nssa"}]
+            }
+        }
+    }
+
+ result = create_router_ospf(tgen, topo, input_dict)
+
+ Returns
+ -------
+ True or False
+ """
+ logger.debug("Entering lib API: create_router_ospf()")
+ result = False
+
+ if topo is None:
+ topo = tgen.json_topo
+
+ if not input_dict:
+ input_dict = deepcopy(topo)
+ else:
+ topo = topo["routers"]
+ input_dict = deepcopy(input_dict)
+
+ for ospf in ["ospf", "ospf6"]:
+ config_data_dict = {}
+
+ for router in input_dict.keys():
+ if ospf not in input_dict[router]:
+ logger.debug("Router %s: %s not present in input_dict", router, ospf)
+ continue
+
+ config_data = __create_ospf_global(
+ tgen, input_dict, router, build, load_config, ospf
+ )
+ if config_data:
+ if router not in config_data_dict:
+ config_data_dict[router] = config_data
+ else:
+ config_data_dict[router].extend(config_data)
+ try:
+ result = create_common_configurations(
+ tgen, config_data_dict, ospf, build, load_config
+ )
+ except InvalidCLIError:
+ logger.error("create_router_ospf (ipv4)", exc_info=True)
+ result = False
+
+ logger.debug("Exiting lib API: create_router_ospf()")
+ return result
+
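+
+# Editorial sketch (hypothetical router name): create_router_ospf() also
+# honors a "delete" knob; the dict below would unconfigure OSPF on r1.
+def _example_ospf_delete_dict():
+    """Return an input_dict that removes the OSPF instance on r1 (sketch)."""
+    return {"r1": {"ospf": {"delete": True}}}
+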
+
+def __create_ospf_global(tgen, input_dict, router, build, load_config, ospf):
+ """
+ Helper API to create ospf global configuration.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router to be configured.
+ * `build` : Only for initial setup phase this is set as True.
+ * `load_config` : Loading the config to router this is set as True.
+ * `ospf` : either 'ospf' or 'ospf6'
+
+ Usage
+ -----
+ input_dict = {
+ "routers": {
+ "r1": {
+ "links": {
+ "r3": {
+ "ipv6": "2013:13::1/64",
+ "ospf6": {
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "1.1.1.1",
+ "neighbors": {
+ "r3": {
+ "area": "1.1.1.1"
+ }
+ }
+ }
+ }
+ }
+
+ Returns
+ -------
+ list of configuration commands
+ """
+
+ config_data = []
+
+ if ospf not in input_dict[router]:
+ return config_data
+
+ logger.debug("Entering lib API: __create_ospf_global()")
+
+ ospf_data = input_dict[router][ospf]
+ del_ospf_action = ospf_data.setdefault("delete", False)
+ if del_ospf_action:
+ config_data = ["no router {}".format(ospf)]
+ return config_data
+
+ cmd = "router {}".format(ospf)
+
+ config_data.append(cmd)
+
+ # router id
+ router_id = ospf_data.setdefault("router_id", None)
+ del_router_id = ospf_data.setdefault("del_router_id", False)
+ if del_router_id:
+ config_data.append("no {} router-id".format(ospf))
+ if router_id:
+ config_data.append("{} router-id {}".format(ospf, router_id))
+
+ # log-adjacency-changes
+ log_adj_changes = ospf_data.setdefault("log_adj_changes", None)
+ del_log_adj_changes = ospf_data.setdefault("del_log_adj_changes", False)
+ if del_log_adj_changes:
+ config_data.append("no log-adjacency-changes detail")
+ if log_adj_changes:
+ config_data.append("log-adjacency-changes {}".format(log_adj_changes))
+
+ # aggregation timer
+ aggr_timer = ospf_data.setdefault("aggr_timer", None)
+ del_aggr_timer = ospf_data.setdefault("del_aggr_timer", False)
+ if del_aggr_timer:
+ config_data.append("no aggregation timer")
+ if aggr_timer:
+ config_data.append("aggregation timer {}".format(aggr_timer))
+
+ # maximum path information
+ ecmp_data = ospf_data.setdefault("maximum-paths", {})
+ if ecmp_data:
+ cmd = "maximum-paths {}".format(ecmp_data)
+ del_action = ospf_data.setdefault("del_max_path", False)
+ if del_action:
+ cmd = "no maximum-paths"
+ config_data.append(cmd)
+
+ # Flood reduction.
+ flood_data = ospf_data.setdefault("flood-reduction", {})
+ if flood_data:
+ cmd = "flood-reduction"
+ del_action = ospf_data.setdefault("del_flood_reduction", False)
+ if del_action:
+ cmd = "no flood-reduction"
+ config_data.append(cmd)
+
+ # LSA refresh timer - A hidden command.
+ refresh_data = ospf_data.setdefault("lsa-refresh", {})
+ if refresh_data:
+ cmd = "ospf lsa-refresh {}".format(refresh_data)
+ del_action = ospf_data.setdefault("del_lsa_refresh", False)
+ if del_action:
+ cmd = "no ospf lsa-refresh"
+ config_data.append(cmd)
+
+ # redistribute command
+ redistribute_data = ospf_data.setdefault("redistribute", {})
+ if redistribute_data:
+ for redistribute in redistribute_data:
+ if "redist_type" not in redistribute:
+ logger.debug(
+ "Router %s: 'redist_type' not present in " "input_dict", router
+ )
+ else:
+ cmd = "redistribute {}".format(redistribute["redist_type"])
+                if "route_map" in redistribute:
+                    cmd = cmd + " route-map {}".format(redistribute["route_map"])
+ del_action = redistribute.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ # area information
+ area_data = ospf_data.setdefault("area", {})
+ if area_data:
+ for area in area_data:
+ if "id" not in area:
+ logger.debug(
+ "Router %s: 'area id' not present in " "input_dict", router
+ )
+ else:
+ cmd = "area {}".format(area["id"])
+
+ if "type" in area:
+ cmd = cmd + " {}".format(area["type"])
+
+ if "flood-reduction" in area:
+ cmd = cmd + " flood-reduction"
+
+ del_action = area.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ # def route information
+ def_rte_data = ospf_data.setdefault("default-information", {})
+ if def_rte_data:
+ if "originate" not in def_rte_data:
+ logger.debug(
+ "Router %s: 'originate key' not present in " "input_dict", router
+ )
+ else:
+ cmd = "default-information originate"
+
+ if "always" in def_rte_data:
+ cmd = cmd + " always"
+
+ if "metric" in def_rte_data:
+ cmd = cmd + " metric {}".format(def_rte_data["metric"])
+
+ if "metric-type" in def_rte_data:
+ cmd = cmd + " metric-type {}".format(def_rte_data["metric-type"])
+
+ if "route-map" in def_rte_data:
+ cmd = cmd + " route-map {}".format(def_rte_data["route-map"])
+
+ del_action = def_rte_data.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ # summary information
+ summary_data = ospf_data.setdefault("summary-address", {})
+ if summary_data:
+ for summary in summary_data:
+ if "prefix" not in summary:
+ logger.debug(
+ "Router %s: 'summary-address' not present in " "input_dict",
+ router,
+ )
+ else:
+ cmd = "summary {}/{}".format(summary["prefix"], summary["mask"])
+
+ _tag = summary.setdefault("tag", None)
+ if _tag:
+ cmd = "{} tag {}".format(cmd, _tag)
+
+ _advertise = summary.setdefault("advertise", True)
+ if not _advertise:
+ cmd = "{} no-advertise".format(cmd)
+
+ del_action = summary.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ # ospf gr information
+ gr_data = ospf_data.setdefault("graceful-restart", {})
+ if gr_data:
+ if "opaque" in gr_data and gr_data["opaque"]:
+ cmd = "capability opaque"
+ if gr_data.setdefault("delete", False):
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if "helper enable" in gr_data and not gr_data["helper enable"]:
+ cmd = "graceful-restart helper enable"
+ if gr_data.setdefault("delete", False):
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+ elif "helper enable" in gr_data and type(gr_data["helper enable"]) is list:
+ for rtrs in gr_data["helper enable"]:
+ cmd = "graceful-restart helper enable {}".format(rtrs)
+ if gr_data.setdefault("delete", False):
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if "helper" in gr_data:
+ if type(gr_data["helper"]) is not list:
+ gr_data["helper"] = list(gr_data["helper"])
+ for helper_role in gr_data["helper"]:
+ cmd = "graceful-restart helper {}".format(helper_role)
+ if gr_data.setdefault("delete", False):
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if "supported-grace-time" in gr_data:
+ cmd = "graceful-restart helper supported-grace-time {}".format(
+ gr_data["supported-grace-time"]
+ )
+ if gr_data.setdefault("delete", False):
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ config_data.append("exit")
+ logger.debug("Exiting lib API: create_ospf_global()")
+
+ return config_data
+
+
+def config_ospf_interface(
+ tgen, topo=None, input_dict=None, build=False, load_config=True
+):
+ """
+    API to configure ospf parameters on interfaces.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+ * `load_config` : Loading the config to router this is set as True.
+
+ Usage
+ -----
+ r1_ospf_auth = {
+ "r1": {
+ "links": {
+ "r2": {
+ "ospf": {
+ "authentication": "message-digest",
+ "authentication-key": "ospf",
+ "message-digest-key": "10"
+ }
+ }
+ }
+ }
+ }
+ result = config_ospf_interface(tgen, topo, r1_ospf_auth)
+
+ Returns
+ -------
+ True or False
+ """
+ logger.debug("Enter lib config_ospf_interface")
+ result = False
+
+ if topo is None:
+ topo = tgen.json_topo
+
+ if not input_dict:
+ input_dict = deepcopy(topo)
+ else:
+ input_dict = deepcopy(input_dict)
+
+ config_data_dict = {}
+
+ for router in input_dict.keys():
+ config_data = []
+ for lnk in input_dict[router]["links"].keys():
+ if "ospf" not in input_dict[router]["links"][lnk]:
+ logger.debug(
+ "Router %s: ospf config is not present in" "input_dict", router
+ )
+ continue
+ ospf_data = input_dict[router]["links"][lnk]["ospf"]
+ data_ospf_area = ospf_data.setdefault("area", None)
+ data_ospf_auth = ospf_data.setdefault("authentication", None)
+ data_ospf_dr_priority = ospf_data.setdefault("priority", None)
+ data_ospf_cost = ospf_data.setdefault("cost", None)
+ data_ospf_mtu = ospf_data.setdefault("mtu_ignore", None)
+
+ try:
+ intf = topo["routers"][router]["links"][lnk]["interface"]
+ except KeyError:
+ intf = topo["switches"][router]["links"][lnk]["interface"]
+
+ # interface
+ cmd = "interface {}".format(intf)
+
+ config_data.append(cmd)
+ # interface area config
+ if data_ospf_area:
+ cmd = "ip ospf area {}".format(data_ospf_area)
+ config_data.append(cmd)
+
+ # interface ospf auth
+ if data_ospf_auth:
+ if data_ospf_auth == "null":
+ cmd = "ip ospf authentication null"
+ elif data_ospf_auth == "message-digest":
+ cmd = "ip ospf authentication message-digest"
+ elif data_ospf_auth == "key-chain":
+ cmd = "ip ospf authentication key-chain {}".format(
+ ospf_data["keychain"]
+ )
+ else:
+ cmd = "ip ospf authentication"
+
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if "message-digest-key" in ospf_data:
+ cmd = "ip ospf message-digest-key {} md5 {}".format(
+ ospf_data["message-digest-key"], ospf_data["authentication-key"]
+ )
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if (
+ "authentication-key" in ospf_data
+ and "message-digest-key" not in ospf_data
+ ):
+ cmd = "ip ospf authentication-key {}".format(
+ ospf_data["authentication-key"]
+ )
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ # interface ospf dr priority
+ if data_ospf_dr_priority:
+ cmd = "ip ospf priority {}".format(ospf_data["priority"])
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ # interface ospf cost
+ if data_ospf_cost:
+ cmd = "ip ospf cost {}".format(ospf_data["cost"])
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ # interface ospf mtu
+ if data_ospf_mtu:
+ cmd = "ip ospf mtu-ignore"
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if build:
+ return config_data
+
+ if config_data:
+ config_data_dict[router] = config_data
+
+ result = create_common_configurations(
+ tgen, config_data_dict, "interface_config", build=build
+ )
+
+ logger.debug("Exiting lib API: config_ospf_interface()")
+ return result
+
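+
+# Editorial sketch (hypothetical names): interface-level cost and DR priority
+# are driven through the same links structure config_ospf_interface() parses.
+def _example_ospf_intf_dict():
+    """Return an input_dict setting cost and priority on r1's link to r2 (sketch)."""
+    return {"r1": {"links": {"r2": {"ospf": {"cost": 100, "priority": 50}}}}}
+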
+
+def clear_ospf(tgen, router, ospf=None):
+ """
+    This API is to clear ospf neighborship by running the
+    "clear ip ospf interface" command.
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+    * `router`: device under test
+    * `ospf`: if set, clear OSPFv3 ("clear ipv6 ospf interface") instead
+
+ Usage
+ -----
+ clear_ospf(tgen, "r1")
+ """
+
+ logger.debug("Entering lib API: clear_ospf()")
+ if router not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[router]
+ # Clearing OSPF
+ if ospf:
+ version = "ipv6"
+ else:
+ version = "ip"
+
+ cmd = "clear {} ospf interface".format(version)
+ logger.info("Clearing ospf process on router %s.. using command '%s'", router, cmd)
+ run_frr_cmd(rnode, cmd)
+
+ logger.debug("Exiting lib API: clear_ospf()")
+
+
+def redistribute_ospf(tgen, topo, dut, route_type, **kwargs):
+ """
+    Redistribution of routes into ospf.
+
+ Parameters
+ ----------
+ * `tgen`: Topogen object
+ * `topo` : json file data
+ * `dut`: device under test
+ * `route_type`: "static" or "connected" or ....
+ * `kwargs`: pass extra information (see below)
+
+ Usage
+ -----
+ redistribute_ospf(tgen, topo, "r0", "static", delete=True)
+ redistribute_ospf(tgen, topo, "r0", "static", route_map="rmap_ipv4")
+ """
+
+ ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": route_type}]}}}
+ for k, v in kwargs.items():
+ ospf_red[dut]["ospf"]["redistribute"][0][k] = v
+
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase : Failed \n Error: {}".format(result)
+
+
+################################
+# Verification procs
+################################
+@retry(retry_timeout=80)
+def verify_ospf_neighbor(
+ tgen, topo=None, dut=None, input_dict=None, lan=False, expected=True
+):
+ """
+    This API is to verify ospf neighborship by running the
+    "show ip ospf neighbor" command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `dut`: device under test
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `lan` : verify neighbors in lan topology
+    * `expected` : expected results from API, by default True
+
+ Usage
+ -----
+ 1. To check FULL neighbors.
+ verify_ospf_neighbor(tgen, topo, dut=dut)
+
+ 2. To check neighbors with their roles.
+ input_dict = {
+ "r0": {
+ "ospf": {
+ "neighbors": {
+ "r1": {
+ "nbrState": "Full",
+ "role": "DR"
+ },
+ "r2": {
+ "nbrState": "Full",
+ "role": "DROther"
+ },
+ "r3": {
+ "nbrState": "Full",
+ "role": "DROther"
+ }
+ }
+ }
+ }
+ }
+ result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+ logger.debug("Entering lib API: verify_ospf_neighbor()")
+ result = False
+ if topo is None:
+ topo = tgen.json_topo
+
+ if input_dict:
+ for router, rnode in tgen.routers().items():
+ if "ospf" not in topo["routers"][router]:
+ continue
+
+ if dut is not None and dut != router:
+ continue
+
+ logger.info("Verifying OSPF neighborship on router %s:", router)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ip ospf neighbor all json", isjson=True
+ )
+
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF is not running"
+ return errormsg
+
+ ospf_data_list = input_dict[router]["ospf"]
+ ospf_nbr_list = ospf_data_list["neighbors"]
+
+ for ospf_nbr, nbr_data in ospf_nbr_list.items():
+ data_ip = topo["routers"][ospf_nbr]["links"]
+ data_rid = topo["routers"][ospf_nbr]["ospf"]["router_id"]
+ if ospf_nbr in data_ip:
+ nbr_details = nbr_data[ospf_nbr]
+ elif lan:
+ for switch in topo["switches"]:
+ if "ospf" in topo["switches"][switch]["links"][router]:
+ neighbor_ip = data_ip[switch]["ipv4"].split("/")[0]
+ else:
+ continue
+ else:
+ neighbor_ip = data_ip[router]["ipv4"].split("/")[0]
+
+ nh_state = None
+ neighbor_ip = neighbor_ip.lower()
+ nbr_rid = data_rid
+ try:
+ nh_state = show_ospf_json[nbr_rid][0]["nbrState"].split("/")[0]
+ intf_state = show_ospf_json[nbr_rid][0]["nbrState"].split("/")[1]
+ except KeyError:
+ errormsg = "[DUT: {}] OSPF peer {} missing".format(router, nbr_rid)
+ return errormsg
+
+ nbr_state = nbr_data.setdefault("nbrState", None)
+ nbr_role = nbr_data.setdefault("role", None)
+
+ if nbr_state:
+ if nbr_state == nh_state:
+ logger.info(
+ "[DUT: {}] OSPF Nbr is {}:{} State {}".format(
+ router, ospf_nbr, nbr_rid, nh_state
+ )
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF is not Converged, neighbor"
+ " state is {}".format(router, nh_state)
+ )
+ return errormsg
+ if nbr_role:
+ if nbr_role == intf_state:
+ logger.info(
+ "[DUT: {}] OSPF Nbr is {}: {} Role {}".format(
+ router, ospf_nbr, nbr_rid, nbr_role
+ )
+ )
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF is not Converged with rid"
+ "{}, role is {}".format(router, nbr_rid, intf_state)
+ )
+ return errormsg
+ continue
+ else:
+ for router, rnode in tgen.routers().items():
+ if "ospf" not in topo["routers"][router]:
+ continue
+
+ if dut is not None and dut != router:
+ continue
+
+ logger.info("Verifying OSPF neighborship on router %s:", router)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ip ospf neighbor all json", isjson=True
+ )
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF is not running"
+ return errormsg
+
+ ospf_data_list = topo["routers"][router]["ospf"]
+ ospf_neighbors = ospf_data_list["neighbors"]
+            total_peer = len(ospf_neighbors.keys())
+ ospf_nbr_list = ospf_data_list["neighbors"]
+ no_of_peer = 0
+ for ospf_nbr, nbr_data in ospf_nbr_list.items():
+ if nbr_data:
+ data_ip = topo["routers"][nbr_data["nbr"]]["links"]
+ data_rid = topo["routers"][nbr_data["nbr"]]["ospf"]["router_id"]
+ else:
+ data_ip = topo["routers"][ospf_nbr]["links"]
+ data_rid = topo["routers"][ospf_nbr]["ospf"]["router_id"]
+ logger.info("ospf neighbor %s: router-id: %s", router, data_rid)
+ if ospf_nbr in data_ip:
+ nbr_details = nbr_data[ospf_nbr]
+ elif lan:
+ for switch in topo["switches"]:
+ if "ospf" in topo["switches"][switch]["links"][router]:
+ neighbor_ip = data_ip[switch]["ipv4"].split("/")[0]
+ else:
+ continue
+ else:
+ neighbor_ip = data_ip[router]["ipv4"].split("/")[0]
+
+ nh_state = None
+ neighbor_ip = neighbor_ip.lower()
+ nbr_rid = data_rid
+
+ try:
+ nh_state = show_ospf_json[nbr_rid][0]["nbrState"].split("/")[0]
+ except KeyError:
+ errormsg = (
+ "[DUT: {}] missing OSPF neighbor {} with router-id {}".format(
+ router, ospf_nbr, nbr_rid
+ )
+ )
+ return errormsg
+
+ if nh_state == "Full":
+ no_of_peer += 1
+
+ if no_of_peer == total_peer:
+ logger.info("[DUT: {}] OSPF is Converged".format(router))
+ result = True
+ else:
+ errormsg = "[DUT: {}] OSPF is not Converged".format(router)
+ return errormsg
+
+ logger.debug("Exiting API: verify_ospf_neighbor()")
+ return result
+
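+
+# Editorial note: the verification procs here are wrapped by @retry, so a
+# False/errormsg result is re-polled until retry_timeout expires. A hedged
+# call sketch (assumption: a router named "r1" exists in the topology):
+def _example_verify_neighbor(tgen):
+    """Poll until r1's OSPF neighbors are FULL (editorial sketch)."""
+    return verify_ospf_neighbor(tgen, dut="r1")
+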
+
+@retry(retry_timeout=50)
+def verify_ospf6_neighbor(tgen, topo=None, dut=None, input_dict=None, lan=False):
+ """
+    This API is to verify ospf6 neighborship by running the
+    "show ipv6 ospf neighbor" command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `dut`: device under test
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `lan` : verify neighbors in lan topology
+
+ Usage
+ -----
+ 1. To check FULL neighbors.
+    verify_ospf6_neighbor(tgen, topo, dut=dut)
+
+ 2. To check neighbors with their roles.
+ input_dict = {
+ "r0": {
+ "ospf6": {
+ "neighbors": {
+ "r1": {
+ "state": "Full",
+ "role": "DR"
+ },
+ "r2": {
+ "state": "Full",
+ "role": "DROther"
+ },
+ "r3": {
+ "state": "Full",
+ "role": "DROther"
+ }
+ }
+ }
+ }
+ }
+ result = verify_ospf6_neighbor(tgen, topo, dut, input_dict, lan=True)
+
+ 3. To check there are no neighbors.
+ input_dict = {
+ "r0": {
+ "ospf6": {
+ "neighbors": []
+ }
+ }
+ }
+ result = verify_ospf6_neighbor(tgen, topo, dut, input_dict)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+
+ if topo is None:
+ topo = tgen.json_topo
+
+ if input_dict:
+ for router, rnode in tgen.routers().items():
+ if "ospf6" not in topo["routers"][router]:
+ continue
+
+ if dut is not None and dut != router:
+ continue
+
+ logger.info("Verifying OSPF neighborship on router %s:", router)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ipv6 ospf neighbor json", isjson=True
+ )
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF6 is not running"
+ return errormsg
+
+ ospf_data_list = input_dict[router]["ospf6"]
+ ospf_nbr_list = ospf_data_list["neighbors"]
+
+ # Check if looking for no neighbors
+ if ospf_nbr_list == []:
+ if show_ospf_json["neighbors"] == []:
+ logger.info("[DUT: {}] OSPF6 no neighbors found".format(router))
+ return True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF6 active neighbors found, expected None".format(
+ router
+ )
+ )
+ return errormsg
+
+ for ospf_nbr, nbr_data in ospf_nbr_list.items():
+ try:
+ data_ip = data_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"]
+ except KeyError:
+ data_ip = data_rid = topo["routers"][nbr_data["nbr"]]["ospf6"][
+ "router_id"
+ ]
+
+ if ospf_nbr in data_ip:
+ nbr_details = nbr_data[ospf_nbr]
+ elif lan:
+ for switch in topo["switches"]:
+ if "ospf6" in topo["switches"][switch]["links"][router]:
+ neighbor_ip = data_ip
+ else:
+ continue
+ else:
+ neighbor_ip = data_ip[router]["ipv6"].split("/")[0]
+
+ nh_state = None
+ neighbor_ip = neighbor_ip.lower()
+ nbr_rid = data_rid
+ get_index_val = dict(
+ (d["neighborId"], dict(d, index=index))
+ for (index, d) in enumerate(show_ospf_json["neighbors"])
+ )
+ try:
+ nh_state = get_index_val.get(neighbor_ip)["state"]
+ intf_state = get_index_val.get(neighbor_ip)["ifState"]
+ except TypeError:
+ errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format(
+ router, nbr_rid, ospf_nbr
+ )
+ return errormsg
+
+ nbr_state = nbr_data.setdefault("state", None)
+ nbr_role = nbr_data.setdefault("role", None)
+
+ if nbr_state:
+ if nbr_state == nh_state:
+ logger.info(
+ "[DUT: {}] OSPF6 Nbr is {}:{} State {}".format(
+ router, ospf_nbr, nbr_rid, nh_state
+ )
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF6 is not Converged, neighbor"
+ " state is {} , Expected state is {}".format(
+ router, nh_state, nbr_state
+ )
+ )
+ return errormsg
+ if nbr_role:
+ if nbr_role == intf_state:
+ logger.info(
+ "[DUT: {}] OSPF6 Nbr is {}: {} Role {}".format(
+ router, ospf_nbr, nbr_rid, nbr_role
+ )
+ )
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF6 is not Converged with rid"
+ "{}, role is {}, Expected role is {}".format(
+ router, nbr_rid, intf_state, nbr_role
+ )
+ )
+ return errormsg
+ continue
+ else:
+ for router, rnode in tgen.routers().items():
+ if "ospf6" not in topo["routers"][router]:
+ continue
+
+ if dut is not None and dut != router:
+ continue
+
+ logger.info("Verifying OSPF6 neighborship on router %s:", router)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ipv6 ospf neighbor json", isjson=True
+ )
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF6 is not running"
+ return errormsg
+
+ ospf_data_list = topo["routers"][router]["ospf6"]
+ ospf_neighbors = ospf_data_list["neighbors"]
+            total_peer = len(ospf_neighbors.keys())
+ ospf_nbr_list = ospf_data_list["neighbors"]
+ no_of_peer = 0
+ for ospf_nbr, nbr_data in ospf_nbr_list.items():
+ try:
+ data_ip = data_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"]
+ except KeyError:
+ data_ip = data_rid = topo["routers"][nbr_data["nbr"]]["ospf6"][
+ "router_id"
+ ]
+ logger.info("ospf neighbor %s: router-id: %s", ospf_nbr, data_rid)
+ if ospf_nbr in data_ip:
+ nbr_details = nbr_data[ospf_nbr]
+ elif lan:
+ for switch in topo["switches"]:
+ if "ospf6" in topo["switches"][switch]["links"][router]:
+ neighbor_ip = data_ip
+ else:
+ continue
+ else:
+ neighbor_ip = data_ip
+
+ nh_state = None
+ neighbor_ip = neighbor_ip.lower()
+ nbr_rid = data_rid
+ get_index_val = dict(
+ (d["neighborId"], dict(d, index=index))
+ for (index, d) in enumerate(show_ospf_json["neighbors"])
+ )
+ try:
+ nh_state = get_index_val.get(neighbor_ip)["state"]
+ intf_state = get_index_val.get(neighbor_ip)["ifState"]
+ except TypeError:
+ errormsg = (
+ "[DUT: {}] missing OSPF neighbor {} with router-id {}".format(
+ router, ospf_nbr, nbr_rid
+ )
+ )
+ return errormsg
+
+ if nh_state == "Full":
+ no_of_peer += 1
+
+ if no_of_peer == total_peer:
+ logger.info("[DUT: {}] OSPF6 is Converged".format(router))
+ result = True
+ else:
+ errormsg = "[DUT: {}] OSPF6 is not Converged".format(router)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+@retry(retry_timeout=40)
+def verify_ospf_rib(
+ tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None, expected=True
+):
+ """
+ This API is to verify ospf routes by running
+ show ip ospf route command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `dut`: device under test
+ * `input_dict` : Input dict data, required when configuring from testcase
+    * `next_hop` : next hop to be verified
+    * `tag` : tag to be verified
+    * `metric` : metric to be verified
+    * `fib` : True if the route is installed in FIB.
+    * `expected` : expected results from API, by default True
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": ip_net,
+ "no_of_ip": 1,
+ "routeType": "N"
+ }
+ ]
+ }
+ }
+
+ result = verify_ospf_rib(tgen, dut, input_dict,next_hop=nh)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+
+ logger.info("Entering lib API: verify_ospf_rib()")
+ result = False
+ router_list = tgen.routers()
+ additional_nexthops_in_required_nhs = []
+ found_hops = []
+ for routerInput in input_dict.keys():
+ for router, rnode in router_list.items():
+ if router != dut:
+ continue
+
+ logger.info("Checking router %s RIB:", router)
+
+ # Verifying RIB routes
+ command = "show ip ospf route"
+
+ found_routes = []
+ missing_routes = []
+
+ if (
+ "static_routes" in input_dict[routerInput]
+ or "prefix" in input_dict[routerInput]
+ ):
+ if "prefix" in input_dict[routerInput]:
+ static_routes = input_dict[routerInput]["prefix"]
+ else:
+ static_routes = input_dict[routerInput]["static_routes"]
+
+ for static_route in static_routes:
+ cmd = "{}".format(command)
+
+ cmd = "{} json".format(cmd)
+
+ ospf_rib_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ # Verifying output dictionary ospf_rib_json is not empty
+ if bool(ospf_rib_json) is False:
+ errormsg = (
+ "[DUT: {}] No routes found in OSPF route "
+ "table".format(router)
+ )
+ return errormsg
+
+ network = static_route["network"]
+ no_of_ip = static_route.setdefault("no_of_ip", 1)
+ _tag = static_route.setdefault("tag", None)
+ _rtype = static_route.setdefault("routeType", None)
+
+ # Generating IPs for verification
+ ip_list = generate_ips(network, no_of_ip)
+ st_found = False
+ nh_found = False
+
+ for st_rt in ip_list:
+ st_rt = str(ipaddress.ip_network(frr_unicode(st_rt)))
+
+ _addr_type = validate_ip_address(st_rt)
+ if _addr_type != "ipv4":
+ continue
+
+ if st_rt in ospf_rib_json:
+ st_found = True
+ found_routes.append(st_rt)
+
+ if fib and next_hop:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+
+ for mnh in range(0, len(ospf_rib_json[st_rt])):
+ if (
+ "fib"
+ in ospf_rib_json[st_rt][mnh]["nexthops"][0]
+ ):
+ found_hops.append(
+ [
+ rib_r["ip"]
+ for rib_r in ospf_rib_json[st_rt][mnh][
+ "nexthops"
+ ]
+ ]
+ )
+
+ if found_hops[0]:
+ missing_list_of_nexthops = set(
+ found_hops[0]
+ ).difference(next_hop)
+ additional_nexthops_in_required_nhs = set(
+ next_hop
+ ).difference(found_hops[0])
+
+ if additional_nexthops_in_required_nhs:
+ logger.info(
+ "Nexthop "
+ "%s is not active for route %s in "
+ "RIB of router %s\n",
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ errormsg = (
+ "Nexthop {} is not active"
+ " for route {} in RIB of router"
+ " {}\n".format(
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ )
+ return errormsg
+ else:
+ nh_found = True
+
+ elif next_hop and fib is None:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+ found_hops = [
+ rib_r["ip"]
+ for rib_r in ospf_rib_json[st_rt]["nexthops"]
+ ]
+
+ if found_hops:
+ missing_list_of_nexthops = set(
+ found_hops
+ ).difference(next_hop)
+ additional_nexthops_in_required_nhs = set(
+ next_hop
+ ).difference(found_hops)
+
+ if additional_nexthops_in_required_nhs:
+ logger.info(
+ "Missing nexthop %s for route"
+ " %s in RIB of router %s\n",
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ errormsg = (
+ "Nexthop {} is Missing for "
+ "route {} in RIB of router {}\n".format(
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ )
+ return errormsg
+ else:
+ nh_found = True
+ if _rtype:
+ if "routeType" not in ospf_rib_json[st_rt]:
+ errormsg = (
+ "[DUT: {}]: routeType missing"
+ " for route {} in OSPF RIB \n".format(
+ dut, st_rt
+ )
+ )
+ return errormsg
+ elif _rtype != ospf_rib_json[st_rt]["routeType"]:
+ errormsg = (
+ "[DUT: {}]: routeType mismatch"
+ " for route {} in OSPF RIB \n".format(
+ dut, st_rt
+ )
+ )
+ return errormsg
+ else:
+ logger.info(
+ "[DUT: {}]: Found routeType {}"
+ " for route {}".format(dut, _rtype, st_rt)
+ )
+ if tag:
+ if "tag" not in ospf_rib_json[st_rt]:
+ errormsg = (
+ "[DUT: {}]: tag is not"
+ " present for"
+ " route {} in RIB \n".format(dut, st_rt)
+ )
+ return errormsg
+
+ if _tag != ospf_rib_json[st_rt]["tag"]:
+ errormsg = (
+ "[DUT: {}]: tag value {}"
+ " is not matched for"
+ " route {} in RIB \n".format(
+ dut,
+ _tag,
+ st_rt,
+ )
+ )
+ return errormsg
+
+ if metric is not None:
+ if "type2cost" not in ospf_rib_json[st_rt]:
+ errormsg = (
+ "[DUT: {}]: metric is"
+ " not present for"
+ " route {} in RIB \n".format(dut, st_rt)
+ )
+ return errormsg
+
+ if metric != ospf_rib_json[st_rt]["type2cost"]:
+ errormsg = (
+ "[DUT: {}]: metric value "
+ "{} is not matched for "
+ "route {} in RIB \n".format(
+ dut,
+ metric,
+ st_rt,
+ )
+ )
+ return errormsg
+
+ else:
+ missing_routes.append(st_rt)
+
+ if nh_found:
+ logger.info(
+ "[DUT: {}]: Found next_hop {} for all OSPF"
+ " routes in RIB".format(router, next_hop)
+ )
+
+ if len(missing_routes) > 0:
+ errormsg = "[DUT: {}]: Missing route in RIB, " "routes: {}".format(
+ dut, missing_routes
+ )
+ return errormsg
+
+ if found_routes:
+ logger.info(
+ "[DUT: %s]: Verified routes in RIB, found" " routes are: %s\n",
+ dut,
+ found_routes,
+ )
+ result = True
+
+ logger.info("Exiting lib API: verify_ospf_rib()")
+ return result
+
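+
+# Editorial sketch (hypothetical prefix and nexthop): verify_ospf_rib()
+# matches generated prefixes against "show ip ospf route json"; routeType
+# "N" asserts an intra-area network route.
+def _example_verify_rib(tgen):
+    """Check that 10.0.20.0/24 is in r1's OSPF RIB via 10.0.3.2 (sketch)."""
+    input_dict = {
+        "r1": {
+            "static_routes": [
+                {"network": "10.0.20.0/24", "no_of_ip": 1, "routeType": "N"}
+            ]
+        }
+    }
+    return verify_ospf_rib(tgen, "r1", input_dict, next_hop="10.0.3.2")
+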
+
+@retry(retry_timeout=20)
+def verify_ospf_interface(
+ tgen, topo=None, dut=None, lan=False, input_dict=None, expected=True
+):
+ """
+    This API is to verify ospf interface parameters by running the
+    "show ip ospf interface" command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : topology descriptions
+ * `dut`: device under test
+ * `lan`: if set to true this interface belongs to LAN.
+ * `input_dict` : Input dict data, required when configuring from testcase
+    * `expected` : expected results from API, by default True
+
+ Usage
+ -----
+ input_dict= {
+ 'r0': {
+ 'links':{
+ 's1': {
+ 'ospf':{
+ 'priority':98,
+ 'timerDeadSecs': 4,
+ 'area': '0.0.0.3',
+ 'mcastMemberOspfDesignatedRouters': True,
+ 'mcastMemberOspfAllRouters': True,
+ 'ospfEnabled': True,
+
+ }
+ }
+ }
+ }
+ }
+ result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+
+ logger.debug("Entering lib API: verify_ospf_interface()")
+ result = False
+ if topo is None:
+ topo = tgen.json_topo
+
+ for router, rnode in tgen.routers().items():
+ if "ospf" not in topo["routers"][router]:
+ continue
+
+ if dut is not None and dut != router:
+ continue
+
+ logger.info("Verifying OSPF interface on router %s:", router)
+ show_ospf_json = run_frr_cmd(rnode, "show ip ospf interface json", isjson=True)
+
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF is not running"
+ return errormsg
+
+ # To find neighbor ip type
+ ospf_intf_data = input_dict[router]["links"]
+ for ospf_intf, intf_data in ospf_intf_data.items():
+ intf = topo["routers"][router]["links"][ospf_intf]["interface"]
+ if intf in show_ospf_json["interfaces"]:
+ for intf_attribute in intf_data["ospf"]:
+ if (
+ intf_data["ospf"][intf_attribute]
+ == show_ospf_json["interfaces"][intf][intf_attribute]
+ ):
+ logger.info(
+ "[DUT: %s] OSPF interface %s: %s is %s",
+ router,
+ intf,
+ intf_attribute,
+ intf_data["ospf"][intf_attribute],
+ )
+ else:
+ errormsg = "[DUT: {}] OSPF interface {}: {} is {}, \
+ Expected is {}".format(
+ router,
+ intf,
+ intf_attribute,
+ intf_data["ospf"][intf_attribute],
+ show_ospf_json["interfaces"][intf][intf_attribute],
+ )
+ return errormsg
+ result = True
+ logger.debug("Exiting API: verify_ospf_interface()")
+ return result
+
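+
+# Editorial sketch (illustrative attribute values): verify_ospf_interface()
+# compares every key under links/<peer>/ospf against the JSON output of
+# "show ip ospf interface".
+def _example_verify_interface(tgen, topo):
+    """Check area and dead-interval attributes on r0's link to r1 (sketch)."""
+    input_dict = {
+        "r0": {"links": {"r1": {"ospf": {"area": "0.0.0.0", "timerDeadSecs": 4}}}}
+    }
+    return verify_ospf_interface(tgen, topo, dut="r0", input_dict=input_dict)
+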
+
+@retry(retry_timeout=40)
+def verify_ospf_database(
+ tgen, topo, dut, input_dict, vrf=None, lsatype=None, rid=None, expected=True
+):
+ """
+ This API is to verify ospf lsa's by running
+ show ip ospf database command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `dut`: device under test
+ * `input_dict` : Input dict data, required when configuring from testcase
+    * `topo` : json file data
+    * `expected` : expected results from API, by default True
+
+ Usage
+ -----
+ input_dict = {
+ "areas": {
+ "0.0.0.0": {
+ "Router Link States": {
+ "100.1.1.0-100.1.1.0": {
+ "LSID": "100.1.1.0",
+ "Advertised router": "100.1.1.0",
+ "LSA Age": 130,
+ "Sequence Number": "80000006",
+ "Checksum": "a703",
+ "Router links": 3
+ }
+ },
+ "Net Link States": {
+ "10.0.0.2-100.1.1.1": {
+ "LSID": "10.0.0.2",
+ "Advertised router": "100.1.1.1",
+ "LSA Age": 137,
+ "Sequence Number": "80000001",
+ "Checksum": "9583"
+ }
+ },
+ },
+ }
+ }
+ result = verify_ospf_database(tgen, topo, dut, input_dict)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+
+ result = False
+ router = dut
+ logger.debug("Entering lib API: verify_ospf_database()")
+
+ if "ospf" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF is not configured on the router.".format(dut)
+ return errormsg
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("Verifying OSPF interface on router %s:", dut)
+
+ if not rid:
+ rid = "self-originate"
+ if lsatype:
+ if vrf is None:
+ command = "show ip ospf database {} {} json".format(lsatype, rid)
+ else:
+ command = "show ip ospf database {} {} vrf {} json".format(
+ lsatype, rid, vrf
+ )
+ else:
+ if vrf is None:
+ command = "show ip ospf database json"
+ else:
+ command = "show ip ospf database vrf {} json".format(vrf)
+
+ show_ospf_json = run_frr_cmd(rnode, command, isjson=True)
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF is not running"
+ return errormsg
+
+    # for intra- and inter-area LSAs
+ ospf_db_data = input_dict.setdefault("areas", None)
+ ospf_external_lsa = input_dict.setdefault("AS External Link States", None)
+ if ospf_db_data:
+ for ospf_area, area_lsa in ospf_db_data.items():
+ if ospf_area in show_ospf_json["routerLinkStates"]["areas"]:
+ if "routerLinkStates" in area_lsa:
+ for lsa in area_lsa["routerLinkStates"]:
+ _advrtr = lsa.setdefault("advertisedRouter", None)
+ _options = lsa.setdefault("options", None)
+
+ if (
+ _options
+ and lsa["lsaId"]
+ == show_ospf_json["routerLinkStates"]["areas"][ospf_area][
+ 0
+ ]["linkStateId"]
+ and lsa["options"]
+ == show_ospf_json["routerLinkStates"]["areas"][ospf_area][
+ 0
+ ]["options"]
+ ):
+ result = True
+ break
+ else:
+                        errormsg = (
+                            "[DUT: {}] OSPF LSA mismatch: expected options"
+                            " {} lsaId {}, received options {} lsaId {}".format(
+                                dut,
+                                lsa["options"],
+                                lsa["lsaId"],
+                                show_ospf_json["routerLinkStates"]["areas"][
+                                    ospf_area
+                                ][0]["options"],
+                                show_ospf_json["routerLinkStates"]["areas"][
+                                    ospf_area
+                                ][0]["linkStateId"],
+                            )
+                        )
+ return errormsg
+ if "Net Link States" in area_lsa:
+ for lsa in area_lsa["Net Link States"]:
+ if lsa in show_ospf_json["areas"][ospf_area]["Net Link States"]:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Network " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Network LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+ if "Summary Link States" in area_lsa:
+ for lsa in area_lsa["Summary Link States"]:
+ if (
+ lsa
+ in show_ospf_json["areas"][ospf_area]["Summary Link States"]
+ ):
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Summary " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Summary LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+ if "ASBR-Summary Link States" in area_lsa:
+ for lsa in area_lsa["ASBR-Summary Link States"]:
+ if (
+ lsa
+ in show_ospf_json["areas"][ospf_area][
+ "ASBR-Summary Link States"
+ ]
+ ):
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:ASBR Summary " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " ASBR Summary LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+ if ospf_external_lsa:
+ for ospf_ext_lsa, ext_lsa_data in ospf_external_lsa.items():
+ if ospf_ext_lsa in show_ospf_json["AS External Link States"]:
+ logger.info(
+ "[DUT: %s] OSPF LSDB:External LSA %s", router, ospf_ext_lsa
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB : expected"
+ " External LSA is {}".format(router, ospf_ext_lsa)
+ )
+ return errormsg
+
+ logger.debug("Exiting API: verify_ospf_database()")
+ return result
+
+
+@retry(retry_timeout=20)
+def verify_ospf_summary(tgen, topo, dut, input_dict, ospf=None, expected=True):
+ """
+    This API is to verify ospf summary-address information by running the
+    "show ip ospf summary detail" command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : topology descriptions
+ * `dut`: device under test
+    * `input_dict` : Input dict data, required when configuring from testcase
+    * `ospf` : if set, verify OSPFv3 ("show ipv6 ospf summary detail")
+
+ Usage
+ -----
+ input_dict = {
+ "11.0.0.0/8": {
+ "summaryAddress": "11.0.0.0/8",
+ "metricType": "E2",
+ "metric": 20,
+ "tag": 0,
+ "externalRouteCount": 5
+ }
+ }
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+ router = dut
+
+ logger.info("Verifying OSPF summary on router %s:", router)
+
+ rnode = tgen.routers()[dut]
+
+ if ospf:
+ if "ospf6" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF6 is not configured on the router.".format(router)
+ return errormsg
+
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ipv6 ospf summary detail json", isjson=True
+ )
+ else:
+ if "ospf" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF is not configured on the router.".format(router)
+ return errormsg
+
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ip ospf summary detail json", isjson=True
+ )
+
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF is not running"
+ return errormsg
+
+ # To find neighbor ip type
+ ospf_summary_data = input_dict
+
+ if ospf:
+ show_ospf_json = show_ospf_json["default"]
+
+ for ospf_summ, summ_data in ospf_summary_data.items():
+ if ospf_summ not in show_ospf_json:
+ continue
+ summary = ospf_summary_data[ospf_summ]["summaryAddress"]
+
+ if summary in show_ospf_json:
+ for summ in summ_data:
+ if summ_data[summ] == show_ospf_json[summary][summ]:
+ logger.info(
+ "[DUT: %s] OSPF summary %s:%s is %s",
+ router,
+ summary,
+ summ,
+ summ_data[summ],
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF summary {} : {} is {}, "
+ "Expected is {}".format(
+ router,
+ summary,
+ summ,
+ show_ospf_json[summary][summ],
+ summ_data[summ],
+ )
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
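+
+# Editorial sketch (hypothetical summary prefix): verify_ospf_summary()
+# compares each key of an input entry against "show ip ospf summary detail".
+def _example_verify_summary(tgen, topo):
+    """Check an E2 summary-address entry on r0 (editorial sketch)."""
+    input_dict = {
+        "11.0.0.0/8": {"summaryAddress": "11.0.0.0/8", "metricType": "E2"}
+    }
+    return verify_ospf_summary(tgen, topo, "r0", input_dict)
+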
+
+@retry(retry_timeout=30)
+def verify_ospf6_rib(
+ tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None
+):
+ """
+    This API is to verify ospf6 routes by running the
+    "show ipv6 ospf route" command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `dut`: device under test
+ * `input_dict` : Input dict data, required when configuring from testcase
+    * `next_hop` : next hop to be verified
+ * `tag` : tag to be verified
+ * `metric` : metric to be verified
+ * `fib` : True if the route is installed in FIB.
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": ip_net,
+ "no_of_ip": 1,
+ "routeType": "N"
+ }
+ ]
+ }
+ }
+
+ result = verify_ospf6_rib(tgen, dut, input_dict,next_hop=nh)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+ router_list = tgen.routers()
+ additional_nexthops_in_required_nhs = []
+ found_hops = []
+ for routerInput in input_dict.keys():
+ for router, rnode in router_list.items():
+ if router != dut:
+ continue
+
+ logger.info("Checking router %s RIB:", router)
+
+ # Verifying RIB routes
+ command = "show ipv6 ospf route detail"
+
+ found_routes = []
+ missing_routes = []
+
+ if (
+ "static_routes" in input_dict[routerInput]
+ or "prefix" in input_dict[routerInput]
+ ):
+ if "prefix" in input_dict[routerInput]:
+ static_routes = input_dict[routerInput]["prefix"]
+ else:
+ static_routes = input_dict[routerInput]["static_routes"]
+
+ for static_route in static_routes:
+ cmd = "{}".format(command)
+
+ cmd = "{} json".format(cmd)
+
+ ospf_rib_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ # Fix for PR 2644182
+ try:
+ ospf_rib_json = ospf_rib_json["routes"]
+ except KeyError:
+ pass
+
+ # Verifying output dictionary ospf_rib_json is not empty
+ if bool(ospf_rib_json) is False:
+ errormsg = (
+ "[DUT: {}] No routes found in OSPF6 route "
+ "table".format(router)
+ )
+ return errormsg
+
+ network = static_route["network"]
+ no_of_ip = static_route.setdefault("no_of_ip", 1)
+ _tag = static_route.setdefault("tag", None)
+ _rtype = static_route.setdefault("routeType", None)
+
+ # Generating IPs for verification
+ ip_list = generate_ips(network, no_of_ip)
+ if len(ip_list) == 1:
+ ip_list = [network]
+ st_found = False
+ nh_found = False
+ for st_rt in ip_list:
+ st_rt = str(ipaddress.ip_network(frr_unicode(st_rt)))
+
+ _addr_type = validate_ip_address(st_rt)
+ if _addr_type != "ipv6":
+ continue
+
+ if st_rt in ospf_rib_json:
+ st_found = True
+ found_routes.append(st_rt)
+
+ if fib and next_hop:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+
+ for mnh in range(0, len(ospf_rib_json[st_rt])):
+ if (
+ "fib"
+ in ospf_rib_json[st_rt][mnh]["nextHops"][0]
+ ):
+ found_hops.append(
+ [
+ rib_r["ip"]
+ for rib_r in ospf_rib_json[st_rt][mnh][
+ "nextHops"
+ ]
+ ]
+ )
+
+ if found_hops[0]:
+ missing_list_of_nexthops = set(
+ found_hops[0]
+ ).difference(next_hop)
+ additional_nexthops_in_required_nhs = set(
+ next_hop
+ ).difference(found_hops[0])
+
+ if additional_nexthops_in_required_nhs:
+ logger.info(
+ "Nexthop "
+ "%s is not active for route %s in "
+ "RIB of router %s\n",
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ errormsg = (
+ "Nexthop {} is not active"
+ " for route {} in RIB of router"
+ " {}\n".format(
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ )
+ return errormsg
+ else:
+ nh_found = True
+
+ elif next_hop and fib is None:
+ if type(next_hop) is not list:
+ next_hop = [next_hop]
+ found_hops = [
+ rib_r["nextHop"]
+ for rib_r in ospf_rib_json[st_rt]["nextHops"]
+ ]
+
+ if found_hops:
+ missing_list_of_nexthops = set(
+ found_hops
+ ).difference(next_hop)
+ additional_nexthops_in_required_nhs = set(
+ next_hop
+ ).difference(found_hops)
+ if additional_nexthops_in_required_nhs:
+ logger.info(
+ "Missing nexthop %s for route"
+ " %s in RIB of router %s\n",
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ errormsg = (
+ "Nexthop {} is Missing for "
+ "route {} in RIB of router {}\n".format(
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ )
+ return errormsg
+ else:
+ nh_found = True
+ if _rtype:
+ if "destinationType" not in ospf_rib_json[st_rt]:
+ errormsg = (
+ "[DUT: {}]: destinationType missing"
+ "for route {} in OSPF RIB \n".format(dut, st_rt)
+ )
+ return errormsg
+ elif _rtype != ospf_rib_json[st_rt]["destinationType"]:
+ errormsg = (
+ "[DUT: {}]: destinationType mismatch"
+ "for route {} in OSPF RIB \n".format(dut, st_rt)
+ )
+ return errormsg
+ else:
+ logger.info(
+ "DUT: {}]: Found destinationType {}"
+ "for route {}".format(dut, _rtype, st_rt)
+ )
+ if tag:
+ if "tag" not in ospf_rib_json[st_rt]:
+ errormsg = (
+ "[DUT: {}]: tag is not"
+ " present for"
+ " route {} in RIB \n".format(dut, st_rt)
+ )
+ return errormsg
+
+ if _tag != ospf_rib_json[st_rt]["tag"]:
+ errormsg = (
+ "[DUT: {}]: tag value {}"
+ " is not matched for"
+ " route {} in RIB \n".format(
+ dut,
+ _tag,
+ st_rt,
+ )
+ )
+ return errormsg
+
+ if metric is not None:
+ if "metricCostE2" not in ospf_rib_json[st_rt]:
+ errormsg = (
+ "[DUT: {}]: metric is"
+ " not present for"
+ " route {} in RIB \n".format(dut, st_rt)
+ )
+ return errormsg
+
+ if metric != ospf_rib_json[st_rt]["metricCostE2"]:
+ errormsg = (
+ "[DUT: {}]: metric value "
+ "{} is not matched for "
+ "route {} in RIB \n".format(
+ dut,
+ metric,
+ st_rt,
+ )
+ )
+ return errormsg
+
+ else:
+ missing_routes.append(st_rt)
+
+ if nh_found:
+ logger.info(
+ "[DUT: {}]: Found next_hop {} for all OSPF"
+ " routes in RIB".format(router, next_hop)
+ )
+
+ if len(missing_routes) > 0:
+ errormsg = "[DUT: {}]: Missing route in RIB, " "routes: {}".format(
+ dut, missing_routes
+ )
+ return errormsg
+
+ if found_routes:
+ logger.info(
+ "[DUT: %s]: Verified routes in RIB, found" " routes are: %s\n",
+ dut,
+ found_routes,
+ )
+ result = True
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
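+
+# Editorial sketch (hypothetical IPv6 prefix): the OSPFv3 RIB checker mirrors
+# verify_ospf_rib() but keys off "show ipv6 ospf route detail json".
+def _example_verify6_rib(tgen):
+    """Check that 2013:13::/64 is installed in r1's OSPF6 RIB (sketch)."""
+    input_dict = {
+        "r1": {"static_routes": [{"network": "2013:13::/64", "no_of_ip": 1}]}
+    }
+    return verify_ospf6_rib(tgen, "r1", input_dict)
+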
+
+@retry(retry_timeout=6)
+def verify_ospf6_interface(tgen, topo=None, dut=None, lan=False, input_dict=None):
+ """
+    This API is to verify ospf6 interface parameters by running the
+    "show ipv6 ospf interface" command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : topology descriptions
+ * `dut`: device under test
+ * `lan`: if set to true this interface belongs to LAN.
+ * `input_dict` : Input dict data, required when configuring from testcase
+
+ Usage
+ -----
+ input_dict= {
+ 'r0': {
+ 'links':{
+ 's1': {
+ 'ospf6':{
+ 'priority':98,
+ 'timerDeadSecs': 4,
+ 'area': '0.0.0.3',
+ 'mcastMemberOspfDesignatedRouters': True,
+ 'mcastMemberOspfAllRouters': True,
+ 'ospfEnabled': True,
+
+ }
+ }
+ }
+ }
+ }
+    result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+
+ logger.debug("Entering lib API: verify_ospf6_interface")
+ result = False
+
+ if topo is None:
+ topo = tgen.json_topo
+
+ for router, rnode in tgen.routers().items():
+ if "ospf6" not in topo["routers"][router]:
+ continue
+
+ if dut is not None and dut != router:
+ continue
+
+ logger.info("Verifying OSPF interface on router %s:", router)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ipv6 ospf interface json", isjson=True
+ )
+
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF6 is not running"
+ return errormsg
+
+ # To find neighbor ip type
+ ospf_intf_data = input_dict[router]["links"]
+ for ospf_intf, intf_data in ospf_intf_data.items():
+ intf = topo["routers"][router]["links"][ospf_intf]["interface"]
+ if intf in show_ospf_json:
+ for intf_attribute in intf_data["ospf6"]:
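+                    # Scalar attributes are compared directly below;
+                    # list-valued attributes (e.g. internetAddress) are
+                    # matched on the address portion of each entry.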
+                    if type(intf_data["ospf6"][intf_attribute]) is not list:
+                        if (
+                            intf_data["ospf6"][intf_attribute]
+                            == show_ospf_json[intf][intf_attribute]
+                        ):
+                            logger.info(
+                                "[DUT: %s] OSPF6 interface %s: %s is %s",
+                                router,
+                                intf,
+                                intf_attribute,
+                                intf_data["ospf6"][intf_attribute],
+                            )
+                        else:
+                            errormsg = "[DUT: {}] OSPF6 interface {}: {} is {}, \
+                                Expected is {}".format(
+                                router,
+                                intf,
+                                intf_attribute,
+                                show_ospf_json[intf][intf_attribute],
+                                intf_data["ospf6"][intf_attribute],
+                            )
+                            return errormsg
+                    else:
+                        for addr_list in range(
+                            len(show_ospf_json[intf][intf_attribute])
+                        ):
+                            if (
+                                show_ospf_json[intf][intf_attribute][addr_list][
+                                    "address"
+                                ].split("/")[0]
+                                == intf_data["ospf6"]["internetAddress"][0]["address"]
+                            ):
+                                break
+                        else:
+                            errormsg = "[DUT: {}] OSPF6 interface {}: {} is {}, \
+                                Expected is {}".format(
+                                router,
+                                intf,
+                                intf_attribute,
+                                show_ospf_json[intf][intf_attribute],
+                                intf_data["ospf6"][intf_attribute],
+                            )
+                            return errormsg
+ result = True
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+@retry(retry_timeout=20)
+def verify_ospf6_database(tgen, topo, dut, input_dict):
+ """
+    This API is to verify OSPF LSAs by running the
+    "show ip ospf database" command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `dut`: device under test
+ * `input_dict` : Input dict data, required when configuring from testcase
+    * `topo` : topology descriptions
+
+ Usage
+ -----
+ input_dict = {
+ "areas": {
+ "0.0.0.0": {
+ "routerLinkStates": {
+ "100.1.1.0-100.1.1.0": {
+ "LSID": "100.1.1.0",
+ "Advertised router": "100.1.1.0",
+ "LSA Age": 130,
+ "Sequence Number": "80000006",
+ "Checksum": "a703",
+ "Router links": 3
+ }
+ },
+ "networkLinkStates": {
+ "10.0.0.2-100.1.1.1": {
+ "LSID": "10.0.0.2",
+ "Advertised router": "100.1.1.1",
+ "LSA Age": 137,
+ "Sequence Number": "80000001",
+ "Checksum": "9583"
+ }
+ },
+ },
+ }
+ }
+    result = verify_ospf6_database(tgen, topo, dut, input_dict)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+
+ result = False
+ router = dut
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if "ospf" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF is not configured on the router.".format(dut)
+ return errormsg
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("Verifying OSPF interface on router %s:", dut)
+ show_ospf_json = run_frr_cmd(rnode, "show ip ospf database json", isjson=True)
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF is not running"
+ return errormsg
+
+    # for intra- and inter-area LSAs
+ ospf_db_data = input_dict.setdefault("areas", None)
+ ospf_external_lsa = input_dict.setdefault("asExternalLinkStates", None)
+
+ if ospf_db_data:
+ for ospf_area, area_lsa in ospf_db_data.items():
+ if ospf_area in show_ospf_json["areas"]:
+ if "routerLinkStates" in area_lsa:
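+                    # Each expected LSA is searched for in the LSDB; the
+                    # for/else below errors only when no entry matched.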
+ for lsa in area_lsa["routerLinkStates"]:
+ for rtrlsa in show_ospf_json["areas"][ospf_area][
+ "routerLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == rtrlsa["lsaId"]
+ and lsa["advertisedRouter"]
+ == rtrlsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Router " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Router LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+
+ if "networkLinkStates" in area_lsa:
+ for lsa in area_lsa["networkLinkStates"]:
+ for netlsa in show_ospf_json["areas"][ospf_area][
+ "networkLinkStates"
+ ]:
+ if (
+ lsa
+ in show_ospf_json["areas"][ospf_area][
+ "networkLinkStates"
+ ]
+ ):
+ if (
+ lsa["lsaId"] == netlsa["lsaId"]
+ and lsa["advertisedRouter"]
+ == netlsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Network " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Network LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+
+ if "summaryLinkStates" in area_lsa:
+ for lsa in area_lsa["summaryLinkStates"]:
+ for t3lsa in show_ospf_json["areas"][ospf_area][
+ "summaryLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t3lsa["lsaId"]
+ and lsa["advertisedRouter"] == t3lsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Summary " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Summary LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+
+ if "nssaExternalLinkStates" in area_lsa:
+ for lsa in area_lsa["nssaExternalLinkStates"]:
+ for t7lsa in show_ospf_json["areas"][ospf_area][
+ "nssaExternalLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t7lsa["lsaId"]
+ and lsa["advertisedRouter"] == t7lsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Type7 " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Type7 LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+
+ if "asbrSummaryLinkStates" in area_lsa:
+ for lsa in area_lsa["asbrSummaryLinkStates"]:
+ for t4lsa in show_ospf_json["areas"][ospf_area][
+ "asbrSummaryLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t4lsa["lsaId"]
+ and lsa["advertisedRouter"] == t4lsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:ASBR Summary " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " ASBR Summary LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+
+ if "linkLocalOpaqueLsa" in area_lsa:
+ for lsa in area_lsa["linkLocalOpaqueLsa"]:
+ try:
+ for lnklsa in show_ospf_json["areas"][ospf_area][
+ "linkLocalOpaqueLsa"
+ ]:
+ if (
+ lsa["lsaId"] in lnklsa["lsaId"]
+ and "linkLocalOpaqueLsa"
+ in show_ospf_json["areas"][ospf_area]
+ ):
+ logger.info(
+ (
+ "[DUT: FRR] OSPF LSDB area %s:Opaque-LSA"
+ "%s",
+ ospf_area,
+ lsa,
+ )
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: FRR] OSPF LSDB area: {} "
+ "expected Opaque-LSA is {}, Found is {}".format(
+ ospf_area, lsa, show_ospf_json
+ )
+ )
+ raise ValueError(errormsg)
+ except KeyError:
+ errormsg = "[DUT: FRR] linkLocalOpaqueLsa Not " "present"
+ return errormsg
+
+ if ospf_external_lsa:
+ for lsa in ospf_external_lsa:
+ try:
+ for t5lsa in show_ospf_json["asExternalLinkStates"]:
+ if (
+ lsa["lsaId"] == t5lsa["lsaId"]
+ and lsa["advertisedRouter"] == t5lsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ except KeyError:
+ result = False
+ if result:
+ logger.info("[DUT: %s] OSPF LSDB:External LSA %s", router, lsa)
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB : expected"
+ " External LSA is {}".format(router, lsa)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+def config_ospf6_interface(
+ tgen, topo=None, input_dict=None, build=False, load_config=True
+):
+ """
+ API to configure ospf on router.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+ * `load_config` : Loading the config to router this is set as True.
+
+ Usage
+ -----
+ r1_ospf_auth = {
+ "r1": {
+ "links": {
+ "r2": {
+ "ospf": {
+ "authentication": 'message-digest',
+ "authentication-key": "ospf",
+ "message-digest-key": "10"
+ }
+ }
+ }
+ }
+ }
+ result = config_ospf6_interface(tgen, topo, r1_ospf_auth)
+
+ Returns
+ -------
+ True or False
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+ if topo is None:
+ topo = tgen.json_topo
+
+ if not input_dict:
+ input_dict = deepcopy(topo)
+ else:
+ input_dict = deepcopy(input_dict)
+
+ config_data_dict = {}
+
+ for router in input_dict.keys():
+ config_data = []
+ for lnk in input_dict[router]["links"].keys():
+ if "ospf6" not in input_dict[router]["links"][lnk]:
+ logger.debug(
+ "Router %s: ospf6 config is not present in"
+ "input_dict, passed input_dict %s",
+ router,
+ str(input_dict),
+ )
+ continue
+ ospf_data = input_dict[router]["links"][lnk]["ospf6"]
+ data_ospf_area = ospf_data.setdefault("area", None)
+ data_ospf_auth = ospf_data.setdefault("hash-algo", None)
+ data_ospf_keychain = ospf_data.setdefault("keychain", None)
+ data_ospf_dr_priority = ospf_data.setdefault("priority", None)
+ data_ospf_cost = ospf_data.setdefault("cost", None)
+ data_ospf_mtu = ospf_data.setdefault("mtu_ignore", None)
+
+ try:
+ intf = topo["routers"][router]["links"][lnk]["interface"]
+ except KeyError:
+ intf = topo["switches"][router]["links"][lnk]["interface"]
+
+ # interface
+ cmd = "interface {}".format(intf)
+
+ config_data.append(cmd)
+ # interface area config
+ if data_ospf_area:
+ cmd = "ipv6 ospf area {}".format(data_ospf_area)
+ config_data.append(cmd)
+
+ # interface ospf auth
+ if data_ospf_auth:
+ cmd = "ipv6 ospf6 authentication"
+
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+
+ if "hash-algo" in ospf_data:
+ cmd = "{} key-id {} hash-algo {} key {}".format(
+ cmd,
+ ospf_data["key-id"],
+ ospf_data["hash-algo"],
+ ospf_data["key"],
+ )
+ config_data.append(cmd)
+
+ # interface ospf auth with keychain
+ if data_ospf_keychain:
+ cmd = "ipv6 ospf6 authentication"
+
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+
+ if "keychain" in ospf_data:
+ cmd = "{} keychain {}".format(cmd, ospf_data["keychain"])
+ config_data.append(cmd)
+
+ # interface ospf dr priority
+ if data_ospf_dr_priority:
+ cmd = "ipv6 ospf priority {}".format(ospf_data["priority"])
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ # interface ospf cost
+ if data_ospf_cost:
+ cmd = "ipv6 ospf cost {}".format(ospf_data["cost"])
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ # interface ospf mtu
+ if data_ospf_mtu:
+ cmd = "ipv6 ospf mtu-ignore"
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if build:
+ return config_data
+
+ if config_data:
+ config_data_dict[router] = config_data
+
+ result = create_common_configurations(
+ tgen, config_data_dict, "interface_config", build=build
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+@retry(retry_timeout=20)
+def verify_ospf_gr_helper(tgen, topo, dut, input_dict=None):
+ """
+    This API is used to verify the OSPF graceful-restart helper using the
+    "show ip ospf graceful-restart helper" command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : topology descriptions
+ * 'dut' : router
+ * 'input_dict' - values to be verified
+
+ Usage:
+ -------
+ input_dict = {
+ "helperSupport":"Disabled",
+ "strictLsaCheck":"Enabled",
+ "restartSupport":"Planned and Unplanned Restarts",
+ "supportedGracePeriod":1800
+ }
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+
+ if "ospf" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF is not configured on the router.".format(dut)
+ return errormsg
+
+ rnode = tgen.routers()[dut]
+ logger.info("Verifying OSPF GR details on router %s:", dut)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ip ospf graceful-restart helper json", isjson=True
+ )
+
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF is not running"
+ raise ValueError(errormsg)
+
+ for ospf_gr, gr_data in input_dict.items():
+ try:
+ if input_dict[ospf_gr] == show_ospf_json[ospf_gr]:
+ logger.info(
+ "[DUT: FRR] OSPF GR Helper: %s is %s",
+ ospf_gr,
+ show_ospf_json[ospf_gr],
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: FRR] OSPF GR Helper: {} expected is {}, Found "
+ "is {}".format(
+ ospf_gr, input_dict[ospf_gr], show_ospf_json[ospf_gr]
+ )
+ )
+ raise ValueError(errormsg)
+
+ except KeyError:
+ errormsg = "[DUT: FRR] OSPF GR Helper: {}".format(ospf_gr)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+def get_ospf_database(tgen, topo, dut, input_dict, vrf=None, lsatype=None, rid=None):
+ """
+    This API is to return OSPF LSAs by running the
+    "show ip ospf database" command.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `dut`: device under test
+ * `input_dict` : Input dict data, required when configuring from testcase
+    * `topo` : topology descriptions
+ * `vrf` : vrf to be checked
+ * `lsatype` : type of lsa to be checked
+ * `rid` : router id for lsa to be checked
+ Usage
+ -----
+ input_dict = {
+ "areas": {
+ "0.0.0.0": {
+ "routerLinkStates": {
+ "100.1.1.0-100.1.1.0": {
+ "LSID": "100.1.1.0",
+ "Advertised router": "100.1.1.0",
+ "LSA Age": 130,
+ "Sequence Number": "80000006",
+ "Checksum": "a703",
+ "Router links": 3
+ }
+ },
+ "networkLinkStates": {
+ "10.0.0.2-100.1.1.1": {
+ "LSID": "10.0.0.2",
+ "Advertised router": "100.1.1.1",
+ "LSA Age": 137,
+ "Sequence Number": "80000001",
+ "Checksum": "9583"
+ }
+ },
+ },
+ }
+ }
+ result = get_ospf_database(tgen, topo, dut, input_dict)
+
+ Returns
+ -------
+ True or False (Error Message)
+ """
+
+ result = False
+ router = dut
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
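+    # Fixed settle delay so the LSDB can converge before it is queried.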
+ sleep(10)
+ if "ospf" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF is not configured on the router.".format(dut)
+ return errormsg
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("Verifying OSPF interface on router %s:", dut)
+ if not rid:
+ rid = "self-originate"
+ if lsatype:
+ if vrf is None:
+ command = "show ip ospf database {} {} json".format(lsatype, rid)
+ else:
+ command = "show ip ospf database {} {} vrf {} json".format(
+ lsatype, rid, vrf
+ )
+ else:
+ if vrf is None:
+ command = "show ip ospf database json"
+ else:
+ command = "show ip ospf database vrf {} json".format(vrf)
+
+ show_ospf_json = run_frr_cmd(rnode, command, isjson=True)
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF is not running"
+ return errormsg
+
+ # for inter and inter lsa's
+ ospf_db_data = input_dict.setdefault("areas", None)
+ ospf_external_lsa = input_dict.setdefault("asExternalLinkStates", None)
+
+ if ospf_db_data:
+ for ospf_area, area_lsa in ospf_db_data.items():
+ if "areas" in show_ospf_json and ospf_area in show_ospf_json["areas"]:
+ if "routerLinkStates" in area_lsa:
+ for lsa in area_lsa["routerLinkStates"]:
+ for rtrlsa in show_ospf_json["areas"][ospf_area][
+ "routerLinkStates"
+ ]:
+ _advrtr = lsa.setdefault("advertisedRouter", None)
+ _options = lsa.setdefault("options", None)
+ if (
+ _advrtr
+ and lsa["lsaId"] == rtrlsa["lsaId"]
+ and lsa["advertisedRouter"]
+ == rtrlsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if (
+ _options
+ and lsa["lsaId"] == rtrlsa["lsaId"]
+ and lsa["options"] == rtrlsa["options"]
+ ):
+ result = True
+ break
+
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Router " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Router LSA is {}\n found Router LSA: {}".format(
+ router, ospf_area, lsa, rtrlsa
+ )
+ )
+ return errormsg
+
+ if "networkLinkStates" in area_lsa:
+ for lsa in area_lsa["networkLinkStates"]:
+ for netlsa in show_ospf_json["areas"][ospf_area][
+ "networkLinkStates"
+ ]:
+ if (
+ lsa
+ in show_ospf_json["areas"][ospf_area][
+ "networkLinkStates"
+ ]
+ ):
+ if (
+ lsa["lsaId"] == netlsa["lsaId"]
+ and lsa["advertisedRouter"]
+ == netlsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Network " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Network LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+
+ if "summaryLinkStates" in area_lsa:
+ for lsa in area_lsa["summaryLinkStates"]:
+ for t3lsa in show_ospf_json["areas"][ospf_area][
+ "summaryLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t3lsa["lsaId"]
+ and lsa["advertisedRouter"] == t3lsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Summary " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Summary LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+
+ if "nssaExternalLinkStates" in area_lsa:
+ for lsa in area_lsa["nssaExternalLinkStates"]:
+ for t7lsa in show_ospf_json["areas"][ospf_area][
+ "nssaExternalLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t7lsa["lsaId"]
+ and lsa["advertisedRouter"] == t7lsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Type7 " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Type7 LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+
+ if "asbrSummaryLinkStates" in area_lsa:
+ for lsa in area_lsa["asbrSummaryLinkStates"]:
+ for t4lsa in show_ospf_json["areas"][ospf_area][
+ "asbrSummaryLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t4lsa["lsaId"]
+ and lsa["advertisedRouter"] == t4lsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:ASBR Summary " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " ASBR Summary LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+
+ if "linkLocalOpaqueLsa" in area_lsa:
+ for lsa in area_lsa["linkLocalOpaqueLsa"]:
+ try:
+ for lnklsa in show_ospf_json["areas"][ospf_area][
+ "linkLocalOpaqueLsa"
+ ]:
+ if (
+ lsa["lsaId"] in lnklsa["lsaId"]
+ and "linkLocalOpaqueLsa"
+ in show_ospf_json["areas"][ospf_area]
+ ):
+ logger.info(
+ (
+ "[DUT: FRR] OSPF LSDB area %s:Opaque-LSA"
+ "%s",
+ ospf_area,
+ lsa,
+ )
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: FRR] OSPF LSDB area: {} "
+ "expected Opaque-LSA is {}, Found is {}".format(
+ ospf_area, lsa, show_ospf_json
+ )
+ )
+ raise ValueError(errormsg)
+ except KeyError:
+ errormsg = "[DUT: FRR] linkLocalOpaqueLsa Not " "present"
+ return errormsg
+ else:
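+                # The "areas" key is absent here (e.g. when a specific
+                # lsatype/rid was queried); the LSAs then appear keyed
+                # directly by advertising router, so match on that layout.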
+ if "routerLinkStates" in area_lsa:
+ for lsa in area_lsa["routerLinkStates"]:
+ for rtrlsa in show_ospf_json["routerLinkStates"]:
+ _advrtr = lsa.setdefault("advertisedRouter", None)
+ _options = lsa.setdefault("options", None)
+ _age = lsa.setdefault("lsaAge", None)
+ if (
+ _options
+ and lsa["options"]
+ == show_ospf_json["routerLinkStates"][rtrlsa][
+ ospf_area
+ ][0]["options"]
+ ):
+ result = True
+ break
+ if (
+ _age != "get"
+ and lsa["lsaAge"]
+ == show_ospf_json["routerLinkStates"][rtrlsa][
+ ospf_area
+ ][0]["lsaAge"]
+ ):
+ result = True
+ break
+
+ if _age == "get":
+ return "{}".format(
+ show_ospf_json["routerLinkStates"][rtrlsa][
+ ospf_area
+ ][0]["lsaAge"]
+ )
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Router " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Router LSA is {}\n found Router LSA: {}".format(
+ router,
+ ospf_area,
+ lsa,
+ show_ospf_json["routerLinkStates"],
+ )
+ )
+ return errormsg
+
+ if "networkLinkStates" in area_lsa:
+ for lsa in area_lsa["networkLinkStates"]:
+ for netlsa in show_ospf_json["areas"][ospf_area][
+ "networkLinkStates"
+ ]:
+ if (
+ lsa
+ in show_ospf_json["areas"][ospf_area][
+ "networkLinkStates"
+ ]
+ ):
+ if (
+ lsa["lsaId"] == netlsa["lsaId"]
+ and lsa["advertisedRouter"]
+ == netlsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Network " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Network LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+
+ if "summaryLinkStates" in area_lsa:
+ for lsa in area_lsa["summaryLinkStates"]:
+ for t3lsa in show_ospf_json["areas"][ospf_area][
+ "summaryLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t3lsa["lsaId"]
+ and lsa["advertisedRouter"] == t3lsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Summary " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Summary LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+
+ if "nssaExternalLinkStates" in area_lsa:
+ for lsa in area_lsa["nssaExternalLinkStates"]:
+ for t7lsa in show_ospf_json["areas"][ospf_area][
+ "nssaExternalLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t7lsa["lsaId"]
+ and lsa["advertisedRouter"] == t7lsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Type7 " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " Type7 LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+
+ if "asbrSummaryLinkStates" in area_lsa:
+ for lsa in area_lsa["asbrSummaryLinkStates"]:
+ for t4lsa in show_ospf_json["areas"][ospf_area][
+ "asbrSummaryLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t4lsa["lsaId"]
+ and lsa["advertisedRouter"] == t4lsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:ASBR Summary " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " ASBR Summary LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
+
+ if "linkLocalOpaqueLsa" in area_lsa:
+ for lsa in area_lsa["linkLocalOpaqueLsa"]:
+ try:
+ for lnklsa in show_ospf_json["areas"][ospf_area][
+ "linkLocalOpaqueLsa"
+ ]:
+ if (
+ lsa["lsaId"] in lnklsa["lsaId"]
+ and "linkLocalOpaqueLsa"
+ in show_ospf_json["areas"][ospf_area]
+ ):
+ logger.info(
+ (
+ "[DUT: FRR] OSPF LSDB area %s:Opaque-LSA"
+ "%s",
+ ospf_area,
+ lsa,
+ )
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: FRR] OSPF LSDB area: {} "
+ "expected Opaque-LSA is {}, Found is {}".format(
+ ospf_area, lsa, show_ospf_json
+ )
+ )
+ raise ValueError(errormsg)
+ except KeyError:
+ errormsg = "[DUT: FRR] linkLocalOpaqueLsa Not " "present"
+ return errormsg
+
+ if ospf_external_lsa:
+ for lsa in ospf_external_lsa:
+ try:
+ for t5lsa in show_ospf_json["asExternalLinkStates"]:
+ if (
+ lsa["lsaId"] == t5lsa["lsaId"]
+ and lsa["advertisedRouter"] == t5lsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ except KeyError:
+ result = False
+ if result:
+ logger.info("[DUT: %s] OSPF LSDB:External LSA %s", router, lsa)
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB : expected"
+ " External LSA is {}".format(router, lsa)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
new file mode 100644
index 0000000..f7440ef
--- /dev/null
+++ b/tests/topotests/lib/pim.py
@@ -0,0 +1,5203 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+
+import datetime
+import functools
+import os
+import re
+import sys
+import traceback
+from copy import deepcopy
+from time import sleep
+
+# Import common_config to use commonly used APIs
+from lib.common_config import (
+ HostApplicationHelper,
+ InvalidCLIError,
+ create_common_configuration,
+ create_common_configurations,
+ get_frr_ipv6_linklocal,
+ retry,
+ run_frr_cmd,
+ validate_ip_address,
+)
+from lib.micronet import get_exec_path
+from lib.topolog import logger
+from lib.topotest import frr_unicode
+
+from lib import topotest
+
+####
+CWD = os.path.dirname(os.path.realpath(__file__))
+
+
+def create_pim_config(tgen, topo, input_dict=None, build=False, load_config=True):
+ """
+ API to configure pim/pim6 on router
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from
+ testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "pim": {
+ "join-prune-interval": "5",
+ "rp": [{
+                "rp_addr": "1.0.3.17",
+                "keep-alive-timer": "100",
+                "group_addr_range": ["224.1.1.0/24", "225.1.1.0/24"],
+                "prefix-list": "pf_list_1",
+                "delete": True
+ }]
+ },
+ "pim6": {
+ "disable" : ["l1-i1-eth1"],
+ "rp": [{
+                "rp_addr": "2001:db8:f::5:17",
+                "keep-alive-timer": "100",
+                "group_addr_range": ["FF00::/8"],
+                "prefix-list": "pf_list_1",
+                "delete": True
+ }]
+ }
+ }
+ }
+
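+    result = create_pim_config(tgen, topo, input_dict)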
+
+ Returns
+ -------
+ True or False
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+ if not input_dict:
+ input_dict = deepcopy(topo)
+ else:
+ topo = topo["routers"]
+ input_dict = deepcopy(input_dict)
+
+ config_data_dict = {}
+
+ for router in input_dict.keys():
+ config_data = _enable_disable_pim_config(tgen, topo, input_dict, router, build)
+
+ if config_data:
+ config_data_dict[router] = config_data
+
+ # Now add RP config to all routers
+ for router in input_dict.keys():
+ if "pim" in input_dict[router] or "pim6" in input_dict[router]:
+ _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict)
+ try:
+ result = create_common_configurations(
+ tgen, config_data_dict, "pim", build, load_config
+ )
+ except InvalidCLIError:
+ logger.error("create_pim_config", exc_info=True)
+ result = False
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+def _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict):
+ """
+ Helper API to create pim RP configurations.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured.
+ * `build` : Only for initial setup phase this is set as True.
+    * `config_data_dict` : OUT: adds `router` config to dictionary
+    Returns
+    -------
+    None, or False when a mandatory key is missing while building
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ rp_data = []
+
+ # PIMv4
+ pim_data = None
+ if "pim" in input_dict[router]:
+ pim_data = input_dict[router]["pim"]
+ if "rp" in input_dict[router]["pim"]:
+ rp_data += pim_data["rp"]
+
+ # pim6
+ pim6_data = None
+ if "pim6" in input_dict[router]:
+ pim6_data = input_dict[router]["pim6"]
+ if "rp" in input_dict[router]["pim6"]:
+ rp_data += pim6_data["rp"]
+
+ # Configure this RP on every router.
+ for dut in tgen.routers():
+ # At least one interface must be enabled for PIM on the router
+ pim_if_enabled = False
+ pim6_if_enabled = False
+ for destLink, data in topo[dut]["links"].items():
+ if "pim" in data:
+ pim_if_enabled = True
+ if "pim6" in data:
+ pim6_if_enabled = True
+ if not pim_if_enabled and pim_data:
+ continue
+ if not pim6_if_enabled and pim6_data:
+ continue
+
+ config_data = []
+
+ if rp_data:
+ for rp_dict in deepcopy(rp_data):
+ # ip address of RP
+ if "rp_addr" not in rp_dict and build:
+ logger.error(
+ "Router %s: 'ip address of RP' not "
+ "present in input_dict/JSON",
+ router,
+ )
+
+ return False
+ rp_addr = rp_dict.setdefault("rp_addr", None)
+ if rp_addr:
+ addr_type = validate_ip_address(rp_addr)
+ # Keep alive Timer
+ keep_alive_timer = rp_dict.setdefault("keep_alive_timer", None)
+
+ # Group Address range to cover
+ if "group_addr_range" not in rp_dict and build:
+ logger.error(
+ "Router %s:'Group Address range to cover'"
+ " not present in input_dict/JSON",
+ router,
+ )
+
+ return False
+ group_addr_range = rp_dict.setdefault("group_addr_range", None)
+
+ # Group prefix-list filter
+ prefix_list = rp_dict.setdefault("prefix_list", None)
+
+ # Delete rp config
+ del_action = rp_dict.setdefault("delete", False)
+
+ if keep_alive_timer:
+ if addr_type == "ipv4":
+ cmd = "ip pim rp keep-alive-timer {}".format(keep_alive_timer)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+ if addr_type == "ipv6":
+ cmd = "ipv6 pim rp keep-alive-timer {}".format(keep_alive_timer)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if rp_addr:
+ if group_addr_range:
+ if type(group_addr_range) is not list:
+ group_addr_range = [group_addr_range]
+
+ for grp_addr in group_addr_range:
+ if addr_type == "ipv4":
+ cmd = "ip pim rp {} {}".format(rp_addr, grp_addr)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+ if addr_type == "ipv6":
+ cmd = "ipv6 pim rp {} {}".format(rp_addr, grp_addr)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if prefix_list:
+ if addr_type == "ipv4":
+ cmd = "ip pim rp {} prefix-list {}".format(
+ rp_addr, prefix_list
+ )
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+ if addr_type == "ipv6":
+ cmd = "ipv6 pim rp {} prefix-list {}".format(
+ rp_addr, prefix_list
+ )
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if config_data:
+ if dut not in config_data_dict:
+ config_data_dict[dut] = config_data
+ else:
+ config_data_dict[dut].extend(config_data)
+
+
+def create_igmp_config(tgen, topo, input_dict=None, build=False):
+ """
+ API to configure igmp on router
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from
+ testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+                "r1-r0-eth0": {
+                    "igmp": {
+                        "version": "2",
+                        "delete": True,
+ "query": {
+ "query-interval" : 100,
+ "query-max-response-time": 200
+ }
+ }
+ }
+ }
+ }
+ }
+ }
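+
+    result = create_igmp_config(tgen, topo, input_dict)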
+
+ Returns
+ -------
+ True or False
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+ if not input_dict:
+ input_dict = deepcopy(topo)
+ else:
+ topo = topo["routers"]
+ input_dict = deepcopy(input_dict)
+
+ config_data_dict = {}
+
+ for router in input_dict.keys():
+ if "igmp" not in input_dict[router]:
+ logger.debug("Router %s: 'igmp' is not present in " "input_dict", router)
+ continue
+
+ igmp_data = input_dict[router]["igmp"]
+
+ if "interfaces" in igmp_data:
+ config_data = []
+ intf_data = igmp_data["interfaces"]
+
+ for intf_name in intf_data.keys():
+ cmd = "interface {}".format(intf_name)
+ config_data.append(cmd)
+ protocol = "igmp"
+ del_action = intf_data[intf_name]["igmp"].setdefault("delete", False)
+ del_attr = intf_data[intf_name]["igmp"].setdefault("delete_attr", False)
+ cmd = "ip igmp"
+ if del_action:
+ cmd = "no {}".format(cmd)
+ if not del_attr:
+ config_data.append(cmd)
+
+ for attribute, data in intf_data[intf_name]["igmp"].items():
+ if attribute == "version":
+ cmd = "ip {} {} {}".format(protocol, attribute, data)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ if not del_attr:
+ config_data.append(cmd)
+
+ if attribute == "join":
+ for group in data:
+ cmd = "ip {} {} {}".format(protocol, attribute, group)
+ if del_attr:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if attribute == "query":
+ for query, value in data.items():
+ if query != "delete":
+ cmd = "ip {} {} {}".format(protocol, query, value)
+
+ if "delete" in intf_data[intf_name][protocol]["query"]:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+ if config_data:
+ config_data_dict[router] = config_data
+
+ try:
+ result = create_common_configurations(
+ tgen, config_data_dict, "interface_config", build=build
+ )
+ except InvalidCLIError:
+ logger.error("create_igmp_config", exc_info=True)
+ result = False
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+def create_mld_config(tgen, topo, input_dict=None, build=False):
+ """
+ API to configure mld for pim6 on router
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from
+ testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "mld": {
+ "interfaces": {
+                "r1-r0-eth0": {
+                    "mld": {
+                        "version": "2",
+                        "delete": True,
+ "query": {
+ "query-interval" : 100,
+ "query-max-response-time": 200
+ }
+ }
+ }
+ }
+ }
+ }
+ }
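+
+    result = create_mld_config(tgen, topo, input_dict)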
+
+ Returns
+ -------
+ True or False
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+ if not input_dict:
+ input_dict = deepcopy(topo)
+ else:
+ topo = topo["routers"]
+ input_dict = deepcopy(input_dict)
+ for router in input_dict.keys():
+ if "mld" not in input_dict[router]:
+ logger.debug("Router %s: 'mld' is not present in " "input_dict", router)
+ continue
+
+ mld_data = input_dict[router]["mld"]
+
+ if "interfaces" in mld_data:
+ config_data = []
+ intf_data = mld_data["interfaces"]
+
+ for intf_name in intf_data.keys():
+ cmd = "interface {}".format(intf_name)
+ config_data.append(cmd)
+ protocol = "mld"
+ del_action = intf_data[intf_name]["mld"].setdefault("delete", False)
+ cmd = "ipv6 mld"
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ del_attr = intf_data[intf_name]["mld"].setdefault("delete_attr", False)
+ join = intf_data[intf_name]["mld"].setdefault("join", None)
+ source = intf_data[intf_name]["mld"].setdefault("source", None)
+ version = intf_data[intf_name]["mld"].setdefault("version", False)
+ query = intf_data[intf_name]["mld"].setdefault("query", {})
+
+ if version:
+ cmd = "ipv6 {} version {}".format(protocol, version)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if source and join:
+ for group in join:
+ cmd = "ipv6 {} join {} {}".format(protocol, group, source)
+
+ if del_attr:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ elif join:
+ for group in join:
+ cmd = "ipv6 {} join {}".format(protocol, group)
+
+ if del_attr:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if query:
+ for _query, value in query.items():
+ if _query != "delete":
+ cmd = "ipv6 {} {} {}".format(protocol, _query, value)
+
+ if "delete" in intf_data[intf_name][protocol]["query"]:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
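+        # Note: unlike create_igmp_config, configuration is applied per
+        # router here via create_common_configuration, not batched.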
+ try:
+ result = create_common_configuration(
+ tgen, router, config_data, "interface_config", build=build
+ )
+ except InvalidCLIError:
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
+ """
+ Helper API to enable or disable pim on interfaces
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `router` : router id to be configured.
+ * `build` : Only for initial setup phase this is set as True.
+
+ Returns
+ -------
+ list of config
+ """
+
+ config_data = []
+
+ # Enable pim/pim6 on interfaces
+ for destRouterLink, data in sorted(topo[router]["links"].items()):
+ if "pim" in data and data["pim"] == "enable":
+ # Loopback interfaces
+ if "type" in data and data["type"] == "loopback":
+ interface_name = destRouterLink
+ else:
+ interface_name = data["interface"]
+
+ cmd = "interface {}".format(interface_name)
+ config_data.append(cmd)
+ config_data.append("ip pim")
+
+ if "pim" in input_dict[router]:
+ if "disable" in input_dict[router]["pim"]:
+ enable_flag = False
+ interfaces = input_dict[router]["pim"]["disable"]
+
+ if type(interfaces) is not list:
+ interfaces = [interfaces]
+
+ for interface in interfaces:
+ cmd = "interface {}".format(interface)
+ config_data.append(cmd)
+ config_data.append("no ip pim")
+
+ if "pim6" in data and data["pim6"] == "enable":
+ # Loopback interfaces
+ if "type" in data and data["type"] == "loopback":
+ interface_name = destRouterLink
+ else:
+ interface_name = data["interface"]
+
+ cmd = "interface {}".format(interface_name)
+ config_data.append(cmd)
+ config_data.append("ipv6 pim")
+
+ if "pim6" in input_dict[router]:
+ if "disable" in input_dict[router]["pim6"]:
+ enable_flag = False
+ interfaces = input_dict[router]["pim6"]["disable"]
+
+ if type(interfaces) is not list:
+ interfaces = [interfaces]
+
+ for interface in interfaces:
+ cmd = "interface {}".format(interface)
+ config_data.append(cmd)
+ config_data.append("no ipv6 pim")
+
+ # pim global config
+ if "pim" in input_dict[router]:
+ pim_data = input_dict[router]["pim"]
+ del_action = pim_data.setdefault("delete", False)
+ for t in [
+ "join-prune-interval",
+ "keep-alive-timer",
+ "register-suppress-time",
+ ]:
+ if t in pim_data:
+ cmd = "ip pim {} {}".format(t, pim_data[t])
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ # pim6 global config
+ if "pim6" in input_dict[router]:
+ pim6_data = input_dict[router]["pim6"]
+ del_action = pim6_data.setdefault("delete", False)
+ for t in [
+ "join-prune-interval",
+ "keep-alive-timer",
+ "register-suppress-time",
+ ]:
+ if t in pim6_data:
+ cmd = "ipv6 pim {} {}".format(t, pim6_data[t])
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ return config_data
+
+
+def find_rp_details(tgen, topo):
+ """
+ Find who is RP in topology and returns list of RPs
+
+ Parameters:
+ -----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+
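+    Usage
+    -----
+    # Illustrative sketch; RP addresses come from the "pim" config in topo.
+    rp_details = find_rp_details(tgen, topo)
+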
+    returns:
+    --------
+    rp_details: dict mapping each RP router name to its RP address
+ """
+
+ rp_details = {}
+
+ router_list = tgen.routers()
+ topo_data = topo["routers"]
+
+ for router in router_list.keys():
+ if "pim" not in topo_data[router]:
+ continue
+
+ pim_data = topo_data[router]["pim"]
+ if "rp" in pim_data:
+ rp_data = pim_data["rp"]
+ for rp_dict in rp_data:
+ # ip address of RP
+ rp_addr = rp_dict["rp_addr"]
+
+ for link, data in topo["routers"][router]["links"].items():
+ if data["ipv4"].split("/")[0] == rp_addr:
+ rp_details[router] = rp_addr
+
+ return rp_details
+
+
+def configure_pim_force_expire(tgen, topo, input_dict, build=False):
+ """
+ Helper API to create pim configuration.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ input_dict ={
+ "l1": {
+ "pim": {
+ "force_expire":{
+ "10.0.10.1": ["255.1.1.1"]
+ }
+ }
+ }
+ }
+
+    result = configure_pim_force_expire(tgen, topo, input_dict)
+
+ Returns
+ -------
+ True or False
+ """
+
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ try:
+ config_data_dict = {}
+
+ for dut in input_dict.keys():
+ if "pim" not in input_dict[dut]:
+ continue
+
+ pim_data = input_dict[dut]["pim"]
+
+ config_data = []
+ if "force_expire" in pim_data:
+ force_expire_data = pim_data["force_expire"]
+
+ for source, groups in force_expire_data.items():
+ if type(groups) is not list:
+ groups = [groups]
+
+ for group in groups:
+ cmd = "ip pim force-expire source {} group {}".format(
+ source, group
+ )
+ config_data.append(cmd)
+
+ if config_data:
+ config_data_dict[dut] = config_data
+
+ result = create_common_configurations(
+ tgen, config_data_dict, "pim", build=build
+ )
+ except InvalidCLIError:
+ logger.error("configure_pim_force_expire", exc_info=True)
+ result = False
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+#############################################
+# Verification APIs
+#############################################
+@retry(retry_timeout=12)
+def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None, expected=True):
+ """
+ Verify all PIM neighbors are up and running, config is verified
+ using "show ip pim neighbor" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo` : json file data
+ * `dut` : dut info
+    * `iface` : interface for which the PIM neighbor needs to be checked
+    * `nbr_ip` : neighbor ip of the interface
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+    result = verify_pim_neighbors(tgen, topo, dut, iface="ens192", nbr_ip="20.1.1.2")
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router in tgen.routers():
+ if dut is not None and dut != router:
+ continue
+
+ rnode = tgen.routers()[router]
+ show_ip_pim_neighbor_json = rnode.vtysh_cmd(
+ "show ip pim neighbor json", isjson=True
+ )
+
+ for destLink, data in topo["routers"][router]["links"].items():
+ if iface is not None and iface != data["interface"]:
+ continue
+
+ if "type" in data and data["type"] == "loopback":
+ continue
+
+ if "pim" not in data:
+ continue
+
+ if "pim" in data and data["pim"] == "disable":
+ continue
+
+ if "pim" in data and data["pim"] == "enable":
+ local_interface = data["interface"]
+
+ if "-" in destLink:
+                    # Splitting and storing destRouterLink data in tempList
+ tempList = destLink.split("-")
+
+ # destRouter
+ destLink = tempList.pop(0)
+
+ # Current Router Link
+ tempList.insert(0, router)
+ curRouter = "-".join(tempList)
+ else:
+ curRouter = router
+ if destLink not in topo["routers"]:
+ continue
+ data = topo["routers"][destLink]["links"][curRouter]
+ if "type" in data and data["type"] == "loopback":
+ continue
+
+ if "pim" not in data:
+ continue
+
+ logger.info("[DUT: %s]: Verifying PIM neighbor status:", router)
+
+ if "pim" in data and data["pim"] == "enable":
+ pim_nh_intf_ip = data["ipv4"].split("/")[0]
+
+ # Verifying PIM neighbor
+ if local_interface in show_ip_pim_neighbor_json:
+ if show_ip_pim_neighbor_json[local_interface]:
+ if (
+ show_ip_pim_neighbor_json[local_interface][pim_nh_intf_ip][
+ "neighbor"
+ ]
+ != pim_nh_intf_ip
+ ):
+ errormsg = (
+ "[DUT %s]: Local interface: %s, PIM"
+ " neighbor check failed "
+ "Expected neighbor: %s, Found neighbor:"
+ " %s"
+ % (
+ router,
+ local_interface,
+ pim_nh_intf_ip,
+ show_ip_pim_neighbor_json[local_interface][
+ pim_nh_intf_ip
+ ]["neighbor"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Local interface: %s, Found"
+ " expected PIM neighbor %s",
+ router,
+ local_interface,
+ pim_nh_intf_ip,
+ )
+ else:
+ errormsg = (
+ "[DUT %s]: Local interface: %s, and"
+ "interface ip: %s is not found in "
+ "PIM neighbor " % (router, local_interface, pim_nh_intf_ip)
+ )
+ return errormsg
+ else:
+ errormsg = (
+ "[DUT %s]: Local interface: %s, is not "
+ "present in PIM neighbor " % (router, local_interface)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=12)
+def verify_pim6_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None, expected=True):
+ """
+ Verify all pim6 neighbors are up and running, config is verified
+ using "show ipv6 pim neighbor" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo` : json file data
+ * `dut` : dut info
+    * `iface` : interface for which the PIM neighbor needs to be checked
+    * `nbr_ip` : neighbor ip of the interface
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+    result = verify_pim6_neighbors(tgen, topo, dut, iface="ens192", nbr_ip="20.1.1.2")
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router in tgen.routers():
+ if dut is not None and dut != router:
+ continue
+
+ rnode = tgen.routers()[router]
+ show_ip_pim_neighbor_json = rnode.vtysh_cmd(
+ "show ipv6 pim neighbor json", isjson=True
+ )
+
+ for destLink, data in topo["routers"][router]["links"].items():
+ if "type" in data and data["type"] == "loopback":
+ continue
+
+ if iface is not None and iface != data["interface"]:
+ continue
+
+ if "pim6" not in data:
+ continue
+
+ if "pim6" in data and data["pim6"] == "disable":
+ continue
+
+ if "pim6" in data and data["pim6"] == "enable":
+ local_interface = data["interface"]
+
+ if "-" in destLink:
+                    # Splitting and storing destRouterLink data in tempList
+ tempList = destLink.split("-")
+
+ # destRouter
+ destLink = tempList.pop(0)
+
+ # Current Router Link
+ tempList.insert(0, router)
+ curRouter = "-".join(tempList)
+ else:
+ curRouter = router
+ if destLink not in topo["routers"]:
+ continue
+ data = topo["routers"][destLink]["links"][curRouter]
+ peer_interface = data["interface"]
+ if "type" in data and data["type"] == "loopback":
+ continue
+
+ if "pim6" not in data:
+ continue
+
+ logger.info("[DUT: %s]: Verifying PIM neighbor status:", router)
+
+ if "pim6" in data and data["pim6"] == "enable":
+ pim_nh_intf_ip = get_frr_ipv6_linklocal(tgen, destLink, peer_interface)
+
+ # Verifying PIM neighbor
+ if local_interface in show_ip_pim_neighbor_json:
+ if show_ip_pim_neighbor_json[local_interface]:
+ if (
+ show_ip_pim_neighbor_json[local_interface][pim_nh_intf_ip][
+ "neighbor"
+ ]
+ != pim_nh_intf_ip
+ ):
+ errormsg = (
+ "[DUT %s]: Local interface: %s, PIM6"
+ " neighbor check failed "
+ "Expected neighbor: %s, Found neighbor:"
+ " %s"
+ % (
+ router,
+ local_interface,
+ pim_nh_intf_ip,
+ show_ip_pim_neighbor_json[local_interface][
+ pim_nh_intf_ip
+ ]["neighbor"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Local interface: %s, Found"
+ " expected PIM6 neighbor %s",
+ router,
+ local_interface,
+ pim_nh_intf_ip,
+ )
+ else:
+ errormsg = (
+ "[DUT %s]: Local interface: %s, and"
+ "interface ip: %s is not found in "
+ "PIM6 neighbor " % (router, local_interface, pim_nh_intf_ip)
+ )
+ return errormsg
+ else:
+ errormsg = (
+ "[DUT %s]: Local interface: %s, is not "
+ "present in PIM6 neighbor " % (router, local_interface)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=40, diag_pct=0)
+def verify_igmp_groups(tgen, dut, interface, group_addresses, expected=True):
+ """
+ Verify IGMP groups are received from an intended interface
+ by running "show ip igmp groups" command
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `interface`: interface, from which IGMP groups would be received
+ * `group_addresses`: IGMP group address
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ dut = "r1"
+ interface = "r1-r0-eth0"
+ group_address = "225.1.1.1"
+ result = verify_igmp_groups(tgen, dut, interface, group_address)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying IGMP groups received:", dut)
+ show_ip_igmp_json = run_frr_cmd(rnode, "show ip igmp groups json", isjson=True)
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ if interface in show_ip_igmp_json:
+ show_ip_igmp_json = show_ip_igmp_json[interface]["groups"]
+ else:
+ errormsg = (
+ "[DUT %s]: Verifying IGMP group received"
+ " from interface %s [FAILED]!! " % (dut, interface)
+ )
+ return errormsg
+
+    for grp_addr in group_addresses:
+        found = False
+        for index in show_ip_igmp_json:
+ if index["group"] == grp_addr:
+ found = True
+ break
+ if found is not True:
+ errormsg = (
+ "[DUT %s]: Verifying IGMP group received"
+ " from interface %s [FAILED]!! "
+ " Expected not found: %s" % (dut, interface, grp_addr)
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Verifying IGMP group %s received "
+ "from interface %s [PASSED]!! ",
+ dut,
+ grp_addr,
+ interface,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=60, diag_pct=2)
+def verify_upstream_iif(
+ tgen,
+ dut,
+ iif,
+ src_address,
+ group_addresses,
+ joinState=None,
+ regState=None,
+ refCount=1,
+ addr_type="ipv4",
+ expected=True,
+):
+ """
+ Verify upstream inbound interface is updated correctly
+ by running "show ip pim upstream" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `iif`: inbound interface
+ * `src_address`: source address
+ * `group_addresses`: IGMP group address
+    * `joinState`: upstream join state
+    * `regState`: upstream register state
+    * `refCount`: refCount value
+    * `addr_type`: address family to verify, "ipv4" or "ipv6"
+    * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ dut = "r1"
+ iif = "r1-r0-eth0"
+ src_address = "*"
+ group_address = "225.1.1.1"
+ result = verify_upstream_iif(tgen, dut, iif, src_address, group_address,
+ state, refCount)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info(
+ "[DUT: %s]: Verifying upstream Inbound Interface"
+ " for IGMP/MLD groups received:",
+ dut,
+ )
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ if type(iif) is not list:
+ iif = [iif]
+
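+    # addr_type is recomputed from each group address so the matching
+    # "ip"/"ipv6" command form is used.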
+ for grp in group_addresses:
+ addr_type = validate_ip_address(grp)
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
+ cmd = "show {} pim upstream json".format(ip_cmd)
+ show_ip_pim_upstream_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ for grp_addr in group_addresses:
+ # Verify group address
+ if grp_addr not in show_ip_pim_upstream_json:
+ errormsg = "[DUT %s]: Verifying upstream" " for group %s [FAILED]!!" % (
+ dut,
+ grp_addr,
+ )
+ return errormsg
+ group_addr_json = show_ip_pim_upstream_json[grp_addr]
+
+ # Verify source address
+ if src_address not in group_addr_json:
+ errormsg = "[DUT %s]: Verifying upstream" " for (%s,%s) [FAILED]!!" % (
+ dut,
+ src_address,
+ grp_addr,
+ )
+ return errormsg
+
+ # Verify Inbound Interface
+ found = False
+ for in_interface in iif:
+ if group_addr_json[src_address]["inboundInterface"] == in_interface:
+ if refCount > 0:
+ logger.info(
+ "[DUT %s]: Verifying refCount "
+ "for (%s,%s) [PASSED]!! "
+ " Found Expected: %s",
+ dut,
+ src_address,
+ grp_addr,
+ group_addr_json[src_address]["refCount"],
+ )
+ found = True
+ if found:
+ if joinState is None:
+ if group_addr_json[src_address]["joinState"] != "Joined":
+ errormsg = (
+ "[DUT %s]: Verifying iif "
+ "(Inbound Interface) and joinState "
+ "for (%s, %s), Expected iif: %s, "
+ "Found iif : %s, and Expected "
+ "joinState :%s , Found joinState: %s"
+ % (
+ dut,
+ src_address,
+ grp_addr,
+ in_interface,
+ group_addr_json[src_address]["inboundInterface"],
+ "Joined",
+ group_addr_json[src_address]["joinState"],
+ )
+ )
+ return errormsg
+
+ elif group_addr_json[src_address]["joinState"] != joinState:
+ errormsg = (
+ "[DUT %s]: Verifying iif "
+ "(Inbound Interface) and joinState "
+ "for (%s, %s), Expected iif: %s, "
+ "Found iif : %s, and Expected "
+ "joinState :%s , Found joinState: %s"
+ % (
+ dut,
+ src_address,
+ grp_addr,
+ in_interface,
+ group_addr_json[src_address]["inboundInterface"],
+ joinState,
+ group_addr_json[src_address]["joinState"],
+ )
+ )
+ return errormsg
+
+ if regState:
+ if group_addr_json[src_address]["regState"] != regState:
+ errormsg = (
+ "[DUT %s]: Verifying iif "
+ "(Inbound Interface) and regState "
+ "for (%s, %s), Expected iif: %s, "
+ "Found iif : %s, and Expected "
+ "regState :%s , Found regState: %s"
+ % (
+ dut,
+ src_address,
+ grp_addr,
+ in_interface,
+ group_addr_json[src_address]["inboundInterface"],
+ regState,
+ group_addr_json[src_address]["regState"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Verifying iif(Inbound Interface)"
+ " for (%s,%s) and joinState is %s regstate is %s [PASSED]!! "
+ " Found Expected: (%s)",
+ dut,
+ src_address,
+ grp_addr,
+ group_addr_json[src_address]["joinState"],
+ group_addr_json[src_address]["regState"],
+ group_addr_json[src_address]["inboundInterface"],
+ )
+ if not found:
+ errormsg = (
+ "[DUT %s]: Verifying iif "
+ "(Inbound Interface) for (%s, %s) "
+ "[FAILED]!! "
+ " Expected: %s, Found: %s"
+ % (
+ dut,
+ src_address,
+ grp_addr,
+ in_interface,
+ group_addr_json[src_address]["inboundInterface"],
+ )
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=12)
+def verify_join_state_and_timer(
+ tgen, dut, iif, src_address, group_addresses, addr_type="ipv4", expected=True
+):
+ """
+ Verify join state is updated correctly and join timer is
+ running with the help of "show ip pim upstream" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `iif`: inbound interface
+ * `src_address`: source address
+ * `group_addresses`: IGMP group address
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ dut = "r1"
+ iif = "r1-r0-eth0"
+ group_address = "225.1.1.1"
+ result = verify_join_state_and_timer(tgen, dut, iif, group_address)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ errormsg = ""
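+    # Failures below are accumulated so both state and timer get reported.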
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info(
+ "[DUT: %s]: Verifying Join state and Join Timer" " for IGMP groups received:",
+ dut,
+ )
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ for grp in group_addresses:
+ addr_type = validate_ip_address(grp)
+
+ if addr_type == "ipv4":
+ cmd = "show ip pim upstream json"
+ elif addr_type == "ipv6":
+ cmd = "show ipv6 pim upstream json"
+ show_ip_pim_upstream_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ for grp_addr in group_addresses:
+ # Verify group address
+ if grp_addr not in show_ip_pim_upstream_json:
+ errormsg = "[DUT %s]: Verifying upstream" " for group %s [FAILED]!!" % (
+ dut,
+ grp_addr,
+ )
+ return errormsg
+
+ group_addr_json = show_ip_pim_upstream_json[grp_addr]
+
+ # Verify source address
+ if src_address not in group_addr_json:
+ errormsg = "[DUT %s]: Verifying upstream" " for (%s,%s) [FAILED]!!" % (
+ dut,
+ src_address,
+ grp_addr,
+ )
+ return errormsg
+
+ # Verify join state
+ joinState = group_addr_json[src_address]["joinState"]
+ if joinState != "Joined":
+ error = (
+ "[DUT %s]: Verifying join state for"
+ " (%s,%s) [FAILED]!! "
+ " Expected: %s, Found: %s"
+ % (dut, src_address, grp_addr, "Joined", joinState)
+ )
+ errormsg = errormsg + "\n" + str(error)
+ else:
+ logger.info(
+ "[DUT %s]: Verifying join state for"
+ " (%s,%s) [PASSED]!! "
+ " Found Expected: %s",
+ dut,
+ src_address,
+ grp_addr,
+ joinState,
+ )
+
+ # Verify join timer
+ joinTimer = group_addr_json[src_address]["joinTimer"]
+ if not re.match(r"(\d{2}):(\d{2}):(\d{2})", joinTimer):
+ error = (
+ "[DUT %s]: Verifying join timer for"
+ " (%s,%s) [FAILED]!! "
+ " Expected: %s, Found: %s"
+ ) % (
+ dut,
+ src_address,
+ grp_addr,
+ "join timer should be running",
+ joinTimer,
+ )
+ errormsg = errormsg + "\n" + str(error)
+ else:
+ logger.info(
+ "[DUT %s]: Verifying join timer is running"
+ " for (%s,%s) [PASSED]!! "
+ " Found Expected: %s",
+ dut,
+ src_address,
+ grp_addr,
+ joinTimer,
+ )
+
+ if errormsg != "":
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=120, diag_pct=0)
+def verify_mroutes(
+ tgen,
+ dut,
+ src_address,
+ group_addresses,
+ iif,
+ oil,
+ return_uptime=False,
+ mwait=0,
+ addr_type="ipv4",
+ expected=True,
+):
+ """
+ Verify ip mroutes and make sure (*, G)/(S, G) is present in mroutes
+ by running "show ip/ipv6 mroute" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `src_address`: source address
+ * `group_addresses`: IGMP group address
+ * `iif`: Incoming interface
+ * `oil`: Outgoing interface
+ * `return_uptime`: If True, return uptime dict, default is False
+ * `mwait`: Wait time, default is 0
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ dut = "r1"
+ group_address = "225.1.1.1"
+ result = verify_mroutes(tgen, dut, src_address, group_address)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ if not isinstance(group_addresses, list):
+ group_addresses = [group_addresses]
+
+ if not isinstance(iif, list) and iif != "none":
+ iif = [iif]
+
+ if not isinstance(oil, list) and oil != "none":
+ oil = [oil]
+
+ for grp in group_addresses:
+ addr_type = validate_ip_address(grp)
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
+ if return_uptime:
+ logger.info("Sleeping for %s sec..", mwait)
+ sleep(mwait)
+
+ logger.info("[DUT: %s]: Verifying ip mroutes", dut)
+ show_ip_mroute_json = run_frr_cmd(
+ rnode, "show {} mroute json".format(ip_cmd), isjson=True
+ )
+
+ if return_uptime:
+ uptime_dict = {}
+
+    if not show_ip_mroute_json:
+        errormsg = "[DUT %s]: mroutes are not present or flushed out !!" % (dut)
+        return errormsg
+
+ for grp_addr in group_addresses:
+ if grp_addr not in show_ip_mroute_json:
+ errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % (
+ dut,
+ src_address,
+ grp_addr,
+ )
+ return errormsg
+ else:
+ if return_uptime:
+ uptime_dict[grp_addr] = {}
+
+ group_addr_json = show_ip_mroute_json[grp_addr]
+
+ if src_address not in group_addr_json:
+ errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % (
+ dut,
+ src_address,
+ grp_addr,
+ )
+ return errormsg
+ else:
+ if return_uptime:
+ uptime_dict[grp_addr][src_address] = {}
+
+ mroutes = group_addr_json[src_address]
+
+ if mroutes["installed"] != 0:
+ logger.info(
+ "[DUT %s]: mroute (%s,%s) is installed", dut, src_address, grp_addr
+ )
+
+ if "oil" not in mroutes:
+ if oil == "none" and mroutes["iif"] in iif:
+ logger.info(
+ "[DUT %s]: Verifying (%s, %s) mroute,"
+ " [PASSED]!! Found Expected: "
+ "(iif: %s, oil: %s, installed: (%s,%s))",
+ dut,
+ src_address,
+ grp_addr,
+ mroutes["iif"],
+ oil,
+ src_address,
+ grp_addr,
+ )
+ else:
+ errormsg = (
+ "[DUT %s]: Verifying (%s, %s) mroute,"
+ " [FAILED]!! "
+ "Expected: (oil: %s, installed:"
+ " (%s,%s)) Found: ( oil: none, "
+ "installed: (%s,%s))"
+ % (
+ dut,
+ src_address,
+ grp_addr,
+ oil,
+ src_address,
+ grp_addr,
+ src_address,
+ grp_addr,
+ )
+ )
+
+ return errormsg
+
+ else:
+ found = False
+ for route, data in mroutes["oil"].items():
+ if route in oil and route != "pimreg":
+ if (
+ data["source"] == src_address
+ and data["group"] == grp_addr
+ and data["inboundInterface"] in iif
+ and data["outboundInterface"] in oil
+ ):
+ if return_uptime:
+ uptime_dict[grp_addr][src_address] = data["upTime"]
+
+ logger.info(
+ "[DUT %s]: Verifying (%s, %s)"
+ " mroute, [PASSED]!! "
+ "Found Expected: "
+ "(iif: %s, oil: %s, installed:"
+ " (%s,%s)",
+ dut,
+ src_address,
+ grp_addr,
+ data["inboundInterface"],
+ data["outboundInterface"],
+ data["source"],
+ data["group"],
+ )
+ found = True
+ break
+ else:
+ continue
+
+ if not found:
+ errormsg = (
+ "[DUT %s]: Verifying (%s, %s)"
+ " mroute [FAILED]!! "
+ "Expected in: (iif: %s, oil: %s,"
+ " installed: (%s,%s)) Found: "
+ "(iif: %s, oil: %s, "
+ "installed: (%s,%s))"
+ % (
+ dut,
+ src_address,
+ grp_addr,
+ iif,
+ oil,
+ src_address,
+ grp_addr,
+ data["inboundInterface"],
+ data["outboundInterface"],
+ data["source"],
+ data["group"],
+ )
+ )
+ return errormsg
+
+ else:
+ errormsg = "[DUT %s]: mroute (%s,%s) is not installed" % (
+ dut,
+ src_address,
+ grp_addr,
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+    return uptime_dict if return_uptime else True
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_pim_rp_info(
+ tgen,
+ topo,
+ dut,
+ group_addresses,
+ oif=None,
+ rp=None,
+ source=None,
+ iamrp=None,
+ addr_type="ipv4",
+ expected=True,
+):
+ """
+ Verify pim rp info by running "show ip pim rp-info" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: JSON file handler
+ * `dut`: device under test
+ * `group_addresses`: IGMP group address
+ * `oif`: outbound interface name
+ * `rp`: RP address
+ * `source`: Source of RP
+ * `iamrp`: User defined RP
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ dut = "r1"
+ result = verify_pim_rp_info(tgen, topo, dut, group_address,
+ rp=rp, source="BSR")
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+    if oif is not None and type(oif) is not list:
+        oif = [oif]
+
+ for grp in group_addresses:
+ addr_type = validate_ip_address(grp)
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
+ for grp_addr in group_addresses:
+ if rp is None:
+ rp_details = find_rp_details(tgen, topo)
+
+ if dut in rp_details:
+ iamRP = True
+ else:
+ iamRP = False
+ else:
+ if addr_type == "ipv4":
+ show_ip_route_json = run_frr_cmd(
+ rnode, "show ip route connected json", isjson=True
+ )
+ elif addr_type == "ipv6":
+ show_ip_route_json = run_frr_cmd(
+ rnode, "show ipv6 route connected json", isjson=True
+ )
+ for _rp in show_ip_route_json.keys():
+ if rp == _rp.split("/")[0]:
+ iamRP = True
+ break
+ else:
+ iamRP = False
+
+ logger.info("[DUT: %s]: Verifying ip rp info", dut)
+ cmd = "show {} pim rp-info json".format(ip_cmd)
+ show_ip_rp_info_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ if rp not in show_ip_rp_info_json:
+ errormsg = (
+ "[DUT %s]: Verifying rp-info "
+ "for rp_address %s [FAILED]!! " % (dut, rp)
+ )
+ return errormsg
+ else:
+ group_addr_json = show_ip_rp_info_json[rp]
+
+ for rp_json in group_addr_json:
+ if "rpAddress" not in rp_json:
+ errormsg = "[DUT %s]: %s key not " "present in rp-info " % (
+ dut,
+ "rpAddress",
+ )
+ return errormsg
+
+            if oif is not None:
+                if rp_json["outboundInterface"] not in oif:
+ errormsg = (
+ "[DUT %s]: Verifying OIF "
+ "for group %s and RP %s [FAILED]!! "
+ "Expected interfaces: (%s),"
+ " Found: (%s)"
+ % (dut, grp_addr, rp, oif, rp_json["outboundInterface"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Verifying OIF "
+ "for group %s and RP %s [PASSED]!! "
+ "Found Expected: (%s)"
+ % (dut, grp_addr, rp, rp_json["outboundInterface"])
+ )
+
+ if source is not None:
+ if rp_json["source"] != source:
+ errormsg = (
+ "[DUT %s]: Verifying SOURCE "
+ "for group %s and RP %s [FAILED]!! "
+ "Expected: (%s),"
+ " Found: (%s)" % (dut, grp_addr, rp, source, rp_json["source"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Verifying SOURCE "
+ "for group %s and RP %s [PASSED]!! "
+ "Found Expected: (%s)" % (dut, grp_addr, rp, rp_json["source"])
+ )
+
+ if rp_json["group"] == grp_addr and iamrp is not None:
+ if iamRP:
+ if rp_json["iAmRP"]:
+ logger.info(
+ "[DUT %s]: Verifying group "
+ "and iAmRP [PASSED]!!"
+ " Found Expected: (%s, %s:%s)",
+ dut,
+ grp_addr,
+ "iAmRP",
+ rp_json["iAmRP"],
+ )
+ else:
+ errormsg = (
+ "[DUT %s]: Verifying group"
+ "%s and iAmRP [FAILED]!! "
+ "Expected: (iAmRP: %s),"
+ " Found: (iAmRP: %s)"
+ % (dut, grp_addr, "true", rp_json["iAmRP"])
+ )
+ return errormsg
+
+ if not iamRP:
+ if rp_json["iAmRP"] == False:
+ logger.info(
+ "[DUT %s]: Verifying group "
+ "and iAmNotRP [PASSED]!!"
+ " Found Expected: (%s, %s:%s)",
+ dut,
+ grp_addr,
+ "iAmRP",
+ rp_json["iAmRP"],
+ )
+ else:
+ errormsg = (
+ "[DUT %s]: Verifying group"
+ "%s and iAmRP [FAILED]!! "
+ "Expected: (iAmRP: %s),"
+ " Found: (iAmRP: %s)"
+ % (dut, grp_addr, "false", rp_json["iAmRP"])
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_pim_state(
+ tgen,
+ dut,
+ iif,
+ oil,
+ group_addresses,
+ src_address=None,
+ installed_fl=None,
+ addr_type="ipv4",
+ expected=True,
+):
+ """
+ Verify pim state by running "show ip pim state" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `iif`: inbound interface
+ * `oil`: outbound interface
+ * `group_addresses`: IGMP group address
+ * `src_address`: source address, default = None
+    * `installed_fl` : Installed flag
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ dut = "r1"
+ iif = "r1-r3-eth1"
+ oil = "r1-r0-eth0"
+ group_address = "225.1.1.1"
+ result = verify_pim_state(tgen, dut, iif, oil, group_address)
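+
+    # Sketch: verify an (S, G) entry (the source address is illustrative)
+    result = verify_pim_state(tgen, dut, iif, oil, group_address,
+                              src_address="10.0.0.2")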
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying pim state", dut)
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ for grp in group_addresses:
+ addr_type = validate_ip_address(grp)
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
+ logger.info("[DUT: %s]: Verifying pim state", dut)
+ show_pim_state_json = run_frr_cmd(
+ rnode, "show {} pim state json".format(ip_cmd), isjson=True
+ )
+
+ if installed_fl is None:
+ installed_fl = 1
+
+ for grp_addr in group_addresses:
+        if src_address is None:
+            src_address = "*"
+        pim_state_json = show_pim_state_json[grp_addr][src_address]
+
+ if pim_state_json["installed"] == installed_fl:
+ logger.info(
+ "[DUT %s]: group %s is installed flag: %s",
+ dut,
+ grp_addr,
+ pim_state_json["installed"],
+ )
+ for interface, data in pim_state_json[iif].items():
+ if interface != oil:
+ continue
+
+ # Verify iif, oil and installed state
+ if (
+ data["group"] == grp_addr
+ and data["installed"] == installed_fl
+ and data["inboundInterface"] == iif
+ and data["outboundInterface"] == oil
+ ):
+ logger.info(
+ "[DUT %s]: Verifying pim state for group"
+ " %s [PASSED]!! Found Expected: "
+ "(iif: %s, oil: %s, installed: %s) ",
+ dut,
+ grp_addr,
+ data["inboundInterface"],
+ data["outboundInterface"],
+ data["installed"],
+ )
+                else:
+                    errormsg = (
+                        "[DUT %s]: Verifying pim state for group"
+                        " %s, [FAILED]!! Expected: "
+                        "(iif: %s, oil: %s, installed: %s) "
+                        "Found: (iif: %s, oil: %s, installed: %s)"
+                        % (
+                            dut,
+                            grp_addr,
+                            iif,
+                            oil,
+                            installed_fl,
+                            data["inboundInterface"],
+                            data["outboundInterface"],
+                            data["installed"],
+                        )
+                    )
+ return errormsg
+ else:
+ errormsg = "[DUT %s]: %s install flag value not as expected" % (
+ dut,
+ grp_addr,
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+def get_pim_interface_traffic(tgen, input_dict):
+ """
+ get ip pim interface traffic by running
+ "show ip pim interface traffic" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict(dict)`: defines DUT, what and from which interfaces
+ traffic needs to be retrieved
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "r1-r0-eth0": {
+ "helloRx": 0,
+ "helloTx": 1,
+ "joinRx": 0,
+ "joinTx": 0
+ }
+ }
+ }
+
+ result = get_pim_interface_traffic(tgen, input_dict)
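+
+    # Sketch: snapshot the counters around an event and compare
+    state_before = get_pim_interface_traffic(tgen, input_dict)
+    # ... trigger the event under test here ...
+    state_after = get_pim_interface_traffic(tgen, input_dict)
+    assert state_after["r1"]["helloTx"] >= state_before["r1"]["helloTx"]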
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ output_dict = {}
+ for dut in input_dict.keys():
+ if dut not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying pim interface traffic", dut)
+
+ def show_pim_intf_traffic(rnode, dut, input_dict, output_dict):
+ show_pim_intf_traffic_json = run_frr_cmd(
+ rnode, "show ip pim interface traffic json", isjson=True
+ )
+
+ output_dict[dut] = {}
+ for intf, data in input_dict[dut].items():
+ interface_json = show_pim_intf_traffic_json[intf]
+ for state in data:
+ # Verify Tx/Rx
+ if state in interface_json:
+ output_dict[dut][state] = interface_json[state]
+ else:
+ errormsg = (
+ "[DUT %s]: %s is not present"
+ "for interface %s [FAILED]!! " % (dut, state, intf)
+ )
+ return errormsg
+ return None
+
+ test_func = functools.partial(
+ show_pim_intf_traffic, rnode, dut, input_dict, output_dict
+ )
+ (result, out) = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ if not result:
+ return out
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return output_dict
+
+
+def get_pim6_interface_traffic(tgen, input_dict):
+ """
+ get ipv6 pim interface traffic by running
+ "show ipv6 pim interface traffic" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict(dict)`: defines DUT, what and from which interfaces
+ traffic needs to be retrieved
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "r1-r0-eth0": {
+ "helloRx": 0,
+ "helloTx": 1,
+ "joinRx": 0,
+ "joinTx": 0
+ }
+ }
+ }
+
+    result = get_pim6_interface_traffic(tgen, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ output_dict = {}
+ for dut in input_dict.keys():
+ if dut not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying pim interface traffic", dut)
+
+ def show_pim_intf_traffic(rnode, dut, input_dict, output_dict):
+ show_pim_intf_traffic_json = run_frr_cmd(
+ rnode, "show ipv6 pim interface traffic json", isjson=True
+ )
+
+ output_dict[dut] = {}
+ for intf, data in input_dict[dut].items():
+ interface_json = show_pim_intf_traffic_json[intf]
+ for state in data:
+ # Verify Tx/Rx
+ if state in interface_json:
+ output_dict[dut][state] = interface_json[state]
+ else:
+ errormsg = (
+ "[DUT %s]: %s is not present"
+ "for interface %s [FAILED]!! " % (dut, state, intf)
+ )
+ return errormsg
+ return None
+
+ test_func = functools.partial(
+ show_pim_intf_traffic, rnode, dut, input_dict, output_dict
+ )
+ (result, out) = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ if not result:
+ return out
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return output_dict
+
+
+@retry(retry_timeout=40, diag_pct=0)
+def verify_pim_interface(
+ tgen, topo, dut, interface=None, interface_ip=None, addr_type="ipv4", expected=True
+):
+ """
+ Verify all PIM interface are up and running, config is verified
+ using "show ip pim interface" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo` : json file data
+ * `dut` : device under test
+ * `interface` : interface name
+ * `interface_ip` : interface ip address
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+    result = verify_pim_interface(tgen, topo, dut, interface="ens192",
+                                  interface_ip="20.1.1.1")
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router in tgen.routers():
+ if router != dut:
+ continue
+
+ logger.info("[DUT: %s]: Verifying PIM interface status:", dut)
+
+ rnode = tgen.routers()[dut]
+
+ if addr_type == "ipv4":
+ addr_cmd = "ip"
+ pim_cmd = "pim"
+ elif addr_type == "ipv6":
+ addr_cmd = "ipv6"
+ pim_cmd = "pim6"
+ show_pim_interface_json = rnode.vtysh_cmd(
+ "show {} pim interface json".format(addr_cmd), isjson=True
+ )
+
+ logger.info("show_pim_interface_json: \n %s", show_pim_interface_json)
+
+ if interface_ip:
+ if interface in show_pim_interface_json:
+ pim_intf_json = show_pim_interface_json[interface]
+ if pim_intf_json["address"] != interface_ip:
+ errormsg = (
+ "[DUT %s]: %s interface "
+ "%s is not correct "
+ "[FAILED]!! Expected : %s, Found : %s"
+ % (
+ dut,
+ pim_cmd,
+ addr_cmd,
+ pim_intf_json["address"],
+ interface_ip,
+ )
+ )
+ return errormsg
+ else:
+ logger.info(
+ "[DUT %s]: %s interface "
+ "%s is correct "
+ "[Passed]!! Expected : %s, Found : %s"
+ % (
+ dut,
+ pim_cmd,
+ addr_cmd,
+ pim_intf_json["address"],
+ interface_ip,
+ )
+ )
+ return True
+ else:
+ for destLink, data in topo["routers"][dut]["links"].items():
+ if "type" in data and data["type"] == "loopback":
+ continue
+
+ if pim_cmd in data and data[pim_cmd] == "enable":
+ pim_interface = data["interface"]
+ pim_intf_ip = data[addr_type].split("/")[0]
+
+ if pim_interface in show_pim_interface_json:
+ pim_intf_json = show_pim_interface_json[pim_interface]
+ else:
+ errormsg = (
+ "[DUT %s]: %s interface: %s "
+ "PIM interface %s: %s, not Found"
+ % (dut, pim_cmd, pim_interface, addr_cmd, pim_intf_ip)
+ )
+ return errormsg
+
+ # Verifying PIM interface
+                    if (
+                        pim_intf_json["address"] != pim_intf_ip
+                        or pim_intf_json["state"] != "up"
+                    ):
+ errormsg = (
+ "[DUT %s]: %s interface: %s "
+ "PIM interface %s: %s, status check "
+ "[FAILED]!! Expected : %s, Found : %s"
+ % (
+ dut,
+ pim_cmd,
+ pim_interface,
+ addr_cmd,
+ pim_intf_ip,
+ pim_interface,
+ pim_intf_json["state"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: %s interface: %s, "
+ "interface %s: %s, status: %s"
+ " [PASSED]!!",
+ dut,
+ pim_cmd,
+ pim_interface,
+ addr_cmd,
+ pim_intf_ip,
+ pim_intf_json["state"],
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+def clear_pim_interface_traffic(tgen, topo):
+ """
+ Clear ip pim interface traffic by running
+ "clear ip pim interface traffic" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ Usage
+ -----
+
+ result = clear_pim_interface_traffic(tgen, topo)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for dut in tgen.routers():
+ if "pim" not in topo["routers"][dut]:
+ continue
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Clearing pim interface traffic", dut)
+ result = run_frr_cmd(rnode, "clear ip pim interface traffic")
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return True
+
+
+def clear_pim6_interface_traffic(tgen, topo):
+ """
+ Clear ipv6 pim interface traffic by running
+ "clear ipv6 pim interface traffic" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ Usage
+ -----
+
+ result = clear_pim6_interface_traffic(tgen, topo)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for dut in tgen.routers():
+ if "pim" not in topo["routers"][dut]:
+ continue
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Clearing pim6 interface traffic", dut)
+ result = run_frr_cmd(rnode, "clear ipv6 pim interface traffic")
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return True
+
+
+def clear_pim6_interfaces(tgen, topo):
+ """
+ Clear ipv6 pim interface by running
+ "clear ipv6 pim interface" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ Usage
+ -----
+
+ result = clear_pim6_interfaces(tgen, topo)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for dut in tgen.routers():
+ if "pim" not in topo["routers"][dut]:
+ continue
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Clearing pim6 interfaces", dut)
+ result = run_frr_cmd(rnode, "clear ipv6 pim interface")
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return True
+
+
+def clear_pim_interfaces(tgen, dut):
+ """
+ Clear ip/ipv6 pim interface by running
+ "clear ip/ipv6 pim interfaces" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: Device Under Test
+ Usage
+ -----
+
+ result = clear_pim_interfaces(tgen, dut)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ nh_before_clear = {}
+ nh_after_clear = {}
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verify pim neighbor before pim" " neighbor clear", dut)
+ # To add uptime initially
+ sleep(10)
+ run_json_before = run_frr_cmd(rnode, "show ip pim neighbor json", isjson=True)
+
+ for key, value in run_json_before.items():
+ if bool(value):
+ for _key, _value in value.items():
+ nh_before_clear[key] = _value["upTime"]
+
+ # Clearing PIM neighbors
+ logger.info("[DUT: %s]: Clearing pim interfaces", dut)
+ run_frr_cmd(rnode, "clear ip pim interfaces")
+
+ logger.info("[DUT: %s]: Verify pim neighbor after pim" " neighbor clear", dut)
+
+ found = False
+
+ # Waiting for maximum 60 sec
+ fail_intf = []
+ for retry in range(1, 13):
+ sleep(5)
+ logger.info("[DUT: %s]: Waiting for 5 sec for PIM neighbors" " to come up", dut)
+ run_json_after = run_frr_cmd(rnode, "show ip pim neighbor json", isjson=True)
+ found = True
+ for pim_intf in nh_before_clear.keys():
+ if pim_intf not in run_json_after or not run_json_after[pim_intf]:
+ found = False
+ fail_intf.append(pim_intf)
+
+ if found is True:
+ break
+    else:
+        errormsg = (
+            "[DUT: %s]: pim neighborship is not formed for %s"
+            " after clear ip pim interfaces [FAILED!!]" % (dut, fail_intf)
+        )
+        return errormsg
+
+ for key, value in run_json_after.items():
+ if bool(value):
+ for _key, _value in value.items():
+ nh_after_clear[key] = _value["upTime"]
+
+ # Verify uptime for neighbors
+ for pim_intf in nh_before_clear.keys():
+ d1 = datetime.datetime.strptime(nh_before_clear[pim_intf], "%H:%M:%S")
+ d2 = datetime.datetime.strptime(nh_after_clear[pim_intf], "%H:%M:%S")
+        if d2 >= d1:
+            errormsg = (
+                "[DUT: %s]: PIM neighborship is not cleared for"
+                " interface %s [FAILED!!]" % (dut, pim_intf)
+            )
+            return errormsg
+
+ logger.info("[DUT: %s]: PIM neighborship is cleared [PASSED!!]")
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return True
+
+
+def clear_igmp_interfaces(tgen, dut):
+ """
+ Clear ip/ipv6 igmp interfaces by running
+ "clear ip/ipv6 igmp interfaces" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+
+ Usage
+ -----
+ dut = "r1"
+ result = clear_igmp_interfaces(tgen, dut)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ group_before_clear = {}
+ group_after_clear = {}
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: IGMP group uptime before clear" " igmp groups:", dut)
+ igmp_json = run_frr_cmd(rnode, "show ip igmp groups json", isjson=True)
+
+ total_groups_before_clear = igmp_json["totalGroups"]
+
+ for key, value in igmp_json.items():
+ if type(value) is not dict:
+ continue
+
+ groups = value["groups"]
+ group = groups[0]["group"]
+ uptime = groups[0]["uptime"]
+ group_before_clear[group] = uptime
+
+ logger.info("[DUT: %s]: Clearing ip igmp interfaces", dut)
+ result = run_frr_cmd(rnode, "clear ip igmp interfaces")
+
+ # Waiting for maximum 60 sec
+ for retry in range(1, 13):
+ logger.info(
+ "[DUT: %s]: Waiting for 5 sec for igmp interfaces" " to come up", dut
+ )
+ sleep(5)
+ igmp_json = run_frr_cmd(rnode, "show ip igmp groups json", isjson=True)
+
+ total_groups_after_clear = igmp_json["totalGroups"]
+
+ if total_groups_before_clear == total_groups_after_clear:
+ break
+
+ for key, value in igmp_json.items():
+ if type(value) is not dict:
+ continue
+
+ groups = value["groups"]
+ group = groups[0]["group"]
+ uptime = groups[0]["uptime"]
+ group_after_clear[group] = uptime
+
+ # Verify uptime for groups
+ for group in group_before_clear.keys():
+ d1 = datetime.datetime.strptime(group_before_clear[group], "%H:%M:%S")
+ d2 = datetime.datetime.strptime(group_after_clear[group], "%H:%M:%S")
+        if d2 >= d1:
+            errormsg = "[DUT: %s]: IGMP group is not cleared [FAILED!!]" % (dut)
+            return errormsg
+
+ logger.info("[DUT: %s]: IGMP group is cleared [PASSED!!]")
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return True
+
+
+@retry(retry_timeout=20)
+def clear_mroute_verify(tgen, dut, expected=True):
+ """
+ Clear ip/ipv6 mroute by running "clear ip/ipv6 mroute" cli and verify
+ mroutes are up again after mroute clear
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: Device Under Test
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+
+ result = clear_mroute_verify(tgen, dut)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ mroute_before_clear = {}
+ mroute_after_clear = {}
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: IP mroutes uptime before clear", dut)
+ mroute_json_1 = run_frr_cmd(rnode, "show ip mroute json", isjson=True)
+
+ for group in mroute_json_1.keys():
+ mroute_before_clear[group] = {}
+ for key in mroute_json_1[group].keys():
+ for _key, _value in mroute_json_1[group][key]["oil"].items():
+ if _key != "pimreg":
+ mroute_before_clear[group][key] = _value["upTime"]
+
+ logger.info("[DUT: %s]: Clearing ip mroute", dut)
+ result = run_frr_cmd(rnode, "clear ip mroute")
+
+ # RFC 3376: 8.2. Query Interval - Default: 125 seconds
+ # So waiting for maximum 130 sec to get the igmp report
+ for retry in range(1, 26):
+ logger.info("[DUT: %s]: Waiting for 2 sec for mroutes" " to come up", dut)
+ sleep(5)
+ keys_json1 = mroute_json_1.keys()
+ mroute_json_2 = run_frr_cmd(rnode, "show ip mroute json", isjson=True)
+
+ if bool(mroute_json_2):
+ keys_json2 = mroute_json_2.keys()
+
+            for group in mroute_json_2.keys():
+                flag = False
+                for key in mroute_json_2[group].keys():
+                    if "oil" not in mroute_json_2[group][key]:
+                        continue
+
+                    for _key, _value in mroute_json_2[group][key]["oil"].items():
+                        if _key != "pimreg" and keys_json1 == keys_json2:
+                            flag = True
+                            break
+                    if flag:
+                        break
+                if flag:
+                    break
+            else:
+                continue
+            break
+
+ for group in mroute_json_2.keys():
+ mroute_after_clear[group] = {}
+ for key in mroute_json_2[group].keys():
+ for _key, _value in mroute_json_2[group][key]["oil"].items():
+ if _key != "pimreg":
+ mroute_after_clear[group][key] = _value["upTime"]
+
+ # Verify uptime for mroute
+ for group in mroute_before_clear.keys():
+ for source in mroute_before_clear[group].keys():
+ if set(mroute_before_clear[group]) != set(mroute_after_clear[group]):
+ errormsg = (
+ "[DUT: %s]: mroute (%s, %s) has not come"
+ " up after mroute clear [FAILED!!]" % (dut, source, group)
+ )
+ return errormsg
+
+ d1 = datetime.datetime.strptime(
+ mroute_before_clear[group][source], "%H:%M:%S"
+ )
+ d2 = datetime.datetime.strptime(
+ mroute_after_clear[group][source], "%H:%M:%S"
+ )
+            if d2 >= d1:
+                errormsg = "[DUT: %s]: IP mroute is not cleared [FAILED!!]" % (dut)
+                return errormsg
+
+ logger.info("[DUT: %s]: IP mroute is cleared [PASSED!!]", dut)
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return True
+
+
+def clear_mroute(tgen, dut=None):
+ """
+ Clear ip/ipv6 mroute by running "clear ip mroute" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test, default None
+
+ Usage
+ -----
+ clear_mroute(tgen, dut)
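+
+    # Omitting dut clears mroutes on every router in the topology
+    clear_mroute(tgen)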
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ router_list = tgen.routers()
+ for router, rnode in router_list.items():
+ if dut is not None and router != dut:
+ continue
+
+ logger.debug("[DUT: %s]: Clearing ip mroute", router)
+ rnode.vtysh_cmd("clear ip mroute")
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+
+def clear_pim6_mroute(tgen, dut=None):
+ """
+ Clear ipv6 mroute by running "clear ipv6 mroute" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test, default None
+
+ Usage
+ -----
+    clear_pim6_mroute(tgen, dut)
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ router_list = tgen.routers()
+ for router, rnode in router_list.items():
+ if dut is not None and router != dut:
+ continue
+
+ logger.debug("[DUT: %s]: Clearing ipv6 mroute", router)
+ rnode.vtysh_cmd("clear ipv6 mroute")
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return True
+
+
+def reconfig_interfaces(tgen, topo, senderRouter, receiverRouter, packet=None):
+ """
+ Configure interface ip for sender and receiver routers
+ as per bsr packet
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `senderRouter` : Sender router
+ * `receiverRouter` : Receiver router
+ * `packet` : BSR packet in raw format
+
+ Returns
+ -------
+ True or False
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ try:
+ config_data = []
+
+ src_ip = topo["routers"][senderRouter]["bsm"]["bsr_packets"][packet]["src_ip"]
+ dest_ip = topo["routers"][senderRouter]["bsm"]["bsr_packets"][packet]["dest_ip"]
+
+ for destLink, data in topo["routers"][senderRouter]["links"].items():
+ if "type" in data and data["type"] == "loopback":
+ continue
+
+ if "pim" in data and data["pim"] == "enable":
+ sender_interface = data["interface"]
+ sender_interface_ip = data["ipv4"]
+
+ config_data.append("interface {}".format(sender_interface))
+ config_data.append("no ip address {}".format(sender_interface_ip))
+ config_data.append("ip address {}".format(src_ip))
+
+ result = create_common_configuration(
+ tgen, senderRouter, config_data, "interface_config"
+ )
+ if result is not True:
+ return False
+
+ config_data = []
+ links = topo["routers"][destLink]["links"]
+ pim_neighbor = {key: links[key] for key in [senderRouter]}
+
+ data = pim_neighbor[senderRouter]
+ if "type" in data and data["type"] == "loopback":
+ continue
+
+ if "pim" in data and data["pim"] == "enable":
+ receiver_interface = data["interface"]
+ receiver_interface_ip = data["ipv4"]
+
+ config_data.append("interface {}".format(receiver_interface))
+ config_data.append("no ip address {}".format(receiver_interface_ip))
+ config_data.append("ip address {}".format(dest_ip))
+
+ result = create_common_configuration(
+ tgen, receiverRouter, config_data, "interface_config"
+ )
+ if result is not True:
+ return False
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: reconfig_interfaces()")
+ return result
+
+
+def add_rp_interfaces_and_pim_config(tgen, topo, interface, rp, rp_mapping):
+ """
+ Add physical interfaces tp RP for all the RPs
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `interface` : RP interface
+ * `rp` : rp for given topology
+ * `rp_mapping` : dictionary of all groups and RPs
+
+ Returns
+ -------
+ True or False
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ try:
+ config_data = []
+
+ for group, rp_list in rp_mapping.items():
+ for _rp in rp_list:
+ config_data.append("interface {}".format(interface))
+ config_data.append("ip address {}".format(_rp))
+ config_data.append("ip pim")
+
+ # Why not config just once, why per group?
+ result = create_common_configuration(
+ tgen, rp, config_data, "interface_config"
+ )
+ if result is not True:
+ return False
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+def scapy_send_bsr_raw_packet(tgen, topo, senderRouter, receiverRouter, packet=None):
+ """
+ Using scapy Raw() method to send BSR raw packet from one FRR
+ to other
+
+ Parameters:
+ -----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `senderRouter` : Sender router
+ * `receiverRouter` : Receiver router
+ * `packet` : BSR packet in raw format
+
+ returns:
+ --------
+ errormsg or True
+ """
+
+ global CWD
+ result = ""
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ python3_path = tgen.net.get_exec_path(["python3", "python"])
+ script_path = os.path.join(CWD, "send_bsr_packet.py")
+ node = tgen.net[senderRouter]
+
+ for destLink, data in topo["routers"][senderRouter]["links"].items():
+ if "type" in data and data["type"] == "loopback":
+ continue
+
+ if "pim" in data and data["pim"] == "enable":
+ sender_interface = data["interface"]
+
+ packet = topo["routers"][senderRouter]["bsm"]["bsr_packets"][packet]["data"]
+
+ cmd = [
+ python3_path,
+ script_path,
+ packet,
+ sender_interface,
+ "--interval=1",
+ "--count=1",
+ ]
+ logger.info("Scapy cmd: \n %s", cmd)
+ node.cmd_raises(cmd)
+
+ logger.debug("Exiting lib API: scapy_send_bsr_raw_packet")
+ return True
+
+
+def find_rp_from_bsrp_info(tgen, dut, bsr, grp=None):
+ """
+    Find which RP has the lowest priority and return that RP's IP
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `bsr`: BSR address
+ * 'grp': Group Address
+
+ Usage
+ -----
+ dut = "r1"
+    result = find_rp_from_bsrp_info(tgen, dut, bsr)
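+
+    # Sketch: look up the elected RP for one group
+    # (the group key format and addresses are illustrative)
+    rp_map = find_rp_from_bsrp_info(tgen, dut, bsr, grp="225.1.1.1/32")
+    if rp_map:
+        elected_rp = rp_map["225.1.1.1/32"]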
+
+ Returns:
+ dictionary: group and RP, which has to be installed as per
+ lowest priority or highest priority
+ """
+
+ rp_details = {}
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Fetching rp details from bsrp-info", dut)
+ bsrp_json = run_frr_cmd(rnode, "show ip pim bsrp-info json", isjson=True)
+
+ if grp not in bsrp_json:
+ return {}
+
+ for group, rp_data in bsrp_json.items():
+ if group == "BSR Address" and bsrp_json["BSR Address"] == bsr:
+ continue
+
+ if group != grp:
+ continue
+
+ rp_priority = {}
+ rp_hash = {}
+
+ for rp, value in rp_data.items():
+ if rp == "Pending RP count":
+ continue
+ rp_priority[value["Rp Address"]] = value["Rp Priority"]
+ rp_hash[value["Rp Address"]] = value["Hash Val"]
+
+ priority_dict = dict(zip(rp_priority.values(), rp_priority.keys()))
+ hash_dict = dict(zip(rp_hash.values(), rp_hash.keys()))
+
+ # RP with lowest priority
+ if len(priority_dict) != 1:
+ rp_p, lowest_priority = sorted(rp_priority.items(), key=lambda x: x[1])[0]
+ rp_details[group] = rp_p
+
+ # RP with highest hash value
+ if len(priority_dict) == 1:
+ rp_h, highest_hash = sorted(rp_hash.items(), key=lambda x: x[1])[-1]
+ rp_details[group] = rp_h
+
+ # RP with highest IP address
+ if len(priority_dict) == 1 and len(hash_dict) == 1:
+ rp_details[group] = sorted(rp_priority.keys())[-1]
+
+ return rp_details
+
+
+@retry(retry_timeout=12)
+def verify_pim_grp_rp_source(
+ tgen, topo, dut, grp_addr, rp_source, rpadd=None, expected=True
+):
+ """
+ Verify pim rp info by running "show ip pim rp-info" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: JSON file handler
+ * `dut`: device under test
+ * `grp_addr`: IGMP group address
+ * 'rp_source': source from which rp installed
+ * 'rpadd': rp address
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ dut = "r1"
+ group_address = "225.1.1.1"
+ rp_source = "BSR"
+    result = verify_pim_grp_rp_source(tgen, topo, dut, group_address, rp_source)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying ip rp info", dut)
+ show_ip_rp_info_json = run_frr_cmd(rnode, "show ip pim rp-info json", isjson=True)
+
+    if rpadd is not None:
+ rp_json = show_ip_rp_info_json[rpadd]
+ if rp_json[0]["group"] == grp_addr:
+ if rp_json[0]["source"] == rp_source:
+ logger.info(
+ "[DUT %s]: Verifying Group and rp_source [PASSED]"
+ "Found Expected: %s, %s"
+ % (dut, rp_json[0]["group"], rp_json[0]["source"])
+ )
+ return True
+ else:
+ errormsg = (
+ "[DUT %s]: Verifying Group and rp_source [FAILED]"
+ "Expected (%s, %s) "
+ "Found (%s, %s)"
+ % (
+ dut,
+ grp_addr,
+ rp_source,
+ rp_json[0]["group"],
+ rp_json[0]["source"],
+ )
+ )
+ return errormsg
+ errormsg = (
+ "[DUT %s]: Verifying Group and rp_source [FAILED]"
+ "Expected: %s, %s but not found" % (dut, grp_addr, rp_source)
+ )
+ return errormsg
+
+ for rp in show_ip_rp_info_json:
+ rp_json = show_ip_rp_info_json[rp]
+ logger.info("%s", rp_json)
+ if rp_json[0]["group"] == grp_addr:
+ if rp_json[0]["source"] == rp_source:
+ logger.info(
+ "[DUT %s]: Verifying Group and rp_source [PASSED]"
+ "Found Expected: %s, %s"
+ % (dut, rp_json[0]["group"], rp_json[0]["source"])
+ )
+ return True
+ else:
+ errormsg = (
+ "[DUT %s]: Verifying Group and rp_source [FAILED]"
+ "Expected (%s, %s) "
+ "Found (%s, %s)"
+ % (
+ dut,
+ grp_addr,
+ rp_source,
+ rp_json[0]["group"],
+ rp_json[0]["source"],
+ )
+ )
+ return errormsg
+
+ errormsg = (
+ "[DUT %s]: Verifying Group and rp_source [FAILED]"
+ "Expected: %s, %s but not found" % (dut, grp_addr, rp_source)
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return errormsg
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_pim_bsr(tgen, topo, dut, bsr_ip, expected=True):
+ """
+    Verify the elected PIM BSR address, verified
+    using "show ip pim bsr" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo` : json file data
+ * `dut` : device under test
+    * `bsr_ip` : BSR ip to be verified
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ result = verify_pim_bsr(tgen, topo, dut, bsr_ip)
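+
+    # bsr_ip is typically the candidate-BSR lo address, e.g. (sketch):
+    bsr_ip = topo["routers"]["b1"]["links"]["lo"]["ipv4"].split("/")[0]
+    result = verify_pim_bsr(tgen, topo, dut, bsr_ip)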
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router in tgen.routers():
+ if router != dut:
+ continue
+
+ logger.info("[DUT: %s]: Verifying PIM bsr status:", dut)
+
+ rnode = tgen.routers()[dut]
+ pim_bsr_json = rnode.vtysh_cmd("show ip pim bsr json", isjson=True)
+
+ logger.info("show_ip_pim_bsr_json: \n %s", pim_bsr_json)
+
+ # Verifying PIM bsr
+ if pim_bsr_json["bsr"] != bsr_ip:
+ errormsg = (
+ "[DUT %s]:"
+ "bsr status: not found"
+ "[FAILED]!! Expected : %s, Found : %s"
+ % (dut, bsr_ip, pim_bsr_json["bsr"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]:" " bsr status: found, Address :%s" " [PASSED]!!",
+ dut,
+ pim_bsr_json["bsr"],
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_pim_upstream_rpf(
+ tgen, topo, dut, interface, group_addresses, rp=None, expected=True
+):
+ """
+ Verify IP/IPv6 PIM upstream rpf, config is verified
+ using "show ip/ipv6 pim neighbor" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo` : json file data
+    * `dut` : device under test
+ * `interface` : upstream interface
+ * `group_addresses` : list of group address for which upstream info
+ needs to be checked
+ * `rp` : RP address
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+    result = verify_pim_upstream_rpf(tgen, topo, dut, interface,
+                                     group_addresses, rp=None)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if "pim" in topo["routers"][dut]:
+ logger.info("[DUT: %s]: Verifying ip pim upstream rpf:", dut)
+
+ rnode = tgen.routers()[dut]
+ show_ip_pim_upstream_rpf_json = rnode.vtysh_cmd(
+ "show ip pim upstream-rpf json", isjson=True
+ )
+
+ logger.info(
+ "show_ip_pim_upstream_rpf_json: \n %s", show_ip_pim_upstream_rpf_json
+ )
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ for grp_addr in group_addresses:
+ for destLink, data in topo["routers"][dut]["links"].items():
+ if "type" in data and data["type"] == "loopback":
+ continue
+
+ if "pim" not in topo["routers"][destLink]:
+ continue
+
+ # Verify RP info
+ if rp is None:
+ rp_details = find_rp_details(tgen, topo)
+ else:
+ rp_details = {dut: rp}
+
+ if dut in rp_details:
+ pim_nh_intf_ip = topo["routers"][dut]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0]
+ else:
+ if destLink not in interface:
+ continue
+
+ links = topo["routers"][destLink]["links"]
+ pim_neighbor = {key: links[key] for key in [dut]}
+
+ data = pim_neighbor[dut]
+ if "pim" in data and data["pim"] == "enable":
+ pim_nh_intf_ip = data["ipv4"].split("/")[0]
+
+ upstream_rpf_json = show_ip_pim_upstream_rpf_json[grp_addr]["*"]
+
+ # Verifying ip pim upstream rpf
+ if (
+ upstream_rpf_json["rpfInterface"] == interface
+ and upstream_rpf_json["ribNexthop"] != pim_nh_intf_ip
+ ):
+ errormsg = (
+ "[DUT %s]: Verifying group: %s, "
+ "rpf interface: %s, "
+ " rib Nexthop check [FAILED]!!"
+ "Expected: %s, Found: %s"
+ % (
+ dut,
+ grp_addr,
+ interface,
+ pim_nh_intf_ip,
+ upstream_rpf_json["ribNexthop"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Verifying group: %s,"
+ " rpf interface: %s, "
+ " rib Nexthop: %s [PASSED]!!",
+ dut,
+ grp_addr,
+ interface,
+ pim_nh_intf_ip,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+def enable_disable_pim_unicast_bsm(tgen, router, intf, enable=True):
+ """
+ Helper API to enable or disable pim bsm on interfaces
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `router` : router id to be configured.
+ * `intf` : Interface to be configured
+ * `enable` : this flag denotes if config should be enabled or disabled
+
+ Returns
+ -------
+ True or False
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ try:
+ config_data = []
+ cmd = "interface {}".format(intf)
+ config_data.append(cmd)
+
+        if enable is True:
+ config_data.append("ip pim unicast-bsm")
+ else:
+ config_data.append("no ip pim unicast-bsm")
+
+ result = create_common_configuration(
+ tgen, router, config_data, "interface_config", build=False
+ )
+ if result is not True:
+ return False
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+def enable_disable_pim_bsm(tgen, router, intf, enable=True):
+ """
+ Helper API to enable or disable pim bsm on interfaces
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `router` : router id to be configured.
+ * `intf` : Interface to be configured
+ * `enable` : this flag denotes if config should be enabled or disabled
+
+ Returns
+ -------
+ True or False
+ """
+ result = False
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ try:
+ config_data = []
+ cmd = "interface {}".format(intf)
+ config_data.append(cmd)
+
+ if enable is True:
+ config_data.append("ip pim bsm")
+ else:
+ config_data.append("no ip pim bsm")
+
+ result = create_common_configuration(
+ tgen, router, config_data, "interface_config", build=False
+ )
+ if result is not True:
+ return False
+
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_pim_join(
+ tgen,
+ topo,
+ dut,
+ interface,
+ group_addresses,
+ src_address=None,
+ addr_type="ipv4",
+ expected=True,
+):
+ """
+ Verify ip/ipv6 pim join by running "show ip/ipv6 pim join" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo`: JSON file handler
+ * `dut`: device under test
+ * `interface`: interface name, from which PIM join would come
+ * `group_addresses`: IGMP group address
+ * `src_address`: Source address
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ dut = "r1"
+ interface = "r1-r0-eth0"
+ group_address = "225.1.1.1"
+    result = verify_pim_join(tgen, topo, dut, interface, group_address)
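+
+    # Sketch: verify an (S, G) join (the source address is illustrative)
+    result = verify_pim_join(tgen, topo, dut, interface, group_address,
+                             src_address="10.0.0.2")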
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying pim join", dut)
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ for grp in group_addresses:
+ addr_type = validate_ip_address(grp)
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
+ show_pim_join_json = run_frr_cmd(
+ rnode, "show {} pim join json".format(ip_cmd), isjson=True
+ )
+
+ for grp_addr in group_addresses:
+ # Verify if IGMP is enabled in DUT
+ if "igmp" not in topo["routers"][dut]:
+ pim_join = True
+ else:
+ pim_join = False
+
+ interface_json = show_pim_join_json[interface]
+
+ grp_addr = grp_addr.split("/")[0]
+ for source, data in interface_json[grp_addr].items():
+ # Verify pim join
+ if pim_join:
+ if data["group"] == grp_addr and data["channelJoinName"] == "JOIN":
+ logger.info(
+ "[DUT %s]: Verifying pim join for group: %s"
+ "[PASSED]!! Found Expected: (%s)",
+ dut,
+ grp_addr,
+ data["channelJoinName"],
+ )
+ else:
+ errormsg = (
+ "[DUT %s]: Verifying pim join for group: %s"
+ "[FAILED]!! Expected: (%s) "
+ "Found: (%s)" % (dut, grp_addr, "JOIN", data["channelJoinName"])
+ )
+ return errormsg
+
+ if not pim_join:
+ if data["group"] == grp_addr and data["channelJoinName"] == "NOINFO":
+ logger.info(
+ "[DUT %s]: Verifying pim join for group: %s"
+ "[PASSED]!! Found Expected: (%s)",
+ dut,
+ grp_addr,
+ data["channelJoinName"],
+ )
+ else:
+ errormsg = (
+ "[DUT %s]: Verifying pim join for group: %s"
+ "[FAILED]!! Expected: (%s) "
+ "Found: (%s)"
+ % (dut, grp_addr, "NOINFO", data["channelJoinName"])
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_igmp_config(tgen, input_dict, stats_return=False, expected=True):
+ """
+ Verify igmp interface details, verifying following configs:
+ timerQueryInterval
+ timerQueryResponseIntervalMsec
+ lastMemberQueryCount
+ timerLastMemberQueryMsec
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict` : Input dict data, required to verify
+ timer
+ * `stats_return`: If user wants API to return statistics
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ input_dict ={
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "igmp": {
+ "query": {
+ "query-interval" : 200,
+ "query-max-response-time" : 100
+ },
+ "statistics": {
+ "queryV2" : 2,
+ "reportV2" : 1
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = verify_igmp_config(tgen, input_dict, stats_return)
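+
+    # Sketch: fetch the statistics instead of asserting on them
+    igmp_stats = verify_igmp_config(tgen, input_dict, stats_return=True)
+    query_count = igmp_stats["statistics"]["queryV2"]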
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for dut in input_dict.keys():
+ rnode = tgen.routers()[dut]
+
+ for interface, data in input_dict[dut]["igmp"]["interfaces"].items():
+ statistics = False
+ report = False
+ if "statistics" in input_dict[dut]["igmp"]["interfaces"][interface]["igmp"]:
+ statistics = True
+ cmd = "show ip igmp statistics"
+ else:
+ cmd = "show ip igmp"
+
+ logger.info(
+ "[DUT: %s]: Verifying IGMP interface %s detail:", dut, interface
+ )
+
+ if statistics:
+ if (
+ "report"
+ in input_dict[dut]["igmp"]["interfaces"][interface]["igmp"][
+ "statistics"
+ ]
+ ):
+ report = True
+
+ if statistics and report:
+ show_ip_igmp_intf_json = run_frr_cmd(
+ rnode, "{} json".format(cmd), isjson=True
+ )
+ intf_detail_json = show_ip_igmp_intf_json["global"]
+ else:
+ show_ip_igmp_intf_json = run_frr_cmd(
+ rnode, "{} interface {} json".format(cmd, interface), isjson=True
+ )
+
+ if not report:
+ if interface not in show_ip_igmp_intf_json:
+ errormsg = (
+ "[DUT %s]: IGMP interface: %s "
+ " is not present in CLI output "
+ "[FAILED]!! " % (dut, interface)
+ )
+ return errormsg
+
+ else:
+ intf_detail_json = show_ip_igmp_intf_json[interface]
+
+ if stats_return:
+ igmp_stats = {}
+
+ if "statistics" in data["igmp"]:
+ if stats_return:
+ igmp_stats["statistics"] = {}
+ for query, value in data["igmp"]["statistics"].items():
+ if query == "queryV2":
+ # Verifying IGMP interface queryV2 statistics
+ if stats_return:
+ igmp_stats["statistics"][query] = intf_detail_json[
+ "queryV2"
+ ]
+
+ else:
+ if intf_detail_json["queryV2"] != value:
+ errormsg = (
+ "[DUT %s]: IGMP interface: %s "
+ " queryV2 statistics verification "
+ "[FAILED]!! Expected : %s,"
+ " Found : %s"
+ % (
+ dut,
+ interface,
+ value,
+ intf_detail_json["queryV2"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: IGMP interface: %s "
+ "queryV2 statistics is %s",
+ dut,
+ interface,
+ value,
+ )
+
+ if query == "reportV2":
+ # Verifying IGMP interface timerV2 statistics
+ if stats_return:
+ igmp_stats["statistics"][query] = intf_detail_json[
+ "reportV2"
+ ]
+
+ else:
+ if intf_detail_json["reportV2"] <= value:
+ errormsg = (
+ "[DUT %s]: IGMP reportV2 "
+ "statistics verification "
+ "[FAILED]!! Expected : %s "
+ "or more, Found : %s"
+ % (
+ dut,
+ interface,
+ value,
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: IGMP reportV2 " "statistics is %s",
+ dut,
+ intf_detail_json["reportV2"],
+ )
+
+ if "query" in data["igmp"]:
+ for query, value in data["igmp"]["query"].items():
+ if query == "query-interval":
+ # Verifying IGMP interface query interval timer
+ if intf_detail_json["timerQueryInterval"] != value:
+ errormsg = (
+ "[DUT %s]: IGMP interface: %s "
+ " query-interval verification "
+ "[FAILED]!! Expected : %s,"
+ " Found : %s"
+ % (
+ dut,
+ interface,
+ value,
+ intf_detail_json["timerQueryInterval"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: IGMP interface: %s " "query-interval is %s",
+ dut,
+ interface,
+ value,
+ )
+
+ if query == "query-max-response-time":
+ # Verifying IGMP interface query max response timer
+ if (
+ intf_detail_json["timerQueryResponseIntervalMsec"]
+ != value * 100
+ ):
+ errormsg = (
+ "[DUT %s]: IGMP interface: %s "
+ "query-max-response-time "
+ "verification [FAILED]!!"
+ " Expected : %s, Found : %s"
+ % (
+ dut,
+ interface,
+                                value * 100,
+ intf_detail_json["timerQueryResponseIntervalMsec"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: IGMP interface: %s "
+ "query-max-response-time is %s ms",
+ dut,
+ interface,
+ value * 100,
+ )
+
+ if query == "last-member-query-count":
+ # Verifying IGMP interface last member query count
+ if intf_detail_json["lastMemberQueryCount"] != value:
+ errormsg = (
+ "[DUT %s]: IGMP interface: %s "
+ "last-member-query-count "
+ "verification [FAILED]!!"
+ " Expected : %s, Found : %s"
+ % (
+ dut,
+ interface,
+ value,
+ intf_detail_json["lastMemberQueryCount"],
+ )
+ )
+ return errormsg
+
+                        logger.info(
+                            "[DUT %s]: IGMP interface: %s "
+                            "last-member-query-count is %s",
+                            dut,
+                            interface,
+                            value,
+                        )
+
+ if query == "last-member-query-interval":
+ # Verifying IGMP interface last member query interval
+ if (
+ intf_detail_json["timerLastMemberQueryMsec"]
+ != value * 100 * intf_detail_json["lastMemberQueryCount"]
+ ):
+ errormsg = (
+ "[DUT %s]: IGMP interface: %s "
+ "last-member-query-interval "
+ "verification [FAILED]!!"
+ " Expected : %s, Found : %s"
+ % (
+ dut,
+ interface,
+                                value * 100 * intf_detail_json["lastMemberQueryCount"],
+ intf_detail_json["timerLastMemberQueryMsec"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: IGMP interface: %s "
+ "last-member-query-interval is %s ms",
+ dut,
+ interface,
+ value * intf_detail_json["lastMemberQueryCount"] * 100,
+ )
+
+ if "version" in data["igmp"]:
+ # Verifying IGMP interface state is up
+ if intf_detail_json["state"] != "up":
+ errormsg = (
+ "[DUT %s]: IGMP interface: %s "
+ " state: %s verification "
+ "[FAILED]!!" % (dut, interface, intf_detail_json["state"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: IGMP interface: %s " "state: %s",
+ dut,
+ interface,
+ intf_detail_json["state"],
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+    return igmp_stats if stats_return else True
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_pim_config(tgen, input_dict, expected=True):
+ """
+ Verify pim interface details, verifying following configs:
+ drPriority
+ helloPeriod
+ helloReceived
+ helloSend
+ drAddress
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict` : Input dict data, required to verify
+ timer
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+    input_dict = {
+        "l1": {
+            "pim": {
+                "interfaces": {
+                    "l1-i1-eth1": {
+                        "drPriority" : 10,
+                        "helloPeriod" : 5
+                    }
+                }
+            }
+        }
+    }
+ result = verify_pim_config(tgen, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for dut in input_dict.keys():
+ rnode = tgen.routers()[dut]
+
+ for interface, data in input_dict[dut]["pim"]["interfaces"].items():
+ logger.info("[DUT: %s]: Verifying PIM interface %s detail:", dut, interface)
+
+            show_ip_pim_intf_json = run_frr_cmd(
+                rnode, "show ip pim interface {} json".format(interface), isjson=True
+            )
+
+            if interface not in show_ip_pim_intf_json:
+                errormsg = (
+                    "[DUT %s]: PIM interface: %s "
+                    " is not present in CLI output "
+                    "[FAILED]!! " % (dut, interface)
+                )
+                return errormsg
+
+            intf_detail_json = show_ip_pim_intf_json[interface]
+
+ for config, value in data.items():
+ if config == "helloPeriod":
+ # Verifying PIM interface helloPeriod
+ if intf_detail_json["helloPeriod"] != value:
+ errormsg = (
+ "[DUT %s]: PIM interface: %s "
+ " helloPeriod verification "
+ "[FAILED]!! Expected : %s,"
+ " Found : %s"
+ % (dut, interface, value, intf_detail_json["helloPeriod"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: PIM interface: %s " "helloPeriod is %s",
+ dut,
+ interface,
+ value,
+ )
+
+ if config == "drPriority":
+ # Verifying PIM interface drPriority
+ if intf_detail_json["drPriority"] != value:
+ errormsg = (
+ "[DUT %s]: PIM interface: %s "
+ " drPriority verification "
+ "[FAILED]!! Expected : %s,"
+ " Found : %s"
+ % (dut, interface, value, intf_detail_json["drPriority"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: PIM interface: %s " "drPriority is %s",
+ dut,
+ interface,
+ value,
+ )
+
+ if config == "drAddress":
+ # Verifying PIM interface drAddress
+ if intf_detail_json["drAddress"] != value:
+ errormsg = (
+ "[DUT %s]: PIM interface: %s "
+ " drAddress verification "
+ "[FAILED]!! Expected : %s,"
+ " Found : %s"
+ % (dut, interface, value, intf_detail_json["drAddress"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: PIM interface: %s " "drAddress is %s",
+ dut,
+ interface,
+ value,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=20, diag_pct=0)
+def verify_multicast_traffic(tgen, input_dict, return_traffic=False, expected=True):
+ """
+ Verify multicast traffic by running
+ "show multicast traffic count json" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict(dict)`: defines DUT, what and for which interfaces
+ traffic needs to be verified
+ * `return_traffic`: returns traffic stats
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "traffic_received": ["r1-r0-eth0"],
+ "traffic_sent": ["r1-r0-eth0"]
+ }
+ }
+
+ result = verify_multicast_traffic(tgen, input_dict)
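+
+    # Sketch: collect the counters for later comparison
+    traffic = verify_multicast_traffic(tgen, input_dict, return_traffic=True)
+    pkts_in = traffic["traffic_received"]["r1-r0-eth0"]["pktsIn"]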
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ traffic_dict = {}
+ for dut in input_dict.keys():
+ if dut not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying multicast " "traffic", dut)
+
+ show_multicast_traffic_json = run_frr_cmd(
+ rnode, "show ip multicast count json", isjson=True
+ )
+
+ for traffic_type, interfaces in input_dict[dut].items():
+ traffic_dict[traffic_type] = {}
+ if traffic_type == "traffic_received":
+ for interface in interfaces:
+ traffic_dict[traffic_type][interface] = {}
+ interface_json = show_multicast_traffic_json[interface]
+
+ if interface_json["pktsIn"] == 0 and interface_json["bytesIn"] == 0:
+ errormsg = (
+ "[DUT %s]: Multicast traffic is "
+ "not received on interface %s "
+ "PktsIn: %s, BytesIn: %s "
+ "[FAILED]!!"
+ % (
+ dut,
+ interface,
+ interface_json["pktsIn"],
+ interface_json["bytesIn"],
+ )
+ )
+ return errormsg
+
+ elif (
+ interface_json["pktsIn"] != 0 and interface_json["bytesIn"] != 0
+ ):
+ traffic_dict[traffic_type][interface][
+ "pktsIn"
+ ] = interface_json["pktsIn"]
+ traffic_dict[traffic_type][interface][
+ "bytesIn"
+ ] = interface_json["bytesIn"]
+
+ logger.info(
+ "[DUT %s]: Multicast traffic is "
+ "received on interface %s "
+ "PktsIn: %s, BytesIn: %s "
+ "[PASSED]!!"
+ % (
+ dut,
+ interface,
+ interface_json["pktsIn"],
+ interface_json["bytesIn"],
+ )
+ )
+
+ else:
+ errormsg = (
+ "[DUT %s]: Multicast traffic interface %s:"
+ " Miss-match in "
+ "PktsIn: %s, BytesIn: %s"
+ "[FAILED]!!"
+ % (
+ dut,
+ interface,
+ interface_json["pktsIn"],
+ interface_json["bytesIn"],
+ )
+ )
+ return errormsg
+
+ if traffic_type == "traffic_sent":
+ traffic_dict[traffic_type] = {}
+ for interface in interfaces:
+ traffic_dict[traffic_type][interface] = {}
+ interface_json = show_multicast_traffic_json[interface]
+
+ if (
+ interface_json["pktsOut"] == 0
+ and interface_json["bytesOut"] == 0
+ ):
+                        errormsg = (
+                            "[DUT %s]: Multicast traffic is "
+                            "not sent out of interface %s "
+                            "PktsOut: %s, BytesOut: %s "
+                            "[FAILED]!!"
+                            % (
+                                dut,
+                                interface,
+                                interface_json["pktsOut"],
+                                interface_json["bytesOut"],
+                            )
+                        )
+ return errormsg
+
+ elif (
+ interface_json["pktsOut"] != 0
+ and interface_json["bytesOut"] != 0
+ ):
+ traffic_dict[traffic_type][interface][
+ "pktsOut"
+ ] = interface_json["pktsOut"]
+ traffic_dict[traffic_type][interface][
+ "bytesOut"
+ ] = interface_json["bytesOut"]
+
+ logger.info(
+ "[DUT %s]: Multicast traffic is "
+ "received on interface %s "
+ "PktsOut: %s, BytesOut: %s "
+ "[PASSED]!!"
+ % (
+ dut,
+ interface,
+ interface_json["pktsOut"],
+ interface_json["bytesOut"],
+ )
+ )
+ else:
+ errormsg = (
+ "[DUT %s]: Multicast traffic interface %s:"
+ " Miss-match in "
+ "PktsOut: %s, BytesOut: %s "
+ "[FAILED]!!"
+ % (
+ dut,
+ interface,
+ interface_json["pktsOut"],
+ interface_json["bytesOut"],
+ )
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+    return traffic_dict if return_traffic else True
+
+
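+# Illustrative sketch: with return_traffic=True the collected counters can be
+# inspected directly (the interface name below is hypothetical):
+#
+#   stats = verify_multicast_traffic(tgen, input_dict, return_traffic=True)
+#   pkts_in = stats["traffic_received"]["r1-r0-eth0"]["pktsIn"]
+
+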
+def get_refCount_for_mroute(tgen, dut, iif, src_address, group_addresses):
+ """
+    Get the refCount for an mroute whose upstream entry matches the given
+    inbound interface, using the "show ip pim upstream" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `iif`: inbound interface
+ * `src_address`: source address
+ * `group_addresses`: IGMP group address
+
+ Usage
+ -----
+ dut = "r1"
+ iif = "r1-r0-eth0"
+ src_address = "*"
+ group_address = "225.1.1.1"
+ result = get_refCount_for_mroute(tgen, dut, iif, src_address,
+ group_address)
+
+ Returns
+ -------
+    refCount(int), or errormsg(str) if the upstream entry is not found
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ refCount = 0
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying refCount for mroutes: ", dut)
+ show_ip_pim_upstream_json = run_frr_cmd(
+ rnode, "show ip pim upstream json", isjson=True
+ )
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ for grp_addr in group_addresses:
+ # Verify group address
+ if grp_addr not in show_ip_pim_upstream_json:
+ errormsg = "[DUT %s]: Verifying upstream" " for group %s [FAILED]!!" % (
+ dut,
+ grp_addr,
+ )
+ return errormsg
+ group_addr_json = show_ip_pim_upstream_json[grp_addr]
+
+ # Verify source address
+ if src_address not in group_addr_json:
+ errormsg = "[DUT %s]: Verifying upstream" " for (%s,%s) [FAILED]!!" % (
+ dut,
+ src_address,
+ grp_addr,
+ )
+ return errormsg
+
+ # Verify Inbound Interface
+ if group_addr_json[src_address]["inboundInterface"] == iif:
+ refCount = group_addr_json[src_address]["refCount"]
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return refCount
+
+
+@retry(retry_timeout=40, diag_pct=0)
+def verify_multicast_flag_state(
+ tgen, dut, src_address, group_addresses, flag, expected=True
+):
+ """
+    Verify flag state for mroutes and make sure (*, G)/(S, G) entries have
+    correct flags by running "show ip mroute" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `src_address`: source address
+ * `group_addresses`: IGMP group address
+ * `flag`: flag state, needs to be verified
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ dut = "r1"
+ flag = "SC"
+ group_address = "225.1.1.1"
+ result = verify_multicast_flag_state(tgen, dut, src_address,
+ group_address, flag)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying flag state for mroutes", dut)
+ show_ip_mroute_json = run_frr_cmd(rnode, "show ip mroute json", isjson=True)
+
+    if not show_ip_mroute_json:
+ error_msg = "[DUT %s]: mroutes are not present or flushed out !!" % (dut)
+ return error_msg
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ for grp_addr in group_addresses:
+ if grp_addr not in show_ip_mroute_json:
+            errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % (
+                dut,
+                src_address,
+                grp_addr,
+            )
+ return errormsg
+ else:
+ group_addr_json = show_ip_mroute_json[grp_addr]
+
+ if src_address not in group_addr_json:
+ errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % (
+ dut,
+ src_address,
+ grp_addr,
+ )
+ return errormsg
+ else:
+ mroutes = group_addr_json[src_address]
+
+ if mroutes["installed"] != 0:
+ logger.info(
+ "[DUT %s]: mroute (%s,%s) is installed", dut, src_address, grp_addr
+ )
+
+ if mroutes["flags"] != flag:
+ errormsg = (
+ "[DUT %s]: Verifying flag for (%s, %s) "
+ "mroute [FAILED]!! "
+ "Expected: %s Found: %s"
+ % (dut, src_address, grp_addr, flag, mroutes["flags"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Verifying flag for (%s, %s)"
+ " mroute, [PASSED]!! "
+ "Found Expected: %s",
+ dut,
+ src_address,
+ grp_addr,
+ mroutes["flags"],
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=40, diag_pct=0)
+def verify_igmp_interface(tgen, dut, igmp_iface, interface_ip, expected=True):
+ """
+    Verify an IGMP interface is up and running; config is verified
+ using "show ip igmp interface" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut` : device under test
+ * `igmp_iface` : interface name
+ * `interface_ip` : interface ip address
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+    result = verify_igmp_interface(tgen, dut, igmp_iface, interface_ip)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router in tgen.routers():
+ if router != dut:
+ continue
+
+ logger.info("[DUT: %s]: Verifying PIM interface status:", dut)
+
+ rnode = tgen.routers()[dut]
+ show_ip_igmp_interface_json = run_frr_cmd(
+ rnode, "show ip igmp interface json", isjson=True
+ )
+
+ if igmp_iface in show_ip_igmp_interface_json:
+ igmp_intf_json = show_ip_igmp_interface_json[igmp_iface]
+ # Verifying igmp interface
+ if igmp_intf_json["address"] != interface_ip:
+ errormsg = (
+ "[DUT %s]: igmp interface ip is not correct "
+ "[FAILED]!! Expected : %s, Found : %s"
+ % (dut, igmp_intf_json["address"], interface_ip)
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: igmp interface: %s, " "interface ip: %s" " [PASSED]!!",
+ dut,
+ igmp_iface,
+ interface_ip,
+ )
+ else:
+ errormsg = (
+ "[DUT %s]: igmp interface: %s "
+ "igmp interface ip: %s, is not present "
+ % (dut, igmp_iface, interface_ip)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+class McastTesterHelper(HostApplicationHelper):
+ def __init__(self, tgen=None):
+ self.script_path = os.path.join(CWD, "mcast-tester.py")
+ self.host_conn = {}
+ self.listen_sock = None
+
+ # # Get a temporary file for socket path
+ # (fd, sock_path) = tempfile.mkstemp("-mct.sock", "tmp" + str(os.getpid()))
+ # os.close(fd)
+ # os.remove(sock_path)
+ # self.app_sock_path = sock_path
+
+ # # Listen on unix socket
+ # logger.debug("%s: listening on socket %s", self, self.app_sock_path)
+ # self.listen_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
+ # self.listen_sock.settimeout(10)
+ # self.listen_sock.bind(self.app_sock_path)
+ # self.listen_sock.listen(10)
+
+ python3_path = get_exec_path(["python3", "python"])
+ super(McastTesterHelper, self).__init__(
+ tgen,
+ # [python3_path, self.script_path, self.app_sock_path]
+ [python3_path, self.script_path],
+ )
+
+ def __str__(self):
+ return "McastTesterHelper({})".format(self.script_path)
+
+ def run_join(self, host, join_addrs, join_towards=None, join_intf=None):
+ """
+ Join a UDP multicast group.
+
+ One of join_towards or join_intf MUST be set.
+
+ Parameters:
+ -----------
+ * `host`: host from where IGMP join would be sent
+ * `join_addrs`: multicast address (or addresses) to join to
+ * `join_intf`: the interface to bind the join[s] to
+        * `join_towards`: router whose interface to bind the join[s] to
+ """
+ if not isinstance(join_addrs, list) and not isinstance(join_addrs, tuple):
+ join_addrs = [join_addrs]
+
+ if join_towards:
+ join_intf = frr_unicode(
+ self.tgen.json_topo["routers"][host]["links"][join_towards]["interface"]
+ )
+ else:
+ assert join_intf
+
+ for join in join_addrs:
+ self.run(host, [join, join_intf])
+
+ return True
+
+ def run_traffic(self, host, send_to_addrs, bind_towards=None, bind_intf=None):
+ """
+ Send UDP multicast traffic.
+
+ One of bind_towards or bind_intf MUST be set.
+
+ Parameters:
+ -----------
+ * `host`: host to send traffic from
+ * `send_to_addrs`: multicast address (or addresses) to send traffic to
+        * `bind_towards`: router whose interface provides the source IP address
+        * `bind_intf`: the interface to bind the traffic source to
+ """
+ if bind_towards:
+ bind_intf = frr_unicode(
+ self.tgen.json_topo["routers"][host]["links"][bind_towards]["interface"]
+ )
+ else:
+ assert bind_intf
+
+ if not isinstance(send_to_addrs, list) and not isinstance(send_to_addrs, tuple):
+ send_to_addrs = [send_to_addrs]
+
+ for send_to in send_to_addrs:
+ self.run(host, ["--send=0.7", send_to, bind_intf])
+
+ return True
+
+
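+# Illustrative usage sketch (host/router names and the group address are
+# hypothetical; assumes the HostApplicationHelper base class provides
+# context-manager support, as used elsewhere in these tests):
+#
+#   with McastTesterHelper(tgen) as apphelper:
+#       apphelper.run_join("i1", "225.1.1.1", join_towards="r1")
+#       apphelper.run_traffic("i2", "225.1.1.1", bind_towards="r1")
+
+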
+@retry(retry_timeout=62)
+def verify_local_igmp_groups(tgen, dut, interface, group_addresses):
+ """
+ Verify local IGMP groups are received from an intended interface
+ by running "show ip igmp join json" command
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `interface`: interface, from which IGMP groups are configured
+ * `group_addresses`: IGMP group address
+
+ Usage
+ -----
+ dut = "r1"
+ interface = "r1-r0-eth0"
+ group_address = "225.1.1.1"
+ result = verify_local_igmp_groups(tgen, dut, interface, group_address)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying local IGMP groups received:", dut)
+ show_ip_local_igmp_json = run_frr_cmd(rnode, "show ip igmp join json", isjson=True)
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ if interface not in show_ip_local_igmp_json:
+ errormsg = (
+ "[DUT %s]: Verifying local IGMP group received"
+ " from interface %s [FAILED]!! " % (dut, interface)
+ )
+ return errormsg
+
+ for grp_addr in group_addresses:
+ found = False
+ for index in show_ip_local_igmp_json[interface]["groups"]:
+ if index["group"] == grp_addr:
+ found = True
+ break
+ if not found:
+ errormsg = (
+ "[DUT %s]: Verifying local IGMP group received"
+ " from interface %s [FAILED]!! "
+ " Expected: %s " % (dut, interface, grp_addr)
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Verifying local IGMP group %s received "
+ "from interface %s [PASSED]!! ",
+ dut,
+ grp_addr,
+ interface,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+def verify_pim_interface_traffic(tgen, input_dict, return_stats=True, addr_type="ipv4"):
+ """
+ Verify ip pim interface traffic by running
+ "show ip pim interface traffic" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict(dict)`: defines DUT, what and from which interfaces
+ traffic needs to be verified
+    * `return_stats`: if True, return the collected traffic stats
+    * [optional]`addr_type`: specify address-family, default is ipv4
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "r1-r0-eth0": {
+ "helloRx": 0,
+ "helloTx": 1,
+ "joinRx": 0,
+ "joinTx": 0
+ }
+ }
+ }
+
+ result = verify_pim_interface_traffic(tgen, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ output_dict = {}
+ for dut in input_dict.keys():
+ if dut not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying pim interface traffic", dut)
+
+ if addr_type == "ipv4":
+ cmd = "show ip pim interface traffic json"
+ elif addr_type == "ipv6":
+ cmd = "show ipv6 pim interface traffic json"
+
+ show_pim_intf_traffic_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ output_dict[dut] = {}
+ for intf, data in input_dict[dut].items():
+ interface_json = show_pim_intf_traffic_json[intf]
+ for state in data:
+ # Verify Tx/Rx
+ if state in interface_json:
+ output_dict[dut][state] = interface_json[state]
+ else:
+ errormsg = (
+ "[DUT %s]: %s is not present"
+ "for interface %s [FAILED]!! " % (dut, state, intf)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+    return output_dict if return_stats else True
+
+
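+# Illustrative usage sketch: tests commonly snapshot the PIM interface
+# counters, trigger an event, then verify that the counters moved (router
+# and counter names below are hypothetical):
+#
+#   state_before = verify_pim_interface_traffic(tgen, input_dict)
+#   # ... trigger an RP failover or interface flap ...
+#   state_after = verify_pim_interface_traffic(tgen, input_dict)
+#   assert state_after["r1"]["helloTx"] >= state_before["r1"]["helloTx"]
+
+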
+@retry(retry_timeout=40, diag_pct=0)
+def verify_mld_groups(tgen, dut, interface, group_addresses, expected=True):
+ """
+    Verify MLD groups are received from an intended interface
+    by running "show ipv6 mld groups" command
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `interface`: interface, from which MLD groups would be received
+ * `group_addresses`: MLD group address
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ dut = "r1"
+ interface = "r1-r0-eth0"
+ group_address = "ffaa::1"
+ result = verify_mld_groups(tgen, dut, interface, group_address)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying mld groups received:", dut)
+ show_mld_json = run_frr_cmd(rnode, "show ipv6 mld groups json", isjson=True)
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ if interface in show_mld_json:
+ show_mld_json = show_mld_json[interface]["groups"]
+ else:
+ errormsg = (
+ "[DUT %s]: Verifying MLD group received"
+ " from interface %s [FAILED]!! " % (dut, interface)
+ )
+ return errormsg
+
+    for grp_addr in group_addresses:
+        found = False
+        for index in show_mld_json:
+            if index["group"] == grp_addr:
+                found = True
+                break
+        if not found:
+ errormsg = (
+ "[DUT %s]: Verifying MLD group received"
+ " from interface %s [FAILED]!! "
+ " Expected not found: %s" % (dut, interface, grp_addr)
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Verifying MLD group %s received "
+ "from interface %s [PASSED]!! ",
+ dut,
+ grp_addr,
+ interface,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=40, diag_pct=0)
+def verify_mld_interface(tgen, dut, mld_iface, interface_ip, expected=True):
+ """
+    Verify an MLD interface is up and running; config is verified
+    using "show ipv6 mld interface" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut` : device under test
+ * `mld_iface` : interface name
+ * `interface_ip` : interface ip address
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+    result = verify_mld_interface(tgen, dut, mld_iface, interface_ip)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router in tgen.routers():
+ if router != dut:
+ continue
+
+ logger.info("[DUT: %s]: Verifying MLD interface status:", dut)
+
+ rnode = tgen.routers()[dut]
+ show_mld_interface_json = run_frr_cmd(
+ rnode, "show ipv6 mld interface json", isjson=True
+ )
+
+ if mld_iface in show_mld_interface_json:
+ mld_intf_json = show_mld_interface_json[mld_iface]
+            # Verifying MLD interface
+            if mld_intf_json["address"] != interface_ip:
+                errormsg = (
+                    "[DUT %s]: MLD interface ip is not correct "
+                    "[FAILED]!! Expected : %s, Found : %s"
+                    % (dut, interface_ip, mld_intf_json["address"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: igmp interface: %s, " "interface ip: %s" " [PASSED]!!",
+ dut,
+ mld_iface,
+ interface_ip,
+ )
+ else:
+ errormsg = (
+ "[DUT %s]: igmp interface: %s "
+ "igmp interface ip: %s, is not present "
+ % (dut, mld_iface, interface_ip)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_mld_config(tgen, input_dict, stats_return=False, expected=True):
+ """
+    Verify MLD interface details, verifying the following configs:
+ timerQueryInterval
+ timerQueryResponseIntervalMsec
+ lastMemberQueryCount
+ timerLastMemberQueryMsec
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict` : Input dict data, required to verify
+ timer
+ * `stats_return`: If user wants API to return statistics
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ input_dict ={
+ "l1": {
+ "mld": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "mld": {
+ "query": {
+ "query-interval" : 200,
+ "query-max-response-time" : 100
+ },
+ "statistics": {
+ "queryV2" : 2,
+ "reportV2" : 1
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = verify_mld_config(tgen, input_dict, stats_return)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for dut in input_dict.keys():
+ rnode = tgen.routers()[dut]
+ for interface, data in input_dict[dut]["mld"]["interfaces"].items():
+ statistics = False
+ report = False
+ if "statistics" in input_dict[dut]["mld"]["interfaces"][interface]["mld"]:
+ statistics = True
+ cmd = "show ipv6 mld statistics"
+ else:
+ cmd = "show ipv6 mld"
+
+ logger.info("[DUT: %s]: Verifying MLD interface %s detail:", dut, interface)
+
+ if statistics:
+ if (
+ "report"
+ in input_dict[dut]["mld"]["interfaces"][interface]["mld"][
+ "statistics"
+ ]
+ ):
+ report = True
+
+ if statistics and report:
+ show_ipv6_mld_intf_json = run_frr_cmd(
+ rnode, "{} json".format(cmd), isjson=True
+ )
+ intf_detail_json = show_ipv6_mld_intf_json["global"]
+ else:
+ show_ipv6_mld_intf_json = run_frr_cmd(
+ rnode, "{} interface {} json".format(cmd, interface), isjson=True
+ )
+
+ show_ipv6_mld_intf_json = show_ipv6_mld_intf_json["default"]
+
+ if not report:
+ if interface not in show_ipv6_mld_intf_json:
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ " is not present in CLI output "
+ "[FAILED]!! " % (dut, interface)
+ )
+ return errormsg
+
+ else:
+ intf_detail_json = show_ipv6_mld_intf_json[interface]
+
+ if stats_return:
+ mld_stats = {}
+
+ if "statistics" in data["mld"]:
+ if stats_return:
+ mld_stats["statistics"] = {}
+ for query, value in data["mld"]["statistics"].items():
+ if query == "queryV1":
+                    # Verifying MLD interface queryV1 statistics
+ if stats_return:
+ mld_stats["statistics"][query] = intf_detail_json["queryV1"]
+
+ else:
+ if intf_detail_json["queryV1"] != value:
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ " queryV1 statistics verification "
+ "[FAILED]!! Expected : %s,"
+ " Found : %s"
+ % (
+ dut,
+ interface,
+ value,
+ intf_detail_json["queryV1"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: MLD interface: %s "
+ "queryV1 statistics is %s",
+ dut,
+ interface,
+ value,
+ )
+
+ if query == "reportV1":
+                    # Verifying MLD interface reportV1 statistics
+ if stats_return:
+ mld_stats["statistics"][query] = intf_detail_json[
+ "reportV1"
+ ]
+
+ else:
+ if intf_detail_json["reportV1"] <= value:
+ errormsg = (
+ "[DUT %s]: MLD reportV1 "
+ "statistics verification "
+ "[FAILED]!! Expected : %s "
+ "or more, Found : %s"
+                                % (
+                                    dut,
+                                    value,
+                                    intf_detail_json["reportV1"],
+                                )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: MLD reportV1 " "statistics is %s",
+ dut,
+ intf_detail_json["reportV1"],
+ )
+
+ if "query" in data["mld"]:
+ for query, value in data["mld"]["query"].items():
+ if query == "query-interval":
+                    # Verifying MLD interface query interval timer
+ if intf_detail_json["timerQueryIntervalMsec"] != value * 1000:
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ " query-interval verification "
+ "[FAILED]!! Expected : %s,"
+ " Found : %s"
+ % (
+ dut,
+ interface,
+ value,
+ intf_detail_json["timerQueryIntervalMsec"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: MLD interface: %s " "query-interval is %s",
+ dut,
+ interface,
+ value * 1000,
+ )
+
+ if query == "query-max-response-time":
+                    # Verifying MLD interface query max response timer
+ if (
+ intf_detail_json["timerQueryResponseTimerMsec"]
+ != value * 100
+ ):
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ "query-max-response-time "
+ "verification [FAILED]!!"
+ " Expected : %s, Found : %s"
+ % (
+ dut,
+ interface,
+ value * 100,
+ intf_detail_json["timerQueryResponseTimerMsec"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: MLD interface: %s "
+ "query-max-response-time is %s ms",
+ dut,
+ interface,
+ value * 100,
+ )
+
+ if query == "last-member-query-count":
+                    # Verifying MLD interface last member query count
+ if intf_detail_json["lastMemberQueryCount"] != value:
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ "last-member-query-count "
+ "verification [FAILED]!!"
+ " Expected : %s, Found : %s"
+ % (
+ dut,
+ interface,
+ value,
+ intf_detail_json["lastMemberQueryCount"],
+ )
+ )
+ return errormsg
+
+                        logger.info(
+                            "[DUT %s]: MLD interface: %s "
+                            "last-member-query-count is %s",
+                            dut,
+                            interface,
+                            value,
+                        )
+
+ if query == "last-member-query-interval":
+                    # Verifying MLD interface last member query interval
+ if (
+ intf_detail_json["timerLastMemberQueryIntervalMsec"]
+ != value * 100
+ ):
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ "last-member-query-interval "
+ "verification [FAILED]!!"
+ " Expected : %s, Found : %s"
+ % (
+ dut,
+ interface,
+ value * 100,
+ intf_detail_json[
+ "timerLastMemberQueryIntervalMsec"
+ ],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: MLD interface: %s "
+ "last-member-query-interval is %s ms",
+ dut,
+ interface,
+ value * 100,
+ )
+
+ if "version" in data["mld"]:
+            # Verifying MLD interface state is up
+ if intf_detail_json["state"] != "up":
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ " state: %s verification "
+ "[FAILED]!!" % (dut, interface, intf_detail_json["state"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: MLD interface: %s " "state: %s",
+ dut,
+ interface,
+ intf_detail_json["state"],
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+    return mld_stats if stats_return else True
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_pim_nexthop(tgen, topo, dut, nexthop, addr_type="ipv4"):
+ """
+    Verify PIM nexthop details using "show ip/ipv6 pim nexthop" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo` : json file data
+ * `dut` : dut info
+ * `nexthop` : nexthop ip/ipv6 address
+
+ Usage
+ -----
+ result = verify_pim_nexthop(tgen, topo, dut, nexthop)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ rnode = tgen.routers()[dut]
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
+ cmd = "show {} pim nexthop".format(addr_type)
+ pim_nexthop = rnode.vtysh_cmd(cmd)
+
+    if nexthop in pim_nexthop:
+        logger.info("[DUT %s]: Expected nexthop: %s, Found", dut, nexthop)
+        logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+        return True
+
+    errormsg = "[DUT %s]: Nexthop not found: %s" % (dut, nexthop)
+    return errormsg
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_mroute_summary(
+ tgen, dut, sg_mroute=None, starg_mroute=None, total_mroute=None, addr_type="ipv4"
+):
+ """
+    Verify ip mroute summary has correct (*,G), (S,G) and total mroute counts
+    by running "show ip mroute summary json" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+    * `sg_mroute`: Number of installed (S,G) mroutes
+    * `starg_mroute`: Number of installed (*,G) mroutes
+    * `total_mroute`: Total number of installed mroutes
+    * `addr_type`: ipv4 or ipv6, default is ipv4
+
+ Usage
+ -----
+ dut = "r1"
+ sg_mroute = "4000"
+ starg_mroute= "2000"
+ total_mroute = "6000"
+ addr_type=IPv4 or IPv6
+ result = verify_mroute_summary(tgen, dut, sg_mroute=None, starg_mroute=None,
+ total_mroute= None)
+ Returns
+ -------
+ errormsg or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying mroute summary", dut)
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
+ cmd = "show {} mroute summary json".format(ip_cmd)
+ show_mroute_summary_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ if starg_mroute is not None:
+ if show_mroute_summary_json["wildcardGroup"]["installed"] != starg_mroute:
+ logger.error(
+ "Number of installed starg are: %s but expected: %s",
+ show_mroute_summary_json["wildcardGroup"]["installed"],
+ starg_mroute,
+ )
+ return False
+ logger.info(
+ "Number of installed starg routes are %s",
+ show_mroute_summary_json["wildcardGroup"]["installed"],
+ )
+
+ if sg_mroute is not None:
+ if show_mroute_summary_json["sourceGroup"]["installed"] != sg_mroute:
+ logger.error(
+ "Number of installed SG routes are: %s but expected: %s",
+ show_mroute_summary_json["sourceGroup"]["installed"],
+ sg_mroute,
+ )
+ return False
+ logger.info(
+ "Number of installed SG routes are %s",
+ show_mroute_summary_json["sourceGroup"]["installed"],
+ )
+
+ if total_mroute is not None:
+ if show_mroute_summary_json["totalNumOfInstalledMroutes"] != total_mroute:
+ logger.error(
+ "Total number of installed mroutes are: %s but expected: %s",
+ show_mroute_summary_json["totalNumOfInstalledMroutes"],
+ total_mroute,
+ )
+ return False
+ logger.info(
+ "Number of installed Total mroute are %s",
+ show_mroute_summary_json["totalNumOfInstalledMroutes"],
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_sg_traffic(tgen, dut, groups, src, addr_type="ipv4"):
+ """
+ Verify multicast traffic by running
+ "show ip mroute count json" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+    * `dut`: device under test
+    * `groups`: igmp or mld groups where traffic needs to be verified
+    * `src`: source address of the traffic
+    * `addr_type`: ipv4 or ipv6, default is ipv4
+
+ Usage
+ -----
+ result = verify_sg_traffic(tgen, "r1", igmp_groups/mld_groups, srcaddress)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying multicast " "SG traffic", dut)
+
+ if addr_type == "ipv4":
+ cmd = "show ip mroute count json"
+ elif addr_type == "ipv6":
+ cmd = "show ipv6 mroute count json"
+ show_mroute_sg_traffic_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+    if not show_mroute_sg_traffic_json:
+ errormsg = "[DUT %s]: Json output is empty" % (dut)
+ return errormsg
+
+ before_traffic = {}
+ after_traffic = {}
+
+ for grp in groups:
+        if grp not in show_mroute_sg_traffic_json:
+            errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % (
+                dut,
+                src,
+                grp,
+            )
+            return errormsg
+ if src not in show_mroute_sg_traffic_json[grp]:
+ errormsg = (
+ "[DUT %s]: Verifying source is not present in "
+ " %s [FAILED]!! " % (dut, src)
+ )
+ return errormsg
+
+ before_traffic[grp] = show_mroute_sg_traffic_json[grp][src]["packets"]
+
+ logger.info("Waiting for 10sec traffic to increament")
+ sleep(10)
+
+ show_mroute_sg_traffic_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ for grp in groups:
+        if grp not in show_mroute_sg_traffic_json:
+            errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % (
+                dut,
+                src,
+                grp,
+            )
+            return errormsg
+ if src not in show_mroute_sg_traffic_json[grp]:
+ errormsg = (
+ "[DUT %s]: Verifying source is not present in "
+ " %s [FAILED]!! " % (dut, src)
+ )
+ return errormsg
+
+ after_traffic[grp] = show_mroute_sg_traffic_json[grp][src]["packets"]
+
+ for grp in groups:
+ if after_traffic[grp] <= before_traffic[grp]:
+ errormsg = (
+ "[DUT %s]: Verifying igmp group %s source %s not increamenting traffic"
+ " [FAILED]!! " % (dut, grp, src)
+ )
+ return errormsg
+ else:
+ logger.info(
+ "[DUT %s]:igmp group %s source %s receiving traffic"
+ " [PASSED]!! " % (dut, grp, src)
+ )
+ result = True
+
+ return result
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_pim6_config(tgen, input_dict, expected=True):
+ """
+    Verify PIM6 interface details, verifying the following configs:
+ drPriority
+ helloPeriod
+ helloReceived
+ helloSend
+ drAddress
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict` : Input dict data, required to verify
+ timer
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+    input_dict = {
+        "l1": {
+            "pim6": {
+                "interfaces": {
+                    "l1-i1-eth1": {
+                        "drPriority": 10,
+                        "helloPeriod": 5
+                    }
+                }
+            }
+        }
+    }
+ result = verify_pim6_config(tgen, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for dut in input_dict.keys():
+ rnode = tgen.routers()[dut]
+
+ for interface, data in input_dict[dut]["pim6"]["interfaces"].items():
+ logger.info(
+ "[DUT: %s]: Verifying PIM6 interface %s detail:", dut, interface
+ )
+
+ show_ipv6_pim_intf_json = run_frr_cmd(
+ rnode, "show ipv6 pim interface {} json".format(interface), isjson=True
+ )
+
+ if interface not in show_ipv6_pim_intf_json:
+ errormsg = (
+ "[DUT %s]: PIM6 interface: %s "
+ " is not present in CLI output "
+ "[FAILED]!! " % (dut, interface)
+ )
+ return errormsg
+
+ intf_detail_json = show_ipv6_pim_intf_json[interface]
+
+ for config, value in data.items():
+ if config == "helloPeriod":
+ # Verifying PIM interface helloPeriod
+ if intf_detail_json["helloPeriod"] != value:
+ errormsg = (
+ "[DUT %s]: PIM6 interface: %s "
+ " helloPeriod verification "
+ "[FAILED]!! Expected : %s,"
+ " Found : %s"
+ % (dut, interface, value, intf_detail_json["helloPeriod"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: PIM6 interface: %s " "helloPeriod is %s",
+ dut,
+ interface,
+ value,
+ )
+
+ if config == "drPriority":
+ # Verifying PIM interface drPriority
+ if intf_detail_json["drPriority"] != value:
+ errormsg = (
+ "[DUT %s]: PIM6 interface: %s "
+ " drPriority verification "
+ "[FAILED]!! Expected : %s,"
+ " Found : %s"
+ % (dut, interface, value, intf_detail_json["drPriority"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: PIM6 interface: %s " "drPriority is %s",
+ dut,
+ interface,
+ value,
+ )
+
+ if config == "drAddress":
+ # Verifying PIM interface drAddress
+ if intf_detail_json["drAddress"] != value:
+ errormsg = (
+ "[DUT %s]: PIM6 interface: %s "
+ " drAddress verification "
+ "[FAILED]!! Expected : %s,"
+ " Found : %s"
+ % (dut, interface, value, intf_detail_json["drAddress"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: PIM6 interface: %s " "drAddress is %s",
+ dut,
+ interface,
+ value,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=62)
+def verify_local_mld_groups(tgen, dut, interface, group_addresses):
+ """
+ Verify local MLD groups are received from an intended interface
+ by running "show ipv6 mld join json" command
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `interface`: interface, from which IGMP groups are configured
+ * `group_addresses`: MLD group address
+ Usage
+ -----
+ dut = "r1"
+ interface = "r1-r0-eth0"
+ group_address = "ffaa::1"
+ result = verify_local_mld_groups(tgen, dut, interface, group_address)
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+ logger.info("[DUT: %s]: Verifying local MLD groups received:", dut)
+ show_ipv6_local_mld_json = run_frr_cmd(
+ rnode, "show ipv6 mld join json", isjson=True
+ )
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ if interface not in show_ipv6_local_mld_json["default"]:
+ errormsg = (
+ "[DUT %s]: Verifying local MLD group received"
+ " from interface %s [FAILED]!! " % (dut, interface)
+ )
+ return errormsg
+
+    for grp_addr in group_addresses:
+        if grp_addr not in show_ipv6_local_mld_json["default"][interface]:
+            errormsg = (
+                "[DUT %s]: Verifying local MLD group received"
+                " from interface %s [FAILED]!! "
+                " Expected: %s " % (dut, interface, grp_addr)
+            )
+            return errormsg
+
+ logger.info(
+ "[DUT %s]: Verifying local MLD group %s received "
+ "from interface %s [PASSED]!! ",
+ dut,
+ grp_addr,
+ interface,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
diff --git a/tests/topotests/lib/scapy_sendpkt.py b/tests/topotests/lib/scapy_sendpkt.py
new file mode 100755
index 0000000..377146e
--- /dev/null
+++ b/tests/topotests/lib/scapy_sendpkt.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: MIT
+#
+# July 29 2021, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C. ("LabN")
+#
+import argparse
+import logging
+import re
+import sys
+
+from scapy.all import conf, srp
+
+conf.verb = 0
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-i", "--interface", help="interface to send packet on.")
+ parser.add_argument("-I", "--imports", help="scapy symbols to import")
+ parser.add_argument(
+ "-t", "--timeout", type=float, default=2.0, help="timeout for reply receipts"
+ )
+ parser.add_argument("pktdef", help="scapy packet definition to send")
+ args = parser.parse_args()
+
+ if args.imports:
+ i = args.imports.replace("\n", "").strip()
+ if not re.match("[a-zA-Z0-9_ \t,]", i):
+ logging.critical('Invalid imports specified: "%s"', i)
+ sys.exit(1)
+ exec("from scapy.all import " + i, globals(), locals())
+
+ ans, unans = srp(eval(args.pktdef), iface=args.interface, timeout=args.timeout)
+ if not ans:
+ sys.exit(2)
+ for pkt in ans:
+ print(pkt.answer.show(dump=True))
+
+
+if __name__ == "__main__":
+ main()
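+
+
+# Example invocation (illustrative; the interface and packet definition are
+# hypothetical):
+#   scapy_sendpkt.py -i r1-eth0 -I 'Ether, IP, ICMP' \
+#       'Ether()/IP(dst="10.0.0.2")/ICMP()'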
diff --git a/tests/topotests/lib/send_bsr_packet.py b/tests/topotests/lib/send_bsr_packet.py
new file mode 100755
index 0000000..c11de64
--- /dev/null
+++ b/tests/topotests/lib/send_bsr_packet.py
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+
+import sys
+import argparse
+from scapy.all import Raw, sendp
+import binascii
+
+
+def send_packet(packet, iface, interval, count):
+ """
+ Read BSR packet in Raw format and send it to specified interface
+
+ Parameter:
+ ---------
+ * `packet` : BSR packet in raw format
+    * `iface` : Interface from which the packet would be sent
+ * `interval` : Interval between the packets
+ * `count` : Number of packets to be sent
+ """
+
+ data = binascii.a2b_hex(packet)
+ p = Raw(load=data)
+ p.show()
+ sendp(p, inter=int(interval), iface=iface, count=int(count))
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Send BSR Raw packet")
+ parser.add_argument("packet", help="Packet in raw format")
+ parser.add_argument("iface", help="Packet send to this ineterface")
+ parser.add_argument("--interval", help="Interval between packets", default=0)
+ parser.add_argument(
+ "--count", help="Number of times packet is sent repetitively", default=0
+ )
+ args = parser.parse_args()
+
+ if not args.packet or not args.iface:
+ sys.exit(1)
+
+ send_packet(args.packet, args.iface, args.interval, args.count)
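+
+
+# Example invocation (illustrative; the hex string is a placeholder for a
+# real raw BSR packet dump):
+#   send_bsr_packet.py "0100..." r1-eth0 --interval 1 --count 5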
diff --git a/tests/topotests/lib/snmptest.py b/tests/topotests/lib/snmptest.py
new file mode 100644
index 0000000..e7cd657
--- /dev/null
+++ b/tests/topotests/lib/snmptest.py
@@ -0,0 +1,145 @@
+# SPDX-License-Identifier: ISC
+#
+# snmptest.py
+# Library of helper functions for NetDEF Topology Tests
+#
+# Copyright (c) 2020 by Volta Networks
+#
+#
+
+"""
+SNMP library to test snmp walks and gets
+
+Basic usage instructions:
+
+* define an SnmpTester class giving a router, address, community and version
+* use test_oid or test_walk to check values in MIBS
+* see tests/topotest/simple-snmp-test/test_simple_snmp.py for example
+"""
+
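+# Illustrative sketch (the router object, agent address, and OID/value pair
+# are hypothetical):
+#   snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
+#   assert snmp.test_oid("sysName.0", "r1")
+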
+from lib.topolog import logger
+
+
+class SnmpTester(object):
+ "A helper class for testing SNMP"
+
+ def __init__(self, router, iface, community, version, options=""):
+ self.community = community
+ self.version = version
+ self.router = router
+ self.iface = iface
+ self.options = options
+ logger.info(
+ "created SNMP tester: SNMPv{0} community:{1}".format(
+ self.version, self.community
+ )
+ )
+
+ def _snmp_config(self):
+ """
+ Helper function to build a string with SNMP
+ configuration for commands.
+ """
+ return "-v {0} -c {1} {2} {3}".format(
+ self.version, self.community, self.options, self.iface
+ )
+
+ @staticmethod
+ def _get_snmp_value(snmp_output):
+ tokens = snmp_output.strip().split()
+
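+        # Typical snmpget output is "<oid> = <TYPE>: <value ...>" (layout
+        # assumed here), so the value tokens start at index 3.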
+ num_value_tokens = len(tokens) - 3
+
+        # this copes with the empty string return
+ if num_value_tokens == 0:
+ return tokens[2]
+
+ if num_value_tokens > 1:
+ output = ""
+ index = 3
+ while index < len(tokens) - 1:
+ output += "{} ".format(tokens[index])
+ index += 1
+ output += "{}".format(tokens[index])
+ return output
+ # third token is the value of the object
+ return tokens[3]
+
+    @staticmethod
+    def _get_snmp_oid(snmp_output):
+        tokens = snmp_output.strip().split()
+
+        # the first token is the OID; return it without its leading component
+        return tokens[0].split(".", 1)[1]
+
+ def _parse_multiline(self, snmp_output):
+ results = snmp_output.strip().split("\n")
+
+ out_dict = {}
+ out_list = []
+ for response in results:
+ out_dict[self._get_snmp_oid(response)] = self._get_snmp_value(response)
+ out_list.append(self._get_snmp_value(response))
+
+ return out_dict, out_list
+
+ def get(self, oid):
+ cmd = "snmpget {0} {1}".format(self._snmp_config(), oid)
+
+ result = self.router.cmd(cmd)
+ if "not found" in result:
+ return None
+ return self._get_snmp_value(result)
+
+ def get_next(self, oid):
+ cmd = "snmpgetnext {0} {1}".format(self._snmp_config(), oid)
+
+ result = self.router.cmd(cmd)
+ print("get_next: {}".format(result))
+ if "not found" in result:
+ return None
+ return self._get_snmp_value(result)
+
+ def walk(self, oid):
+ cmd = "snmpwalk {0} {1}".format(self._snmp_config(), oid)
+
+ result = self.router.cmd(cmd)
+ return self._parse_multiline(result)
+
+ def test_oid(self, oid, value):
+ print("oid: {}".format(self.get_next(oid)))
+ return self.get_next(oid) == value
+
+ def test_oid_walk(self, oid, values, oids=None):
+ results_dict, results_list = self.walk(oid)
+ print("test_oid_walk: {} {}".format(oid, results_dict))
+ if oids is not None:
+ index = 0
+ for oid in oids:
+ # avoid key error for missing keys
+                if oid not in results_dict:
+ print("FAIL: missing oid key {}".format(oid))
+ return False
+ if results_dict[oid] != values[index]:
+ print(
+ "FAIL{} {} |{}| == |{}|".format(
+ oid, index, results_dict[oid], values[index]
+ )
+ )
+ return False
+ index += 1
+ return True
+
+ # Return true if 'values' is a subset of 'results_list'
+ print("test {} == {}".format(results_list[: len(values)], values))
+ return results_list[: len(values)] == values
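+
+
+# Example walk check (illustrative; the OID and values are hypothetical):
+#   ok = snmp.test_oid_walk("bgpPeerRemoteAs", ["65001", "65002"])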
diff --git a/tests/topotests/lib/test/__init__.py b/tests/topotests/lib/test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/topotests/lib/test/__init__.py
diff --git a/tests/topotests/lib/test/test_json.py b/tests/topotests/lib/test/test_json.py
new file mode 100755
index 0000000..230c2bd
--- /dev/null
+++ b/tests/topotests/lib/test/test_json.py
@@ -0,0 +1,627 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_json.py
+# Tests for library function: json_cmp().
+#
+# Copyright (c) 2017 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+
+"""
+Tests for the json_cmp() function.
+"""
+
+import os
+import sys
+import pytest
+
+# Save the Current Working Directory to find lib files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../../"))
+
+# pylint: disable=C0413
+from lib.topotest import json_cmp
+
+
+def test_json_intersect_true():
+ "Test simple correct JSON intersections"
+
+ dcomplete = {
+ "i1": "item1",
+ "i2": "item2",
+ "i3": "item3",
+ "i100": "item4",
+ }
+
+ dsub1 = {
+ "i1": "item1",
+ "i3": "item3",
+ }
+ dsub2 = {
+ "i1": "item1",
+ "i2": "item2",
+ }
+ dsub3 = {
+ "i100": "item4",
+ "i2": "item2",
+ }
+ dsub4 = {
+ "i50": None,
+ "i100": "item4",
+ }
+
+ assert json_cmp(dcomplete, dsub1) is None
+ assert json_cmp(dcomplete, dsub2) is None
+ assert json_cmp(dcomplete, dsub3) is None
+ assert json_cmp(dcomplete, dsub4) is None
+
+
+def test_json_intersect_false():
+ "Test simple incorrect JSON intersections"
+
+ dcomplete = {
+ "i1": "item1",
+ "i2": "item2",
+ "i3": "item3",
+ "i100": "item4",
+ }
+
+ # Incorrect value for 'i1'
+ dsub1 = {
+ "i1": "item3",
+ "i3": "item3",
+ }
+ # Non-existing key 'i5'
+ dsub2 = {
+ "i1": "item1",
+ "i5": "item2",
+ }
+ # Key should not exist
+ dsub3 = {
+ "i100": None,
+ }
+
+ assert json_cmp(dcomplete, dsub1) is not None
+ assert json_cmp(dcomplete, dsub2) is not None
+ assert json_cmp(dcomplete, dsub3) is not None
+
+
+def test_json_intersect_multilevel_true():
+ "Test multi level correct JSON intersections"
+
+ dcomplete = {
+ "i1": "item1",
+ "i2": "item2",
+ "i3": {
+ "i100": "item100",
+ },
+ "i4": {
+ "i41": {
+ "i411": "item411",
+ },
+ "i42": {
+ "i421": "item421",
+ "i422": "item422",
+ },
+ },
+ }
+
+ dsub1 = {
+ "i1": "item1",
+ "i3": {
+ "i100": "item100",
+ },
+ "i10": None,
+ }
+ dsub2 = {
+ "i1": "item1",
+ "i2": "item2",
+ "i3": {},
+ }
+ dsub3 = {
+ "i2": "item2",
+ "i4": {
+ "i41": {
+ "i411": "item411",
+ },
+ "i42": {
+ "i422": "item422",
+ "i450": None,
+ },
+ },
+ }
+ dsub4 = {
+ "i2": "item2",
+ "i4": {
+ "i41": {},
+ "i42": {
+ "i450": None,
+ },
+ },
+ }
+ dsub5 = {
+ "i2": "item2",
+ "i3": {
+ "i100": "item100",
+ },
+ "i4": {
+ "i42": {
+ "i450": None,
+ }
+ },
+ }
+
+ assert json_cmp(dcomplete, dsub1) is None
+ assert json_cmp(dcomplete, dsub2) is None
+ assert json_cmp(dcomplete, dsub3) is None
+ assert json_cmp(dcomplete, dsub4) is None
+ assert json_cmp(dcomplete, dsub5) is None
+
+
+def test_json_intersect_multilevel_false():
+ "Test multi level incorrect JSON intersections"
+
+ dcomplete = {
+ "i1": "item1",
+ "i2": "item2",
+ "i3": {
+ "i100": "item100",
+ },
+ "i4": {
+ "i41": {
+ "i411": "item411",
+ },
+ "i42": {
+ "i421": "item421",
+ "i422": "item422",
+ },
+ },
+ }
+
+ # Incorrect sub-level value
+ dsub1 = {
+ "i1": "item1",
+ "i3": {
+ "i100": "item00",
+ },
+ "i10": None,
+ }
+    # Nonexistent sub-level
+ dsub2 = {
+ "i1": "item1",
+ "i2": "item2",
+ "i3": None,
+ }
+    # Nonexistent sub-level value
+ dsub3 = {
+ "i1": "item1",
+ "i3": {
+ "i100": None,
+ },
+ }
+    # Nonexistent sub-sub-level value
+ dsub4 = {
+ "i4": {
+ "i41": {
+ "i412": "item412",
+ },
+ "i42": {
+ "i421": "item421",
+ },
+ }
+ }
+ # Invalid sub-sub-level value
+ dsub5 = {
+ "i4": {
+ "i41": {
+ "i411": "item411",
+ },
+ "i42": {
+ "i421": "item420000",
+ },
+ }
+ }
+ # sub-sub-level should be value
+ dsub6 = {
+ "i4": {
+ "i41": {
+ "i411": "item411",
+ },
+ "i42": "foobar",
+ }
+ }
+
+ assert json_cmp(dcomplete, dsub1) is not None
+ assert json_cmp(dcomplete, dsub2) is not None
+ assert json_cmp(dcomplete, dsub3) is not None
+ assert json_cmp(dcomplete, dsub4) is not None
+ assert json_cmp(dcomplete, dsub5) is not None
+ assert json_cmp(dcomplete, dsub6) is not None
+
+
+def test_json_with_list_success():
+ "Test successful json comparisons that have lists."
+
+ dcomplete = {
+ "list": [
+ {
+ "i1": "item 1",
+ "i2": "item 2",
+ },
+ {
+ "i10": "item 10",
+ },
+ ],
+ "i100": "item 100",
+ }
+
+ # Test list type
+ dsub1 = {
+ "list": [],
+ }
+ # Test list correct list items
+ dsub2 = {
+ "list": [
+ {
+ "i1": "item 1",
+ },
+ ],
+ "i100": "item 100",
+ }
+ # Test list correct list size
+ dsub3 = {
+ "list": [
+ {},
+ {},
+ ],
+ }
+
+ assert json_cmp(dcomplete, dsub1) is None
+ assert json_cmp(dcomplete, dsub2) is None
+ assert json_cmp(dcomplete, dsub3) is None
+
+
+def test_json_with_list_failure():
+ "Test failed json comparisons that have lists."
+
+ dcomplete = {
+ "list": [
+ {
+ "i1": "item 1",
+ "i2": "item 2",
+ },
+ {
+ "i10": "item 10",
+ },
+ ],
+ "i100": "item 100",
+ }
+
+ # Test list type
+ dsub1 = {
+ "list": {},
+ }
+ # Test list incorrect list items
+ dsub2 = {
+ "list": [
+ {
+ "i1": "item 2",
+ },
+ ],
+ "i100": "item 100",
+ }
+    # Test list incorrect list size
+ dsub3 = {
+ "list": [
+ {},
+ {},
+ {},
+ ],
+ }
+
+ assert json_cmp(dcomplete, dsub1) is not None
+ assert json_cmp(dcomplete, dsub2) is not None
+ assert json_cmp(dcomplete, dsub3) is not None
+
+
+def test_json_list_start_success():
+ "Test JSON encoded data that starts with a list that should succeed."
+
+ dcomplete = [
+ {
+ "id": 100,
+ "value": "abc",
+ },
+ {
+ "id": 200,
+ "value": "abcd",
+ },
+ {
+ "id": 300,
+ "value": "abcde",
+ },
+ ]
+
+ dsub1 = [
+ {
+ "id": 100,
+ "value": "abc",
+ }
+ ]
+
+ dsub2 = [
+ {
+ "id": 100,
+ "value": "abc",
+ },
+ {
+ "id": 200,
+ "value": "abcd",
+ },
+ ]
+
+ dsub3 = [
+ {
+ "id": 300,
+ "value": "abcde",
+ }
+ ]
+
+ dsub4 = []
+
+ dsub5 = [
+ {
+ "id": 100,
+ }
+ ]
+
+ assert json_cmp(dcomplete, dsub1) is None
+ assert json_cmp(dcomplete, dsub2) is None
+ assert json_cmp(dcomplete, dsub3) is None
+ assert json_cmp(dcomplete, dsub4) is None
+ assert json_cmp(dcomplete, dsub5) is None
+
+
+def test_json_list_start_failure():
+ "Test JSON encoded data that starts with a list that should fail."
+
+ dcomplete = [
+ {"id": 100, "value": "abc"},
+ {"id": 200, "value": "abcd"},
+ {"id": 300, "value": "abcde"},
+ ]
+
+ dsub1 = [
+ {
+ "id": 100,
+ "value": "abcd",
+ }
+ ]
+
+ dsub2 = [
+ {
+ "id": 100,
+ "value": "abc",
+ },
+ {
+ "id": 200,
+ "value": "abc",
+ },
+ ]
+
+ dsub3 = [
+ {
+ "id": 100,
+ "value": "abc",
+ },
+ {
+ "id": 350,
+ "value": "abcde",
+ },
+ ]
+
+ dsub4 = [
+ {
+ "value": "abcx",
+ },
+ {
+ "id": 300,
+ "value": "abcde",
+ },
+ ]
+
+ assert json_cmp(dcomplete, dsub1) is not None
+ assert json_cmp(dcomplete, dsub2) is not None
+ assert json_cmp(dcomplete, dsub3) is not None
+ assert json_cmp(dcomplete, dsub4) is not None
+
+
+def test_json_list_ordered():
+ "Test JSON encoded data that should be ordered using the '__ordered__' tag."
+
+ dcomplete = [
+ {"id": 1, "value": "abc"},
+ "some string",
+ 123,
+ ]
+
+ dsub1 = [
+ "__ordered__",
+ "some string",
+ {"id": 1, "value": "abc"},
+ 123,
+ ]
+
+ assert json_cmp(dcomplete, dsub1) is not None
+
+
+def test_json_list_exact_matching():
+ "Test JSON array on exact matching using the 'exact' parameter."
+
+ dcomplete = [
+ {"id": 1, "value": "abc"},
+ "some string",
+ 123,
+ [1, 2, 3],
+ ]
+
+ dsub1 = [
+ "some string",
+ {"id": 1, "value": "abc"},
+ 123,
+ [1, 2, 3],
+ ]
+
+ dsub2 = [
+ {"id": 1},
+ "some string",
+ 123,
+ [1, 2, 3],
+ ]
+
+ dsub3 = [
+ {"id": 1, "value": "abc"},
+ "some string",
+ 123,
+ [1, 3, 2],
+ ]
+
+ assert json_cmp(dcomplete, dsub1, exact=True) is not None
+    assert json_cmp(dcomplete, dsub2, exact=True) is not None
+    assert json_cmp(dcomplete, dsub3, exact=True) is not None
+
+
+def test_json_object_exact_matching():
+ "Test JSON object on exact matching using the 'exact' parameter."
+
+ dcomplete = {
+ "a": {"id": 1, "value": "abc"},
+ "b": "some string",
+ "c": 123,
+ "d": [1, 2, 3],
+ }
+
+ dsub1 = {
+ "a": {"id": 1, "value": "abc"},
+ "c": 123,
+ "d": [1, 2, 3],
+ }
+
+ dsub2 = {
+ "a": {"id": 1},
+ "b": "some string",
+ "c": 123,
+ "d": [1, 2, 3],
+ }
+
+ dsub3 = {
+ "a": {"id": 1, "value": "abc"},
+ "b": "some string",
+ "c": 123,
+ "d": [1, 3],
+ }
+
+ assert json_cmp(dcomplete, dsub1, exact=True) is not None
+ assert json_cmp(dcomplete, dsub2, exact=True) is not None
+ assert json_cmp(dcomplete, dsub3, exact=True) is not None
+
+
+def test_json_list_asterisk_matching():
+ "Test JSON array elements on matching '*' as a placeholder for arbitrary data."
+
+ dcomplete = [
+ {"id": 1, "value": "abc"},
+ "some string",
+ 123,
+ [1, 2, 3],
+ ]
+
+ dsub1 = [
+ "*",
+ "some string",
+ 123,
+ [1, 2, 3],
+ ]
+
+ dsub2 = [
+ {"id": "*", "value": "abc"},
+ "some string",
+ 123,
+ [1, 2, 3],
+ ]
+
+ dsub3 = [
+ {"id": 1, "value": "abc"},
+ "some string",
+ 123,
+ [1, "*", 3],
+ ]
+
+ dsub4 = [
+ "*",
+ "some string",
+ "*",
+ [1, 2, 3],
+ ]
+
+ assert json_cmp(dcomplete, dsub1) is None
+ assert json_cmp(dcomplete, dsub2) is None
+ assert json_cmp(dcomplete, dsub3) is None
+ assert json_cmp(dcomplete, dsub4) is None
+
+
+def test_json_object_asterisk_matching():
+ "Test JSON object value elements on matching '*' as a placeholder for arbitrary data."
+
+ dcomplete = {
+ "a": {"id": 1, "value": "abc"},
+ "b": "some string",
+ "c": 123,
+ "d": [1, 2, 3],
+ }
+
+ dsub1 = {
+ "a": "*",
+ "b": "some string",
+ "c": 123,
+ "d": [1, 2, 3],
+ }
+
+ dsub2 = {
+ "a": {"id": 1, "value": "abc"},
+ "b": "some string",
+ "c": 123,
+ "d": [1, "*", 3],
+ }
+
+ dsub3 = {
+ "a": {"id": "*", "value": "abc"},
+ "b": "some string",
+ "c": 123,
+ "d": [1, 2, 3],
+ }
+
+ dsub4 = {
+ "a": "*",
+ "b": "some string",
+ "c": "*",
+ "d": [1, 2, 3],
+ }
+
+ assert json_cmp(dcomplete, dsub1) is None
+ assert json_cmp(dcomplete, dsub2) is None
+ assert json_cmp(dcomplete, dsub3) is None
+ assert json_cmp(dcomplete, dsub4) is None
+
+
+def test_json_list_nested_with_objects():
+ dcomplete = [{"key": 1, "list": [123]}, {"key": 2, "list": [123]}]
+
+ dsub1 = [{"key": 2, "list": [123]}, {"key": 1, "list": [123]}]
+
+ assert json_cmp(dcomplete, dsub1) is None
+
+
+if __name__ == "__main__":
+ sys.exit(pytest.main())
diff --git a/tests/topotests/lib/test/test_run_and_expect.py b/tests/topotests/lib/test/test_run_and_expect.py
new file mode 100755
index 0000000..5e171b2
--- /dev/null
+++ b/tests/topotests/lib/test/test_run_and_expect.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_run_and_expect.py
+# Tests for library function: run_and_expect(_type)().
+#
+# Copyright (c) 2019 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+
+"""
+Tests for the `run_and_expect(_type)()` functions.
+"""
+
+import os
+import sys
+import pytest
+
+# Save the Current Working Directory to find lib files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../../"))
+
+# pylint: disable=C0413
+from lib.topotest import run_and_expect_type
+
+
+def test_run_and_expect_type():
+ "Test basic `run_and_expect_type` functionality."
+
+ def return_true():
+ "Test function that returns `True`."
+ return True
+
+ # Test value success.
+ success, value = run_and_expect_type(
+ return_true, bool, count=1, wait=0, avalue=True
+ )
+ assert success is True
+ assert value is True
+
+ # Test value failure.
+ success, value = run_and_expect_type(
+ return_true, bool, count=1, wait=0, avalue=False
+ )
+ assert success is False
+ assert value is True
+
+ # Test type success.
+ success, value = run_and_expect_type(return_true, bool, count=1, wait=0)
+ assert success is True
+ assert value is True
+
+ # Test type failure.
+ success, value = run_and_expect_type(return_true, str, count=1, wait=0)
+ assert success is False
+ assert value is True
+
+ # Test type failure, return correct type.
+ success, value = run_and_expect_type(return_true, str, count=1, wait=0, avalue=True)
+ assert success is False
+ assert value is True
+
+
+if __name__ == "__main__":
+ sys.exit(pytest.main())
diff --git a/tests/topotests/lib/test/test_version.py b/tests/topotests/lib/test/test_version.py
new file mode 100755
index 0000000..7b18daa
--- /dev/null
+++ b/tests/topotests/lib/test/test_version.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_version.py
+# Tests for library function: version_cmp().
+#
+# Copyright (c) 2017 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+
+"""
+Tests for the version_cmp() function.
+"""
+
+import os
+import sys
+import pytest
+
+# Save the Current Working Directory to find lib files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../../"))
+
+# pylint: disable=C0413
+from lib.topotest import version_cmp
+
+
+def test_valid_versions():
+ "Test valid version compare results"
+
+ curver = "3.0"
+ samever = "3"
+ oldver = "2.0"
+ newver = "3.0.1"
+ newerver = "3.0.11"
+ vercustom = "3.0-dev"
+ verysmallinc = "3.0.0.0.0.0.0.1"
+
+ assert version_cmp(curver, oldver) == 1
+ assert version_cmp(curver, newver) == -1
+ assert version_cmp(curver, curver) == 0
+ assert version_cmp(curver, newerver) == -1
+ assert version_cmp(newver, newerver) == -1
+ assert version_cmp(curver, samever) == 0
+ assert version_cmp(curver, vercustom) == 0
+ assert version_cmp(vercustom, vercustom) == 0
+ assert version_cmp(vercustom, oldver) == 1
+ assert version_cmp(vercustom, newver) == -1
+ assert version_cmp(vercustom, samever) == 0
+ assert version_cmp(curver, verysmallinc) == -1
+ assert version_cmp(newver, verysmallinc) == 1
+ assert version_cmp(verysmallinc, verysmallinc) == 0
+ assert version_cmp(vercustom, verysmallinc) == -1
+
+
+def test_invalid_versions():
+ "Test invalid version strings"
+
+ curver = "3.0"
+ badver1 = ".1"
+ badver2 = "-1.0"
+ badver3 = "."
+ badver4 = "3.-0.3"
+
+    with pytest.raises(ValueError):
+        version_cmp(curver, badver1)
+    with pytest.raises(ValueError):
+        version_cmp(curver, badver2)
+    with pytest.raises(ValueError):
+        version_cmp(curver, badver3)
+    with pytest.raises(ValueError):
+        version_cmp(curver, badver4)
+
+
+def test_regression_1():
+ """
+ Test regression on the following type of comparison: '3.0.2' > '3'
+ Expected result is 1.
+ """
+ assert version_cmp("3.0.2", "3") == 1
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
new file mode 100644
index 0000000..4d935b9
--- /dev/null
+++ b/tests/topotests/lib/topogen.py
@@ -0,0 +1,1390 @@
+# SPDX-License-Identifier: ISC
+#
+# topogen.py
+# Library of helper functions for NetDEF Topology Tests
+#
+# Copyright (c) 2017 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+
+"""
+Topogen (Topology Generator) is an abstraction around Topotest and Mininet to
+help reduce boilerplate code and provide a stable interface to build topology
+tests on.
+
+Basic usage instructions:
+
+* Define a Topology class with a build method using mininet.topo.Topo.
+ See examples/test_template.py.
+* Use Topogen inside the build() method with get_topogen.
+ e.g. get_topogen(self).
+* Start up your topology with: Topogen(YourTopology)
+* Initialize the Mininet with your topology with: tgen.start_topology()
+* Configure your routers/hosts and start them
+* Run your tests / mininet cli.
+* After running stop Mininet with: tgen.stop_topology()
+"""
+
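+# Minimal illustrative skeleton (the topology definition and module name are
+# hypothetical):
+#
+#   tgen = Topogen({"s1": ("r1", "r2")}, "example_module")
+#   tgen.start_topology()
+#   # ... configure and start routers, run checks ...
+#   tgen.stop_topology()
+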
+import configparser
+import grp
+import inspect
+import json
+import logging
+import os
+import platform
+import pwd
+import re
+import shlex
+import subprocess
+import sys
+from collections import OrderedDict
+
+import lib.topolog as topolog
+from lib.micronet import Commander
+from lib.micronet_compat import Mininet
+from lib.topolog import logger
+from munet.testing.util import pause_test
+
+from lib import topotest
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+
+# pylint: disable=C0103
+# Global Topogen variable. This is being used to keep the Topogen available on
+# all test functions without declaring a test local variable.
+global_tgen = None
+
+
+def get_topogen(topo=None):
+ """
+ Helper function to retrieve Topogen. Must be called with `topo` when called
+ inside the build() method of Topology class.
+ """
+ if topo is not None:
+ global_tgen.topo = topo
+ return global_tgen
+
+
+def set_topogen(tgen):
+ "Helper function to set Topogen"
+ # pylint: disable=W0603
+ global global_tgen
+ global_tgen = tgen
+
+
+def is_string(value):
+ """Return True if value is a string."""
+ try:
+ return isinstance(value, basestring) # type: ignore
+ except NameError:
+ return isinstance(value, str)
+
+
+def get_exabgp_cmd(commander=None):
+ """Return the command to use for ExaBGP version < 4."""
+
+ if commander is None:
+ commander = Commander("exabgp", logger=logging.getLogger("exabgp"))
+
+ def exacmd_version_ok(exacmd):
+ logger.debug("checking %s for exabgp < version 4", exacmd)
+ _, stdout, _ = commander.cmd_status(exacmd + " -v", warn=False)
+ m = re.search(r"ExaBGP\s*:\s*((\d+)\.(\d+)(?:\.(\d+))?)", stdout)
+ if not m:
+ return False
+ version = m.group(1)
+ if topotest.version_cmp(version, "4") >= 0:
+ logging.debug("found exabgp version >= 4 in %s will keep looking", exacmd)
+ return False
+ logger.info("Using ExaBGP version %s in %s", version, exacmd)
+ return True
+
+ exacmd = commander.get_exec_path("exabgp")
+ if exacmd and exacmd_version_ok(exacmd):
+ return exacmd
+ py2_path = commander.get_exec_path("python2")
+ if py2_path:
+ exacmd = py2_path + " -m exabgp"
+ if exacmd_version_ok(exacmd):
+ return exacmd
+ py2_path = commander.get_exec_path("python")
+ if py2_path:
+ exacmd = py2_path + " -m exabgp"
+ if exacmd_version_ok(exacmd):
+ return exacmd
+ return None
+
+
+#
+# Main class: topology builder
+#
+
+# Topogen configuration defaults
+tgen_defaults = {
+ "verbosity": "info",
+ "frrdir": "/usr/lib/frr",
+ "routertype": "frr",
+ "memleak_path": "",
+}
+
+
+class Topogen(object):
+ "A topology test builder helper."
+
+ CONFIG_SECTION = "topogen"
+
+ def __init__(self, topodef, modname="unnamed"):
+ """
+ Topogen initialization function, takes the following arguments:
+ * `topodef`: a dictionary defining the topology, the filename of a JSON
+ file, or a build function that will do the same
+ * `modname`: module name, must be a unique name to identify logs later.
+ """
+ self.config = None
+ self.net = None
+ self.gears = {}
+ self.routern = 1
+ self.switchn = 1
+ self.modname = modname
+ self.errorsd = {}
+ self.errors = ""
+ self.peern = 1
+ self.cfg_gen = 0
+ self.exabgp_cmd = None
+ self._init_topo(topodef)
+
+ logger.info("loading topology: {}".format(self.modname))
+
+ # @staticmethod
+ # def _mininet_reset():
+ # "Reset the mininet environment"
+ # # Clean up the mininet environment
+ # os.system("mn -c > /dev/null 2>&1")
+
+ def __str__(self):
+ return "Topogen()"
+
+ def _init_topo(self, topodef):
+ """
+ Initialize the topology provided by the user. A user-supplied build
+ function must call get_topogen() during build to get the topogen object.
+ """
+ # Set the global variable so the test cases can access it anywhere
+ set_topogen(self)
+
+ # Increase host based limits
+ topotest.fix_host_limits()
+
+ # Test for MPLS Kernel modules available
+ self.hasmpls = False
+ if not topotest.module_present("mpls-router"):
+ logger.info("MPLS tests will not run (missing mpls-router kernel module)")
+ elif not topotest.module_present("mpls-iptunnel"):
+ logger.info("MPLS tests will not run (missing mpls-iptunnel kernel module)")
+ else:
+ self.hasmpls = True
+
+ # Load the default topology configurations
+ self._load_config()
+
+ # Create new log directory
+ self.logdir = topotest.get_logs_path(topotest.g_pytest_config.option.rundir)
+ subprocess.check_call(
+ "mkdir -p {0} && chmod 1777 {0}".format(self.logdir), shell=True
+ )
+ try:
+ routertype = self.config.get(self.CONFIG_SECTION, "routertype")
+ # Only allow the group, if it exists.
+ gid = grp.getgrnam(routertype)[2]
+ os.chown(self.logdir, 0, gid)
+ os.chmod(self.logdir, 0o775)
+ except KeyError:
+ # Allow anyone, but set the sticky bit to avoid file deletions
+ os.chmod(self.logdir, 0o1777)
+
+ # Remove the old twisty way of creating a sub-classed topology object which
+ # had its build method invoked, calling Topogen methods which then called
+ # Topo methods to create a topology within the Topo object, which was then
+ # used by Mininet(Micronet) to build the actual topology.
+ assert not inspect.isclass(topodef)
+
+ self.net = Mininet(
+ rundir=self.logdir,
+ pytestconfig=topotest.g_pytest_config,
+ logger=topolog.get_logger("mu", log_level="debug"),
+ )
+
+ # Adjust the parent namespace
+ topotest.fix_netns_limits(self.net)
+
+ # New direct way: Either a dictionary defines the topology or a build function
+ # is supplied, or a json filename all of which build the topology by calling
+ # Topogen methods which call Mininet(Micronet) methods to create the actual
+ # topology.
+ if not inspect.isclass(topodef):
+ if callable(topodef):
+ topodef(self)
+ self.net.configure_hosts()
+ elif is_string(topodef):
+ # topojson imports topogen in one function too,
+ # switch away from this use here to the topojson
+ # fixture and remove this case
+ from lib.topojson import build_topo_from_json
+
+ with open(topodef, "r") as topof:
+ self.json_topo = json.load(topof)
+ build_topo_from_json(self, self.json_topo)
+ self.net.configure_hosts()
+ elif topodef:
+ self.add_topology_from_dict(topodef)
+
+ def add_topology_from_dict(self, topodef):
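+ """
+ Add routers, switches and links from a topology dictionary.
+
+ Keys normally name switches (a None key just creates the referenced
+ routers with no switch); each value is a string or tuple of strings of
+ the form "router[:router-ifname[:switch-ifname]]", as parsed below.
+ A minimal illustrative sketch:
+
+ ```py
+ tgen.add_topology_from_dict({"s1": ("r1:r1-eth0", "r2")})
+ ```
+ """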
+ keylist = (
+ topodef.keys()
+ if isinstance(topodef, OrderedDict)
+ else sorted(topodef.keys())
+ )
+ # ---------------------------
+ # Create all referenced hosts
+ # ---------------------------
+ for oname in keylist:
+ tup = (topodef[oname],) if is_string(topodef[oname]) else topodef[oname]
+ for e in tup:
+ desc = e.split(":")
+ name = desc[0]
+ if name not in self.gears:
+ logging.debug("Adding router: %s", name)
+ self.add_router(name)
+
+ # ------------------------------
+ # Create all referenced switches
+ # ------------------------------
+ for oname in keylist:
+ if oname is not None and oname not in self.gears:
+ logging.debug("Adding switch: %s", oname)
+ self.add_switch(oname)
+
+ # ----------------
+ # Create all links
+ # ----------------
+ for oname in keylist:
+ if oname is None:
+ continue
+ tup = (topodef[oname],) if is_string(topodef[oname]) else topodef[oname]
+ for e in tup:
+ desc = e.split(":")
+ name = desc[0]
+ ifname = desc[1] if len(desc) > 1 else None
+ sifname = desc[2] if len(desc) > 2 else None
+ self.add_link(self.gears[oname], self.gears[name], sifname, ifname)
+
+ self.net.configure_hosts()
+
+ def _load_config(self):
+ """
+ Loads the configuration file `pytest.ini` located at the root dir of
+ topotests.
+ """
+ self.config = configparser.ConfigParser(tgen_defaults)
+ pytestini_path = os.path.join(CWD, "../pytest.ini")
+ self.config.read(pytestini_path)
+
+ def add_router(self, name=None, cls=None, **params):
+ """
+ Adds a new router to the topology. This function has the following
+ options:
+ * `name`: (optional) select the router name
+ * `daemondir`: (optional) custom daemon binary directory
+ * `routertype`: (optional) `frr`
+ Returns a TopoRouter.
+ """
+ if cls is None:
+ cls = topotest.Router
+ if name is None:
+ name = "r{}".format(self.routern)
+ if name in self.gears:
+ raise KeyError("router already exists")
+
+ params["frrdir"] = self.config.get(self.CONFIG_SECTION, "frrdir")
+ params["memleak_path"] = self.config.get(self.CONFIG_SECTION, "memleak_path")
+ if "routertype" not in params:
+ params["routertype"] = self.config.get(self.CONFIG_SECTION, "routertype")
+
+ self.gears[name] = TopoRouter(self, cls, name, **params)
+ self.routern += 1
+ return self.gears[name]
+
+ def add_switch(self, name=None):
+ """
+ Adds a new switch to the topology. This function has the following
+ options:
+ * `name`: (optional) select the switch name
+ Returns a TopoSwitch.
+ """
+ if name is None:
+ name = "s{}".format(self.switchn)
+ if name in self.gears:
+ raise KeyError("switch already exists")
+
+ self.gears[name] = TopoSwitch(self, name)
+ self.switchn += 1
+ return self.gears[name]
+
+ def add_exabgp_peer(self, name, ip, defaultRoute):
+ """
+ Adds a new ExaBGP peer to the topology. This function has the following
+ parameters:
+ * `ip`: the peer address (e.g. '1.2.3.4/24')
+ * `defaultRoute`: the peer default route (e.g. 'via 1.2.3.1')
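+
+ For example (addresses illustrative):
+
+ ```py
+ peer = tgen.add_exabgp_peer("peer1", ip="1.2.3.4/24", defaultRoute="via 1.2.3.1")
+ ```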
+ """
+ if name is None:
+ name = "peer{}".format(self.peern)
+ if name in self.gears:
+ raise KeyError("exabgp peer already exists")
+
+ self.gears[name] = TopoExaBGP(self, name, ip=ip, defaultRoute=defaultRoute)
+ self.peern += 1
+ return self.gears[name]
+
+ def add_host(self, name, ip, defaultRoute):
+ """
+ Adds a new host to the topology. This function has the following
+ parameters:
+ * `ip`: the host address (e.g. '1.2.3.4/24')
+ * `defaultRoute`: the host default route (e.g. 'via 1.2.3.1')
+ """
+ if name is None:
+ name = "host{}".format(self.peern)
+ if name in self.gears:
+ raise KeyError("host already exists")
+
+ self.gears[name] = TopoHost(self, name, ip=ip, defaultRoute=defaultRoute)
+ self.peern += 1
+ return self.gears[name]
+
+ def add_bmp_server(self, name, ip, defaultRoute, port=1789):
+ """Add the bmp collector gear"""
+ if name in self.gears:
+ raise KeyError("The bmp server already exists")
+
+ self.gears[name] = TopoBMPCollector(
+ self, name, ip=ip, defaultRoute=defaultRoute, port=port
+ )
+
+ def add_link(self, node1, node2, ifname1=None, ifname2=None):
+ """
+ Creates a connection between node1 and node2. The nodes can be the
+ following:
+ * TopoGear
+ * TopoRouter
+ * TopoSwitch
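+
+ For example, linking two gears with auto-generated interface names
+ (gear names illustrative):
+
+ ```py
+ tgen.add_link(tgen.gears["r1"], tgen.gears["s1"])
+ ```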
+ """
+ if not isinstance(node1, TopoGear):
+ raise ValueError("invalid node1 type")
+ if not isinstance(node2, TopoGear):
+ raise ValueError("invalid node2 type")
+
+ if ifname1 is None:
+ ifname1 = node1.new_link()
+ if ifname2 is None:
+ ifname2 = node2.new_link()
+
+ node1.register_link(ifname1, node2, ifname2)
+ node2.register_link(ifname2, node1, ifname1)
+ self.net.add_link(node1.name, node2.name, ifname1, ifname2)
+
+ def get_gears(self, geartype):
+ """
+ Returns a dictionary of all gears of type `geartype`.
+
+ Normal usage:
+ * Dictionary iteration:
+ ```py
+ tgen = get_topogen()
+ router_dict = tgen.get_gears(TopoRouter)
+ for router_name, router in router_dict.items():
+ # Do stuff
+ ```
+ * List iteration:
+ ```py
+ tgen = get_topogen()
+ peer_list = tgen.get_gears(TopoExaBGP).values()
+ for peer in peer_list:
+ # Do stuff
+ ```
+ """
+ return dict(
+ (name, gear)
+ for name, gear in self.gears.items()
+ if isinstance(gear, geartype)
+ )
+
+ def routers(self):
+ """
+ Returns the router dictionary (key is the router name and value is the
+ router object itself).
+ """
+ return self.get_gears(TopoRouter)
+
+ def exabgp_peers(self):
+ """
+ Returns the exabgp peer dictionary (key is the peer name and value is
+ the peer object itself).
+ """
+ return self.get_gears(TopoExaBGP)
+
+ def get_bmp_servers(self):
+ """
+ Returns the BMP server dictionary (the key is the BMP server name and
+ the value is the BMP server object itself).
+ """
+ return self.get_gears(TopoBMPCollector)
+
+ def start_topology(self):
+ """Starts the topology class."""
+ logger.info("starting topology: {}".format(self.modname))
+ self.net.start()
+
+ def start_router(self, router=None):
+ """
+ Call the router startRouter method.
+ If no router is specified it is called for all registered routers.
+ """
+ if router is None:
+ # pylint: disable=r1704
+ # XXX should be hosts?
+ for _, router in self.routers().items():
+ router.start()
+ else:
+ if isinstance(router, str):
+ router = self.gears[router]
+
+ router.start()
+
+ def stop_topology(self):
+ """
+ Stops the network topology. This function will call the stop() function
+ of all gears before calling the mininet stop function, so they can have
+ their opportunity to do a graceful shutdown. stop() is called twice. The
+ first is a simple kill with no sleep, the second will sleep if not
+ killed and try with a different signal.
+ """
+ pause = bool(self.net.cfgopt.get_option("--pause-at-end"))
+ pause = pause or bool(self.net.cfgopt.get_option("--pause"))
+ if pause:
+ try:
+ pause_test("Before MUNET delete")
+ except KeyboardInterrupt:
+ print("^C...continuing")
+ except Exception as error:
+ logger.error("\n...continuing after error: %s", error)
+
+ logger.info("stopping topology: {}".format(self.modname))
+
+ errors = ""
+ for gear in self.gears.values():
+ errors += gear.stop()
+ if len(errors) > 0:
+ logger.error(
+ "Errors found post shutdown - details follow: {}".format(errors)
+ )
+
+ self.net.stop()
+
+ def get_exabgp_cmd(self):
+ if not self.exabgp_cmd:
+ self.exabgp_cmd = get_exabgp_cmd(self.net)
+ return self.exabgp_cmd
+
+ def cli(self):
+ """
+ Interrupt the test and call the command line interface for manual
+ inspection. Should only be used on non-production code.
+ """
+ self.net.cli()
+
+ mininet_cli = cli
+
+ def is_memleak_enabled(self):
+ "Returns `True` if memory leak report is enable, otherwise `False`."
+ # On router failure we can't run the memory leak test
+ if self.routers_have_failure():
+ return False
+
+ memleak_file = os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.config.get(
+ self.CONFIG_SECTION, "memleak_path"
+ )
+ if memleak_file == "" or memleak_file is None:
+ return False
+ return True
+
+ def report_memory_leaks(self, testname=None):
+ "Run memory leak test and reports."
+ if not self.is_memleak_enabled():
+ return
+
+ # If no name was specified, use the test module name
+ if testname is None:
+ testname = self.modname
+
+ router_list = self.routers().values()
+ for router in router_list:
+ router.report_memory_leaks(testname)
+
+ def set_error(self, message, code=None):
+ "Sets an error message and signal other tests to skip."
+ logger.info("setting error msg: %s", message)
+
+ # If no code is defined use a sequential number
+ if code is None:
+ code = len(self.errorsd)
+
+ self.errorsd[code] = message
+ self.errors += "\n{}: {}".format(code, message)
+
+ def has_errors(self):
+ "Returns whether errors exist or not."
+ return len(self.errorsd) > 0
+
+ def routers_have_failure(self):
+ "Runs an assertion to make sure that all routers are running."
+ if self.has_errors():
+ return True
+
+ errors = ""
+ router_list = self.routers().values()
+ for router in router_list:
+ result = router.check_router_running()
+ if result != "":
+ errors += result + "\n"
+
+ if errors != "":
+ self.set_error(errors, "router_error")
+ assert False, errors
+ return True
+ return False
+
+
+#
+# Topology gears (equipment)
+#
+
+
+class TopoGear(object):
+ "Abstract class for type checking"
+
+ def __init__(self, tgen, name, **params):
+ self.tgen = tgen
+ self.name = name
+ self.params = params
+ self.links = {}
+ self.linkn = 0
+
+ # Would be nice for this to point at the gears log directory rather than the
+ # test's.
+ self.logdir = tgen.logdir
+ self.gearlogdir = None
+
+ def __str__(self):
+ links = ""
+ for myif, dest in self.links.items():
+ _, destif = dest
+ if links != "":
+ links += ","
+ links += '"{}"<->"{}"'.format(myif, destif)
+
+ return 'TopoGear<name="{}",links=[{}]>'.format(self.name, links)
+
+ @property
+ def net(self):
+ return self.tgen.net[self.name]
+
+ def start(self):
+ "Basic start function that just reports equipment start"
+ logger.info('starting "{}"'.format(self.name))
+
+ def stop(self, wait=True, assertOnError=True):
+ "Basic stop function that just reports equipment stop"
+ logger.info('"{}" base stop called'.format(self.name))
+ return ""
+
+ def cmd(self, command, **kwargs):
+ """
+ Runs the provided command string in the router and returns a string
+ with the response.
+ """
+ return self.net.cmd_legacy(command, **kwargs)
+
+ def cmd_raises(self, command, **kwargs):
+ """
+ Runs the provided command string in the router and returns a string
+ with the response. Raise an exception on any error.
+ """
+ return self.net.cmd_raises(command, **kwargs)
+
+ run = cmd
+
+ def popen(self, *params, **kwargs):
+ """
+ Creates a pipe with the given command. Same args as python Popen.
+ If `command` is a string then will be invoked with shell, otherwise
+ `command` is a list and will be invoked w/o shell. Returns a popen object.
+ """
+ return self.net.popen(*params, **kwargs)
+
+ def add_link(self, node, myif=None, nodeif=None):
+ """
+ Creates a link (connection) between myself and the specified node.
+ Interface names can be specified with:
+ myif: the interface name that will be created in this node
+ nodeif: the target interface name that will be created on the remote node.
+ """
+ self.tgen.add_link(self, node, myif, nodeif)
+
+ def link_enable(self, myif, enabled=True, netns=None):
+ """
+ Set this node interface administrative state.
+ myif: this node interface name
+ enabled: whether we should enable or disable the interface
+ """
+ if myif not in self.links.keys():
+ raise KeyError("interface doesn't exists")
+
+ if enabled is True:
+ operation = "up"
+ else:
+ operation = "down"
+
+ logger.info(
+ 'setting node "{}" link "{}" to state "{}"'.format(
+ self.name, myif, operation
+ )
+ )
+ extract = ""
+ if netns is not None:
+ extract = "ip netns exec {} ".format(netns)
+
+ return self.run("{}ip link set dev {} {}".format(extract, myif, operation))
+
+ def peer_link_enable(self, myif, enabled=True, netns=None):
+ """
+ Set the peer interface administrative state.
+ myif: this node interface name
+ enabled: whether we should enable or disable the interface
+
+ NOTE: this is used to simulate a link down on this node, since when the
+ peer disables their interface our interface status changes to no link.
+ """
+ if myif not in self.links.keys():
+ raise KeyError("interface doesn't exists")
+
+ node, nodeif = self.links[myif]
+ node.link_enable(nodeif, enabled, netns)
+
+ def new_link(self):
+ """
+ Generates a new unique link name.
+
+ NOTE: This function should only be called by Topogen.
+ """
+ ifname = "{}-eth{}".format(self.name, self.linkn)
+ self.linkn += 1
+ return ifname
+
+ def register_link(self, myif, node, nodeif):
+ """
+ Register link between this node interface and outside node.
+
+ NOTE: This function should only be called by Topogen.
+ """
+ if myif in self.links.keys():
+ raise KeyError("interface already exists")
+
+ self.links[myif] = (node, nodeif)
+
+ def _setup_tmpdir(self):
+ topotest.setup_node_tmpdir(self.logdir, self.name)
+ self.gearlogdir = "{}/{}".format(self.logdir, self.name)
+ return "{}/{}.log".format(self.logdir, self.name)
+
+
+class TopoRouter(TopoGear):
+ """
+ Router abstraction.
+ """
+
+ # The default required directories by FRR
+ PRIVATE_DIRS = [
+ "/etc/frr",
+ "/etc/snmp",
+ "/var/run/frr",
+ "/var/log",
+ ]
+
+ # Router Daemon enumeration definition.
+ RD_FRR = 0 # not a daemon, but used to set up unified configs
+ RD_ZEBRA = 1
+ RD_RIP = 2
+ RD_RIPNG = 3
+ RD_OSPF = 4
+ RD_OSPF6 = 5
+ RD_ISIS = 6
+ RD_BGP = 7
+ RD_LDP = 8
+ RD_PIM = 9
+ RD_EIGRP = 10
+ RD_NHRP = 11
+ RD_STATIC = 12
+ RD_BFD = 13
+ RD_SHARP = 14
+ RD_BABEL = 15
+ RD_PBRD = 16
+ RD_PATH = 17
+ RD_SNMP = 18
+ RD_PIM6 = 19
+ RD_MGMTD = 20
+ RD = {
+ RD_FRR: "frr",
+ RD_ZEBRA: "zebra",
+ RD_RIP: "ripd",
+ RD_RIPNG: "ripngd",
+ RD_OSPF: "ospfd",
+ RD_OSPF6: "ospf6d",
+ RD_ISIS: "isisd",
+ RD_BGP: "bgpd",
+ RD_PIM: "pimd",
+ RD_PIM6: "pim6d",
+ RD_LDP: "ldpd",
+ RD_EIGRP: "eigrpd",
+ RD_NHRP: "nhrpd",
+ RD_STATIC: "staticd",
+ RD_BFD: "bfdd",
+ RD_SHARP: "sharpd",
+ RD_BABEL: "babeld",
+ RD_PBRD: "pbrd",
+ RD_PATH: "pathd",
+ RD_SNMP: "snmpd",
+ RD_MGMTD: "mgmtd",
+ }
+
+ def __init__(self, tgen, cls, name, **params):
+ """
+ The constructor has the following parameters:
+ * tgen: Topogen object
+ * cls: router class that will be used to instantiate
+ * name: router name
+ * daemondir: daemon binary directory
+ * routertype: 'frr'
+ """
+ super(TopoRouter, self).__init__(tgen, name, **params)
+ self.routertype = params.get("routertype", "frr")
+ if "private_mounts" not in params:
+ params["private_mounts"] = self.PRIVATE_DIRS
+
+ # Propagate the router log directory
+ logfile = self._setup_tmpdir()
+ params["logdir"] = self.logdir
+
+ self.logger = topolog.get_logger(name, log_level="debug", target=logfile)
+ params["logger"] = self.logger
+ tgen.net.add_host(self.name, cls=cls, **params)
+ topotest.fix_netns_limits(tgen.net[name])
+
+ # Mount gear log directory on a common path
+ self.net.bind_mount(self.gearlogdir, "/tmp/gearlogdir")
+
+ # Ensure pid file
+ with open(os.path.join(self.logdir, self.name + ".pid"), "w") as f:
+ f.write(str(self.net.pid) + "\n")
+
+ def __str__(self):
+ gear = super(TopoRouter, self).__str__()
+ gear += " TopoRouter<>"
+ return gear
+
+ def check_capability(self, daemon, param):
+ """
+ Checks a capability daemon against an argument option
+ Return True if capability available. False otherwise
+ """
+ daemonstr = self.RD.get(daemon)
+ self.logger.info('check capability {} for "{}"'.format(param, daemonstr))
+ return self.net.checkCapability(daemonstr, param)
+
+ def load_frr_config(self, source, daemons=None):
+ """
+ Loads the unified configuration file `source`.
+ Starts the daemons in the list.
+ If daemons is None, tries to infer the daemons from the config file.
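+
+ A sketch, assuming an r1/frr.conf file exists in the test directory:
+
+ ```py
+ router.load_frr_config("frr.conf")
+ ```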
+ """
+ source_path = self.load_config(self.RD_FRR, source)
+ if not daemons:
+ # Always add zebra
+ self.load_config(self.RD_ZEBRA, "")
+ for daemon in self.RD:
+ # This will not work for all daemons
+ daemonstr = self.RD.get(daemon).rstrip("d")
+ if daemonstr == "pim":
+ grep_cmd = "grep 'ip {}' {}".format(daemonstr, source_path)
+ else:
+ grep_cmd = "grep 'router {}' {}".format(daemonstr, source_path)
+ result = self.run(grep_cmd, warn=False).strip()
+ if result:
+ self.load_config(daemon, "")
+ else:
+ for daemon in daemons:
+ self.load_config(daemon, "")
+
+ def load_config(self, daemon, source=None, param=None):
+ """Loads daemon configuration from the specified source
+ Possible daemon values are: TopoRouter.RD_ZEBRA, TopoRouter.RD_RIP,
+ TopoRouter.RD_RIPNG, TopoRouter.RD_OSPF, TopoRouter.RD_OSPF6,
+ TopoRouter.RD_ISIS, TopoRouter.RD_BGP, TopoRouter.RD_LDP,
+ TopoRouter.RD_PIM, TopoRouter.RD_PIM6, TopoRouter.RD_PBR,
+ TopoRouter.RD_SNMP, TopoRouter.RD_MGMTD.
+
+ Possible `source` values are `None` for an empty config file, a path name which is
+ used directly, or a file name with no path components which is first looked for
+ directly and then looked for under a sub-directory named after the router.
+
+ This API unfortunately allows for source to not exist for any and
+ all routers.
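+
+ For example (file name illustrative):
+
+ ```py
+ router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+ ```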
+ """
+ daemonstr = self.RD.get(daemon)
+ self.logger.debug('loading "{}" configuration: {}'.format(daemonstr, source))
+ return self.net.loadConf(daemonstr, source, param)
+
+ def check_router_running(self):
+ """
+ Run a series of checks and returns a status string.
+ """
+ self.logger.info("checking if daemons are running")
+ return self.net.checkRouterRunning()
+
+ def start(self):
+ """
+ Start router:
+ * Load modules
+ * Clean up files
+ * Configure interfaces
+ * Start daemons (e.g. FRR)
+ * Configure daemon logging files
+ """
+
+ nrouter = self.net
+ result = nrouter.startRouter(self.tgen)
+
+ # Enable command logging
+
+ # Enable all daemon command logging, logging files
+ # and set them to the start dir.
+ for daemon, enabled in nrouter.daemons.items():
+ if enabled and daemon != "snmpd":
+ self.vtysh_cmd(
+ "\n".join(
+ [
+ "clear log cmdline-targets",
+ "conf t",
+ "log file {}.log debug".format(daemon),
+ "log commands",
+ "log timestamp precision 6",
+ ]
+ ),
+ daemon=daemon,
+ )
+
+ if result != "":
+ self.tgen.set_error(result)
+ elif nrouter.daemons["ldpd"] == 1 or nrouter.daemons["pathd"] == 1:
+ # Enable MPLS processing on all interfaces.
+ for interface in self.links:
+ topotest.sysctl_assure(
+ nrouter, "net.mpls.conf.{}.input".format(interface), 1
+ )
+
+ return result
+
+ def stop(self):
+ """
+ Stop router cleanly:
+ * Signal daemons twice, once with SIGTERM, then with SIGKILL.
+ """
+ self.logger.debug("stopping (no assert)")
+ return self.net.stopRouter(False)
+
+ def startDaemons(self, daemons):
+ """
+ Start Daemons: to start specific daemon(user defined daemon only)
+ * Start daemons (e.g. FRR)
+ * Configure daemon logging files
+ """
+ self.logger.debug("starting")
+ nrouter = self.net
+ result = nrouter.startRouterDaemons(daemons)
+
+ if daemons is None:
+ daemons = nrouter.daemons.keys()
+
+ # Enable all daemon command logging, logging files
+ # and set them to the start dir.
+ for daemon in daemons:
+ enabled = nrouter.daemons[daemon]
+ if enabled and daemon != "snmpd":
+ self.vtysh_cmd(
+ "\n".join(
+ [
+ "clear log cmdline-targets",
+ "conf t",
+ "log file {}.log debug".format(daemon),
+ "log commands",
+ "log timestamp precision 6",
+ ]
+ ),
+ daemon=daemon,
+ )
+
+ if result != "":
+ self.tgen.set_error(result)
+
+ return result
+
+ def killDaemons(self, daemons, wait=True, assertOnError=True):
+ """
+ Kill specific daemon(user defined daemon only)
+ forcefully using SIGKILL
+ """
+ self.logger.debug("Killing daemons using SIGKILL..")
+ return self.net.killRouterDaemons(daemons, wait, assertOnError)
+
+ def vtysh_cmd(self, command, isjson=False, daemon=None):
+ """
+ Runs the provided command string in the vty shell and returns a string
+ with the response.
+
+ This function also accepts multiple commands, but this mode does not
+ return output for each command. See vtysh_multicmd() for more details.
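+
+ For example, to get parsed JSON output (command illustrative):
+
+ ```py
+ output = router.vtysh_cmd("show ip route json", isjson=True)
+ ```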
+ """
+ # Detect multi line commands
+ if command.find("\n") != -1:
+ return self.vtysh_multicmd(command, daemon=daemon)
+
+ dparam = ""
+ if daemon is not None:
+ dparam += "-d {}".format(daemon)
+
+ vtysh_command = "vtysh {} -c {} 2>/dev/null".format(
+ dparam, shlex.quote(command)
+ )
+
+ self.logger.debug("vtysh command => {}".format(shlex.quote(command)))
+ output = self.run(vtysh_command)
+
+ dbgout = output.strip()
+ if dbgout:
+ if "\n" in dbgout:
+ dbgout = dbgout.replace("\n", "\n\t")
+ self.logger.debug("vtysh result:\n\t{}".format(dbgout))
+ else:
+ self.logger.debug('vtysh result: "{}"'.format(dbgout))
+
+ if isjson is False:
+ return output
+
+ try:
+ return json.loads(output)
+ except ValueError as error:
+ logger.warning(
+ "vtysh_cmd: %s: failed to convert json output: %s: %s",
+ self.name,
+ str(output),
+ str(error),
+ )
+ return {}
+
+ def vtysh_multicmd(self, commands, pretty_output=True, daemon=None):
+ """
+ Runs the provided commands in the vty shell and return the result of
+ execution.
+
+ pretty_output: defines how the return value will be presented. When
+ True it will show the command as they were executed in the vty shell,
+ otherwise it will only show lines that failed.
+ """
+ # Prepare the temporary file that will hold the commands
+ fname = topotest.get_file(commands)
+
+ dparam = ""
+ if daemon is not None:
+ dparam += "-d {}".format(daemon)
+
+ # Run the commands and delete the temporary file
+ if pretty_output:
+ vtysh_command = "vtysh {} < {}".format(dparam, fname)
+ else:
+ vtysh_command = "vtysh {} -f {}".format(dparam, fname)
+
+ dbgcmds = commands if is_string(commands) else "\n".join(commands)
+ dbgcmds = "\t" + dbgcmds.replace("\n", "\n\t")
+ self.logger.debug("vtysh command => FILE:\n{}".format(dbgcmds))
+
+ res = self.run(vtysh_command)
+ os.unlink(fname)
+
+ dbgres = res.strip()
+ if dbgres:
+ if "\n" in dbgres:
+ dbgres = dbgres.replace("\n", "\n\t")
+ self.logger.debug("vtysh result:\n\t{}".format(dbgres))
+ else:
+ self.logger.debug('vtysh result: "{}"'.format(dbgres))
+ return res
+
+ def report_memory_leaks(self, testname):
+ """
+ Runs the router memory leak check test. Has the following parameter:
+ testname: the test file name for identification
+
+ NOTE: to run this you must have the environment variable
+ TOPOTESTS_CHECK_MEMLEAK set or memleak_path configured in `pytest.ini`.
+ """
+ memleak_file = (
+ os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.params["memleak_path"]
+ )
+ if memleak_file == "" or memleak_file is None:
+ return
+
+ self.stop()
+
+ self.logger.info("running memory leak report")
+ self.net.report_memory_leaks(memleak_file, testname)
+
+ def version_info(self):
+ "Get equipment information from 'show version'."
+ output = self.vtysh_cmd("show version").split("\n")[0]
+ columns = topotest.normalize_text(output).split(" ")
+ try:
+ return {
+ "type": columns[0],
+ "version": columns[1],
+ }
+ except IndexError:
+ return {
+ "type": None,
+ "version": None,
+ }
+
+ def has_version(self, cmpop, version):
+ """
+ Compares router version using operation `cmpop` with `version`.
+ Valid `cmpop` values:
+ * `>=`: has the same version or greater
+ * `>`: has greater version
+ * `=`: has the same version
+ * `<`: has a lesser version
+ * `<=`: has the same version or lesser
+
+ Usage example: router.has_version('>', '1.0')
+ """
+ return self.net.checkRouterVersion(cmpop, version)
+
+ def has_type(self, rtype):
+ """
+ Compares router type with `rtype`. Returns `True` if the type matches,
+ otherwise `False`.
+ """
+ curtype = self.version_info()["type"]
+ return rtype == curtype
+
+ def has_mpls(self):
+ return self.net.hasmpls
+
+
+class TopoSwitch(TopoGear):
+ """
+ Switch abstraction. Has the following properties:
+ * cls: switch class that will be used to instantiate
+ * name: switch name
+ """
+
+ # pylint: disable=too-few-public-methods
+
+ def __init__(self, tgen, name, **params):
+ logger = topolog.get_logger(name, log_level="debug")
+ super(TopoSwitch, self).__init__(tgen, name, **params)
+ tgen.net.add_switch(name, logger=logger)
+
+ def __str__(self):
+ gear = super(TopoSwitch, self).__str__()
+ gear += " TopoSwitch<>"
+ return gear
+
+
+class TopoHost(TopoGear):
+ "Host abstraction."
+ # pylint: disable=too-few-public-methods
+
+ def __init__(self, tgen, name, **params):
+ """
+ Mininet has the following known `params` for hosts:
+ * `ip`: the IP address (string) for the host interface
+ * `defaultRoute`: the default route that will be installed
+ (e.g. 'via 10.0.0.1')
+ * `private_mounts`: directories that will be mounted on a different domain
+ (e.g. '/etc/important_dir').
+ """
+ super(TopoHost, self).__init__(tgen, name, **params)
+
+ # Propagate the router log directory
+ logfile = self._setup_tmpdir()
+ params["logdir"] = self.logdir
+
+ # Odd to have 2 logfiles for each host
+ self.logger = topolog.get_logger(name, log_level="debug", target=logfile)
+ params["logger"] = self.logger
+ tgen.net.add_host(name, **params)
+ topotest.fix_netns_limits(tgen.net[name])
+
+ # Mount gear log directory on a common path
+ self.net.bind_mount(self.gearlogdir, "/tmp/gearlogdir")
+
+ def __str__(self):
+ gear = super(TopoHost, self).__str__()
+ gear += ' TopoHost<ip="{}",defaultRoute="{}",private_mounts="{}">'.format(
+ self.params["ip"],
+ self.params["defaultRoute"],
+ str(self.params["private_mounts"]),
+ )
+ return gear
+
+
+class TopoExaBGP(TopoHost):
+ "ExaBGP peer abstraction."
+ # pylint: disable=too-few-public-methods
+
+ PRIVATE_DIRS = [
+ "/etc/exabgp",
+ "/var/run/exabgp",
+ "/var/log",
+ ]
+
+ def __init__(self, tgen, name, **params):
+ """
+ ExaBGP usually uses the following parameters:
+ * `ip`: the IP address (string) for the host interface
+ * `defaultRoute`: the default route that will be installed
+ (e.g. 'via 10.0.0.1')
+
+ Note: the difference between a host and an ExaBGP peer is that this
+ class has private_mounts already defined and contains functions to
+ handle ExaBGP things.
+ """
+ params["private_mounts"] = self.PRIVATE_DIRS
+ super(TopoExaBGP, self).__init__(tgen, name, **params)
+
+ def __str__(self):
+ gear = super(TopoExaBGP, self).__str__()
+ gear += " TopoExaBGP<>".format()
+ return gear
+
+ def start(self, peer_dir, env_file=None):
+ """
+ Start running ExaBGP daemon:
+ * Copy all peer* folder contents into /etc/exabgp
+ * Copy exabgp env file if specified
+ * Make all python files runnable
+ * Run ExaBGP with env file `env_file` and configuration peer*/exabgp.cfg
+ """
+ exacmd = self.tgen.get_exabgp_cmd()
+ assert exacmd, "Can't find a usabel ExaBGP (must be < version 4)"
+
+ self.run("mkdir -p /etc/exabgp")
+ self.run("chmod 755 /etc/exabgp")
+ self.run("cp {}/exa-* /etc/exabgp/".format(CWD))
+ self.run("cp {}/* /etc/exabgp/".format(peer_dir))
+ if env_file is not None:
+ self.run("cp {} /etc/exabgp/exabgp.env".format(env_file))
+ self.run("chmod 644 /etc/exabgp/*")
+ self.run("chmod a+x /etc/exabgp/*.py")
+ self.run("chown -R exabgp:exabgp /etc/exabgp")
+
+ output = self.run(exacmd + " -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg")
+ if output is None or len(output) == 0:
+ output = "<none>"
+
+ logger.info("{} exabgp started, output={}".format(self.name, output))
+
+ def stop(self, wait=True, assertOnError=True):
+ "Stop ExaBGP peer and kill the daemon"
+ self.run("kill `cat /var/run/exabgp/exabgp.pid`")
+ return ""
+
+
+class TopoBMPCollector(TopoHost):
+ PRIVATE_DIRS = [
+ "/var/log",
+ ]
+
+ def __init__(self, tgen, name, **params):
+ params["private_mounts"] = self.PRIVATE_DIRS
+ self.port = params["port"]
+ self.ip = params["ip"]
+ super(TopoBMPCollector, self).__init__(tgen, name, **params)
+
+ def __str__(self):
+ gear = super(TopoBMPCollector, self).__str__()
+ gear += " TopoBMPCollector<>".format()
+ return gear
+
+ def start(self):
+ self.run(
+ "{}/bmp_collector/bmpserver -a {} -p {}&".format(CWD, self.ip, self.port),
+ stdout=None,
+ )
+
+ def stop(self):
+ self.run("pkill -9 -f bmpserver")
+ return ""
+
+
+#
+# Diagnostic function
+#
+
+
+# Disable linter branch warning. It is expected to have these here.
+# pylint: disable=R0912
+def diagnose_env_linux(rundir):
+ """
+ Run diagnostics in the running environment. Returns `True` when everything
+ is ok, otherwise `False`.
+ """
+ ret = True
+
+ # Load configuration
+ config = configparser.ConfigParser(defaults=tgen_defaults)
+ pytestini_path = os.path.join(CWD, "../pytest.ini")
+ config.read(pytestini_path)
+
+ # Test log path exists before installing handler.
+ os.system("mkdir -p " + rundir)
+ # Log diagnostics to file so it can be examined later.
+ fhandler = logging.FileHandler(filename="{}/diagnostics.txt".format(rundir))
+ fhandler.setLevel(logging.DEBUG)
+ fhandler.setFormatter(logging.Formatter(fmt=topolog.FORMAT))
+ logger.addHandler(fhandler)
+
+ logger.info("Running environment diagnostics")
+
+ # Assert that we are running as root
+ if os.getuid() != 0:
+ logger.error("you must run topotest as root")
+ ret = False
+
+ # Assert that we have mininet
+ # if os.system("which mn >/dev/null 2>/dev/null") != 0:
+ # logger.error("could not find mininet binary (mininet is not installed)")
+ # ret = False
+
+ # Assert that we have iproute installed
+ if os.system("which ip >/dev/null 2>/dev/null") != 0:
+ logger.error("could not find ip binary (iproute is not installed)")
+ ret = False
+
+ # Assert that we have gdb installed
+ if os.system("which gdb >/dev/null 2>/dev/null") != 0:
+ logger.error("could not find gdb binary (gdb is not installed)")
+ ret = False
+
+ # Assert that FRR utilities exist
+ frrdir = config.get("topogen", "frrdir")
+ if not os.path.isdir(frrdir):
+ logger.error("could not find {} directory".format(frrdir))
+ ret = False
+ else:
+ try:
+ pwd.getpwnam("frr")[2]
+ except KeyError:
+ logger.warning('could not find "frr" user')
+
+ try:
+ grp.getgrnam("frr")[2]
+ except KeyError:
+ logger.warning('could not find "frr" group')
+
+ try:
+ if "frr" not in grp.getgrnam("frrvty").gr_mem:
+ logger.error(
+ '"frr" user and group exist, but user is not under "frrvty"'
+ )
+ except KeyError:
+ logger.warning('could not find "frrvty" group')
+
+ for fname in [
+ "zebra",
+ "ospfd",
+ "ospf6d",
+ "bgpd",
+ "ripd",
+ "ripngd",
+ "isisd",
+ "pimd",
+ "pim6d",
+ "ldpd",
+ "pbrd",
+ "mgmtd",
+ ]:
+ path = os.path.join(frrdir, fname)
+ if not os.path.isfile(path):
+ # LDPd is an exception
+ if fname == "ldpd":
+ logger.info(
+ "could not find {} in {}".format(fname, frrdir)
+ + "(LDPd tests will not run)"
+ )
+ continue
+
+ logger.error("could not find {} in {}".format(fname, frrdir))
+ ret = False
+ else:
+ if fname != "zebra" or fname != "mgmtd":
+ continue
+
+ os.system("{} -v 2>&1 >{}/frr_mgmtd.txt".format(path, rundir))
+ os.system("{} -v 2>&1 >{}/frr_zebra.txt".format(path, rundir))
+
+ # Test MPLS availability
+ krel = platform.release()
+ if topotest.version_cmp(krel, "4.5") < 0:
+ logger.info(
+ 'LDPd tests will not run (have kernel "{}", but it requires 4.5)'.format(
+ krel
+ )
+ )
+
+ # Test for MPLS Kernel modules available
+ if not topotest.module_present("mpls-router", load=False) != 0:
+ logger.info("LDPd tests will not run (missing mpls-router kernel module)")
+ if not topotest.module_present("mpls-iptunnel", load=False) != 0:
+ logger.info("LDPd tests will not run (missing mpls-iptunnel kernel module)")
+
+ if not get_exabgp_cmd():
+ logger.warning("Failed to find exabgp < 4")
+
+ logger.removeHandler(fhandler)
+ fhandler.close()
+
+ return ret
+
+
+def diagnose_env_freebsd():
+ return True
+
+
+def diagnose_env(rundir):
+ if sys.platform.startswith("linux"):
+ return diagnose_env_linux(rundir)
+ elif sys.platform.startswith("freebsd"):
+ return diagnose_env_freebsd()
+
+ return False
diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py
new file mode 100644
index 0000000..901e4f6
--- /dev/null
+++ b/tests/topotests/lib/topojson.py
@@ -0,0 +1,405 @@
+# SPDX-License-Identifier: ISC
+#
+# Modified work Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Original work Copyright (c) 2018 by Network Device Education
+# Foundation, Inc. ("NetDEF")
+#
+
+import json
+import ipaddress
+import os
+from collections import OrderedDict
+from copy import deepcopy
+from re import search as re_search
+
+import pytest
+
+from lib.bgp import create_router_bgp
+from lib.common_config import (
+ create_bgp_community_lists,
+ create_interfaces_cfg,
+ create_prefix_lists,
+ create_route_maps,
+ create_static_routes,
+ create_vrf_cfg,
+ load_config_to_routers,
+ start_topology,
+ topo_daemons,
+ number_to_column,
+)
+from lib.ospf import create_router_ospf
+from lib.pim import (
+ create_igmp_config,
+ create_pim_config,
+ create_mld_config,
+)
+from lib.topolog import logger
+
+
+def build_topo_from_json(tgen, topo=None):
+ """
+ Reads configuration from JSON file. Adds routers, creates interface
+ names dynamically and link routers as defined in JSON to create
+ topology. Assigns IPs dynamically to all interfaces of each router.
+ * `tgen`: Topogen object
+ * `topo`: json file data, or use tgen.json_topo if None
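+
+ A minimal illustrative shape of the expected JSON (only a few of the
+ keys this function reads are shown; values are examples):
+
+ ```py
+ topo = {
+ "ipv4base": "10.0.0.0",
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24},
+ "routers": {
+ "r1": {"links": {"r2": {"ipv4": "auto"}}},
+ "r2": {"links": {"r1": {"ipv4": "auto"}}},
+ },
+ }
+ ```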
+ """
+ if topo is None:
+ topo = tgen.json_topo
+
+ router_list = sorted(
+ topo["routers"].keys(), key=lambda x: int(re_search(r"\d+", x).group(0))
+ )
+
+ switch_list = []
+ if "switches" in topo:
+ switch_list = sorted(
+ topo["switches"].keys(), key=lambda x: int(re_search(r"\d+", x).group(0))
+ )
+
+ listRouters = sorted(router_list[:])
+ listSwitches = sorted(switch_list[:])
+ listAllRouters = deepcopy(listRouters)
+ dictSwitches = {}
+
+ for routerN in router_list:
+ logger.info("Topo: Add router {}".format(routerN))
+ tgen.add_router(routerN)
+
+ for switchN in switch_list:
+ logger.info("Topo: Add switch {}".format(switchN))
+ dictSwitches[switchN] = tgen.add_switch(switchN)
+
+ if "ipv4base" in topo:
+ ipv4Next = ipaddress.IPv4Address(topo["link_ip_start"]["ipv4"])
+ ipv4Step = 2 ** (32 - topo["link_ip_start"]["v4mask"])
+ if topo["link_ip_start"]["v4mask"] < 32:
+ ipv4Next += 1
+ if "ipv6base" in topo:
+ ipv6Next = ipaddress.IPv6Address(topo["link_ip_start"]["ipv6"])
+ ipv6Step = 2 ** (128 - topo["link_ip_start"]["v6mask"])
+ if topo["link_ip_start"]["v6mask"] < 127:
+ ipv6Next += 1
+ for router in listRouters:
+ topo["routers"][router]["nextIfname"] = 0
+
+ router_count = 0
+ while listRouters != []:
+ curRouter = listRouters.pop(0)
+ # Physical Interfaces
+ if "links" in topo["routers"][curRouter]:
+ for destRouterLink, data in sorted(
+ topo["routers"][curRouter]["links"].items()
+ ):
+ currRouter_lo_json = topo["routers"][curRouter]["links"][destRouterLink]
+ # Loopback interfaces
+ if "type" in data and data["type"] == "loopback":
+ router_count += 1
+ if (
+ "ipv4" in currRouter_lo_json
+ and currRouter_lo_json["ipv4"] == "auto"
+ ):
+ currRouter_lo_json["ipv4"] = "{}{}.{}/{}".format(
+ topo["lo_prefix"]["ipv4"],
+ router_count,
+ number_to_column(curRouter),
+ topo["lo_prefix"]["v4mask"],
+ )
+ if (
+ "ipv6" in currRouter_lo_json
+ and currRouter_lo_json["ipv6"] == "auto"
+ ):
+ currRouter_lo_json["ipv6"] = "{}{}:{}/{}".format(
+ topo["lo_prefix"]["ipv6"],
+ router_count,
+ number_to_column(curRouter),
+ topo["lo_prefix"]["v6mask"],
+ )
+
+ if "-" in destRouterLink:
+ # Splitting and storing destRouterLink data in tempList
+ tempList = destRouterLink.split("-")
+
+ # destRouter
+ destRouter = tempList.pop(0)
+
+ # Current Router Link
+ tempList.insert(0, curRouter)
+ curRouterLink = "-".join(tempList)
+ else:
+ destRouter = destRouterLink
+ curRouterLink = curRouter
+
+ if destRouter in listRouters:
+ currRouter_link_json = topo["routers"][curRouter]["links"][
+ destRouterLink
+ ]
+ destRouter_link_json = topo["routers"][destRouter]["links"][
+ curRouterLink
+ ]
+
+ # Assigning name to interfaces
+ currRouter_link_json["interface"] = "{}-{}-eth{}".format(
+ curRouter, destRouter, topo["routers"][curRouter]["nextIfname"]
+ )
+ destRouter_link_json["interface"] = "{}-{}-eth{}".format(
+ destRouter, curRouter, topo["routers"][destRouter]["nextIfname"]
+ )
+
+ # add link interface
+ destRouter_link_json["peer-interface"] = "{}-{}-eth{}".format(
+ curRouter, destRouter, topo["routers"][curRouter]["nextIfname"]
+ )
+ currRouter_link_json["peer-interface"] = "{}-{}-eth{}".format(
+ destRouter, curRouter, topo["routers"][destRouter]["nextIfname"]
+ )
+
+ topo["routers"][curRouter]["nextIfname"] += 1
+ topo["routers"][destRouter]["nextIfname"] += 1
+
+ # Linking routers to each other as defined in JSON file
+ tgen.gears[curRouter].add_link(
+ tgen.gears[destRouter],
+ topo["routers"][curRouter]["links"][destRouterLink][
+ "interface"
+ ],
+ topo["routers"][destRouter]["links"][curRouterLink][
+ "interface"
+ ],
+ )
+
+ # IPv4
+ if "ipv4" in currRouter_link_json:
+ if currRouter_link_json["ipv4"] == "auto":
+ currRouter_link_json["ipv4"] = "{}/{}".format(
+ ipv4Next, topo["link_ip_start"]["v4mask"]
+ )
+ destRouter_link_json["ipv4"] = "{}/{}".format(
+ ipv4Next + 1, topo["link_ip_start"]["v4mask"]
+ )
+ ipv4Next += ipv4Step
+ # IPv6
+ if "ipv6" in currRouter_link_json:
+ if currRouter_link_json["ipv6"] == "auto":
+ currRouter_link_json["ipv6"] = "{}/{}".format(
+ ipv6Next, topo["link_ip_start"]["v6mask"]
+ )
+ destRouter_link_json["ipv6"] = "{}/{}".format(
+ ipv6Next + 1, topo["link_ip_start"]["v6mask"]
+ )
+ ipv6Next = ipaddress.IPv6Address(int(ipv6Next) + ipv6Step)
+
+ logger.debug(
+ "Generated link data for router: %s\n%s",
+ curRouter,
+ json.dumps(
+ topo["routers"][curRouter]["links"], indent=4, sort_keys=True
+ ),
+ )
+
+ switch_count = 0
+ add_switch_to_topo = []
+ while listSwitches != []:
+ curSwitch = listSwitches.pop(0)
+ # Physical Interfaces
+ if "links" in topo["switches"][curSwitch]:
+ for destRouterLink, data in sorted(
+ topo["switches"][curSwitch]["links"].items()
+ ):
+ # Loopback interfaces
+ if "dst_node" in data:
+ destRouter = data["dst_node"]
+
+ elif "-" in destRouterLink:
+ # Splitting and storing destRouterLink data in tempList
+ tempList = destRouterLink.split("-")
+ # destRouter
+ destRouter = tempList.pop(0)
+ else:
+ destRouter = destRouterLink
+
+ if destRouter in listAllRouters:
+ topo["routers"][destRouter]["links"][curSwitch] = deepcopy(
+ topo["switches"][curSwitch]["links"][destRouterLink]
+ )
+
+ # Assigning name to interfaces
+ topo["routers"][destRouter]["links"][curSwitch][
+ "interface"
+ ] = "{}-{}-eth{}".format(
+ destRouter, curSwitch, topo["routers"][destRouter]["nextIfname"]
+ )
+
+ topo["switches"][curSwitch]["links"][destRouter][
+ "interface"
+ ] = "{}-{}-eth{}".format(
+ curSwitch, destRouter, topo["routers"][destRouter]["nextIfname"]
+ )
+
+ topo["routers"][destRouter]["nextIfname"] += 1
+
+ # Add links
+ dictSwitches[curSwitch].add_link(
+ tgen.gears[destRouter],
+ topo["switches"][curSwitch]["links"][destRouter]["interface"],
+ topo["routers"][destRouter]["links"][curSwitch]["interface"],
+ )
+
+ # IPv4
+ if "ipv4" in topo["routers"][destRouter]["links"][curSwitch]:
+ if (
+ topo["routers"][destRouter]["links"][curSwitch]["ipv4"]
+ == "auto"
+ ):
+ topo["routers"][destRouter]["links"][curSwitch][
+ "ipv4"
+ ] = "{}/{}".format(
+ ipv4Next, topo["link_ip_start"]["v4mask"]
+ )
+ ipv4Next += 1
+ # IPv6
+ if "ipv6" in topo["routers"][destRouter]["links"][curSwitch]:
+ if (
+ topo["routers"][destRouter]["links"][curSwitch]["ipv6"]
+ == "auto"
+ ):
+ topo["routers"][destRouter]["links"][curSwitch][
+ "ipv6"
+ ] = "{}/{}".format(
+ ipv6Next, topo["link_ip_start"]["v6mask"]
+ )
+ ipv6Next = ipaddress.IPv6Address(int(ipv6Next) + ipv6Step)
+
+ logger.debug(
+ "Generated link data for router: %s\n%s",
+ curRouter,
+ json.dumps(
+ topo["routers"][curRouter]["links"], indent=4, sort_keys=True
+ ),
+ )
+
+
+def linux_intf_config_from_json(tgen, topo=None):
+ """Configure interfaces from linux based on topo."""
+ if topo is None:
+ topo = tgen.json_topo
+
+ routers = topo["routers"]
+ for rname in routers:
+ router = tgen.net[rname]
+ links = routers[rname]["links"]
+ for rrname in links:
+ link = links[rrname]
+ if rrname == "lo":
+ lname = "lo"
+ else:
+ lname = link["interface"]
+ if "ipv4" in link:
+ router.cmd_raises("ip addr add {} dev {}".format(link["ipv4"], lname))
+ if "ipv6" in link:
+ router.cmd_raises(
+ "ip -6 addr add {} dev {}".format(link["ipv6"], lname)
+ )
+
+
+def build_config_from_json(tgen, topo=None, save_bkup=True):
+ """
+ Reads the initial configuration from JSON for each router, builds the
+ configuration and loads it to the routers.
+
+ * `tgen`: Topogen object
+ * `topo`: json file data, or use tgen.json_topo if None
+ """
+
+ func_dict = OrderedDict(
+ [
+ ("vrfs", create_vrf_cfg),
+ ("ospf", create_router_ospf),
+ ("links", create_interfaces_cfg),
+ ("static_routes", create_static_routes),
+ ("prefix_lists", create_prefix_lists),
+ ("bgp_community_list", create_bgp_community_lists),
+ ("route_maps", create_route_maps),
+ ("pim", create_pim_config),
+ ("igmp", create_igmp_config),
+ ("mld", create_mld_config),
+ ("bgp", create_router_bgp),
+ ]
+ )
+
+ if topo is None:
+ topo = tgen.json_topo
+
+ data = topo["routers"]
+ for func_type in func_dict.keys():
+ logger.info("Checking for {} configuration in input data".format(func_type))
+
+ func_dict.get(func_type)(tgen, data, build=True)
+
+ routers = sorted(topo["routers"].keys())
+ result = load_config_to_routers(tgen, routers, save_bkup)
+ if not result:
+ logger.info("build_config_from_json: failed to configure topology")
+ assert False
+
+ logger.info(
+ "Built config now clearing ospf neighbors as that router-id might not be what is used"
+ )
+ for ospf in ["ospf", "ospf6"]:
+ for router in data:
+ if ospf not in data[router]:
+ continue
+
+ r = tgen.gears[router]
+ if ospf == "ospf":
+ r.vtysh_cmd("clear ip ospf process")
+ else:
+ r.vtysh_cmd("clear ipv6 ospf6 process")
+
+
+def create_tgen_from_json(testfile, json_file=None):
+ """Create a topogen object given a testfile.
+
+ - `testfile` : The path to the testfile.
+ - `json_file` : The path to the json config file. If None the pathname is derived
+ from the `testfile` first by trying to replace `.py` by `.json` and if that isn't
+ present then by removing `test_` prefix as well.
+ """
+ from lib.topogen import Topogen # Topogen imports this module too
+
+ thisdir = os.path.dirname(os.path.realpath(testfile))
+ basename = os.path.basename(testfile)
+ logger.debug("starting standard JSON based module setup for %s", basename)
+
+ assert basename.startswith("test_")
+ assert basename.endswith(".py")
+ json_file = os.path.join(thisdir, basename[:-3] + ".json")
+ if not os.path.exists(json_file):
+ json_file = os.path.join(thisdir, basename[5:-3] + ".json")
+ assert os.path.exists(json_file)
+ with open(json_file, "r") as topof:
+ topo = json.load(topof)
+
+ # Create topology
+ tgen = Topogen(lambda tgen: build_topo_from_json(tgen, topo), basename[:-3])
+ tgen.json_topo = topo
+ return tgen
+
+
+def setup_module_from_json(testfile, json_file=None):
+ """Do the standard module setup for JSON based test.
+
+ * `testfile` : The path to the testfile. The name is used to derive the json config
+ file name as well (removing `test_` prefix and replacing `.py` suffix with `.json`).
+ * `json_file` : Optional explicit path to the json config file.
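+
+ Typical use from a test module:
+
+ ```py
+ tgen = setup_module_from_json(__file__)
+ ```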
+ """
+ # Create topology object
+ tgen = create_tgen_from_json(testfile, json_file)
+
+ # Start routers (and their daemons)
+ start_topology(tgen)
+
+ # Configure routers
+ build_config_from_json(tgen)
+ assert not tgen.routers_have_failure()
+
+ return tgen
diff --git a/tests/topotests/lib/topolog.py b/tests/topotests/lib/topolog.py
new file mode 100644
index 0000000..aceb2cb
--- /dev/null
+++ b/tests/topotests/lib/topolog.py
@@ -0,0 +1,161 @@
+# SPDX-License-Identifier: ISC
+#
+# topolog.py
+# Library of helper functions for NetDEF Topology Tests
+#
+# Copyright (c) 2017 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+
+"""
+Logging utilities for topology tests.
+
+This file defines our logging abstraction.
+"""
+
+import logging
+import os
+
+try:
+ from xdist import is_xdist_controller
+except ImportError:
+
+ def is_xdist_controller():
+ return False
+
+
+# Helper dictionary to convert Topogen logging levels to Python's logging.
+DEBUG_TOPO2LOGGING = {
+ "debug": logging.DEBUG,
+ "info": logging.INFO,
+ "output": logging.INFO,
+ "warning": logging.WARNING,
+ "error": logging.ERROR,
+ "critical": logging.CRITICAL,
+}
+FORMAT = "%(asctime)s %(levelname)s: %(name)s: %(message)s"
+
+handlers = {}
+logger = logging.getLogger("topo")
+
+
+# Remove this and use munet version when we move to pytest_asyncio
+def get_test_logdir(nodeid=None, module=False):
+ """Get log directory relative pathname."""
+ xdist_worker = os.getenv("PYTEST_XDIST_WORKER", "")
+ mode = os.getenv("PYTEST_XDIST_MODE", "no")
+
+ # nodeid: all_protocol_startup/test_all_protocol_startup.py::test_router_running
+ # may be missing "::testname" if module is True
+ if not nodeid:
+ nodeid = os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
+
+ cur_test = nodeid.replace("[", "_").replace("]", "_")
+ if module:
+ idx = cur_test.rfind("::")
+ path = cur_test if idx == -1 else cur_test[:idx]
+ testname = ""
+ else:
+ path, testname = cur_test.split("::")
+ testname = testname.replace("/", ".")
+ path = path[:-3].replace("/", ".")
+
+ # We use different logdir paths based on how xdist is running.
+ if mode == "each":
+ if module:
+ return os.path.join(path, "worker-logs", xdist_worker)
+ return os.path.join(path, testname, xdist_worker)
+ assert mode in ("no", "load", "loadfile", "loadscope"), f"Unknown dist mode {mode}"
+ return path if module else os.path.join(path, testname)
+
+
+def set_handler(lg, target=None):
+ if target is None:
+ h = logging.NullHandler()
+ else:
+ if isinstance(target, str):
+ h = logging.FileHandler(filename=target, mode="w")
+ else:
+ h = logging.StreamHandler(stream=target)
+ h.setFormatter(logging.Formatter(fmt=FORMAT))
+ # Don't filter anything at the handler level
+ h.setLevel(logging.DEBUG)
+ lg.addHandler(h)
+ return h
+
+
+def set_log_level(lg, level):
+ "Set the logging level."
+ # Messages sent to this logger only are created if this level or above.
+ log_level = DEBUG_TOPO2LOGGING.get(level, level)
+ lg.setLevel(log_level)
+
+
+def reset_logger(lg):
+ while lg.handlers:
+ x = lg.handlers.pop()
+ x.close()
+ lg.removeHandler(x)
+
+
+def get_logger(name, log_level=None, target=None, reset=True):
+ lg = logging.getLogger(name)
+
+ if reset:
+ reset_logger(lg)
+
+ if log_level is not None:
+ set_log_level(lg, log_level)
+
+ if target is not None:
+ set_handler(lg, target)
+
+ return lg
+
+
+def logstart(nodeid, logpath):
+ """Called from pytest before module setup."""
+ worker = os.getenv("PYTEST_TOPOTEST_WORKER", "")
+ wstr = f" on worker {worker}" if worker else ""
+ handler_id = nodeid + worker
+ logpath = logpath.absolute()
+
+ logging.debug("logstart: adding logging for %s%s at %s", nodeid, wstr, logpath)
+ root_logger = logging.getLogger()
+ handler = logging.FileHandler(logpath, mode="w")
+ handler.setFormatter(logging.Formatter(FORMAT))
+
+ root_logger.addHandler(handler)
+ handlers[handler_id] = handler
+
+ logging.debug("logstart: added logging for %s%s at %s", nodeid, wstr, logpath)
+ return handler
+
+
+def logfinish(nodeid, logpath):
+ """Called from pytest after module teardown."""
+ worker = os.getenv("PYTEST_TOPOTEST_WORKER", "")
+ wstr = f" on worker {worker}" if worker else ""
+
+ root_logger = logging.getLogger()
+
+ handler_id = nodeid + worker
+
+ if handler_id not in handlers:
+ logging.critical("can't find log handler to remove")
+ else:
+ logging.debug(
+ "logfinish: removing logging for %s%s at %s", nodeid, wstr, logpath
+ )
+ h = handlers[handler_id]
+ root_logger.removeHandler(h)
+ h.flush()
+ h.close()
+ del handlers[handler_id]
+ logging.debug(
+ "logfinish: removed logging for %s%s at %s", nodeid, wstr, logpath
+ )
+
+
+console_handler = set_handler(logger, None)
+set_log_level(logger, "debug")
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
new file mode 100644
index 0000000..c220bcf
--- /dev/null
+++ b/tests/topotests/lib/topotest.py
@@ -0,0 +1,2397 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# topotest.py
+# Library of helper functions for NetDEF Topology Tests
+#
+# Copyright (c) 2016 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+
+import configparser
+import difflib
+import errno
+import functools
+import glob
+import json
+import os
+import platform
+import re
+import resource
+import signal
+import subprocess
+import sys
+import tempfile
+import time
+import logging
+from collections.abc import Mapping
+from copy import deepcopy
+
+import lib.topolog as topolog
+from lib.micronet_compat import Node
+from lib.topolog import logger
+from munet.base import Timeout
+
+from lib import micronet
+
+g_pytest_config = None
+
+
+def get_logs_path(rundir):
+ logspath = topolog.get_test_logdir(module=True)
+ return os.path.join(rundir, logspath)
+
+
+def gdb_core(obj, daemon, corefiles):
+ gdbcmds = r"""
+set print elements 1024
+echo -------\n
+echo threads\n
+echo -------\n
+info threads
+echo ---------\n
+echo registers\n
+echo ---------\n
+info registers
+echo ---------\n
+echo backtrace\n
+echo ---------\n
+bt
+ """
+ gdbcmds = [["-ex", i.strip()] for i in gdbcmds.strip().split("\n")]
+ gdbcmds = [item for sl in gdbcmds for item in sl]
+
+ daemon_path = os.path.join(obj.daemondir, daemon)
+ p = subprocess.run(
+ ["gdb", daemon_path, corefiles[0], "--batch"] + gdbcmds,
+ encoding="utf-8",
+ errors="ignore",
+ capture_output=True,
+ )
+ backtrace = p.stdout
+
+ #
+ # Grab the disassemble of top couple frames
+ #
+ m = re.search(r"#(\d+) .*assert.*", backtrace)
+ if not m:
+ m = re.search(r"#(\d+) .*abort.*", backtrace)
+ frames = re.findall(r"\n#(\d+) ", backtrace)
+ if m:
+ frstart = -1
+ astart = int(m.group(1)) + 1
+ ocount = f"-{int(frames[-1]) - astart + 1}"
+ else:
+ astart = -1
+ frstart = 0
+ ocount = ""
+ m = re.search(r"#(\d+) .*core_handler.*", backtrace)
+ if m:
+ frstart = int(m.group(1)) + 2
+ ocount = f"-{int(frames[-1]) - frstart + 1}"
+
+ sys.stderr.write(
+ f"\nCORE FOUND: {obj.name}: {daemon} crashed: see log for backtrace and more\n"
+ )
+
+ gdbcmds = rf"""
+set print elements 1024
+echo -------------------------\n
+echo backtrace with local args\n
+echo -------------------------\n
+bt full {ocount}
+"""
+ if frstart >= 0:
+ gdbcmds += rf"""echo ---------------------------------------\n
+echo disassemble of failing function (guess)\n
+echo ---------------------------------------\n
+fr {frstart}
+disassemble /m
+"""
+
+ gdbcmds = [["-ex", i.strip()] for i in gdbcmds.strip().split("\n")]
+ gdbcmds = [item for sl in gdbcmds for item in sl]
+
+ daemon_path = os.path.join(obj.daemondir, daemon)
+ p = subprocess.run(
+ ["gdb", daemon_path, corefiles[0], "-q", "--batch"] + gdbcmds,
+ encoding="utf-8",
+ errors="ignore",
+ capture_output=True,
+ )
+ btdump = p.stdout
+
+ # sys.stderr.write(
+ # "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon)
+ # )
+
+ return backtrace + btdump
+
+
+class json_cmp_result(object):
+ "json_cmp result class for better assertion messages"
+
+ def __init__(self):
+ self.errors = []
+
+ def add_error(self, error):
+ "Append error message to the result"
+ for line in error.splitlines():
+ self.errors.append(line)
+
+ def has_errors(self):
+ "Returns True if there were errors, otherwise False."
+ return len(self.errors) > 0
+
+ def gen_report(self):
+ headline = ["Generated JSON diff error report:", ""]
+ return headline + self.errors
+
+ def __str__(self):
+ return (
+ "Generated JSON diff error report:\n\n\n" + "\n".join(self.errors) + "\n\n"
+ )
+
+
+def gen_json_diff_report(output, expected, exact=False, path="> $", acc=(0, "")):
+ """
+    Internal workhorse which compares two JSON data structures and generates a human-readable error report.
+ """
+
+ def dump_json(v):
+ if isinstance(v, (dict, list)):
+ return "\t" + "\t".join(
+ json.dumps(v, indent=4, separators=(",", ": ")).splitlines(True)
+ )
+ else:
+ return "'{}'".format(v)
+
+    def json_type(v):
+        if isinstance(v, (list, tuple)):
+            return "Array"
+        elif isinstance(v, dict):
+            return "Object"
+        # bool must be tested before int/float since bool is a subclass of int
+        elif isinstance(v, bool):
+            return "Boolean"
+        elif isinstance(v, (int, float)):
+            return "Number"
+        elif isinstance(v, str):
+            return "String"
+        elif v is None:
+            return "null"
+
+ def get_errors(other_acc):
+ return other_acc[1]
+
+ def get_errors_n(other_acc):
+ return other_acc[0]
+
+ def add_error(acc, msg, points=1):
+ return (acc[0] + points, acc[1] + "{}: {}\n".format(path, msg))
+
+ def merge_errors(acc, other_acc):
+ return (acc[0] + other_acc[0], acc[1] + other_acc[1])
+
+ def add_idx(idx):
+ return "{}[{}]".format(path, idx)
+
+ def add_key(key):
+ return "{}->{}".format(path, key)
+
+ def has_errors(other_acc):
+ return other_acc[0] > 0
+
+ if expected == "*" or (
+ not isinstance(output, (list, dict))
+ and not isinstance(expected, (list, dict))
+ and output == expected
+ ):
+ return acc
+ elif (
+ not isinstance(output, (list, dict))
+ and not isinstance(expected, (list, dict))
+ and output != expected
+ ):
+ acc = add_error(
+ acc,
+ "output has element with value '{}' but in expected it has value '{}'".format(
+ output, expected
+ ),
+ )
+ elif (
+ isinstance(output, list)
+ and isinstance(expected, list)
+ and ((len(expected) > 0 and expected[0] == "__ordered__") or exact)
+ ):
+ if not exact:
+ del expected[0]
+ if len(output) != len(expected):
+ acc = add_error(
+ acc,
+ "output has Array of length {} but in expected it is of length {}".format(
+ len(output), len(expected)
+ ),
+ )
+ else:
+ for idx, v1, v2 in zip(range(0, len(output)), output, expected):
+ acc = merge_errors(
+ acc, gen_json_diff_report(v1, v2, exact=exact, path=add_idx(idx))
+ )
+ elif isinstance(output, list) and isinstance(expected, list):
+ if len(output) < len(expected):
+ acc = add_error(
+ acc,
+ "output has Array of length {} but in expected it is of length {}".format(
+ len(output), len(expected)
+ ),
+ )
+ else:
+ for idx2, v2 in zip(range(0, len(expected)), expected):
+ found_match = False
+ closest_diff = None
+ closest_idx = None
+ for idx1, v1 in zip(range(0, len(output)), output):
+ tmp_v1 = deepcopy(v1)
+ tmp_v2 = deepcopy(v2)
+ tmp_diff = gen_json_diff_report(tmp_v1, tmp_v2, path=add_idx(idx1))
+ if not has_errors(tmp_diff):
+ found_match = True
+ del output[idx1]
+ break
+ elif not closest_diff or get_errors_n(tmp_diff) < get_errors_n(
+ closest_diff
+ ):
+ closest_diff = tmp_diff
+ closest_idx = idx1
+ if not found_match and isinstance(v2, (list, dict)):
+ sub_error = "\n\n\t{}".format(
+ "\t".join(get_errors(closest_diff).splitlines(True))
+ )
+ acc = add_error(
+ acc,
+ (
+ "expected has the following element at index {} which is not present in output: "
+ + "\n\n{}\n\n\tClosest match in output is at index {} with the following errors: {}"
+ ).format(idx2, dump_json(v2), closest_idx, sub_error),
+ )
+ if not found_match and not isinstance(v2, (list, dict)):
+ acc = add_error(
+ acc,
+ "expected has the following element at index {} which is not present in output: {}".format(
+ idx2, dump_json(v2)
+ ),
+ )
+ elif isinstance(output, dict) and isinstance(expected, dict) and exact:
+ invalid_keys_d1 = [k for k in output.keys() if k not in expected.keys()]
+ invalid_keys_d2 = [k for k in expected.keys() if k not in output.keys()]
+ for k in invalid_keys_d1:
+ acc = add_error(
+ acc, "output has key '{}' which is not present in expected".format(k)
+ )
+ for k in invalid_keys_d2:
+ acc = add_error(
+ acc, "expected has key '{}' which is not present in output".format(k)
+ )
+ valid_keys_intersection = [k for k in output.keys() if k in expected.keys()]
+ for k in valid_keys_intersection:
+ acc = merge_errors(
+ acc,
+ gen_json_diff_report(
+ output[k], expected[k], exact=exact, path=add_key(k)
+ ),
+ )
+ elif isinstance(output, dict) and isinstance(expected, dict):
+        none_keys = [k for k, v in expected.items() if v is None]
+ none_keys_present = [k for k in output.keys() if k in none_keys]
+ for k in none_keys_present:
+ acc = add_error(
+ acc, "output has key '{}' which is not supposed to be present".format(k)
+ )
+        keys = [k for k, v in expected.items() if v is not None]
+ invalid_keys_intersection = [k for k in keys if k not in output.keys()]
+ for k in invalid_keys_intersection:
+ acc = add_error(
+ acc, "expected has key '{}' which is not present in output".format(k)
+ )
+ valid_keys_intersection = [k for k in keys if k in output.keys()]
+ for k in valid_keys_intersection:
+ acc = merge_errors(
+ acc,
+ gen_json_diff_report(
+ output[k], expected[k], exact=exact, path=add_key(k)
+ ),
+ )
+ else:
+ acc = add_error(
+ acc,
+ "output has element of type '{}' but the corresponding element in expected is of type '{}'".format(
+ json_type(output), json_type(expected)
+ ),
+ points=2,
+ )
+
+ return acc
+
+
+def json_cmp(output, expected, exact=False):
+ """
+ JSON compare function. Receives two parameters:
+    * `output`: parsed JSON data structure from the vtysh command output
+    * `expected`: parsed JSON data structure of what is expected to be seen
+
+ Returns 'None' when all JSON Object keys and all Array elements of expected have a match
+ in output, i.e., when expected is a "subset" of output without honoring any order. Otherwise an
+ error report is generated and wrapped in a 'json_cmp_result()'. There are special
+ parameters and notations explained below which can be used to cover rather unusual
+ cases:
+
+    * when 'exact' is set to 'True' then output and expected are tested for equality (including
+ order within JSON Arrays)
+ * using 'null' (or 'None' in Python) as JSON Object value is checking for key
+ absence in output
+ * using '*' as JSON Object value or Array value is checking for presence in output
+ without checking the values
+ * using '__ordered__' as first element in a JSON Array in expected will also check the
+ order when it is compared to an Array in output
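+
+    Example (an illustrative sketch; the command and JSON data are hypothetical):
+
+        output = router.vtysh_cmd("show bgp summary json", isjson=True)
+        expected = {"ipv4Unicast": {"peerCount": 2}}
+        result = json_cmp(output, expected)
+        assert result is None, str(result)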
+ """
+
+ (errors_n, errors) = gen_json_diff_report(
+ deepcopy(output), deepcopy(expected), exact=exact
+ )
+
+ if errors_n > 0:
+ result = json_cmp_result()
+ result.add_error(errors)
+ return result
+ else:
+ return None
+
+
+def router_output_cmp(router, cmd, expected):
+ """
+    Runs `cmd` in router and compares the output with `expected`.
+    Returns an empty string when they match, otherwise a formatted diff.
+ """
+ return difflines(
+ normalize_text(router.vtysh_cmd(cmd)),
+ normalize_text(expected),
+ title1="Current output",
+ title2="Expected output",
+ )
+
+
+def router_json_cmp(router, cmd, data, exact=False):
+ """
+ Runs `cmd` that returns JSON data (normally the command ends with 'json')
+ and compare with `data` contents.
+ """
+ return json_cmp(router.vtysh_cmd(cmd, isjson=True), data, exact)
+
+
+def run_and_expect(func, what, count=20, wait=3):
+ """
+    Run `func` and compare the result with `what`. Try up to `count` times,
+    waiting `wait` seconds between tries. By default it tries 20 times with
+    a 3 second delay between tries.
+
+ Returns (True, func-return) on success or
+ (False, func-return) on failure.
+
+ ---
+
+ Helper functions to use with this function:
+ - router_output_cmp
+ - router_json_cmp
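+
+    Example (an illustrative sketch; the router and JSON data are hypothetical):
+
+        test_func = functools.partial(
+            router_json_cmp, router, "show ip route json", {"10.0.1.0/24": [{}]}
+        )
+        success, result = run_and_expect(test_func, None, count=40, wait=1)
+        assert success, "route never appeared:\n{}".format(result)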
+ """
+ start_time = time.time()
+ func_name = "<unknown>"
+ if func.__class__ == functools.partial:
+ func_name = func.func.__name__
+ else:
+ func_name = func.__name__
+
+ # Just a safety-check to avoid running topotests with very
+ # small wait/count arguments.
+ wait_time = wait * count
+ if wait_time < 5:
+ assert (
+ wait_time >= 5
+ ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
+ count, wait
+ )
+
+ logger.debug(
+ "'{}' polling started (interval {} secs, maximum {} tries)".format(
+ func_name, wait, count
+ )
+ )
+
+ while count > 0:
+ result = func()
+ if result != what:
+ time.sleep(wait)
+ count -= 1
+ continue
+
+ end_time = time.time()
+ logger.debug(
+ "'{}' succeeded after {:.2f} seconds".format(
+ func_name, end_time - start_time
+ )
+ )
+ return (True, result)
+
+ end_time = time.time()
+ logger.error(
+ "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
+ )
+ return (False, result)
+
+
+def run_and_expect_type(func, etype, count=20, wait=3, avalue=None):
+ """
+    Run `func` and check that the result is of type `etype`. Try up to `count`
+    times, waiting `wait` seconds between tries. By default it tries 20 times
+    with a 3 second delay between tries.
+
+ This function is used when you want to test the return type and,
+ optionally, the return value.
+
+ Returns (True, func-return) on success or
+ (False, func-return) on failure.
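+
+    Example (an illustrative sketch; the polled function is hypothetical):
+
+        def get_version():
+            return router.vtysh_cmd("show version")
+
+        success, result = run_and_expect_type(get_version, str)
+        assert success, "no string output: {}".format(result)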
+ """
+ start_time = time.time()
+ func_name = "<unknown>"
+ if func.__class__ == functools.partial:
+ func_name = func.func.__name__
+ else:
+ func_name = func.__name__
+
+ # Just a safety-check to avoid running topotests with very
+ # small wait/count arguments.
+ wait_time = wait * count
+ if wait_time < 5:
+ assert (
+ wait_time >= 5
+ ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
+ count, wait
+ )
+
+ logger.debug(
+ "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
+ func_name, wait, int(wait * count)
+ )
+ )
+
+ while count > 0:
+ result = func()
+ if not isinstance(result, etype):
+ logger.debug(
+ "Expected result type '{}' got '{}' instead".format(etype, type(result))
+ )
+ time.sleep(wait)
+ count -= 1
+ continue
+
+        if etype is not type(None) and avalue is not None and result != avalue:
+ logger.debug("Expected value '{}' got '{}' instead".format(avalue, result))
+ time.sleep(wait)
+ count -= 1
+ continue
+
+ end_time = time.time()
+ logger.debug(
+ "'{}' succeeded after {:.2f} seconds".format(
+ func_name, end_time - start_time
+ )
+ )
+ return (True, result)
+
+ end_time = time.time()
+ logger.error(
+ "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
+ )
+ return (False, result)
+
+
+def router_json_cmp_retry(router, cmd, data, exact=False, retry_timeout=10.0):
+ """
+ Runs `cmd` that returns JSON data (normally the command ends with 'json')
+    and compare with `data` contents. Retries for up to `retry_timeout` seconds (10 by default).
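+
+    Example (an illustrative sketch; the command and data are hypothetical):
+
+        assert router_json_cmp_retry(router, "show ip ospf json", {"routerId": "*"})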
+ """
+
+ def test_func():
+ return router_json_cmp(router, cmd, data, exact)
+
+ ok, _ = run_and_expect(test_func, None, int(retry_timeout), 1)
+ return ok
+
+
+def int2dpid(dpid):
+ "Converting Integer to DPID"
+
+ try:
+ dpid = hex(dpid)[2:]
+ dpid = "0" * (16 - len(dpid)) + dpid
+ return dpid
+ except IndexError:
+ raise Exception(
+ "Unable to derive default datapath ID - "
+ "please either specify a dpid or use a "
+ "canonical switch name such as s23."
+ )
+
+
+def get_textdiff(text1, text2, title1="", title2="", **opts):
+ "Returns empty string if same or formatted diff"
+
+ diff = "\n".join(
+ difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
+ )
+ # Clean up line endings
+ diff = os.linesep.join([s for s in diff.splitlines() if s])
+ return diff
+
+
+def difflines(text1, text2, title1="", title2="", **opts):
+ "Wrapper for get_textdiff to avoid string transformations."
+    text1 = ("\n".join(text1.rstrip().splitlines()) + "\n").splitlines(True)
+    text2 = ("\n".join(text2.rstrip().splitlines()) + "\n").splitlines(True)
+ return get_textdiff(text1, text2, title1, title2, **opts)
+
+
+def get_file(content):
+ """
+ Generates a temporary file in '/tmp' with `content` and returns the file name.
+ """
+ if isinstance(content, list) or isinstance(content, tuple):
+ content = "\n".join(content)
+ fde = tempfile.NamedTemporaryFile(mode="w", delete=False)
+ fname = fde.name
+ fde.write(content)
+ fde.close()
+ return fname
+
+
+def normalize_text(text):
+ """
+    Strips formatting spaces/tabs, carriage returns and trailing whitespace.
+ """
+ text = re.sub(r"[ \t]+", " ", text)
+ text = re.sub(r"\r", "", text)
+
+    # Remove trailing whitespace from each line.
+ text = re.sub(r"[ \t]+\n", "\n", text)
+ # Remove whitespace at the end of the text.
+ text = text.rstrip()
+
+ return text
+
+
+def is_linux():
+ """
+ Parses unix name output to check if running on GNU/Linux.
+
+ Returns True if running on Linux, returns False otherwise.
+ """
+
+ if os.uname()[0] == "Linux":
+ return True
+ return False
+
+
+def iproute2_is_vrf_capable():
+ """
+ Checks if the iproute2 version installed on the system is capable of
+ handling VRFs by interpreting the output of the 'ip' utility found in PATH.
+
+ Returns True if capability can be detected, returns False otherwise.
+ """
+
+ if is_linux():
+ try:
+ subp = subprocess.Popen(
+ ["ip", "route", "show", "vrf"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ )
+ iproute2_err = subp.communicate()[1].splitlines()[0].split()[0]
+
+ if iproute2_err != "Error:":
+ return True
+ except Exception:
+ pass
+ return False
+
+
+def iproute2_is_fdb_get_capable():
+ """
+ Checks if the iproute2 version installed on the system is capable of
+ handling `bridge fdb get` commands to query neigh table resolution.
+
+ Returns True if capability can be detected, returns False otherwise.
+ """
+
+ if is_linux():
+ try:
+ subp = subprocess.Popen(
+ ["bridge", "fdb", "get", "help"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ )
+ iproute2_out = subp.communicate()[1].splitlines()[0].split()[0]
+
+ if "Usage" in str(iproute2_out):
+ return True
+ except Exception:
+ pass
+ return False
+
+
+def module_present_linux(module, load):
+ """
+ Returns whether `module` is present.
+
+ If `load` is true, it will try to load it via modprobe.
+ """
+ with open("/proc/modules", "r") as modules_file:
+ if module.replace("-", "_") in modules_file.read():
+ return True
+ cmd = "/sbin/modprobe {}{}".format("" if load else "-n ", module)
+    return os.system(cmd) == 0
+
+
+def module_present_freebsd(module, load):
+ return True
+
+
+def module_present(module, load=True):
+ if sys.platform.startswith("linux"):
+ return module_present_linux(module, load)
+ elif sys.platform.startswith("freebsd"):
+ return module_present_freebsd(module, load)
+
+
+def version_cmp(v1, v2):
+ """
+ Compare two version strings and returns:
+
+ * `-1`: if `v1` is less than `v2`
+ * `0`: if `v1` is equal to `v2`
+ * `1`: if `v1` is greater than `v2`
+
+    Raises `ValueError` if versions are not well formatted.
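+
+    Example:
+
+        version_cmp("1.0", "1.0.1")  # returns -1
+        version_cmp("9.1", "9.1")    # returns 0
+        version_cmp("10.0", "9.1")   # returns 1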
+ """
+ vregex = r"(?P<whole>\d+(\.(\d+))*)"
+ v1m = re.match(vregex, v1)
+ v2m = re.match(vregex, v2)
+ if v1m is None or v2m is None:
+        raise ValueError("got an invalid version string")
+
+ # Split values
+ v1g = v1m.group("whole").split(".")
+ v2g = v2m.group("whole").split(".")
+
+ # Get the longest version string
+ vnum = len(v1g)
+ if len(v2g) > vnum:
+ vnum = len(v2g)
+
+ # Reverse list because we are going to pop the tail
+ v1g.reverse()
+ v2g.reverse()
+ for _ in range(vnum):
+ try:
+ v1n = int(v1g.pop())
+ except IndexError:
+ while v2g:
+ v2n = int(v2g.pop())
+ if v2n > 0:
+ return -1
+ break
+
+ try:
+ v2n = int(v2g.pop())
+ except IndexError:
+ if v1n > 0:
+ return 1
+ while v1g:
+ v1n = int(v1g.pop())
+ if v1n > 0:
+ return 1
+ break
+
+ if v1n > v2n:
+ return 1
+ if v1n < v2n:
+ return -1
+ return 0
+
+
+def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
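+    """Shut or un-shut interface `ifacename` on `node` via vtysh;
+    `ifaceaction=True` means 'no shutdown'. Optionally scoped to `vrf_name`."""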
+ if ifaceaction:
+ str_ifaceaction = "no shutdown"
+ else:
+ str_ifaceaction = "shutdown"
+    if vrf_name is None:
+ cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
+ ifacename, str_ifaceaction
+ )
+ else:
+ cmd = (
+ 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
+ ifacename, vrf_name, str_ifaceaction
+ )
+ )
+ node.run(cmd)
+
+
+def ip4_route_zebra(node, vrf_name=None):
+ """
+ Gets an output of 'show ip route' command. It can be used
+ with comparing the output to a reference
+ """
+    if vrf_name is None:
+ tmp = node.vtysh_cmd("show ip route")
+ else:
+ tmp = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name))
+ output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp)
+
+ lines = output.splitlines()
+ header_found = False
+ while lines and (not lines[0].strip() or not header_found):
+ if "o - offload failure" in lines[0]:
+ header_found = True
+ lines = lines[1:]
+ return "\n".join(lines)
+
+
+def ip6_route_zebra(node, vrf_name=None):
+ """
+ Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
+ canonicalizes it by eliding link-locals.
+ """
+
+    if vrf_name is None:
+ tmp = node.vtysh_cmd("show ipv6 route")
+ else:
+ tmp = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name))
+
+ # Mask out timestamp
+ output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp)
+
+ # Mask out the link-local addresses
+ output = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output)
+
+ lines = output.splitlines()
+ header_found = False
+ while lines and (not lines[0].strip() or not header_found):
+ if "o - offload failure" in lines[0]:
+ header_found = True
+ lines = lines[1:]
+
+ return "\n".join(lines)
+
+
+def proto_name_to_number(protocol):
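+    """Translate an FRR protocol name to the route protocol number (as a
+    string) seen in `ip route` output; unknown names are returned unchanged."""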
+ return {
+ "bgp": "186",
+ "isis": "187",
+ "ospf": "188",
+ "rip": "189",
+ "ripng": "190",
+ "nhrp": "191",
+ "eigrp": "192",
+ "ldp": "193",
+ "sharp": "194",
+ "pbr": "195",
+ "static": "196",
+ "ospf6": "197",
+ }.get(
+ protocol, protocol
+ ) # default return same as input
+
+
+def ip4_route(node):
+ """
+ Gets a structured return of the command 'ip route'. It can be used in
+ conjunction with json_cmp() to provide accurate assert explanations.
+
+ Return example:
+ {
+ '10.0.1.0/24': {
+ 'dev': 'eth0',
+ 'via': '172.16.0.1',
+ 'proto': '188',
+ },
+ '10.0.2.0/24': {
+ 'dev': 'eth1',
+ 'proto': 'kernel',
+ }
+ }
+ """
+ output = normalize_text(node.run("ip route")).splitlines()
+ result = {}
+ for line in output:
+ columns = line.split(" ")
+ route = result[columns[0]] = {}
+ prev = None
+ for column in columns:
+ if prev == "dev":
+ route["dev"] = column
+ if prev == "via":
+ route["via"] = column
+ if prev == "proto":
+ # translate protocol names back to numbers
+ route["proto"] = proto_name_to_number(column)
+ if prev == "metric":
+ route["metric"] = column
+ if prev == "scope":
+ route["scope"] = column
+ prev = column
+
+ return result
+
+
+def ip4_vrf_route(node):
+ """
+ Gets a structured return of the command 'ip route show vrf {0}-cust1'.
+ It can be used in conjunction with json_cmp() to provide accurate assert explanations.
+
+ Return example:
+ {
+ '10.0.1.0/24': {
+ 'dev': 'eth0',
+ 'via': '172.16.0.1',
+ 'proto': '188',
+ },
+ '10.0.2.0/24': {
+ 'dev': 'eth1',
+ 'proto': 'kernel',
+ }
+ }
+ """
+ output = normalize_text(
+ node.run("ip route show vrf {0}-cust1".format(node.name))
+ ).splitlines()
+
+ result = {}
+ for line in output:
+ columns = line.split(" ")
+ route = result[columns[0]] = {}
+ prev = None
+ for column in columns:
+ if prev == "dev":
+ route["dev"] = column
+ if prev == "via":
+ route["via"] = column
+ if prev == "proto":
+ # translate protocol names back to numbers
+ route["proto"] = proto_name_to_number(column)
+ if prev == "metric":
+ route["metric"] = column
+ if prev == "scope":
+ route["scope"] = column
+ prev = column
+
+ return result
+
+
+def ip6_route(node):
+ """
+ Gets a structured return of the command 'ip -6 route'. It can be used in
+ conjunction with json_cmp() to provide accurate assert explanations.
+
+ Return example:
+ {
+ '2001:db8:1::/64': {
+ 'dev': 'eth0',
+ 'proto': '188',
+ },
+ '2001:db8:2::/64': {
+ 'dev': 'eth1',
+ 'proto': 'kernel',
+ }
+ }
+ """
+ output = normalize_text(node.run("ip -6 route")).splitlines()
+ result = {}
+ for line in output:
+ columns = line.split(" ")
+ route = result[columns[0]] = {}
+ prev = None
+ for column in columns:
+ if prev == "dev":
+ route["dev"] = column
+ if prev == "via":
+ route["via"] = column
+ if prev == "proto":
+ # translate protocol names back to numbers
+ route["proto"] = proto_name_to_number(column)
+ if prev == "metric":
+ route["metric"] = column
+ if prev == "pref":
+ route["pref"] = column
+ prev = column
+
+ return result
+
+
+def ip6_vrf_route(node):
+ """
+ Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
+ It can be used in conjunction with json_cmp() to provide accurate assert explanations.
+
+ Return example:
+ {
+ '2001:db8:1::/64': {
+ 'dev': 'eth0',
+ 'proto': '188',
+ },
+ '2001:db8:2::/64': {
+ 'dev': 'eth1',
+ 'proto': 'kernel',
+ }
+ }
+ """
+ output = normalize_text(
+ node.run("ip -6 route show vrf {0}-cust1".format(node.name))
+ ).splitlines()
+ result = {}
+ for line in output:
+ columns = line.split(" ")
+ route = result[columns[0]] = {}
+ prev = None
+ for column in columns:
+ if prev == "dev":
+ route["dev"] = column
+ if prev == "via":
+ route["via"] = column
+ if prev == "proto":
+ # translate protocol names back to numbers
+ route["proto"] = proto_name_to_number(column)
+ if prev == "metric":
+ route["metric"] = column
+ if prev == "pref":
+ route["pref"] = column
+ prev = column
+
+ return result
+
+
+def ip_rules(node):
+ """
+ Gets a structured return of the command 'ip rule'. It can be used in
+ conjunction with json_cmp() to provide accurate assert explanations.
+
+ Return example:
+ [
+ {
+ "pref": "0"
+ "from": "all"
+ },
+ {
+ "pref": "32766"
+ "from": "all"
+ },
+ {
+ "to": "3.4.5.0/24",
+ "iif": "r1-eth2",
+ "pref": "304",
+ "from": "1.2.0.0/16",
+ "proto": "zebra"
+ }
+ ]
+ """
+ output = normalize_text(node.run("ip rule")).splitlines()
+ result = []
+ for line in output:
+ columns = line.split(" ")
+
+ route = {}
+ # remove last character, since it is ':'
+ pref = columns[0][:-1]
+ route["pref"] = pref
+ prev = None
+ for column in columns:
+ if prev == "from":
+ route["from"] = column
+ if prev == "to":
+ route["to"] = column
+ if prev == "proto":
+ route["proto"] = column
+ if prev == "iif":
+ route["iif"] = column
+ if prev == "fwmark":
+ route["fwmark"] = column
+ prev = column
+
+ result.append(route)
+ return result
+
+
+def sleep(amount, reason=None):
+ """
+ Sleep wrapper that registers in the log the amount of sleep
+ """
+ if reason is None:
+ logger.info("Sleeping for {} seconds".format(amount))
+ else:
+ logger.info(reason + " ({} seconds)".format(amount))
+
+ time.sleep(amount)
+
+
+def checkAddressSanitizerError(output, router, component, logdir=""):
+ "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"
+
+ def processAddressSanitizerError(asanErrorRe, output, router, component):
+ sys.stderr.write(
+ "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
+ )
+ # Sanitizer Error found in log
+ pidMark = asanErrorRe.group(1)
+ addressSanitizerLog = re.search(
+ "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
+ )
+ if addressSanitizerLog:
+ # Find Calling Test. Could be multiple steps back
+ testframe = list(sys._current_frames().values())[0]
+ level = 0
+ while level < 10:
+ test = os.path.splitext(
+ os.path.basename(testframe.f_globals["__file__"])
+ )[0]
+ if (test != "topotest") and (test != "topogen"):
+ # Found the calling test
+ callingTest = os.path.basename(testframe.f_globals["__file__"])
+ break
+ level = level + 1
+ testframe = testframe.f_back
+ if level >= 10:
+ # somehow couldn't find the test script.
+ callingTest = "unknownTest"
+ #
+ # Now finding Calling Procedure
+ level = 0
+ while level < 20:
+ callingProc = sys._getframe(level).f_code.co_name
+ if (
+ (callingProc != "processAddressSanitizerError")
+ and (callingProc != "checkAddressSanitizerError")
+ and (callingProc != "checkRouterCores")
+ and (callingProc != "stopRouter")
+ and (callingProc != "stop")
+ and (callingProc != "stop_topology")
+ and (callingProc != "checkRouterRunning")
+ and (callingProc != "check_router_running")
+ and (callingProc != "routers_have_failure")
+ ):
+ # Found the calling test
+ break
+ level = level + 1
+ if level >= 20:
+                # something wrong - couldn't find the calling test function
+ callingProc = "unknownProc"
+ with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
+ sys.stderr.write(
+ "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
+ % (callingTest, callingProc, router)
+ )
+ sys.stderr.write(
+ "\n".join(addressSanitizerLog.group(1).splitlines()) + "\n"
+ )
+ addrSanFile.write("## Error: %s\n\n" % asanErrorRe.group(2))
+ addrSanFile.write(
+ "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
+ % (callingTest, callingProc, router)
+ )
+ addrSanFile.write(
+ " "
+ + "\n ".join(addressSanitizerLog.group(1).splitlines())
+ + "\n"
+ )
+ addrSanFile.write("\n---------------\n")
+ return
+
+ addressSanitizerError = re.search(
+ r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
+ )
+ if addressSanitizerError:
+ processAddressSanitizerError(addressSanitizerError, output, router, component)
+ return True
+
+ # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
+ if logdir:
+ filepattern = logdir + "/" + router + ".asan." + component + ".*"
+ logger.debug(
+ "Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
+ )
+ for file in glob.glob(filepattern):
+ with open(file, "r") as asanErrorFile:
+ asanError = asanErrorFile.read()
+ addressSanitizerError = re.search(
+ r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
+ )
+ if addressSanitizerError:
+ processAddressSanitizerError(
+ addressSanitizerError, asanError, router, component
+ )
+ return True
+ return False
+
+
+def _sysctl_atleast(commander, variable, min_value):
+ if isinstance(min_value, tuple):
+ min_value = list(min_value)
+ is_list = isinstance(min_value, list)
+
+ sval = commander.cmd_raises("sysctl -n " + variable).strip()
+ if is_list:
+ cur_val = [int(x) for x in sval.split()]
+ else:
+ cur_val = int(sval)
+
+ set_value = False
+ if is_list:
+ for i, v in enumerate(cur_val):
+ if v < min_value[i]:
+ set_value = True
+ else:
+ min_value[i] = v
+ else:
+ if cur_val < min_value:
+ set_value = True
+ if set_value:
+ if is_list:
+ valstr = " ".join([str(x) for x in min_value])
+ else:
+ valstr = str(min_value)
+ logger.debug("Increasing sysctl %s from %s to %s", variable, cur_val, valstr)
+ commander.cmd_raises('sysctl -w {}="{}"'.format(variable, valstr))
+
+
+def _sysctl_assure(commander, variable, value):
+ if isinstance(value, tuple):
+ value = list(value)
+ is_list = isinstance(value, list)
+
+ sval = commander.cmd_raises("sysctl -n " + variable).strip()
+ if is_list:
+ cur_val = [int(x) for x in sval.split()]
+ else:
+ cur_val = sval
+
+ set_value = False
+ if is_list:
+ for i, v in enumerate(cur_val):
+ if v != value[i]:
+ set_value = True
+ else:
+ value[i] = v
+ else:
+ if cur_val != str(value):
+ set_value = True
+
+ if set_value:
+ if is_list:
+ valstr = " ".join([str(x) for x in value])
+ else:
+ valstr = str(value)
+ logger.debug("Changing sysctl %s from %s to %s", variable, cur_val, valstr)
+ commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
+
+
+def sysctl_atleast(commander, variable, min_value, raises=False):
+ try:
+ if commander is None:
+ logger = logging.getLogger("topotest")
+ commander = micronet.Commander("sysctl", logger=logger)
+
+ return _sysctl_atleast(commander, variable, min_value)
+ except subprocess.CalledProcessError as error:
+ logger.warning(
+ "%s: Failed to assure sysctl min value %s = %s",
+ commander,
+ variable,
+            min_value,
+            exc_info=True,
+        )
+ if raises:
+ raise
+
+
+def sysctl_assure(commander, variable, value, raises=False):
+ try:
+ if commander is None:
+ logger = logging.getLogger("topotest")
+ commander = micronet.Commander("sysctl", logger=logger)
+ return _sysctl_assure(commander, variable, value)
+ except subprocess.CalledProcessError as error:
+ logger.warning(
+ "%s: Failed to assure sysctl value %s = %s",
+ commander,
+ variable,
+ value,
+ exc_info=True,
+ )
+ if raises:
+ raise
+
+
+def rlimit_atleast(rname, min_value, raises=False):
+ try:
+ cval = resource.getrlimit(rname)
+ soft, hard = cval
+ if soft < min_value:
+ nval = (min_value, hard if min_value < hard else min_value)
+ logger.debug("Increasing rlimit %s from %s to %s", rname, cval, nval)
+ resource.setrlimit(rname, nval)
+ except subprocess.CalledProcessError as error:
+ logger.warning(
+ "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
+ )
+ if raises:
+ raise
+
+
+def fix_netns_limits(ns):
+ # Maximum read and write socket buffer sizes
+ sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
+ sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])
+
+ sysctl_assure(ns, "net.ipv4.conf.all.rp_filter", 0)
+ sysctl_assure(ns, "net.ipv4.conf.default.rp_filter", 0)
+ sysctl_assure(ns, "net.ipv4.conf.lo.rp_filter", 0)
+
+ sysctl_assure(ns, "net.ipv4.conf.all.forwarding", 1)
+ sysctl_assure(ns, "net.ipv4.conf.default.forwarding", 1)
+
+ # XXX if things fail look here as this wasn't done previously
+ sysctl_assure(ns, "net.ipv6.conf.all.forwarding", 1)
+ sysctl_assure(ns, "net.ipv6.conf.default.forwarding", 1)
+
+ # ARP
+ sysctl_assure(ns, "net.ipv4.conf.default.arp_announce", 2)
+ sysctl_assure(ns, "net.ipv4.conf.default.arp_notify", 1)
+ # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
+ sysctl_assure(ns, "net.ipv4.conf.default.arp_ignore", 0)
+ sysctl_assure(ns, "net.ipv4.conf.all.arp_announce", 2)
+ sysctl_assure(ns, "net.ipv4.conf.all.arp_notify", 1)
+ # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
+ sysctl_assure(ns, "net.ipv4.conf.all.arp_ignore", 0)
+
+ sysctl_assure(ns, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)
+
+ # Keep ipv6 permanent addresses on an admin down
+ sysctl_assure(ns, "net.ipv6.conf.all.keep_addr_on_down", 1)
+ if version_cmp(platform.release(), "4.20") >= 0:
+ sysctl_assure(ns, "net.ipv6.route.skip_notify_on_dev_down", 1)
+
+ sysctl_assure(ns, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
+ sysctl_assure(ns, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)
+
+ # igmp
+ sysctl_atleast(ns, "net.ipv4.igmp_max_memberships", 1000)
+
+ # Use neigh information on selection of nexthop for multipath hops
+ sysctl_assure(ns, "net.ipv4.fib_multipath_use_neigh", 1)
+
+
+def fix_host_limits():
+ """Increase system limits."""
+
+ rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
+ rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)
+ sysctl_atleast(None, "fs.file-max", 16 * 1024)
+ sysctl_atleast(None, "kernel.pty.max", 16 * 1024)
+
+ # Enable coredumps
+    # Original on ubuntu 17.x, but apport won't save cores from inside a namespace
+ # |/usr/share/apport/apport %p %s %c %d %P
+ sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
+ sysctl_assure(None, "kernel.core_uses_pid", 1)
+ sysctl_assure(None, "fs.suid_dumpable", 1)
+
+ # Maximum connection backlog
+ sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)
+
+ # Maximum read and write socket buffer sizes
+ sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)
+ sysctl_atleast(None, "net.core.wmem_max", 16 * 2 ** 20)
+
+ # Garbage Collection Settings for ARP and Neighbors
+ sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
+ sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
+ sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
+ sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
+ # Hold entries for 10 minutes
+ sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
+ sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
+
+ # igmp
+ sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)
+
+ # MLD
+ sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)
+
+ # Increase routing table size to 128K
+ sysctl_atleast(None, "net.ipv4.route.max_size", 128 * 1024)
+ sysctl_atleast(None, "net.ipv6.route.max_size", 128 * 1024)
+
+
+def setup_node_tmpdir(logdir, name):
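+    """Remove stale log/valgrind/core files for node `name`, create its
+    per-node directory under `logdir`, and return the node's logfile path."""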
+ # Cleanup old log, valgrind, and core files.
+ subprocess.check_call(
+ "rm -rf {0}/{1}.valgrind.* {0}/{1}.asan.* {0}/{1}/".format(logdir, name),
+ shell=True,
+ )
+
+ # Setup the per node directory.
+ nodelogdir = "{}/{}".format(logdir, name)
+ subprocess.check_call(
+ "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir), shell=True
+ )
+ logfile = "{0}/{1}.log".format(logdir, name)
+ return logfile
+
+
+class Router(Node):
+ "A Node with IPv4/IPv6 forwarding enabled"
+
+ def __init__(self, name, *posargs, **params):
+ # Backward compatibility:
+ # Load configuration defaults like topogen.
+ self.config_defaults = configparser.ConfigParser(
+ defaults={
+ "verbosity": "info",
+ "frrdir": "/usr/lib/frr",
+ "routertype": "frr",
+ "memleak_path": "",
+ }
+ )
+
+ self.config_defaults.read(
+ os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
+ )
+
+ self.perf_daemons = {}
+
+ # If this topology is using old API and doesn't have logdir
+        # specified, then attempt to generate a unique logdir.
+ self.logdir = params.get("logdir")
+ if self.logdir is None:
+ self.logdir = get_logs_path(g_pytest_config.getoption("--rundir"))
+
+ if not params.get("logger"):
+ # If logger is present topogen has already set this up
+ logfile = setup_node_tmpdir(self.logdir, name)
+ l = topolog.get_logger(name, log_level="debug", target=logfile)
+ params["logger"] = l
+
+ super(Router, self).__init__(name, *posargs, **params)
+
+ self.daemondir = None
+ self.hasmpls = False
+ self.routertype = "frr"
+ self.unified_config = None
+ self.daemons = {
+ "zebra": 0,
+ "ripd": 0,
+ "ripngd": 0,
+ "ospfd": 0,
+ "ospf6d": 0,
+ "isisd": 0,
+ "bgpd": 0,
+ "pimd": 0,
+ "pim6d": 0,
+ "ldpd": 0,
+ "eigrpd": 0,
+ "nhrpd": 0,
+ "staticd": 0,
+ "bfdd": 0,
+ "sharpd": 0,
+ "babeld": 0,
+ "pbrd": 0,
+ "pathd": 0,
+ "snmpd": 0,
+ "mgmtd": 0,
+ }
+ self.daemons_options = {"zebra": ""}
+ self.reportCores = True
+ self.version = None
+
+ self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
+ try:
+ # Allow escaping from running inside docker
+ cgroup = open("/proc/1/cgroup").read()
+ m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
+ if m:
+ self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
+ except IOError:
+ pass
+ else:
+ logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))
+
+ def _config_frr(self, **params):
+ "Configure FRR binaries"
+ self.daemondir = params.get("frrdir")
+ if self.daemondir is None:
+ self.daemondir = self.config_defaults.get("topogen", "frrdir")
+
+ zebra_path = os.path.join(self.daemondir, "zebra")
+ if not os.path.isfile(zebra_path):
+ raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))
+
+ mgmtd_path = os.path.join(self.daemondir, "mgmtd")
+ if not os.path.isfile(mgmtd_path):
+ raise Exception("FRR MGMTD binary doesn't exist at {}".format(mgmtd_path))
+
+ # pylint: disable=W0221
+ # Some params are only meaningful for the parent class.
+ def config_host(self, **params):
+ super(Router, self).config_host(**params)
+
+ # User did not specify the daemons directory, try to autodetect it.
+ self.daemondir = params.get("daemondir")
+ if self.daemondir is None:
+ self.routertype = params.get(
+ "routertype", self.config_defaults.get("topogen", "routertype")
+ )
+ self._config_frr(**params)
+ else:
+ # Test the provided path
+ zpath = os.path.join(self.daemondir, "zebra")
+ if not os.path.isfile(zpath):
+ raise Exception("No zebra binary found in {}".format(zpath))
+
+ cpath = os.path.join(self.daemondir, "mgmtd")
+            if not os.path.isfile(cpath):
+ raise Exception("No MGMTD binary found in {}".format(cpath))
+ # Allow user to specify routertype when the path was specified.
+ if params.get("routertype") is not None:
+ self.routertype = params.get("routertype")
+
+ # Set ownership of config files
+ self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype))
+
+ def terminate(self):
+ # Stop running FRR daemons
+ self.stopRouter()
+ super(Router, self).terminate()
+ os.system("chmod -R go+rw " + self.logdir)
+
+ # Return count of running daemons
+ def listDaemons(self):
+ ret = []
+ rc, stdout, _ = self.cmd_status(
+ "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
+ )
+ if rc:
+ return ret
+ for d in stdout.strip().split("\n"):
+ pidfile = d.strip()
+ try:
+ pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
+ name = os.path.basename(pidfile[:-4])
+
+ # probably not compatible with bsd.
+ rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
+ if rc:
+ logger.warning(
+ "%s: %s exited leaving pidfile %s (%s)",
+ self.name,
+ name,
+ pidfile,
+ pid,
+ )
+ self.cmd("rm -- " + pidfile)
+ else:
+ ret.append((name, pid))
+ except (subprocess.CalledProcessError, ValueError):
+ pass
+ return ret
+
+ def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
+ # Stop Running FRR Daemons
+ running = self.listDaemons()
+ if not running:
+ return ""
+
+ logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
+ for name, pid in running:
+ logger.debug("{}: sending SIGTERM to {}".format(self.name, name))
+ try:
+ os.kill(pid, signal.SIGTERM)
+ except OSError as err:
+ logger.debug(
+ "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
+ )
+
+ running = self.listDaemons()
+ if running:
+ for _ in range(0, 30):
+ sleep(
+ 0.5,
+ "{}: waiting for daemons stopping: {}".format(
+ self.name, ", ".join([x[0] for x in running])
+ ),
+ )
+ running = self.listDaemons()
+ if not running:
+ break
+
+ if running:
+ logger.warning(
+ "%s: sending SIGBUS to: %s",
+ self.name,
+ ", ".join([x[0] for x in running]),
+ )
+ for name, pid in running:
+ pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
+ logger.info("%s: killing %s", self.name, name)
+ self.cmd("kill -SIGBUS %d" % pid)
+ self.cmd("rm -- " + pidfile)
+
+ sleep(
+ 0.5,
+ "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name,
+ )
+
+ errors = self.checkRouterCores(reportOnce=True)
+ if self.checkRouterVersion("<", minErrorVersion):
+ # ignore errors in old versions
+ errors = ""
+ if assertOnError and (errors is not None) and len(errors) > 0:
+ assert "Errors found - details follow:" == 0, errors
+ return errors
+
+ def removeIPs(self):
+ for interface in self.intfNames():
+ try:
+ self.intf_ip_cmd(interface, "ip -4 address flush " + interface)
+ self.intf_ip_cmd(
+ interface, "ip -6 address flush " + interface + " scope global"
+ )
+ except Exception as ex:
+ logger.error("%s can't remove IPs %s", self, str(ex))
+ # breakpoint()
+ # assert False, "can't remove IPs %s" % str(ex)
+
+ def checkCapability(self, daemon, param):
+ if param is not None:
+ daemon_path = os.path.join(self.daemondir, daemon)
+ daemon_search_option = param.replace("-", "")
+ output = self.cmd(
+ "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
+ )
+ if daemon_search_option not in output:
+ return False
+ return True
+
+ def loadConf(self, daemon, source=None, param=None):
+ """Enabled and set config for a daemon.
+
+ Arranges for loading of daemon configuration from the specified source. Possible
+ `source` values are `None` for an empty config file, a path name which is used
+ directly, or a file name with no path components which is first looked for
+        directly and then looked for under a sub-directory named after the router.
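+
+        Example (illustrative; the path is hypothetical):
+
+            router.loadConf("zebra", "/path/to/zebra.conf")
+            router.loadConf("bgpd")  # falls back to 'bgpd.conf' or an empty file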
+ """
+
+        # Unfortunately this API allows for source to not exist for any and all routers.
+ source_was_none = source is None
+ if source_was_none:
+ source = f"{daemon}.conf"
+
+ # "" to avoid loading a default config which is present in router dir
+ if source:
+ head, tail = os.path.split(source)
+ if not head and not self.path_exists(tail):
+ script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
+ router_relative = os.path.join(script_dir, self.name, tail)
+ if self.path_exists(router_relative):
+ source = router_relative
+ self.logger.debug(
+ "using router relative configuration: {}".format(source)
+ )
+
+ # print "Daemons before:", self.daemons
+ if daemon in self.daemons.keys() or daemon == "frr":
+ if daemon == "frr":
+ self.unified_config = 1
+ else:
+ self.daemons[daemon] = 1
+ if param is not None:
+ self.daemons_options[daemon] = param
+ conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
+ if source and not os.path.exists(source):
+ logger.warning(
+ "missing config '%s' for '%s' creating empty file '%s'",
+                    source,
+                    self.name,
+ conf_file,
+ )
+ if daemon == "frr" or not self.unified_config:
+ self.cmd_raises("rm -f " + conf_file)
+ self.cmd_raises("touch " + conf_file)
+ self.cmd_raises(
+ "chown {0}:{0} {1}".format(self.routertype, conf_file)
+ )
+ self.cmd_raises("chmod 664 {}".format(conf_file))
+ elif source:
+ # copy zebra.conf to mgmtd folder, which can be used during startup
+ if daemon == "zebra" and not self.unified_config:
+ conf_file_mgmt = "/etc/{}/{}.conf".format(self.routertype, "mgmtd")
+ logger.debug(
+ "copying '%s' as '%s' on '%s'",
+ source,
+ conf_file_mgmt,
+ self.name,
+ )
+ self.cmd_raises("cp {} {}".format(source, conf_file_mgmt))
+ self.cmd_raises(
+ "chown {0}:{0} {1}".format(self.routertype, conf_file_mgmt)
+ )
+ self.cmd_raises("chmod 664 {}".format(conf_file_mgmt))
+
+ logger.debug(
+ "copying '%s' as '%s' on '%s'", source, conf_file, self.name
+ )
+ self.cmd_raises("cp {} {}".format(source, conf_file))
+ self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
+ self.cmd_raises("chmod 664 {}".format(conf_file))
+
+ if (daemon == "snmpd") and (self.routertype == "frr"):
+ # /etc/snmp is private mount now
+ self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
+ self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')
+
+ if (daemon == "zebra") and (self.daemons["mgmtd"] == 0):
+ # Add mgmtd with zebra - if it exists
+ mgmtd_path = os.path.join(self.daemondir, "mgmtd")
+ if os.path.isfile(mgmtd_path):
+ self.daemons["mgmtd"] = 1
+ self.daemons_options["mgmtd"] = ""
+ # Auto-Started mgmtd has no config, so it will read from zebra config
+
+ if (daemon == "zebra") and (self.daemons["staticd"] == 0):
+ # Add staticd with zebra - if it exists
+ staticd_path = os.path.join(self.daemondir, "staticd")
+ if os.path.isfile(staticd_path):
+ self.daemons["staticd"] = 1
+ self.daemons_options["staticd"] = ""
+ # Auto-Started staticd has no config, so it will read from zebra config
+
+ else:
+ logger.warning("No daemon {} known".format(daemon))
+
+ return source if os.path.exists(source) else ""
+
+ def runInWindow(self, cmd, title=None):
+ return self.run_in_window(cmd, title)
+
+ def startRouter(self, tgen=None):
+ if self.unified_config:
+ self.cmd(
+ 'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
+ % self.routertype
+ )
+ else:
+ # Disable integrated-vtysh-config
+ self.cmd(
+ 'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
+ % self.routertype
+ )
+
+ self.cmd(
+ "chown %s:%svty /etc/%s/vtysh.conf"
+ % (self.routertype, self.routertype, self.routertype)
+ )
+ # TODO remove the following lines after all tests are migrated to Topogen.
+ # Try to find relevant old logfiles in /tmp and delete them
+        for old_log in glob.glob("{}/{}/*.log".format(self.logdir, self.name)):
+            os.remove(old_log)
+ # Remove IP addresses from OS first - we have them in zebra.conf
+ self.removeIPs()
+ # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
+ # No error - but return message and skip all the tests
+ if self.daemons["ldpd"] == 1:
+ ldpd_path = os.path.join(self.daemondir, "ldpd")
+ if not os.path.isfile(ldpd_path):
+ logger.info("LDP Test, but no ldpd compiled or installed")
+ return "LDP Test, but no ldpd compiled or installed"
+
+ if version_cmp(platform.release(), "4.5") < 0:
+ logger.info("LDP Test need Linux Kernel 4.5 minimum")
+ return "LDP Test need Linux Kernel 4.5 minimum"
+ # Check if have mpls
+            if tgen is not None:
+                self.hasmpls = tgen.hasmpls
+                if not self.hasmpls:
+ logger.info(
+ "LDP/MPLS Tests will be skipped, platform missing module(s)"
+ )
+ else:
+ # Test for MPLS Kernel modules available
+ self.hasmpls = False
+ if not module_present("mpls-router"):
+ logger.info(
+ "MPLS tests will not run (missing mpls-router kernel module)"
+ )
+ elif not module_present("mpls-iptunnel"):
+ logger.info(
+ "MPLS tests will not run (missing mpls-iptunnel kernel module)"
+ )
+ else:
+ self.hasmpls = True
+            if not self.hasmpls:
+ return "LDP/MPLS Tests need mpls kernel modules"
+
+ # Really want to use sysctl_atleast here, but only when MPLS is actually being
+ # used
+ self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")
+
+ if g_pytest_config.name_in_option_list(self.name, "--shell"):
+ self.run_in_window(os.getenv("SHELL", "bash"), title="sh-%s" % self.name)
+
+ if self.daemons["eigrpd"] == 1:
+ eigrpd_path = os.path.join(self.daemondir, "eigrpd")
+ if not os.path.isfile(eigrpd_path):
+ logger.info("EIGRP Test, but no eigrpd compiled or installed")
+ return "EIGRP Test, but no eigrpd compiled or installed"
+
+ if self.daemons["bfdd"] == 1:
+ bfdd_path = os.path.join(self.daemondir, "bfdd")
+ if not os.path.isfile(bfdd_path):
+ logger.info("BFD Test, but no bfdd compiled or installed")
+ return "BFD Test, but no bfdd compiled or installed"
+
+ status = self.startRouterDaemons(tgen=tgen)
+
+ if g_pytest_config.name_in_option_list(self.name, "--vtysh"):
+ self.run_in_window("vtysh", title="vt-%s" % self.name)
+
+ if self.unified_config:
+ self.cmd("vtysh -f /etc/frr/frr.conf")
+
+ return status
+
+ def getStdErr(self, daemon):
+ return self.getLog("err", daemon)
+
+ def getStdOut(self, daemon):
+ return self.getLog("out", daemon)
+
+ def getLog(self, log, daemon):
+ filename = "{}/{}/{}.{}".format(self.logdir, self.name, daemon, log)
+ log = ""
+ with open(filename) as file:
+ log = file.read()
+ return log
+
+ def startRouterDaemons(self, daemons=None, tgen=None):
+ "Starts FRR daemons for this router."
+
+ asan_abort = bool(g_pytest_config.option.asan_abort)
+ gdb_breakpoints = g_pytest_config.get_option_list("--gdb-breakpoints")
+ gdb_daemons = g_pytest_config.get_option_list("--gdb-daemons")
+ gdb_routers = g_pytest_config.get_option_list("--gdb-routers")
+ valgrind_extra = bool(g_pytest_config.option.valgrind_extra)
+ valgrind_memleaks = bool(g_pytest_config.option.valgrind_memleaks)
+ strace_daemons = g_pytest_config.get_option_list("--strace-daemons")
+
+ # Get global bundle data
+ if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
+ # Copy global value if was covered by namespace mount
+ bundle_data = ""
+ if os.path.exists("/etc/frr/support_bundle_commands.conf"):
+ with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
+ bundle_data = rf.read()
+ self.cmd_raises(
+ "cat > /etc/frr/support_bundle_commands.conf",
+ stdin=bundle_data,
+ )
+
+ # Starts actual daemons without init (ie restart)
+ # cd to per node directory
+ self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
+ self.set_cwd("{}/{}".format(self.logdir, self.name))
+ self.cmd("umask 000")
+
+ # Re-enable to allow for report per run
+ self.reportCores = True
+
+ # XXX: glue code forward ported from removed function.
+ if self.version is None:
+ self.version = self.cmd(
+ os.path.join(self.daemondir, "bgpd") + " -v"
+ ).split()[2]
+ logger.info("{}: running version: {}".format(self.name, self.version))
+
+ perfds = {}
+ perf_options = g_pytest_config.get_option("--perf-options", "-g")
+ for perf in g_pytest_config.get_option("--perf", []):
+ if "," in perf:
+ daemon, routers = perf.split(",", 1)
+ perfds[daemon] = routers.split(",")
+ else:
+ daemon = perf
+ perfds[daemon] = ["all"]
+
+ logd_options = {}
+ for logd in g_pytest_config.get_option("--logd", []):
+ if "," in logd:
+ daemon, routers = logd.split(",", 1)
+ logd_options[daemon] = routers.split(",")
+ else:
+ daemon = logd
+ logd_options[daemon] = ["all"]
+
+ # If `daemons` was specified then some upper API called us with
+ # specific daemons, otherwise just use our own configuration.
+ daemons_list = []
+ if daemons is not None:
+ daemons_list = daemons
+ else:
+ # Append all daemons configured.
+ for daemon in self.daemons:
+ if self.daemons[daemon] == 1:
+ daemons_list.append(daemon)
+
+ tail_log_files = []
+ check_daemon_files = []
+
+ def start_daemon(daemon, extra_opts=None):
+ daemon_opts = self.daemons_options.get(daemon, "")
+
+ # get pid and vty filenames and remove the files
+ m = re.match(r"(.* |^)-n (\d+)( ?.*|$)", daemon_opts)
+ dfname = daemon if not m else "{}-{}".format(daemon, m.group(2))
+ runbase = "/var/run/{}/{}".format(self.routertype, dfname)
+ # If this is a new system bring-up remove the pid/vty files, otherwise
+ # do not since apparently presence of the pidfile impacts BGP GR
+ self.cmd_status("rm -f {0}.pid {0}.vty".format(runbase))
+
+ rediropt = " > {0}.out 2> {0}.err".format(daemon)
+ if daemon == "snmpd":
+ binary = "/usr/sbin/snmpd"
+ cmdenv = ""
+ cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
+ daemon_opts
+ ) + "{}.pid -x /etc/frr/agentx".format(runbase)
+ # check_daemon_files.append(runbase + ".pid")
+ else:
+ binary = os.path.join(self.daemondir, daemon)
+ check_daemon_files.extend([runbase + ".pid", runbase + ".vty"])
+
+ cmdenv = "ASAN_OPTIONS="
+ if asan_abort:
+ cmdenv += "abort_on_error=1:"
+ cmdenv += "log_path={0}/{1}.asan.{2} ".format(
+ self.logdir, self.name, daemon
+ )
+
+ if valgrind_memleaks:
+ this_dir = os.path.dirname(
+ os.path.abspath(os.path.realpath(__file__))
+ )
+ supp_file = os.path.abspath(
+ os.path.join(this_dir, "../../../tools/valgrind.supp")
+ )
+ cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
+ daemon, self.logdir, self.name, supp_file
+ )
+ if valgrind_extra:
+ cmdenv += (
+ " --gen-suppressions=all --expensive-definedness-checks=yes"
+ )
+ elif daemon in strace_daemons or "all" in strace_daemons:
+ cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
+ daemon, self.logdir, self.name
+ )
+
+ cmdopt = "{} --command-log-always ".format(daemon_opts)
+ cmdopt += "--log file:{}.log --log-level debug".format(daemon)
+
+ if daemon in logd_options:
+ logdopt = logd_options[daemon]
+ if "all" in logdopt or self.name in logdopt:
+ tail_log_files.append(
+ "{}/{}/{}.log".format(self.logdir, self.name, daemon)
+ )
+ if extra_opts:
+ cmdopt += " " + extra_opts
+
+ if (
+ (gdb_routers or gdb_daemons)
+ and (
+ not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
+ )
+ and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
+ ):
+ if daemon == "snmpd":
+ cmdopt += " -f "
+
+ cmdopt += rediropt
+ gdbcmd = "sudo -E gdb " + binary
+ if gdb_breakpoints:
+ gdbcmd += " -ex 'set breakpoint pending on'"
+ for bp in gdb_breakpoints:
+ gdbcmd += " -ex 'b {}'".format(bp)
+ gdbcmd += " -ex 'run {}'".format(cmdopt)
+
+ self.run_in_window(gdbcmd, daemon)
+
+ logger.info(
+ "%s: %s %s launched in gdb window", self, self.routertype, daemon
+ )
+ elif daemon in perfds and (
+ self.name in perfds[daemon] or "all" in perfds[daemon]
+ ):
+ cmdopt += rediropt
+ cmd = " ".join(
+ ["perf record {} --".format(perf_options), binary, cmdopt]
+ )
+ p = self.popen(cmd)
+ self.perf_daemons[daemon] = p
+ if p.poll() and p.returncode:
+ self.logger.error(
+ '%s: Failed to launch "%s" (%s) with perf using: %s',
+ self,
+ daemon,
+ p.returncode,
+ cmd,
+ )
+ else:
+ logger.debug(
+ "%s: %s %s started with perf", self, self.routertype, daemon
+ )
+ else:
+ if daemon != "snmpd":
+ cmdopt += " -d "
+ cmdopt += rediropt
+
+ try:
+ self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
+ except subprocess.CalledProcessError as error:
+ self.logger.error(
+ '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
+ self,
+ daemon,
+ error.returncode,
+ error.cmd,
+ '\n:stdout: "{}"'.format(error.stdout.strip())
+ if error.stdout
+ else "",
+ '\n:stderr: "{}"'.format(error.stderr.strip())
+ if error.stderr
+ else "",
+ )
+ else:
+ logger.debug("%s: %s %s started", self, self.routertype, daemon)
+
+ # Start mgmtd first
+ if "mgmtd" in daemons_list:
+ start_daemon("mgmtd")
+ while "mgmtd" in daemons_list:
+ daemons_list.remove("mgmtd")
+
+ # Start Zebra after mgmtd
+ if "zebra" in daemons_list:
+ start_daemon("zebra", "-s 90000000")
+ while "zebra" in daemons_list:
+ daemons_list.remove("zebra")
+
+ # Start staticd next if required
+ if "staticd" in daemons_list:
+ start_daemon("staticd")
+ while "staticd" in daemons_list:
+ daemons_list.remove("staticd")
+
+ if "snmpd" in daemons_list:
+            # Give zebra a chance to configure interface addresses that snmpd daemon
+ # may then use.
+ time.sleep(2)
+
+ start_daemon("snmpd")
+ while "snmpd" in daemons_list:
+ daemons_list.remove("snmpd")
+
+ # Now start all the other daemons
+ for daemon in daemons_list:
+ if self.daemons[daemon] == 0:
+ continue
+ start_daemon(daemon)
+
+ # Check if daemons are running.
+ wait_time = 30 if (gdb_routers or gdb_daemons) else 10
+ timeout = Timeout(wait_time)
+ for remaining in timeout:
+ if not check_daemon_files:
+ break
+ check = check_daemon_files[0]
+ if self.path_exists(check):
+ check_daemon_files.pop(0)
+ continue
+ self.logger.debug("Waiting {}s for {} to appear".format(remaining, check))
+ time.sleep(0.5)
+
+ if check_daemon_files:
+ assert False, "Timeout({}) waiting for {} to appear on {}".format(
+ wait_time, check_daemon_files[0], self.name
+ )
+
+ # Update the permissions on the log files
+ self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
+ self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))
+
+ if "frr" in logd_options:
+ logdopt = logd_options["frr"]
+ if "all" in logdopt or self.name in logdopt:
+ tail_log_files.append("{}/{}/frr.log".format(self.logdir, self.name))
+
+ for tailf in tail_log_files:
+ self.run_in_window("tail -n10000 -F " + tailf, title=tailf, background=True)
+
+ return ""
+
+ def pid_exists(self, pid):
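+        """Return True if `pid` appears to be a live process on this node."""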
+ if pid <= 0:
+ return False
+ try:
+ # If we are not using PID namespaces then we will be a parent of the pid,
+ # otherwise the init process of the PID namespace will have reaped the proc.
+ os.waitpid(pid, os.WNOHANG)
+ except Exception:
+ pass
+
+ rc, o, e = self.cmd_status("kill -0 " + str(pid), warn=False)
+ return rc == 0 or "No such process" not in e
+
+ def killRouterDaemons(
+ self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1"
+ ):
+ # Kill Running FRR
+ # Daemons(user specified daemon only) using SIGKILL
+ rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
+ errors = ""
+ daemonsNotRunning = []
+ if re.search(r"No such file or directory", rundaemons):
+ return errors
+ for daemon in daemons:
+ if rundaemons is not None and daemon in rundaemons:
+ numRunning = 0
+ dmns = rundaemons.split("\n")
+ # Exclude empty string at end of list
+ for d in dmns[:-1]:
+ if re.search(r"%s" % daemon, d):
+ daemonpidfile = d.rstrip()
+ daemonpid = self.cmd("cat %s" % daemonpidfile).rstrip()
+ if daemonpid.isdigit() and self.pid_exists(int(daemonpid)):
+ logger.debug(
+ "{}: killing {}".format(
+ self.name,
+ os.path.basename(daemonpidfile.rsplit(".", 1)[0]),
+ )
+ )
+ self.cmd_status("kill -KILL {}".format(daemonpid))
+ if self.pid_exists(int(daemonpid)):
+ numRunning += 1
+ while wait and numRunning > 0:
+ sleep(
+ 2,
+ "{}: waiting for {} daemon to be stopped".format(
+ self.name, daemon
+ ),
+ )
+
+ # 2nd round of kill if daemons didn't exit
+ for d in dmns[:-1]:
+ if re.search(r"%s" % daemon, d):
+ daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
+ if daemonpid.isdigit() and self.pid_exists(
+ int(daemonpid)
+ ):
+ logger.info(
+ "{}: killing {}".format(
+ self.name,
+ os.path.basename(
+ d.rstrip().rsplit(".", 1)[0]
+ ),
+ )
+ )
+ self.cmd_status(
+ "kill -KILL {}".format(daemonpid)
+ )
+ if daemonpid.isdigit() and not self.pid_exists(
+ int(daemonpid)
+ ):
+ numRunning -= 1
+ self.cmd("rm -- {}".format(daemonpidfile))
+ if wait:
+ errors = self.checkRouterCores(reportOnce=True)
+ if self.checkRouterVersion("<", minErrorVersion):
+ # ignore errors in old versions
+ errors = ""
+ if assertOnError and len(errors) > 0:
+ assert "Errors found - details follow:" == 0, errors
+ else:
+ daemonsNotRunning.append(daemon)
+ if len(daemonsNotRunning) > 0:
+            errors = errors + "Daemons are not running: {}".format(", ".join(daemonsNotRunning))
+
+ return errors
+
+ def checkRouterCores(self, reportLeaks=True, reportOnce=False):
+ if reportOnce and not self.reportCores:
+ return
+ reportMade = False
+ traces = ""
+ for daemon in self.daemons:
+ if self.daemons[daemon] == 1:
+ # Look for core file
+ corefiles = glob.glob(
+ "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
+ )
+ if len(corefiles) > 0:
+ backtrace = gdb_core(self, daemon, corefiles)
+ traces = (
+ traces
+ + f"\nCORE FOUND: {self.name}: {daemon} crashed. Backtrace follows:\n{backtrace}"
+ )
+ reportMade = True
+ elif reportLeaks:
+ log = self.getStdErr(daemon)
+ if "memstats" in log:
+ sys.stderr.write(
+ "%s: %s has memory leaks:\n" % (self.name, daemon)
+ )
+ traces = traces + "\n%s: %s has memory leaks:\n" % (
+ self.name,
+ daemon,
+ )
+ log = re.sub("core_handler: ", "", log)
+ log = re.sub(
+ r"(showing active allocations in memory group [a-zA-Z0-9]+)",
+ r"\n ## \1",
+ log,
+ )
+ log = re.sub("memstats: ", " ", log)
+ sys.stderr.write(log)
+ reportMade = True
+            # Look for AddressSanitizer errors and append to /tmp/AddressSanitizer.txt if found
+ if checkAddressSanitizerError(
+ self.getStdErr(daemon), self.name, daemon, self.logdir
+ ):
+ sys.stderr.write(
+ "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
+ )
+ traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
+ self.name,
+ daemon,
+ )
+ reportMade = True
+ if reportMade:
+ self.reportCores = False
+ return traces
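+
+    # Illustrative usage sketch (an assumption about caller behavior, not
+    # upstream code): the returned string is empty when no cores, leaks, or
+    # sanitizer errors were found, so it can feed an assert directly.
+    #
+    #   traces = router.checkRouterCores(reportLeaks=False)
+    #   assert not traces, traces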
+
+ def checkRouterRunning(self):
+ "Check if router daemons are running and collect crashinfo they don't run"
+
+ global fatal_error
+
+ daemonsRunning = self.cmd(
+ 'vtysh -c "show logging" | grep "Logging configuration for"'
+ )
+        # Look for AddressSanitizer errors in vtysh output and append to /tmp/AddressSanitizer.txt if found
+ if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
+ return "%s: vtysh killed by AddressSanitizer" % (self.name)
+
+ for daemon in self.daemons:
+ if daemon == "snmpd":
+ continue
+            if (self.daemons[daemon] == 1) and (daemon not in daemonsRunning):
+ sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
+ if daemon == "staticd":
+ sys.stderr.write(
+ "You may have a copy of staticd installed but are attempting to test against\n"
+ )
+ sys.stderr.write(
+ "a version of FRR that does not have staticd, please cleanup the install dir\n"
+ )
+
+ # Look for core file
+ corefiles = glob.glob(
+ "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
+ )
+ if len(corefiles) > 0:
+ gdb_core(self, daemon, corefiles)
+ else:
+                    # No core found - if a matching logfile exists, print its last 20 lines
+ if os.path.isfile(
+ "{}/{}/{}.log".format(self.logdir, self.name, daemon)
+ ):
+ log_tail = subprocess.check_output(
+ [
+ "tail -n20 {}/{}/{}.log 2> /dev/null".format(
+ self.logdir, self.name, daemon
+ )
+ ],
+ shell=True,
+ )
+ sys.stderr.write(
+ "\nFrom %s %s %s log file:\n"
+ % (self.routertype, self.name, daemon)
+ )
+ sys.stderr.write("%s\n" % log_tail)
+
+            # Look for AddressSanitizer errors and append to /tmp/AddressSanitizer.txt if found
+ if checkAddressSanitizerError(
+ self.getStdErr(daemon), self.name, daemon, self.logdir
+ ):
+ return "%s: Daemon %s not running - killed by AddressSanitizer" % (
+ self.name,
+ daemon,
+ )
+
+ return "%s: Daemon %s not running" % (self.name, daemon)
+ return ""
+
+ def checkRouterVersion(self, cmpop, version):
+ """
+ Compares router version using operation `cmpop` with `version`.
+        Valid `cmpop` values:
+        * `>=`: has the same version or greater
+        * `>`: has a greater version
+        * `=`: has the same version
+        * `<`: has a lesser version
+        * `<=`: has the same version or lesser
+
+ Usage example: router.checkRouterVersion('>', '1.0')
+ """
+
+ # Make sure we have version information first
+        if self.version is None:
+ self.version = self.cmd(
+ os.path.join(self.daemondir, "bgpd") + " -v"
+ ).split()[2]
+ logger.info("{}: running version: {}".format(self.name, self.version))
+
+ rversion = self.version
+        if rversion is None:
+ return False
+
+ result = version_cmp(rversion, version)
+ if cmpop == ">=":
+ return result >= 0
+ if cmpop == ">":
+ return result > 0
+ if cmpop == "=":
+ return result == 0
+ if cmpop == "<":
+ return result < 0
+ if cmpop == "<":
+ return result < 0
+ if cmpop == "<=":
+ return result <= 0
+
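+    # Illustrative usage sketch (a common topotest idiom; "tgen" is assumed
+    # to come from get_topogen()): skip a test on FRR versions that are too
+    # old for the feature under test.
+    #
+    #   if tgen.routers()["r1"].checkRouterVersion("<", "7.0"):
+    #       pytest.skip("test requires FRR 7.0 or later")
+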
+ def get_ipv6_linklocal(self):
+ "Get LinkLocal Addresses from interfaces"
+
+ linklocal = []
+
+ ifaces = self.cmd("ip -6 address")
+ # Fix newlines (make them all the same)
+ ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
+ interface = ""
+ ll_per_if_count = 0
+ for line in ifaces:
+ m = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line)
+ if m:
+ interface = m.group(1)
+ ll_per_if_count = 0
+ m = re.search(
+ "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
+ line,
+ )
+ if m:
+ local = m.group(1)
+ ll_per_if_count += 1
+ if ll_per_if_count > 1:
+ linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
+ else:
+ linklocal += [[interface, local]]
+ return linklocal
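+
+    # Illustrative sketch of the return value (addresses are made up): one
+    # [interface, address] pair per link-local address, with "-<count>"
+    # appended to the interface name when it holds more than one.
+    #
+    #   [["r1-eth0", "fe80::1c5c:9eff:fe3c:1"],
+    #    ["r1-eth0-2", "fe80::2c5c:9eff:fe3c:2"]]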
+
+ def daemon_available(self, daemon):
+ "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"
+
+ daemon_path = os.path.join(self.daemondir, daemon)
+ if not os.path.isfile(daemon_path):
+ return False
+ if daemon == "ldpd":
+ if version_cmp(platform.release(), "4.5") < 0:
+ return False
+ if not module_present("mpls-router", load=False):
+ return False
+ if not module_present("mpls-iptunnel", load=False):
+ return False
+ return True
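+
+    # Illustrative usage sketch (standard pytest skip pattern, assumed here):
+    #
+    #   if not router.daemon_available("ldpd"):
+    #       pytest.skip("ldpd or kernel MPLS support not available")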
+
+ def get_routertype(self):
+ "Return the type of Router (frr)"
+
+ return self.routertype
+
+ def report_memory_leaks(self, filename_prefix, testscript):
+ "Report Memory Leaks to file prefixed with given string"
+
+ leakfound = False
+ filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
+ for daemon in self.daemons:
+ if self.daemons[daemon] == 1:
+ log = self.getStdErr(daemon)
+ if "memstats" in log:
+ # Found memory leak
+ logger.warning(
+ "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
+ )
+ if not leakfound:
+ leakfound = True
+ # Check if file already exists
+ fileexists = os.path.isfile(filename)
+ leakfile = open(filename, "a")
+ if not fileexists:
+ # New file - add header
+ leakfile.write(
+ "# Memory Leak Detection for topotest %s\n\n"
+ % testscript
+ )
+ leakfile.write("## Router %s\n" % self.name)
+ leakfile.write("### Process %s\n" % daemon)
+ log = re.sub("core_handler: ", "", log)
+ log = re.sub(
+ r"(showing active allocations in memory group [a-zA-Z0-9]+)",
+ r"\n#### \1\n",
+ log,
+ )
+ log = re.sub("memstats: ", " ", log)
+ leakfile.write(log)
+ leakfile.write("\n")
+ if leakfound:
+ leakfile.close()
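+
+    # Illustrative usage sketch (hypothetical prefix and script name): leaks
+    # found while running test_example.py would land in
+    # /tmp/memleak_test_example.txt.
+    #
+    #   router.report_memory_leaks("/tmp/memleak_", "test_example.py")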
+
+
+def frr_unicode(s):
+ """Convert string to unicode, depending on python version"""
+ if sys.version_info[0] > 2:
+ return s
+ else:
+ return unicode(s) # pylint: disable=E0602
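+
+# Illustrative usage sketch: a no-op on Python 3, a str-to-unicode promotion
+# on Python 2, so callers get text in both cases.
+#
+#   name = frr_unicode("r1-eth0")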
+
+
+def is_mapping(o):
+ return isinstance(o, Mapping)