summaryrefslogtreecommitdiffstats
path: root/pimd
diff options
context:
space:
mode:
Diffstat (limited to 'pimd')
-rw-r--r--pimd/.gitignore4
-rw-r--r--pimd/AUTHORS7
-rw-r--r--pimd/CAVEATS182
-rw-r--r--pimd/COMMANDS83
-rw-r--r--pimd/DEBUG84
-rw-r--r--pimd/LINUX_KERNEL_MROUTE_MFC24
-rw-r--r--pimd/Makefile10
-rw-r--r--pimd/README97
-rw-r--r--pimd/TODO70
-rw-r--r--pimd/TROUBLESHOOTING33
-rw-r--r--pimd/mtracebis.c577
-rw-r--r--pimd/mtracebis_netlink.c728
-rw-r--r--pimd/mtracebis_netlink.h132
-rw-r--r--pimd/mtracebis_routeget.c89
-rw-r--r--pimd/mtracebis_routeget.h18
-rw-r--r--pimd/pim6_cmd.c1900
-rw-r--r--pimd/pim6_cmd.h52
-rw-r--r--pimd/pim6_main.c199
-rw-r--r--pimd/pim6_mld.c3232
-rw-r--r--pimd/pim6_mld.h374
-rw-r--r--pimd/pim6_mld_protocol.h112
-rw-r--r--pimd/pim_addr.c49
-rw-r--r--pimd/pim_addr.h158
-rw-r--r--pimd/pim_assert.c736
-rw-r--r--pimd/pim_assert.h67
-rw-r--r--pimd/pim_bfd.c121
-rw-r--r--pimd/pim_bfd.h45
-rw-r--r--pimd/pim_bsm.c1453
-rw-r--r--pimd/pim_bsm.h213
-rw-r--r--pimd/pim_cmd.c6734
-rw-r--r--pimd/pim_cmd.h64
-rw-r--r--pimd/pim_cmd_common.c5695
-rw-r--r--pimd/pim_cmd_common.h193
-rw-r--r--pimd/pim_errors.c36
-rw-r--r--pimd/pim_errors.h20
-rw-r--r--pimd/pim_hello.c496
-rw-r--r--pimd/pim_hello.h25
-rw-r--r--pimd/pim_iface.c1834
-rw-r--r--pimd/pim_iface.h249
-rw-r--r--pimd/pim_ifchannel.c1494
-rw-r--r--pimd/pim_ifchannel.h148
-rw-r--r--pimd/pim_igmp.c1537
-rw-r--r--pimd/pim_igmp.h233
-rw-r--r--pimd/pim_igmp_join.h91
-rw-r--r--pimd/pim_igmp_mtrace.c848
-rw-r--r--pimd/pim_igmp_mtrace.h92
-rw-r--r--pimd/pim_igmp_stats.c43
-rw-r--r--pimd/pim_igmp_stats.h46
-rw-r--r--pimd/pim_igmpv2.c252
-rw-r--r--pimd/pim_igmpv2.h22
-rw-r--r--pimd/pim_igmpv3.c2028
-rw-r--r--pimd/pim_igmpv3.h101
-rw-r--r--pimd/pim_instance.c248
-rw-r--r--pimd/pim_instance.h200
-rw-r--r--pimd/pim_int.c31
-rw-r--r--pimd/pim_int.h15
-rw-r--r--pimd/pim_join.c604
-rw-r--r--pimd/pim_join.h21
-rw-r--r--pimd/pim_jp_agg.c352
-rw-r--r--pimd/pim_jp_agg.h40
-rw-r--r--pimd/pim_macro.c416
-rw-r--r--pimd/pim_macro.h29
-rw-r--r--pimd/pim_main.c166
-rw-r--r--pimd/pim_memory.c39
-rw-r--r--pimd/pim_memory.h40
-rw-r--r--pimd/pim_mlag.c1089
-rw-r--r--pimd/pim_mlag.h88
-rw-r--r--pimd/pim_mroute.c1366
-rw-r--r--pimd/pim_mroute.h168
-rw-r--r--pimd/pim_msdp.c1448
-rw-r--r--pimd/pim_msdp.h355
-rw-r--r--pimd/pim_msdp_packet.c780
-rw-r--r--pimd/pim_msdp_packet.h61
-rw-r--r--pimd/pim_msdp_socket.c278
-rw-r--r--pimd/pim_msdp_socket.h11
-rw-r--r--pimd/pim_msg.c331
-rw-r--r--pimd/pim_msg.h230
-rw-r--r--pimd/pim_nb.c428
-rw-r--r--pimd/pim_nb.h223
-rw-r--r--pimd/pim_nb_config.c2912
-rw-r--r--pimd/pim_neighbor.c788
-rw-r--r--pimd/pim_neighbor.h65
-rw-r--r--pimd/pim_nht.c1116
-rw-r--r--pimd/pim_nht.h73
-rw-r--r--pimd/pim_oil.c578
-rw-r--r--pimd/pim_oil.h205
-rw-r--r--pimd/pim_pim.c997
-rw-r--r--pimd/pim_pim.h51
-rw-r--r--pimd/pim_register.c756
-rw-r--r--pimd/pim_register.h36
-rw-r--r--pimd/pim_routemap.c43
-rw-r--r--pimd/pim_rp.c1328
-rw-r--r--pimd/pim_rp.h77
-rw-r--r--pimd/pim_rpf.c395
-rw-r--r--pimd/pim_rpf.h60
-rw-r--r--pimd/pim_signals.c64
-rw-r--r--pimd/pim_signals.h13
-rw-r--r--pimd/pim_sock.c420
-rw-r--r--pimd/pim_sock.h43
-rw-r--r--pimd/pim_ssm.c135
-rw-r--r--pimd/pim_ssm.h31
-rw-r--r--pimd/pim_ssmpingd.c381
-rw-r--r--pimd/pim_ssmpingd.h31
-rw-r--r--pimd/pim_static.c279
-rw-r--r--pimd/pim_static.h34
-rw-r--r--pimd/pim_str.h41
-rw-r--r--pimd/pim_tib.c168
-rw-r--r--pimd/pim_tib.h20
-rw-r--r--pimd/pim_time.c164
-rw-r--r--pimd/pim_time.h25
-rw-r--r--pimd/pim_tlv.c769
-rw-r--r--pimd/pim_tlv.h109
-rw-r--r--pimd/pim_upstream.c2174
-rw-r--r--pimd/pim_upstream.h386
-rw-r--r--pimd/pim_util.c169
-rw-r--r--pimd/pim_util.h28
-rw-r--r--pimd/pim_vty.c503
-rw-r--r--pimd/pim_vty.h19
-rw-r--r--pimd/pim_vxlan.c1218
-rw-r--r--pimd/pim_vxlan.h143
-rw-r--r--pimd/pim_vxlan_instance.h37
-rw-r--r--pimd/pim_zebra.c525
-rw-r--r--pimd/pim_zebra.h33
-rw-r--r--pimd/pim_zlookup.c574
-rw-r--r--pimd/pim_zlookup.h37
-rw-r--r--pimd/pim_zpthread.c216
-rw-r--r--pimd/pimd.c161
-rw-r--r--pimd/pimd.h278
-rw-r--r--pimd/subdir.am180
-rw-r--r--pimd/test_igmpv3_join.c132
130 files changed, 61207 insertions, 0 deletions
diff --git a/pimd/.gitignore b/pimd/.gitignore
new file mode 100644
index 0000000..3f73471
--- /dev/null
+++ b/pimd/.gitignore
@@ -0,0 +1,4 @@
+/pimd
+/pim6d
+/mtracebis
+/test_igmpv3_join
diff --git a/pimd/AUTHORS b/pimd/AUTHORS
new file mode 100644
index 0000000..08869ff
--- /dev/null
+++ b/pimd/AUTHORS
@@ -0,0 +1,7 @@
+# Everton da Silva Marques <everton.marques@gmail.com>
+$ more ~/.gitconfig
+[user]
+ name = Everton Marques
+ email = everton.marques@gmail.com
+
+-x-
diff --git a/pimd/CAVEATS b/pimd/CAVEATS
new file mode 100644
index 0000000..120708b
--- /dev/null
+++ b/pimd/CAVEATS
@@ -0,0 +1,182 @@
+C1 IGMPv3 backward compatibility with IGMPv1 and IGMPv2 is not
+ implemented. See RFC 3376, 7.3. Multicast Router Behavior. That's
+ because only Source-Specific Multicast is currently targeted.
+
+C2 IGMPv3 support for forwarding any-source groups is not
+ implemented. Traffic for groups in mode EXCLUDE {empty} won't be
+ forwarded. See RFC 3376, 6.3. Source-Specific Forwarding
+ Rules. That's because only Source-Specific Multicast is currently
+ targeted.
+
+C3 Load Splitting of IP Multicast Traffic over ECMP is not supported.
+ See also: RFC 2991
+ Multipath Issues in Unicast and Multicast Next-Hop Selection
+ http://www.rfc-editor.org/rfc/rfc2991.txt
+
+C4 IPSec AH authentication is not supported (RFC 4601:
+ 6.3. Authentication Using IPsec).
+
+C5 PIM support is limited to SSM mode as defined in section 4.8.2
+ (PIM-SSM-Only Routers) of RFC4601. That's because only
+ Source-Specific Multicast is currently targeted.
+
+C6 PIM implementation currently does not support IPv6. PIM-SSM
+ requires IGMPv3 for IPv4 and MLDv2 for IPv6. MLDv2 is currently
+ missing. See also CAVEAT C9.
+
+C7 FIXED (S,G) Assert state machine (RFC 4601, section 4.6.1) is not
+ implemented. See also TODO T6. See also CAVEAT C10.
+
+C8 It is not possible to disable join suppression in order to
+ explicitly track the join membership of individual downstream
+ routers.
+ - IGMPv3 Explicit Membership Tracking is not supported.
+ When explicit tracking is enabled on a router, the router can
+ individually track the Internet Group Management Protocol (IGMP)
+ membership state of all reporting hosts. This feature allows the
+ router to achieve minimal leave latencies when hosts leave a
+ multicast group or channel. Example:
+ conf t
+ interface eth0
+ ip igmp explicit-tracking
+
+C9 Only IPv4 Address Family (number=1) is supported in the PIM Address
+ Family field.
+ See also RFC 4601: 5.1. PIM Address Family
+ See also CAVEAT C6.
+ See also http://www.iana.org/assignments/address-family-numbers
+
+C10 FIXED Assert metric depends on metric_preference and
+ route_metric. Those parameters should be fetched from RIB
+ (zebra). See also pim_rpf.c, pim_rpf_update().
+
+C11 SSM Mapping is not supported
+
+ SSM Mapping Overview:
+
+ SSM mapping introduces a means for the last hop router to discover
+ sources sending to groups. When SSM mapping is configured, if a
+ router receives an IGMPv1 or IGMPv2 membership report for a
+ particular group G, the router translates this report into one or
+ more (S, G) channel memberships for the well-known sources
+ associated with this group.
+
+ When the router receives an IGMPv1 or IGMPv2 membership report for
+ a group G, the router uses SSM mapping to determine one or more
+ source IP addresses for the group G. SSM mapping then translates
+ the membership report as an IGMPv3 report INCLUDE (G, [S1, G],
+ [S2, G]...[Sn, G] and continues as if it had received an IGMPv3
+ report. The router then sends out PIM joins toward (S1, G) to (Sn,
+ G) and continues to be joined to these groups as long as it
+ continues to receive the IGMPv1 or IGMPv2 membership reports and
+ as long as the SSM mapping for the group remains the same. SSM
+ mapping, thus, enables you to leverage SSM for video delivery to
+ legacy STBs that do not support IGMPv3 or for applications that do
+ not take advantage of the IGMPv3 host stack.
+
+ SSM mapping enables the last hop router to determine the source
+ addresses either by a statically configured table on the router or
+ by consulting a DNS server. When the statically configured table
+ is changed, or when the DNS mapping changes, the router will leave
+ the current sources associated with the joined groups.
+
+C12 FIXED MRIB for incongruent unicast/multicast topologies is not
+ supported. RPF mechanism currently just looks up the information
+ in the unicast routing table.
+
+ See also:
+ RFC5110: 2.2.3. Issue: Overlapping Unicast/Multicast Topology
+
+ Sometimes, multicast RPF mechanisms first look up the multicast
+ routing table, or M-RIB ("topology database") with a longest
+ prefix match algorithm, and if they find any entry (including a
+ default route), that is used; if no match is found, the unicast
+ routing table is used instead.
+
+C13 Can't detect change of primary address before the actual change.
+ Possible approach is to craft old interface address into ip source
+ address by using raw ip socket.
+
+ See also:
+
+ RFC 4601: 4.3.1. Sending Hello Messages
+
+ Before an interface goes down or changes primary IP address, a
+ Hello message with a zero HoldTime should be sent immediately
+ (with the old IP address if the IP address changed).
+
+ See also pim_sock_delete().
+
+C14 FIXED Detection of interface primary address changes may fail when
+ there are multiple addresses.
+ See also TODO T32.
+
+C15 Changes in interface secondary address list are not immediately
+ detected.
+ See also detect_secondary_address_change
+ See also TODO T31.
+
+C16 AMT Draft (mboned-auto-multicast) is not supported.
+ AMT = Automatic IP Multicast Without Explicit Tunnels
+
+ See also:
+
+ Draft
+ http://tools.ietf.org/html/draft-ietf-mboned-auto-multicast
+ http://tools.ietf.org/html/draft-ietf-mboned-auto-multicast-09
+
+ AMT gateway implementation for Linux
+ http://cs.utdallas.edu/amt/
+
+ AMT for Streaming (IPTV) on Global IP Multicast by Greg Shepherd (Cisco)
+ http://nznog.miniconf.org/nznog-2008-sysadmin-miniconf-greg-shepherd-iptv.pdf
+
+C17 SNMP / RFC 5060 (PIM MIB) is not supported.
+
+C18 MFC never recovers from removal of static route to source
+
+ # route add -host 1.2.3.4 gw 192.168.56.10
+ Before removal:
+ quagga-pimd-router# sh ip mroute
+ Source Group Proto Input iVifI Output oVifI TTL Uptime
+ 1.2.3.4 232.1.2.3 I eth1 3 eth0 2 1 00:00:36
+
+ # route del -host 1.2.3.4 gw 192.168.56.10
+ After removal: sh ip mroute --> empty output
+
+ # route add -host 1.2.3.4 gw 192.168.56.10
+ After the route is restored: sh ip mroute --> never recovers (empty output)
+
+ At this point, "no ip pim ssm" on the upstream interface (eth0) crashes pimd:
+
+ 2014/02/14 16:30:14 PIM: ifmembership_set: (S,G)=(1.2.3.4,232.1.2.3) membership now is NOINFO on interface eth0
+ 2014/02/14 16:30:14 PIM: pim_ifchannel_update_assert_tracking_desired: AssertTrackingDesired(1.2.3.4,232.1.2.3,eth0) changed from 1 to 0
+ 2014/02/14 16:30:14 PIM: pim_zebra.c del_oif: nonexistent protocol mask 2 removed OIF eth0 (vif_index=2, min_ttl=0) from channel (S,G)=(1.2.3.4,232.1.2.3)
+ 2014/02/14 16:30:14 PIM: pim_ifchannel_update_could_assert: CouldAssert(1.2.3.4,232.1.2.3,eth0) changed from 1 to 0
+ 2014/02/14 16:30:14 PIM: pim_ifchannel_update_my_assert_metric: my_assert_metric(1.2.3.4,232.1.2.3,eth0) changed from 0,0,0,10.0.2.15 to 1,4294967295,4294967295,0.0.0.0
+ 2014/02/14 16:30:14 PIM: pim_zebra.c del_oif: nonexistent protocol mask 1 removed OIF eth0 (vif_index=2, min_ttl=0) from channel (S,G)=(1.2.3.4,232.1.2.3)
+ 2014/02/14 16:30:14 PIM: Assertion `!IGMP_SOURCE_TEST_FORWARDING(source->source_flags)' failed in file pim_igmpv3.c, line 412, function igmp_source_delete
+
+C19 Provision to prevent group mode clash
+
+ Beware group mode clash. A host/application issuing IGMPv2
+ any-source joins for a group will disrupt SSM multicast for that
+ group.
+
+ For instance, support for source-specific static igmp WILL FAIL if
+ there is host/application issuing IGMPv2 any-source joins for the
+ same group.
+
+ The reason is the IGMPv2 any-source join forces qpimd to switch
+ the group mode to ASM (any-source multicast); however, qpimd is
+ unable to program ASM groups into the kernel; multicast won't
+ flow. There could be some provision to prevent such a behavior,
+ but currently there is none.
+
+C20 Multicast traceroute module is based on:
+ draft-ietf-idmr-traceroute-ipm-07
+ It only implements, so far, weak traceroutes. The multicast routing
+ state of the router is not queried but RPF path is followed along
+ PIM and IGMP enabled interfaces.
+
+-x-
diff --git a/pimd/COMMANDS b/pimd/COMMANDS
new file mode 100644
index 0000000..141ec62
--- /dev/null
+++ b/pimd/COMMANDS
@@ -0,0 +1,83 @@
+global configuration commands:
+ pimd:
+ ip multicast-routing Enable IP multicast forwarding
+ ip ssmpingd Enable ssmpingd operation
+
+ zebra:
+ ip mroute Configure static unicast route into MRIB for multicast RPF lookup
+
+interface configuration commands:
+ pimd:
+ ip igmp Enable IGMP operation
+ ip igmp join IGMP join multicast group
+ ip igmp query-interval <1-1800> IGMP host query interval
+ ip igmp query-max-response-time <1-25> IGMP max query response (seconds)
+ ip igmp query-max-response-time-dsec <10-250> IGMP max query response (deciseconds)
+ ip pim ssm Enable PIM SSM operation
+
+verification commands:
+ pimd:
+ show ip igmp interface IGMP interface information
+ show ip igmp join IGMP static join information
+ show ip igmp parameters IGMP parameters information
+ show ip igmp groups IGMP groups information
+ show ip igmp groups retransmissions IGMP group retransmission
+ show ip igmp sources IGMP sources information
+ show ip igmp sources retransmissions IGMP source retransmission
+ show ip igmp statistics IGMP statistics information
+ show ip pim address PIM interface address
+ show ip pim assert PIM interface assert
+ show ip pim assert-internal PIM interface internal assert state
+ show ip pim assert-metric PIM interface assert metric
+ show ip pim assert-winner-metric PIM interface assert winner metric
+ show ip pim designated-router PIM interface designated router
+ show ip pim hello PIM interface hello information
+ show ip pim interface PIM interface information
+ show ip pim lan-prune-delay PIM neighbors LAN prune delay parameters
+ show ip pim local-membership PIM interface local-membership
+ show ip pim jp-override-interval PIM interface J/P override interval
+ show ip pim join PIM interface join information
+ show ip pim neighbor PIM neighbor information
+ show ip pim rpf PIM cached source rpf information
+ show ip pim secondary PIM neighbor addresses
+ show ip pim upstream PIM upstream information
+ show ip pim upstream-join-desired PIM upstream join-desired
+ show ip pim upstream-rpf PIM upstream source rpf
+ show ip multicast Multicast global information
+ show ip mroute IP multicast routing table
+ show ip mroute count Route and packet count data
+ show ip rib IP unicast routing table
+ show ip ssmpingd ssmpingd operation
+
+ zebra:
+ show ip rpf Display RPF information for multicast source
+
+debug commands:
+ pimd:
+ clear ip interfaces Reset interfaces
+ clear ip igmp interfaces Reset IGMP interfaces
+ clear ip mroute Reset multicast routes
+ clear ip pim interfaces Reset PIM interfaces
+ clear ip pim oil Rescan PIM OIL (output interface list)
+ debug igmp IGMP protocol activity
+ debug mtrace Mtrace protocol activity
+ debug mroute PIM interaction with kernel MFC cache
+ debug pim PIM protocol activity
+ debug pim zebra ZEBRA protocol activity
+ debug ssmpingd ssmpingd activity
+ show debugging State of each debugging option
+ test igmp receive report Test reception of IGMPv3 report
+ test pim receive assert Test reception of PIM assert
+ test pim receive dump Test reception of PIM packet dump
+ test pim receive hello Test reception of PIM hello
+ test pim receive join Test reception of PIM join
+ test pim receive prune Test reception of PIM prune
+ test pim receive upcall Test reception of kernel upcall
+
+statistics commands:
+ pimd:
+ show memory pim PIM memory statistics
+
+vtysh:
+ mtrace Multicast traceroute
+-x-
diff --git a/pimd/DEBUG b/pimd/DEBUG
new file mode 100644
index 0000000..a6ad260
--- /dev/null
+++ b/pimd/DEBUG
@@ -0,0 +1,84 @@
+DEBUG HINTS
+
+ - Check the source is issuing multicast packets with TTL high enough
+ to reach the recipients.
+
+ - Check the multicast packets are not being dropped due to
+ fragmentation problems.
+
+ - Three easy options to test IGMPv3 joins from the receiver host:
+
+ 1) Configure pimd on the receiver host with "ip igmp join":
+
+ interface eth0
+ ip pim ssm
+ ip igmp join 239.1.1.1 1.1.1.1
+
+ 2) Use test_igmpv3_join command-line utility (provided with qpimd):
+
+ test_igmpv3_join eth0 239.1.1.1 1.1.1.1
+
+ 3) Use Stig Venaas' ssmping utility:
+
+ ssmping -I eth0 1.1.1.1
+
+ To see multicast responses with ssmping, you will need run on
+ the host 1.1.1.1 either:
+ a) Stig Venaas' ssmpingd command-line daemon
+ OR
+ b) qpimd built-in ssmpingd service:
+ conf t
+ ip ssmpingd 1.1.1.1
+
+ - Using nepim to generate multicast stream from 1.1.1.1 to 239.1.1.1:
+
+ Notices:
+
+ a) The host unicast address 1.1.1.1 must be reachable from the
+ receiver.
+
+ b) The nepim tool requires that the receiver be started *before* the
+ sender.
+
+ First: Start a receiver for that stream by running:
+
+ nepim -q -6 -j 1.1.1.1+239.1.1.1@eth0
+ (Remember to enable both "ip pim ssm" and "ip igmp" under eth0.)
+
+ Second: Start the sender at host 1.1.1.1.
+
+ The following command generates a 100-kbps multicast stream for
+ channel 1.1.1.1,239.1.1.1 with TTL 10 and 1000-byte payload per UDP
+ packet (to avoid fragmentation):
+
+ nepim -6 -M -b 1.1.1.1 -c 239.1.1.1 -T 10 -W 1000 -r 100k -a 1d
+
+
+
+SAMPLE DEBUG COMMANDS
+
+ conf t
+ int eth0
+ ip pim ssm
+
+ test pim receive hello eth0 192.168.0.2 600 10 111 1000 3000 0
+ test pim receive join eth0 600 192.168.0.1 192.168.0.2 239.1.1.1 1.1.1.1
+
+ show ip pim join
+
+
+INTEROPERABILITY WITH CISCO
+
+ ! Cisco IP Multicast command reference:
+ ! ftp://ftpeng.cisco.com/ipmulticast/Multicast-Commands
+ !
+ ip pim ssm default ! enable SSM mode for groups 232.0.0.0/8
+ ip multicast-routing
+ ip pim state-refresh disable
+ no ip pim dm-fallback
+ !
+ interface FastEthernet0
+ ip pim sparse-mode
+ ip igmp version 3
+
+-x-
diff --git a/pimd/LINUX_KERNEL_MROUTE_MFC b/pimd/LINUX_KERNEL_MROUTE_MFC
new file mode 100644
index 0000000..3e48246
--- /dev/null
+++ b/pimd/LINUX_KERNEL_MROUTE_MFC
@@ -0,0 +1,24 @@
+#
+# The Linux Kernel MFC (Multicast Forwarding Cache)
+#
+
+# Check Linux kernel multicast interfaces:
+cat /proc/net/dev_mcast
+
+# Check that interface eth0 is forwarding multicast:
+cat /proc/sys/net/ipv4/conf/eth0/mc_forwarding
+
+# Check Linux kernel multicast VIFs:
+cat /proc/net/ip_mr_vif
+Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote
+
+# Check Linux kernel MFC:
+# Oifs format = vifi:TTL
+cat /proc/net/ip_mr_cache
+Group Origin Iif Pkts Bytes Wrong Oifs
+
+# iproute2 can display the MFC:
+ip mroute show
+(2.2.2.2, 239.2.2.2) Iif: eth1 Oifs: eth0
+
+# -- end-of-file --
diff --git a/pimd/Makefile b/pimd/Makefile
new file mode 100644
index 0000000..87a5388
--- /dev/null
+++ b/pimd/Makefile
@@ -0,0 +1,10 @@
+all: ALWAYS
+ @$(MAKE) -s -C .. pimd/pimd
+%: ALWAYS
+ @$(MAKE) -s -C .. pimd/$@
+
+Makefile:
+ #nothing
+ALWAYS:
+.PHONY: ALWAYS makefiles
+.SUFFIXES:
diff --git a/pimd/README b/pimd/README
new file mode 100644
index 0000000..1db0aad
--- /dev/null
+++ b/pimd/README
@@ -0,0 +1,97 @@
+INTRODUCTION
+
+ qpimd aims to implement a PIM (Protocol Independent Multicast)
+ daemon for the FRR Routing Suite.
+
+ qpimd implements PIM-SM (Sparse Mode) of RFC 4601.
+ Additionally MSDP has been implemented.
+
+ In order to deliver end-to-end multicast routing control
+ plane, qpimd includes the router-side of IGMPv[2|3] (RFC 3376).
+
+LICENSE
+
+ qpimd - pimd for FRR
+ Copyright (C) 2008 Everton da Silva Marques
+
+ qpimd is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2,
+ or (at your option) any later version.
+
+ qpimd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with qpimd; see the file COPYING. If not, write
+ to the Free Software Foundation, Inc., 59 Temple Place - Suite
+ 330, Boston, MA 02111-1307, USA.
+
+HOME SITE
+
+ qpimd lives at:
+
+ https://github.com/frrouting/frr
+
+PLATFORMS
+
+ qpimd has been tested with Debian Jessie.
+
+REQUIREMENTS
+
+ qpimd requires FRR (2.0 or higher)
+
+
+CONFIGURATION COMMANDS
+
+ See available commands in the file pimd/COMMANDS.
+
+KNOWN CAVEATS
+
+ See list of known caveats in the file pimd/CAVEATS.
+
+SUPPORT
+
+ Please post comments, questions, patches, bug reports at the
+ support site:
+
+ https://frrouting.org/frr
+
+RELATED WORK
+
+ igmprt: An IGMPv3-router implementation
+ - http://www.loria.fr/~lahmadi/igmpv3-router.html
+
+ USC pimd: PIMv2-SM daemon
+ - http://netweb.usc.edu/pim/pimd (URL broken in 2008-12-23)
+ - http://packages.debian.org/source/sid/pimd (from Debian)
+
+ troglobit pimd: This is the original USC pimd from
+ http://netweb.usc.edu/pim/. On January 16, 2010, it was revived
+ with the intention to collect patches floating around in
+ Debian, Gentoo, Lintrack and other distribution repositories
+ and to provide a central point of collaboration.
+ - http://github.com/troglobit/pimd
+
+ zpimd: zpimd is not dependent of zebra or any other routing daemon
+ - ftp://robur.slu.se/pub/Routing/Zebra
+ - http://sunsite2.icm.edu.pl/pub/unix/routing/zpimd
+
+ mrd6: an IPv6 Multicast Router for Linux systems
+ - http://fivebits.net/proj/mrd6/
+
+ MBGP: Implementation of RFC 2858 for Quagga
+ - git://git.coplanar.net/~balajig/quagga
+ - http://www.gossamer-threads.com/lists/quagga/dev/18000
+
+REFERENCES
+
+ IANA Protocol Independent Multicast (PIM) Parameters
+ http://www.iana.org/assignments/pim-parameters/pim-parameters.txt
+
+ Address Family Numbers
+ http://www.iana.org/assignments/address-family-numbers
+
+ -- END --
diff --git a/pimd/TODO b/pimd/TODO
new file mode 100644
index 0000000..4a14775
--- /dev/null
+++ b/pimd/TODO
@@ -0,0 +1,70 @@
+T1 Consider reliable pim solution (refresh reduction)
+ A Reliable Transport Mechanism for PIM
+ http://tools.ietf.org/wg/pim/draft-ietf-pim-port/
+ PORT=PIM-Over-Reliable-Transport
+
+T2 If an interface changes one of its secondary IP addresses, a Hello
+ message with an updated Address_List option and a non-zero
+ HoldTime should be sent immediately.
+ See also detect_secondary_address_change
+ See also CAVEAT C15.
+ See also RFC 4601: 4.3.1. Sending Hello Messages
+
+T3 Lightweight MLDv2
+ http://tools.ietf.org/html/draft-ietf-mboned-lightweight-igmpv3-mldv2-05
+ http://www.ietf.org/internet-drafts/draft-ietf-mboned-lightweight-igmpv3-mldv2-05.txt
+ http://www.ietf.org/html.charters/mboned-charter.html
+
+T4 Static igmp join fails when loading config at boot time
+
+ ! Wrong behavior seen at boot time:
+ !
+ 2010/02/22 08:59:00 PIM: igmp_source_forward_start: ignoring request for
+ looped MFC entry (S,G)=(3.3.3.3,239.3.3.3): igmp_sock=12 oif=eth0 vif_index=2
+
+ ! Correct behavior seen later:
+ !
+ 2010/02/22 09:03:16 PIM: igmp_source_forward_start: ignoring request for
+ looped MFC entry (S,G)=(2.2.2.2,239.2.2.2): igmp_sock=17 oif=lo vif_index=1
+
+ ! To see the wrong message at boot:
+ !
+ debug igmp trace
+ !
+ interface lo
+ ip igmp
+ ip igmp join 239.2.2.2 2.2.2.2
+ ip igmp join 239.3.3.3 3.3.3.3
+ !
+
+ ! Interfaces indexes:
+ Interface Address ifi Vif PktsIn PktsOut BytesIn BytesOut
+ eth0 200.202.112.3 2 2 0 0 0 0
+ lo 127.0.0.1 1 1 0 0 0 0
+
+T5 PIM Neighbor Reduction
+ https://datatracker.ietf.org/doc/draft-wijnands-pim-neighbor-reduction/
+
+ "In a transit LAN (no directly connected source or receiver), many
+ of the PIM procedures don't apply. (...) This proposal describes
+ a procedure to reduce the amount of neighbors established over a
+ transit LAN."
+
+T6 Single Stream Multicast Fast Reroute (SMFR) Method
+ https://datatracker.ietf.org/doc/draft-liu-pim-single-stream-multicast-frr/
+
+ "This document proposes an IP multicast fast convergence method
+ based on differentiating primary and backup PIM join."
+
+T7 RFC5384 - The Join Attribute Format
+ "This document describes a modification of the Join message that
+ allows a node to associate attributes with a particular tree."
+
+T8 PIM Multi-Topology ID (MT-ID) Join-Attribute
+ http://tools.ietf.org/html/draft-cai-pim-mtid-00
+ Depends on T7.
+
+ "This draft introduces a new type of PIM Join Attribute used to
+ encode the identity of the topology PIM uses for RPF."
+
+-x-
diff --git a/pimd/TROUBLESHOOTING b/pimd/TROUBLESHOOTING
new file mode 100644
index 0000000..7d1f52d
--- /dev/null
+++ b/pimd/TROUBLESHOOTING
@@ -0,0 +1,33 @@
+TROUBLESHOOTING
+
+# Check kernel mcast cache
+# On Linux:
+ip mroute show
+
+! qpimd on last-hop router
+! . attached to mcast receiver
+! . running both PIM-SSM and IGMPv3
+!
+show ip mroute (kernel mcast programming is correct?)
+show ip pim upstream (we joined our upstream?)
+show ip pim neighbor (upstream is neighbor?)
+show ip pim interface (pim enabled on interfaces?)
+show ip multicast (multicast enabled at all?)
+show ip rib SRC (unicast route towards source?)
+
+show ip igmp sources (receiver joined on interface?)
+show ip igmp interface (igmp enabled on receiver interface?)
+
+! qpimd on intermediate routers
+! . may be attached to mcast source
+! . running only PIM-SSM, not IGMPv3
+!
+show ip mroute (kernel mcast programming is correct?)
+show ip pim upstream (we joined our upstream?)
+show ip pim join (downstream joined us?)
+show ip pim neighbor (downstream is neighbor?)
+show ip pim interface (pim enabled on interfaces?)
+show ip multicast (multicast enabled at all?)
+show ip rib SRC (unicast route towards source?)
+
+--EOF--
diff --git a/pimd/mtracebis.c b/pimd/mtracebis.c
new file mode 100644
index 0000000..9c3f1a7
--- /dev/null
+++ b/pimd/mtracebis.c
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Multicast Traceroute for FRRouting
+ * Copyright (C) 2018 Mladen Sablic
+ */
+
+#include <zebra.h>
+
+#ifdef __linux__
+
+#include "pim_igmp_mtrace.h"
+
+#include "checksum.h"
+#include "prefix.h"
+#include "mtracebis_routeget.h"
+#include <sys/select.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <net/if.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <netdb.h>
+
+#define MTRACEBIS_VERSION "0.1"
+#define MTRACE_TIMEOUT (5)
+
+#define IP_HDR_LEN (sizeof(struct ip))
+#define IP_RA_LEN (4)
+#define MTRACE_BUF_LEN (MTRACE_HDR_SIZE + (MTRACE_MAX_HOPS * MTRACE_RSP_SIZE))
+#define IP_AND_MTRACE_BUF_LEN (IP_HDR_LEN + IP_RA_LEN + MTRACE_BUF_LEN)
+
+static const char *progname;
+/* Print command-line usage to stderr. */
+static void usage(void)
+{
+	fprintf(stderr, "Usage : %s <multicast source> [<multicast group>]\n",
+		progname);
+}
+/* Print program name and version to stderr. */
+static void version(void)
+{
+	fprintf(stderr, "%s %s\n", progname, MTRACEBIS_VERSION);
+}
+
+/*
+ * Print the reverse-DNS name of an IPv4 host ("?" when no PTR record is
+ * found) followed by its dotted-quad address in parentheses.
+ */
+static void print_host(struct in_addr addr)
+{
+	struct hostent *h;
+	char buf[PREFIX_STRLEN];
+
+	h = gethostbyaddr(&addr, sizeof(addr), AF_INET);
+	if (h == NULL)
+		printf("?");
+	else
+		printf("%s", h->h_name);
+	printf(" (%s) ", inet_ntop(AF_INET, &addr, buf, sizeof(buf)));
+}
+/* Print the hop index as a right-aligned, negated line number. */
+static void print_line_no(int i)
+{
+	printf("%3d ", -i);
+}
+
+/*
+ * Return a human-readable name for an mtrace routing-protocol code.
+ * Unknown codes are formatted into a static buffer, so the returned
+ * pointer must be used before the next call (not thread-safe).
+ */
+static const char *rtg_proto_str(enum mtrace_rtg_proto proto)
+{
+	static char buf[80];
+
+	buf[0] = '\0';
+
+	switch (proto) {
+	case MTRACE_RTG_PROTO_DVMRP:
+		return "DVMRP";
+	case MTRACE_RTG_PROTO_MOSPF:
+		return "MOSPF";
+	case MTRACE_RTG_PROTO_PIM:
+		return "PIM";
+	case MTRACE_RTG_PROTO_CBT:
+		return "CBT";
+	case MTRACE_RTG_PROTO_PIM_SPECIAL:
+		return "PIM special";
+	case MTRACE_RTG_PROTO_PIM_STATIC:
+		return "PIM static";
+	case MTRACE_RTG_PROTO_DVMRP_STATIC:
+		return "DVMRP static";
+	case MTRACE_RTG_PROTO_PIM_MBGP:
+		return "PIM MBGP";
+	case MTRACE_RTG_PROTO_CBT_SPECIAL:
+		return "CBT special";
+	case MTRACE_RTG_PROTO_CBT_STATIC:
+		return "CBT static";
+	case MTRACE_RTG_PROTO_PIM_ASSERT:
+		return "PIM assert";
+	default:
+		snprintf(buf, sizeof(buf), "unknown protocol (%d)", proto);
+		return buf;
+	}
+}
+/* Print the routing-protocol name of a response block. */
+static void print_rtg_proto(uint32_t rtg_proto)
+{
+	printf("%s", rtg_proto_str(rtg_proto));
+}
+/* Print the TTL threshold required to forward on this hop. */
+static void print_fwd_ttl(uint32_t fwd_ttl)
+{
+	printf("thresh^ %d", fwd_ttl);
+}
+
+/*
+ * Return a human-readable description of an mtrace forwarding code.
+ * Unknown codes are formatted into a static buffer, so the returned
+ * pointer must be used before the next call (not thread-safe).
+ */
+static const char *fwd_code_str(enum mtrace_fwd_code code)
+{
+	static char buf[80];
+
+	buf[0] = '\0';
+
+	switch (code) {
+	case MTRACE_FWD_CODE_NO_ERROR:
+		return "no error";
+	case MTRACE_FWD_CODE_WRONG_IF:
+		return "wrong interface";
+	case MTRACE_FWD_CODE_PRUNE_SENT:
+		return "prune sent";
+	case MTRACE_FWD_CODE_PRUNE_RCVD:
+		return "prune received";
+	case MTRACE_FWD_CODE_SCOPED:
+		return "scoped";
+	case MTRACE_FWD_CODE_NO_ROUTE:
+		return "no route";
+	case MTRACE_FWD_CODE_WRONG_LAST_HOP:
+		return "wrong last hop";
+	case MTRACE_FWD_CODE_NOT_FORWARDING:
+		return "not forwarding";
+	case MTRACE_FWD_CODE_REACHED_RP:
+		return "reached RP";
+	case MTRACE_FWD_CODE_RPF_IF:
+		return "RPF interface";
+	case MTRACE_FWD_CODE_NO_MULTICAST:
+		return "no multicast";
+	case MTRACE_FWD_CODE_INFO_HIDDEN:
+		return "info hidden";
+	case MTRACE_FWD_CODE_NO_SPACE:
+		return "no space";
+	case MTRACE_FWD_CODE_OLD_ROUTER:
+		return "old router";
+	case MTRACE_FWD_CODE_ADMIN_PROHIB:
+		return "admin. prohib.";
+	default:
+		snprintf(buf, sizeof(buf), "unknown fwd. code (%d)", code);
+		return buf;
+	}
+}
+/* Print the forwarding-code description of a response block. */
+static void print_fwd_code(uint32_t fwd_code)
+{
+	printf("%s", fwd_code_str(fwd_code));
+}
+
+/*
+ * Print one hop's response block: the outgoing interface host, then either
+ * the routing information (protocol, optional RP marker, (*,G)/(S,G) state
+ * for PIM, TTL threshold) when forwarding succeeded or reached the RP, or
+ * the forwarding error code otherwise.
+ */
+static void print_rsp(struct igmp_mtrace_rsp *rsp)
+{
+	print_host(rsp->outgoing);
+	if (rsp->fwd_code == 0 || rsp->fwd_code == MTRACE_FWD_CODE_REACHED_RP) {
+		print_rtg_proto(rsp->rtg_proto);
+		printf(" ");
+		if (rsp->fwd_code == MTRACE_FWD_CODE_REACHED_RP)
+			printf("(RP) ");
+		if (rsp->rtg_proto == MTRACE_RTG_PROTO_PIM) {
+			switch (rsp->src_mask) {
+			case MTRACE_SRC_MASK_GROUP:
+				printf("(*,G) ");
+				break;
+			case MTRACE_SRC_MASK_SOURCE:
+				printf("(S,G) ");
+				break;
+			}
+		}
+		print_fwd_ttl(rsp->fwd_ttl);
+	} else {
+		print_fwd_code(rsp->fwd_code);
+	}
+	printf("\n");
+}
+/* Print line 0 of the trace: the query destination host. */
+static void print_dest(struct igmp_mtrace *mtrace)
+{
+	print_line_no(0);
+	print_host(mtrace->dst_addr);
+	printf("\n");
+}
+/*
+ * Print the round-trip time and the sum of per-hop forwarding TTL
+ * thresholds over the first 'hops' response blocks.
+ */
+static void print_summary(struct igmp_mtrace *mtrace, int hops, long msec)
+{
+	int i;
+	int t = 0;
+
+	for (i = 0; i < hops; i++)
+		t += mtrace->rsp[i].fwd_ttl;
+
+	printf("Round trip time %ld ms; total ttl of %d required.\n", msec, t);
+}
+/* Print the destination line, every hop's response, and the summary. */
+static void print_responses(struct igmp_mtrace *mtrace, int hops, long msec)
+{
+	int i;
+
+	print_dest(mtrace);
+
+	for (i = 0; i < hops; i++) {
+		print_line_no(i + 1);
+		print_rsp(&mtrace->rsp[i]);
+	}
+	print_summary(mtrace, hops, msec);
+}
+
+/*
+ * Send the mtrace query (header only, no response blocks) to 'to_addr'
+ * over the raw IGMP socket.  Returns 0 on success, -1 on send failure.
+ */
+static int send_query(int fd, struct in_addr to_addr,
+		      struct igmp_mtrace *mtrace)
+{
+	struct sockaddr_in to;
+	socklen_t tolen;
+	int sent;
+
+	memset(&to, 0, sizeof(to));
+	to.sin_family = AF_INET;
+	to.sin_addr = to_addr;
+	tolen = sizeof(to);
+
+	sent = sendto(fd, (char *)mtrace, sizeof(*mtrace), MSG_DONTWAIT,
+		      (struct sockaddr *)&to, tolen);
+
+	if (sent < 1)
+		return -1;
+	return 0;
+}
+/* Print a one-line description of the query: source, destination, group. */
+static void print_query(struct igmp_mtrace *mtrace)
+{
+	char src_str[INET_ADDRSTRLEN];
+	char dst_str[INET_ADDRSTRLEN];
+	char grp_str[INET_ADDRSTRLEN];
+
+	printf("* Mtrace from %s to %s via group %s\n",
+	       inet_ntop(AF_INET, &mtrace->src_addr, src_str, sizeof(src_str)),
+	       inet_ntop(AF_INET, &mtrace->dst_addr, dst_str, sizeof(dst_str)),
+	       inet_ntop(AF_INET, &mtrace->grp_addr, grp_str, sizeof(grp_str)));
+}
+
+/*
+ * Receive and validate one packet from the raw IGMP socket as an mtrace
+ * response.
+ *
+ * Checks, in order: an IPv4 header is present, its checksum verifies, the
+ * header length and total length are within sane bounds, the embedded
+ * mtrace checksum verifies, and the message type is a response.  On
+ * success the number of response blocks is stored in *hops and the whole
+ * message copied to *mtracer (both optional, may be NULL).
+ *
+ * Returns 0 on success, -1 on any receive error or validation failure
+ * (silently for packets that simply are not mtrace responses).
+ */
+static int recv_response(int fd, int *hops, struct igmp_mtrace *mtracer)
+{
+	int recvd;
+	char mtrace_buf[IP_AND_MTRACE_BUF_LEN];
+	struct ip *ip;
+	struct igmp_mtrace *mtrace;
+	int mtrace_len;
+	int responses;
+	unsigned short sum;
+	size_t mtrace_off;
+	size_t ip_len;
+
+	recvd = recvfrom(fd, mtrace_buf, IP_AND_MTRACE_BUF_LEN, 0, NULL, 0);
+
+	if (recvd < 1) {
+		fprintf(stderr, "recvfrom error: %s\n", strerror(errno));
+		return -1;
+	}
+
+	if (recvd < (int)sizeof(struct ip)) {
+		fprintf(stderr, "no ip header\n");
+		return -1;
+	}
+
+	ip = (struct ip *)mtrace_buf;
+
+	if (ip->ip_v != 4) {
+		fprintf(stderr, "IP not version 4\n");
+		return -1;
+	}
+
+	/* Verify IP header checksum (computed with ip_sum zeroed). */
+	sum = ip->ip_sum;
+	ip->ip_sum = 0;
+
+	if (sum != in_cksum(ip, ip->ip_hl * 4))
+		return -1;
+
+	/* Header overflow check */
+	mtrace_off = 4 * ip->ip_hl;
+	if (mtrace_off > MTRACE_BUF_LEN)
+		return -1;
+
+	/* Underflow/overflow check */
+	ip_len = ntohs(ip->ip_len);
+	if (ip_len < mtrace_off || ip_len < MTRACE_HDR_SIZE
+	    || ip_len > MTRACE_BUF_LEN)
+		return -1;
+
+	mtrace_len = ip_len - mtrace_off;
+	mtrace = (struct igmp_mtrace *)(mtrace_buf + mtrace_off);
+
+	/* Verify mtrace checksum (computed with checksum field zeroed). */
+	sum = mtrace->checksum;
+	mtrace->checksum = 0;
+	if (sum != in_cksum(mtrace, mtrace_len)) {
+		fprintf(stderr, "mtrace checksum wrong\n");
+		return -1;
+	}
+
+	if (mtrace->type != PIM_IGMP_MTRACE_RESPONSE)
+		return -1;
+
+
+	/* Number of per-hop response blocks following the fixed header. */
+	responses = mtrace_len - sizeof(struct igmp_mtrace);
+	responses /= sizeof(struct igmp_mtrace_rsp);
+
+	if (responses > MTRACE_MAX_HOPS) {
+		fprintf(stderr, "mtrace too large\n");
+		return -1;
+	}
+
+	if (hops)
+		*hops = responses;
+
+	if (mtracer)
+		memcpy(mtracer, mtrace, mtrace_len);
+
+	return 0;
+}
+
+/*
+ * Wait up to MTRACE_TIMEOUT seconds for a valid mtrace response, retrying
+ * on packets recv_response() rejects.  On success stores the elapsed time
+ * in *ret_msec (optional) and returns select()'s positive result; returns
+ * 0 on timeout, negative on select() error.
+ *
+ * NOTE(review): elapsed time is derived from the remaining 'timeout' value
+ * after select() — this relies on Linux updating the timeout in place,
+ * which is not portable beyond Linux.
+ */
+static int wait_for_response(int fd, int *hops, struct igmp_mtrace *mtrace,
+			     long *ret_msec)
+{
+	fd_set readfds;
+	struct timeval timeout;
+	int ret;
+	long msec, rmsec, tmsec;
+
+	FD_ZERO(&readfds);
+	FD_SET(fd, &readfds);
+
+	memset(&timeout, 0, sizeof(timeout));
+
+	timeout.tv_sec = MTRACE_TIMEOUT;
+
+	tmsec = timeout.tv_sec * 1000 + timeout.tv_usec / 1000;
+	do {
+		ret = select(fd + 1, &readfds, NULL, NULL, &timeout);
+		if (ret <= 0)
+			return ret;
+		rmsec = timeout.tv_sec * 1000 + timeout.tv_usec / 1000;
+		msec = tmsec - rmsec;
+	} while (recv_response(fd, hops, mtrace) != 0);
+
+	if (ret_msec)
+		*ret_msec = msec;
+
+	return ret;
+}
+/*
+ * The trace is complete when the previous-hop address of the last response
+ * block equals the multicast source being traced.
+ */
+static bool check_end(struct igmp_mtrace *mtrace, int hops)
+{
+	return mtrace->src_addr.s_addr == mtrace->rsp[hops - 1].prev_hop.s_addr;
+}
+
+/*
+ * mtracebis entry point: trace the multicast reverse path from this host
+ * towards <multicast source>, optionally for a specific <multicast group>.
+ *
+ * Strategy: first send a full-path query (hops=255) and wait for a
+ * complete response; if that times out, fall back to hop-by-hop queries
+ * with increasing hop counts, 'perhop' attempts per hop.  Requires root
+ * (raw IGMP socket).
+ */
+int main(int argc, char *const argv[])
+{
+	struct in_addr mc_source;
+	struct in_addr mc_group;
+	struct in_addr iface_addr;
+	struct in_addr gw_addr;
+	struct in_addr mtrace_addr;
+	struct igmp_mtrace mtrace;
+	struct igmp_mtrace *mtracep;
+	int hops = 255;
+	int rhops;
+	int maxhops = 255;
+	int perhop = 3;
+	int ifindex;
+	int unicast = 1;
+	int ttl = 64;
+	int fd = -1;
+	int ret = -1;
+	int c;
+	long msec;
+	int i, j;
+	char ifname[IF_NAMESIZE];
+	char mbuf[MTRACE_BUF_LEN];
+	bool not_group;
+
+	/* Well-known mtrace multicast response address (unused when
+	 * 'unicast' is set, as it is here). */
+	mtrace_addr.s_addr = inet_addr("224.0.1.32");
+
+	uid_t uid = getuid();
+
+	if (uid != 0) {
+		printf("must run as root\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (argc <= 0)
+		progname = "mtracebis";
+	else
+		progname = argv[0];
+
+	if (argc != 2 && argc != 3) {
+		usage();
+		exit(EXIT_FAILURE);
+	}
+
+	/* Only -h/--help and -v/--version are recognized. */
+	while (1) {
+		static struct option long_options[] = {
+			{"help", no_argument, 0, 'h'},
+			{"version", no_argument, 0, 'v'},
+			{0, 0, 0, 0}};
+		int option_index = 0;
+
+		c = getopt_long(argc, argv, "vh", long_options, &option_index);
+
+		if (c == -1)
+			break;
+
+		switch (c) {
+		case 'h':
+			usage();
+			exit(0);
+		case 'v':
+			version();
+			exit(0);
+		default:
+			usage();
+			exit(EXIT_FAILURE);
+		}
+	}
+	if (inet_pton(AF_INET, argv[1], &mc_source) != 1) {
+		usage();
+		fprintf(stderr, "%s: %s is not a valid IPv4 address\n", argv[0],
+			argv[1]);
+		exit(EXIT_FAILURE);
+	}
+
+	/* Optional second argument must be a class D/E (multicast) address. */
+	mc_group.s_addr = INADDR_ANY;
+	not_group = false;
+
+	if (argc == 3) {
+		if (inet_pton(AF_INET, argv[2], &mc_group) != 1)
+			not_group = true;
+		if (!not_group && !IPV4_CLASS_DE(ntohl(mc_group.s_addr)))
+			not_group = true;
+	}
+
+	if (not_group) {
+		usage();
+		fprintf(stderr, "%s: %s is not a valid IPv4 group address\n",
+			argv[0], argv[2]);
+		exit(EXIT_FAILURE);
+	}
+
+	/* Find the interface, local address and gateway towards the source. */
+	ifindex = routeget(mc_source, &iface_addr, &gw_addr);
+	if (ifindex < 0) {
+		fprintf(stderr, "%s: failed to get route to source %s\n",
+			argv[0], argv[1]);
+		exit(EXIT_FAILURE);
+	}
+
+	if (if_indextoname(ifindex, ifname) == NULL) {
+		fprintf(stderr, "%s: if_indextoname error: %s\n", argv[0],
+			strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	/* zero mtrace struct */
+	memset((char *)&mtrace, 0, sizeof(mtrace));
+
+	/* set up query */
+	mtrace.type = PIM_IGMP_MTRACE_QUERY_REQUEST;
+	mtrace.hops = hops;
+	mtrace.checksum = 0;
+	mtrace.grp_addr = mc_group;
+	mtrace.src_addr = mc_source;
+	mtrace.dst_addr = iface_addr;
+	mtrace.rsp_addr = unicast ? iface_addr : mtrace_addr;
+	mtrace.rsp_ttl = ttl;
+	/* 24-bit query id seeded from the current time. */
+	mtrace.qry_id = 0xffffff & time(NULL);
+
+	mtrace.checksum = in_cksum(&mtrace, sizeof(mtrace));
+
+	fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
+
+	/* NOTE(review): 'fd < 1' also rejects a valid descriptor 0; the
+	 * conventional check is 'fd < 0' — verify intent. */
+	if (fd < 1) {
+		fprintf(stderr, "%s: socket error: %s\n", argv[0],
+			strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	/* Send/receive only on the interface facing the source. */
+	ret = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname,
+			 strlen(ifname));
+
+	if (ret < 0) {
+		fprintf(stderr, "%s: setsockopt error: %s\n", argv[0],
+			strerror(errno));
+		ret = EXIT_FAILURE;
+		goto close_fd;
+	}
+
+	/* First attempt: one query for the full reverse path. */
+	print_query(&mtrace);
+	if (send_query(fd, gw_addr, &mtrace) < 0) {
+		fprintf(stderr, "%s: sendto error: %s\n", argv[0],
+			strerror(errno));
+		ret = EXIT_FAILURE;
+		goto close_fd;
+	}
+	printf("Querying full reverse path...\n");
+	mtracep = (struct igmp_mtrace *)mbuf;
+	ret = wait_for_response(fd, &rhops, mtracep, &msec);
+	if (ret > 0) {
+		print_responses(mtracep, rhops, msec);
+		ret = 0;
+		goto close_fd;
+	}
+	if (ret < 0) {
+		fprintf(stderr, "%s: select error: %s\n", argv[0],
+			strerror(errno));
+		ret = EXIT_FAILURE;
+		goto close_fd;
+	}
+	/* Full-path query timed out: probe hop by hop, 'perhop' tries each. */
+	printf(" * ");
+	printf("switching to hop-by-hop:\n");
+	print_dest(&mtrace);
+	for (i = 1; i < maxhops; i++) {
+		print_line_no(i);
+		mtrace.hops = i;
+		for (j = 0; j < perhop; j++) {
+			/* New query id and checksum for every retry. */
+			mtrace.qry_id++;
+			mtrace.checksum = 0;
+			mtrace.checksum = in_cksum(&mtrace, sizeof(mtrace));
+			if (send_query(fd, gw_addr, &mtrace) < 0) {
+				fprintf(stderr, "%s: sendto error: %s\n",
+					argv[0], strerror(errno));
+				ret = EXIT_FAILURE;
+				goto close_fd;
+			}
+			ret = wait_for_response(fd, &rhops, mtracep, &msec);
+			if (ret > 0) {
+				if (check_end(mtracep, rhops)) {
+					print_rsp(&mtracep->rsp[rhops - 1]);
+					print_summary(mtracep, rhops, msec);
+					ret = 0;
+					goto close_fd;
+				}
+				/* Fewer hops answered than asked for:
+				 * no further progress is possible. */
+				if (i > rhops) {
+					printf(" * ...giving up.\n");
+					ret = 0;
+					goto close_fd;
+				}
+				print_rsp(&mtracep->rsp[rhops - 1]);
+				break;
+			}
+			printf(" *");
+		}
+		if (ret <= 0)
+			printf("\n");
+	}
+	ret = 0;
+close_fd:
+	close(fd);
+	exit(ret);
+}
+
+#else /* __linux__ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+/* Non-Linux stub: mtracebis depends on Linux netlink and raw sockets. */
+int main(int argc, char *argv[])
+{
+	printf("%s implemented only for GNU/Linux\n", argv[0]);
+	exit(0);
+}
+
+#endif /* __linux__ */
diff --git a/pimd/mtracebis_netlink.c b/pimd/mtracebis_netlink.c
new file mode 100644
index 0000000..16873ed
--- /dev/null
+++ b/pimd/mtracebis_netlink.c
@@ -0,0 +1,728 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * libnetlink.c RTnetlink service routines.
+ *
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef __linux__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <syslog.h>
+#include <fcntl.h>
+#include <net/if_arp.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <string.h>
+#include <errno.h>
+#include <time.h>
+#include <sys/uio.h>
+#include <assert.h>
+
+#include "mtracebis_netlink.h"
+
+int rcvbuf = 1024 * 1024;
+
+/* Close the netlink socket and mark the handle invalid (fd = -1). */
+void rtnl_close(struct rtnl_handle *rth)
+{
+	if (rth->fd >= 0) {
+		close(rth->fd);
+		rth->fd = -1;
+	}
+}
+
+/*
+ * Open a netlink socket of the given protocol, subscribe to the requested
+ * multicast groups, size its buffers, bind it and record the kernel-assigned
+ * local address in the handle.  Returns 0 on success, -1 on error.
+ *
+ * NOTE(review): on post-socket() failures the fd is left open; callers
+ * should still rtnl_close() the handle on error — verify call sites.
+ */
+int rtnl_open_byproto(struct rtnl_handle *rth, unsigned subscriptions,
+		      int protocol)
+{
+	socklen_t addr_len;
+	int sndbuf = 32768;
+
+	memset(rth, 0, sizeof(*rth));
+
+	rth->fd = socket(AF_NETLINK, SOCK_RAW, protocol);
+	if (rth->fd < 0) {
+		perror("Cannot open netlink socket");
+		return -1;
+	}
+
+	if (setsockopt(rth->fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf))
+	    < 0) {
+		perror("SO_SNDBUF");
+		return -1;
+	}
+
+	if (setsockopt(rth->fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf))
+	    < 0) {
+		perror("SO_RCVBUF");
+		return -1;
+	}
+
+	memset(&rth->local, 0, sizeof(rth->local));
+	rth->local.nl_family = AF_NETLINK;
+	rth->local.nl_groups = subscriptions;
+
+	if (bind(rth->fd, (struct sockaddr *)&rth->local, sizeof(rth->local))
+	    < 0) {
+		perror("Cannot bind netlink socket");
+		return -1;
+	}
+	/* Read back the address the kernel actually assigned (nl_pid). */
+	addr_len = sizeof(rth->local);
+	if (getsockname(rth->fd, (struct sockaddr *)&rth->local, &addr_len)
+	    < 0) {
+		perror("Cannot getsockname");
+		return -1;
+	}
+	if (addr_len != sizeof(rth->local)) {
+		fprintf(stderr, "Wrong address length %d\n", addr_len);
+		return -1;
+	}
+	if (rth->local.nl_family != AF_NETLINK) {
+		fprintf(stderr, "Wrong address family %d\n",
+			rth->local.nl_family);
+		return -1;
+	}
+	rth->seq = getpid();
+	return 0;
+}
+/* Open a NETLINK_ROUTE socket with the given group subscriptions. */
+int rtnl_open(struct rtnl_handle *rth, unsigned subscriptions)
+{
+	return rtnl_open_byproto(rth, subscriptions, NETLINK_ROUTE);
+}
+
+/*
+ * Send a full-table dump request (NLM_F_ROOT|NLM_F_MATCH) of the given
+ * message type for one address family.  Returns send()'s result.
+ */
+int rtnl_wilddump_request(struct rtnl_handle *rth, int family, int type)
+{
+	struct {
+		struct nlmsghdr nlh;
+		struct rtgenmsg g;
+	} req;
+
+	memset(&req, 0, sizeof(req));
+	req.nlh.nlmsg_len = sizeof(req);
+	req.nlh.nlmsg_type = type;
+	req.nlh.nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST;
+	req.nlh.nlmsg_pid = 0;
+	/* Remember the sequence number so replies can be matched. */
+	req.nlh.nlmsg_seq = rth->dump = ++rth->seq;
+	req.g.rtgen_family = family;
+
+	return send(rth->fd, (void *)&req, sizeof(req), 0);
+}
+/* Send a pre-built netlink message buffer as-is. */
+int rtnl_send(struct rtnl_handle *rth, const char *buf, int len)
+{
+	return send(rth->fd, buf, len, 0);
+}
+
+/*
+ * Send a pre-built netlink message and peek (non-blocking) for an
+ * immediate NLMSG_ERROR reply.  Returns 0 when nothing is pending or no
+ * error was reported, -1 with errno set from the kernel's error code
+ * otherwise.
+ */
+int rtnl_send_check(struct rtnl_handle *rth, const char *buf, int len)
+{
+	struct nlmsghdr *h;
+	int status;
+	char resp[1024];
+
+	status = send(rth->fd, buf, len, 0);
+	if (status < 0)
+		return status;
+
+	/* Check for immediate errors */
+	status = recv(rth->fd, resp, sizeof(resp), MSG_DONTWAIT | MSG_PEEK);
+	if (status < 0) {
+		if (errno == EAGAIN)
+			return 0;
+		return -1;
+	}
+
+	for (h = (struct nlmsghdr *)resp; NLMSG_OK(h, (uint32_t)status);
+	     h = NLMSG_NEXT(h, status)) {
+		if (h->nlmsg_type == NLMSG_ERROR) {
+			struct nlmsgerr *err = (struct nlmsgerr *)NLMSG_DATA(h);
+			if (h->nlmsg_len
+			    < NLMSG_LENGTH(sizeof(struct nlmsgerr)))
+				fprintf(stderr, "ERROR truncated\n");
+			else
+				errno = -err->error;
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Send a dump request whose payload ('req', 'len' bytes) is supplied by
+ * the caller, using a two-element iovec so the header need not be copied
+ * into one buffer.  Returns sendmsg()'s result.
+ */
+int rtnl_dump_request(struct rtnl_handle *rth, int type, void *req, int len)
+{
+	struct nlmsghdr nlh;
+	struct sockaddr_nl nladdr;
+	struct iovec iov[2] = {{.iov_base = &nlh, .iov_len = sizeof(nlh)},
+			       {.iov_base = req, .iov_len = len}};
+	struct msghdr msg = {
+		.msg_name = &nladdr,
+		.msg_namelen = sizeof(nladdr),
+		.msg_iov = iov,
+		.msg_iovlen = 2,
+	};
+
+	memset(&nladdr, 0, sizeof(nladdr));
+	nladdr.nl_family = AF_NETLINK;
+
+	nlh.nlmsg_len = NLMSG_LENGTH(len);
+	nlh.nlmsg_type = type;
+	nlh.nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST;
+	nlh.nlmsg_pid = 0;
+	/* Remember the sequence number so replies can be matched. */
+	nlh.nlmsg_seq = rth->dump = ++rth->seq;
+
+	return sendmsg(rth->fd, &msg, 0);
+}
+
+/*
+ * Receive the replies of a previously sent dump request and feed each
+ * message through the NULL-terminated list of filter callbacks in 'arg'.
+ * Messages not addressed to us (wrong pid or wrong sequence number) go to
+ * the per-entry 'junk' callback if one is set.  Stops successfully on
+ * NLMSG_DONE; returns -1 on NLMSG_ERROR or receive failure, or the first
+ * negative callback return value.
+ */
+int rtnl_dump_filter_l(struct rtnl_handle *rth,
+		       const struct rtnl_dump_filter_arg *arg)
+{
+	struct sockaddr_nl nladdr;
+	char buf[16384];
+	struct iovec iov = {
+		.iov_base = buf,
+		.iov_len = sizeof(buf),
+	};
+	struct msghdr msg = {
+		.msg_name = &nladdr,
+		.msg_namelen = sizeof(nladdr),
+		.msg_iov = &iov,
+		.msg_iovlen = 1,
+	};
+
+	while (1) {
+		int status;
+		const struct rtnl_dump_filter_arg *a;
+		int found_done = 0;
+		int msglen = 0;
+
+		iov.iov_len = sizeof(buf);
+		status = recvmsg(rth->fd, &msg, 0);
+
+		if (status < 0) {
+			if (errno == EINTR || errno == EAGAIN)
+				continue;
+			fprintf(stderr, "netlink receive error %s (%d)\n",
+				strerror(errno), errno);
+			return -1;
+		}
+
+		if (status == 0) {
+			fprintf(stderr, "EOF on netlink\n");
+			return -1;
+		}
+
+		/* Run each filter over the whole batch of messages. */
+		for (a = arg; a->filter; a++) {
+			struct nlmsghdr *h = (struct nlmsghdr *)iov.iov_base;
+			msglen = status;
+
+			while (NLMSG_OK(h, (uint32_t)msglen)) {
+				int err;
+
+				/* Not our reply: kernel-originated, wrong
+				 * destination pid, or stale sequence. */
+				if (nladdr.nl_pid != 0
+				    || h->nlmsg_pid != rth->local.nl_pid
+				    || h->nlmsg_seq != rth->dump) {
+					if (a->junk) {
+						err = a->junk(&nladdr, h,
+							      a->arg2);
+						if (err < 0)
+							return err;
+					}
+					goto skip_it;
+				}
+
+				if (h->nlmsg_type == NLMSG_DONE) {
+					found_done = 1;
+					break; /* process next filter */
+				}
+				if (h->nlmsg_type == NLMSG_ERROR) {
+					struct nlmsgerr *merr =
+						(struct nlmsgerr *)NLMSG_DATA(
+							h);
+					if (h->nlmsg_len
+					    < NLMSG_LENGTH(sizeof(
+						      struct nlmsgerr))) {
+						fprintf(stderr,
+							"ERROR truncated\n");
+					} else {
+						errno = -merr->error;
+						perror("RTNETLINK answers");
+					}
+					return -1;
+				}
+				err = a->filter(&nladdr, h, a->arg1);
+				if (err < 0)
+					return err;
+
+			skip_it:
+				h = NLMSG_NEXT(h, msglen);
+			}
+		}
+
+		if (found_done)
+			return 0;
+
+		if (msg.msg_flags & MSG_TRUNC) {
+			fprintf(stderr, "Message truncated\n");
+			continue;
+		}
+		if (msglen) {
+			fprintf(stderr, "!!!Remnant of size %d\n", msglen);
+			exit(1);
+		}
+	}
+}
+
+/* Single-filter convenience wrapper around rtnl_dump_filter_l(). */
+int rtnl_dump_filter(struct rtnl_handle *rth, rtnl_filter_t filter, void *arg1,
+		     rtnl_filter_t junk, void *arg2)
+{
+	const struct rtnl_dump_filter_arg a[2] = {
+		{.filter = filter, .arg1 = arg1, .junk = junk, .arg2 = arg2},
+		{.filter = NULL, .arg1 = NULL, .junk = NULL, .arg2 = NULL}};
+
+	return rtnl_dump_filter_l(rth, a);
+}
+
+/*
+ * Send one netlink request and synchronously wait for the matching reply.
+ *
+ * If 'answer' is NULL an ACK is requested (NLM_F_ACK) and a zero-error
+ * NLMSG_ERROR counts as success; otherwise the first matching reply is
+ * copied into 'answer'.  Messages from other senders/sequences go to the
+ * optional 'junk' callback.  Returns 0 on success, -1 on error (errno set
+ * from the kernel's NLMSG_ERROR code where applicable).
+ */
+int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n, pid_t peer,
+	      unsigned groups, struct nlmsghdr *answer, rtnl_filter_t junk,
+	      void *jarg)
+{
+	int status;
+	unsigned seq;
+	struct nlmsghdr *h;
+	struct sockaddr_nl nladdr;
+	struct iovec iov = {.iov_base = (void *)n, .iov_len = n->nlmsg_len};
+	struct msghdr msg = {
+		.msg_name = &nladdr,
+		.msg_namelen = sizeof(nladdr),
+		.msg_iov = &iov,
+		.msg_iovlen = 1,
+	};
+	char buf[16384];
+
+	memset(&nladdr, 0, sizeof(nladdr));
+	nladdr.nl_family = AF_NETLINK;
+	nladdr.nl_pid = peer;
+	nladdr.nl_groups = groups;
+
+	n->nlmsg_seq = seq = ++rtnl->seq;
+
+	/* No answer buffer: ask the kernel for an explicit ACK instead. */
+	if (answer == NULL)
+		n->nlmsg_flags |= NLM_F_ACK;
+
+	status = sendmsg(rtnl->fd, &msg, 0);
+
+	if (status < 0) {
+		perror("Cannot talk to rtnetlink");
+		return -1;
+	}
+
+	/* Reuse the same iovec for receiving the reply. */
+	memset(buf, 0, sizeof(buf));
+
+	iov.iov_base = buf;
+
+	while (1) {
+		iov.iov_len = sizeof(buf);
+		status = recvmsg(rtnl->fd, &msg, 0);
+
+		if (status < 0) {
+			if (errno == EINTR || errno == EAGAIN)
+				continue;
+			fprintf(stderr, "netlink receive error %s (%d)\n",
+				strerror(errno), errno);
+			return -1;
+		}
+		if (status == 0) {
+			fprintf(stderr, "EOF on netlink\n");
+			return -1;
+		}
+		if (msg.msg_namelen != sizeof(nladdr)) {
+			fprintf(stderr, "sender address length == %d\n",
+				msg.msg_namelen);
+			exit(1);
+		}
+		for (h = (struct nlmsghdr *)iov.iov_base;
+		     status >= (int)sizeof(*h);) {
+			int err;
+			int len = h->nlmsg_len;
+			int l = len - sizeof(*h);
+
+			if (l < 0 || len > status) {
+				if (msg.msg_flags & MSG_TRUNC) {
+					fprintf(stderr, "Truncated message\n");
+					return -1;
+				}
+				fprintf(stderr,
+					"!!!malformed message: len=%d\n", len);
+				exit(1);
+			}
+
+			/* Not the reply we are waiting for. */
+			if ((int)nladdr.nl_pid != peer
+			    || h->nlmsg_pid != rtnl->local.nl_pid
+			    || h->nlmsg_seq != seq) {
+				if (junk) {
+					err = junk(&nladdr, h, jarg);
+					if (err < 0)
+						return err;
+				}
+				/* Don't forget to skip that message. */
+				status -= NLMSG_ALIGN(len);
+				h = (struct nlmsghdr *)((char *)h
+							+ NLMSG_ALIGN(len));
+				continue;
+			}
+
+			if (h->nlmsg_type == NLMSG_ERROR) {
+				struct nlmsgerr *merr =
+					(struct nlmsgerr *)NLMSG_DATA(h);
+				if (l < (int)sizeof(struct nlmsgerr)) {
+					fprintf(stderr, "ERROR truncated\n");
+				} else {
+					errno = -merr->error;
+					/* error == 0 is the ACK case. */
+					if (errno == 0) {
+						if (answer)
+							memcpy(answer, h,
+							       h->nlmsg_len);
+						return 0;
+					}
+					perror("RTNETLINK answers");
+				}
+				return -1;
+			}
+			if (answer) {
+				memcpy(answer, h, h->nlmsg_len);
+				return 0;
+			}
+
+			fprintf(stderr, "Unexpected reply!!!\n");
+
+			status -= NLMSG_ALIGN(len);
+			h = (struct nlmsghdr *)((char *)h + NLMSG_ALIGN(len));
+		}
+		if (msg.msg_flags & MSG_TRUNC) {
+			fprintf(stderr, "Message truncated\n");
+			continue;
+		}
+		if (status) {
+			fprintf(stderr, "!!!Remnant of size %d\n", status);
+			exit(1);
+		}
+	}
+}
+
+/*
+ * Receive netlink messages forever, dispatching every message to
+ * 'handler'.  ENOBUFS (receive queue overrun) is logged and ignored;
+ * returns -1 on other receive errors or EOF, or the first negative
+ * handler return value.  Never returns success.
+ */
+int rtnl_listen(struct rtnl_handle *rtnl, rtnl_filter_t handler, void *jarg)
+{
+	int status;
+	struct nlmsghdr *h;
+	struct sockaddr_nl nladdr;
+	char buf[8192];
+	struct iovec iov = {
+		.iov_base = buf,
+		.iov_len = sizeof(buf),
+	};
+	struct msghdr msg = {
+		.msg_name = &nladdr,
+		.msg_namelen = sizeof(nladdr),
+		.msg_iov = &iov,
+		.msg_iovlen = 1,
+	};
+
+	memset(&nladdr, 0, sizeof(nladdr));
+	nladdr.nl_family = AF_NETLINK;
+	nladdr.nl_pid = 0;
+	nladdr.nl_groups = 0;
+
+	while (1) {
+		iov.iov_len = sizeof(buf);
+		status = recvmsg(rtnl->fd, &msg, 0);
+
+		if (status < 0) {
+			if (errno == EINTR || errno == EAGAIN)
+				continue;
+			fprintf(stderr, "netlink receive error %s (%d)\n",
+				strerror(errno), errno);
+			/* Queue overrun: messages were lost but the
+			 * socket is still usable, keep listening. */
+			if (errno == ENOBUFS)
+				continue;
+			return -1;
+		}
+		if (status == 0) {
+			fprintf(stderr, "EOF on netlink\n");
+			return -1;
+		}
+		if (msg.msg_namelen != sizeof(nladdr)) {
+			fprintf(stderr, "Sender address length == %d\n",
+				msg.msg_namelen);
+			exit(1);
+		}
+		for (h = (struct nlmsghdr *)buf; status >= (int)sizeof(*h);) {
+			int err;
+			int len = h->nlmsg_len;
+			int l = len - sizeof(*h);
+
+			if (l < 0 || len > status) {
+				if (msg.msg_flags & MSG_TRUNC) {
+					fprintf(stderr, "Truncated message\n");
+					return -1;
+				}
+				fprintf(stderr,
+					"!!!malformed message: len=%d\n", len);
+				exit(1);
+			}
+
+			err = handler(&nladdr, h, jarg);
+			if (err < 0)
+				return err;
+
+			status -= NLMSG_ALIGN(len);
+			h = (struct nlmsghdr *)((char *)h + NLMSG_ALIGN(len));
+		}
+		if (msg.msg_flags & MSG_TRUNC) {
+			fprintf(stderr, "Message truncated\n");
+			continue;
+		}
+		if (status) {
+			fprintf(stderr, "!!!Remnant of size %d\n", status);
+			exit(1);
+		}
+	}
+}
+
+/*
+ * Replay netlink messages from a file: read one nlmsghdr, then its
+ * payload, and pass each complete message to 'handler' with a synthetic
+ * kernel sender address.  Returns 0 at clean EOF, -1 on read error or a
+ * truncated/malformed message, or the first negative handler result.
+ */
+int rtnl_from_file(FILE *rtnl, rtnl_filter_t handler, void *jarg)
+{
+	struct sockaddr_nl nladdr;
+	char buf[8192];
+	struct nlmsghdr *h = (void *)buf;
+
+	memset(&nladdr, 0, sizeof(nladdr));
+	nladdr.nl_family = AF_NETLINK;
+	nladdr.nl_pid = 0;
+	nladdr.nl_groups = 0;
+
+	while (1) {
+		int err;
+		size_t l, rl, arl;
+
+		/* First the fixed-size header... */
+		rl = sizeof(*h);
+		arl = fread(&buf, 1, rl, rtnl);
+
+		if (arl != rl) {
+			if (arl == 0)
+				return 0;
+
+			if (ferror(rtnl))
+				fprintf(stderr, "%s: header read failed\n",
+					__func__);
+			else
+				fprintf(stderr, "%s: truncated header\n",
+					__func__);
+			return -1;
+		}
+
+		/* ...then the payload, whose length the header declares. */
+		l = h->nlmsg_len > rl ? h->nlmsg_len - rl : 0;
+
+		if (l == 0 || (l + (size_t)NLMSG_HDRLEN) > sizeof(buf)) {
+			fprintf(stderr, "%s: malformed message: len=%zu @%lu\n",
+				__func__, (size_t)h->nlmsg_len, ftell(rtnl));
+			return -1;
+		}
+
+		rl = NLMSG_ALIGN(l);
+		arl = fread(NLMSG_DATA(h), 1, rl, rtnl);
+
+		if (arl != rl) {
+			if (ferror(rtnl))
+				fprintf(stderr, "%s: msg read failed\n",
+					__func__);
+			else
+				fprintf(stderr, "%s: truncated message\n",
+					__func__);
+			return -1;
+		}
+
+		err = handler(&nladdr, h, jarg);
+		if (err < 0)
+			return err;
+	}
+}
+
+/*
+ * Append a 32-bit attribute to a netlink message, growing nlmsg_len.
+ * Returns 0 on success, -1 if the message would exceed 'maxlen'.
+ */
+int addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 data)
+{
+	int len = RTA_LENGTH(4);
+	struct rtattr *rta;
+	if ((int)(NLMSG_ALIGN(n->nlmsg_len) + len) > maxlen) {
+		fprintf(stderr,
+			"addattr32: Error! max allowed bound %d exceeded\n",
+			maxlen);
+		return -1;
+	}
+	rta = NLMSG_TAIL(n);
+	rta->rta_type = type;
+	rta->rta_len = len;
+	memcpy(RTA_DATA(rta), &data, 4);
+	n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + len;
+	return 0;
+}
+/*
+ * Append an arbitrary-length attribute ('alen' bytes of 'data', which may
+ * be NULL only when alen == 0, e.g. for nesting).  Returns 0 on success,
+ * -1 if the message would exceed 'maxlen'.
+ */
+int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data,
+	      int alen)
+{
+	int len = RTA_LENGTH(alen);
+	struct rtattr *rta;
+
+	if ((int)(NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len)) > maxlen) {
+		fprintf(stderr,
+			"addattr_l ERROR: message exceeded bound of %d\n",
+			maxlen);
+		return -1;
+	}
+	rta = NLMSG_TAIL(n);
+	rta->rta_type = type;
+	rta->rta_len = len;
+
+	if (data)
+		memcpy(RTA_DATA(rta), data, alen);
+	else
+		assert(alen == 0);
+
+	n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len);
+	return 0;
+}
+
+/*
+ * Append raw bytes (no rtattr header) to a netlink message, zero-padding
+ * up to alignment.  Returns 0 on success, -1 if 'maxlen' would be
+ * exceeded.
+ */
+int addraw_l(struct nlmsghdr *n, int maxlen, const void *data, int len)
+{
+	if ((int)(NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len)) > maxlen) {
+		fprintf(stderr,
+			"addraw_l ERROR: message exceeded bound of %d\n",
+			maxlen);
+		return -1;
+	}
+
+	memcpy(NLMSG_TAIL(n), data, len);
+	memset((uint8_t *)NLMSG_TAIL(n) + len, 0, NLMSG_ALIGN(len) - len);
+	n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len);
+	return 0;
+}
+/*
+ * Open a nested attribute: emit a zero-length attribute of 'type' and
+ * return its position so addattr_nest_end() can patch its length later.
+ */
+struct rtattr *addattr_nest(struct nlmsghdr *n, int maxlen, int type)
+{
+	struct rtattr *nest = NLMSG_TAIL(n);
+
+	addattr_l(n, maxlen, type, NULL, 0);
+	return nest;
+}
+/* Close a nested attribute: fix up its rta_len to span all children. */
+int addattr_nest_end(struct nlmsghdr *n, struct rtattr *nest)
+{
+	nest->rta_len = (uint8_t *)NLMSG_TAIL(n) - (uint8_t *)nest;
+	return n->nlmsg_len;
+}
+/*
+ * Compat variant of addattr_nest: first emit 'data' as an attribute of
+ * 'type', then open a nest of the same type; returns the start of both.
+ */
+struct rtattr *addattr_nest_compat(struct nlmsghdr *n, int maxlen, int type,
+				   const void *data, int len)
+{
+	struct rtattr *start = NLMSG_TAIL(n);
+
+	addattr_l(n, maxlen, type, data, len);
+	addattr_nest(n, maxlen, type);
+	return start;
+}
+/*
+ * Close a compat nest opened by addattr_nest_compat().
+ *
+ * NOTE(review): 'start + NLMSG_ALIGN(start->rta_len)' advances the
+ * pointer in sizeof(struct rtattr) units, not bytes; upstream libnetlink
+ * computes this offset through a byte pointer — verify against iproute2.
+ */
+int addattr_nest_compat_end(struct nlmsghdr *n, struct rtattr *start)
+{
+	struct rtattr *nest = start + NLMSG_ALIGN(start->rta_len);
+
+	start->rta_len = (uint8_t *)NLMSG_TAIL(n) - (uint8_t *)start;
+	addattr_nest_end(n, nest);
+	return n->nlmsg_len;
+}
+
+/*
+ * Append a 32-bit sub-attribute inside an existing rtattr, growing its
+ * rta_len.  Returns 0 on success, -1 if 'maxlen' would be exceeded.
+ */
+int rta_addattr32(struct rtattr *rta, int maxlen, int type, __u32 data)
+{
+	int len = RTA_LENGTH(4);
+	struct rtattr *subrta;
+
+	if ((int)(RTA_ALIGN(rta->rta_len) + len) > maxlen) {
+		fprintf(stderr,
+			"rta_addattr32: Error! max allowed bound %d exceeded\n",
+			maxlen);
+		return -1;
+	}
+	subrta = (struct rtattr *)(((char *)rta) + RTA_ALIGN(rta->rta_len));
+	subrta->rta_type = type;
+	subrta->rta_len = len;
+	memcpy(RTA_DATA(subrta), &data, 4);
+	rta->rta_len = NLMSG_ALIGN(rta->rta_len) + len;
+	return 0;
+}
+/*
+ * Append an arbitrary-length sub-attribute inside an existing rtattr.
+ * Returns 0 on success, -1 if 'maxlen' would be exceeded.
+ */
+int rta_addattr_l(struct rtattr *rta, int maxlen, int type, const void *data,
+		  int alen)
+{
+	struct rtattr *subrta;
+	int len = RTA_LENGTH(alen);
+
+	if ((int)(RTA_ALIGN(rta->rta_len) + RTA_ALIGN(len)) > maxlen) {
+		fprintf(stderr,
+			"rta_addattr_l: Error! max allowed bound %d exceeded\n",
+			maxlen);
+		return -1;
+	}
+	subrta = (struct rtattr *)(((char *)rta) + RTA_ALIGN(rta->rta_len));
+	subrta->rta_type = type;
+	subrta->rta_len = len;
+	memcpy(RTA_DATA(subrta), data, alen);
+	rta->rta_len = NLMSG_ALIGN(rta->rta_len) + RTA_ALIGN(len);
+	return 0;
+}
+
+/*
+ * Index an attribute stream by type: tb[type] points at the first
+ * attribute of each type <= max ('tb' must hold max+1 entries).  A
+ * non-zero remainder after the walk is logged as a deficit.  Returns 0.
+ */
+int parse_rtattr(struct rtattr *tb[], int max, struct rtattr *rta, int len)
+{
+	memset(tb, 0, sizeof(struct rtattr *) * (max + 1));
+	while (RTA_OK(rta, len)) {
+		if ((rta->rta_type <= max) && (!tb[rta->rta_type]))
+			tb[rta->rta_type] = rta;
+		rta = RTA_NEXT(rta, len);
+	}
+	if (len)
+		fprintf(stderr, "!!!Deficit %d, rta_len=%d\n", len,
+			rta->rta_len);
+	return 0;
+}
+/*
+ * Collect attributes of type <= max into tb[] in stream order (up to
+ * 'max' entries) and return how many were stored.
+ */
+int parse_rtattr_byindex(struct rtattr *tb[], int max, struct rtattr *rta,
+			 int len)
+{
+	int i = 0;
+
+	memset(tb, 0, sizeof(struct rtattr *) * max);
+	while (RTA_OK(rta, len)) {
+		if (rta->rta_type <= max && i < max)
+			tb[i++] = rta;
+		rta = RTA_NEXT(rta, len);
+	}
+	if (len)
+		fprintf(stderr, "!!!Deficit %d, rta_len=%d\n", len,
+			rta->rta_len);
+	return i;
+}
+/*
+ * Helper for the parse_rtattr_nested_compat() macro: parse the nested
+ * attributes that follow 'len' bytes of fixed data inside 'rta'; clears
+ * tb[] when there is no nested part.  Returns -1 if the attribute payload
+ * is shorter than 'len', 0 otherwise.
+ */
+int __parse_rtattr_nested_compat(struct rtattr *tb[], int max,
+				 struct rtattr *rta, int len)
+{
+	if ((int)RTA_PAYLOAD(rta) < len)
+		return -1;
+	if (RTA_PAYLOAD(rta) >= RTA_ALIGN(len) + sizeof(struct rtattr)) {
+		rta = (struct rtattr *)(uint8_t *)RTA_DATA(rta)
+		      + RTA_ALIGN(len);
+		return parse_rtattr_nested(tb, max, rta);
+	}
+	memset(tb, 0, sizeof(struct rtattr *) * (max + 1));
+	return 0;
+}
+
+#endif /* __linux__ */
diff --git a/pimd/mtracebis_netlink.h b/pimd/mtracebis_netlink.h
new file mode 100644
index 0000000..1f22927
--- /dev/null
+++ b/pimd/mtracebis_netlink.h
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * libnetlink.c RTnetlink service routines.
+ *
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ */
+
+#ifdef __linux__
+
+#ifndef __LIBNETLINK_H__
+#define __LIBNETLINK_H__ 1
+
+#include <asm/types.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_link.h>
+#include <linux/if_addr.h>
+#include <linux/neighbour.h>
+
+struct rtnl_handle {
+ int fd;
+ struct sockaddr_nl local;
+ struct sockaddr_nl peer;
+ __u32 seq;
+ __u32 dump;
+};
+
+extern int rcvbuf;
+
+extern int rtnl_open(struct rtnl_handle *rth, unsigned subscriptions);
+extern int rtnl_open_byproto(struct rtnl_handle *rth, unsigned subscriptions,
+ int protocol);
+extern void rtnl_close(struct rtnl_handle *rth);
+extern int rtnl_wilddump_request(struct rtnl_handle *rth, int fam, int type);
+extern int rtnl_dump_request(struct rtnl_handle *rth, int type, void *req,
+ int len);
+
+typedef int (*rtnl_filter_t)(const struct sockaddr_nl *, struct nlmsghdr *n,
+ void *);
+
+struct rtnl_dump_filter_arg {
+ rtnl_filter_t filter;
+ void *arg1;
+ rtnl_filter_t junk;
+ void *arg2;
+};
+
+extern int rtnl_dump_filter_l(struct rtnl_handle *rth,
+ const struct rtnl_dump_filter_arg *arg);
+extern int rtnl_dump_filter(struct rtnl_handle *rth, rtnl_filter_t filter,
+ void *arg1, rtnl_filter_t junk, void *arg2);
+
+extern int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n, pid_t peer,
+ unsigned groups, struct nlmsghdr *answer,
+ rtnl_filter_t junk, void *jarg);
+extern int rtnl_send(struct rtnl_handle *rth, const char *buf, int);
+extern int rtnl_send_check(struct rtnl_handle *rth, const char *buf, int);
+
+extern int addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 data);
+extern int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data,
+ int alen);
+extern int addraw_l(struct nlmsghdr *n, int maxlen, const void *data, int len);
+extern struct rtattr *addattr_nest(struct nlmsghdr *n, int maxlen, int type);
+extern int addattr_nest_end(struct nlmsghdr *n, struct rtattr *nest);
+extern struct rtattr *addattr_nest_compat(struct nlmsghdr *n, int maxlen,
+ int type, const void *data, int len);
+extern int addattr_nest_compat_end(struct nlmsghdr *n, struct rtattr *nest);
+extern int rta_addattr32(struct rtattr *rta, int maxlen, int type, __u32 data);
+extern int rta_addattr_l(struct rtattr *rta, int maxlen, int type,
+ const void *data, int alen);
+
+extern int parse_rtattr(struct rtattr *tb[], int max, struct rtattr *rta,
+ int len);
+extern int parse_rtattr_byindex(struct rtattr *tb[], int max,
+ struct rtattr *rta, int len);
+extern int __parse_rtattr_nested_compat(struct rtattr *tb[], int max,
+ struct rtattr *rta, int len);
+
+#define parse_rtattr_nested(tb, max, rta) \
+ (parse_rtattr((tb), (max), RTA_DATA(rta), RTA_PAYLOAD(rta)))
+
+#define parse_rtattr_nested_compat(tb, max, rta, data, len) \
+ ({ \
+ data = RTA_PAYLOAD(rta) >= len ? RTA_DATA(rta) : NULL; \
+ __parse_rtattr_nested_compat(tb, max, rta, len); \
+ })
+
+extern int rtnl_listen(struct rtnl_handle *, rtnl_filter_t handler, void *jarg);
+extern int rtnl_from_file(FILE *, rtnl_filter_t handler, void *jarg);
+
+#define NLMSG_TAIL(nmsg) \
+ ((struct rtattr *)(((uint8_t *)(nmsg)) \
+ + NLMSG_ALIGN((nmsg)->nlmsg_len)))
+
+#ifndef IFA_RTA
+#define IFA_RTA(r) \
+ ((struct rtattr *)(((char *)(r)) \
+ + NLMSG_ALIGN(sizeof(struct ifaddrmsg))))
+#endif
+#ifndef IFA_PAYLOAD
+#define IFA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifaddrmsg))
+#endif
+
+#ifndef IFLA_RTA
+#define IFLA_RTA(r) \
+ ((struct rtattr *)(((char *)(r)) \
+ + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
+#endif
+#ifndef IFLA_PAYLOAD
+#define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg))
+#endif
+
+#ifndef NDA_RTA
+#define NDA_RTA(r) \
+ ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg))))
+#endif
+#ifndef NDA_PAYLOAD
+#define NDA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ndmsg))
+#endif
+
+#ifndef NDTA_RTA
+#define NDTA_RTA(r) \
+ ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndtmsg))))
+#endif
+#ifndef NDTA_PAYLOAD
+#define NDTA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ndtmsg))
+#endif
+
+#endif /* __LIBNETLINK_H__ */
+
+#endif /* __linux__ */
diff --git a/pimd/mtracebis_routeget.c b/pimd/mtracebis_routeget.c
new file mode 100644
index 0000000..20618fa
--- /dev/null
+++ b/pimd/mtracebis_routeget.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Multicast Traceroute for FRRouting
+ * Copyright (C) 2018 Mladen Sablic
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef __linux__
+
+#include <asm/types.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <sys/types.h>
+#include <stdio.h>
+#include <arpa/inet.h>
+#include <string.h>
+
+#include "mtracebis_netlink.h"
+#include "mtracebis_routeget.h"
+
+static int find_dst(struct nlmsghdr *n, struct in_addr *src, struct in_addr *gw) /* parse an RTM_GETROUTE reply; fills *src/*gw when present; returns OIF ifindex, 0 if none, -1 on malformed msg */
+{
+ struct rtmsg *r = NLMSG_DATA(n);
+ int len = n->nlmsg_len;
+ struct rtattr *tb[RTA_MAX + 1];
+
+ len -= NLMSG_LENGTH(sizeof(*r)); /* bytes remaining after the fixed rtmsg header = attribute payload */
+ if (len < 0) {
+ fprintf(stderr, "BUG: wrong nlmsg len %d\n", len); /* message shorter than its own header: kernel/caller bug */
+ return -1;
+ }
+
+ parse_rtattr(tb, RTA_MAX, RTM_RTA(r), len); /* index attributes by type into tb[] */
+ if (tb[RTA_PREFSRC])
+ src->s_addr = *(uint32_t *)RTA_DATA(tb[RTA_PREFSRC]); /* preferred (source) address for this route */
+ if (tb[RTA_GATEWAY])
+ gw->s_addr = *(uint32_t *)RTA_DATA(tb[RTA_GATEWAY]); /* next-hop gateway, if routed */
+ if (tb[RTA_OIF])
+ return *(int *)RTA_DATA(tb[RTA_OIF]); /* outgoing interface index */
+ return 0; /* no RTA_OIF attribute in the reply */
+}
+
+int routeget(struct in_addr dst, struct in_addr *src, struct in_addr *gw) /* ask the kernel (RTM_GETROUTE) how it would route to dst; see find_dst() for outputs */
+{
+ struct {
+ struct nlmsghdr n;
+ struct rtmsg r;
+ char buf[1024]; /* room for request attributes and the in-place reply */
+ } req;
+ int ret;
+ struct rtnl_handle rth = {.fd = -1}; /* -1 marks the handle as not yet open */
+
+ memset(&req, 0, sizeof(req));
+
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
+ req.n.nlmsg_flags = NLM_F_REQUEST;
+ req.n.nlmsg_type = RTM_GETROUTE;
+ req.r.rtm_family = AF_INET; /* IPv4 lookup only */
+ req.r.rtm_table = 0; /* 0 = let the kernel pick table/protocol/scope/type */
+ req.r.rtm_protocol = 0;
+ req.r.rtm_scope = 0;
+ req.r.rtm_type = 0;
+ req.r.rtm_src_len = 0;
+ req.r.rtm_dst_len = 0;
+ req.r.rtm_tos = 0;
+
+ addattr_l(&req.n, sizeof(req), RTA_DST, &dst.s_addr, 4); /* 4-byte IPv4 destination */
+ req.r.rtm_dst_len = 32; /* host route: full /32 prefix length */
+
+ ret = rtnl_open(&rth, 0);
+
+ if (ret < 0 || rth.fd <= 0) /* NOTE(review): fd 0 is a valid descriptor, and ret may be 0 here even though no socket was opened — confirm intent */
+ return ret;
+
+ if (rtnl_talk(&rth, &req.n, 0, 0, &req.n, NULL, NULL) < 0) { /* reply overwrites req.n in place */
+ ret = -1;
+ goto close_rth;
+ }
+
+ ret = find_dst(&req.n, src, gw);
+close_rth:
+ rtnl_close(&rth); /* always release the netlink socket */
+ return ret;
+}
+
+#endif /* __linux__ */
diff --git a/pimd/mtracebis_routeget.h b/pimd/mtracebis_routeget.h
new file mode 100644
index 0000000..cf97723
--- /dev/null
+++ b/pimd/mtracebis_routeget.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Multicast Traceroute for FRRouting
+ * Copyright (C) 2018 Mladen Sablic
+ */
+
+#ifdef __linux__
+
+#ifndef ROUTEGET_H
+#define ROUTEGET_H
+
+#include <netinet/in.h>
+
+int routeget(struct in_addr dst, struct in_addr *src, struct in_addr *gw);
+
+#endif /* ROUTEGET_H */
+
+#endif /* __linux__ */
diff --git a/pimd/pim6_cmd.c b/pimd/pim6_cmd.c
new file mode 100644
index 0000000..262ce86
--- /dev/null
+++ b/pimd/pim6_cmd.c
@@ -0,0 +1,1900 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for IPv6 FRR
+ * Copyright (C) 2022 Vmware, Inc.
+ * Mobashshera Rasool <mrasool@vmware.com>
+ */
+
+#include <zebra.h>
+
+#include "lib/json.h"
+#include "command.h"
+#include "if.h"
+#include "prefix.h"
+#include "zclient.h"
+#include "plist.h"
+#include "hash.h"
+#include "nexthop.h"
+#include "vrf.h"
+#include "ferr.h"
+
+#include "pimd.h"
+#include "pim6_cmd.h"
+#include "pim_cmd_common.h"
+#include "pim_vty.h"
+#include "lib/northbound_cli.h"
+#include "pim_errors.h"
+#include "pim_nb.h"
+#include "pim_addr.h"
+#include "pim_nht.h"
+#include "pim_bsm.h"
+#include "pim_iface.h"
+#include "pim_zebra.h"
+#include "pim_instance.h"
+
+#include "pimd/pim6_cmd_clippy.c"
+
+static struct cmd_node debug_node = {
+ .name = "debug",
+ .node = DEBUG_NODE,
+ .prompt = "",
+ .config_write = pim_debug_config_write,
+};
+
+DEFPY (ipv6_pim_joinprune_time,
+ ipv6_pim_joinprune_time_cmd,
+ "ipv6 pim join-prune-interval (1-65535)$jpi",
+ IPV6_STR
+ PIM_STR
+ "Join Prune Send Interval\n"
+ "Seconds\n")
+{
+ return pim_process_join_prune_cmd(vty, jpi_str);
+}
+
+DEFPY (no_ipv6_pim_joinprune_time,
+ no_ipv6_pim_joinprune_time_cmd,
+ "no ipv6 pim join-prune-interval [(1-65535)]",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Join Prune Send Interval\n"
+ IGNORED_IN_NO_STR)
+{
+ return pim_process_no_join_prune_cmd(vty);
+}
+
+DEFPY (ipv6_pim_spt_switchover_infinity,
+ ipv6_pim_spt_switchover_infinity_cmd,
+ "ipv6 pim spt-switchover infinity-and-beyond",
+ IPV6_STR
+ PIM_STR
+ "SPT-Switchover\n"
+ "Never switch to SPT Tree\n")
+{
+ return pim_process_spt_switchover_infinity_cmd(vty);
+}
+
+DEFPY (ipv6_pim_spt_switchover_infinity_plist,
+ ipv6_pim_spt_switchover_infinity_plist_cmd,
+ "ipv6 pim spt-switchover infinity-and-beyond prefix-list WORD$plist",
+ IPV6_STR
+ PIM_STR
+ "SPT-Switchover\n"
+ "Never switch to SPT Tree\n"
+ "Prefix-List to control which groups to switch\n"
+ "Prefix-List name\n")
+{
+ return pim_process_spt_switchover_prefixlist_cmd(vty, plist);
+}
+
+DEFPY (no_ipv6_pim_spt_switchover_infinity,
+ no_ipv6_pim_spt_switchover_infinity_cmd,
+ "no ipv6 pim spt-switchover infinity-and-beyond",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "SPT-Switchover\n" /* fixed: was "SPT_Switchover"; match the affirmative form's help text */
+ "Never switch to SPT Tree\n")
+{
+ return pim_process_no_spt_switchover_cmd(vty); /* reverts to default SPT switchover behavior */
+}
+
+DEFPY (no_ipv6_pim_spt_switchover_infinity_plist,
+ no_ipv6_pim_spt_switchover_infinity_plist_cmd,
+ "no ipv6 pim spt-switchover infinity-and-beyond prefix-list WORD",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "SPT-Switchover\n" /* fixed: was "SPT_Switchover"; match the affirmative form's help text */
+ "Never switch to SPT Tree\n"
+ "Prefix-List to control which groups to switch\n"
+ "Prefix-List name\n")
+{
+ return pim_process_no_spt_switchover_cmd(vty); /* prefix-list arg ignored: any "no" form clears the setting */
+}
+
+DEFPY (ipv6_pim_packets,
+ ipv6_pim_packets_cmd,
+ "ipv6 pim packets (1-255)",
+ IPV6_STR
+ PIM_STR
+ "packets to process at one time per fd\n"
+ "Number of packets\n")
+{
+ return pim_process_pim_packet_cmd(vty, packets_str);
+}
+
+DEFPY (no_ipv6_pim_packets,
+ no_ipv6_pim_packets_cmd,
+ "no ipv6 pim packets [(1-255)]",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "packets to process at one time per fd\n"
+ IGNORED_IN_NO_STR)
+{
+ return pim_process_no_pim_packet_cmd(vty);
+}
+
+DEFPY (ipv6_pim_keep_alive,
+ ipv6_pim_keep_alive_cmd,
+ "ipv6 pim keep-alive-timer (1-65535)$kat",
+ IPV6_STR
+ PIM_STR
+ "Keep alive Timer\n"
+ "Seconds\n")
+{
+ return pim_process_keepalivetimer_cmd(vty, kat_str);
+}
+
+DEFPY (no_ipv6_pim_keep_alive,
+ no_ipv6_pim_keep_alive_cmd,
+ "no ipv6 pim keep-alive-timer [(1-65535)]",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Keep alive Timer\n"
+ IGNORED_IN_NO_STR)
+{
+ return pim_process_no_keepalivetimer_cmd(vty);
+}
+
+DEFPY (ipv6_pim_rp_keep_alive,
+ ipv6_pim_rp_keep_alive_cmd,
+ "ipv6 pim rp keep-alive-timer (1-65535)$kat",
+ IPV6_STR
+ PIM_STR
+ "Rendezvous Point\n"
+ "Keep alive Timer\n"
+ "Seconds\n")
+{
+ return pim_process_rp_kat_cmd(vty, kat_str);
+}
+
+DEFPY (no_ipv6_pim_rp_keep_alive,
+ no_ipv6_pim_rp_keep_alive_cmd,
+ "no ipv6 pim rp keep-alive-timer [(1-65535)]",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Rendezvous Point\n"
+ "Keep alive Timer\n"
+ IGNORED_IN_NO_STR)
+{
+ return pim_process_no_rp_kat_cmd(vty);
+}
+
+DEFPY (ipv6_pim_register_suppress,
+ ipv6_pim_register_suppress_cmd,
+ "ipv6 pim register-suppress-time (1-65535)$rst",
+ IPV6_STR
+ PIM_STR
+ "Register Suppress Timer\n"
+ "Seconds\n")
+{
+ return pim_process_register_suppress_cmd(vty, rst_str);
+}
+
+DEFPY (no_ipv6_pim_register_suppress,
+ no_ipv6_pim_register_suppress_cmd,
+ "no ipv6 pim register-suppress-time [(1-65535)]",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Register Suppress Timer\n"
+ IGNORED_IN_NO_STR)
+{
+ return pim_process_no_register_suppress_cmd(vty);
+}
+
+DEFPY (interface_ipv6_pim,
+ interface_ipv6_pim_cmd,
+ "ipv6 pim [passive$passive]",
+ IPV6_STR
+ PIM_STR
+ "Disable exchange of protocol packets\n")
+{
+ int ret;
+
+ ret = pim_process_ip_pim_cmd(vty);
+
+ if (ret != NB_OK)
+ return ret;
+
+ if (passive)
+ return pim_process_ip_pim_passive_cmd(vty, true);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (interface_no_ipv6_pim,
+ interface_no_ipv6_pim_cmd,
+ "no ipv6 pim [passive$passive]",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Disable exchange of protocol packets\n")
+{
+ if (passive)
+ return pim_process_ip_pim_passive_cmd(vty, false);
+
+ return pim_process_no_ip_pim_cmd(vty);
+}
+
+DEFPY (interface_ipv6_pim_drprio,
+ interface_ipv6_pim_drprio_cmd,
+ "ipv6 pim drpriority (1-4294967295)",
+ IPV6_STR
+ PIM_STR
+ "Set the Designated Router Election Priority\n"
+ "Value of the new DR Priority\n")
+{
+ return pim_process_ip_pim_drprio_cmd(vty, drpriority_str);
+}
+
+DEFPY (interface_no_ipv6_pim_drprio,
+ interface_no_ipv6_pim_drprio_cmd,
+ "no ipv6 pim drpriority [(1-4294967295)]",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Revert the Designated Router Priority to default\n"
+ "Old Value of the Priority\n")
+{
+ return pim_process_no_ip_pim_drprio_cmd(vty);
+}
+
+DEFPY (interface_ipv6_pim_hello,
+ interface_ipv6_pim_hello_cmd,
+ "ipv6 pim hello (1-65535) [(1-65535)]$hold",
+ IPV6_STR
+ PIM_STR
+ IFACE_PIM_HELLO_STR
+ IFACE_PIM_HELLO_TIME_STR
+ IFACE_PIM_HELLO_HOLD_STR)
+{
+ return pim_process_ip_pim_hello_cmd(vty, hello_str, hold_str);
+}
+
+DEFPY (interface_no_ipv6_pim_hello,
+ interface_no_ipv6_pim_hello_cmd,
+ "no ipv6 pim hello [(1-65535) [(1-65535)]]",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ IFACE_PIM_HELLO_STR
+ IGNORED_IN_NO_STR
+ IGNORED_IN_NO_STR)
+{
+ return pim_process_no_ip_pim_hello_cmd(vty);
+}
+
+DEFPY (interface_ipv6_pim_activeactive,
+ interface_ipv6_pim_activeactive_cmd,
+ "[no] ipv6 pim active-active",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Mark interface as Active-Active for MLAG operations\n")
+{
+ return pim_process_ip_pim_activeactive_cmd(vty, no);
+}
+
+DEFPY_HIDDEN (interface_ipv6_pim_ssm,
+ interface_ipv6_pim_ssm_cmd,
+ "ipv6 pim ssm",
+ IPV6_STR
+ PIM_STR
+ IFACE_PIM_STR)
+{
+ int ret;
+
+ ret = pim_process_ip_pim_cmd(vty);
+
+ if (ret != NB_OK)
+ return ret;
+
+ vty_out(vty,
+ "Enabled PIM SM on interface; configure PIM SSM range if needed\n");
+
+ return NB_OK;
+}
+
+DEFPY_HIDDEN (interface_no_ipv6_pim_ssm,
+ interface_no_ipv6_pim_ssm_cmd,
+ "no ipv6 pim ssm",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ IFACE_PIM_STR)
+{
+ return pim_process_no_ip_pim_cmd(vty);
+}
+
+DEFPY_HIDDEN (interface_ipv6_pim_sm,
+ interface_ipv6_pim_sm_cmd,
+ "ipv6 pim sm",
+ IPV6_STR
+ PIM_STR
+ IFACE_PIM_SM_STR)
+{
+ return pim_process_ip_pim_cmd(vty);
+}
+
+DEFPY_HIDDEN (interface_no_ipv6_pim_sm,
+ interface_no_ipv6_pim_sm_cmd,
+ "no ipv6 pim sm",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ IFACE_PIM_SM_STR)
+{
+ return pim_process_no_ip_pim_cmd(vty);
+}
+
+/* boundaries */
+DEFPY (interface_ipv6_pim_boundary_oil,
+ interface_ipv6_pim_boundary_oil_cmd,
+ "ipv6 multicast boundary oil WORD",
+ IPV6_STR
+ "Generic multicast configuration options\n"
+ "Define multicast boundary\n"
+ "Filter OIL by group using prefix list\n"
+ "Prefix list to filter OIL with\n")
+{
+ return pim_process_ip_pim_boundary_oil_cmd(vty, oil);
+}
+
+DEFPY (interface_no_ipv6_pim_boundary_oil,
+ interface_no_ipv6_pim_boundary_oil_cmd,
+ "no ipv6 multicast boundary oil [WORD]",
+ NO_STR
+ IPV6_STR
+ "Generic multicast configuration options\n"
+ "Define multicast boundary\n"
+ "Filter OIL by group using prefix list\n"
+ "Prefix list to filter OIL with\n")
+{
+ return pim_process_no_ip_pim_boundary_oil_cmd(vty);
+}
+
+DEFPY (interface_ipv6_mroute,
+ interface_ipv6_mroute_cmd,
+ "ipv6 mroute INTERFACE X:X::X:X$group [X:X::X:X]$source",
+ IPV6_STR
+ "Add multicast route\n"
+ "Outgoing interface name\n"
+ "Group address\n"
+ "Source address\n")
+{
+ return pim_process_ip_mroute_cmd(vty, interface, group_str, source_str);
+}
+
+DEFPY (interface_no_ipv6_mroute,
+ interface_no_ipv6_mroute_cmd,
+ "no ipv6 mroute INTERFACE X:X::X:X$group [X:X::X:X]$source",
+ NO_STR
+ IPV6_STR
+ "Add multicast route\n"
+ "Outgoing interface name\n"
+ "Group Address\n"
+ "Source Address\n")
+{
+ return pim_process_no_ip_mroute_cmd(vty, interface, group_str,
+ source_str);
+}
+
+DEFPY (ipv6_pim_rp,
+ ipv6_pim_rp_cmd,
+ "ipv6 pim rp X:X::X:X$rp [X:X::X:X/M]$gp",
+ IPV6_STR
+ PIM_STR
+ "Rendezvous Point\n"
+ "ipv6 address of RP\n"
+ "Group Address range to cover\n")
+{
+ const char *group_str = (gp_str) ? gp_str : "FF00::0/8";
+
+ return pim_process_rp_cmd(vty, rp_str, group_str);
+}
+
+DEFPY (no_ipv6_pim_rp,
+ no_ipv6_pim_rp_cmd,
+ "no ipv6 pim rp X:X::X:X$rp [X:X::X:X/M]$gp",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Rendezvous Point\n"
+ "ipv6 address of RP\n"
+ "Group Address range to cover\n")
+{
+ const char *group_str = (gp_str) ? gp_str : "FF00::0/8";
+
+ return pim_process_no_rp_cmd(vty, rp_str, group_str);
+}
+
+DEFPY (ipv6_pim_rp_prefix_list,
+ ipv6_pim_rp_prefix_list_cmd,
+ "ipv6 pim rp X:X::X:X$rp prefix-list WORD$plist",
+ IPV6_STR
+ PIM_STR
+ "Rendezvous Point\n"
+ "ipv6 address of RP\n"
+ "group prefix-list filter\n"
+ "Name of a prefix-list\n")
+{
+ return pim_process_rp_plist_cmd(vty, rp_str, plist);
+}
+
+DEFPY (no_ipv6_pim_rp_prefix_list,
+ no_ipv6_pim_rp_prefix_list_cmd,
+ "no ipv6 pim rp X:X::X:X$rp prefix-list WORD$plist",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Rendezvous Point\n"
+ "ipv6 address of RP\n"
+ "group prefix-list filter\n"
+ "Name of a prefix-list\n")
+{
+ return pim_process_no_rp_plist_cmd(vty, rp_str, plist);
+}
+
+DEFPY (ipv6_pim_bsm,
+ ipv6_pim_bsm_cmd,
+ "ipv6 pim bsm",
+ IPV6_STR
+ PIM_STR
+ "Enable BSM support on the interface\n")
+{
+ return pim_process_bsm_cmd(vty);
+}
+
+DEFPY (no_ipv6_pim_bsm,
+ no_ipv6_pim_bsm_cmd,
+ "no ipv6 pim bsm",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Enable BSM support on the interface\n")
+{
+ return pim_process_no_bsm_cmd(vty);
+}
+
+DEFPY (ipv6_pim_ucast_bsm,
+ ipv6_pim_ucast_bsm_cmd,
+ "ipv6 pim unicast-bsm",
+ IPV6_STR
+ PIM_STR
+ "Accept/Send unicast BSM on the interface\n")
+{
+ return pim_process_unicast_bsm_cmd(vty);
+}
+
+DEFPY (no_ipv6_pim_ucast_bsm,
+ no_ipv6_pim_ucast_bsm_cmd,
+ "no ipv6 pim unicast-bsm",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Accept/Send unicast BSM on the interface\n")
+{
+ return pim_process_no_unicast_bsm_cmd(vty);
+}
+
+DEFPY (ipv6_ssmpingd,
+ ipv6_ssmpingd_cmd,
+ "ipv6 ssmpingd [X:X::X:X]$source",
+ IPV6_STR
+ CONF_SSMPINGD_STR
+ "Source address\n")
+{
+ const char *src_str = (source_str) ? source_str : "::";
+
+ return pim_process_ssmpingd_cmd(vty, NB_OP_CREATE, src_str);
+}
+
+
+DEFPY (no_ipv6_ssmpingd,
+ no_ipv6_ssmpingd_cmd,
+ "no ipv6 ssmpingd [X:X::X:X]$source",
+ NO_STR
+ IPV6_STR
+ CONF_SSMPINGD_STR
+ "Source address\n")
+{
+ const char *src_str = (source_str) ? source_str : "::";
+
+ return pim_process_ssmpingd_cmd(vty, NB_OP_DESTROY, src_str);
+}
+
+DEFPY (interface_ipv6_mld_join, /* "ipv6 mld join": statically join a (S,G) or (*,G) on this interface */
+ interface_ipv6_mld_join_cmd,
+ "ipv6 mld join X:X::X:X$group [X:X::X:X$source]",
+ IPV6_STR
+ IFACE_MLD_STR
+ "MLD join multicast group\n"
+ "Multicast group address\n"
+ "Source address\n")
+{
+ char xpath[XPATH_MAXLEN];
+
+ if (!IN6_IS_ADDR_MULTICAST(&group)) { /* group must be an IPv6 multicast address (ff00::/8) */
+ vty_out(vty, "Invalid Multicast Address\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ if (source_str) {
+ if (IPV6_ADDR_SAME(&source, &in6addr_any)) { /* "::" is not a usable join source */
+ vty_out(vty, "Bad source address %s\n", source_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ } else
+ source_str = "::"; /* no source given: wildcard (*,G) join */
+
+ snprintf(xpath, sizeof(xpath), FRR_GMP_JOIN_XPATH, "frr-routing:ipv6",
+ group_str, source_str);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); /* create the join entry in the candidate config */
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY (interface_no_ipv6_mld_join,
+ interface_no_ipv6_mld_join_cmd,
+ "no ipv6 mld join X:X::X:X$group [X:X::X:X$source]",
+ NO_STR
+ IPV6_STR
+ IFACE_MLD_STR
+ "MLD join multicast group\n"
+ "Multicast group address\n"
+ "Source address\n")
+{
+ char xpath[XPATH_MAXLEN];
+
+ if (source_str) {
+ if (IPV6_ADDR_SAME(&source, &in6addr_any)) {
+ vty_out(vty, "Bad source address %s\n", source_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ } else
+ source_str = "::";
+
+ snprintf(xpath, sizeof(xpath), FRR_GMP_JOIN_XPATH, "frr-routing:ipv6",
+ group_str, source_str);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY (interface_ipv6_mld,
+ interface_ipv6_mld_cmd,
+ "ipv6 mld",
+ IPV6_STR
+ IFACE_MLD_STR)
+{
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true");
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv6");
+}
+
+DEFPY (interface_no_ipv6_mld, /* "no ipv6 mld": disable MLD; removes the whole GM config unless PIM still needs it */
+ interface_no_ipv6_mld_cmd,
+ "no ipv6 mld",
+ NO_STR
+ IPV6_STR
+ IFACE_MLD_STR)
+{
+ const struct lyd_node *pim_enable_dnode;
+ char pim_if_xpath[XPATH_MAXLEN + 64];
+
+ snprintf(pim_if_xpath, sizeof(pim_if_xpath), /* xpath of this interface's frr-pim IPv6 address-family node */
+ "%s/frr-pim:pim/address-family[address-family='%s']",
+ VTY_CURR_XPATH, "frr-routing:ipv6");
+
+ pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode, /* look up PIM enable state in the candidate config */
+ FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH,
+ "frr-routing:ipv6");
+ if (!pim_enable_dnode) {
+ nb_cli_enqueue_change(vty, pim_if_xpath, NB_OP_DESTROY, NULL); /* no PIM node at all: remove PIM AF and the GM node ("." below) */
+ nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
+ } else {
+ if (!yang_dnode_get_bool(pim_enable_dnode, ".")) { /* PIM node exists but is disabled: same full teardown */
+ nb_cli_enqueue_change(vty, pim_if_xpath, NB_OP_DESTROY,
+ NULL);
+ nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
+ } else
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, /* PIM still enabled: only flip MLD enable off, keep the node */
+ "false");
+ }
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv6");
+}
+
+DEFPY (interface_ipv6_mld_version,
+ interface_ipv6_mld_version_cmd,
+ "ipv6 mld version (1-2)$version",
+ IPV6_STR
+ IFACE_MLD_STR
+ "MLD version\n"
+ "MLD version number\n")
+{
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true");
+ nb_cli_enqueue_change(vty, "./mld-version", NB_OP_MODIFY, version_str);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv6");
+}
+
+DEFPY (interface_no_ipv6_mld_version,
+ interface_no_ipv6_mld_version_cmd,
+ "no ipv6 mld version [(1-2)]",
+ NO_STR
+ IPV6_STR
+ IFACE_MLD_STR
+ "MLD version\n"
+ "MLD version number\n")
+{
+ nb_cli_enqueue_change(vty, "./mld-version", NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv6");
+}
+
+DEFPY (interface_ipv6_mld_query_interval, /* "ipv6 mld query-interval": set general query period; implicitly enables MLD */
+ interface_ipv6_mld_query_interval_cmd,
+ "ipv6 mld query-interval (1-65535)$q_interval",
+ IPV6_STR
+ IFACE_MLD_STR
+ IFACE_MLD_QUERY_INTERVAL_STR
+ "Query interval in seconds\n")
+{
+ const struct lyd_node *pim_enable_dnode;
+
+ pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode, /* check whether PIM is already enabled on this interface */
+ FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH,
+ "frr-routing:ipv6");
+ if (!pim_enable_dnode) {
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true"); /* no PIM node: turn MLD on so the interval takes effect */
+ } else {
+ if (!yang_dnode_get_bool(pim_enable_dnode, "."))
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, /* PIM present but disabled: still enable MLD */
+ "true");
+ }
+
+ nb_cli_enqueue_change(vty, "./query-interval", NB_OP_MODIFY,
+ q_interval_str);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv6");
+}
+
+DEFPY (interface_no_ipv6_mld_query_interval,
+ interface_no_ipv6_mld_query_interval_cmd,
+ "no ipv6 mld query-interval [(1-65535)]",
+ NO_STR
+ IPV6_STR
+ IFACE_MLD_STR
+ IFACE_MLD_QUERY_INTERVAL_STR
+ IGNORED_IN_NO_STR)
+{
+ nb_cli_enqueue_change(vty, "./query-interval", NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv6");
+}
+
+DEFPY (ipv6_mld_group_watermark,
+ ipv6_mld_group_watermark_cmd,
+ "ipv6 mld watermark-warn (1-65535)$limit",
+ IPV6_STR
+ MLD_STR
+ "Configure group limit for watermark warning\n"
+ "Group count to generate watermark warning\n")
+{
+ PIM_DECLVAR_CONTEXT_VRF(vrf, pim);
+ pim->gm_watermark_limit = limit;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (no_ipv6_mld_group_watermark,
+ no_ipv6_mld_group_watermark_cmd,
+ "no ipv6 mld watermark-warn [(1-65535)$limit]",
+ NO_STR
+ IPV6_STR
+ MLD_STR
+ "Unconfigure group limit for watermark warning\n"
+ IGNORED_IN_NO_STR)
+{
+ PIM_DECLVAR_CONTEXT_VRF(vrf, pim);
+ pim->gm_watermark_limit = 0;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (interface_ipv6_mld_query_max_response_time,
+ interface_ipv6_mld_query_max_response_time_cmd,
+ "ipv6 mld query-max-response-time (1-65535)$qmrt",
+ IPV6_STR
+ IFACE_MLD_STR
+ IFACE_MLD_QUERY_MAX_RESPONSE_TIME_STR
+ "Query response value in deci-seconds\n")
+{
+ return gm_process_query_max_response_time_cmd(vty, qmrt_str);
+}
+
+DEFPY (interface_no_ipv6_mld_query_max_response_time,
+ interface_no_ipv6_mld_query_max_response_time_cmd,
+ "no ipv6 mld query-max-response-time [(1-65535)]",
+ NO_STR
+ IPV6_STR
+ IFACE_MLD_STR
+ IFACE_MLD_QUERY_MAX_RESPONSE_TIME_STR
+ IGNORED_IN_NO_STR)
+{
+ return gm_process_no_query_max_response_time_cmd(vty);
+}
+
+DEFPY (interface_ipv6_mld_last_member_query_count,
+ interface_ipv6_mld_last_member_query_count_cmd,
+ "ipv6 mld last-member-query-count (1-255)$lmqc",
+ IPV6_STR
+ IFACE_MLD_STR
+ IFACE_MLD_LAST_MEMBER_QUERY_COUNT_STR
+ "Last member query count\n")
+{
+ return gm_process_last_member_query_count_cmd(vty, lmqc_str);
+}
+
+DEFPY (interface_no_ipv6_mld_last_member_query_count,
+ interface_no_ipv6_mld_last_member_query_count_cmd,
+ "no ipv6 mld last-member-query-count [(1-255)]",
+ NO_STR
+ IPV6_STR
+ IFACE_MLD_STR
+ IFACE_MLD_LAST_MEMBER_QUERY_COUNT_STR
+ IGNORED_IN_NO_STR)
+{
+ return gm_process_no_last_member_query_count_cmd(vty);
+}
+
+DEFPY (interface_ipv6_mld_last_member_query_interval,
+ interface_ipv6_mld_last_member_query_interval_cmd,
+ "ipv6 mld last-member-query-interval (1-65535)$lmqi",
+ IPV6_STR
+ IFACE_MLD_STR
+ IFACE_MLD_LAST_MEMBER_QUERY_INTERVAL_STR
+ "Last member query interval in deciseconds\n")
+{
+ return gm_process_last_member_query_interval_cmd(vty, lmqi_str);
+}
+
+DEFPY (interface_no_ipv6_mld_last_member_query_interval,
+ interface_no_ipv6_mld_last_member_query_interval_cmd,
+ "no ipv6 mld last-member-query-interval [(1-65535)]",
+ NO_STR
+ IPV6_STR
+ IFACE_MLD_STR
+ IFACE_MLD_LAST_MEMBER_QUERY_INTERVAL_STR
+ IGNORED_IN_NO_STR)
+{
+ return gm_process_no_last_member_query_interval_cmd(vty);
+}
+
+DEFPY (show_ipv6_pim_rp,
+ show_ipv6_pim_rp_cmd,
+ "show ipv6 pim [vrf NAME] rp-info [X:X::X:X/M$group] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM RP information\n"
+ "Multicast Group range\n"
+ JSON_STR)
+{
+ return pim_show_rp_helper(vrf, vty, group_str, (struct prefix *)group,
+ !!json);
+}
+
+DEFPY (show_ipv6_pim_rp_vrf_all,
+ show_ipv6_pim_rp_vrf_all_cmd,
+ "show ipv6 pim vrf all rp-info [X:X::X:X/M$group] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM RP information\n"
+ "Multicast Group range\n"
+ JSON_STR)
+{
+ return pim_show_rp_vrf_all_helper(vty, group_str,
+ (struct prefix *)group, !!json);
+}
+
+DEFPY (show_ipv6_pim_rpf,
+ show_ipv6_pim_rpf_cmd,
+ "show ipv6 pim [vrf NAME] rpf [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM cached source rpf information\n"
+ JSON_STR)
+{
+ return pim_show_rpf_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ipv6_pim_rpf_vrf_all,
+ show_ipv6_pim_rpf_vrf_all_cmd,
+ "show ipv6 pim vrf all rpf [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM cached source rpf information\n"
+ JSON_STR)
+{
+ return pim_show_rpf_vrf_all_helper(vty, !!json);
+}
+
+DEFPY (show_ipv6_pim_secondary,
+ show_ipv6_pim_secondary_cmd,
+ "show ipv6 pim [vrf NAME] secondary",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM neighbor addresses\n")
+{
+ return pim_show_secondary_helper(vrf, vty);
+}
+
+DEFPY (show_ipv6_pim_statistics,
+ show_ipv6_pim_statistics_cmd,
+ "show ipv6 pim [vrf NAME] statistics [interface WORD$word] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM statistics\n"
+ INTERFACE_STR
+ "PIM interface\n"
+ JSON_STR)
+{
+ return pim_show_statistics_helper(vrf, vty, word, !!json);
+}
+
+DEFPY (show_ipv6_pim_upstream,
+ show_ipv6_pim_upstream_cmd,
+ "show ipv6 pim [vrf NAME] upstream [X:X::X:X$s_or_g [X:X::X:X$g]] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM upstream information\n"
+ "The Source or Group\n"
+ "The Group\n"
+ JSON_STR)
+{
+ return pim_show_upstream_helper(vrf, vty, s_or_g, g, !!json);
+}
+
+DEFPY (show_ipv6_pim_upstream_vrf_all,
+ show_ipv6_pim_upstream_vrf_all_cmd,
+ "show ipv6 pim vrf all upstream [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM upstream information\n"
+ JSON_STR)
+{
+ return pim_show_upstream_vrf_all_helper(vty, !!json);
+}
+
+DEFPY (show_ipv6_pim_upstream_join_desired,
+ show_ipv6_pim_upstream_join_desired_cmd,
+ "show ipv6 pim [vrf NAME] upstream-join-desired [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM upstream join-desired\n"
+ JSON_STR)
+{
+ return pim_show_upstream_join_desired_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ipv6_pim_upstream_rpf,
+ show_ipv6_pim_upstream_rpf_cmd,
+ "show ipv6 pim [vrf NAME] upstream-rpf [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM upstream source rpf\n"
+ JSON_STR)
+{
+ return pim_show_upstream_rpf_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ipv6_pim_state,
+ show_ipv6_pim_state_cmd,
+ "show ipv6 pim [vrf NAME] state [X:X::X:X$s_or_g [X:X::X:X$g]] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM state information\n"
+ "Unicast or Multicast address\n"
+ "Multicast address\n"
+ JSON_STR)
+{
+ return pim_show_state_helper(vrf, vty, s_or_g_str, g_str, !!json);
+}
+
+DEFPY (show_ipv6_pim_state_vrf_all,
+ show_ipv6_pim_state_vrf_all_cmd,
+ "show ipv6 pim vrf all state [X:X::X:X$s_or_g [X:X::X:X$g]] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM state information\n"
+ "Unicast or Multicast address\n"
+ "Multicast address\n"
+ JSON_STR)
+{
+ return pim_show_state_vrf_all_helper(vty, s_or_g_str, g_str, !!json);
+}
+
+DEFPY (show_ipv6_pim_channel,
+ show_ipv6_pim_channel_cmd,
+ "show ipv6 pim [vrf NAME] channel [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM downstream channel info\n"
+ JSON_STR)
+{
+ return pim_show_channel_cmd_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ipv6_pim_interface,
+ show_ipv6_pim_interface_cmd,
+ "show ipv6 pim [vrf NAME] interface [detail|WORD]$interface [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface information\n"
+ "Detailed output\n"
+ "interface name\n"
+ JSON_STR)
+{
+ return pim_show_interface_cmd_helper(vrf, vty, !!json, false,
+ interface);
+}
+
+DEFPY (show_ipv6_pim_interface_vrf_all,
+ show_ipv6_pim_interface_vrf_all_cmd,
+ "show ipv6 pim vrf all interface [detail|WORD]$interface [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface information\n"
+ "Detailed output\n"
+ "interface name\n"
+ JSON_STR)
+{
+ return pim_show_interface_vrf_all_cmd_helper(vty, !!json, false,
+ interface);
+}
+
+DEFPY (show_ipv6_pim_join,
+ show_ipv6_pim_join_cmd,
+ "show ipv6 pim [vrf NAME] join [X:X::X:X$s_or_g [X:X::X:X$g]] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface join information\n"
+ "The Source or Group\n"
+ "The Group\n"
+ JSON_STR)
+{
+ return pim_show_join_cmd_helper(vrf, vty, s_or_g, g, json);
+}
+
+DEFPY (show_ipv6_pim_join_vrf_all,
+ show_ipv6_pim_join_vrf_all_cmd,
+ "show ipv6 pim vrf all join [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface join information\n"
+ JSON_STR)
+{
+ return pim_show_join_vrf_all_cmd_helper(vty, json);
+}
+
+DEFPY (show_ipv6_pim_jp_agg,
+ show_ipv6_pim_jp_agg_cmd,
+ "show ipv6 pim [vrf NAME] jp-agg",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "join prune aggregation list\n")
+{
+ return pim_show_jp_agg_list_cmd_helper(vrf, vty);
+}
+
+DEFPY (show_ipv6_pim_local_membership,
+ show_ipv6_pim_local_membership_cmd,
+ "show ipv6 pim [vrf NAME] local-membership [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface local-membership\n"
+ JSON_STR)
+{
+ return pim_show_membership_cmd_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ipv6_pim_neighbor,
+ show_ipv6_pim_neighbor_cmd,
+ "show ipv6 pim [vrf NAME] neighbor [detail|WORD]$interface [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM neighbor information\n"
+ "Detailed output\n"
+ "Name of interface or neighbor\n"
+ JSON_STR)
+{
+ return pim_show_neighbors_cmd_helper(vrf, vty, json, interface);
+}
+
+DEFPY (show_ipv6_pim_neighbor_vrf_all,
+ show_ipv6_pim_neighbor_vrf_all_cmd,
+ "show ipv6 pim vrf all neighbor [detail|WORD]$interface [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM neighbor information\n"
+ "Detailed output\n"
+ "Name of interface or neighbor\n"
+ JSON_STR)
+{
+ return pim_show_neighbors_vrf_all_cmd_helper(vty, json, interface);
+}
+
+DEFPY (show_ipv6_pim_nexthop,
+ show_ipv6_pim_nexthop_cmd,
+ "show ipv6 pim [vrf NAME] nexthop [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM cached nexthop rpf information\n"
+ JSON_STR)
+{
+ return pim_show_nexthop_cmd_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ipv6_pim_nexthop_lookup,
+ show_ipv6_pim_nexthop_lookup_cmd,
+ "show ipv6 pim [vrf NAME] nexthop-lookup X:X::X:X$source X:X::X:X$group",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM cached nexthop rpf lookup\n"
+ "Source/RP address\n"
+ "Multicast Group address\n")
+{
+ return pim_show_nexthop_lookup_cmd_helper(vrf, vty, source, group);
+}
+
+DEFPY (show_ipv6_multicast,
+ show_ipv6_multicast_cmd,
+ "show ipv6 multicast [vrf NAME]",
+ SHOW_STR
+ IPV6_STR
+ "Multicast global information\n"
+ VRF_CMD_HELP_STR)
+{
+ return pim_show_multicast_helper(vrf, vty);
+}
+
+DEFPY (show_ipv6_multicast_vrf_all,
+ show_ipv6_multicast_vrf_all_cmd,
+ "show ipv6 multicast vrf all",
+ SHOW_STR
+ IPV6_STR
+ "Multicast global information\n"
+ VRF_CMD_HELP_STR)
+{
+ return pim_show_multicast_vrf_all_helper(vty);
+}
+
+DEFPY (show_ipv6_multicast_count,
+ show_ipv6_multicast_count_cmd,
+ "show ipv6 multicast count [vrf NAME] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ "Multicast global information\n"
+ "Data packet count\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ return pim_show_multicast_count_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ipv6_multicast_count_vrf_all,
+ show_ipv6_multicast_count_vrf_all_cmd,
+ "show ipv6 multicast count vrf all [json$json]",
+ SHOW_STR
+ IPV6_STR
+ "Multicast global information\n"
+ "Data packet count\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ return pim_show_multicast_count_vrf_all_helper(vty, !!json);
+}
+
+DEFPY (show_ipv6_mroute,
+ show_ipv6_mroute_cmd,
+ "show ipv6 mroute [vrf NAME] [X:X::X:X$s_or_g [X:X::X:X$g]] [fill$fill] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "The Source or Group\n"
+ "The Group\n"
+ "Fill in Assumed data\n"
+ JSON_STR)
+{
+ return pim_show_mroute_helper(vrf, vty, s_or_g, g, !!fill, !!json);
+}
+
+DEFPY (show_ipv6_mroute_vrf_all,
+ show_ipv6_mroute_vrf_all_cmd,
+ "show ipv6 mroute vrf all [fill$fill] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Fill in Assumed data\n"
+ JSON_STR)
+{
+ return pim_show_mroute_vrf_all_helper(vty, !!fill, !!json);
+}
+
+DEFPY (show_ipv6_mroute_count,
+ show_ipv6_mroute_count_cmd,
+ "show ipv6 mroute [vrf NAME] count [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Route and packet count data\n"
+ JSON_STR)
+{
+ return pim_show_mroute_count_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ipv6_mroute_count_vrf_all,
+ show_ipv6_mroute_count_vrf_all_cmd,
+ "show ipv6 mroute vrf all count [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Route and packet count data\n"
+ JSON_STR)
+{
+ return pim_show_mroute_count_vrf_all_helper(vty, !!json);
+}
+
+DEFPY (show_ipv6_mroute_summary,
+ show_ipv6_mroute_summary_cmd,
+ "show ipv6 mroute [vrf NAME] summary [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Summary of all mroutes\n"
+ JSON_STR)
+{
+ return pim_show_mroute_summary_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ipv6_mroute_summary_vrf_all,
+ show_ipv6_mroute_summary_vrf_all_cmd,
+ "show ipv6 mroute vrf all summary [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Summary of all mroutes\n"
+ JSON_STR)
+{
+ return pim_show_mroute_summary_vrf_all_helper(vty, !!json);
+}
+
+DEFPY (show_ipv6_pim_interface_traffic,
+ show_ipv6_pim_interface_traffic_cmd,
+ "show ipv6 pim [vrf NAME] interface traffic [WORD$if_name] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface information\n"
+ "Protocol Packet counters\n"
+ "Interface name\n"
+ JSON_STR)
+{
+ return pim_show_interface_traffic_helper(vrf, if_name, vty, !!json);
+}
+
+DEFPY (show_ipv6_pim_bsr,
+ show_ipv6_pim_bsr_cmd,
+ "show ipv6 pim bsr [vrf NAME] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ "boot-strap router information\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ return pim_show_bsr_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ipv6_pim_bsm_db,
+ show_ipv6_pim_bsm_db_cmd,
+ "show ipv6 pim bsm-database [vrf NAME] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ "PIM cached bsm packets information\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ return pim_show_bsm_db_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ipv6_pim_bsrp,
+ show_ipv6_pim_bsrp_cmd,
+ "show ipv6 pim bsrp-info [vrf NAME] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ "PIM cached group-rp mappings information\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ return pim_show_group_rp_mappings_info_helper(vrf, vty, !!json);
+}
+
+DEFPY (clear_ipv6_pim_statistics,
+ clear_ipv6_pim_statistics_cmd,
+ "clear ipv6 pim statistics [vrf NAME]$name",
+ CLEAR_STR
+ IPV6_STR
+ CLEAR_IP_PIM_STR
+ VRF_CMD_HELP_STR
+ "Reset PIM statistics\n")
+{
+ struct vrf *v = pim_cmd_lookup(vty, name);
+
+ if (!v)
+ return CMD_WARNING;
+
+ clear_pim_statistics(v->info);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (clear_ipv6_pim_interface_traffic,
+ clear_ipv6_pim_interface_traffic_cmd,
+ "clear ipv6 pim [vrf NAME] interface traffic",
+ CLEAR_STR
+ IPV6_STR
+ CLEAR_IP_PIM_STR
+ VRF_CMD_HELP_STR
+ "Reset PIM interfaces\n"
+ "Reset Protocol Packet counters\n")
+{
+ return clear_pim_interface_traffic(vrf, vty);
+}
+
+DEFPY (clear_ipv6_mroute,
+ clear_ipv6_mroute_cmd,
+ "clear ipv6 mroute [vrf NAME]$name",
+ CLEAR_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR)
+{
+ struct vrf *v = pim_cmd_lookup(vty, name);
+
+ if (!v)
+ return CMD_WARNING;
+
+ clear_mroute(v->info);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (clear_ipv6_pim_oil,
+ clear_ipv6_pim_oil_cmd,
+ "clear ipv6 pim [vrf NAME]$name oil",
+ CLEAR_STR
+ IPV6_STR
+ CLEAR_IP_PIM_STR
+ VRF_CMD_HELP_STR
+ "Rescan PIMv6 OIL (output interface list)\n")
+{
+ struct vrf *v = pim_cmd_lookup(vty, name);
+
+ if (!v)
+ return CMD_WARNING;
+
+ pim_scan_oil(v->info);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (clear_ipv6_mroute_count,
+ clear_ipv6_mroute_count_cmd,
+ "clear ipv6 mroute [vrf NAME]$name count",
+ CLEAR_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Route and packet count data\n")
+{
+ return clear_ip_mroute_count_command(vty, name);
+}
+
+DEFPY (clear_ipv6_pim_interfaces,
+ clear_ipv6_pim_interfaces_cmd,
+ "clear ipv6 pim [vrf NAME] interfaces",
+ CLEAR_STR
+ IPV6_STR
+ CLEAR_IP_PIM_STR
+ VRF_CMD_HELP_STR
+ "Reset PIM interfaces\n")
+{
+ struct vrf *v = pim_cmd_lookup(vty, vrf);
+
+ if (!v)
+ return CMD_WARNING;
+
+ clear_pim_interfaces(v->info);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (clear_ipv6_pim_bsr_db,
+ clear_ipv6_pim_bsr_db_cmd,
+ "clear ipv6 pim [vrf NAME] bsr-data",
+ CLEAR_STR
+ IPV6_STR
+ CLEAR_IP_PIM_STR
+ VRF_CMD_HELP_STR
+ "Reset pim bsr data\n")
+{
+ struct vrf *v;
+
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+ if (!v)
+ return CMD_WARNING;
+
+ pim_bsm_clear(v->info);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6,
+ debug_pimv6_cmd,
+ "[no] debug pimv6",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR)
+{
+ if (!no)
+ return pim_debug_pim_cmd();
+ else
+ return pim_no_debug_pim_cmd();
+}
+
+DEFPY (debug_pimv6_nht,
+ debug_pimv6_nht_cmd,
+ "[no] debug pimv6 nht",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ "Nexthop Tracking\n")
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_NHT;
+ else
+ PIM_DONT_DEBUG_PIM_NHT;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_nht_det,
+ debug_pimv6_nht_det_cmd,
+ "[no] debug pimv6 nht detail",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ "Nexthop Tracking\n"
+ "Detailed Information\n")
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_NHT_DETAIL;
+ else
+ PIM_DONT_DEBUG_PIM_NHT_DETAIL;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_events,
+ debug_pimv6_events_cmd,
+ "[no] debug pimv6 events",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_EVENTS_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_EVENTS;
+ else
+ PIM_DONT_DEBUG_PIM_EVENTS;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_packets,
+ debug_pimv6_packets_cmd,
+ "[no] debug pimv6 packets [<hello$hello|joins$joins|register$registers>]",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_PACKETS_STR
+ DEBUG_PIMV6_HELLO_PACKETS_STR
+ DEBUG_PIMV6_J_P_PACKETS_STR
+ DEBUG_PIMV6_PIM_REG_PACKETS_STR)
+{
+ if (!no)
+ return pim_debug_pim_packets_cmd(hello, joins, registers, vty);
+ else
+ return pim_no_debug_pim_packets_cmd(hello, joins, registers,
+ vty);
+}
+
+DEFPY (debug_pimv6_packetdump_send,
+ debug_pimv6_packetdump_send_cmd,
+ "[no] debug pimv6 packet-dump send",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_PACKETDUMP_STR
+ DEBUG_PIMV6_PACKETDUMP_SEND_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_PACKETDUMP_SEND;
+ else
+ PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_packetdump_recv,
+ debug_pimv6_packetdump_recv_cmd,
+ "[no] debug pimv6 packet-dump receive",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_PACKETDUMP_STR
+ DEBUG_PIMV6_PACKETDUMP_RECV_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_PACKETDUMP_RECV;
+ else
+ PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_trace,
+ debug_pimv6_trace_cmd,
+ "[no] debug pimv6 trace",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_TRACE_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_TRACE;
+ else
+ PIM_DONT_DEBUG_PIM_TRACE;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_trace_detail,
+ debug_pimv6_trace_detail_cmd,
+ "[no] debug pimv6 trace detail",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_TRACE_STR
+ "Detailed Information\n")
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_TRACE_DETAIL;
+ else
+ PIM_DONT_DEBUG_PIM_TRACE_DETAIL;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_zebra,
+ debug_pimv6_zebra_cmd,
+ "[no] debug pimv6 zebra",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_ZEBRA_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_ZEBRA;
+ else
+ PIM_DONT_DEBUG_ZEBRA;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_mroute6,
+ debug_mroute6_cmd,
+ "[no] debug mroute6",
+ NO_STR
+ DEBUG_STR
+ DEBUG_MROUTE6_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_MROUTE;
+ else
+ PIM_DONT_DEBUG_MROUTE;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_mroute6_detail,
+ debug_mroute6_detail_cmd,
+ "[no] debug mroute6 detail",
+ NO_STR
+ DEBUG_STR
+ DEBUG_MROUTE6_STR
+ "detailed\n")
+{
+ if (!no)
+ PIM_DO_DEBUG_MROUTE_DETAIL;
+ else
+ PIM_DONT_DEBUG_MROUTE_DETAIL;
+
+ return CMD_SUCCESS;
+}
+
+DEFUN_NOSH (show_debugging_pimv6,
+ show_debugging_pimv6_cmd,
+ "show debugging [pimv6]",
+ SHOW_STR
+ DEBUG_STR
+ "PIMv6 Information\n")
+{
+ vty_out(vty, "PIMv6 debugging status\n");
+
+ pim_debug_config_write(vty);
+
+ cmd_show_lib_debugs(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_mld,
+ debug_mld_cmd,
+ "[no] debug mld",
+ NO_STR
+ DEBUG_STR
+ DEBUG_MLD_STR)
+{
+ if (!no) {
+ PIM_DO_DEBUG_GM_EVENTS;
+ PIM_DO_DEBUG_GM_PACKETS;
+ PIM_DO_DEBUG_GM_TRACE;
+ } else {
+ PIM_DONT_DEBUG_GM_EVENTS;
+ PIM_DONT_DEBUG_GM_PACKETS;
+ PIM_DONT_DEBUG_GM_TRACE;
+ }
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_mld_events,
+ debug_mld_events_cmd,
+ "[no] debug mld events",
+ NO_STR
+ DEBUG_STR
+ DEBUG_MLD_STR
+ DEBUG_MLD_EVENTS_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_GM_EVENTS;
+ else
+ PIM_DONT_DEBUG_GM_EVENTS;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_mld_packets,
+ debug_mld_packets_cmd,
+ "[no] debug mld packets",
+ NO_STR
+ DEBUG_STR
+ DEBUG_MLD_STR
+ DEBUG_MLD_PACKETS_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_GM_PACKETS;
+ else
+ PIM_DONT_DEBUG_GM_PACKETS;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_mld_trace,
+ debug_mld_trace_cmd,
+ "[no] debug mld trace",
+ NO_STR
+ DEBUG_STR
+ DEBUG_MLD_STR
+ DEBUG_MLD_TRACE_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_GM_TRACE;
+ else
+ PIM_DONT_DEBUG_GM_TRACE;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_mld_trace_detail,
+ debug_mld_trace_detail_cmd,
+ "[no] debug mld trace detail",
+ NO_STR
+ DEBUG_STR
+ DEBUG_MLD_STR
+ DEBUG_MLD_TRACE_STR
+ "detailed\n")
+{
+ if (!no)
+ PIM_DO_DEBUG_GM_TRACE_DETAIL;
+ else
+ PIM_DONT_DEBUG_GM_TRACE_DETAIL;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_bsm,
+ debug_pimv6_bsm_cmd,
+ "[no] debug pimv6 bsm",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_BSM_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_BSM;
+ else
+ PIM_DONT_DEBUG_BSM;
+
+ return CMD_SUCCESS;
+}
+
+void pim_cmd_init(void)
+{
+ if_cmd_init(pim_interface_config_write);
+
+ install_node(&debug_node);
+
+ install_element(CONFIG_NODE, &ipv6_pim_joinprune_time_cmd);
+ install_element(CONFIG_NODE, &no_ipv6_pim_joinprune_time_cmd);
+ install_element(CONFIG_NODE, &ipv6_pim_spt_switchover_infinity_cmd);
+ install_element(CONFIG_NODE, &ipv6_pim_spt_switchover_infinity_plist_cmd);
+ install_element(CONFIG_NODE, &no_ipv6_pim_spt_switchover_infinity_cmd);
+ install_element(CONFIG_NODE, &no_ipv6_pim_spt_switchover_infinity_plist_cmd);
+ install_element(CONFIG_NODE, &ipv6_pim_packets_cmd);
+ install_element(CONFIG_NODE, &no_ipv6_pim_packets_cmd);
+ install_element(CONFIG_NODE, &ipv6_pim_keep_alive_cmd);
+ install_element(CONFIG_NODE, &no_ipv6_pim_keep_alive_cmd);
+ install_element(CONFIG_NODE, &ipv6_pim_rp_keep_alive_cmd);
+ install_element(CONFIG_NODE, &no_ipv6_pim_rp_keep_alive_cmd);
+ install_element(CONFIG_NODE, &ipv6_pim_register_suppress_cmd);
+ install_element(CONFIG_NODE, &no_ipv6_pim_register_suppress_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_pim_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_pim_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_pim_drprio_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_pim_drprio_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_pim_hello_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_pim_hello_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_pim_activeactive_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_pim_ssm_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_pim_ssm_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_pim_sm_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_pim_sm_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_ipv6_pim_boundary_oil_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_no_ipv6_pim_boundary_oil_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_mroute_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_mroute_cmd);
+ /* Install BSM command */
+ install_element(INTERFACE_NODE, &ipv6_pim_bsm_cmd);
+ install_element(INTERFACE_NODE, &no_ipv6_pim_bsm_cmd);
+ install_element(INTERFACE_NODE, &ipv6_pim_ucast_bsm_cmd);
+ install_element(INTERFACE_NODE, &no_ipv6_pim_ucast_bsm_cmd);
+ install_element(CONFIG_NODE, &ipv6_pim_rp_cmd);
+ install_element(VRF_NODE, &ipv6_pim_rp_cmd);
+ install_element(CONFIG_NODE, &no_ipv6_pim_rp_cmd);
+ install_element(VRF_NODE, &no_ipv6_pim_rp_cmd);
+ install_element(CONFIG_NODE, &ipv6_pim_rp_prefix_list_cmd);
+ install_element(VRF_NODE, &ipv6_pim_rp_prefix_list_cmd);
+ install_element(CONFIG_NODE, &no_ipv6_pim_rp_prefix_list_cmd);
+ install_element(VRF_NODE, &no_ipv6_pim_rp_prefix_list_cmd);
+ install_element(CONFIG_NODE, &ipv6_ssmpingd_cmd);
+ install_element(VRF_NODE, &ipv6_ssmpingd_cmd);
+ install_element(CONFIG_NODE, &no_ipv6_ssmpingd_cmd);
+ install_element(VRF_NODE, &no_ipv6_ssmpingd_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_mld_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_mld_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_mld_join_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_mld_join_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_mld_version_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_mld_version_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_mld_query_interval_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_no_ipv6_mld_query_interval_cmd);
+ install_element(CONFIG_NODE, &ipv6_mld_group_watermark_cmd);
+ install_element(VRF_NODE, &ipv6_mld_group_watermark_cmd);
+ install_element(CONFIG_NODE, &no_ipv6_mld_group_watermark_cmd);
+ install_element(VRF_NODE, &no_ipv6_mld_group_watermark_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_ipv6_mld_query_max_response_time_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_no_ipv6_mld_query_max_response_time_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_ipv6_mld_last_member_query_count_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_no_ipv6_mld_last_member_query_count_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_ipv6_mld_last_member_query_interval_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_no_ipv6_mld_last_member_query_interval_cmd);
+
+ install_element(VIEW_NODE, &show_ipv6_pim_rp_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_rp_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_rpf_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_rpf_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_secondary_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_statistics_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_upstream_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_upstream_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_upstream_join_desired_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_upstream_rpf_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_state_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_state_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_channel_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_interface_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_interface_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_join_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_join_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_jp_agg_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_local_membership_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_neighbor_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_neighbor_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_nexthop_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_nexthop_lookup_cmd);
+ install_element(VIEW_NODE, &show_ipv6_multicast_cmd);
+ install_element(VIEW_NODE, &show_ipv6_multicast_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_multicast_count_cmd);
+ install_element(VIEW_NODE, &show_ipv6_multicast_count_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_mroute_cmd);
+ install_element(VIEW_NODE, &show_ipv6_mroute_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_mroute_count_cmd);
+ install_element(VIEW_NODE, &show_ipv6_mroute_count_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_mroute_summary_cmd);
+ install_element(VIEW_NODE, &show_ipv6_mroute_summary_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_interface_traffic_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_bsr_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_bsm_db_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_bsrp_cmd);
+ install_element(ENABLE_NODE, &clear_ipv6_pim_statistics_cmd);
+ install_element(ENABLE_NODE, &clear_ipv6_mroute_cmd);
+ install_element(ENABLE_NODE, &clear_ipv6_pim_oil_cmd);
+ install_element(ENABLE_NODE, &clear_ipv6_mroute_count_cmd);
+ install_element(ENABLE_NODE, &clear_ipv6_pim_bsr_db_cmd);
+ install_element(ENABLE_NODE, &clear_ipv6_pim_interfaces_cmd);
+ install_element(ENABLE_NODE, &clear_ipv6_pim_interface_traffic_cmd);
+
+ install_element(ENABLE_NODE, &show_debugging_pimv6_cmd);
+
+ install_element(ENABLE_NODE, &debug_pimv6_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_nht_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_nht_det_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_events_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_packets_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_packetdump_send_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_packetdump_recv_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_trace_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_trace_detail_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_zebra_cmd);
+ install_element(ENABLE_NODE, &debug_mroute6_cmd);
+ install_element(ENABLE_NODE, &debug_mroute6_detail_cmd);
+ install_element(ENABLE_NODE, &debug_mld_cmd);
+ install_element(ENABLE_NODE, &debug_mld_events_cmd);
+ install_element(ENABLE_NODE, &debug_mld_packets_cmd);
+ install_element(ENABLE_NODE, &debug_mld_trace_cmd);
+ install_element(ENABLE_NODE, &debug_mld_trace_detail_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_bsm_cmd);
+
+ install_element(CONFIG_NODE, &debug_pimv6_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_nht_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_nht_det_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_events_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_packets_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_packetdump_send_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_packetdump_recv_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_trace_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_trace_detail_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_zebra_cmd);
+ install_element(CONFIG_NODE, &debug_mroute6_cmd);
+ install_element(CONFIG_NODE, &debug_mroute6_detail_cmd);
+ install_element(CONFIG_NODE, &debug_mld_cmd);
+ install_element(CONFIG_NODE, &debug_mld_events_cmd);
+ install_element(CONFIG_NODE, &debug_mld_packets_cmd);
+ install_element(CONFIG_NODE, &debug_mld_trace_cmd);
+ install_element(CONFIG_NODE, &debug_mld_trace_detail_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_bsm_cmd);
+}
diff --git a/pimd/pim6_cmd.h b/pimd/pim6_cmd.h
new file mode 100644
index 0000000..201d8d6
--- /dev/null
+++ b/pimd/pim6_cmd.h
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for IPv6 FRR
+ * Copyright (C) 2022 Vmware, Inc.
+ * Mobashshera Rasool <mrasool@vmware.com>
+ */
+#ifndef PIM6_CMD_H
+#define PIM6_CMD_H
+
+#define PIM_STR "PIM information\n"
+#define MLD_STR "MLD information\n"
+#define MLD_GROUP_STR "MLD groups information\n"
+#define MLD_SOURCE_STR "MLD sources information\n"
+#define IFACE_MLD_STR "Enable MLD operation\n"
+#define IFACE_MLD_QUERY_INTERVAL_STR "MLD host query interval\n"
+#define IFACE_MLD_QUERY_MAX_RESPONSE_TIME_STR \
+ "MLD max query response value (seconds)\n"
+#define IFACE_MLD_QUERY_MAX_RESPONSE_TIME_DSEC_STR \
+ "MLD max query response value (deciseconds)\n"
+#define IFACE_MLD_LAST_MEMBER_QUERY_INTERVAL_STR \
+ "MLD last member query interval\n"
+#define IFACE_MLD_LAST_MEMBER_QUERY_COUNT_STR "MLD last member query count\n"
+#define IFACE_PIM_STR "Enable PIM SSM operation\n"
+#define IFACE_PIM_SM_STR "Enable PIM SM operation\n"
+#define IFACE_PIM_HELLO_STR "Hello Interval\n"
+#define IFACE_PIM_HELLO_TIME_STR "Time in seconds for Hello Interval\n"
+#define IFACE_PIM_HELLO_HOLD_STR "Time in seconds for Hold Interval\n"
+#define MROUTE_STR "IP multicast routing table\n"
+#define CLEAR_IP_PIM_STR "PIM clear commands\n"
+#define DEBUG_MLD_STR "MLD protocol activity\n"
+#define DEBUG_MLD_EVENTS_STR "MLD protocol events\n"
+#define DEBUG_MLD_PACKETS_STR "MLD protocol packets\n"
+#define DEBUG_MLD_TRACE_STR "MLD internal daemon activity\n"
+#define CONF_SSMPINGD_STR "Enable ssmpingd operation\n"
+#define DEBUG_PIMV6_STR "PIMv6 protocol activity\n"
+#define DEBUG_PIMV6_EVENTS_STR "PIMv6 protocol events\n"
+#define DEBUG_PIMV6_PACKETS_STR "PIMv6 protocol packets\n"
+#define DEBUG_PIMV6_HELLO_PACKETS_STR "PIMv6 Hello protocol packets\n"
+#define DEBUG_PIMV6_J_P_PACKETS_STR "PIMv6 Join/Prune protocol packets\n"
+#define DEBUG_PIMV6_PIM_REG_PACKETS_STR \
+ "PIMv6 Register/Reg-Stop protocol packets\n"
+#define DEBUG_PIMV6_PACKETDUMP_STR "PIMv6 packet dump\n"
+#define DEBUG_PIMV6_PACKETDUMP_SEND_STR "Dump sent packets\n"
+#define DEBUG_PIMV6_PACKETDUMP_RECV_STR "Dump received packets\n"
+#define DEBUG_PIMV6_TRACE_STR "PIMv6 internal daemon activity\n"
+#define DEBUG_PIMV6_ZEBRA_STR "ZEBRA protocol activity\n"
+#define DEBUG_MROUTE6_STR "PIMv6 interaction with kernel MFC cache\n"
+#define DEBUG_PIMV6_BSM_STR "BSR message processing activity\n"
+
+void pim_cmd_init(void);
+
+#endif /* PIM6_CMD_H */
diff --git a/pimd/pim6_main.c b/pimd/pim6_main.c
new file mode 100644
index 0000000..1af4a17
--- /dev/null
+++ b/pimd/pim6_main.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIMv6 main()
+ * Copyright (C) 2021 David Lamparter for NetDEF, Inc.
+ * Copyright (C) 2008 Everton da Silva Marques (pim_main.c)
+ */
+
+#include <zebra.h>
+
+#include "lib/vrf.h"
+#include "lib/filter.h"
+#include "lib/plist.h"
+#include "lib/routemap.h"
+#include "lib/routing_nb.h"
+
+#include "lib/privs.h"
+#include "lib/sigevent.h"
+#include "lib/libfrr.h"
+#include "lib/version.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_errors.h"
+#include "pim_iface.h"
+#include "pim_zebra.h"
+#include "pim_nb.h"
+#include "pim6_cmd.h"
+#include "pim6_mld.h"
+
+zebra_capabilities_t _caps_p[] = {
+ ZCAP_SYS_ADMIN,
+ ZCAP_NET_ADMIN,
+ ZCAP_NET_RAW,
+ ZCAP_BIND,
+};
+
+/* pimd privileges to run with */
+struct zebra_privs_t pimd_privs = {
+#if defined(FRR_USER) && defined(FRR_GROUP)
+ .user = FRR_USER,
+ .group = FRR_GROUP,
+#endif
+#ifdef VTY_GROUP
+ .vty_group = VTY_GROUP,
+#endif
+ .caps_p = _caps_p,
+ .cap_num_p = array_size(_caps_p),
+ .cap_num_i = 0,
+};
+
+static void pim6_terminate(void);
+
+static void pim6_sighup(void)
+{
+ zlog_info("SIGHUP received, ignoring");
+}
+
+static void pim6_sigint(void)
+{
+ zlog_notice("Terminating on signal SIGINT");
+ pim6_terminate();
+ exit(1);
+}
+
+static void pim6_sigterm(void)
+{
+ zlog_notice("Terminating on signal SIGTERM");
+ pim6_terminate();
+ exit(1);
+}
+
+static void pim6_sigusr1(void)
+{
+ zlog_rotate();
+}
+
+struct frr_signal_t pim6d_signals[] = {
+ {
+ .signal = SIGHUP,
+ .handler = &pim6_sighup,
+ },
+ {
+ .signal = SIGUSR1,
+ .handler = &pim6_sigusr1,
+ },
+ {
+ .signal = SIGINT,
+ .handler = &pim6_sigint,
+ },
+ {
+ .signal = SIGTERM,
+ .handler = &pim6_sigterm,
+ },
+};
+
+static const struct frr_yang_module_info *const pim6d_yang_modules[] = {
+ &frr_filter_info,
+ &frr_interface_info,
+ &frr_route_map_info,
+ &frr_vrf_info,
+ &frr_routing_info,
+ &frr_pim_info,
+ &frr_pim_rp_info,
+ &frr_gmp_info,
+};
+
+/* clang-format off */
+FRR_DAEMON_INFO(pim6d, PIM6,
+ .vty_port = PIM6D_VTY_PORT,
+ .proghelp = "Protocol Independent Multicast (RFC7761) for IPv6",
+
+ .signals = pim6d_signals,
+ .n_signals = array_size(pim6d_signals),
+
+ .privs = &pimd_privs,
+
+ .yang_modules = pim6d_yang_modules,
+ .n_yang_modules = array_size(pim6d_yang_modules),
+);
+/* clang-format on */
+
+int main(int argc, char **argv, char **envp)
+{
+ static struct option longopts[] = {
+ {},
+ };
+
+ frr_preinit(&pim6d_di, argc, argv);
+ frr_opt_add("", longopts, "");
+
+ /* this while just reads the options */
+ while (1) {
+ int opt;
+
+ opt = frr_getopt(argc, argv, NULL);
+
+ if (opt == EOF)
+ break;
+
+ switch (opt) {
+ case 0:
+ break;
+ default:
+ frr_help_exit(1);
+ }
+ }
+
+ pim_router_init();
+
+ access_list_init();
+ prefix_list_init();
+
+ /*
+ * Initializations
+ */
+ pim_error_init();
+ pim_vrf_init();
+#if 0
+ prefix_list_add_hook(pim_prefix_list_update);
+ prefix_list_delete_hook(pim_prefix_list_update);
+
+ pim_route_map_init();
+#endif
+ pim_init();
+ /*
+ * Initialize zclient "update" and "lookup" sockets
+ */
+ pim_iface_init();
+
+ gm_cli_init();
+
+ pim_zebra_init();
+#if 0
+ pim_bfd_init();
+ pim_mlag_init();
+#endif
+
+ hook_register(routing_conf_event,
+ routing_control_plane_protocols_name_validate);
+
+ routing_control_plane_protocols_register_vrf_dependency();
+
+ frr_config_fork();
+ frr_run(router->master);
+
+ /* never reached */
+ return 0;
+}
+
+static void pim6_terminate(void)
+{
+ pim_vrf_terminate();
+ pim_router_terminate();
+
+ prefix_list_reset();
+ access_list_reset();
+
+ frr_fini();
+}
diff --git a/pimd/pim6_mld.c b/pimd/pim6_mld.c
new file mode 100644
index 0000000..20ef921
--- /dev/null
+++ b/pimd/pim6_mld.c
@@ -0,0 +1,3232 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIMv6 MLD querier
+ * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
+ */
+
+/*
+ * keep pim6_mld.h open when working on this code. Most data structures are
+ * commented in the header.
+ *
+ * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
+ * that this code will replace the old IGMP querier at some point.
+ */
+
+#include <zebra.h>
+#include <netinet/ip6.h>
+
+#include "lib/memory.h"
+#include "lib/jhash.h"
+#include "lib/prefix.h"
+#include "lib/checksum.h"
+#include "lib/frrevent.h"
+#include "termtable.h"
+
+#include "pimd/pim6_mld.h"
+#include "pimd/pim6_mld_protocol.h"
+#include "pimd/pim_memory.h"
+#include "pimd/pim_instance.h"
+#include "pimd/pim_iface.h"
+#include "pimd/pim6_cmd.h"
+#include "pimd/pim_cmd_common.h"
+#include "pimd/pim_util.h"
+#include "pimd/pim_tib.h"
+#include "pimd/pimd.h"
+
+#ifndef IPV6_MULTICAST_ALL
+#define IPV6_MULTICAST_ALL 29
+#endif
+
+DEFINE_MTYPE_STATIC(PIMD, GM_IFACE, "MLD interface");
+DEFINE_MTYPE_STATIC(PIMD, GM_PACKET, "MLD packet");
+DEFINE_MTYPE_STATIC(PIMD, GM_SUBSCRIBER, "MLD subscriber");
+DEFINE_MTYPE_STATIC(PIMD, GM_STATE, "MLD subscription state");
+DEFINE_MTYPE_STATIC(PIMD, GM_SG, "MLD (S,G)");
+DEFINE_MTYPE_STATIC(PIMD, GM_GRP_PENDING, "MLD group query state");
+DEFINE_MTYPE_STATIC(PIMD, GM_GSQ_PENDING, "MLD group/source query aggregate");
+
+static void gm_t_query(struct event *t);
+static void gm_trigger_specific(struct gm_sg *sg);
+static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
+ struct timeval expire_wait);
+
+/* shorthand for log messages */
+#define log_ifp(msg) \
+ "[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
+#define log_pkt_src(msg) \
+ "[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name, \
+ &pkt_src->sin6_addr
+#define log_sg(sg, msg) \
+ "[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name, \
+ sg->iface->ifp->name, &sg->sgaddr
+
+/* clang-format off */
+#if PIM_IPV == 6
+static const pim_addr gm_all_hosts = {
+ .s6_addr = {
+ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ },
+};
+static const pim_addr gm_all_routers = {
+ .s6_addr = {
+ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+ },
+};
+/* MLDv1 does not allow subscriber tracking due to report suppression
+ * hence, the source address is replaced with ffff:...:ffff
+ */
+static const pim_addr gm_dummy_untracked = {
+ .s6_addr = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+};
+#else
+/* 224.0.0.1 */
+static const pim_addr gm_all_hosts = { .s_addr = htonl(0xe0000001), };
+/* 224.0.0.22 */
+static const pim_addr gm_all_routers = { .s_addr = htonl(0xe0000016), };
+static const pim_addr gm_dummy_untracked = { .s_addr = 0xffffffff, };
+#endif
+/* clang-format on */
+
+#define IPV6_MULTICAST_SCOPE_LINK 2
+
+static inline uint8_t in6_multicast_scope(const pim_addr *addr)
+{
+ return addr->s6_addr[1] & 0xf;
+}
+
+bool in6_multicast_nofwd(const pim_addr *addr)
+{
+ return in6_multicast_scope(addr) <= IPV6_MULTICAST_SCOPE_LINK;
+}
+
+/*
+ * (S,G) -> subscriber,(S,G)
+ */
+
+static int gm_packet_sg_cmp(const struct gm_packet_sg *a,
+ const struct gm_packet_sg *b)
+{
+ const struct gm_packet_state *s_a, *s_b;
+
+ s_a = gm_packet_sg2state(a);
+ s_b = gm_packet_sg2state(b);
+ return IPV6_ADDR_CMP(&s_a->subscriber->addr, &s_b->subscriber->addr);
+}
+
+DECLARE_RBTREE_UNIQ(gm_packet_sg_subs, struct gm_packet_sg, subs_itm,
+ gm_packet_sg_cmp);
+
+static struct gm_packet_sg *gm_packet_sg_find(struct gm_sg *sg,
+ enum gm_sub_sense sense,
+ struct gm_subscriber *sub)
+{
+ struct {
+ struct gm_packet_state hdr;
+ struct gm_packet_sg item;
+ } ref = {
+ /* clang-format off */
+ .hdr = {
+ .subscriber = sub,
+ },
+ .item = {
+ .offset = 0,
+ },
+ /* clang-format on */
+ };
+
+ return gm_packet_sg_subs_find(&sg->subs[sense], &ref.item);
+}
+
+/*
+ * interface -> (*,G),pending
+ */
+
+static int gm_grp_pending_cmp(const struct gm_grp_pending *a,
+ const struct gm_grp_pending *b)
+{
+ return IPV6_ADDR_CMP(&a->grp, &b->grp);
+}
+
+DECLARE_RBTREE_UNIQ(gm_grp_pends, struct gm_grp_pending, itm,
+ gm_grp_pending_cmp);
+
+/*
+ * interface -> ([S1,S2,...],G),pending
+ */
+
+static int gm_gsq_pending_cmp(const struct gm_gsq_pending *a,
+ const struct gm_gsq_pending *b)
+{
+ if (a->s_bit != b->s_bit)
+ return numcmp(a->s_bit, b->s_bit);
+
+ return IPV6_ADDR_CMP(&a->grp, &b->grp);
+}
+
+static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending *a)
+{
+ uint32_t seed = a->s_bit ? 0x68f0eb5e : 0x156b7f19;
+
+ return jhash(&a->grp, sizeof(a->grp), seed);
+}
+
+DECLARE_HASH(gm_gsq_pends, struct gm_gsq_pending, itm, gm_gsq_pending_cmp,
+ gm_gsq_pending_hash);
+
+/*
+ * interface -> (S,G)
+ */
+
+int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b)
+{
+ return pim_sgaddr_cmp(a->sgaddr, b->sgaddr);
+}
+
+static struct gm_sg *gm_sg_find(struct gm_if *gm_ifp, pim_addr grp,
+ pim_addr src)
+{
+ struct gm_sg ref = {};
+
+ ref.sgaddr.grp = grp;
+ ref.sgaddr.src = src;
+ return gm_sgs_find(gm_ifp->sgs, &ref);
+}
+
+static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp,
+ pim_addr src)
+{
+ struct gm_sg *ret, *prev;
+
+ ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret));
+ ret->sgaddr.grp = grp;
+ ret->sgaddr.src = src;
+ ret->iface = gm_ifp;
+ prev = gm_sgs_add(gm_ifp->sgs, ret);
+
+ if (prev) {
+ XFREE(MTYPE_GM_SG, ret);
+ ret = prev;
+ } else {
+ monotime(&ret->created);
+ gm_packet_sg_subs_init(ret->subs_positive);
+ gm_packet_sg_subs_init(ret->subs_negative);
+ }
+ return ret;
+}
+
+/*
+ * interface -> packets, sorted by expiry (because add_tail insert order)
+ */
+
+DECLARE_DLIST(gm_packet_expires, struct gm_packet_state, exp_itm);
+
+/*
+ * subscriber -> packets
+ */
+
+DECLARE_DLIST(gm_packets, struct gm_packet_state, pkt_itm);
+
+/*
+ * interface -> subscriber
+ */
+
+static int gm_subscriber_cmp(const struct gm_subscriber *a,
+ const struct gm_subscriber *b)
+{
+ return IPV6_ADDR_CMP(&a->addr, &b->addr);
+}
+
+static uint32_t gm_subscriber_hash(const struct gm_subscriber *a)
+{
+ return jhash(&a->addr, sizeof(a->addr), 0xd0e94ad4);
+}
+
+DECLARE_HASH(gm_subscribers, struct gm_subscriber, itm, gm_subscriber_cmp,
+ gm_subscriber_hash);
+
+static struct gm_subscriber *gm_subscriber_findref(struct gm_if *gm_ifp,
+ pim_addr addr)
+{
+ struct gm_subscriber ref = {}, *ret;
+
+ ref.addr = addr;
+ ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
+ if (ret)
+ ret->refcount++;
+ return ret;
+}
+
+static struct gm_subscriber *gm_subscriber_get(struct gm_if *gm_ifp,
+ pim_addr addr)
+{
+ struct gm_subscriber ref = {}, *ret;
+
+ ref.addr = addr;
+ ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
+
+ if (!ret) {
+ ret = XCALLOC(MTYPE_GM_SUBSCRIBER, sizeof(*ret));
+ ret->iface = gm_ifp;
+ ret->addr = addr;
+ ret->refcount = 1;
+ monotime(&ret->created);
+ gm_packets_init(ret->packets);
+
+ gm_subscribers_add(gm_ifp->subscribers, ret);
+ }
+ return ret;
+}
+
+static void gm_subscriber_drop(struct gm_subscriber **subp)
+{
+ struct gm_subscriber *sub = *subp;
+ struct gm_if *gm_ifp;
+
+ if (!sub)
+ return;
+ gm_ifp = sub->iface;
+
+ *subp = NULL;
+ sub->refcount--;
+
+ if (sub->refcount)
+ return;
+
+ gm_subscribers_del(gm_ifp->subscribers, sub);
+ XFREE(MTYPE_GM_SUBSCRIBER, sub);
+}
+
+/****************************************************************************/
+
+/* bundle query timer values for combined v1/v2 handling */
+struct gm_query_timers {
+ unsigned int qrv;
+ unsigned int max_resp_ms;
+ unsigned int qqic_ms;
+
+ struct timeval fuzz;
+ struct timeval expire_wait;
+};
+
+static void gm_expiry_calc(struct gm_query_timers *timers)
+{
+ unsigned int expire =
+ (timers->qrv - 1) * timers->qqic_ms + timers->max_resp_ms;
+ ldiv_t exp_div = ldiv(expire, 1000);
+
+ timers->expire_wait.tv_sec = exp_div.quot;
+ timers->expire_wait.tv_usec = exp_div.rem * 1000;
+ timeradd(&timers->expire_wait, &timers->fuzz, &timers->expire_wait);
+}
+
+/* final teardown of an S,G / *,G entry; caller must already have removed
+ * it from gm_ifp->sgs and dealt with t_sg_expire.
+ */
+static void gm_sg_free(struct gm_sg *sg)
+{
+	/* t_sg_expiry is handled before this is reached */
+	EVENT_OFF(sg->t_sg_query);
+	gm_packet_sg_subs_fini(sg->subs_negative);
+	gm_packet_sg_subs_fini(sg->subs_positive);
+	XFREE(MTYPE_GM_SG, sg);
+}
+
+/* printable names for enum gm_sg_state, indexed by state value (used in
+ * debug logs and assertion messages below)
+ */
+/* clang-format off */
+static const char *const gm_states[] = {
+	[GM_SG_NOINFO]			= "NOINFO",
+	[GM_SG_JOIN]			= "JOIN",
+	[GM_SG_JOIN_EXPIRING]		= "JOIN_EXPIRING",
+	[GM_SG_PRUNE]			= "PRUNE",
+	[GM_SG_NOPRUNE]			= "NOPRUNE",
+	[GM_SG_NOPRUNE_EXPIRING]	= "NOPRUNE_EXPIRING",
+};
+/* clang-format on */
+
+/* TODO: S,G entries in EXCLUDE (i.e. prune) unsupported */
+
+/* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is
+ * joined. Whether we actually want/need to support this is a separate
+ * question - it is almost never used. In fact this is exactly what RFC5790
+ * ("lightweight" MLDv2) does: it removes S,G EXCLUDE support.
+ */
+
+/* central S,G state machine: recompute the desired state for sg from its
+ * positive/negative subscription counts (and, for S,G entries, the parent
+ * *,G entry), start expiry/specific-query machinery on transitions into
+ * *_EXPIRING states, propagate join/prune into the TIB, and free the entry
+ * entirely when it ends up at NOINFO.
+ *
+ * has_expired: true when called from an expiry path, i.e. the *_EXPIRING
+ * grace states are no longer a valid destination.
+ *
+ * NB: sg may be freed by this function (NOINFO path) — callers must not
+ * touch it afterwards.
+ */
+static void gm_sg_update(struct gm_sg *sg, bool has_expired)
+{
+	struct gm_if *gm_ifp = sg->iface;
+	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
+	enum gm_sg_state prev, desired;
+	bool new_join;
+	struct gm_sg *grp = NULL;
+
+	/* S,G entries need the *,G entry to decide PRUNE vs. NOPRUNE;
+	 * *,G entries can never be in PRUNE themselves
+	 */
+	if (!pim_addr_is_any(sg->sgaddr.src))
+		grp = gm_sg_find(gm_ifp, sg->sgaddr.grp, PIMADDR_ANY);
+	else
+		assert(sg->state != GM_SG_PRUNE);
+
+	/* determine desired state, in priority order */
+	if (gm_packet_sg_subs_count(sg->subs_positive)) {
+		desired = GM_SG_JOIN;
+		assert(!sg->t_sg_expire);
+	} else if ((sg->state == GM_SG_JOIN ||
+		    sg->state == GM_SG_JOIN_EXPIRING) &&
+		   !has_expired)
+		desired = GM_SG_JOIN_EXPIRING;
+	else if (!grp || !gm_packet_sg_subs_count(grp->subs_positive))
+		desired = GM_SG_NOINFO;
+	else if (gm_packet_sg_subs_count(grp->subs_positive) ==
+		 gm_packet_sg_subs_count(sg->subs_negative)) {
+		/* every *,G subscriber excludes this source */
+		if ((sg->state == GM_SG_NOPRUNE ||
+		     sg->state == GM_SG_NOPRUNE_EXPIRING) &&
+		    !has_expired)
+			desired = GM_SG_NOPRUNE_EXPIRING;
+		else
+			desired = GM_SG_PRUNE;
+	} else if (gm_packet_sg_subs_count(sg->subs_negative))
+		desired = GM_SG_NOPRUNE;
+	else
+		desired = GM_SG_NOINFO;
+
+	if (desired != sg->state && !gm_ifp->stopping) {
+		if (PIM_DEBUG_GM_EVENTS)
+			zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state],
+				   gm_states[desired]);
+
+		/* entering a grace state: arm the expiry timer and, if we
+		 * are the elected querier, kick off specific queries
+		 */
+		if (desired == GM_SG_JOIN_EXPIRING ||
+		    desired == GM_SG_NOPRUNE_EXPIRING) {
+			struct gm_query_timers timers;
+
+			timers.qrv = gm_ifp->cur_qrv;
+			timers.max_resp_ms = gm_ifp->cur_max_resp;
+			timers.qqic_ms = gm_ifp->cur_query_intv_trig;
+			timers.fuzz = gm_ifp->cfg_timing_fuzz;
+
+			gm_expiry_calc(&timers);
+			gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);
+
+			EVENT_OFF(sg->t_sg_query);
+			sg->query_sbit = false;
+			/* Trigger the specific queries only for querier. */
+			if (IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest)) {
+				sg->n_query = gm_ifp->cur_lmqc;
+				gm_trigger_specific(sg);
+			}
+		}
+	}
+	prev = sg->state;
+	sg->state = desired;
+
+	/* non-forwardable groups (e.g. link-local scope) never join TIB */
+	if (in6_multicast_nofwd(&sg->sgaddr.grp) || gm_ifp->stopping)
+		new_join = false;
+	else
+		new_join = gm_sg_state_want_join(desired);
+
+	if (new_join && !sg->tib_joined) {
+		/* this will retry if join previously failed */
+		sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr,
+						gm_ifp->ifp, &sg->oil);
+		if (!sg->tib_joined)
+			zlog_warn(
+				"MLD join for %pSG%%%s not propagated into TIB",
+				&sg->sgaddr, gm_ifp->ifp->name);
+		else
+			zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg->sgaddr,
+				  gm_ifp->ifp->name);
+
+	} else if (sg->tib_joined && !new_join) {
+		tib_sg_gm_prune(gm_ifp->pim, sg->sgaddr, gm_ifp->ifp, &sg->oil);
+
+		sg->oil = NULL;
+		sg->tib_joined = false;
+	}
+
+	if (desired == GM_SG_NOINFO) {
+		/* multiple paths can lead to the last state going away;
+		 * t_sg_expire can still be running if we're arriving from
+		 * another path.
+		 */
+		if (has_expired)
+			EVENT_OFF(sg->t_sg_expire);
+
+		/* NOINFO must mean: no timer, no subscriptions at all */
+		assertf((!sg->t_sg_expire &&
+			 !gm_packet_sg_subs_count(sg->subs_positive) &&
+			 !gm_packet_sg_subs_count(sg->subs_negative)),
+			"%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
+			&sg->sgaddr, gm_ifp->ifp->name, has_expired,
+			sg->t_sg_expire, gm_states[prev], gm_states[desired],
+			gm_packet_sg_subs_count(sg->subs_positive),
+			gm_packet_sg_subs_count(sg->subs_negative), grp);
+
+		if (PIM_DEBUG_GM_TRACE)
+			zlog_debug(log_sg(sg, "dropping"));
+
+		gm_sgs_del(gm_ifp->sgs, sg);
+		gm_sg_free(sg);
+	}
+}
+
+/****************************************************************************/
+
+/* the following bunch of functions deals with transferring state from
+ * received packets into gm_packet_state. As a reminder, the querier is
+ * structured to keep all items received in one packet together, since they
+ * will share expiry timers and thus allows efficient handling.
+ */
+
+/* free a packet-state container: unlink from the interface expiry queue
+ * and the subscriber's packet list, drop the subscriber reference, free.
+ * Caller is responsible for the per-item (gm_packet_sg) cleanup.
+ */
+static void gm_packet_free(struct gm_packet_state *pkt)
+{
+	gm_packet_expires_del(pkt->iface->expires, pkt);
+	gm_packets_del(pkt->subscriber->packets, pkt);
+	gm_subscriber_drop(&pkt->subscriber);
+	XFREE(MTYPE_GM_STATE, pkt);
+}
+
+/* append one S,G / *,G item to pkt's item array (next free slot is
+ * pkt->n_active).  is_excl marks EXCLUDE-mode records, is_src marks
+ * source entries (as opposed to the group entry itself).
+ */
+static struct gm_packet_sg *gm_packet_sg_setup(struct gm_packet_state *pkt,
+					       struct gm_sg *sg, bool is_excl,
+					       bool is_src)
+{
+	struct gm_packet_sg *item;
+
+	assert(pkt->n_active < pkt->n_sg);
+
+	item = &pkt->items[pkt->n_active];
+	item->sg = sg;
+	item->is_excl = is_excl;
+	item->is_src = is_src;
+	item->offset = pkt->n_active;
+
+	pkt->n_active++;
+	return item;
+}
+
+/* remove one item (and, for an EXCLUDE group entry, its trailing source
+ * items) from its S,G subscription lists.  Frees the whole containing
+ * packet when this was its last active item.
+ *
+ * Returns true if the packet was freed (caller must not touch other
+ * items of the same packet afterwards).
+ */
+static bool gm_packet_sg_drop(struct gm_packet_sg *item)
+{
+	struct gm_packet_state *pkt;
+	size_t i;
+
+	assert(item->sg);
+
+	pkt = gm_packet_sg2state(item);
+	if (item->sg->most_recent == item)
+		item->sg->most_recent = NULL;
+
+	/* EXCLUDE group entries own the n_exclude source items that
+	 * immediately follow them in the packet's item array
+	 */
+	for (i = 0; i < item->n_exclude; i++) {
+		struct gm_packet_sg *excl_item;
+
+		excl_item = item + 1 + i;
+		if (!excl_item->sg)
+			continue;
+
+		gm_packet_sg_subs_del(excl_item->sg->subs_negative, excl_item);
+		excl_item->sg = NULL;
+		pkt->n_active--;
+
+		/* the group entry itself is still counted in n_active */
+		assert(pkt->n_active > 0);
+	}
+
+	/* an EXCLUDE-mode source is a negative (blocked) subscription;
+	 * everything else is positive
+	 */
+	if (item->is_excl && item->is_src)
+		gm_packet_sg_subs_del(item->sg->subs_negative, item);
+	else
+		gm_packet_sg_subs_del(item->sg->subs_positive, item);
+	item->sg = NULL;
+	pkt->n_active--;
+
+	if (!pkt->n_active) {
+		gm_packet_free(pkt);
+		return true;
+	}
+	return false;
+}
+
+/* age out an entire packet: drop every remaining item and update the
+ * affected S,G entries.  Stops iterating as soon as gm_packet_sg_drop()
+ * reports the packet itself was freed.  trace only gates debug logging.
+ */
+static void gm_packet_drop(struct gm_packet_state *pkt, bool trace)
+{
+	for (size_t i = 0; i < pkt->n_sg; i++) {
+		struct gm_sg *sg = pkt->items[i].sg;
+		bool deleted;
+
+		if (!sg)
+			continue;
+
+		if (trace && PIM_DEBUG_GM_TRACE)
+			zlog_debug(log_sg(sg, "general-dropping from %pPA"),
+				   &pkt->subscriber->addr);
+		deleted = gm_packet_sg_drop(&pkt->items[i]);
+
+		/* has_expired=true: this is an expiry/teardown path */
+		gm_sg_update(sg, true);
+		if (deleted)
+			break;
+	}
+}
+
+/* for each source in srcs[], remove this subscriber's existing item of
+ * the given sense (GM_SUB_POS/GM_SUB_NEG) on the corresponding S,G, and
+ * recompute that S,G's state.  Sources with no matching state are
+ * silently skipped.
+ */
+static void gm_packet_sg_remove_sources(struct gm_if *gm_ifp,
+					struct gm_subscriber *subscriber,
+					pim_addr grp, pim_addr *srcs,
+					size_t n_src, enum gm_sub_sense sense)
+{
+	struct gm_sg *sg;
+	struct gm_packet_sg *old_src;
+	size_t i;
+
+	for (i = 0; i < n_src; i++) {
+		sg = gm_sg_find(gm_ifp, grp, srcs[i]);
+		if (!sg)
+			continue;
+
+		old_src = gm_packet_sg_find(sg, sense, subscriber);
+		if (!old_src)
+			continue;
+
+		gm_packet_sg_drop(old_src);
+		gm_sg_update(sg, false);
+	}
+}
+
+/* a report for this S,G arrived: stop any pending expiry and remember to
+ * set the Suppress-bit in subsequent specific queries (other listeners
+ * already refreshed the state).
+ */
+static void gm_sg_expiry_cancel(struct gm_sg *sg)
+{
+	if (sg->t_sg_expire && PIM_DEBUG_GM_TRACE)
+		zlog_debug(log_sg(sg, "alive, cancelling expiry timer"));
+	EVENT_OFF(sg->t_sg_expire);
+	sg->query_sbit = true;
+}
+
+/* first pass: process all changes resulting in removal of state:
+ * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
+ * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
+ * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
+ * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
+ * note *replacing* state is NOT considered *removing* state here
+ *
+ * everything else is thrown into pkt for creation of state in pass 2
+ */
+/* pass 1 for one MLDv2 record: remove state made obsolete by this record
+ * (see block comment above) and stage the record's group/source items in
+ * pkt for pass 2.  rechdr/n_src have already been bounds-checked by the
+ * caller.
+ */
+static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
+			       struct mld_v2_rec_hdr *rechdr, size_t n_src)
+{
+	/* NB: pkt->subscriber can be NULL here if the subscriber was not
+	 * previously seen!
+	 */
+	struct gm_subscriber *subscriber = pkt->subscriber;
+	struct gm_sg *grp;
+	struct gm_packet_sg *old_grp = NULL;
+	struct gm_packet_sg *item;
+	size_t j;
+	bool is_excl = false;
+
+	grp = gm_sg_find(pkt->iface, rechdr->grp, PIMADDR_ANY);
+	if (grp && subscriber)
+		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
+
+	/* a positive *,G entry can only come from an EXCLUDE record */
+	assert(old_grp == NULL || old_grp->is_excl);
+
+	switch (rechdr->type) {
+	case MLD_RECTYPE_IS_EXCLUDE:
+	case MLD_RECTYPE_CHANGE_TO_EXCLUDE:
+		/* this always replaces or creates state */
+		is_excl = true;
+		if (!grp)
+			grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY);
+
+		item = gm_packet_sg_setup(pkt, grp, is_excl, false);
+		item->n_exclude = n_src;
+
+		/* [EXCL_INCL_SG_NOTE] referenced below
+		 *
+		 * in theory, we should drop any S,G that the host may have
+		 * previously added in INCLUDE mode. In practice, this is both
+		 * incredibly rare and entirely irrelevant. It only makes any
+		 * difference if an S,G that the host previously had on the
+		 * INCLUDE list is now on the blocked list for EXCLUDE, which
+		 * we can cover in processing the S,G list in pass2_excl().
+		 *
+		 * Other S,G from the host are simply left to expire
+		 * "naturally" through general expiry.
+		 */
+		break;
+
+	case MLD_RECTYPE_IS_INCLUDE:
+	case MLD_RECTYPE_CHANGE_TO_INCLUDE:
+		if (old_grp) {
+			/* INCLUDE has no *,G state, so old_grp here refers to
+			 * previous EXCLUDE => delete it
+			 */
+			gm_packet_sg_drop(old_grp);
+			gm_sg_update(grp, false);
+/* TODO "need S,G PRUNE => NO_INFO transition here" */
+		}
+		break;
+
+	case MLD_RECTYPE_ALLOW_NEW_SOURCES:
+		if (old_grp) {
+			/* remove S,Gs from EXCLUDE, and then we're done */
+			gm_packet_sg_remove_sources(pkt->iface, subscriber,
+						    rechdr->grp, rechdr->srcs,
+						    n_src, GM_SUB_NEG);
+			return;
+		}
+		/* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
+		 * identical to IS_INCLUDE (because the list of sources in
+		 * IS_INCLUDE is not exhaustive)
+		 */
+		break;
+
+	case MLD_RECTYPE_BLOCK_OLD_SOURCES:
+		if (old_grp) {
+			/* this is intentionally not implemented because it
+			 * would be complicated as hell. we only take the list
+			 * of blocked sources from full group state records
+			 */
+			return;
+		}
+
+		if (subscriber)
+			gm_packet_sg_remove_sources(pkt->iface, subscriber,
+						    rechdr->grp, rechdr->srcs,
+						    n_src, GM_SUB_POS);
+		return;
+	}
+
+	/* stage this record's sources; they follow the group entry in
+	 * pkt->items[] (pass 2 relies on this layout)
+	 */
+	for (j = 0; j < n_src; j++) {
+		struct gm_sg *sg;
+
+		sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]);
+		if (!sg)
+			sg = gm_sg_make(pkt->iface, rechdr->grp,
+					rechdr->srcs[j]);
+
+		gm_packet_sg_setup(pkt, sg, is_excl, true);
+	}
+}
+
+/* second pass: creating/updating/refreshing state. All the items from the
+ * received packet have already been thrown into gm_packet_state.
+ */
+
+/* pass 2 for a single INCLUDE-mode item: replace the subscriber's old
+ * positive subscription (if any), install the new one, refresh expiry and
+ * recompute the S,G state.
+ */
+static void gm_handle_v2_pass2_incl(struct gm_packet_state *pkt, size_t i)
+{
+	struct gm_packet_sg *item = &pkt->items[i];
+	struct gm_packet_sg *old = NULL;
+	struct gm_sg *sg = item->sg;
+
+	/* EXCLUDE state was already dropped in pass1 */
+	assert(!gm_packet_sg_find(sg, GM_SUB_NEG, pkt->subscriber));
+
+	old = gm_packet_sg_find(sg, GM_SUB_POS, pkt->subscriber);
+	if (old)
+		gm_packet_sg_drop(old);
+
+	/* n_active was reset to 0 after pass 1; count installed items */
+	pkt->n_active++;
+	gm_packet_sg_subs_add(sg->subs_positive, item);
+
+	sg->most_recent = item;
+	gm_sg_expiry_cancel(sg);
+	gm_sg_update(sg, false);
+}
+
+/* pass 2 for an EXCLUDE-mode group entry at pkt->items[offs], with its
+ * n_exclude source items following at offs+1...  Replaces the
+ * subscriber's previous EXCLUDE state for the group: sources excluded in
+ * both old and new state are migrated, sources only in the old state
+ * become included again (=> S,G update), then the new *,G and blocked
+ * S,G subscriptions are installed.
+ */
+static void gm_handle_v2_pass2_excl(struct gm_packet_state *pkt, size_t offs)
+{
+	struct gm_packet_sg *item = &pkt->items[offs];
+	struct gm_packet_sg *old_grp, *item_dup;
+	struct gm_sg *sg_grp = item->sg;
+	size_t i;
+
+	old_grp = gm_packet_sg_find(sg_grp, GM_SUB_POS, pkt->subscriber);
+	if (old_grp) {
+		for (i = 0; i < item->n_exclude; i++) {
+			struct gm_packet_sg *item_src, *old_src;
+
+			item_src = &pkt->items[offs + 1 + i];
+			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_NEG,
+						    pkt->subscriber);
+			if (old_src)
+				gm_packet_sg_drop(old_src);
+
+			/* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
+			 * items left over if the host previously had INCLUDE
+			 * mode going. Remove them here if we find any.
+			 */
+			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_POS,
+						    pkt->subscriber);
+			if (old_src)
+				gm_packet_sg_drop(old_src);
+		}
+
+		/* the previous loop has removed the S,G entries which are
+		 * still excluded after this update. So anything left on the
+		 * old item was previously excluded but is now included
+		 * => need to trigger update on S,G
+		 */
+		for (i = 0; i < old_grp->n_exclude; i++) {
+			struct gm_packet_sg *old_src;
+			struct gm_sg *old_sg_src;
+
+			old_src = old_grp + 1 + i;
+			old_sg_src = old_src->sg;
+			if (!old_sg_src)
+				continue;
+
+			gm_packet_sg_drop(old_src);
+			gm_sg_update(old_sg_src, false);
+		}
+
+		gm_packet_sg_drop(old_grp);
+	}
+
+	/* old state is gone now, so this add must not find a duplicate */
+	item_dup = gm_packet_sg_subs_add(sg_grp->subs_positive, item);
+	assert(!item_dup);
+	pkt->n_active++;
+
+	sg_grp->most_recent = item;
+	gm_sg_expiry_cancel(sg_grp);
+
+	/* install the blocked-source (negative) subscriptions; duplicates
+	 * can happen here (same source listed twice) and are dropped
+	 */
+	for (i = 0; i < item->n_exclude; i++) {
+		struct gm_packet_sg *item_src;
+
+		item_src = &pkt->items[offs + 1 + i];
+		item_dup = gm_packet_sg_subs_add(item_src->sg->subs_negative,
+						 item_src);
+
+		if (item_dup)
+			item_src->sg = NULL;
+		else {
+			pkt->n_active++;
+			gm_sg_update(item_src->sg, false);
+		}
+	}
+
+	/* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
+	 * to get lower PIM churn/flapping
+	 */
+	gm_sg_update(sg_grp, false);
+}
+
+/* TODO: QRV/QQIC are not copied from queries to local state */
+
+/* on receiving a query, we need to update our robustness/query interval to
+ * match, so we correctly process group/source specific queries after last
+ * member leaves
+ */
+
+/* process a received MLDv2 report: validate the header and each record,
+ * run pass 1 (state removal + staging) per record, then shrink the
+ * packet-state allocation to what was actually used and run pass 2
+ * (state installation).  Truncated/invalid records abort the record loop
+ * but already-processed records keep their effect.
+ */
+static void gm_handle_v2_report(struct gm_if *gm_ifp,
+				const struct sockaddr_in6 *pkt_src, char *data,
+				size_t len)
+{
+	struct mld_v2_report_hdr *hdr;
+	size_t i, n_records, max_entries;
+	struct gm_packet_state *pkt;
+
+	if (len < sizeof(*hdr)) {
+		if (PIM_DEBUG_GM_PACKETS)
+			zlog_debug(log_pkt_src(
+				"malformed MLDv2 report (truncated header)"));
+		gm_ifp->stats.rx_drop_malformed++;
+		return;
+	}
+
+	hdr = (struct mld_v2_report_hdr *)data;
+	data += sizeof(*hdr);
+	len -= sizeof(*hdr);
+
+	n_records = ntohs(hdr->n_records);
+	if (n_records > len / sizeof(struct mld_v2_rec_hdr)) {
+		/* note this is only an upper bound, records with source lists
+		 * are larger. This is mostly here to make coverity happy.
+		 */
+		zlog_warn(log_pkt_src(
+			"malformed MLDv2 report (infeasible record count)"));
+		gm_ifp->stats.rx_drop_malformed++;
+		return;
+	}
+
+	/* errors after this may at least partially process the packet */
+	gm_ifp->stats.rx_new_report++;
+
+	/* can't have more *,G and S,G items than there is space for ipv6
+	 * addresses, so just use this to allocate temporary buffer
+	 */
+	max_entries = len / sizeof(pim_addr);
+	pkt = XCALLOC(MTYPE_GM_STATE,
+		      offsetof(struct gm_packet_state, items[max_entries]));
+	pkt->n_sg = max_entries;
+	pkt->iface = gm_ifp;
+	/* may be NULL for a first-time subscriber; resolved after pass 1 */
+	pkt->subscriber = gm_subscriber_findref(gm_ifp, pkt_src->sin6_addr);
+
+	/* validate & remove state in v2_pass1() */
+	for (i = 0; i < n_records; i++) {
+		struct mld_v2_rec_hdr *rechdr;
+		size_t n_src, record_size;
+
+		if (len < sizeof(*rechdr)) {
+			zlog_warn(log_pkt_src(
+				"malformed MLDv2 report (truncated record header)"));
+			gm_ifp->stats.rx_trunc_report++;
+			break;
+		}
+
+		rechdr = (struct mld_v2_rec_hdr *)data;
+		data += sizeof(*rechdr);
+		len -= sizeof(*rechdr);
+
+		n_src = ntohs(rechdr->n_src);
+		record_size = n_src * sizeof(pim_addr) + rechdr->aux_len * 4;
+
+		if (len < record_size) {
+			zlog_warn(log_pkt_src(
+				"malformed MLDv2 report (truncated source list)"));
+			gm_ifp->stats.rx_trunc_report++;
+			break;
+		}
+		if (!IN6_IS_ADDR_MULTICAST(&rechdr->grp)) {
+			zlog_warn(
+				log_pkt_src(
+					"malformed MLDv2 report (invalid group %pI6)"),
+				&rechdr->grp);
+			gm_ifp->stats.rx_trunc_report++;
+			break;
+		}
+
+		data += record_size;
+		len -= record_size;
+
+		gm_handle_v2_pass1(pkt, rechdr, n_src);
+	}
+
+	if (!pkt->n_active) {
+		/* nothing staged - no state to install, throw it all away */
+		gm_subscriber_drop(&pkt->subscriber);
+		XFREE(MTYPE_GM_STATE, pkt);
+		return;
+	}
+
+	/* shrink to the actually-used item count */
+	pkt = XREALLOC(MTYPE_GM_STATE, pkt,
+		       offsetof(struct gm_packet_state, items[pkt->n_active]));
+	pkt->n_sg = pkt->n_active;
+	pkt->n_active = 0;
+
+	monotime(&pkt->received);
+	if (!pkt->subscriber)
+		pkt->subscriber = gm_subscriber_get(gm_ifp, pkt_src->sin6_addr);
+	gm_packets_add_tail(pkt->subscriber->packets, pkt);
+	gm_packet_expires_add_tail(gm_ifp->expires, pkt);
+
+	/* pass 2: an EXCLUDE group entry consumes its source items too */
+	for (i = 0; i < pkt->n_sg; i++)
+		if (!pkt->items[i].is_excl)
+			gm_handle_v2_pass2_incl(pkt, i);
+		else {
+			gm_handle_v2_pass2_excl(pkt, i);
+			i += pkt->items[i].n_exclude;
+		}
+
+	if (pkt->n_active == 0)
+		gm_packet_free(pkt);
+}
+
+/* process a received MLDv1 report.  v1 reports carry no host identity
+ * worth tracking, so state is attributed to the shared "untracked" dummy
+ * subscriber and treated like a v2 IS_EXCLUDE{} record for the group.
+ */
+static void gm_handle_v1_report(struct gm_if *gm_ifp,
+				const struct sockaddr_in6 *pkt_src, char *data,
+				size_t len)
+{
+	struct mld_v1_pkt *hdr;
+	struct gm_packet_state *pkt;
+	struct gm_sg *grp;
+	struct gm_packet_sg *item;
+	size_t max_entries;
+
+	if (len < sizeof(*hdr)) {
+		if (PIM_DEBUG_GM_PACKETS)
+			zlog_debug(log_pkt_src(
+				"malformed MLDv1 report (truncated)"));
+		gm_ifp->stats.rx_drop_malformed++;
+		return;
+	}
+
+	gm_ifp->stats.rx_old_report++;
+
+	hdr = (struct mld_v1_pkt *)data;
+
+	/* exactly one group entry, no sources */
+	max_entries = 1;
+	pkt = XCALLOC(MTYPE_GM_STATE,
+		      offsetof(struct gm_packet_state, items[max_entries]));
+	pkt->n_sg = max_entries;
+	pkt->iface = gm_ifp;
+	pkt->subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
+
+	/* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */
+
+	grp = gm_sg_find(pkt->iface, hdr->grp, PIMADDR_ANY);
+	if (!grp)
+		grp = gm_sg_make(pkt->iface, hdr->grp, PIMADDR_ANY);
+
+	item = gm_packet_sg_setup(pkt, grp, true, false);
+	item->n_exclude = 0;
+
+/* TODO "set v1-seen timer on grp here" */
+
+	/* } */
+
+	/* pass2 will count n_active back up to 1. Also since a v1 report
+	 * has exactly 1 group, we can skip the realloc() that v2 needs here.
+	 */
+	assert(pkt->n_active == 1);
+	pkt->n_sg = pkt->n_active;
+	pkt->n_active = 0;
+
+	monotime(&pkt->received);
+	if (!pkt->subscriber)
+		pkt->subscriber = gm_subscriber_get(gm_ifp, gm_dummy_untracked);
+	gm_packets_add_tail(pkt->subscriber->packets, pkt);
+	gm_packet_expires_add_tail(gm_ifp->expires, pkt);
+
+	/* pass2 covers installing state & removing old state; all the v1
+	 * compat is handled at this point.
+	 *
+	 * Note that "old state" may be v2; subscribers will switch from v2
+	 * reports to v1 reports when the querier changes from v2 to v1. So,
+	 * limiting this to v1 would be wrong.
+	 */
+	gm_handle_v2_pass2_excl(pkt, 0);
+
+	if (pkt->n_active == 0)
+		gm_packet_free(pkt);
+}
+
+/* process a received MLDv1 "done" (leave): drop the untracked dummy
+ * subscriber's *,G state for the group, equivalent to a v2
+ * TO_INCLUDE{} with an empty source list.
+ */
+static void gm_handle_v1_leave(struct gm_if *gm_ifp,
+			       const struct sockaddr_in6 *pkt_src, char *data,
+			       size_t len)
+{
+	struct mld_v1_pkt *hdr;
+	struct gm_subscriber *subscriber;
+	struct gm_sg *grp;
+	struct gm_packet_sg *old_grp;
+
+	if (len < sizeof(*hdr)) {
+		if (PIM_DEBUG_GM_PACKETS)
+			zlog_debug(log_pkt_src(
+				"malformed MLDv1 leave (truncated)"));
+		gm_ifp->stats.rx_drop_malformed++;
+		return;
+	}
+
+	gm_ifp->stats.rx_old_leave++;
+
+	hdr = (struct mld_v1_pkt *)data;
+
+	/* no untracked state at all => nothing to leave */
+	subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
+	if (!subscriber)
+		return;
+
+	/* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */
+
+	grp = gm_sg_find(gm_ifp, hdr->grp, PIMADDR_ANY);
+	if (grp) {
+		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
+		if (old_grp) {
+			gm_packet_sg_drop(old_grp);
+			gm_sg_update(grp, false);
+
+/* TODO "need S,G PRUNE => NO_INFO transition here" */
+
+		}
+	}
+
+	/* } */
+
+	/* nothing more to do here, pass2 is no-op for leaves */
+	gm_subscriber_drop(&subscriber);
+}
+
+/* for each general query received (or sent), a timer is started to expire
+ * _everything_ at the appropriate time (including robustness multiplier).
+ *
+ * So when this timer hits, all packets - with all of their items - that were
+ * received *before* the query are aged out, and state updated accordingly.
+ * Note that when we receive a refresh/update, the previous/old packet is
+ * already dropped and replaced with a new one, so in normal steady-state
+ * operation, this timer won't be doing anything.
+ *
+ * Additionally, if a subscriber actively leaves a group, that goes through
+ * its own path too and won't hit this. This is really only triggered when a
+ * host straight up disappears.
+ */
+/* general-expiry timer callback: walk gm_ifp->pending[] in order; for
+ * each entry whose deadline has passed, age out all packets received
+ * before that pending query's timestamp, then shift the array down.
+ * Re-arms itself for the next not-yet-due entry.
+ */
+static void gm_t_expire(struct event *t)
+{
+	struct gm_if *gm_ifp = EVENT_ARG(t);
+	struct gm_packet_state *pkt;
+
+	zlog_info(log_ifp("general expiry timer"));
+
+	while (gm_ifp->n_pending) {
+		struct gm_general_pending *pend = gm_ifp->pending;
+		struct timeval remain;
+		int64_t remain_ms;
+
+		remain_ms = monotime_until(&pend->expiry, &remain);
+		if (remain_ms > 0) {
+			/* first entry not due yet => re-arm and stop */
+			if (PIM_DEBUG_GM_EVENTS)
+				zlog_debug(
+					log_ifp("next general expiry in %" PRId64 "ms"),
+					remain_ms / 1000);
+
+			event_add_timer_tv(router->master, gm_t_expire, gm_ifp,
+					   &remain, &gm_ifp->t_expire);
+			return;
+		}
+
+		/* expires list is ordered by arrival; stop at the first
+		 * packet received after the pending query was sent
+		 */
+		while ((pkt = gm_packet_expires_first(gm_ifp->expires))) {
+			if (timercmp(&pkt->received, &pend->query, >=))
+				break;
+
+			if (PIM_DEBUG_GM_PACKETS)
+				zlog_debug(log_ifp("expire packet %p"), pkt);
+			gm_packet_drop(pkt, true);
+		}
+
+		gm_ifp->n_pending--;
+		memmove(gm_ifp->pending, gm_ifp->pending + 1,
+			gm_ifp->n_pending * sizeof(gm_ifp->pending[0]));
+	}
+
+	if (PIM_DEBUG_GM_EVENTS)
+		zlog_debug(log_ifp("next general expiry waiting for query"));
+}
+
+/* NB: the receive handlers will also run when sending packets, since we
+ * receive our own packets back in.
+ */
+/* a general query was seen (ours or another querier's): queue a pending
+ * general-expiry entry for it.  Entries whose expiry would be covered by
+ * ("supersetted" within) this one are removed first; if pending[] is
+ * full, the query is ignored.
+ */
+static void gm_handle_q_general(struct gm_if *gm_ifp,
+				struct gm_query_timers *timers)
+{
+	struct timeval now, expiry;
+	struct gm_general_pending *pend;
+
+	monotime(&now);
+	timeradd(&now, &timers->expire_wait, &expiry);
+
+	while (gm_ifp->n_pending) {
+		pend = &gm_ifp->pending[gm_ifp->n_pending - 1];
+
+		if (timercmp(&pend->expiry, &expiry, <))
+			break;
+
+		/* if we end up here, the last item in pending[] has an expiry
+		 * later than the expiry for this query. But our query time
+		 * (now) is later than that of the item (because, well, that's
+		 * how time works.) This makes this query meaningless since
+		 * it's "supersetted" within the preexisting query
+		 */
+
+		if (PIM_DEBUG_GM_TRACE_DETAIL)
+			zlog_debug(
+				log_ifp("zapping supersetted general timer %pTVMu"),
+				&pend->expiry);
+
+		gm_ifp->n_pending--;
+		if (!gm_ifp->n_pending)
+			EVENT_OFF(gm_ifp->t_expire);
+	}
+
+	/* people might be messing with their configs or something */
+	if (gm_ifp->n_pending == array_size(gm_ifp->pending))
+		return;
+
+	pend = &gm_ifp->pending[gm_ifp->n_pending];
+	pend->query = now;
+	pend->expiry = expiry;
+
+	/* first entry => (re)start the expiry timer */
+	if (!gm_ifp->n_pending++) {
+		if (PIM_DEBUG_GM_TRACE)
+			zlog_debug(
+				log_ifp("starting general timer @ 0: %pTVMu"),
+				&pend->expiry);
+		event_add_timer_tv(router->master, gm_t_expire, gm_ifp,
+				   &timers->expire_wait, &gm_ifp->t_expire);
+	} else if (PIM_DEBUG_GM_TRACE)
+		zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
+			   gm_ifp->n_pending, &pend->expiry);
+}
+
+/* per-S,G expiry timer: no report arrived within the grace period after
+ * a (group-)specific query, so drop all positive subscriptions and let
+ * gm_sg_update() finish the transition (possibly freeing sg).
+ */
+static void gm_t_sg_expire(struct event *t)
+{
+	struct gm_sg *sg = EVENT_ARG(t);
+	struct gm_if *gm_ifp = sg->iface;
+	struct gm_packet_sg *item;
+
+	assertf(sg->state == GM_SG_JOIN_EXPIRING ||
+			sg->state == GM_SG_NOPRUNE_EXPIRING,
+		"%pSG%%%s %pTHD", &sg->sgaddr, gm_ifp->ifp->name, t);
+
+	frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
+		/* this will also drop EXCLUDE mode S,G lists together with
+		 * the *,G entry
+		 */
+		gm_packet_sg_drop(item);
+
+	/* subs_negative items are only timed out together with the *,G entry
+	 * since we won't get any reports for a group-and-source query
+	 */
+	gm_sg_update(sg, true);
+}
+
+/* check whether sg was refreshed by a report at or after time "ref"
+ * (minus the configured fuzz).  Lazily repopulates sg->most_recent from
+ * the positive subscription list if it was cleared.  Returns true if a
+ * recent-enough report exists, i.e. expiry should not be started.
+ */
+static bool gm_sg_check_recent(struct gm_if *gm_ifp, struct gm_sg *sg,
+			       struct timeval ref)
+{
+	struct gm_packet_state *pkt;
+
+	if (!sg->most_recent) {
+		struct gm_packet_state *best_pkt = NULL;
+		struct gm_packet_sg *item;
+
+		/* most_recent was invalidated (e.g. that packet was
+		 * dropped) - find the newest remaining subscription
+		 */
+		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
+			pkt = gm_packet_sg2state(item);
+
+			if (!best_pkt ||
+			    timercmp(&pkt->received, &best_pkt->received, >)) {
+				best_pkt = pkt;
+				sg->most_recent = item;
+			}
+		}
+	}
+	if (sg->most_recent) {
+		struct timeval fuzz;
+
+		pkt = gm_packet_sg2state(sg->most_recent);
+
+		/* this shouldn't happen on plain old real ethernet segment,
+		 * but on something like a VXLAN or VPLS it is very possible
+		 * that we get a report before the query that triggered it.
+		 * (imagine a triangle scenario with 3 datacenters, it's very
+		 * possible A->B + B->C is faster than A->C due to odd routing)
+		 *
+		 * This makes a little tolerance allowance to handle that case.
+		 */
+		timeradd(&pkt->received, &gm_ifp->cfg_timing_fuzz, &fuzz);
+
+		if (timercmp(&fuzz, &ref, >))
+			return true;
+	}
+	return false;
+}
+
+/* (re)arm sg->t_sg_expire to fire after expire_wait, unless the entry is
+ * already PRUNE, was recently refreshed, or a shorter timer is already
+ * running (the earliest deadline always wins).  NULL sg is a no-op.
+ */
+static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
+			      struct timeval expire_wait)
+{
+	struct timeval now;
+
+	if (!sg)
+		return;
+	if (sg->state == GM_SG_PRUNE)
+		return;
+
+	monotime(&now);
+	if (gm_sg_check_recent(gm_ifp, sg, now))
+		return;
+
+	if (PIM_DEBUG_GM_TRACE)
+		zlog_debug(log_sg(sg, "expiring in %pTVI"), &expire_wait);
+
+	if (sg->t_sg_expire) {
+		struct timeval remain;
+
+		remain = event_timer_remain(sg->t_sg_expire);
+		if (timercmp(&remain, &expire_wait, <=))
+			return;
+
+		EVENT_OFF(sg->t_sg_expire);
+	}
+
+	event_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
+			   &sg->t_sg_expire);
+}
+
+/* group-and-source specific query received: start the expiry timer on
+ * each listed S,G we have state for (gm_sg_timer_start tolerates NULL
+ * from the lookup for sources we don't know).
+ */
+static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
+				 struct gm_query_timers *timers, pim_addr grp,
+				 const pim_addr *srcs, size_t n_src)
+{
+	struct gm_sg *sg;
+	size_t i;
+
+	for (i = 0; i < n_src; i++) {
+		sg = gm_sg_find(gm_ifp, grp, srcs[i]);
+		GM_UPDATE_SG_STATE(sg);
+		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
+	}
+}
+
+/* group-pending expiry timer: after a group-specific query got no
+ * refresh, age out all S,G entries of the group (the *,G entry has its
+ * own sg->t_sg_expire handling).  Frees the pending record at the end.
+ */
+static void gm_t_grp_expire(struct event *t)
+{
+	/* if we're here, that means when we received the group-specific query
+	 * there was one or more active S,G for this group. For *,G the timer
+	 * in sg->t_sg_expire is running separately and gets cancelled when we
+	 * receive a report, so that work is left to gm_t_sg_expire and we
+	 * shouldn't worry about it here.
+	 */
+	struct gm_grp_pending *pend = EVENT_ARG(t);
+	struct gm_if *gm_ifp = pend->iface;
+	struct gm_sg *sg, *sg_start, sg_ref = {};
+
+	if (PIM_DEBUG_GM_EVENTS)
+		zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);
+
+	/* gteq lookup - try to find *,G or S,G (S,G is > *,G)
+	 * could technically be gt to skip a possible *,G
+	 */
+	sg_ref.sgaddr.grp = pend->grp;
+	sg_ref.sgaddr.src = PIMADDR_ANY;
+	sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
+
+	/* sgs is sorted, so iteration stops at the next group */
+	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
+		struct gm_packet_sg *item;
+
+		if (pim_addr_cmp(sg->sgaddr.grp, pend->grp))
+			break;
+		if (pim_addr_is_any(sg->sgaddr.src))
+			/* handled by gm_t_sg_expire / sg->t_sg_expire */
+			continue;
+		if (gm_sg_check_recent(gm_ifp, sg, pend->query))
+			continue;
+
+		/* we may also have a group-source-specific query going on in
+		 * parallel. But if we received nothing for the *,G query,
+		 * the S,G query is kinda irrelevant.
+		 */
+		EVENT_OFF(sg->t_sg_expire);
+
+		frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
+			/* this will also drop the EXCLUDE S,G lists */
+			gm_packet_sg_drop(item);
+
+		gm_sg_update(sg, true);
+	}
+
+	gm_grp_pends_del(gm_ifp->grp_pends, pend);
+	XFREE(MTYPE_GM_GRP_PENDING, pend);
+}
+
+/* group-specific query received: start expiry on the *,G entry (if any)
+ * and, when the group also has S,G entries, create or refresh a
+ * gm_grp_pending record whose timer (gm_t_grp_expire) covers them.
+ * If we hold no state at all for the group, do nothing.
+ */
+static void gm_handle_q_group(struct gm_if *gm_ifp,
+			      struct gm_query_timers *timers, pim_addr grp)
+{
+	struct gm_sg *sg, sg_ref = {};
+	struct gm_grp_pending *pend, pend_ref = {};
+
+	sg_ref.sgaddr.grp = grp;
+	sg_ref.sgaddr.src = PIMADDR_ANY;
+	/* gteq lookup - try to find *,G or S,G (S,G is > *,G) */
+	sg = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
+
+	if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
+		/* we have nothing at all for this group - don't waste RAM */
+		return;
+
+	if (pim_addr_is_any(sg->sgaddr.src)) {
+		/* actually found *,G entry here */
+		if (PIM_DEBUG_GM_TRACE)
+			zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
+				   &grp);
+		GM_UPDATE_SG_STATE(sg);
+		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
+
+		sg = gm_sgs_next(gm_ifp->sgs, sg);
+		if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
+			/* no S,G for this group */
+			return;
+	}
+
+	pend_ref.grp = grp;
+	pend = gm_grp_pends_find(gm_ifp->grp_pends, &pend_ref);
+
+	if (pend) {
+		struct timeval remain;
+
+		/* keep the earlier of the two deadlines */
+		remain = event_timer_remain(pend->t_expire);
+		if (timercmp(&remain, &timers->expire_wait, <=))
+			return;
+
+		EVENT_OFF(pend->t_expire);
+	} else {
+		pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend));
+		pend->grp = grp;
+		pend->iface = gm_ifp;
+		gm_grp_pends_add(gm_ifp->grp_pends, pend);
+	}
+
+	monotime(&pend->query);
+	event_add_timer_tv(router->master, gm_t_grp_expire, pend,
+			   &timers->expire_wait, &pend->t_expire);
+
+	if (PIM_DEBUG_GM_TRACE)
+		zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
+			   pend->t_expire);
+}
+
+/* restart the querier startup sequence (e.g. after a config change),
+ * but only if we currently are the elected querier and have a usable
+ * link-local address.
+ */
+static void gm_bump_querier(struct gm_if *gm_ifp)
+{
+	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
+
+	EVENT_OFF(gm_ifp->t_query);
+
+	if (pim_addr_is_any(pim_ifp->ll_lowest))
+		return;
+	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
+		return;
+
+	gm_ifp->n_startup = gm_ifp->cur_qrv;
+
+	/* run gm_t_query immediately rather than scheduling it */
+	event_execute(router->master, gm_t_query, gm_ifp, 0, NULL);
+}
+
+/* other-querier-present timer expired: the previously elected (lower
+ * address) querier went silent, so take over as querier and begin the
+ * startup query sequence.
+ */
+static void gm_t_other_querier(struct event *t)
+{
+	struct gm_if *gm_ifp = EVENT_ARG(t);
+	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
+
+	zlog_info(log_ifp("other querier timer expired"));
+
+	gm_ifp->querier = pim_ifp->ll_lowest;
+	gm_ifp->n_startup = gm_ifp->cur_qrv;
+
+	event_execute(router->master, gm_t_query, gm_ifp, 0, NULL);
+}
+
+/* process a received MLD query (v1 or v2, distinguished by length):
+ * validate size/group/destination, run querier election against the
+ * source address, derive the expiry timers from the query fields, and
+ * dispatch to the general / group / group-and-source handlers.
+ */
+static void gm_handle_query(struct gm_if *gm_ifp,
+			    const struct sockaddr_in6 *pkt_src,
+			    pim_addr *pkt_dst, char *data, size_t len)
+{
+	struct mld_v2_query_hdr *hdr;
+	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
+	struct gm_query_timers timers;
+	bool general_query;
+
+	/* valid sizes: exactly a v1 packet, or >= the v2 query header */
+	if (len < sizeof(struct mld_v2_query_hdr) &&
+	    len != sizeof(struct mld_v1_pkt)) {
+		zlog_warn(log_pkt_src("invalid query size"));
+		gm_ifp->stats.rx_drop_malformed++;
+		return;
+	}
+
+	/* NB: v1 and v2 headers share the leading fields accessed below */
+	hdr = (struct mld_v2_query_hdr *)data;
+	general_query = pim_addr_is_any(hdr->grp);
+
+	if (!general_query && !IN6_IS_ADDR_MULTICAST(&hdr->grp)) {
+		zlog_warn(log_pkt_src(
+				  "malformed MLDv2 query (invalid group %pI6)"),
+			  &hdr->grp);
+		gm_ifp->stats.rx_drop_malformed++;
+		return;
+	}
+
+	if (len >= sizeof(struct mld_v2_query_hdr)) {
+		size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);
+
+		if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
+			zlog_warn(log_pkt_src(
+				"malformed MLDv2 query (truncated source list)"));
+			gm_ifp->stats.rx_drop_malformed++;
+			return;
+		}
+
+		if (general_query && src_space) {
+			zlog_warn(log_pkt_src(
+				"malformed MLDv2 query (general query with non-empty source list)"));
+			gm_ifp->stats.rx_drop_malformed++;
+			return;
+		}
+	}
+
+	/* accepting queries unicast to us (or addressed to a wrong group)
+	 * can mess up querier election as well as cause us to terminate
+	 * traffic (since after a unicast query no reports will be coming in)
+	 */
+	if (!IPV6_ADDR_SAME(pkt_dst, &gm_all_hosts)) {
+		if (pim_addr_is_any(hdr->grp)) {
+			zlog_warn(
+				log_pkt_src(
+					"wrong destination %pPA for general query"),
+				pkt_dst);
+			gm_ifp->stats.rx_drop_dstaddr++;
+			return;
+		}
+
+		if (!IPV6_ADDR_SAME(&hdr->grp, pkt_dst)) {
+			gm_ifp->stats.rx_drop_dstaddr++;
+			zlog_warn(
+				log_pkt_src(
+					"wrong destination %pPA for group specific query"),
+				pkt_dst);
+			return;
+		}
+	}
+
+	/* querier election: lowest address wins */
+	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) {
+		if (PIM_DEBUG_GM_EVENTS)
+			zlog_debug(
+				log_pkt_src("replacing elected querier %pPA"),
+				&gm_ifp->querier);
+
+		gm_ifp->querier = pkt_src->sin6_addr;
+	}
+
+	/* v1 queries carry max_resp directly; v2 encodes max_resp and QQIC
+	 * and carries QRV in the low 3 flag bits (0 => maximum, 8)
+	 */
+	if (len == sizeof(struct mld_v1_pkt)) {
+		timers.qrv = gm_ifp->cur_qrv;
+		timers.max_resp_ms = hdr->max_resp_code;
+		timers.qqic_ms = gm_ifp->cur_query_intv;
+	} else {
+		timers.qrv = (hdr->flags & 0x7) ?: 8;
+		timers.max_resp_ms = mld_max_resp_decode(hdr->max_resp_code);
+		timers.qqic_ms = igmp_msg_decode8to16(hdr->qqic) * 1000;
+	}
+	timers.fuzz = gm_ifp->cfg_timing_fuzz;
+
+	gm_expiry_calc(&timers);
+
+	if (PIM_DEBUG_GM_TRACE_DETAIL)
+		zlog_debug(
+			log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
+			timers.qrv, timers.max_resp_ms, timers.qqic_ms,
+			&timers.expire_wait);
+
+	/* lost election against a lower address: stop querying and start
+	 * the other-querier-present timeout
+	 */
+	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &pim_ifp->ll_lowest) < 0) {
+		unsigned int other_ms;
+
+		EVENT_OFF(gm_ifp->t_query);
+		EVENT_OFF(gm_ifp->t_other_querier);
+
+		other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2;
+		event_add_timer_msec(router->master, gm_t_other_querier, gm_ifp,
+				     other_ms, &gm_ifp->t_other_querier);
+	}
+
+	if (len == sizeof(struct mld_v1_pkt)) {
+		if (general_query) {
+			gm_handle_q_general(gm_ifp, &timers);
+			gm_ifp->stats.rx_query_old_general++;
+		} else {
+			gm_handle_q_group(gm_ifp, &timers, hdr->grp);
+			gm_ifp->stats.rx_query_old_group++;
+		}
+		return;
+	}
+
+	/* v2 query - [S]uppress bit */
+	if (hdr->flags & 0x8) {
+		gm_ifp->stats.rx_query_new_sbit++;
+		return;
+	}
+
+	if (general_query) {
+		gm_handle_q_general(gm_ifp, &timers);
+		gm_ifp->stats.rx_query_new_general++;
+	} else if (!ntohs(hdr->n_src)) {
+		gm_handle_q_group(gm_ifp, &timers, hdr->grp);
+		gm_ifp->stats.rx_query_new_group++;
+	} else {
+		/* this is checked above:
+		 * if (len >= sizeof(struct mld_v2_query_hdr)) {
+		 *	size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);
+		 *	if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
+		 */
+		assume(ntohs(hdr->n_src) <=
+		       (len - sizeof(struct mld_v2_query_hdr)) /
+			       sizeof(pim_addr));
+
+		gm_handle_q_groupsrc(gm_ifp, &timers, hdr->grp, hdr->srcs,
+				     ntohs(hdr->n_src));
+		gm_ifp->stats.rx_query_new_groupsrc++;
+	}
+}
+
/* Validate the ICMPv6 checksum of a received MLD packet and dispatch it to
 * the handler for its message type.
 *
 * data/pktlen cover the full ICMPv6 packet including the header; the IPv6
 * pseudo-header needed for the checksum is rebuilt from pkt_src/pkt_dst.
 * The caller (gm_t_recv) has already verified pktlen >= sizeof(icmp6 hdr).
 */
static void gm_rx_process(struct gm_if *gm_ifp,
			  const struct sockaddr_in6 *pkt_src, pim_addr *pkt_dst,
			  void *data, size_t pktlen)
{
	struct icmp6_plain_hdr *icmp6 = data;
	uint16_t pkt_csum, ref_csum;
	struct ipv6_ph ph6 = {
		.src = pkt_src->sin6_addr,
		.dst = *pkt_dst,
		.ulpl = htons(pktlen),
		.next_hdr = IPPROTO_ICMPV6,
	};

	/* zero the checksum field in place so the reference checksum is
	 * computed over the same bytes the sender used
	 */
	pkt_csum = icmp6->icmp6_cksum;
	icmp6->icmp6_cksum = 0;
	ref_csum = in_cksum_with_ph6(&ph6, data, pktlen);

	if (pkt_csum != ref_csum) {
		zlog_warn(
			log_pkt_src(
				"(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
			pkt_dst, pkt_csum, ref_csum);
		gm_ifp->stats.rx_drop_csum++;
		return;
	}

	/* step past the ICMPv6 header; handlers only see the payload */
	data = (icmp6 + 1);
	pktlen -= sizeof(*icmp6);

	/* unknown ICMPv6 types are silently ignored (socket filter should
	 * only pass these four anyway, see gm_vrf_socket_incref)
	 */
	switch (icmp6->icmp6_type) {
	case ICMP6_MLD_QUERY:
		gm_handle_query(gm_ifp, pkt_src, pkt_dst, data, pktlen);
		break;
	case ICMP6_MLD_V1_REPORT:
		gm_handle_v1_report(gm_ifp, pkt_src, data, pktlen);
		break;
	case ICMP6_MLD_V1_DONE:
		gm_handle_v1_leave(gm_ifp, pkt_src, data, pktlen);
		break;
	case ICMP6_MLD_V2_REPORT:
		gm_handle_v2_report(gm_ifp, pkt_src, data, pktlen);
		break;
	}
}
+
/* Scan an IPv6 Hop-by-Hop options header for a Router Alert option with a
 * particular alert value.
 *
 * hopopts points at the raw HBH extension header (next-header byte, length
 * byte, then option TLVs); hopopt_len is the number of bytes available.
 * Returns true only when a well-formed Router Alert option carrying
 * alert_type is found entirely within bounds.
 */
static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
				 uint16_t alert_type)
{
	uint8_t *pos, *end;

	/* HBH header is always at least one 8-byte unit */
	if (hopopt_len < 8)
		return false;
	/* byte 1 counts additional 8-byte units; reject truncated headers */
	if (hopopt_len < (hopopts[1] + 1U) * 8U)
		return false;

	end = hopopts + (hopopts[1] + 1) * 8;
	pos = hopopts + 2; /* skip next-header and length bytes */

	while (pos < end) {
		uint8_t opt_type = pos[0];

		/* Pad1 is a lone byte without a length field */
		if (opt_type == IP6OPT_PAD1) {
			pos++;
			continue;
		}

		/* need type + length bytes, then the option data, in bounds */
		if (pos + 2 > end)
			break;
		if (pos + 2 + pos[1] > end)
			break;

		if (opt_type == IP6OPT_ROUTER_ALERT && pos[1] == 2 &&
		    (uint16_t)((pos[2] << 8) | pos[3]) == alert_type)
			return true;

		pos += 2 + pos[1];
	}
	return false;
}
+
/* Read-event handler for the per-VRF MLD raw socket.
 *
 * Receives one packet, maps it to the receiving interface via the
 * link-local scope id, validates the IPv6-level MLD requirements
 * (hop limit 1, Router Alert option, link-local source) and hands the
 * payload to gm_rx_process().
 */
static void gm_t_recv(struct event *t)
{
	struct pim_instance *pim = EVENT_ARG(t);
	/* ancillary data buffer: pktinfo + hop-by-hop options + hop limit */
	union {
		char buf[CMSG_SPACE(sizeof(struct in6_pktinfo)) +
			 CMSG_SPACE(256) /* hop options */ +
			 CMSG_SPACE(sizeof(int)) /* hopcount */];
		struct cmsghdr align;
	} cmsgbuf;
	struct cmsghdr *cmsg;
	struct in6_pktinfo *pktinfo = NULL;
	uint8_t *hopopts = NULL;
	size_t hopopt_len = 0;
	int *hoplimit = NULL;
	char rxbuf[2048];
	struct msghdr mh[1] = {};
	struct iovec iov[1];
	struct sockaddr_in6 pkt_src[1] = {};
	ssize_t nread;
	size_t pktlen;

	/* re-arm first so an early return below can't stall reception */
	event_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
		       &pim->t_gm_recv);

	iov->iov_base = rxbuf;
	iov->iov_len = sizeof(rxbuf);

	mh->msg_name = pkt_src;
	mh->msg_namelen = sizeof(pkt_src);
	mh->msg_control = cmsgbuf.buf;
	mh->msg_controllen = sizeof(cmsgbuf.buf);
	mh->msg_iov = iov;
	mh->msg_iovlen = array_size(iov);
	mh->msg_flags = 0;

	/* peek with MSG_TRUNC to learn the real packet size first ... */
	nread = recvmsg(pim->gm_socket, mh, MSG_PEEK | MSG_TRUNC);
	if (nread <= 0) {
		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
		pim->gm_rx_drop_sys++;
		return;
	}

	/* ... and switch to a heap buffer if the stack one is too small */
	if ((size_t)nread > sizeof(rxbuf)) {
		iov->iov_base = XMALLOC(MTYPE_GM_PACKET, nread);
		iov->iov_len = nread;
	}
	nread = recvmsg(pim->gm_socket, mh, 0);
	if (nread <= 0) {
		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
		pim->gm_rx_drop_sys++;
		goto out_free;
	}

	struct interface *ifp;

	/* for link-local sources, sin6_scope_id is the receiving ifindex */
	ifp = if_lookup_by_index(pkt_src->sin6_scope_id, pim->vrf->vrf_id);
	if (!ifp || !ifp->info)
		goto out_free;

	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;

	if (!gm_ifp)
		goto out_free;

	/* pull destination address, HBH options and hop limit out of the
	 * ancillary data
	 */
	for (cmsg = CMSG_FIRSTHDR(mh); cmsg; cmsg = CMSG_NXTHDR(mh, cmsg)) {
		if (cmsg->cmsg_level != SOL_IPV6)
			continue;

		switch (cmsg->cmsg_type) {
		case IPV6_PKTINFO:
			pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
			break;
		case IPV6_HOPOPTS:
			hopopts = CMSG_DATA(cmsg);
			/* NOTE(review): assumes the data starts immediately
			 * after the cmsghdr; CMSG_LEN(0) would be the
			 * portable offset -- confirm on non-Linux platforms
			 */
			hopopt_len = cmsg->cmsg_len - sizeof(*cmsg);
			break;
		case IPV6_HOPLIMIT:
			hoplimit = (int *)CMSG_DATA(cmsg);
			break;
		}
	}

	/* both were requested via setsockopt; missing one is a kernel/API
	 * mismatch, not a peer problem
	 */
	if (!pktinfo || !hoplimit) {
		zlog_err(log_ifp(
			"BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
		pim->gm_rx_drop_sys++;
		goto out_free;
	}

	/* MLD packets must arrive with hop limit 1; anything else was
	 * forwarded (i.e. off-link / spoofed)
	 */
	if (*hoplimit != 1) {
		zlog_err(log_pkt_src("packet with hop limit != 1"));
		/* spoofing attempt => count on srcaddr counter */
		gm_ifp->stats.rx_drop_srcaddr++;
		goto out_free;
	}

	if (!ip6_check_hopopts_ra(hopopts, hopopt_len, IP6_ALERT_MLD)) {
		zlog_err(log_pkt_src(
			"packet without IPv6 Router Alert MLD option"));
		gm_ifp->stats.rx_drop_ra++;
		goto out_free;
	}

	if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src->sin6_addr))
		/* reports from :: happen in normal operation for DAD, so
		 * don't spam log messages about this
		 */
		goto out_free;

	if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src->sin6_addr)) {
		zlog_warn(log_pkt_src("packet from invalid source address"));
		gm_ifp->stats.rx_drop_srcaddr++;
		goto out_free;
	}

	/* need at least the plain ICMPv6 header before dispatching */
	pktlen = nread;
	if (pktlen < sizeof(struct icmp6_plain_hdr)) {
		zlog_warn(log_pkt_src("truncated packet"));
		gm_ifp->stats.rx_drop_malformed++;
		goto out_free;
	}

	gm_rx_process(gm_ifp, pkt_src, &pktinfo->ipi6_addr, iov->iov_base,
		      pktlen);

out_free:
	/* only free if we switched to the heap buffer above */
	if (iov->iov_base != rxbuf)
		XFREE(MTYPE_GM_PACKET, iov->iov_base);
}
+
/* Build and transmit one MLD query on gm_ifp.
 *
 * grp == :: sends a general query (to ff02::1); otherwise a group (or,
 * with srcs/n_srcs, group-and-source) specific query addressed to the
 * group itself.  s_bit sets the Suppress Router-Side Processing flag.
 * The IPv6 Router Alert hop-by-hop option is attached via ancillary data.
 */
static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp,
			  const pim_addr *srcs, size_t n_srcs, bool s_bit)
{
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct sockaddr_in6 dstaddr = {
		.sin6_family = AF_INET6,
		.sin6_scope_id = gm_ifp->ifp->ifindex,
	};
	struct {
		struct icmp6_plain_hdr hdr;
		struct mld_v2_query_hdr v2_query;
	} query = {
		/* clang-format off */
		.hdr = {
			.icmp6_type = ICMP6_MLD_QUERY,
			.icmp6_code = 0,
		},
		.v2_query = {
			.grp = grp,
		},
		/* clang-format on */
	};
	/* pseudo-header, only used for the checksum computation below.
	 * NOTE(review): ulpl is sizeof(query) even when the shorter MLDv1
	 * form or an extra source list is sent -- presumably benign because
	 * the kernel recomputes the checksum on raw ICMPv6 sockets; confirm
	 */
	struct ipv6_ph ph6 = {
		.src = pim_ifp->ll_lowest,
		.ulpl = htons(sizeof(query)),
		.next_hdr = IPPROTO_ICMPV6,
	};
	union {
		char buf[CMSG_SPACE(8) /* hop options */ +
			 CMSG_SPACE(sizeof(struct in6_pktinfo))];
		struct cmsghdr align;
	} cmsg = {};
	struct cmsghdr *cmh;
	struct msghdr mh[1] = {};
	struct iovec iov[3];
	size_t iov_len;
	ssize_t ret, expect_ret;
	uint8_t *dp;
	struct in6_pktinfo *pktinfo;

	if (if_is_loopback(gm_ifp->ifp)) {
		/* Linux is a bit odd with multicast on loopback */
		ph6.src = in6addr_loopback;
		dstaddr.sin6_addr = in6addr_loopback;
	} else if (pim_addr_is_any(grp))
		dstaddr.sin6_addr = gm_all_hosts;
	else
		dstaddr.sin6_addr = grp;

	query.v2_query.max_resp_code =
		mld_max_resp_encode(gm_ifp->cur_max_resp);
	/* QRV field is 3 bits; values >= 8 are encoded as 0 (RFC 3810) */
	query.v2_query.flags = (gm_ifp->cur_qrv < 8) ? gm_ifp->cur_qrv : 0;
	if (s_bit)
		query.v2_query.flags |= 0x08;
	query.v2_query.qqic =
		igmp_msg_encode16to8(gm_ifp->cur_query_intv / 1000);
	query.v2_query.n_src = htons(n_srcs);

	ph6.dst = dstaddr.sin6_addr;

	/* ph6 not included in sendmsg */
	iov[0].iov_base = &ph6;
	iov[0].iov_len = sizeof(ph6);
	iov[1].iov_base = &query;
	if (gm_ifp->cur_version == GM_MLDV1) {
		/* MLDv1 queries stop after the group address */
		iov_len = 2;
		iov[1].iov_len = sizeof(query.hdr) + sizeof(struct mld_v1_pkt);
	} else if (!n_srcs) {
		iov_len = 2;
		iov[1].iov_len = sizeof(query);
	} else {
		iov[1].iov_len = sizeof(query);
		iov[2].iov_base = (void *)srcs;
		iov[2].iov_len = n_srcs * sizeof(srcs[0]);
		iov_len = 3;
	}

	/* checksum covers pseudo-header + payload (iov[0] onwards) */
	query.hdr.icmp6_cksum = in_cksumv(iov, iov_len);

	if (PIM_DEBUG_GM_PACKETS)
		zlog_debug(
			log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
			&pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs);

	/* iov[0] (the pseudo-header) is skipped on the wire */
	mh->msg_name = &dstaddr;
	mh->msg_namelen = sizeof(dstaddr);
	mh->msg_iov = iov + 1;
	mh->msg_iovlen = iov_len - 1;
	mh->msg_control = &cmsg;
	mh->msg_controllen = sizeof(cmsg.buf);

	/* hand-build the 8-byte hop-by-hop header with the Router Alert
	 * option (required on MLD packets)
	 */
	cmh = CMSG_FIRSTHDR(mh);
	cmh->cmsg_level = IPPROTO_IPV6;
	cmh->cmsg_type = IPV6_HOPOPTS;
	cmh->cmsg_len = CMSG_LEN(8);
	dp = CMSG_DATA(cmh);
	*dp++ = 0;		     /* next header */
	*dp++ = 0;		     /* length (8-byte blocks, minus 1) */
	*dp++ = IP6OPT_ROUTER_ALERT; /* router alert */
	*dp++ = 2;		     /* length */
	*dp++ = 0;		     /* value (2 bytes) */
	*dp++ = 0;		     /* value (2 bytes) (0 = MLD) */
	*dp++ = 0;		     /* pad0 */
	*dp++ = 0;		     /* pad0 */

	/* pin source address + egress interface.
	 * NOTE(review): assumes cur_ll_lowest == pim_ifp->ll_lowest used
	 * for the checksum above -- confirm they cannot diverge here
	 */
	cmh = CMSG_NXTHDR(mh, cmh);
	cmh->cmsg_level = IPPROTO_IPV6;
	cmh->cmsg_type = IPV6_PKTINFO;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
	pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmh);
	pktinfo->ipi6_ifindex = gm_ifp->ifp->ifindex;
	pktinfo->ipi6_addr = gm_ifp->cur_ll_lowest;

	expect_ret = iov[1].iov_len;
	if (iov_len == 3)
		expect_ret += iov[2].iov_len;

	frr_with_privs (&pimd_privs) {
		ret = sendmsg(gm_ifp->pim->gm_socket, mh, 0);
	}

	/* a short write counts as failure too */
	if (ret != expect_ret) {
		zlog_warn(log_ifp("failed to send query: %m"));
		gm_ifp->stats.tx_query_fail++;
	} else {
		if (gm_ifp->cur_version == GM_MLDV1) {
			if (pim_addr_is_any(grp))
				gm_ifp->stats.tx_query_old_general++;
			else
				gm_ifp->stats.tx_query_old_group++;
		} else {
			if (pim_addr_is_any(grp))
				gm_ifp->stats.tx_query_new_general++;
			else if (!n_srcs)
				gm_ifp->stats.tx_query_new_group++;
			else
				gm_ifp->stats.tx_query_new_groupsrc++;
		}
	}
}
+
+static void gm_t_query(struct event *t)
+{
+ struct gm_if *gm_ifp = EVENT_ARG(t);
+ unsigned int timer_ms = gm_ifp->cur_query_intv;
+
+ if (gm_ifp->n_startup) {
+ timer_ms /= 4;
+ gm_ifp->n_startup--;
+ }
+
+ event_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
+ &gm_ifp->t_query);
+
+ gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
+}
+
/* Timer wrapper: fire the (re)transmission of an S,G specific query. */
static void gm_t_sg_query(struct event *t)
{
	gm_trigger_specific(EVENT_ARG(t));
}
+
+/* S,G specific queries (triggered by a member leaving) get a little slack
+ * time so we can bundle queries for [S1,S2,S3,...],G into the same query
+ */
+static void gm_send_specific(struct gm_gsq_pending *pend_gsq)
+{
+ struct gm_if *gm_ifp = pend_gsq->iface;
+
+ gm_send_query(gm_ifp, pend_gsq->grp, pend_gsq->srcs, pend_gsq->n_src,
+ pend_gsq->s_bit);
+
+ gm_gsq_pends_del(gm_ifp->gsq_pends, pend_gsq);
+ XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
+}
+
/* Slack timer expired: flush the bundled S,G specific query. */
static void gm_t_gsq_pend(struct event *t)
{
	gm_send_specific(EVENT_ARG(t));
}
+
/* Schedule/send a specific query for sg after a member left.
 *
 * Decrements sg->n_query and re-arms the retransmit timer while more
 * queries are due.  *,G queries go out immediately; S,G queries are
 * collected into a pending bundle (see gm_send_specific) so multiple
 * sources of the same group share one query.
 */
static void gm_trigger_specific(struct gm_sg *sg)
{
	struct gm_if *gm_ifp = sg->iface;
	struct gm_gsq_pending *pend_gsq, ref = {};

	sg->n_query--;
	if (sg->n_query)
		event_add_timer_msec(router->master, gm_t_sg_query, sg,
				     gm_ifp->cur_query_intv_trig,
				     &sg->t_sg_query);

	/* As per RFC 2710, s6 p14:
	 * E.g. a router that starts as a Querier, receives a
	 * Done message for a group and then receives a Query from a router with
	 * a lower address (causing a transition to the Non-Querier state)
	 * continues to send multicast-address-specific queries for the group in
	 * question until it either receives a Report or its timer expires, at
	 * which time it starts performing the actions of a Non-Querier for this
	 * group.
	 */
	/* Therefore here we do not need to check if this router is querier or
	 * not. This is called only for querier, hence it will work even if the
	 * router transitions from querier to non-querier.
	 */

	/* socket may be gone if the VRF is being torn down */
	if (gm_ifp->pim->gm_socket == -1)
		return;

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_sg(sg, "triggered query"));

	/* *,G query: no bundling needed, send right away */
	if (pim_addr_is_any(sg->sgaddr.src)) {
		gm_send_query(gm_ifp, sg->sgaddr.grp, NULL, 0, sg->query_sbit);
		return;
	}

	ref.grp = sg->sgaddr.grp;
	ref.s_bit = sg->query_sbit;

	/* find or create the pending bundle for this group/s-bit combo */
	pend_gsq = gm_gsq_pends_find(gm_ifp->gsq_pends, &ref);
	if (!pend_gsq) {
		pend_gsq = XCALLOC(MTYPE_GM_GSQ_PENDING, sizeof(*pend_gsq));
		pend_gsq->grp = sg->sgaddr.grp;
		pend_gsq->s_bit = sg->query_sbit;
		pend_gsq->iface = gm_ifp;
		gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);

		/* flush after the configured fuzz delay */
		event_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
				   &gm_ifp->cfg_timing_fuzz, &pend_gsq->t_send);
	}

	assert(pend_gsq->n_src < array_size(pend_gsq->srcs));

	pend_gsq->srcs[pend_gsq->n_src] = sg->sgaddr.src;
	pend_gsq->n_src++;

	/* bundle full => send immediately instead of waiting for the timer */
	if (pend_gsq->n_src == array_size(pend_gsq->srcs)) {
		EVENT_OFF(pend_gsq->t_send);
		gm_send_specific(pend_gsq);
		pend_gsq = NULL;
	}
}
+
/* Take one interface reference on the per-VRF MLD raw socket, creating and
 * configuring the socket on the first reference (or if an earlier creation
 * attempt failed and left gm_socket == -1).
 */
static void gm_vrf_socket_incref(struct pim_instance *pim)
{
	struct vrf *vrf = pim->vrf;
	int ret, intval;
	struct icmp6_filter filter[1];

	/* count++ always; only the first ref (or a failed earlier attempt)
	 * falls through to socket creation
	 */
	if (pim->gm_socket_if_count++ && pim->gm_socket != -1)
		return;

	/* only the four MLD message types are of interest */
	ICMP6_FILTER_SETBLOCKALL(filter);
	ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY, filter);
	ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT, filter);
	ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE, filter);
	ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT, filter);

	frr_with_privs (&pimd_privs) {
		pim->gm_socket = vrf_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
					    vrf->vrf_id, vrf->name);
		if (pim->gm_socket < 0) {
			zlog_err("(VRF %s) could not create MLD socket: %m",
				 vrf->name);
			return;
		}

		ret = setsockopt(pim->gm_socket, SOL_ICMPV6, ICMP6_FILTER,
				 filter, sizeof(filter));
		if (ret)
			zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m",
				 vrf->name);

		/* destination address + ifindex, needed by gm_t_recv */
		intval = 1;
		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVPKTINFO,
				 &intval, sizeof(intval));
		if (ret)
			zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m",
				 vrf->name);

		/* hop-by-hop options, needed for the Router Alert check */
		intval = 1;
		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPOPTS,
				 &intval, sizeof(intval));
		if (ret)
			zlog_err("(VRF %s) failed to set IPV6_HOPOPTS: %m",
				 vrf->name);

		/* hop limit, needed for the "== 1" check */
		intval = 1;
		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPLIMIT,
				 &intval, sizeof(intval));
		if (ret)
			zlog_err("(VRF %s) failed to set IPV6_HOPLIMIT: %m",
				 vrf->name);

		/* NOTE(review): intval = 1 *enables* IPV6_MULTICAST_LOOP,
		 * but the error text says "disable" -- confirm which is
		 * intended (loop seems needed for loopback interfaces)
		 */
		intval = 1;
		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_LOOP,
				 &intval, sizeof(intval));
		if (ret)
			zlog_err(
				"(VRF %s) failed to disable IPV6_MULTICAST_LOOP: %m",
				vrf->name);

		intval = 1;
		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_HOPS,
				 &intval, sizeof(intval));
		if (ret)
			zlog_err(
				"(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m",
				vrf->name);

		/* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
		 * RX filtering in Linux. It only means "receive all groups
		 * that something on the system has joined". To actually
		 * receive *all* MLD packets - which is what we need -
		 * multicast routing must be enabled on the interface. And
		 * this only works for MLD packets specifically.
		 *
		 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
		 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
		 *
		 * Also note that the code there explicitly checks for the IPv6
		 * router alert MLD option (which is required by the RFC to be
		 * on MLD packets.) That implies trying to support hosts which
		 * erroneously don't add that option is just not possible.
		 */
		intval = 1;
		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_ALL,
				 &intval, sizeof(intval));
		if (ret)
			zlog_info(
				"(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
				vrf->name);
	}

	event_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
		       &pim->t_gm_recv);
}
+
+static void gm_vrf_socket_decref(struct pim_instance *pim)
+{
+ if (--pim->gm_socket_if_count)
+ return;
+
+ EVENT_OFF(pim->t_gm_recv);
+ close(pim->gm_socket);
+ pim->gm_socket = -1;
+}
+
/* Start MLD operation on an interface: allocate the per-interface state,
 * snapshot the configured timer/version values, initialize the data
 * structures and join the all-MLDv2-routers group (ff02::16).
 *
 * Preconditions (asserted): PIM is configured on the interface, a mroute
 * VIF exists, and MLD is not already running here.
 */
static void gm_start(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;

	assert(pim_ifp);
	assert(pim_ifp->pim);
	assert(pim_ifp->mroute_vif_index >= 0);
	assert(!pim_ifp->mld);

	/* shared per-VRF socket, refcounted across interfaces */
	gm_vrf_socket_incref(pim_ifp->pim);

	gm_ifp = XCALLOC(MTYPE_GM_IFACE, sizeof(*gm_ifp));
	gm_ifp->ifp = ifp;
	pim_ifp->mld = gm_ifp;
	gm_ifp->pim = pim_ifp->pim;
	monotime(&gm_ifp->started);

	zlog_info(log_ifp("starting MLD"));

	if (pim_ifp->mld_version == 1)
		gm_ifp->cur_version = GM_MLDV1;
	else
		gm_ifp->cur_version = GM_MLDV2;

	/* copy config into the "cur_*" working values (config units are
	 * seconds / deciseconds, working units are milliseconds)
	 */
	gm_ifp->cur_qrv = pim_ifp->gm_default_robustness_variable;
	gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000;
	gm_ifp->cur_query_intv_trig =
		pim_ifp->gm_specific_query_max_response_time_dsec * 100;
	gm_ifp->cur_max_resp = pim_ifp->gm_query_max_response_time_dsec * 100;
	gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;

	/* fixed 10ms fuzz used to bundle S,G queries & expiries */
	gm_ifp->cfg_timing_fuzz.tv_sec = 0;
	gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000;

	gm_sgs_init(gm_ifp->sgs);
	gm_subscribers_init(gm_ifp->subscribers);
	gm_packet_expires_init(gm_ifp->expires);
	gm_grp_pends_init(gm_ifp->grp_pends);
	gm_gsq_pends_init(gm_ifp->gsq_pends);

	frr_with_privs (&pimd_privs) {
		struct ipv6_mreq mreq;
		int ret;

		/* all-MLDv2 group */
		mreq.ipv6mr_multiaddr = gm_all_routers;
		mreq.ipv6mr_interface = ifp->ifindex;
		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
				 IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
		if (ret)
			zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
				 ifp->name);
	}
}
+
/* Free all group-membership state attached to gm_ifp.
 *
 * Teardown order matters: packets (which reference S,G entries and
 * subscribers) first, then pending expiry entries, then S,G entries
 * (which must have no remaining subscriptions), then the subscribers
 * themselves.
 */
void gm_group_delete(struct gm_if *gm_ifp)
{
	struct gm_sg *sg;
	struct gm_packet_state *pkt;
	struct gm_grp_pending *pend_grp;
	struct gm_gsq_pending *pend_gsq;
	struct gm_subscriber *subscriber;

	while ((pkt = gm_packet_expires_first(gm_ifp->expires)))
		gm_packet_drop(pkt, false);

	while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) {
		EVENT_OFF(pend_grp->t_expire);
		XFREE(MTYPE_GM_GRP_PENDING, pend_grp);
	}

	while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) {
		EVENT_OFF(pend_gsq->t_send);
		XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
	}

	while ((sg = gm_sgs_pop(gm_ifp->sgs))) {
		EVENT_OFF(sg->t_sg_expire);
		/* dropping the packets above must have emptied these */
		assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG",
			&sg->sgaddr);
		assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG",
			&sg->sgaddr);

		gm_sg_free(sg);
	}
	while ((subscriber = gm_subscribers_pop(gm_ifp->subscribers))) {
		/* likewise, no packets may still reference the subscriber */
		assertf(!gm_packets_count(subscriber->packets), "%pPA",
			&subscriber->addr);
		XFREE(MTYPE_GM_SUBSCRIBER, subscriber);
	}
}
+
/* Stop MLD on an interface and free all of its state.  Safe to call when
 * MLD was never started (no-op in that case).
 */
void gm_ifp_teardown(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;

	if (!pim_ifp || !pim_ifp->mld)
		return;

	gm_ifp = pim_ifp->mld;
	/* flag for callbacks that may still fire during teardown */
	gm_ifp->stopping = true;
	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("MLD stop"));

	EVENT_OFF(gm_ifp->t_query);
	EVENT_OFF(gm_ifp->t_other_querier);
	EVENT_OFF(gm_ifp->t_expire);

	frr_with_privs (&pimd_privs) {
		struct ipv6_mreq mreq;
		int ret;

		/* all-MLDv2 group (joined in gm_start) */
		mreq.ipv6mr_multiaddr = gm_all_routers;
		mreq.ipv6mr_interface = ifp->ifindex;
		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
				 IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
		if (ret)
			zlog_err(
				"(%s) failed to leave ff02::16 (all-MLDv2): %m",
				ifp->name);
	}

	/* may close the per-VRF socket if we were the last user */
	gm_vrf_socket_decref(gm_ifp->pim);

	gm_group_delete(gm_ifp);

	gm_grp_pends_fini(gm_ifp->grp_pends);
	gm_packet_expires_fini(gm_ifp->expires);
	gm_subscribers_fini(gm_ifp->subscribers);
	gm_sgs_fini(gm_ifp->sgs);

	XFREE(MTYPE_GM_IFACE, gm_ifp);
	pim_ifp->mld = NULL;
}
+
/* React to a change of the interface's lowest link-local address, which is
 * the address MLD queries are sourced from and thus our querier-election
 * identity.  If we were (or should now become) the elected querier,
 * restart the startup query burst from the new address.
 */
static void gm_update_ll(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;
	bool was_querier;

	/* we were querier iff the elected querier was our own (previous)
	 * lowest link-local address
	 */
	was_querier =
		!IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) &&
		!pim_addr_is_any(gm_ifp->querier);

	gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest;
	if (was_querier)
		gm_ifp->querier = pim_ifp->ll_lowest;
	EVENT_OFF(gm_ifp->t_query);

	/* address went away entirely => can't send queries anymore */
	if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) {
		if (was_querier)
			zlog_info(log_ifp(
				"lost link-local address, stopping querier"));
		return;
	}

	if (was_querier)
		zlog_info(log_ifp("new link-local %pPA while querier"),
			  &gm_ifp->cur_ll_lowest);
	else if (IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) < 0 ||
		 pim_addr_is_any(gm_ifp->querier)) {
		/* lowest address wins the election (or nobody elected yet) */
		zlog_info(log_ifp("new link-local %pPA, becoming querier"),
			  &gm_ifp->cur_ll_lowest);
		gm_ifp->querier = gm_ifp->cur_ll_lowest;
	} else
		return;

	/* (re)start the accelerated startup query sequence immediately */
	gm_ifp->n_startup = gm_ifp->cur_qrv;
	event_execute(router->master, gm_t_query, gm_ifp, 0, NULL);
}
+
/* Sync MLD state on an interface with its current configuration and
 * operational status: start/stop MLD as needed, pick up address changes
 * and copy changed config values into the working state.  Changes that
 * affect query scheduling trigger a fresh query cycle.
 */
void gm_ifp_update(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;
	bool changed = false;

	if (!pim_ifp)
		return;
	/* interface down / PIM gone / no VIF => tear down and stop here */
	if (!if_is_operative(ifp) || !pim_ifp->pim ||
	    pim_ifp->mroute_vif_index < 0) {
		gm_ifp_teardown(ifp);
		return;
	}

	/*
	 * If ipv6 mld is not enabled on interface, do not start mld activites.
	 */
	if (!pim_ifp->gm_enable)
		return;

	if (!pim_ifp->mld) {
		changed = true;
		gm_start(ifp);
		assume(pim_ifp->mld != NULL);
	}

	gm_ifp = pim_ifp->mld;
	if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest))
		gm_update_ll(ifp);

	unsigned int cfg_query_intv = pim_ifp->gm_default_query_interval * 1000;

	if (gm_ifp->cur_query_intv != cfg_query_intv) {
		gm_ifp->cur_query_intv = cfg_query_intv;
		changed = true;
	}

	unsigned int cfg_query_intv_trig =
		pim_ifp->gm_specific_query_max_response_time_dsec * 100;

	if (gm_ifp->cur_query_intv_trig != cfg_query_intv_trig) {
		gm_ifp->cur_query_intv_trig = cfg_query_intv_trig;
		changed = true;
	}

	/* NOTE(review): max_resp and lmqc updates don't set "changed" --
	 * presumably intentional since they only affect the contents of
	 * future queries, not their scheduling; confirm
	 */
	unsigned int cfg_max_response =
		pim_ifp->gm_query_max_response_time_dsec * 100;

	if (gm_ifp->cur_max_resp != cfg_max_response)
		gm_ifp->cur_max_resp = cfg_max_response;

	if (gm_ifp->cur_lmqc != pim_ifp->gm_last_member_query_count)
		gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;

	enum gm_version cfg_version;

	if (pim_ifp->mld_version == 1)
		cfg_version = GM_MLDV1;
	else
		cfg_version = GM_MLDV2;
	if (gm_ifp->cur_version != cfg_version) {
		gm_ifp->cur_version = cfg_version;
		changed = true;
	}

	if (changed) {
		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(log_ifp(
				"MLD querier config changed, querying"));
		gm_bump_querier(gm_ifp);
	}
}
+
+/*
+ * CLI (show commands only)
+ */
+
+#include "lib/command.h"
+
+#include "pimd/pim6_mld_clippy.c"
+
+static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str,
+ int *err)
+{
+ struct vrf *ret;
+
+ if (!vrf_str)
+ return vrf_lookup_by_id(VRF_DEFAULT);
+ if (!strcmp(vrf_str, "all"))
+ return NULL;
+ ret = vrf_lookup_by_name(vrf_str);
+ if (ret)
+ return ret;
+
+ vty_out(vty, "%% VRF %pSQq does not exist\n", vrf_str);
+ *err = CMD_WARNING;
+ return NULL;
+}
+
/* Print the detailed (non-JSON) per-interface MLD status block, including
 * all currently running expiry timers.
 */
static void gm_show_if_one_detail(struct vty *vty, struct interface *ifp)
{
	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
	struct gm_if *gm_ifp;
	bool querier;
	size_t i;

	if (!pim_ifp) {
		vty_out(vty, "Interface %s: no PIM/MLD config\n\n", ifp->name);
		return;
	}

	gm_ifp = pim_ifp->mld;
	if (!gm_ifp) {
		vty_out(vty, "Interface %s: MLD not running\n\n", ifp->name);
		return;
	}

	/* we are querier iff the elected querier is our own address */
	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);

	vty_out(vty, "Interface %s: MLD running\n", ifp->name);
	vty_out(vty, "  Uptime:                  %pTVMs\n", &gm_ifp->started);
	vty_out(vty, "  MLD version:             %d\n", gm_ifp->cur_version);
	vty_out(vty, "  Querier:                 %pPA%s\n", &gm_ifp->querier,
		querier ? " (this system)" : "");
	vty_out(vty, "  Query timer:             %pTH\n", gm_ifp->t_query);
	vty_out(vty, "  Other querier timer:     %pTH\n",
		gm_ifp->t_other_querier);
	vty_out(vty, "  Robustness value:        %u\n", gm_ifp->cur_qrv);
	vty_out(vty, "  Query interval:          %ums\n",
		gm_ifp->cur_query_intv);
	vty_out(vty, "  Query response timer:    %ums\n", gm_ifp->cur_max_resp);
	vty_out(vty, "  Last member query intv.: %ums\n",
		gm_ifp->cur_query_intv_trig);
	vty_out(vty, "  %u expiry timers from general queries:\n",
		gm_ifp->n_pending);
	for (i = 0; i < gm_ifp->n_pending; i++) {
		struct gm_general_pending *p = &gm_ifp->pending[i];

		vty_out(vty, "    %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
			&p->query, &p->expiry);
	}
	vty_out(vty, "  %zu expiry timers from *,G queries\n",
		gm_grp_pends_count(gm_ifp->grp_pends));
	vty_out(vty, "  %zu expiry timers from S,G queries\n",
		gm_gsq_pends_count(gm_ifp->gsq_pends));
	vty_out(vty, "  %zu total *,G/S,G from %zu hosts in %zu bundles\n",
		gm_sgs_count(gm_ifp->sgs),
		gm_subscribers_count(gm_ifp->subscribers),
		gm_packet_expires_count(gm_ifp->expires));
	vty_out(vty, "\n");
}
+
/* Emit one interface's MLD summary, either as a JSON object (js_if) or as
 * a row in the prepared text table (tt).  Exactly one of the two must be
 * non-NULL; the caller guarantees pim_ifp->mld is set.
 */
static void gm_show_if_one(struct vty *vty, struct interface *ifp,
			   json_object *js_if, struct ttable *tt)
{
	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;
	bool querier;

	assume(js_if || tt);

	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);

	if (js_if) {
		json_object_string_add(js_if, "name", ifp->name);
		json_object_string_addf(js_if, "address", "%pPA",
					&pim_ifp->primary_address);
		json_object_string_add(js_if, "state", "up");
		json_object_string_addf(js_if, "version", "%d",
					gm_ifp->cur_version);
		json_object_string_addf(js_if, "upTime", "%pTVMs",
					&gm_ifp->started);
		json_object_boolean_add(js_if, "querier", querier);
		json_object_string_addf(js_if, "querierIp", "%pPA",
					&gm_ifp->querier);
		/* only one of the two timers is running at any given time */
		if (querier)
			json_object_string_addf(js_if, "queryTimer", "%pTH",
						gm_ifp->t_query);
		else
			json_object_string_addf(js_if, "otherQuerierTimer",
						"%pTH",
						gm_ifp->t_other_querier);
		json_object_int_add(js_if, "timerRobustnessValue",
				    gm_ifp->cur_qrv);
		json_object_int_add(js_if, "lastMemberQueryCount",
				    gm_ifp->cur_lmqc);
		json_object_int_add(js_if, "timerQueryIntervalMsec",
				    gm_ifp->cur_query_intv);
		json_object_int_add(js_if, "timerQueryResponseTimerMsec",
				    gm_ifp->cur_max_resp);
		json_object_int_add(js_if, "timerLastMemberQueryIntervalMsec",
				    gm_ifp->cur_query_intv_trig);
	} else {
		ttable_add_row(tt, "%s|%s|%pPAs|%d|%s|%pPAs|%pTH|%pTVMs",
			       ifp->name, "up", &pim_ifp->primary_address,
			       gm_ifp->cur_version, querier ? "local" : "other",
			       &gm_ifp->querier, gm_ifp->t_query,
			       &gm_ifp->started);
	}
}
+
/* Walk one VRF's interfaces for "show ipv6 mld interface", optionally
 * filtered by interface name, and emit either JSON, a detailed text dump,
 * or a summary table.
 */
static void gm_show_if_vrf(struct vty *vty, struct vrf *vrf, const char *ifname,
			   bool detail, json_object *js)
{
	struct interface *ifp;
	json_object *js_vrf = NULL;
	struct pim_interface *pim_ifp;
	struct ttable *tt = NULL;
	char *table = NULL;

	if (js) {
		js_vrf = json_object_new_object();
		json_object_object_add(js, vrf->name, js_vrf);
	}

	if (!js && !detail) {
		/* Prepare table. */
		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
		ttable_add_row(
			tt,
			"Interface|State|Address|V|Querier|QuerierIp|Query Timer|Uptime");
		tt->style.cell.rpad = 2;
		tt->style.corner = '+';
		ttable_restyle(tt);
	}

	FOR_ALL_INTERFACES (vrf, ifp) {
		json_object *js_if = NULL;

		if (ifname && strcmp(ifp->name, ifname))
			continue;
		/* detail text mode prints its own "not running" messages,
		 * so it doesn't use the pim_ifp/mld filter below
		 */
		if (detail && !js) {
			gm_show_if_one_detail(vty, ifp);
			continue;
		}

		pim_ifp = ifp->info;

		if (!pim_ifp || !pim_ifp->mld)
			continue;

		if (js) {
			js_if = json_object_new_object();
			/*
			 * If we have js as true and detail as false
			 * and if Coverity thinks that js_if is NULL
			 * because of a failed call to new then
			 * when we call gm_show_if_one below
			 * the tt can be deref'ed and as such
			 * FRR will crash. But since we know
			 * that json_object_new_object never fails
			 * then let's tell Coverity that this assumption
			 * is true. I'm not worried about fast path
			 * here at all.
			 */
			assert(js_if);
			json_object_object_add(js_vrf, ifp->name, js_if);
		}

		gm_show_if_one(vty, ifp, js_if, tt);
	}

	/* Dump the generated table. */
	if (!js && !detail) {
		table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
		ttable_del(tt);
	}
}
+
+static void gm_show_if(struct vty *vty, struct vrf *vrf, const char *ifname,
+ bool detail, json_object *js)
+{
+ if (vrf)
+ gm_show_if_vrf(vty, vrf, ifname, detail, js);
+ else
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
+ gm_show_if_vrf(vty, vrf, ifname, detail, js);
+}
+
/* CLI: "show ipv6 mld [vrf ...] interface [IFNAME|detail] [json]" --
 * per-interface MLD status (see gm_show_if / gm_show_if_one[_detail]).
 */
DEFPY(gm_show_interface,
      gm_show_interface_cmd,
      "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME | detail$detail] [json$json]",
      SHOW_STR
      IPV6_STR
      MLD_STR
      VRF_FULL_CMD_HELP_STR
      "MLD interface information\n"
      "Interface name\n"
      "Detailed output\n"
      JSON_STR)
{
	int ret = CMD_SUCCESS;
	struct vrf *vrf;
	json_object *js = NULL;

	/* vrf == NULL with ret == CMD_SUCCESS means "all VRFs" */
	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
	if (ret != CMD_SUCCESS)
		return ret;

	if (json)
		js = json_object_new_object();
	gm_show_if(vty, vrf, ifname, !!detail, js);
	/* vty_json handles js == NULL (plain-text case) too */
	return vty_json(vty, js);
}
+
/* Print one interface's MLD counters, either into js_if (JSON key names)
 * or as aligned text lines.  The table below maps each counter to both
 * its human-readable label and its JSON key.
 */
static void gm_show_stats_one(struct vty *vty, struct gm_if *gm_ifp,
			      json_object *js_if)
{
	struct gm_if_stats *stats = &gm_ifp->stats;
	/* clang-format off */
	struct {
		const char *text;
		const char *js_key;
		uint64_t *val;
	} *item, items[] = {
		{ "v2 reports received", "rxV2Reports", &stats->rx_new_report },
		{ "v1 reports received", "rxV1Reports", &stats->rx_old_report },
		{ "v1 done received", "rxV1Done", &stats->rx_old_leave },

		{ "v2 *,* queries received", "rxV2QueryGeneral", &stats->rx_query_new_general },
		{ "v2 *,G queries received", "rxV2QueryGroup", &stats->rx_query_new_group },
		{ "v2 S,G queries received", "rxV2QueryGroupSource", &stats->rx_query_new_groupsrc },
		{ "v2 S-bit queries received", "rxV2QuerySBit", &stats->rx_query_new_sbit },
		{ "v1 *,* queries received", "rxV1QueryGeneral", &stats->rx_query_old_general },
		{ "v1 *,G queries received", "rxV1QueryGroup", &stats->rx_query_old_group },

		{ "v2 *,* queries sent", "txV2QueryGeneral", &stats->tx_query_new_general },
		{ "v2 *,G queries sent", "txV2QueryGroup", &stats->tx_query_new_group },
		{ "v2 S,G queries sent", "txV2QueryGroupSource", &stats->tx_query_new_groupsrc },
		{ "v1 *,* queries sent", "txV1QueryGeneral", &stats->tx_query_old_general },
		{ "v1 *,G queries sent", "txV1QueryGroup", &stats->tx_query_old_group },
		{ "TX errors", "txErrors", &stats->tx_query_fail },

		{ "RX dropped (checksum error)", "rxDropChecksum", &stats->rx_drop_csum },
		{ "RX dropped (invalid source)", "rxDropSrcAddr", &stats->rx_drop_srcaddr },
		{ "RX dropped (invalid dest.)", "rxDropDstAddr", &stats->rx_drop_dstaddr },
		{ "RX dropped (missing alert)", "rxDropRtrAlert", &stats->rx_drop_ra },
		{ "RX dropped (malformed pkt.)", "rxDropMalformed", &stats->rx_drop_malformed },
		{ "RX truncated reports", "rxTruncatedRep", &stats->rx_trunc_report },
	};
	/* clang-format on */

	for (item = items; item < items + array_size(items); item++) {
		if (js_if)
			json_object_int_add(js_if, item->js_key, *item->val);
		else
			vty_out(vty, "  %-30s  %" PRIu64 "\n", item->text,
				*item->val);
	}
}
+
/* Walk one VRF's interfaces for "show ipv6 mld statistics", optionally
 * filtered by interface name, printing per-interface counter blocks
 * (text) or nested JSON objects.
 */
static void gm_show_stats_vrf(struct vty *vty, struct vrf *vrf,
			      const char *ifname, json_object *js)
{
	struct interface *ifp;
	json_object *js_vrf;

	if (js) {
		js_vrf = json_object_new_object();
		json_object_object_add(js, vrf->name, js_vrf);
	}

	FOR_ALL_INTERFACES (vrf, ifp) {
		struct pim_interface *pim_ifp;
		struct gm_if *gm_ifp;
		json_object *js_if = NULL;

		if (ifname && strcmp(ifp->name, ifname))
			continue;

		/* skip interfaces without running MLD state */
		if (!ifp->info)
			continue;
		pim_ifp = ifp->info;
		if (!pim_ifp->mld)
			continue;
		gm_ifp = pim_ifp->mld;

		if (js) {
			js_if = json_object_new_object();
			json_object_object_add(js_vrf, ifp->name, js_if);
		} else {
			vty_out(vty, "Interface: %s\n", ifp->name);
		}
		gm_show_stats_one(vty, gm_ifp, js_if);
		if (!js)
			vty_out(vty, "\n");
	}
}
+
+/* "show ipv6 mld [vrf <VRF|all>] statistics [interface IFNAME] [json]"
+ *
+ * gm_cmd_vrf_lookup() may return NULL with ret == CMD_SUCCESS; that
+ * means "all VRFs" and is handled by the RB_FOREACH walk below.
+ * NOTE(review): vty_json() is also reached when js == NULL (non-JSON
+ * output already emitted) — presumably a no-op then; verify.
+ */
+DEFPY(gm_show_interface_stats,
+      gm_show_interface_stats_cmd,
+      "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
+      SHOW_STR
+      IPV6_STR
+      MLD_STR
+      VRF_FULL_CMD_HELP_STR
+      "MLD statistics\n"
+      INTERFACE_STR
+      "Interface name\n"
+      JSON_STR)
+{
+	int ret = CMD_SUCCESS;
+	struct vrf *vrf;
+	json_object *js = NULL;
+
+	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
+	if (ret != CMD_SUCCESS)
+		return ret;
+
+	if (json)
+		js = json_object_new_object();
+
+	if (vrf)
+		gm_show_stats_vrf(vty, vrf, ifname, js);
+	else
+		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
+			gm_show_stats_vrf(vty, vrf, ifname, js);
+	return vty_json(vty, js);
+}
+
+/* Print (or JSON-encode) the joined (S,G) entries of one MLD interface.
+ *
+ * groups/sources: optional prefix filters; since the sgs RB tree is
+ * sorted group-first, a group filter can seek to the first entry >= the
+ * filter prefix and stop once entries no longer match (the "break"
+ * below).  The source filter cannot use the sort order, so it skips
+ * non-matching entries one by one ("continue").
+ * detail: additionally list each tracked subscriber per (S,G).
+ * js_if: JSON output target, or NULL for plain-text vty output.
+ */
+static void gm_show_joins_one(struct vty *vty, struct gm_if *gm_ifp,
+			      const struct prefix_ipv6 *groups,
+			      const struct prefix_ipv6 *sources, bool detail,
+			      json_object *js_if)
+{
+	struct gm_sg *sg, *sg_start;
+	json_object *js_group = NULL;
+	/* group address the current js_group object was created for */
+	pim_addr js_grpaddr = PIMADDR_ANY;
+	struct gm_subscriber sub_ref = {}, *sub_untracked;
+
+	if (groups) {
+		struct gm_sg sg_ref = {};
+
+		/* seek to first entry at/after the filter's group */
+		sg_ref.sgaddr.grp = pim_addr_from_prefix(groups);
+		sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
+	} else
+		sg_start = gm_sgs_first(gm_ifp->sgs);
+
+	sub_ref.addr = gm_dummy_untracked;
+	sub_untracked = gm_subscribers_find(gm_ifp->subscribers, &sub_ref);
+	/* NB: sub_untracked may be NULL if no untracked joins exist */
+
+	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
+		/* timestamps of most recent tracked/untracked report */
+		struct timeval *recent = NULL, *untracked = NULL;
+		json_object *js_src;
+
+		if (groups) {
+			struct prefix grp_p;
+
+			pim_addr_to_prefix(&grp_p, sg->sgaddr.grp);
+			/* sorted by group: past the filter means done */
+			if (!prefix_match(groups, &grp_p))
+				break;
+		}
+
+		if (sources) {
+			struct prefix src_p;
+
+			pim_addr_to_prefix(&src_p, sg->sgaddr.src);
+			if (!prefix_match(sources, &src_p))
+				continue;
+		}
+
+		if (sg->most_recent) {
+			struct gm_packet_state *packet;
+
+			packet = gm_packet_sg2state(sg->most_recent);
+			recent = &packet->received;
+		}
+
+		if (sub_untracked) {
+			struct gm_packet_state *packet;
+			struct gm_packet_sg *item;
+
+			item = gm_packet_sg_find(sg, GM_SUB_POS, sub_untracked);
+			if (item) {
+				packet = gm_packet_sg2state(item);
+				untracked = &packet->received;
+			}
+		}
+
+		if (!js_if) {
+			FMT_NSTD_BEGIN; /* %.0p */
+			vty_out(vty,
+				"%-30pPA %-30pPAs %-16s %10.0pTVMs %10.0pTVMs %10.0pTVMs\n",
+				&sg->sgaddr.grp, &sg->sgaddr.src,
+				gm_states[sg->state], recent, untracked,
+				&sg->created);
+
+			if (!detail)
+				continue;
+
+			struct gm_packet_sg *item;
+			struct gm_packet_state *packet;
+
+			/* per-subscriber join/prune lines, skipping the
+			 * untracked pseudo-subscriber shown above
+			 */
+			frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
+				packet = gm_packet_sg2state(item);
+
+				if (packet->subscriber == sub_untracked)
+					continue;
+				vty_out(vty, "    %-58pPA %-16s %10.0pTVMs\n",
+					&packet->subscriber->addr, "(JOIN)",
+					&packet->received);
+			}
+			frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
+				packet = gm_packet_sg2state(item);
+
+				if (packet->subscriber == sub_untracked)
+					continue;
+				vty_out(vty, "    %-58pPA %-16s %10.0pTVMs\n",
+					&packet->subscriber->addr, "(PRUNE)",
+					&packet->received);
+			}
+			FMT_NSTD_END; /* %.0p */
+			continue;
+		}
+		/* if (js_if) */
+
+		/* start a new group object when the group changes */
+		if (!js_group || pim_addr_cmp(js_grpaddr, sg->sgaddr.grp)) {
+			js_group = json_object_new_object();
+			json_object_object_addf(js_if, js_group, "%pPA",
+						&sg->sgaddr.grp);
+			js_grpaddr = sg->sgaddr.grp;
+		}
+
+		js_src = json_object_new_object();
+		json_object_object_addf(js_group, js_src, "%pPAs",
+					&sg->sgaddr.src);
+
+		json_object_string_add(js_src, "state", gm_states[sg->state]);
+		json_object_string_addf(js_src, "created", "%pTVMs",
+					&sg->created);
+		/* NOTE(review): recent may still be NULL here (no
+		 * most_recent packet); unlike "untrackedLastSeen" below this
+		 * is not guarded — confirm %pTVMs handles NULL gracefully
+		 */
+		json_object_string_addf(js_src, "lastSeen", "%pTVMs", recent);
+
+		if (untracked)
+			json_object_string_addf(js_src, "untrackedLastSeen",
+						"%pTVMs", untracked);
+		if (!detail)
+			continue;
+
+		json_object *js_subs;
+		struct gm_packet_sg *item;
+		struct gm_packet_state *packet;
+
+		js_subs = json_object_new_object();
+		json_object_object_add(js_src, "joinedBy", js_subs);
+		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
+			packet = gm_packet_sg2state(item);
+			if (packet->subscriber == sub_untracked)
+				continue;
+
+			json_object *js_sub;
+
+			js_sub = json_object_new_object();
+			json_object_object_addf(js_subs, js_sub, "%pPA",
+						&packet->subscriber->addr);
+			json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
+						&packet->received);
+		}
+
+		js_subs = json_object_new_object();
+		json_object_object_add(js_src, "prunedBy", js_subs);
+		frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
+			packet = gm_packet_sg2state(item);
+			if (packet->subscriber == sub_untracked)
+				continue;
+
+			json_object *js_sub;
+
+			js_sub = json_object_new_object();
+			json_object_object_addf(js_subs, js_sub, "%pPA",
+						&packet->subscriber->addr);
+			json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
+						&packet->received);
+		}
+	}
+}
+
+/* Per-VRF wrapper for gm_show_joins_one(): iterates interfaces and
+ * applies the optional interface-name filter.
+ *
+ * NOTE(review): this adds a "vrf" key to the per-VRF JSON object while
+ * the stats counterpart (gm_show_stats_vrf) does not — consider making
+ * the two JSON layouts consistent.
+ */
+static void gm_show_joins_vrf(struct vty *vty, struct vrf *vrf,
+			      const char *ifname,
+			      const struct prefix_ipv6 *groups,
+			      const struct prefix_ipv6 *sources, bool detail,
+			      json_object *js)
+{
+	struct interface *ifp;
+	json_object *js_vrf;
+
+	if (js) {
+		js_vrf = json_object_new_object();
+		json_object_string_add(js_vrf, "vrf", vrf->name);
+		json_object_object_add(js, vrf->name, js_vrf);
+	}
+
+	FOR_ALL_INTERFACES (vrf, ifp) {
+		struct pim_interface *pim_ifp;
+		struct gm_if *gm_ifp;
+		json_object *js_if = NULL;
+
+		if (ifname && strcmp(ifp->name, ifname))
+			continue;
+
+		/* skip interfaces without PIM and/or MLD state */
+		if (!ifp->info)
+			continue;
+		pim_ifp = ifp->info;
+		if (!pim_ifp->mld)
+			continue;
+		gm_ifp = pim_ifp->mld;
+
+		if (js) {
+			js_if = json_object_new_object();
+			json_object_object_add(js_vrf, ifp->name, js_if);
+		}
+
+		/* with an explicit interface filter, the name header is
+		 * redundant in text output
+		 */
+		if (!js && !ifname)
+			vty_out(vty, "\nOn interface %s:\n", ifp->name);
+
+		gm_show_joins_one(vty, gm_ifp, groups, sources, detail, js_if);
+	}
+}
+
+/* "show ipv6 mld [vrf ...] joins [interface ...|groups ...|sources ...|detail] [json]"
+ *
+ * A NULL vrf from gm_cmd_vrf_lookup() (with ret == CMD_SUCCESS) means
+ * "all VRFs".  The column header is only printed for text output.
+ */
+DEFPY(gm_show_interface_joins,
+      gm_show_interface_joins_cmd,
+      "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
+      SHOW_STR
+      IPV6_STR
+      MLD_STR
+      VRF_FULL_CMD_HELP_STR
+      "MLD joined groups & sources\n"
+      INTERFACE_STR
+      "Interface name\n"
+      "Limit output to group range\n"
+      "Show groups covered by this prefix\n"
+      "Limit output to source range\n"
+      "Show sources covered by this prefix\n"
+      "Show details, including tracked receivers\n"
+      JSON_STR)
+{
+	int ret = CMD_SUCCESS;
+	struct vrf *vrf;
+	json_object *js = NULL;
+
+	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
+	if (ret != CMD_SUCCESS)
+		return ret;
+
+	if (json)
+		js = json_object_new_object();
+	else
+		vty_out(vty, "%-30s %-30s %-16s %10s %10s %10s\n", "Group",
+			"Source", "State", "LastSeen", "NonTrkSeen", "Created");
+
+	if (vrf)
+		gm_show_joins_vrf(vty, vrf, ifname, groups, sources, !!detail,
+				  js);
+	else
+		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
+			gm_show_joins_vrf(vty, vrf, ifname, groups, sources,
+					  !!detail, js);
+	return vty_json(vty, js);
+}
+
+/* "show ipv6 mld groups" worker for one VRF.
+ *
+ * uj: JSON output if true, otherwise an ASCII table (ttable) is built
+ * and dumped.  Iterates all MLD-enabled interfaces and their (S,G)
+ * entries; JSON groups entries per interface, the table is flat.
+ */
+static void gm_show_groups(struct vty *vty, struct vrf *vrf, bool uj)
+{
+	struct interface *ifp;
+	struct ttable *tt = NULL;
+	char *table;
+	json_object *json = NULL;
+	json_object *json_iface = NULL;
+	json_object *json_group = NULL;
+	json_object *json_groups = NULL;
+	struct pim_instance *pim = vrf->info;
+
+	if (uj) {
+		json = json_object_new_object();
+		json_object_int_add(json, "totalGroups", pim->gm_group_count);
+		json_object_int_add(json, "watermarkLimit",
+				    pim->gm_watermark_limit);
+	} else {
+		/* Prepare table. */
+		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+		ttable_add_row(tt, "Interface|Group|Version|Uptime");
+		tt->style.cell.rpad = 2;
+		tt->style.corner = '+';
+		ttable_restyle(tt);
+
+		vty_out(vty, "Total MLD groups: %u\n", pim->gm_group_count);
+		vty_out(vty, "Watermark warn limit(%s): %u\n",
+			pim->gm_watermark_limit ? "Set" : "Not Set",
+			pim->gm_watermark_limit);
+	}
+
+	/* scan interfaces */
+	FOR_ALL_INTERFACES (vrf, ifp) {
+
+		struct pim_interface *pim_ifp = ifp->info;
+		struct gm_if *gm_ifp;
+		struct gm_sg *sg;
+
+		if (!pim_ifp)
+			continue;
+
+		gm_ifp = pim_ifp->mld;
+		if (!gm_ifp)
+			continue;
+
+		/* scan mld groups */
+		frr_each (gm_sgs, gm_ifp->sgs, sg) {
+
+			if (uj) {
+				/* lazily create the per-interface object on
+				 * the first (S,G) seen for it
+				 */
+				json_object_object_get_ex(json, ifp->name,
+							  &json_iface);
+
+				if (!json_iface) {
+					json_iface = json_object_new_object();
+					json_object_pim_ifp_add(json_iface,
+								ifp);
+					json_object_object_add(json, ifp->name,
+							       json_iface);
+					json_groups = json_object_new_array();
+					json_object_object_add(json_iface,
+							       "groups",
+							       json_groups);
+				}
+
+				json_group = json_object_new_object();
+				json_object_string_addf(json_group, "group",
+							"%pPAs",
+							&sg->sgaddr.grp);
+
+				json_object_int_add(json_group, "version",
+						    pim_ifp->mld_version);
+				json_object_string_addf(json_group, "uptime",
+							"%pTVMs", &sg->created);
+				json_object_array_add(json_groups, json_group);
+			} else {
+				ttable_add_row(tt, "%s|%pPAs|%d|%pTVMs",
+					       ifp->name, &sg->sgaddr.grp,
+					       pim_ifp->mld_version,
+					       &sg->created);
+			}
+		} /* scan gm groups */
+	} /* scan interfaces */
+
+	if (uj)
+		vty_json(vty, json);
+	else {
+		/* Dump the generated table. */
+		table = ttable_dump(tt, "\n");
+		vty_out(vty, "%s\n", table);
+		XFREE(MTYPE_TMP, table);
+		ttable_del(tt);
+	}
+}
+
+/* "show ipv6 mld [vrf <VRF|all>] groups [json]"
+ *
+ * NULL vrf (with ret == CMD_SUCCESS) means all VRFs; gm_show_groups()
+ * handles output and JSON lifetime per VRF.
+ */
+DEFPY(gm_show_mld_groups,
+      gm_show_mld_groups_cmd,
+      "show ipv6 mld [vrf <VRF|all>$vrf_str] groups [json$json]",
+      SHOW_STR
+      IPV6_STR
+      MLD_STR
+      VRF_FULL_CMD_HELP_STR
+      MLD_GROUP_STR
+      JSON_STR)
+{
+	int ret = CMD_SUCCESS;
+	struct vrf *vrf;
+
+	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
+	if (ret != CMD_SUCCESS)
+		return ret;
+
+	if (vrf)
+		gm_show_groups(vty, vrf, !!json);
+	else
+		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
+			gm_show_groups(vty, vrf, !!json);
+
+	return CMD_SUCCESS;
+}
+
+/* "debug show mld interface IFNAME" — dump raw internal MLD state for
+ * one interface (default VRF only): querier/timers, pending general
+ * queries, every (S,G) with its positive/negative subscription lists,
+ * and every subscriber with its batched packets.  Debug aid, not part
+ * of the regular operational show commands.
+ */
+DEFPY(gm_debug_show,
+      gm_debug_show_cmd,
+      "debug show mld interface IFNAME",
+      DEBUG_STR
+      SHOW_STR
+      MLD_STR
+      INTERFACE_STR
+      "interface name\n")
+{
+	struct interface *ifp;
+	struct pim_interface *pim_ifp;
+	struct gm_if *gm_ifp;
+
+	ifp = if_lookup_by_name(ifname, VRF_DEFAULT);
+	if (!ifp) {
+		vty_out(vty, "%% no such interface: %pSQq\n", ifname);
+		return CMD_WARNING;
+	}
+
+	pim_ifp = ifp->info;
+	if (!pim_ifp) {
+		vty_out(vty, "%% no PIM state for interface %pSQq\n", ifname);
+		return CMD_WARNING;
+	}
+
+	gm_ifp = pim_ifp->mld;
+	if (!gm_ifp) {
+		vty_out(vty, "%% no MLD state for interface %pSQq\n", ifname);
+		return CMD_WARNING;
+	}
+
+	vty_out(vty, "querier: %pPA\n", &gm_ifp->querier);
+	vty_out(vty, "ll_lowest: %pPA\n\n", &pim_ifp->ll_lowest);
+	vty_out(vty, "t_query: %pTHD\n", gm_ifp->t_query);
+	vty_out(vty, "t_other_querier: %pTHD\n", gm_ifp->t_other_querier);
+	vty_out(vty, "t_expire: %pTHD\n", gm_ifp->t_expire);
+
+	/* pending general queries; monotime_*() return microseconds,
+	 * hence the /1000 to print milliseconds
+	 */
+	vty_out(vty, "\nn_pending: %u\n", gm_ifp->n_pending);
+	for (size_t i = 0; i < gm_ifp->n_pending; i++) {
+		int64_t query, expiry;
+
+		query = monotime_since(&gm_ifp->pending[i].query, NULL);
+		expiry = monotime_until(&gm_ifp->pending[i].expiry, NULL);
+
+		vty_out(vty, "[%zu]: query %"PRId64"ms ago, expiry in %"PRId64"ms\n",
+			i, query / 1000, expiry / 1000);
+	}
+
+	struct gm_sg *sg;
+	struct gm_packet_state *pkt;
+	struct gm_packet_sg *item;
+	struct gm_subscriber *subscriber;
+
+	/* per-(S,G): '+' lines are positive (join) subs, '-' lines are
+	 * negative (exclude) subs; S/E flags = is_src/is_excl
+	 */
+	vty_out(vty, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp->sgs));
+	frr_each (gm_sgs, gm_ifp->sgs, sg) {
+		vty_out(vty, "\t%pSG t_expire=%pTHD\n", &sg->sgaddr,
+			sg->t_sg_expire);
+
+		vty_out(vty, "\t     @pos:%zu\n",
+			gm_packet_sg_subs_count(sg->subs_positive));
+		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
+			pkt = gm_packet_sg2state(item);
+
+			vty_out(vty, "\t\t+%s%s [%pPAs %p] %p+%u\n",
+				item->is_src ? "S" : "",
+				item->is_excl ? "E" : "",
+				&pkt->subscriber->addr, pkt->subscriber, pkt,
+				item->offset);
+
+			assert(item->sg == sg);
+		}
+		vty_out(vty, "\t     @neg:%zu\n",
+			gm_packet_sg_subs_count(sg->subs_negative));
+		frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
+			pkt = gm_packet_sg2state(item);
+
+			vty_out(vty, "\t\t-%s%s [%pPAs %p] %p+%u\n",
+				item->is_src ? "S" : "",
+				item->is_excl ? "E" : "",
+				&pkt->subscriber->addr, pkt->subscriber, pkt,
+				item->offset);
+
+			assert(item->sg == sg);
+		}
+	}
+
+	vty_out(vty, "\n%zu subscribers:\n",
+		gm_subscribers_count(gm_ifp->subscribers));
+	frr_each (gm_subscribers, gm_ifp->subscribers, subscriber) {
+		vty_out(vty, "\t%pPA %p %zu packets\n", &subscriber->addr,
+			subscriber, gm_packets_count(subscriber->packets));
+
+		frr_each (gm_packets, subscriber->packets, pkt) {
+			/* received is µs since monotime epoch; * 1e-6 -> s */
+			vty_out(vty, "\t\t%p %.3fs ago %u of %u items active\n",
+				pkt,
+				monotime_since(&pkt->received, NULL) *
+					0.000001f,
+				pkt->n_active, pkt->n_sg);
+
+			for (size_t i = 0; i < pkt->n_sg; i++) {
+				item = pkt->items + i;
+
+				vty_out(vty, "\t\t[%zu]", i);
+
+				/* item->sg == NULL: superseded by a newer
+				 * packet from the same subscriber
+				 */
+				if (!item->sg) {
+					vty_out(vty, " inactive\n");
+					continue;
+				}
+
+				vty_out(vty, " %s%s %pSG nE=%u\n",
+					item->is_src ? "S" : "",
+					item->is_excl ? "E" : "",
+					&item->sg->sgaddr, item->n_exclude);
+			}
+		}
+	}
+
+	return CMD_SUCCESS;
+}
+
+/* Debug-only interface command: directly overwrite the *current*
+ * (operational) QRV and max-response-time of the MLD querier state and
+ * bump the querier if anything changed.  This bypasses the regular
+ * configuration path.
+ *
+ * NOTE(review): the help strings spell out "Multicast Listener
+ * Discovery" instead of using MLD_STR like the other commands here —
+ * consider unifying.
+ */
+DEFPY(gm_debug_iface_cfg,
+      gm_debug_iface_cfg_cmd,
+      "debug ipv6 mld {"
+        "robustness (0-7)|"
+	"query-max-response-time (1-8387584)"
+      "}",
+      DEBUG_STR
+      IPV6_STR
+      "Multicast Listener Discovery\n"
+      "QRV\nQRV\n"
+      "maxresp\nmaxresp\n")
+{
+	VTY_DECLVAR_CONTEXT(interface, ifp);
+	struct pim_interface *pim_ifp;
+	struct gm_if *gm_ifp;
+	bool changed = false;
+
+	pim_ifp = ifp->info;
+	if (!pim_ifp) {
+		vty_out(vty, "%% no PIM state for interface %pSQq\n",
+			ifp->name);
+		return CMD_WARNING;
+	}
+	gm_ifp = pim_ifp->mld;
+	if (!gm_ifp) {
+		vty_out(vty, "%% no MLD state for interface %pSQq\n",
+			ifp->name);
+		return CMD_WARNING;
+	}
+
+	/* *_str is non-NULL only when the argument was actually given */
+	if (robustness_str && gm_ifp->cur_qrv != robustness) {
+		gm_ifp->cur_qrv = robustness;
+		changed = true;
+	}
+	if (query_max_response_time_str &&
+	    gm_ifp->cur_max_resp != (unsigned int)query_max_response_time) {
+		gm_ifp->cur_max_resp = query_max_response_time;
+		changed = true;
+	}
+
+	if (changed) {
+		vty_out(vty, "%% MLD querier config changed, bumping\n");
+		gm_bump_querier(gm_ifp);
+	}
+	return CMD_SUCCESS;
+}
+
+/* local prototype — presumably to satisfy -Wmissing-prototypes in
+ * builds that don't see the header declaration; TODO confirm
+ */
+void gm_cli_init(void);
+
+/* Register all MLD CLI commands defined in this file. */
+void gm_cli_init(void)
+{
+	install_element(VIEW_NODE, &gm_show_interface_cmd);
+	install_element(VIEW_NODE, &gm_show_interface_stats_cmd);
+	install_element(VIEW_NODE, &gm_show_interface_joins_cmd);
+	install_element(VIEW_NODE, &gm_show_mld_groups_cmd);
+
+	install_element(VIEW_NODE, &gm_debug_show_cmd);
+	install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd);
+}
diff --git a/pimd/pim6_mld.h b/pimd/pim6_mld.h
new file mode 100644
index 0000000..183ab2f
--- /dev/null
+++ b/pimd/pim6_mld.h
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIMv6 MLD querier
+ * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
+ */
+
+#ifndef PIM6_MLD_H
+#define PIM6_MLD_H
+
+#include "typesafe.h"
+#include "pim_addr.h"
+
+struct event;
+struct pim_instance;
+struct gm_packet_sg;
+struct gm_if;
+struct channel_oil;
+
+#define MLD_DEFAULT_VERSION 2
+
+/* see comment below on subs_negative/subs_positive */
+enum gm_sub_sense {
+ /* negative/pruning: S,G in EXCLUDE */
+ GM_SUB_NEG = 0,
+ /* positive/joining: *,G in EXCLUDE and S,G in INCLUDE */
+ GM_SUB_POS = 1,
+};
+
+enum gm_sg_state {
+ GM_SG_NOINFO = 0,
+ GM_SG_JOIN,
+ GM_SG_JOIN_EXPIRING,
+ /* remaining 3 only valid for S,G when *,G in EXCLUDE */
+ GM_SG_PRUNE,
+ GM_SG_NOPRUNE,
+ GM_SG_NOPRUNE_EXPIRING,
+};
+
+/* If the timer gm_t_sg_expire is started without a leave message being received,
+ * the sg->state should be moved to expiring states.
+ * When the timer expires, we do not expect the state to be in join state.
+ * If a JOIN message is received while the timer is running,
+ * the state will be moved to JOIN and this timer will be switched off.
+ * Hence the below state transition is done.
+ */
+#define GM_UPDATE_SG_STATE(sg) \
+ do { \
+ if (sg->state == GM_SG_JOIN) \
+ sg->state = GM_SG_JOIN_EXPIRING; \
+ else if (sg->state == GM_SG_NOPRUNE) \
+ sg->state = GM_SG_NOPRUNE_EXPIRING; \
+ } while (0)
+
+static inline bool gm_sg_state_want_join(enum gm_sg_state state)
+{
+ return state != GM_SG_NOINFO && state != GM_SG_PRUNE;
+}
+
+/* MLD (S,G) state (on an interface)
+ *
+ * group is always != ::, src is :: for (*,G) joins. sort order in RB tree is
+ * such that sources for a particular group can be iterated by starting at the
+ * group. For INCLUDE, no (*,G) entry exists, only (S,G).
+ */
+
+PREDECL_RBTREE_UNIQ(gm_packet_sg_subs);
+PREDECL_RBTREE_UNIQ(gm_sgs);
+struct gm_sg {
+ pim_sgaddr sgaddr;
+ struct gm_if *iface;
+ struct gm_sgs_item itm;
+
+ enum gm_sg_state state;
+ struct channel_oil *oil;
+ bool tib_joined;
+
+ struct timeval created;
+
+ /* if a group- or group-and-source specific query is running
+ * (implies we haven't received any report yet, since it's cancelled
+ * by that)
+ */
+ struct event *t_sg_expire;
+
+ /* last-member-left triggered queries (group/group-source specific)
+ *
+ * this timer will be running even if we aren't the elected querier,
+ * in case the election result changes midway through.
+ */
+ struct event *t_sg_query;
+
+ /* we must keep sending (QRV) queries even if we get a positive
+ * response, to make sure other routers are updated. query_sbit
+ * will be set in that case, since other routers need the *response*,
+ * not the *query*
+ */
+ uint8_t n_query;
+ bool query_sbit;
+
+ /* subs_positive tracks gm_packet_sg resulting in a JOIN, i.e. for
+ * (*,G) it has *EXCLUDE* items, for (S,G) it has *INCLUDE* items.
+ *
+ * subs_negative is always empty for (*,G) and tracks EXCLUDE items
+ * for (S,G). This means that an (S,G) entry is active as a PRUNE if
+ * len(src->subs_negative) == len(grp->subs_positive)
+ * && len(src->subs_positive) == 0
+ * (i.e. all receivers for the group opted to exclude this S,G and
+ * noone did an SSM join for the S,G)
+ */
+ union {
+ struct {
+ struct gm_packet_sg_subs_head subs_negative[1];
+ struct gm_packet_sg_subs_head subs_positive[1];
+ };
+ struct gm_packet_sg_subs_head subs[2];
+ };
+
+ /* If the elected querier is not ourselves, queries and reports might
+ * get reordered in rare circumstances, i.e. the report could arrive
+ * just a microsecond before the query kicks off the timer. This can
+ * then result in us thinking there are no more receivers since no
+ * report might be received during the query period.
+ *
+ * To avoid this, keep track of the most recent report for this (S,G)
+ * so we can do a quick check to add just a little bit of slack.
+ *
+ * EXCLUDE S,Gs are never in most_recent.
+ */
+ struct gm_packet_sg *most_recent;
+};
+int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b);
+DECLARE_RBTREE_UNIQ(gm_sgs, struct gm_sg, itm, gm_sg_cmp);
+
+/* host tracking entry. addr will be one of:
+ *
+ * :: - used by hosts during address acquisition
+ * ::1 - may show up on some OS for joins by the router itself
+ * link-local - regular operation by MLDv2 hosts
+ * ffff:..:ffff - MLDv1 entry (cannot be tracked due to report suppression)
+ *
+ * global scope IPv6 addresses can never show up here
+ */
+PREDECL_HASH(gm_subscribers);
+PREDECL_DLIST(gm_packets);
+struct gm_subscriber {
+ pim_addr addr;
+ struct gm_subscribers_item itm;
+
+ struct gm_if *iface;
+ size_t refcount;
+
+ struct gm_packets_head packets[1];
+
+ struct timeval created;
+};
+
+/*
+ * MLD join state is kept batched by packet. Since the timers for all items
+ * in a packet are the same, this reduces the number of timers we're keeping
+ * track of. It also eases tracking for EXCLUDE state groups because the
+ * excluded sources are in the same packet. (MLD does not support splitting
+ * that if it exceeds MTU, it's always a full replace for exclude.)
+ *
+ * Since packets may be partially superseded by newer packets, the "active"
+ * field is used to track this.
+ */
+
+/* gm_packet_sg is allocated as part of gm_packet_state, note the items[0]
+ * array at the end of that. gm_packet_sg is NEVER directly allocated with
+ * XMALLOC/XFREE.
+ */
+struct gm_packet_sg {
+ /* non-NULL as long as this gm_packet_sg is the most recent entry
+ * for (subscriber,S,G). Cleared to NULL when a newer packet by the
+ * subscriber replaces this item.
+ *
+ * (Old items are kept around so we don't need to realloc/resize
+ * gm_packet_state, which would mess up a whole lot of pointers)
+ */
+ struct gm_sg *sg;
+
+ /* gm_sg -> (subscriber, gm_packet_sg)
+ * only on RB-tree while sg != NULL, i.e. not superseded by newer.
+ */
+ struct gm_packet_sg_subs_item subs_itm;
+
+ bool is_src : 1; /* := (src != ::) */
+ bool is_excl : 1;
+
+ /* for getting back to struct gm_packet_state, cf.
+ * gm_packet_sg2state() below
+ */
+ uint16_t offset;
+
+ /* if this is a group entry in EXCLUDE state, n_exclude counts how
+ * many sources are on the exclude list here. They follow immediately
+ * after.
+ */
+ uint16_t n_exclude;
+};
+
+#define gm_packet_sg2state(sg) \
+ container_of(sg, struct gm_packet_state, items[sg->offset])
+
+PREDECL_DLIST(gm_packet_expires);
+/* one received MLD report packet, with its per-(S,G) items inline */
+struct gm_packet_state {
+	/* back-pointers to owning interface and sending subscriber */
+	struct gm_if *iface;
+	struct gm_subscriber *subscriber;
+	struct gm_packets_item pkt_itm;
+
+	struct timeval received;
+	struct gm_packet_expires_item exp_itm;
+
+	/* n_active starts equal to n_sg; whenever active is set to false on
+	 * an item it is decremented.  When n_active == 0, the packet can be
+	 * freed.
+	 */
+	uint16_t n_sg, n_active;
+	/* NOTE(review): [0] is the GNU zero-length-array extension; the
+	 * C99 flexible array member "items[]" would be preferable
+	 */
+	struct gm_packet_sg items[0];
+};
+
+/* general queries are rather different from group/S,G specific queries; it's
+ * not particularly efficient or useful to try to shoehorn them into the S,G
+ * timers. Instead, we keep a history of recent queries and their implied
+ * expiries.
+ */
+struct gm_general_pending {
+ struct timeval query, expiry;
+};
+
+/* similarly, group queries also age out S,G entries for the group, but in
+ * this case we only keep one query for each group
+ *
+ * why is this not in the *,G gm_sg? There may not be one (for INCLUDE mode
+ * groups, or groups we don't know about.) Also, malicious clients could spam
+ * random group-specific queries to trigger resource exhaustion, so it makes
+ * sense to limit these.
+ */
+PREDECL_RBTREE_UNIQ(gm_grp_pends);
+struct gm_grp_pending {
+ struct gm_grp_pends_item itm;
+ struct gm_if *iface;
+ pim_addr grp;
+
+ struct timeval query;
+ struct event *t_expire;
+};
+
+/* guaranteed MTU for IPv6 is 1280 bytes. IPv6 header is 40 bytes, MLDv2
+ * query header is 24 bytes, RA option is 8 bytes - leaves 1208 bytes for the
+ * source list, which is 151 IPv6 addresses. But we may have some more IPv6
+ * extension headers (e.g. IPsec AH), so just cap to 128
+ */
+#define MLD_V2Q_MTU_MAX_SOURCES 128
+
+/* group-and-source-specific queries are bundled together, if some host joins
+ * multiple sources it's likely to drop all at the same time.
+ *
+ * Unlike gm_grp_pending, this is only used for aggregation since the S,G
+ * state is kept directly in the gm_sg structure.
+ */
+PREDECL_HASH(gm_gsq_pends);
+struct gm_gsq_pending {
+ struct gm_gsq_pends_item itm;
+
+ struct gm_if *iface;
+ struct event *t_send;
+
+ pim_addr grp;
+ bool s_bit;
+
+ size_t n_src;
+ pim_addr srcs[MLD_V2Q_MTU_MAX_SOURCES];
+};
+
+
+/* The size of this history is limited by QRV, i.e. there can't be more than
+ * 8 items here.
+ */
+#define GM_MAX_PENDING 8
+
+enum gm_version {
+ GM_NONE,
+ GM_MLDV1,
+ GM_MLDV2,
+};
+
+/* per-interface MLD counters, exported by "show ipv6 mld statistics" */
+struct gm_if_stats {
+	/* RX drop reasons */
+	uint64_t rx_drop_csum;
+	uint64_t rx_drop_srcaddr;
+	uint64_t rx_drop_dstaddr;
+	uint64_t rx_drop_ra;
+	uint64_t rx_drop_malformed;
+	uint64_t rx_trunc_report;
+
+	/* since the types are different, this is rx_old_* not of rx_*_old */
+	uint64_t rx_old_report;
+	uint64_t rx_old_leave;
+	uint64_t rx_new_report;
+
+	/* received queries, by version/specificity */
+	uint64_t rx_query_new_general;
+	uint64_t rx_query_new_group;
+	uint64_t rx_query_new_groupsrc;
+	uint64_t rx_query_new_sbit;
+	uint64_t rx_query_old_general;
+	uint64_t rx_query_old_group;
+
+	/* sent queries, by version/specificity */
+	uint64_t tx_query_new_general;
+	uint64_t tx_query_new_group;
+	uint64_t tx_query_new_groupsrc;
+	uint64_t tx_query_old_general;
+	uint64_t tx_query_old_group;
+
+	uint64_t tx_query_fail;
+};
+
+/* per-interface MLD state, hung off struct pim_interface (->mld) */
+struct gm_if {
+	struct interface *ifp;
+	struct pim_instance *pim;
+	/* general query / other-querier-present / expiry timers
+	 * (dumped by "debug show mld interface")
+	 */
+	struct event *t_query, *t_other_querier, *t_expire;
+
+	bool stopping;
+
+	/* presumably: remaining startup queries to send — TODO confirm */
+	uint8_t n_startup;
+
+	/* operational querier parameters (may be overridden at runtime
+	 * via the debug command in pim6_cmd.c)
+	 */
+	uint8_t cur_qrv;
+	unsigned int cur_query_intv;	  /* ms */
+	unsigned int cur_query_intv_trig; /* ms */
+	unsigned int cur_max_resp;	  /* ms */
+	enum gm_version cur_version;
+	int cur_lmqc; /* last member query count in ds */
+
+	/* this value (positive, default 10ms) defines our "timing tolerance":
+	 * - added to deadlines for expiring joins
+	 * - used to look backwards in time for queries, in case a report was
+	 *   reordered before the query
+	 */
+	struct timeval cfg_timing_fuzz;
+
+	/* items in pending[] are sorted by expiry, pending[0] is earliest */
+	struct gm_general_pending pending[GM_MAX_PENDING];
+	uint8_t n_pending;
+	struct gm_grp_pends_head grp_pends[1];
+	struct gm_gsq_pends_head gsq_pends[1];
+
+	pim_addr querier;
+	pim_addr cur_ll_lowest;
+
+	/* (S,G) state, host tracking, packet expiry queue */
+	struct gm_sgs_head sgs[1];
+	struct gm_subscribers_head subscribers[1];
+	struct gm_packet_expires_head expires[1];
+
+	struct timeval started;
+	struct gm_if_stats stats;
+};
+
+#if PIM_IPV == 6
+extern void gm_ifp_update(struct interface *ifp);
+extern void gm_ifp_teardown(struct interface *ifp);
+extern void gm_group_delete(struct gm_if *gm_ifp);
+#else
+static inline void gm_ifp_update(struct interface *ifp)
+{
+}
+
+static inline void gm_ifp_teardown(struct interface *ifp)
+{
+}
+#endif
+
+extern void gm_cli_init(void);
+bool in6_multicast_nofwd(const pim_addr *addr);
+
+#endif /* PIM6_MLD_H */
diff --git a/pimd/pim6_mld_protocol.h b/pimd/pim6_mld_protocol.h
new file mode 100644
index 0000000..08d7871
--- /dev/null
+++ b/pimd/pim6_mld_protocol.h
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MLD protocol definitions
+ * Copyright (C) 2022 David Lamparter for NetDEF, Inc.
+ */
+
+#ifndef _PIM6_MLD_PROTOCOL_H
+#define _PIM6_MLD_PROTOCOL_H
+
+#include <stdalign.h>
+#include <stdint.h>
+
+/* There is a struct icmp6_hdr provided by OS, but it includes 4 bytes of data.
+ * Not helpful for us if we want to put the MLD struct after it.
+ */
+
+/* bare ICMPv6 header; all multi-byte fields are network byte order on
+ * the wire (cf. ntohs()/htons() in the helpers below)
+ */
+struct icmp6_plain_hdr {
+	uint8_t icmp6_type;
+	uint8_t icmp6_code;
+	uint16_t icmp6_cksum;
+};
+static_assert(sizeof(struct icmp6_plain_hdr) == 4, "struct mismatch");
+static_assert(alignof(struct icmp6_plain_hdr) <= 4, "struct mismatch");
+
+/* for MLDv1 query, report and leave all use the same packet format */
+struct mld_v1_pkt {
+	uint16_t max_resp_code;
+	uint16_t rsvd0;
+	struct in6_addr grp;
+};
+static_assert(sizeof(struct mld_v1_pkt) == 20, "struct mismatch");
+static_assert(alignof(struct mld_v1_pkt) <= 4, "struct mismatch");
+
+
+/* MLDv2 query, followed on the wire by n_src source addresses */
+struct mld_v2_query_hdr {
+	uint16_t max_resp_code;
+	uint16_t rsvd0;
+	struct in6_addr grp;
+	uint8_t flags;
+	uint8_t qqic;
+	uint16_t n_src;
+	/* NOTE(review): [0] is a GNU extension; C99 flexible array
+	 * member "srcs[]" would be preferable (same below)
+	 */
+	struct in6_addr srcs[0];
+};
+static_assert(sizeof(struct mld_v2_query_hdr) == 24, "struct mismatch");
+static_assert(alignof(struct mld_v2_query_hdr) <= 4, "struct mismatch");
+
+
+/* MLDv2 report: header, followed by n_records group records */
+struct mld_v2_report_hdr {
+	uint16_t rsvd;
+	uint16_t n_records;
+};
+static_assert(sizeof(struct mld_v2_report_hdr) == 4, "struct mismatch");
+static_assert(alignof(struct mld_v2_report_hdr) <= 4, "struct mismatch");
+
+
+/* one MLDv2 group record, followed by n_src sources + aux_len data */
+struct mld_v2_rec_hdr {
+	uint8_t type;
+	uint8_t aux_len;
+	uint16_t n_src;
+	struct in6_addr grp;
+	struct in6_addr srcs[0];
+};
+static_assert(sizeof(struct mld_v2_rec_hdr) == 20, "struct mismatch");
+static_assert(alignof(struct mld_v2_rec_hdr) <= 4, "struct mismatch");
+
+/* clang-format off */
+/* ICMPv6 type values used by MLD */
+enum icmp6_mld_type {
+	ICMP6_MLD_QUERY = 130,
+	ICMP6_MLD_V1_REPORT = 131,
+	ICMP6_MLD_V1_DONE = 132,
+	ICMP6_MLD_V2_REPORT = 143,
+};
+
+/* MLDv2 group record types */
+enum mld_v2_rec_type {
+	MLD_RECTYPE_IS_INCLUDE = 1,
+	MLD_RECTYPE_IS_EXCLUDE = 2,
+	MLD_RECTYPE_CHANGE_TO_INCLUDE = 3,
+	MLD_RECTYPE_CHANGE_TO_EXCLUDE = 4,
+	MLD_RECTYPE_ALLOW_NEW_SOURCES = 5,
+	MLD_RECTYPE_BLOCK_OLD_SOURCES = 6,
+};
+/* clang-format on */
+
+/* helper functions */
+
+/* Decode an MLDv2 Maximum Response Code (network byte order) into
+ * milliseconds.  Codes < 0x8000 are the value itself; otherwise the
+ * code is a floating-point format with a 3-bit exponent (bits 14-12)
+ * and a 12-bit mantissa: value = (mant | 0x1000) << (exp + 3).
+ */
+static inline unsigned int mld_max_resp_decode(uint16_t wire)
+{
+	uint16_t code = ntohs(wire);
+	uint8_t exp;
+
+	if (code < 0x8000)
+		return code;
+	exp = (code >> 12) & 0x7;
+	return ((code & 0xfff) | 0x1000) << (exp + 3);
+}
+
+/* Encode milliseconds into an MLDv2 Maximum Response Code, returned in
+ * network byte order.  Inverse of mld_max_resp_decode() (lossy: large
+ * values are truncated to the 12-bit mantissa).
+ *
+ * __builtin_clz(0) would be undefined, but the else-branch guarantees
+ * value >= 0x8000.  exp fits its 3-bit field for value < 2^23, which
+ * callers must guarantee (the CLI range caps at 8387584).
+ */
+static inline uint16_t mld_max_resp_encode(uint32_t value)
+{
+	uint16_t code;
+	uint8_t exp;
+
+	if (value < 0x8000)
+		code = value;
+	else {
+		exp = 16 - __builtin_clz(value);
+		code = (value >> (exp + 3)) & 0xfff;
+		code |= 0x8000 | (exp << 12);
+	}
+	return htons(code);
+}
+
+#endif /* _PIM6_MLD_PROTOCOL_H */
diff --git a/pimd/pim_addr.c b/pimd/pim_addr.c
new file mode 100644
index 0000000..91a0bb8
--- /dev/null
+++ b/pimd/pim_addr.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM address generalizations
+ * Copyright (C) 2022 David Lamparter for NetDEF, Inc.
+ */
+
+#include <zebra.h>
+
+#include "pim_addr.h"
+#include "printfrr.h"
+#include "prefix.h"
+
+
+/* printfrr extension "%pPA": print a pim_addr (IPv4 or IPv6 depending
+ * on PIM_IPV).  The optional 's' flag ("%pPAs") renders the
+ * any-address as "*" (wildcard notation for (*,G) output).
+ */
+printfrr_ext_autoreg_p("PA", printfrr_pimaddr);
+static ssize_t printfrr_pimaddr(struct fbuf *buf, struct printfrr_eargs *ea,
+				const void *vptr)
+{
+	const pim_addr *addr = vptr;
+	bool use_star = false;
+
+	/* consume the 's' flag so later format processing doesn't see it */
+	if (ea->fmt[0] == 's') {
+		use_star = true;
+		ea->fmt++;
+	}
+
+	if (!addr)
+		return bputs(buf, "(null)");
+
+	if (use_star && pim_addr_is_any(*addr))
+		return bputch(buf, '*');
+
+#if PIM_IPV == 4
+	return bprintfrr(buf, "%pI4", addr);
+#else
+	return bprintfrr(buf, "%pI6", addr);
+#endif
+}
+
+/* printfrr extension "%pSG": print a pim_sgaddr as "(S,G)", using the
+ * "%pPAs" wildcard form so (*,G) entries show a "*" source.
+ */
+printfrr_ext_autoreg_p("SG", printfrr_sgaddr);
+static ssize_t printfrr_sgaddr(struct fbuf *buf, struct printfrr_eargs *ea,
+			       const void *vptr)
+{
+	const pim_sgaddr *sga = vptr;
+
+	if (!sga)
+		return bputs(buf, "(null)");
+
+	return bprintfrr(buf, "(%pPAs,%pPAs)", &sga->src, &sga->grp);
+}
diff --git a/pimd/pim_addr.h b/pimd/pim_addr.h
new file mode 100644
index 0000000..94c63bb
--- /dev/null
+++ b/pimd/pim_addr.h
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM address generalizations
+ * Copyright (C) 2022 David Lamparter for NetDEF, Inc.
+ */
+
+#ifndef _PIMD_PIM_ADDR_H
+#define _PIMD_PIM_ADDR_H
+
+#include "jhash.h"
+#include "prefix.h"
+
+/* clang-format off */
+
+#if PIM_IPV == 4
+typedef struct in_addr pim_addr;
+
+#define PIM_ADDRSTRLEN INET_ADDRSTRLEN
+#define PIM_AF AF_INET
+#define PIM_AFI AFI_IP
+#define PIM_PROTO_REG IPPROTO_RAW
+#define PIM_IPADDR IPADDR_V4
+#define ipaddr_pim ipaddr_v4
+#define PIM_MAX_BITLEN IPV4_MAX_BITLEN
+#define PIM_AF_NAME "ip"
+#define PIM_AF_DBG "pim"
+#define GM_AF_DBG "igmp"
+#define PIM_MROUTE_DBG "mroute"
+#define PIMREG "pimreg"
+#define GM "IGMP"
+#define IPPROTO_GM IPPROTO_IGMP
+
+#define PIM_ADDR_FUNCNAME(name) ipv4_##name
+
+union pimprefixptr {
+ prefixtype(pimprefixptr, struct prefix, p)
+ prefixtype(pimprefixptr, struct prefix_ipv4, p4)
+} TRANSPARENT_UNION;
+
+union pimprefixconstptr {
+ prefixtype(pimprefixconstptr, const struct prefix, p)
+ prefixtype(pimprefixconstptr, const struct prefix_ipv4, p4)
+} TRANSPARENT_UNION;
+
+#else
+typedef struct in6_addr pim_addr;
+
+#define PIM_ADDRSTRLEN INET6_ADDRSTRLEN
+#define PIM_AF AF_INET6
+#define PIM_AFI AFI_IP6
+#define PIM_PROTO_REG IPPROTO_PIM
+#define PIM_IPADDR IPADDR_V6
+#define ipaddr_pim ipaddr_v6
+#define PIM_MAX_BITLEN IPV6_MAX_BITLEN
+#define PIM_AF_NAME "ipv6"
+#define PIM_AF_DBG "pimv6"
+#define GM_AF_DBG "mld"
+#define PIM_MROUTE_DBG "mroute6"
+#define PIMREG "pim6reg"
+#define GM "MLD"
+#define IPPROTO_GM IPPROTO_ICMPV6
+
+#define PIM_ADDR_FUNCNAME(name) ipv6_##name
+
+union pimprefixptr {
+ prefixtype(pimprefixptr, struct prefix, p)
+ prefixtype(pimprefixptr, struct prefix_ipv6, p6)
+} TRANSPARENT_UNION;
+
+union pimprefixconstptr {
+ prefixtype(pimprefixconstptr, const struct prefix, p)
+ prefixtype(pimprefixconstptr, const struct prefix_ipv6, p6)
+} TRANSPARENT_UNION;
+#endif
+
+/* for assignment/initialization (C99 compound literal)
+ * named PIMADDR_ANY (not PIM_ADDR_ANY) to match INADDR_ANY
+ */
+#define PIMADDR_ANY (pim_addr){ }
+
+/* clang-format on */
+
+/* true if addr is the all-zero "any" address (0.0.0.0 resp. ::) */
+static inline bool pim_addr_is_any(pim_addr addr)
+{
+	pim_addr zero = {};
+
+	return memcmp(&addr, &zero, sizeof(zero)) == 0;
+}
+
+/* memcmp-style three-way compare of two addresses */
+static inline int pim_addr_cmp(pim_addr a, pim_addr b)
+{
+	return memcmp(&a, &b, sizeof(a));
+}
+
+/* write addr into *out as a full-length (/32 resp. /128) prefix */
+static inline void pim_addr_to_prefix(union pimprefixptr out, pim_addr in)
+{
+	out.p->family = PIM_AF;
+	out.p->prefixlen = PIM_MAX_BITLEN;
+	memcpy(out.p->u.val, &in, sizeof(in));
+}
+
+/* extract the address from a prefix; returns PIMADDR_ANY if the
+ * prefix's address family doesn't match PIM_AF (note: prefixlen is
+ * ignored, only the address part is copied)
+ */
+static inline pim_addr pim_addr_from_prefix(union pimprefixconstptr in)
+{
+	pim_addr ret;
+
+	if (in.p->family != PIM_AF)
+		return PIMADDR_ANY;
+
+	memcpy(&ret, in.p->u.val, sizeof(ret));
+	return ret;
+}
+
+/* thin wrappers dispatching to the per-AF ipv4_/ipv6_ helpers */
+static inline uint8_t pim_addr_scope(const pim_addr addr)
+{
+	return PIM_ADDR_FUNCNAME(mcast_scope)(&addr);
+}
+
+static inline bool pim_addr_nofwd(const pim_addr addr)
+{
+	return PIM_ADDR_FUNCNAME(mcast_nofwd)(&addr);
+}
+
+static inline bool pim_addr_ssm(const pim_addr addr)
+{
+	return PIM_ADDR_FUNCNAME(mcast_ssm)(&addr);
+}
+
+/* don't use this struct directly, use the pim_sgaddr typedef */
+struct _pim_sgaddr {
+	pim_addr grp;
+	pim_addr src;
+};
+
+typedef struct _pim_sgaddr pim_sgaddr;
+
+static inline int pim_sgaddr_cmp(const pim_sgaddr a, const pim_sgaddr b)
+{
+	/* memcmp over the entire struct = memcmp(grp) + memcmp(src) */
+	return memcmp(&a, &b, sizeof(a));
+}
+
+/* jhash2 reads the struct as uint32_t words; in_addr/in6_addr are
+ * 4/16 bytes, so sizeof(pim_sgaddr) is a multiple of 4 with no padding
+ */
+static inline uint32_t pim_sgaddr_hash(const pim_sgaddr a, uint32_t initval)
+{
+	return jhash2((uint32_t *)&a, sizeof(a) / sizeof(uint32_t), initval);
+}
+
+#ifdef _FRR_ATTRIBUTE_PRINTFRR
+#pragma FRR printfrr_ext "%pPA" (pim_addr *)
+#pragma FRR printfrr_ext "%pSG" (pim_sgaddr *)
+#endif
+
+/*
+ * There is no pim_sgaddr2str(). This is intentional. Instead, use:
+ * snprintfrr(buf, sizeof(buf), "%pPA", sgaddr)
+ * (and note that snprintfrr is implicit for vty_out and zlog_*)
+ */
+
+#endif /* _PIMD_PIM_ADDR_H */
diff --git a/pimd/pim_assert.c b/pimd/pim_assert.c
new file mode 100644
index 0000000..86d9a74
--- /dev/null
+++ b/pimd/pim_assert.c
@@ -0,0 +1,736 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "prefix.h"
+#include "if.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_str.h"
+#include "pim_tlv.h"
+#include "pim_msg.h"
+#include "pim_pim.h"
+#include "pim_int.h"
+#include "pim_time.h"
+#include "pim_iface.h"
+#include "pim_hello.h"
+#include "pim_macro.h"
+#include "pim_assert.h"
+#include "pim_zebra.h"
+#include "pim_ifchannel.h"
+
+static int assert_action_a3(struct pim_ifchannel *ch);
+static void assert_action_a2(struct pim_ifchannel *ch,
+ struct pim_assert_metric winner_metric);
+static void assert_action_a6(struct pim_ifchannel *ch,
+ struct pim_assert_metric winner_metric);
+
+void pim_ifassert_winner_set(struct pim_ifchannel *ch,
+ enum pim_ifassert_state new_state, pim_addr winner,
+ struct pim_assert_metric winner_metric)
+{
+ struct pim_interface *pim_ifp = ch->interface->info;
+ int winner_changed = !!pim_addr_cmp(ch->ifassert_winner, winner);
+ int metric_changed = !pim_assert_metric_match(
+ &ch->ifassert_winner_metric, &winner_metric);
+ enum pim_rpf_result rpf_result;
+ struct pim_rpf old_rpf;
+
+ if (PIM_DEBUG_PIM_EVENTS) {
+ if (ch->ifassert_state != new_state) {
+ zlog_debug(
+ "%s: (S,G)=%s assert state changed from %s to %s on interface %s",
+ __func__, ch->sg_str,
+ pim_ifchannel_ifassert_name(ch->ifassert_state),
+ pim_ifchannel_ifassert_name(new_state),
+ ch->interface->name);
+ }
+
+ if (winner_changed)
+ zlog_debug(
+ "%s: (S,G)=%s assert winner changed from %pPAs to %pPAs on interface %s",
+ __func__, ch->sg_str, &ch->ifassert_winner,
+ &winner, ch->interface->name);
+ } /* PIM_DEBUG_PIM_EVENTS */
+
+ ch->ifassert_state = new_state;
+ ch->ifassert_winner = winner;
+ ch->ifassert_winner_metric = winner_metric;
+ ch->ifassert_creation = pim_time_monotonic_sec();
+
+ if (winner_changed || metric_changed) {
+ if (winner_changed) {
+ old_rpf.source_nexthop.interface =
+ ch->upstream->rpf.source_nexthop.interface;
+ rpf_result = pim_rpf_update(pim_ifp->pim, ch->upstream,
+ &old_rpf, __func__);
+ if (rpf_result == PIM_RPF_CHANGED ||
+ (rpf_result == PIM_RPF_FAILURE &&
+ old_rpf.source_nexthop.interface))
+ pim_zebra_upstream_rpf_changed(
+ pim_ifp->pim, ch->upstream, &old_rpf);
+ /* update kernel multicast forwarding cache (MFC) */
+ if (ch->upstream->rpf.source_nexthop.interface &&
+ ch->upstream->channel_oil)
+ pim_upstream_mroute_iif_update(
+ ch->upstream->channel_oil, __func__);
+ }
+ pim_upstream_update_join_desired(pim_ifp->pim, ch->upstream);
+ pim_ifchannel_update_could_assert(ch);
+ pim_ifchannel_update_assert_tracking_desired(ch);
+ }
+}
+
+static void on_trace(const char *label, struct interface *ifp, pim_addr src)
+{
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: from %pPAs on %s", label, &src, ifp->name);
+}
+
+static int preferred_assert(const struct pim_ifchannel *ch,
+ const struct pim_assert_metric *recv_metric)
+{
+ return pim_assert_metric_better(recv_metric,
+ &ch->ifassert_winner_metric);
+}
+
+static int acceptable_assert(const struct pim_assert_metric *my_metric,
+ const struct pim_assert_metric *recv_metric)
+{
+ return pim_assert_metric_better(recv_metric, my_metric);
+}
+
+static int inferior_assert(const struct pim_assert_metric *my_metric,
+ const struct pim_assert_metric *recv_metric)
+{
+ return pim_assert_metric_better(my_metric, recv_metric);
+}
+
+static int cancel_assert(const struct pim_assert_metric *recv_metric)
+{
+ return (recv_metric->metric_preference
+ == PIM_ASSERT_METRIC_PREFERENCE_MAX)
+ && (recv_metric->route_metric == PIM_ASSERT_ROUTE_METRIC_MAX);
+}
+
+static void if_could_assert_do_a1(const char *caller, struct pim_ifchannel *ch)
+{
+ if (PIM_IF_FLAG_TEST_COULD_ASSERT(ch->flags)) {
+ if (assert_action_a1(ch)) {
+ zlog_warn(
+ "%s: %s: (S,G)=%s assert_action_a1 failure on interface %s",
+ __func__, caller, ch->sg_str,
+ ch->interface->name);
+ /* log warning only */
+ }
+ }
+}
+
+static int dispatch_assert(struct interface *ifp, pim_addr source_addr,
+ pim_addr group_addr,
+ struct pim_assert_metric recv_metric)
+{
+ struct pim_ifchannel *ch;
+ pim_sgaddr sg;
+
+ memset(&sg, 0, sizeof(sg));
+ sg.src = source_addr;
+ sg.grp = group_addr;
+ ch = pim_ifchannel_add(ifp, &sg, 0, 0);
+
+ switch (ch->ifassert_state) {
+ case PIM_IFASSERT_NOINFO:
+ if (recv_metric.rpt_bit_flag) {
+ /* RPT bit set */
+ if_could_assert_do_a1(__func__, ch);
+ } else {
+ /* RPT bit clear */
+ if (inferior_assert(&ch->ifassert_my_metric,
+ &recv_metric)) {
+ if_could_assert_do_a1(__func__, ch);
+ } else if (acceptable_assert(&ch->ifassert_my_metric,
+ &recv_metric)) {
+ if (PIM_IF_FLAG_TEST_ASSERT_TRACKING_DESIRED(
+ ch->flags)) {
+ assert_action_a6(ch, recv_metric);
+ }
+ }
+ }
+ break;
+ case PIM_IFASSERT_I_AM_WINNER:
+ if (preferred_assert(ch, &recv_metric)) {
+ assert_action_a2(ch, recv_metric);
+ } else {
+ if (inferior_assert(&ch->ifassert_my_metric,
+ &recv_metric)) {
+ assert_action_a3(ch);
+ }
+ }
+ break;
+ case PIM_IFASSERT_I_AM_LOSER:
+ if (!pim_addr_cmp(recv_metric.ip_address,
+ ch->ifassert_winner)) {
+ /* Assert from current winner */
+
+ if (cancel_assert(&recv_metric)) {
+ assert_action_a5(ch);
+ } else {
+ if (inferior_assert(&ch->ifassert_my_metric,
+ &recv_metric)) {
+ assert_action_a5(ch);
+ } else if (acceptable_assert(
+ &ch->ifassert_my_metric,
+ &recv_metric)) {
+ if (!recv_metric.rpt_bit_flag) {
+ assert_action_a2(ch,
+ recv_metric);
+ }
+ }
+ }
+ } else if (preferred_assert(ch, &recv_metric)) {
+ assert_action_a2(ch, recv_metric);
+ }
+ break;
+ default: {
+ zlog_warn(
+ "%s: (S,G)=%s invalid assert state %d on interface %s",
+ __func__, ch->sg_str, ch->ifassert_state, ifp->name);
+ }
+ return -2;
+ }
+
+ return 0;
+}
+
+int pim_assert_recv(struct interface *ifp, struct pim_neighbor *neigh,
+ pim_addr src_addr, uint8_t *buf, int buf_size)
+{
+ pim_sgaddr sg;
+ pim_addr msg_source_addr;
+ bool wrong_af = false;
+ struct pim_assert_metric msg_metric;
+ int offset;
+ uint8_t *curr;
+ int curr_size;
+ struct pim_interface *pim_ifp = NULL;
+
+ on_trace(__func__, ifp, src_addr);
+
+ curr = buf;
+ curr_size = buf_size;
+
+ /*
+ Parse assert group addr
+ */
+ memset(&sg, 0, sizeof(sg));
+ offset = pim_parse_addr_group(&sg, curr, curr_size);
+ if (offset < 1) {
+ zlog_warn(
+ "%s: pim_parse_addr_group() failure: from %pPAs on %s",
+ __func__, &src_addr, ifp->name);
+ return -1;
+ }
+ curr += offset;
+ curr_size -= offset;
+
+ /*
+ Parse assert source addr
+ */
+ offset = pim_parse_addr_ucast(&msg_source_addr, curr, curr_size,
+ &wrong_af);
+ if (offset < 1 || wrong_af) {
+ zlog_warn(
+ "%s: pim_parse_addr_ucast() failure: from %pPAs on %s",
+ __func__, &src_addr, ifp->name);
+ return -2;
+ }
+ curr += offset;
+ curr_size -= offset;
+
+ if (curr_size < 8) {
+ zlog_warn(
+ "%s: preference/metric size is less than 8 bytes: size=%d from %pPAs on interface %s",
+ __func__, curr_size, &src_addr, ifp->name);
+ return -3;
+ }
+
+ /*
+ Parse assert metric preference
+ */
+
+ msg_metric.metric_preference = pim_read_uint32_host(curr);
+
+ msg_metric.rpt_bit_flag = msg_metric.metric_preference
+ & 0x80000000; /* save highest bit */
+ msg_metric.metric_preference &= ~0x80000000; /* clear highest bit */
+
+ curr += 4;
+
+ /*
+ Parse assert route metric
+ */
+
+ msg_metric.route_metric = pim_read_uint32_host(curr);
+
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug(
+ "%s: from %pPAs on %s: (S,G)=(%pPAs,%pPAs) pref=%u metric=%u rpt_bit=%u",
+ __func__, &src_addr, ifp->name, &msg_source_addr,
+ &sg.grp, msg_metric.metric_preference,
+ msg_metric.route_metric,
+ PIM_FORCE_BOOLEAN(msg_metric.rpt_bit_flag));
+
+ msg_metric.ip_address = src_addr;
+
+ pim_ifp = ifp->info;
+ assert(pim_ifp);
+
+ if (pim_ifp->pim_passive_enable) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "skip receiving PIM message on passive interface %s",
+ ifp->name);
+ return 0;
+ }
+
+ ++pim_ifp->pim_ifstat_assert_recv;
+
+ return dispatch_assert(ifp, msg_source_addr, sg.grp, msg_metric);
+}
+
+/*
+ RFC 4601: 4.6.3. Assert Metrics
+
+ Assert metrics are defined as:
+
+ When comparing assert_metrics, the rpt_bit_flag, metric_preference,
+ and route_metric field are compared in order, where the first lower
+ value wins. If all fields are equal, the primary IP address of the
+ router that sourced the Assert message is used as a tie-breaker,
+ with the highest IP address winning.
+*/
+int pim_assert_metric_better(const struct pim_assert_metric *m1,
+ const struct pim_assert_metric *m2)
+{
+ if (m1->rpt_bit_flag < m2->rpt_bit_flag)
+ return 1;
+ if (m1->rpt_bit_flag > m2->rpt_bit_flag)
+ return 0;
+
+ if (m1->metric_preference < m2->metric_preference)
+ return 1;
+ if (m1->metric_preference > m2->metric_preference)
+ return 0;
+
+ if (m1->route_metric < m2->route_metric)
+ return 1;
+ if (m1->route_metric > m2->route_metric)
+ return 0;
+
+ return pim_addr_cmp(m1->ip_address, m2->ip_address) > 0;
+}
+
+int pim_assert_metric_match(const struct pim_assert_metric *m1,
+ const struct pim_assert_metric *m2)
+{
+ if (m1->rpt_bit_flag != m2->rpt_bit_flag)
+ return 0;
+ if (m1->metric_preference != m2->metric_preference)
+ return 0;
+ if (m1->route_metric != m2->route_metric)
+ return 0;
+
+ return !pim_addr_cmp(m1->ip_address, m2->ip_address);
+}
+
+int pim_assert_build_msg(uint8_t *pim_msg, int buf_size, struct interface *ifp,
+ pim_addr group_addr, pim_addr source_addr,
+ uint32_t metric_preference, uint32_t route_metric,
+ uint32_t rpt_bit_flag)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+ uint8_t *buf_pastend = pim_msg + buf_size;
+ uint8_t *pim_msg_curr;
+ int pim_msg_size;
+ int remain;
+
+ pim_msg_curr =
+ pim_msg + PIM_MSG_HEADER_LEN; /* skip room for pim header */
+
+ /* Encode group */
+ remain = buf_pastend - pim_msg_curr;
+ pim_msg_curr = pim_msg_addr_encode_group(pim_msg_curr, group_addr);
+ if (!pim_msg_curr) {
+ zlog_warn(
+ "%s: failure encoding group address %pPA: space left=%d",
+ __func__, &group_addr, remain);
+ return -1;
+ }
+
+ /* Encode source */
+ remain = buf_pastend - pim_msg_curr;
+ pim_msg_curr = pim_msg_addr_encode_ucast(pim_msg_curr, source_addr);
+ if (!pim_msg_curr) {
+ zlog_warn(
+ "%s: failure encoding source address %pPA: space left=%d",
+ __func__, &source_addr, remain);
+ return -2;
+ }
+
+ /* Metric preference */
+ pim_write_uint32(pim_msg_curr,
+ rpt_bit_flag ? metric_preference | 0x80000000
+ : metric_preference);
+ pim_msg_curr += 4;
+
+ /* Route metric */
+ pim_write_uint32(pim_msg_curr, route_metric);
+ pim_msg_curr += 4;
+
+ /*
+ Add PIM header
+ */
+ pim_msg_size = pim_msg_curr - pim_msg;
+ pim_msg_build_header(pim_ifp->primary_address,
+ qpim_all_pim_routers_addr, pim_msg, pim_msg_size,
+ PIM_MSG_TYPE_ASSERT, false);
+
+ return pim_msg_size;
+}
+
+static int pim_assert_do(struct pim_ifchannel *ch,
+ struct pim_assert_metric metric)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ uint8_t pim_msg[1000];
+ int pim_msg_size;
+
+ ifp = ch->interface;
+ if (!ifp) {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: channel%s has no associated interface!",
+ __func__, ch->sg_str);
+ return -1;
+ }
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug(
+ "%s: channel %s pim not enabled on interface: %s",
+ __func__, ch->sg_str, ifp->name);
+ return -1;
+ }
+
+ pim_msg_size =
+ pim_assert_build_msg(pim_msg, sizeof(pim_msg), ifp, ch->sg.grp,
+ ch->sg.src, metric.metric_preference,
+ metric.route_metric, metric.rpt_bit_flag);
+ if (pim_msg_size < 1) {
+ zlog_warn(
+ "%s: failure building PIM assert message: msg_size=%d",
+ __func__, pim_msg_size);
+ return -2;
+ }
+
+ /*
+ RFC 4601: 4.3.1. Sending Hello Messages
+
+ Thus, if a router needs to send a Join/Prune or Assert message on
+ an interface on which it has not yet sent a Hello message with the
+ currently configured IP address, then it MUST immediately send the
+ relevant Hello message without waiting for the Hello Timer to
+ expire, followed by the Join/Prune or Assert message.
+ */
+ pim_hello_require(ifp);
+
+ if (PIM_DEBUG_PIM_TRACE) {
+ zlog_debug("%s: to %s: (S,G)=%s pref=%u metric=%u rpt_bit=%u",
+ __func__, ifp->name, ch->sg_str,
+ metric.metric_preference, metric.route_metric,
+ PIM_FORCE_BOOLEAN(metric.rpt_bit_flag));
+ }
+ if (!pim_ifp->pim_passive_enable)
+ ++pim_ifp->pim_ifstat_assert_send;
+
+ if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
+ qpim_all_pim_routers_addr, pim_msg, pim_msg_size,
+ ifp)) {
+ zlog_warn("%s: could not send PIM message on interface %s",
+ __func__, ifp->name);
+ return -3;
+ }
+
+ return 0;
+}
+
+int pim_assert_send(struct pim_ifchannel *ch)
+{
+ return pim_assert_do(ch, ch->ifassert_my_metric);
+}
+
+/*
+ RFC 4601: 4.6.4. AssertCancel Messages
+
+ An AssertCancel(S,G) is an infinite metric assert with the RPT bit
+ set that names S as the source.
+ */
+static int pim_assert_cancel(struct pim_ifchannel *ch)
+{
+ struct pim_assert_metric metric;
+
+ metric.rpt_bit_flag = 0;
+ metric.metric_preference = PIM_ASSERT_METRIC_PREFERENCE_MAX;
+ metric.route_metric = PIM_ASSERT_ROUTE_METRIC_MAX;
+ metric.ip_address = ch->sg.src;
+
+ return pim_assert_do(ch, metric);
+}
+
+static void on_assert_timer(struct event *t)
+{
+ struct pim_ifchannel *ch;
+ struct interface *ifp;
+
+ ch = EVENT_ARG(t);
+
+ ifp = ch->interface;
+
+ if (PIM_DEBUG_PIM_TRACE) {
+ zlog_debug("%s: (S,G)=%s timer expired on interface %s",
+ __func__, ch->sg_str, ifp->name);
+ }
+
+ ch->t_ifassert_timer = NULL;
+
+ switch (ch->ifassert_state) {
+ case PIM_IFASSERT_I_AM_WINNER:
+ assert_action_a3(ch);
+ break;
+ case PIM_IFASSERT_I_AM_LOSER:
+ assert_action_a5(ch);
+ break;
+ case PIM_IFASSERT_NOINFO: {
+ if (PIM_DEBUG_PIM_EVENTS)
+ zlog_warn(
+ "%s: (S,G)=%s invalid assert state %d on interface %s",
+ __func__, ch->sg_str, ch->ifassert_state,
+ ifp->name);
+ }
+ }
+}
+
+static void assert_timer_off(struct pim_ifchannel *ch)
+{
+ if (PIM_DEBUG_PIM_TRACE) {
+ if (ch->t_ifassert_timer) {
+ zlog_debug(
+ "%s: (S,G)=%s cancelling timer on interface %s",
+ __func__, ch->sg_str, ch->interface->name);
+ }
+ }
+ EVENT_OFF(ch->t_ifassert_timer);
+}
+
+static void pim_assert_timer_set(struct pim_ifchannel *ch, int interval)
+{
+ assert_timer_off(ch);
+
+ if (PIM_DEBUG_PIM_TRACE) {
+ zlog_debug("%s: (S,G)=%s starting %u sec timer on interface %s",
+ __func__, ch->sg_str, interval, ch->interface->name);
+ }
+
+ event_add_timer(router->master, on_assert_timer, ch, interval,
+ &ch->t_ifassert_timer);
+}
+
+static void pim_assert_timer_reset(struct pim_ifchannel *ch)
+{
+ pim_assert_timer_set(ch,
+ PIM_ASSERT_TIME - PIM_ASSERT_OVERRIDE_INTERVAL);
+}
+
+/*
+ RFC 4601: 4.6.1. (S,G) Assert Message State Machine
+
+ (S,G) Assert State machine Actions
+
+ A1: Send Assert(S,G).
+ Set Assert Timer to (Assert_Time - Assert_Override_Interval).
+ Store self as AssertWinner(S,G,I).
+ Store spt_assert_metric(S,I) as AssertWinnerMetric(S,G,I).
+*/
+int assert_action_a1(struct pim_ifchannel *ch)
+{
+ struct interface *ifp = ch->interface;
+ struct pim_interface *pim_ifp;
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ zlog_warn("%s: (S,G)=%s multicast not enabled on interface %s",
+ __func__, ch->sg_str, ifp->name);
+ return -1; /* must return since pim_ifp is used below */
+ }
+
+ /* Switch to I_AM_WINNER before performing action_a3 below */
+ pim_ifassert_winner_set(
+ ch, PIM_IFASSERT_I_AM_WINNER, pim_ifp->primary_address,
+ pim_macro_spt_assert_metric(&ch->upstream->rpf,
+ pim_ifp->primary_address));
+
+ if (assert_action_a3(ch)) {
+ zlog_warn(
+ "%s: (S,G)=%s assert_action_a3 failure on interface %s",
+ __func__, ch->sg_str, ifp->name);
+ /* warning only */
+ }
+
+ if (ch->ifassert_state != PIM_IFASSERT_I_AM_WINNER) {
+ if (PIM_DEBUG_PIM_EVENTS)
+ zlog_warn(
+ "%s: channel%s not in expected PIM_IFASSERT_I_AM_WINNER state",
+ __func__, ch->sg_str);
+ }
+
+ return 0;
+}
+
+/*
+ RFC 4601: 4.6.1. (S,G) Assert Message State Machine
+
+ (S,G) Assert State machine Actions
+
+ A2: Store new assert winner as AssertWinner(S,G,I) and assert
+ winner metric as AssertWinnerMetric(S,G,I).
+ Set Assert Timer to Assert_Time.
+*/
+static void assert_action_a2(struct pim_ifchannel *ch,
+ struct pim_assert_metric winner_metric)
+{
+ pim_ifassert_winner_set(ch, PIM_IFASSERT_I_AM_LOSER,
+ winner_metric.ip_address, winner_metric);
+
+ pim_assert_timer_set(ch, PIM_ASSERT_TIME);
+
+ if (ch->ifassert_state != PIM_IFASSERT_I_AM_LOSER) {
+ if (PIM_DEBUG_PIM_EVENTS)
+ zlog_warn(
+ "%s: channel%s not in expected PIM_IFASSERT_I_AM_LOSER state",
+ __func__, ch->sg_str);
+ }
+}
+
+/*
+ RFC 4601: 4.6.1. (S,G) Assert Message State Machine
+
+ (S,G) Assert State machine Actions
+
+ A3: Send Assert(S,G).
+ Set Assert Timer to (Assert_Time - Assert_Override_Interval).
+*/
+static int assert_action_a3(struct pim_ifchannel *ch)
+{
+ if (ch->ifassert_state != PIM_IFASSERT_I_AM_WINNER) {
+ if (PIM_DEBUG_PIM_EVENTS)
+ zlog_warn(
+ "%s: channel%s expected to be in PIM_IFASSERT_I_AM_WINNER state",
+ __func__, ch->sg_str);
+ return -1;
+ }
+
+ pim_assert_timer_reset(ch);
+
+ if (pim_assert_send(ch)) {
+ zlog_warn("%s: (S,G)=%s failure sending assert on interface %s",
+ __func__, ch->sg_str, ch->interface->name);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ RFC 4601: 4.6.1. (S,G) Assert Message State Machine
+
+ (S,G) Assert State machine Actions
+
+ A4: Send AssertCancel(S,G).
+ Delete assert info (AssertWinner(S,G,I) and
+ AssertWinnerMetric(S,G,I) will then return their default
+ values).
+*/
+void assert_action_a4(struct pim_ifchannel *ch)
+{
+ if (pim_assert_cancel(ch)) {
+ zlog_warn("%s: failure sending AssertCancel%s on interface %s",
+ __func__, ch->sg_str, ch->interface->name);
+ /* log warning only */
+ }
+
+ assert_action_a5(ch);
+
+ if (ch->ifassert_state != PIM_IFASSERT_NOINFO) {
+ if (PIM_DEBUG_PIM_EVENTS)
+ zlog_warn(
+ "%s: channel%s not in PIM_IFASSERT_NOINFO state as expected",
+ __func__, ch->sg_str);
+ }
+}
+
+/*
+ RFC 4601: 4.6.1. (S,G) Assert Message State Machine
+
+ (S,G) Assert State machine Actions
+
+ A5: Delete assert info (AssertWinner(S,G,I) and
+ AssertWinnerMetric(S,G,I) will then return their default values).
+*/
+void assert_action_a5(struct pim_ifchannel *ch)
+{
+ reset_ifassert_state(ch);
+ if (ch->ifassert_state != PIM_IFASSERT_NOINFO) {
+ if (PIM_DEBUG_PIM_EVENTS)
+ zlog_warn(
+ "%s: channel%s not in PIM_IFSSERT_NOINFO state as expected",
+ __func__, ch->sg_str);
+ }
+}
+
+/*
+ RFC 4601: 4.6.1. (S,G) Assert Message State Machine
+
+ (S,G) Assert State machine Actions
+
+ A6: Store new assert winner as AssertWinner(S,G,I) and assert
+ winner metric as AssertWinnerMetric(S,G,I).
+ Set Assert Timer to Assert_Time.
+ If (I is RPF_interface(S)) AND (UpstreamJPState(S,G) == true)
+ set SPTbit(S,G) to true.
+*/
+static void assert_action_a6(struct pim_ifchannel *ch,
+ struct pim_assert_metric winner_metric)
+{
+ assert_action_a2(ch, winner_metric);
+
+ /*
+ If (I is RPF_interface(S)) AND (UpstreamJPState(S,G) == true) set
+ SPTbit(S,G) to true.
+ */
+ if (ch->upstream->rpf.source_nexthop.interface == ch->interface)
+ if (ch->upstream->join_state == PIM_UPSTREAM_JOINED)
+ ch->upstream->sptbit = PIM_UPSTREAM_SPTBIT_TRUE;
+
+ if (ch->ifassert_state != PIM_IFASSERT_I_AM_LOSER) {
+ if (PIM_DEBUG_PIM_EVENTS)
+ zlog_warn(
+ "%s: channel%s not in PIM_IFASSERT_I_AM_LOSER state as expected",
+ __func__, ch->sg_str);
+ }
+}
diff --git a/pimd/pim_assert.h b/pimd/pim_assert.h
new file mode 100644
index 0000000..41f32ea
--- /dev/null
+++ b/pimd/pim_assert.h
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_ASSERT_H
+#define PIM_ASSERT_H
+
+#include <zebra.h>
+
+#include "if.h"
+
+struct pim_ifchannel;
+struct pim_neighbor;
+
+enum pim_ifassert_state {
+ PIM_IFASSERT_NOINFO,
+ PIM_IFASSERT_I_AM_WINNER,
+ PIM_IFASSERT_I_AM_LOSER
+};
+
+struct pim_assert_metric {
+ uint32_t rpt_bit_flag;
+ uint32_t metric_preference;
+ uint32_t route_metric;
+ pim_addr ip_address; /* neighbor router that sourced the Assert
+ message */
+};
+
+/*
+ RFC 4601: 4.11. Timer Values
+
+ Note that for historical reasons, the Assert message lacks a
+ Holdtime field. Thus, changing the Assert Time from the default
+ value is not recommended.
+ */
+#define PIM_ASSERT_OVERRIDE_INTERVAL (3) /* seconds */
+#define PIM_ASSERT_TIME (180) /* seconds */
+
+#define PIM_ASSERT_METRIC_PREFERENCE_MAX (0xFFFFFFFF)
+#define PIM_ASSERT_ROUTE_METRIC_MAX (0xFFFFFFFF)
+
+void pim_ifassert_winner_set(struct pim_ifchannel *ch,
+ enum pim_ifassert_state new_state, pim_addr winner,
+ struct pim_assert_metric winner_metric);
+
+int pim_assert_recv(struct interface *ifp, struct pim_neighbor *neigh,
+ pim_addr src_addr, uint8_t *buf, int buf_size);
+
+int pim_assert_metric_better(const struct pim_assert_metric *m1,
+ const struct pim_assert_metric *m2);
+int pim_assert_metric_match(const struct pim_assert_metric *m1,
+ const struct pim_assert_metric *m2);
+
+int pim_assert_build_msg(uint8_t *pim_msg, int buf_size, struct interface *ifp,
+ pim_addr group_addr, pim_addr source_addr,
+ uint32_t metric_preference, uint32_t route_metric,
+ uint32_t rpt_bit_flag);
+
+int pim_assert_send(struct pim_ifchannel *ch);
+
+int assert_action_a1(struct pim_ifchannel *ch);
+void assert_action_a4(struct pim_ifchannel *ch);
+void assert_action_a5(struct pim_ifchannel *ch);
+
+#endif /* PIM_ASSERT_H */
diff --git a/pimd/pim_bfd.c b/pimd/pim_bfd.c
new file mode 100644
index 0000000..43d9f08
--- /dev/null
+++ b/pimd/pim_bfd.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * pim_bfd.c: PIM BFD handling routines
+ *
+ * Copyright (C) 2017 Cumulus Networks, Inc.
+ * Chirag Shah
+ */
+
+#include <zebra.h>
+
+#include "lib/json.h"
+#include "command.h"
+#include "vty.h"
+#include "zclient.h"
+
+#include "pim_instance.h"
+#include "pim_neighbor.h"
+#include "pim_vty.h"
+#include "pim_iface.h"
+#include "pim_bfd.h"
+#include "bfd.h"
+#include "pimd.h"
+#include "pim_zebra.h"
+
+/*
+ * pim_bfd_write_config - Write the interface BFD configuration.
+ */
+void pim_bfd_write_config(struct vty *vty, struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp || !pim_ifp->bfd_config.enabled)
+ return;
+
+#if HAVE_BFDD == 0
+ if (pim_ifp->bfd_config.detection_multiplier != BFD_DEF_DETECT_MULT
+ || pim_ifp->bfd_config.min_rx != BFD_DEF_MIN_RX
+ || pim_ifp->bfd_config.min_tx != BFD_DEF_MIN_TX)
+ vty_out(vty, " " PIM_AF_NAME " pim bfd %d %d %d\n",
+ pim_ifp->bfd_config.detection_multiplier,
+ pim_ifp->bfd_config.min_rx, pim_ifp->bfd_config.min_tx);
+ else
+#endif /* ! HAVE_BFDD */
+ vty_out(vty, " " PIM_AF_NAME " pim bfd\n");
+
+ if (pim_ifp->bfd_config.profile)
+ vty_out(vty, " " PIM_AF_NAME " pim bfd profile %s\n",
+ pim_ifp->bfd_config.profile);
+}
+
+static void pim_neighbor_bfd_cb(struct bfd_session_params *bsp,
+ const struct bfd_session_status *bss, void *arg)
+{
+ struct pim_neighbor *nbr = arg;
+
+ if (PIM_DEBUG_PIM_TRACE) {
+ zlog_debug("%s: status %s old_status %s", __func__,
+ bfd_get_status_str(bss->state),
+ bfd_get_status_str(bss->previous_state));
+ }
+
+ if (bss->state == BFD_STATUS_DOWN
+ && bss->previous_state == BFD_STATUS_UP)
+ pim_neighbor_delete(nbr->interface, nbr, "BFD Session Expired");
+}
+
+/*
+ * pim_bfd_info_nbr_create - Create/update BFD information for a neighbor.
+ */
+void pim_bfd_info_nbr_create(struct pim_interface *pim_ifp,
+ struct pim_neighbor *neigh)
+{
+ /* Check if Pim Interface BFD is enabled */
+ if (!pim_ifp || !pim_ifp->bfd_config.enabled)
+ return;
+
+ if (neigh->bfd_session == NULL)
+ neigh->bfd_session = bfd_sess_new(pim_neighbor_bfd_cb, neigh);
+
+ bfd_sess_set_timers(
+ neigh->bfd_session, pim_ifp->bfd_config.detection_multiplier,
+ pim_ifp->bfd_config.min_rx, pim_ifp->bfd_config.min_tx);
+#if PIM_IPV == 4
+ bfd_sess_set_ipv4_addrs(neigh->bfd_session, NULL, &neigh->source_addr);
+#else
+ bfd_sess_set_ipv6_addrs(neigh->bfd_session, NULL, &neigh->source_addr);
+#endif
+ bfd_sess_set_interface(neigh->bfd_session, neigh->interface->name);
+ bfd_sess_set_vrf(neigh->bfd_session, neigh->interface->vrf->vrf_id);
+ bfd_sess_set_profile(neigh->bfd_session, pim_ifp->bfd_config.profile);
+ bfd_sess_install(neigh->bfd_session);
+}
+
+/*
+ * pim_bfd_reg_dereg_all_nbr - Register/Deregister all neighbors associated
+ * with an interface with BFD through
+ * zebra for starting/stopping the monitoring of
+ * the neighbor reachability.
+ */
+void pim_bfd_reg_dereg_all_nbr(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = NULL;
+ struct listnode *node = NULL;
+ struct pim_neighbor *neigh = NULL;
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp)
+ return;
+
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, node, neigh)) {
+ if (pim_ifp->bfd_config.enabled)
+ pim_bfd_info_nbr_create(pim_ifp, neigh);
+ else
+ bfd_sess_free(&neigh->bfd_session);
+ }
+}
+
+void pim_bfd_init(void)
+{
+ bfd_protocol_integration_init(pim_zebra_zclient_get(), router->master);
+}
diff --git a/pimd/pim_bfd.h b/pimd/pim_bfd.h
new file mode 100644
index 0000000..3d8e29a
--- /dev/null
+++ b/pimd/pim_bfd.h
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * pim_bfd.h: PIM BFD definitions and structures
+ *
+ * Copyright (C) 2017 Cumulus Networks, Inc.
+ * Chirag Shah
+ */
+
+#ifndef PIM_BFD_H
+#define PIM_BFD_H
+
+#include "if.h"
+
+/**
+ * Initializes PIM BFD integration code.
+ */
+void pim_bfd_init(void);
+
+/**
+ * Write configuration to `show running-config`.
+ *
+ * \param vty the vty output pointer.
+ * \param ifp the interface pointer that has the configuration.
+ */
+void pim_bfd_write_config(struct vty *vty, struct interface *ifp);
+
+/**
+ * Enables or disables all peers BFD sessions.
+ *
+ * \param ifp interface pointer.
+ * \param enable session state to set.
+ */
+void pim_bfd_reg_dereg_all_nbr(struct interface *ifp);
+
+/**
+ * Create and configure peer BFD session if it does not exist. It will use
+ * the interface configured parameters as the peer configuration.
+ *
+ * \param pim_ifp the interface configuration pointer.
+ * \param neigh the neighbor configuration pointer.
+ */
+void pim_bfd_info_nbr_create(struct pim_interface *pim_ifp,
+ struct pim_neighbor *neigh);
+
+#endif /* PIM_BFD_H */
diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c
new file mode 100644
index 0000000..df91619
--- /dev/null
+++ b/pimd/pim_bsm.c
@@ -0,0 +1,1453 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * pim_bsm.c: PIM BSM handling routines
+ *
+ * Copyright (C) 2018-19 Vmware, Inc.
+ * Saravanan K
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "if.h"
+#include "pimd.h"
+#include "pim_iface.h"
+#include "pim_instance.h"
+#include "pim_neighbor.h"
+#include "pim_rpf.h"
+#include "pim_hello.h"
+#include "pim_pim.h"
+#include "pim_nht.h"
+#include "pim_bsm.h"
+#include "pim_time.h"
+#include "pim_zebra.h"
+#include "pim_util.h"
+
+/* Functions forward declaration */
+static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout);
+static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time);
+static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
+ int hold_time);
+
+/* Memory Types */
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSGRP_NODE, "PIM BSR advertised grp info");
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSRP_INFO, "PIM BSR advertised RP info");
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_FRAG, "PIM BSM fragment");
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_PKT_VAR_MEM, "PIM BSM Packet");
+
+/* All bsm packets forwarded shall be fit within ip mtu less iphdr(max) */
+#define MAX_IP_HDR_LEN 24
+
+/* pim_bsm_write_config - Write the interface pim bsm configuration.
+ * Only non-default ("no ...") settings are emitted.
+ */
+void pim_bsm_write_config(struct vty *vty, struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+
+	/* Nothing to write for interfaces without PIM state. */
+	if (!pim_ifp)
+		return;
+
+	if (!pim_ifp->bsm_enable)
+		vty_out(vty, " no " PIM_AF_NAME " pim bsm\n");
+	if (!pim_ifp->ucast_bsm_accept)
+		vty_out(vty, " no " PIM_AF_NAME " pim unicast-bsm\n");
+}
+
+/* Cancel the per-RP expiry timer and release the RP info node. */
+static void pim_bsm_rpinfo_free(struct bsm_rpinfo *bsrp_info)
+{
+ EVENT_OFF(bsrp_info->g2rp_timer);
+ XFREE(MTYPE_PIM_BSRP_INFO, bsrp_info);
+}
+
+/* Drain an RP info list, freeing every element (timers included). */
+static void pim_bsm_rpinfos_free(struct bsm_rpinfos_head *head)
+{
+ struct bsm_rpinfo *bsrp_info;
+
+ while ((bsrp_info = bsm_rpinfos_pop(head)))
+ pim_bsm_rpinfo_free(bsrp_info);
+}
+
+/* Free a group node's payload: both the active and the partial
+ * (in-construction) RP lists, then the node itself.  Does not touch
+ * the route table entry; see pim_free_bsgrp_node() for that.
+ */
+static void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node)
+{
+ pim_bsm_rpinfos_free(bsgrp_node->bsrp_list);
+ pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list);
+ XFREE(MTYPE_PIM_BSGRP_NODE, bsgrp_node);
+}
+
+/* Detach the group's entry from the BSM RP route table.  The node is
+ * unlocked twice: once for the reference taken by this lookup and once
+ * for the reference held since the node was created.
+ */
+static void pim_free_bsgrp_node(struct route_table *rt, struct prefix *grp)
+{
+ struct route_node *rn;
+
+ rn = route_node_lookup(rt, grp);
+ if (rn) {
+ rn->info = NULL;
+ route_unlock_node(rn);
+ route_unlock_node(rn);
+ }
+}
+
+/* Release one stored BSM fragment. */
+static void pim_bsm_frag_free(struct bsm_frag *bsfrag)
+{
+ XFREE(MTYPE_PIM_BSM_FRAG, bsfrag);
+}
+
+/* Drop all cached BSM fragments of a scope zone. */
+static void pim_bsm_frags_free(struct bsm_scope *scope)
+{
+ struct bsm_frag *bsfrag;
+
+ while ((bsfrag = bsm_frags_pop(scope->bsm_frags)))
+ pim_bsm_frag_free(bsfrag);
+}
+
+/* Sort comparator implementing the RP election order:
+ * Step-1: lowest RP priority has highest precedence.
+ * Step-2: on equal priority, the higher hash value wins.
+ * Step-3: on equal hash, the highest RP address wins.
+ * The list head is therefore always the elected RP.
+ */
+int pim_bsm_rpinfo_cmp(const struct bsm_rpinfo *node1,
+		       const struct bsm_rpinfo *node2)
+{
+	if (node1->rp_prio != node2->rp_prio)
+		return (node1->rp_prio < node2->rp_prio) ? -1 : 1;
+
+	if (node1->hash != node2->hash)
+		return (node1->hash < node2->hash) ? 1 : -1;
+
+	/* Reversed operands: the numerically larger address sorts first. */
+	return pim_addr_cmp(node2->rp_address, node1->rp_address);
+}
+
+/* Allocate a bsgrp_node for 'grp' and anchor it in the scope's BSM RP
+ * route table.  Both RP lists start empty.  Returns NULL only when the
+ * route node cannot be created.
+ */
+static struct bsgrp_node *pim_bsm_new_bsgrp_node(struct route_table *rt,
+ struct prefix *grp)
+{
+ struct route_node *rn;
+ struct bsgrp_node *bsgrp;
+
+ rn = route_node_get(rt, grp);
+ if (!rn) {
+ zlog_warn("%s: route node creation failed", __func__);
+ return NULL;
+ }
+ bsgrp = XCALLOC(MTYPE_PIM_BSGRP_NODE, sizeof(struct bsgrp_node));
+
+ rn->info = bsgrp;
+ bsm_rpinfos_init(bsgrp->bsrp_list);
+ bsm_rpinfos_init(bsgrp->partial_bsrp_list);
+
+ prefix_copy(&bsgrp->group, grp);
+ return bsgrp;
+}
+
+/* Bootstrap timer expiry: the elected BSR has gone silent.  Forget the
+ * current BSR, flush cached BSM fragments and fall back to ACCEPT_ANY,
+ * while granting the currently elected RP of each group one more hold
+ * time before it is expired.
+ */
+static void pim_on_bs_timer(struct event *t)
+{
+ struct route_node *rn;
+ struct bsm_scope *scope;
+ struct bsgrp_node *bsgrp_node;
+ struct bsm_rpinfo *bsrp;
+
+ scope = EVENT_ARG(t);
+ EVENT_OFF(scope->bs_timer);
+
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: Bootstrap Timer expired for scope: %d",
+ __func__, scope->sz_id);
+
+ /* Stop NHT tracking of the now-expired BSR. */
+ pim_nht_bsr_del(scope->pim, scope->current_bsr);
+ /* Reset scope zone data */
+ scope->state = ACCEPT_ANY;
+ scope->current_bsr = PIMADDR_ANY;
+ scope->current_bsr_prio = 0;
+ scope->current_bsr_first_ts = 0;
+ scope->current_bsr_last_ts = 0;
+ scope->bsm_frag_tag = 0;
+ pim_bsm_frags_free(scope);
+
+ for (rn = route_top(scope->bsrp_table); rn; rn = route_next(rn)) {
+
+ bsgrp_node = (struct bsgrp_node *)rn->info;
+ if (!bsgrp_node) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: bsgrp_node is null", __func__);
+ continue;
+ }
+ /* Give grace time for rp to continue for another hold time */
+ bsrp = bsm_rpinfos_first(bsgrp_node->bsrp_list);
+ if (bsrp)
+ pim_g2rp_timer_restart(bsrp, bsrp->rp_holdtime);
+
+ /* clear pending list */
+ pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list);
+ bsgrp_node->pend_rp_cnt = 0;
+ }
+}
+
+/* Cancel the scope's bootstrap timer (no-op if not running). */
+static void pim_bs_timer_stop(struct bsm_scope *scope)
+{
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s : BS timer being stopped of sz: %d", __func__,
+ scope->sz_id);
+ EVENT_OFF(scope->bs_timer);
+}
+
+/* (Re)arm the scope's bootstrap timer for 'bs_timeout' seconds.  Any
+ * already-running timer is cancelled first, so this doubles as restart.
+ */
+static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout)
+{
+ if (!scope) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s : Invalid scope(NULL).", __func__);
+ return;
+ }
+ EVENT_OFF(scope->bs_timer);
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s : starting bs timer for scope %d with timeout %d secs",
+ __func__, scope->sz_id, bs_timeout);
+ event_add_timer(router->master, pim_on_bs_timer, scope, bs_timeout,
+ &scope->bs_timer);
+}
+
+/* Alias for pim_bs_timer_start(); the start routine already cancels any
+ * pending timer, so restart is the same operation.
+ */
+static inline void pim_bs_timer_restart(struct bsm_scope *scope, int bs_timeout)
+{
+ pim_bs_timer_start(scope, bs_timeout);
+}
+
+/* Per-VRF BSM bring-up: zero the global scope zone, create its BSM RP
+ * route table, default to accepting no-forward BSMs, and start the
+ * bootstrap timer so stale BSR state eventually ages out.
+ */
+void pim_bsm_proc_init(struct pim_instance *pim)
+{
+ memset(&pim->global_scope, 0, sizeof(struct bsm_scope));
+
+ pim->global_scope.sz_id = PIM_GBL_SZ_ID;
+ pim->global_scope.bsrp_table = route_table_init();
+ pim->global_scope.accept_nofwd_bsm = true;
+ pim->global_scope.state = NO_INFO;
+ pim->global_scope.pim = pim;
+ bsm_frags_init(pim->global_scope.bsm_frags);
+ pim_bs_timer_start(&pim->global_scope, PIM_BS_TIME);
+}
+
+/* Per-VRF BSM teardown: stop the bootstrap timer, drop cached
+ * fragments, free every group node's data and release the route table.
+ */
+void pim_bsm_proc_free(struct pim_instance *pim)
+{
+ struct route_node *rn;
+ struct bsgrp_node *bsgrp;
+
+ pim_bs_timer_stop(&pim->global_scope);
+ pim_bsm_frags_free(&pim->global_scope);
+
+ for (rn = route_top(pim->global_scope.bsrp_table); rn;
+ rn = route_next(rn)) {
+ bsgrp = rn->info;
+ if (!bsgrp)
+ continue;
+ pim_free_bsgrp_data(bsgrp);
+ }
+
+ route_table_finish(pim->global_scope.bsrp_table);
+}
+
+/* True once an RP entry has accumulated at least its advertised hold
+ * time of elapsed time, i.e. it is due for expiry.
+ */
+static bool is_hold_time_elapsed(void *data)
+{
+	const struct bsm_rpinfo *bsrp = data;
+
+	return bsrp->elapse_time >= bsrp->rp_holdtime;
+}
+
+/* Per-(group,RP) hold-time expiry.  The expired entry's hold time is
+ * credited as elapsed time to every remaining RP of the group and fully
+ * elapsed entries are dropped.  Unless a static RP covers the group,
+ * the next best BSR-learnt RP is installed (or the mapping deleted if
+ * none remains), and the group node itself is freed once both of its
+ * RP lists are empty.
+ */
+static void pim_on_g2rp_timer(struct event *t)
+{
+ struct bsm_rpinfo *bsrp;
+ struct bsm_rpinfo *bsrp_node;
+ struct bsgrp_node *bsgrp_node;
+ struct pim_instance *pim;
+ struct rp_info *rp_info;
+ struct route_node *rn;
+ uint16_t elapse;
+ pim_addr bsrp_addr;
+
+ bsrp = EVENT_ARG(t);
+ EVENT_OFF(bsrp->g2rp_timer);
+ bsgrp_node = bsrp->bsgrp_node;
+
+ /* elapse time is the hold time of expired node */
+ elapse = bsrp->rp_holdtime;
+ bsrp_addr = bsrp->rp_address;
+
+ /* update elapse for all bsrp nodes */
+ frr_each_safe (bsm_rpinfos, bsgrp_node->bsrp_list, bsrp_node) {
+ bsrp_node->elapse_time += elapse;
+
+ if (is_hold_time_elapsed(bsrp_node)) {
+ bsm_rpinfos_del(bsgrp_node->bsrp_list, bsrp_node);
+ pim_bsm_rpinfo_free(bsrp_node);
+ }
+ }
+
+ /* Get the next elected rp node */
+ bsrp = bsm_rpinfos_first(bsgrp_node->bsrp_list);
+ pim = bsgrp_node->scope->pim;
+ rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
+
+ if (!rn) {
+ zlog_warn("%s: Route node doesn't exist", __func__);
+ return;
+ }
+
+ rp_info = (struct rp_info *)rn->info;
+
+ if (!rp_info) {
+ route_unlock_node(rn);
+ return;
+ }
+
+ /* NOTE(review): on the paths below 'rn' (locked by the lookup above)
+ * is never unlocked — looks like a node reference leak; confirm
+ * against route_node_lookup() semantics.
+ */
+ if (rp_info->rp_src != RP_SRC_STATIC) {
+ /* If new rp available, change it else delete the existing */
+ if (bsrp) {
+ pim_g2rp_timer_start(
+ bsrp, (bsrp->rp_holdtime - bsrp->elapse_time));
+ pim_rp_change(pim, bsrp->rp_address, bsgrp_node->group,
+ RP_SRC_BSR);
+ } else {
+ pim_rp_del(pim, bsrp_addr, bsgrp_node->group, NULL,
+ RP_SRC_BSR);
+ }
+ }
+
+ if (!bsm_rpinfos_count(bsgrp_node->bsrp_list)
+ && !bsm_rpinfos_count(bsgrp_node->partial_bsrp_list)) {
+ pim_free_bsgrp_node(pim->global_scope.bsrp_table,
+ &bsgrp_node->group);
+ pim_free_bsgrp_data(bsgrp_node);
+ }
+}
+
+/* (Re)arm the per-(group,RP) expiry timer for 'hold_time' seconds; any
+ * already-running timer is cancelled first.  A NULL entry is rejected
+ * (defensive — callers normally pass a valid node).
+ */
+static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time)
+{
+	if (!bsrp) {
+		if (PIM_DEBUG_BSM)
+			/* Fixed typo in debug message: "brsp" -> "bsrp". */
+			zlog_debug("%s : Invalid bsrp(NULL).", __func__);
+		return;
+	}
+	EVENT_OFF(bsrp->g2rp_timer);
+	if (PIM_DEBUG_BSM)
+		zlog_debug(
+			"%s : starting g2rp timer for grp: %pFX - rp: %pPAs with timeout %d secs(Actual Hold time : %d secs)",
+			__func__, &bsrp->bsgrp_node->group, &bsrp->rp_address,
+			hold_time, bsrp->rp_holdtime);
+
+	event_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,
+			&bsrp->g2rp_timer);
+}
+
+/* Alias for pim_g2rp_timer_start(); the start routine already cancels
+ * any pending timer, so restart is the same operation.
+ */
+static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
+ int hold_time)
+{
+ pim_g2rp_timer_start(bsrp, hold_time);
+}
+
+/* Cancel the per-(group,RP) expiry timer; tolerates a NULL entry. */
+static void pim_g2rp_timer_stop(struct bsm_rpinfo *bsrp)
+{
+ if (!bsrp)
+ return;
+
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s : stopping g2rp timer for grp: %pFX - rp: %pPAs",
+ __func__, &bsrp->bsgrp_node->group,
+ &bsrp->rp_address);
+
+ EVENT_OFF(bsrp->g2rp_timer);
+}
+
+/* True when the RP entry was advertised with a zero hold time, i.e.
+ * the BSR is withdrawing the mapping immediately.
+ */
+static bool is_hold_time_zero(void *data)
+{
+	const struct bsm_rpinfo *bsrp = data;
+
+	return bsrp->rp_holdtime == 0;
+}
+
+/* Promote the fully received pending (partial) RP list of a group to be
+ * its active list.  Zero-hold-time entries are purged first, the RP
+ * mapping in pim->rp_table is created/changed/deleted as appropriate
+ * (static RPs always win), the two lists are swapped, and the group
+ * node is freed entirely when nothing remains.
+ */
+static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
+{
+ struct bsm_rpinfo *active;
+ struct bsm_rpinfo *pend;
+ struct rp_info *rp_info;
+ struct route_node *rn;
+ struct pim_instance *pim;
+ struct rp_info *rp_all;
+ struct prefix group_all;
+ bool had_rp_node = true;
+
+ pim = bsgrp_node->scope->pim;
+ active = bsm_rpinfos_first(bsgrp_node->bsrp_list);
+
+ /* Remove nodes with hold time 0 & check if list still has a head */
+ frr_each_safe (bsm_rpinfos, bsgrp_node->partial_bsrp_list, pend) {
+ if (is_hold_time_zero(pend)) {
+ bsm_rpinfos_del(bsgrp_node->partial_bsrp_list, pend);
+ pim_bsm_rpinfo_free(pend);
+ }
+ }
+
+ pend = bsm_rpinfos_first(bsgrp_node->partial_bsrp_list);
+
+ if (!pim_get_all_mcast_group(&group_all))
+ return;
+
+ rp_all = pim_rp_find_match_group(pim, &group_all);
+ rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
+
+ if (pend)
+ pim_g2rp_timer_start(pend, pend->rp_holdtime);
+
+ /* if rp node doesn't exist or exist but not configured(rp_all),
+ * install the rp from head(if exists) of partial list. List is
+ * sorted such that head is the elected RP for the group.
+ */
+ if (!rn || (prefix_same(&rp_all->group, &bsgrp_node->group) &&
+ pim_rpf_addr_is_inaddr_any(&rp_all->rp))) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: Route node doesn't exist", __func__);
+ if (pend)
+ pim_rp_new(pim, pend->rp_address, bsgrp_node->group,
+ NULL, RP_SRC_BSR);
+ had_rp_node = false;
+ } else {
+ rp_info = (struct rp_info *)rn->info;
+ if (!rp_info) {
+ route_unlock_node(rn);
+ if (pend)
+ pim_rp_new(pim, pend->rp_address,
+ bsgrp_node->group, NULL, RP_SRC_BSR);
+ had_rp_node = false;
+ }
+ }
+
+ /* We didn't have rp node and pending list is empty(unlikely), cleanup*/
+ if ((!had_rp_node) && (!pend)) {
+ pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+ &bsgrp_node->group);
+ pim_free_bsgrp_data(bsgrp_node);
+ return;
+ }
+
+ if ((had_rp_node) && (rp_info->rp_src != RP_SRC_STATIC)) {
+ /* This means we searched and got rp node, needs unlock */
+ route_unlock_node(rn);
+
+ if (active && pend) {
+ if (pim_addr_cmp(active->rp_address, pend->rp_address))
+ pim_rp_change(pim, pend->rp_address,
+ bsgrp_node->group, RP_SRC_BSR);
+ }
+
+ /* Possible when the first BSM has group with 0 rp count */
+ if ((!active) && (!pend)) {
+ if (PIM_DEBUG_BSM) {
+ zlog_debug(
+ "%s: Both bsrp and partial list are empty",
+ __func__);
+ }
+ pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+ &bsgrp_node->group);
+ pim_free_bsgrp_data(bsgrp_node);
+ return;
+ }
+
+ /* Possible when a group with 0 rp count received in BSM */
+ if ((active) && (!pend)) {
+ pim_rp_del(pim, active->rp_address, bsgrp_node->group,
+ NULL, RP_SRC_BSR);
+ pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+ &bsgrp_node->group);
+ if (PIM_DEBUG_BSM) {
+ zlog_debug("%s:Pend List is null,del grp node",
+ __func__);
+ }
+ pim_free_bsgrp_data(bsgrp_node);
+ return;
+ }
+ }
+
+ if ((had_rp_node) && (rp_info->rp_src == RP_SRC_STATIC)) {
+ /* We need to unlock rn this case */
+ route_unlock_node(rn);
+ /* there is a chance that static rp exist and bsrp cleaned
+ * so clean bsgrp node if pending list empty
+ */
+ if (!pend) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s: Partial list is empty, static rp exists",
+ __func__);
+ pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+ &bsgrp_node->group);
+ pim_free_bsgrp_data(bsgrp_node);
+ return;
+ }
+ }
+
+ /* swap the list & delete all nodes in partial list (old bsrp_list)
+ * before swap
+ * active is head of bsrp list
+ * pend is head of partial list
+ * After swap
+ * active is head of partial list
+ * pend is head of bsrp list
+ * So check appropriate head after swap and clean the new partial list
+ */
+ bsm_rpinfos_swap_all(bsgrp_node->bsrp_list,
+ bsgrp_node->partial_bsrp_list);
+
+ if (active)
+ pim_g2rp_timer_stop(active);
+ pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list);
+}
+
+/* Decide whether a BSM sender should be accepted over the currently
+ * elected BSR: the incumbent itself always is; otherwise higher
+ * priority wins, and on a priority tie the higher (or equal) address
+ * wins.
+ */
+static bool is_preferred_bsr(struct pim_instance *pim, pim_addr bsr,
+			     uint32_t bsr_prio)
+{
+	/* Same BSR as currently elected: always acceptable. */
+	if (!pim_addr_cmp(bsr, pim->global_scope.current_bsr))
+		return true;
+
+	/* Different priorities: the larger one is preferred. */
+	if (bsr_prio != pim->global_scope.current_bsr_prio)
+		return bsr_prio > pim->global_scope.current_bsr_prio;
+
+	/* Priority tie: break on address comparison. */
+	return pim_addr_cmp(bsr, pim->global_scope.current_bsr) >= 0;
+}
+
+/* Record 'bsr' as the current BSR of the global scope.  When the BSR
+ * actually changes, NHT tracking is moved from the old BSR to the new
+ * one and the first-seen timestamp/state are refreshed; priority and
+ * last-seen timestamp are updated unconditionally.
+ */
+static void pim_bsm_update(struct pim_instance *pim, pim_addr bsr,
+ uint32_t bsr_prio)
+{
+ if (pim_addr_cmp(bsr, pim->global_scope.current_bsr)) {
+ pim_nht_bsr_del(pim, pim->global_scope.current_bsr);
+ pim_nht_bsr_add(pim, bsr);
+
+ pim->global_scope.current_bsr = bsr;
+ pim->global_scope.current_bsr_first_ts =
+ pim_time_monotonic_sec();
+ pim->global_scope.state = ACCEPT_PREFERRED;
+ }
+ pim->global_scope.current_bsr_prio = bsr_prio;
+ pim->global_scope.current_bsr_last_ts = pim_time_monotonic_sec();
+}
+
+/* Operator-triggered reset of all BSR/BSM state in a VRF: forget the
+ * current BSR, drop cached fragments, remove every BSR-learnt RP
+ * mapping (static RPs are kept), then recompute group-to-RP mappings
+ * and refresh the upstream (*,G) entries affected by the change.
+ */
+void pim_bsm_clear(struct pim_instance *pim)
+{
+ struct route_node *rn;
+ struct route_node *rpnode;
+ struct bsgrp_node *bsgrp;
+ pim_addr nht_p;
+ struct prefix g_all;
+ struct rp_info *rp_all;
+ struct pim_upstream *up;
+ struct rp_info *rp_info;
+ bool upstream_updated = false;
+
+ pim_nht_bsr_del(pim, pim->global_scope.current_bsr);
+
+ /* Reset scope zone data */
+ pim->global_scope.accept_nofwd_bsm = false;
+ pim->global_scope.state = ACCEPT_ANY;
+ pim->global_scope.current_bsr = PIMADDR_ANY;
+ pim->global_scope.current_bsr_prio = 0;
+ pim->global_scope.current_bsr_first_ts = 0;
+ pim->global_scope.current_bsr_last_ts = 0;
+ pim->global_scope.bsm_frag_tag = 0;
+ pim_bsm_frags_free(&pim->global_scope);
+
+ pim_bs_timer_stop(&pim->global_scope);
+
+ for (rn = route_top(pim->global_scope.bsrp_table); rn;
+ rn = route_next(rn)) {
+ bsgrp = rn->info;
+ if (!bsgrp)
+ continue;
+
+ rpnode = route_node_lookup(pim->rp_table, &bsgrp->group);
+
+ if (!rpnode) {
+ pim_free_bsgrp_node(bsgrp->scope->bsrp_table,
+ &bsgrp->group);
+ pim_free_bsgrp_data(bsgrp);
+ continue;
+ }
+
+ rp_info = (struct rp_info *)rpnode->info;
+
+ if ((!rp_info) || (rp_info->rp_src != RP_SRC_BSR)) {
+ pim_free_bsgrp_node(bsgrp->scope->bsrp_table,
+ &bsgrp->group);
+ pim_free_bsgrp_data(bsgrp);
+ continue;
+ }
+
+ /* Deregister addr with Zebra NHT */
+ nht_p = rp_info->rp.rpf_addr;
+
+ if (PIM_DEBUG_PIM_NHT_RP) {
+ zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
+ __func__, &nht_p);
+ }
+
+ pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
+
+ /* NOTE(review): returning here aborts the table walk midway,
+ * leaving remaining BSR-learnt mappings in place — presumably
+ * unreachable for supported address families; confirm.
+ */
+ if (!pim_get_all_mcast_group(&g_all))
+ return;
+
+ rp_all = pim_rp_find_match_group(pim, &g_all);
+
+ if (rp_all == rp_info) {
+ /* Catch-all entry is reset rather than deleted. */
+ rp_all->rp.rpf_addr = PIMADDR_ANY;
+ rp_all->i_am_rp = 0;
+ } else {
+ /* Delete the rp_info from rp-list */
+ listnode_delete(pim->rp_list, rp_info);
+
+ /* Delete the rp node from rp_table; double unlock drops
+ * both the lookup reference and the table's own one.
+ */
+ rpnode->info = NULL;
+ route_unlock_node(rpnode);
+ route_unlock_node(rpnode);
+ XFREE(MTYPE_PIM_RP, rp_info);
+ }
+
+ pim_free_bsgrp_node(bsgrp->scope->bsrp_table, &bsgrp->group);
+ pim_free_bsgrp_data(bsgrp);
+ }
+ pim_rp_refresh_group_to_rp_mapping(pim);
+
+
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ /* Find the upstream (*, G) whose upstream address is same as
+ * the RP
+ */
+ if (!pim_addr_is_any(up->sg.src))
+ continue;
+
+ struct prefix grp;
+ struct rp_info *trp_info;
+
+ pim_addr_to_prefix(&grp, up->sg.grp);
+ trp_info = pim_rp_find_match_group(pim, &grp);
+
+ /* RP not found for the group grp */
+ if (pim_rpf_addr_is_inaddr_any(&trp_info->rp)) {
+ pim_upstream_rpf_clear(pim, up);
+ pim_rp_set_upstream_addr(pim, &up->upstream_addr,
+ up->sg.src, up->sg.grp);
+ } else {
+ /* RP found for the group grp */
+ pim_upstream_update(pim, up);
+ upstream_updated = true;
+ }
+ }
+
+ if (upstream_updated)
+ pim_zebra_update_all_interfaces(pim);
+}
+
+/* Transmit one ready-made BSM packet (PIM header already built) out of
+ * 'ifp' to 'dst_addr'.  Updates the interface and instance TX counters
+ * on success; returns false if the interface has no usable PIM state or
+ * the send fails.
+ */
+static bool pim_bsm_send_intf(uint8_t *buf, int len, struct interface *ifp,
+ pim_addr dst_addr)
+{
+ struct pim_interface *pim_ifp;
+
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: Pim interface not available for %s",
+ __func__, ifp->name);
+ return false;
+ }
+
+ if (pim_ifp->pim_sock_fd == -1) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: Pim sock not available for %s",
+ __func__, ifp->name);
+ return false;
+ }
+
+ if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
+ dst_addr, buf, len, ifp)) {
+ zlog_warn("%s: Could not send BSM message on interface: %s",
+ __func__, ifp->name);
+ return false;
+ }
+
+ /* Passive interfaces do not count per-interface TX stats. */
+ if (!pim_ifp->pim_passive_enable)
+ pim_ifp->pim_ifstat_bsm_tx++;
+
+ pim_ifp->pim->bsm_sent++;
+ return true;
+}
+
+/* Split a BSM that exceeds the interface's PIM MTU into multiple
+ * fragments and send each one.  Every fragment re-uses the original
+ * PIM + BSM headers; group headers are repeated across fragments with
+ * frag_rp_count adjusted to the number of RPs actually carried.
+ * Returns false only when the MTU cannot even fit a minimal BSM.
+ */
+static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,
+ uint32_t pim_mtu, pim_addr dst_addr, bool no_fwd)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+ struct bsmmsg_grpinfo *grpinfo, *curgrp;
+ uint8_t *firstgrp_ptr;
+ uint8_t *pkt;
+ uint8_t *pak_start;
+ uint32_t parsed_len = 0;
+ uint32_t this_pkt_rem;
+ uint32_t copy_byte_count;
+ uint32_t this_pkt_len;
+ uint8_t total_rp_cnt;
+ uint8_t this_rp_cnt;
+ uint8_t frag_rp_cnt;
+ uint8_t rp_fit_cnt;
+ bool pak_pending = false;
+
+ /* MTU passed here is PIM MTU (IP MTU less IP Hdr) */
+ if (pim_mtu < (PIM_MIN_BSM_LEN)) {
+ zlog_warn(
+ "%s: mtu(pim mtu: %d) size less than minimum bootstrap len",
+ __func__, pim_mtu);
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s: mtu (pim mtu:%d) less than minimum bootstrap len",
+ __func__, pim_mtu);
+ return false;
+ }
+
+ /* Scratch buffer for building one fragment at a time. */
+ pak_start = XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM, pim_mtu);
+
+ pkt = pak_start;
+
+ /* Fill PIM header later before sending packet to calc checksum */
+ pkt += PIM_MSG_HEADER_LEN;
+ buf += PIM_MSG_HEADER_LEN;
+
+ /* copy bsm header to new packet at offset of pim hdr */
+ memcpy(pkt, buf, PIM_BSM_HDR_LEN);
+ pkt += PIM_BSM_HDR_LEN;
+ buf += PIM_BSM_HDR_LEN;
+ parsed_len += (PIM_MSG_HEADER_LEN + PIM_BSM_HDR_LEN);
+
+ /* Store the position of first grp ptr, which can be reused for
+ * next packet to start filling group. old bsm header and pim hdr
+ * remains. So need not be filled again for next packet onwards.
+ */
+ firstgrp_ptr = pkt;
+
+ /* we received mtu excluding IP hdr len as param
+ * now this_pkt_rem is mtu excluding
+ * PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN
+ */
+ this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN);
+
+ /* For each group till the packet length parsed */
+ while (parsed_len < len) {
+ /* pkt ---> fragment's current pointer
+ * buf ---> input buffer's current pointer
+ * mtu ---> size of the pim packet - PIM header
+ * curgrp ---> current group on the fragment
+ * grpinfo ---> current group on the input buffer
+ * this_pkt_rem ---> bytes remaing on the current fragment
+ * rp_fit_cnt ---> num of rp for current grp that
+ * fits this frag
+ * total_rp_cnt ---> total rp present for the group in the buf
+ * frag_rp_cnt ---> no of rp for the group to be fit in
+ * the frag
+ * this_rp_cnt ---> how many rp have we parsed
+ */
+ grpinfo = (struct bsmmsg_grpinfo *)buf;
+ memcpy(pkt, buf, PIM_BSM_GRP_LEN);
+ curgrp = (struct bsmmsg_grpinfo *)pkt;
+ parsed_len += PIM_BSM_GRP_LEN;
+ pkt += PIM_BSM_GRP_LEN;
+ buf += PIM_BSM_GRP_LEN;
+ this_pkt_rem -= PIM_BSM_GRP_LEN;
+
+ /* initialize rp count and total_rp_cnt before the rp loop */
+ this_rp_cnt = 0;
+ total_rp_cnt = grpinfo->frag_rp_count;
+
+ /* Loop till all RPs for the group parsed */
+ while (this_rp_cnt < total_rp_cnt) {
+ /* All RP from a group processed here.
+ * group is pointed by grpinfo.
+ * At this point make sure buf pointing to a RP
+ * within a group
+ */
+ rp_fit_cnt = this_pkt_rem / PIM_BSM_RP_LEN;
+
+ /* calculate how many rp am i going to copy in
+ * this frag
+ */
+ if (rp_fit_cnt > (total_rp_cnt - this_rp_cnt))
+ frag_rp_cnt = total_rp_cnt - this_rp_cnt;
+ else
+ frag_rp_cnt = rp_fit_cnt;
+
+ /* populate the frag rp count for the current grp */
+ curgrp->frag_rp_count = frag_rp_cnt;
+ copy_byte_count = frag_rp_cnt * PIM_BSM_RP_LEN;
+
+ /* copy all the rp that we are fitting in this
+ * frag for the grp
+ */
+ memcpy(pkt, buf, copy_byte_count);
+ this_rp_cnt += frag_rp_cnt;
+ buf += copy_byte_count;
+ pkt += copy_byte_count;
+ parsed_len += copy_byte_count;
+ this_pkt_rem -= copy_byte_count;
+
+ /* Either we couldn't fit all rp for the group or the
+ * mtu reached
+ */
+ if ((this_rp_cnt < total_rp_cnt)
+ || (this_pkt_rem
+ < (PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN))) {
+ /* No space to fit in more rp, send this pkt */
+ this_pkt_len = pim_mtu - this_pkt_rem;
+ pim_msg_build_header(
+ pim_ifp->primary_address, dst_addr,
+ pak_start, this_pkt_len,
+ PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
+ pim_bsm_send_intf(pak_start, this_pkt_len, ifp,
+ dst_addr);
+
+ /* Construct next fragment. Reuse old packet */
+ pkt = firstgrp_ptr;
+ this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN
+ + PIM_MSG_HEADER_LEN);
+
+ /* If pkt can't accommodate next group + at
+ * least one rp, we must break out of this inner
+ * loop and process next RP
+ */
+ if (total_rp_cnt == this_rp_cnt)
+ break;
+
+ /* If some more RPs for the same group pending,
+ * fill grp hdr
+ */
+ memcpy(pkt, (uint8_t *)grpinfo,
+ PIM_BSM_GRP_LEN);
+ curgrp = (struct bsmmsg_grpinfo *)pkt;
+ pkt += PIM_BSM_GRP_LEN;
+ this_pkt_rem -= PIM_BSM_GRP_LEN;
+ pak_pending = false;
+ } else {
+ /* We filled something but not yet sent out */
+ pak_pending = true;
+ }
+ } /* while RP count */
+ } /*while parsed len */
+
+ /* Send if we have any unsent packet */
+ if (pak_pending) {
+ this_pkt_len = pim_mtu - this_pkt_rem;
+ pim_msg_build_header(pim_ifp->primary_address, dst_addr,
+ pak_start, this_pkt_len,
+ PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
+ pim_bsm_send_intf(pak_start, (pim_mtu - this_pkt_rem), ifp,
+ dst_addr);
+ }
+ XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM, pak_start);
+ return true;
+}
+
+/* Forward a received BSM out of every BSM-enabled PIM interface in the
+ * VRF that has at least one neighbor, fragmenting per interface MTU as
+ * needed.  The scope-zone id 'sz' is accepted but currently unused —
+ * only the global scope is supported.
+ */
+static void pim_bsm_fwd_whole_sz(struct pim_instance *pim, uint8_t *buf,
+ uint32_t len, int sz)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ pim_addr dst_addr;
+ uint32_t pim_mtu;
+ bool no_fwd = false;
+ bool ret = false;
+
+ /* For now only global scope zone is supported, so send on all
+ * pim interfaces in the vrf
+ */
+ dst_addr = qpim_all_pim_routers_addr;
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ pim_ifp = ifp->info;
+ if ((!pim_ifp) || (!pim_ifp->bsm_enable))
+ continue;
+
+ /*
+ * RFC 5059 Sec 3.4:
+ * When a Bootstrap message is forwarded, it is forwarded out
+ * of every multicast-capable interface that has PIM neighbors.
+ *
+ * So skipping pim interfaces with no neighbors.
+ */
+ if (listcount(pim_ifp->pim_neighbor_list) == 0)
+ continue;
+
+ pim_hello_require(ifp);
+ pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
+ if (pim_mtu < len) {
+ ret = pim_bsm_frag_send(buf, len, ifp, pim_mtu,
+ dst_addr, no_fwd);
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: pim_bsm_frag_send returned %s",
+ __func__, ret ? "TRUE" : "FALSE");
+ } else {
+ pim_msg_build_header(pim_ifp->primary_address, dst_addr,
+ buf, len, PIM_MSG_TYPE_BOOTSTRAP,
+ no_fwd);
+ if (!pim_bsm_send_intf(buf, len, ifp, dst_addr)) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s: pim_bsm_send_intf returned false",
+ __func__);
+ }
+ }
+ }
+}
+
+/* Send the cached BSM fragments of the global scope to a newly seen
+ * neighbor (unicast if the interface accepts unicast BSM, otherwise to
+ * ALL-PIM-ROUTERS), with the no-forward bit set.  Returns the result
+ * of the last transmission attempt.
+ */
+bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
+{
+ pim_addr dst_addr;
+ struct pim_interface *pim_ifp;
+ struct bsm_scope *scope;
+ struct bsm_frag *bsfrag;
+ uint32_t pim_mtu;
+ bool no_fwd = true;
+ bool ret = false;
+
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: New neighbor %pPA seen on %s", __func__,
+ &neigh->source_addr, ifp->name);
+
+ pim_ifp = ifp->info;
+
+ /* DR only forwards BSM packet */
+ /* NOTE(review): this condition is true when our own address equals
+ * the DR address, yet the log claims "not DR" and execution falls
+ * through without returning — verify the intended behavior.
+ */
+ if (!pim_addr_cmp(pim_ifp->pim_dr_addr, pim_ifp->primary_address)) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s: It is not DR, so don't forward BSM packet",
+ __func__);
+ }
+
+ if (!pim_ifp->bsm_enable) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: BSM proc not enabled on %s", __func__,
+ ifp->name);
+ return ret;
+ }
+
+ scope = &pim_ifp->pim->global_scope;
+
+ if (!bsm_frags_count(scope->bsm_frags)) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: BSM list for the scope is empty",
+ __func__);
+ return ret;
+ }
+
+ if (!pim_ifp->ucast_bsm_accept) {
+ dst_addr = qpim_all_pim_routers_addr;
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: Sending BSM mcast to %pPA", __func__,
+ &neigh->source_addr);
+ } else {
+ dst_addr = neigh->source_addr;
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: Sending BSM ucast to %pPA", __func__,
+ &neigh->source_addr);
+ }
+ pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
+ pim_hello_require(ifp);
+
+ frr_each (bsm_frags, scope->bsm_frags, bsfrag) {
+ if (pim_mtu < bsfrag->size) {
+ ret = pim_bsm_frag_send(bsfrag->data, bsfrag->size, ifp,
+ pim_mtu, dst_addr, no_fwd);
+ if (!ret) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s: pim_bsm_frag_send failed",
+ __func__);
+ }
+ } else {
+ /* Pim header needs to be constructed */
+ pim_msg_build_header(pim_ifp->primary_address, dst_addr,
+ bsfrag->data, bsfrag->size,
+ PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
+ ret = pim_bsm_send_intf(bsfrag->data, bsfrag->size, ifp,
+ dst_addr);
+ /* NOTE(review): debug text says frag_send but this is
+ * the pim_bsm_send_intf() path.
+ */
+ if (!ret) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s: pim_bsm_frag_send failed",
+ __func__);
+ }
+ }
+ }
+ return ret;
+}
+
+/* Look up the BSM group node for 'grp' in the scope's RP table.
+ * Returns NULL when no node exists; otherwise returns the node's info
+ * pointer after dropping the lookup reference.
+ */
+struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
+					  struct prefix *grp)
+{
+	struct route_node *rn = route_node_lookup(scope->bsrp_table, grp);
+
+	if (rn) {
+		struct bsgrp_node *bsgrp = rn->info;
+
+		/* Drop the reference taken by the lookup. */
+		route_unlock_node(rn);
+		return bsgrp;
+	}
+
+	if (PIM_DEBUG_BSM)
+		zlog_debug("%s: Route node doesn't exist for the group",
+			   __func__);
+	return NULL;
+}
+
+/* Compute the BSR hash value for a (group, RP) pair:
+ * hash = (1103515245 * ((1103515245 * (grp & mask) + 12345) ^ rp)
+ *         + 12345) mod 2^31
+ * where 'mask' keeps the top 'hashmasklen' bits of the group.  For
+ * IPv6 the 128-bit values are first folded to 32 bits by XOR of the
+ * four words.  Higher hash wins the per-group RP election on a
+ * priority tie.
+ */
+static uint32_t hash_calc_on_grp_rp(struct prefix group, pim_addr rp,
+ uint8_t hashmasklen)
+{
+ uint64_t temp;
+ uint32_t hash;
+ uint32_t grpaddr;
+ uint32_t rp_add;
+ uint32_t mask = 0xffffffff;
+
+ /* mask to be made zero if hashmasklen is 0 because mask << 32
+ * may not give 0. hashmasklen can be 0 to 32.
+ */
+ if (hashmasklen == 0)
+ mask = 0;
+
+ /* in_addr stores ip in big endian, hence network byte order
+ * convert to uint32 before processing hash
+ */
+#if PIM_IPV == 4
+ grpaddr = ntohl(group.u.prefix4.s_addr);
+#else
+ grpaddr = group.u.prefix6.s6_addr32[0] ^ group.u.prefix6.s6_addr32[1] ^
+ group.u.prefix6.s6_addr32[2] ^ group.u.prefix6.s6_addr32[3];
+#endif
+ /* Avoid shifting by 32 bit on a 32 bit register */
+ if (hashmasklen)
+ grpaddr = grpaddr & ((mask << (32 - hashmasklen)));
+ else
+ grpaddr = grpaddr & mask;
+
+#if PIM_IPV == 4
+ rp_add = ntohl(rp.s_addr);
+#else
+ rp_add = rp.s6_addr32[0] ^ rp.s6_addr32[1] ^ rp.s6_addr32[2] ^
+ rp.s6_addr32[3];
+#endif
+ temp = 1103515245 * ((1103515245 * (uint64_t)grpaddr + 12345) ^ rp_add)
+ + 12345;
+ /* Keep the low 31 bits. */
+ hash = temp & (0x7fffffff);
+ return hash;
+}
+
+/* Create a bsm_rpinfo from one parsed RP TLV and insert it, sorted by
+ * election order, into the group's partial (in-construction) list.
+ * Returns true on successful insertion, false if the sorted-list add
+ * is rejected (duplicate key), in which case the node is freed.
+ */
+static bool pim_install_bsm_grp_rp(struct pim_instance *pim,
+ struct bsgrp_node *grpnode,
+ struct bsmmsg_rpinfo *rp)
+{
+ struct bsm_rpinfo *bsm_rpinfo;
+ uint8_t hashMask_len = pim->global_scope.hashMasklen;
+
+ /*memory allocation for bsm_rpinfo */
+ bsm_rpinfo = XCALLOC(MTYPE_PIM_BSRP_INFO, sizeof(*bsm_rpinfo));
+
+ bsm_rpinfo->rp_prio = rp->rp_pri;
+ bsm_rpinfo->rp_holdtime = rp->rp_holdtime;
+ bsm_rpinfo->rp_address = rp->rpaddr.addr;
+ bsm_rpinfo->elapse_time = 0;
+
+ /* Back pointer to the group node. */
+ bsm_rpinfo->bsgrp_node = grpnode;
+
+ /* update hash for this rp node */
+ bsm_rpinfo->hash = hash_calc_on_grp_rp(grpnode->group, rp->rpaddr.addr,
+ hashMask_len);
+ if (bsm_rpinfos_add(grpnode->partial_bsrp_list, bsm_rpinfo) == NULL) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s, bs_rpinfo node added to the partial bs_rplist.",
+ __func__);
+ return true;
+ }
+
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: list node not added", __func__);
+
+ XFREE(MTYPE_PIM_BSRP_INFO, bsm_rpinfo);
+ return false;
+}
+
+/* Track how many RPs are still expected for a group across BSM
+ * fragments.  A new fragment tag means a new BSM: the half-built
+ * partial list is discarded and the expected count reset.  The 'sz'
+ * parameter is currently unused.
+ */
+static void pim_update_pending_rp_cnt(struct bsm_scope *sz,
+ struct bsgrp_node *bsgrp,
+ uint16_t bsm_frag_tag,
+ uint32_t total_rp_count)
+{
+ if (bsgrp->pend_rp_cnt) {
+ /* received bsm is different packet ,
+ * it is not same fragment.
+ */
+ if (bsm_frag_tag != bsgrp->frag_tag) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s,Received a new BSM ,so clear the pending bs_rpinfo list.",
+ __func__);
+ pim_bsm_rpinfos_free(bsgrp->partial_bsrp_list);
+ bsgrp->pend_rp_cnt = total_rp_count;
+ }
+ } else
+ bsgrp->pend_rp_cnt = total_rp_count;
+
+ bsgrp->frag_tag = bsm_frag_tag;
+}
+
+/* Parsing BSR packet and adding to partial list of corresponding bsgrp node.
+ * Walks the group TLVs in 'buf' (bounds-checked against 'buflen'); a
+ * zero RP count withdraws the group, otherwise each RP TLV is added to
+ * the group's partial list, which is promoted to active once all
+ * expected RPs have arrived.  Returns false on a malformed packet.
+ */
+static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
+ int buflen, uint16_t bsm_frag_tag)
+{
+ struct bsmmsg_grpinfo grpinfo;
+ struct bsmmsg_rpinfo rpinfo;
+ struct prefix group;
+ struct bsgrp_node *bsgrp = NULL;
+ int frag_rp_cnt = 0;
+ int offset = 0;
+ int ins_count = 0;
+ pim_addr grp_addr;
+
+ while (buflen > offset) {
+ if (offset + (int)sizeof(struct bsmmsg_grpinfo) > buflen) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s: buflen received %d is less than the internal data structure of the packet would suggest",
+ __func__, buflen);
+ return false;
+ }
+ /* Extract Group tlv from BSM */
+ memcpy(&grpinfo, buf, sizeof(struct bsmmsg_grpinfo));
+ grp_addr = grpinfo.group.addr;
+
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s, Group %pPAs Rpcount:%d Fragment-Rp-count:%d",
+ __func__, &grp_addr, grpinfo.rp_count,
+ grpinfo.frag_rp_count);
+
+ buf += sizeof(struct bsmmsg_grpinfo);
+ offset += sizeof(struct bsmmsg_grpinfo);
+
+ group.family = PIM_AF;
+ if (grpinfo.group.mask > PIM_MAX_BITLEN) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s, prefix length specified: %d is too long",
+ __func__, grpinfo.group.mask);
+ return false;
+ }
+
+ pim_addr_to_prefix(&group, grp_addr);
+ group.prefixlen = grpinfo.group.mask;
+
+ /* Get the Group node for the BSM rp table */
+ bsgrp = pim_bsm_get_bsgrp_node(scope, &group);
+
+ if (grpinfo.rp_count == 0) {
+ struct bsm_rpinfo *old_rpinfo;
+
+ /* BSR explicitly no longer has RPs for this group */
+ if (!bsgrp)
+ continue;
+
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s, Rp count is zero for group: %pPAs",
+ __func__, &grp_addr);
+
+ old_rpinfo = bsm_rpinfos_first(bsgrp->bsrp_list);
+ if (old_rpinfo)
+ pim_rp_del(scope->pim, old_rpinfo->rp_address,
+ group, NULL, RP_SRC_BSR);
+
+ pim_free_bsgrp_node(scope->bsrp_table, &bsgrp->group);
+ pim_free_bsgrp_data(bsgrp);
+ continue;
+ }
+
+ if (!bsgrp) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s, Create new BSM Group node.",
+ __func__);
+
+ /* create a new node to be added to the tree. */
+ bsgrp = pim_bsm_new_bsgrp_node(scope->bsrp_table,
+ &group);
+
+ if (!bsgrp) {
+ zlog_debug(
+ "%s, Failed to get the BSM group node.",
+ __func__);
+ continue;
+ }
+
+ bsgrp->scope = scope;
+ }
+
+ pim_update_pending_rp_cnt(scope, bsgrp, bsm_frag_tag,
+ grpinfo.rp_count);
+ frag_rp_cnt = grpinfo.frag_rp_count;
+ ins_count = 0;
+
+ while (frag_rp_cnt--) {
+ if (offset + (int)sizeof(struct bsmmsg_rpinfo)
+ > buflen) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s, buflen received: %u is less than the internal data structure of the packet would suggest",
+ __func__, buflen);
+ return false;
+ }
+
+ /* Extract RP address tlv from BSM */
+ memcpy(&rpinfo, buf, sizeof(struct bsmmsg_rpinfo));
+ rpinfo.rp_holdtime = ntohs(rpinfo.rp_holdtime);
+ buf += sizeof(struct bsmmsg_rpinfo);
+ offset += sizeof(struct bsmmsg_rpinfo);
+
+ if (PIM_DEBUG_BSM) {
+ pim_addr rp_addr;
+
+ rp_addr = rpinfo.rpaddr.addr;
+ zlog_debug(
+ "%s, Rp address - %pPAs; pri:%d hold:%d",
+ __func__, &rp_addr, rpinfo.rp_pri,
+ rpinfo.rp_holdtime);
+ }
+
+ /* Call Install api to update grp-rp mappings */
+ if (pim_install_bsm_grp_rp(scope->pim, bsgrp, &rpinfo))
+ ins_count++;
+ }
+
+ bsgrp->pend_rp_cnt -= ins_count;
+
+ if (!bsgrp->pend_rp_cnt) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s, Recvd all the rps for this group, so bsrp list with penidng rp list.",
+ __func__);
+ /* replace the bsrp_list with pending list */
+ pim_instate_pend_list(bsgrp);
+ }
+ }
+ return true;
+}
+
+/*
+ * pim_bsm_process() - receive-path entry point for a PIM Bootstrap Message.
+ *
+ * ifp      - interface the packet arrived on
+ * sg       - source/destination addresses taken from the IP header
+ * buf      - full PIM packet, starting at the PIM header
+ * buf_size - length of buf in bytes
+ * no_fwd   - true when the BSM carried the No-Forward bit
+ *
+ * Validates the packet (interface/PIM state, length, BSR preference,
+ * RPF check for multicast BSMs, unicast-BSM acceptance), installs the
+ * contained group-to-RP mappings into the global scope, and - unless
+ * no_fwd - re-forwards the fragment and caches a copy for new neighbors.
+ *
+ * Returns 0 on success (including passively skipped packets), -1 on any
+ * drop; drop reasons are counted in the per-pim/per-interface stats.
+ */
+int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
+		    uint32_t buf_size, bool no_fwd)
+{
+	struct bsm_hdr *bshdr;
+	int sz = PIM_GBL_SZ_ID;
+	struct bsmmsg_grpinfo *msg_grp;
+	struct pim_interface *pim_ifp = NULL;
+	struct bsm_frag *bsfrag;
+	struct pim_instance *pim;
+	uint16_t frag_tag;
+	pim_addr bsr_addr;
+	bool empty_bsm = false;
+
+	/* BSM Packet acceptance validation */
+	pim_ifp = ifp->info;
+	if (!pim_ifp) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s: multicast not enabled on interface %s",
+				   __func__, ifp->name);
+		return -1;
+	}
+
+	/* Passive interfaces never process received PIM packets */
+	if (pim_ifp->pim_passive_enable) {
+		if (PIM_DEBUG_PIM_PACKETS)
+			zlog_debug(
+				"skip receiving PIM message on passive interface %s",
+				ifp->name);
+		return 0;
+	}
+
+	pim_ifp->pim_ifstat_bsm_rx++;
+	pim = pim_ifp->pim;
+	pim->bsm_rcvd++;
+
+	/* Drop if bsm processing is disabled on interface */
+	if (!pim_ifp->bsm_enable) {
+		zlog_warn("%s: BSM not enabled on interface %s", __func__,
+			  ifp->name);
+		pim_ifp->pim_ifstat_bsm_cfg_miss++;
+		pim->bsm_dropped++;
+		return -1;
+	}
+
+	/* Must at least hold the PIM header plus the BSM header */
+	if (buf_size < (PIM_MSG_HEADER_LEN + sizeof(struct bsm_hdr))) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug(
+				"%s: received buffer length of %d which is too small to properly decode",
+				__func__, buf_size);
+		return -1;
+	}
+
+	bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN);
+	if (bshdr->hm_len > PIM_MAX_BITLEN) {
+		zlog_warn(
+			"Bad hashmask length for %s; got %hhu, expected value in range 0-32",
+			PIM_AF_NAME, bshdr->hm_len);
+		pim->bsm_dropped++;
+		return -1;
+	}
+	pim->global_scope.hashMasklen = bshdr->hm_len;
+	frag_tag = ntohs(bshdr->frag_tag);
+	/* NB: bshdr->bsr_addr.addr is packed/unaligned => memcpy */
+	memcpy(&bsr_addr, &bshdr->bsr_addr.addr, sizeof(bsr_addr));
+
+	/* Identify empty BSM (headers only, no group TLV present) */
+	if ((buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN) < PIM_BSM_GRP_LEN)
+		empty_bsm = true;
+
+	if (!empty_bsm) {
+		msg_grp = (struct bsmmsg_grpinfo *)(buf + PIM_MSG_HEADER_LEN
+						    + PIM_BSM_HDR_LEN);
+		/* Currently we don't support scope zoned BSM */
+		if (msg_grp->group.sz) {
+			if (PIM_DEBUG_BSM)
+				zlog_debug(
+					"%s : Administratively scoped range BSM received",
+					__func__);
+			pim_ifp->pim_ifstat_bsm_invalid_sz++;
+			pim->bsm_dropped++;
+			return -1;
+		}
+	}
+
+	/* Drop if bsr is not preferred bsr */
+	if (!is_preferred_bsr(pim, bsr_addr, bshdr->bsr_prio)) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s : Received a non-preferred BSM",
+				   __func__);
+		pim->bsm_dropped++;
+		return -1;
+	}
+
+	if (no_fwd) {
+		/* only accept no-forward BSM if quick refresh on startup,
+		 * or it belongs to the fragment set we are already tracking
+		 */
+		if ((pim->global_scope.accept_nofwd_bsm)
+		    || (frag_tag == pim->global_scope.bsm_frag_tag)) {
+			pim->global_scope.accept_nofwd_bsm = false;
+		} else {
+			if (PIM_DEBUG_BSM)
+				zlog_debug(
+					"%s : nofwd_bsm received on %pPAs when accpt_nofwd_bsm false",
+					__func__, &bsr_addr);
+			pim->bsm_dropped++;
+			pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
+			return -1;
+		}
+	}
+
+	/* BSM packet is seen, so resetting accept_nofwd_bsm to false */
+	if (pim->global_scope.accept_nofwd_bsm)
+		pim->global_scope.accept_nofwd_bsm = false;
+
+	if (!pim_addr_cmp(sg->grp, qpim_all_pim_routers_addr)) {
+		/* Multicast BSMs are only accepted if source interface & IP
+		 * match RPF towards the BSR's IP address, or they have
+		 * no-forward set
+		 */
+		if (!no_fwd &&
+		    !pim_nht_bsr_rpf_check(pim, bsr_addr, ifp, sg->src)) {
+			if (PIM_DEBUG_BSM)
+				zlog_debug(
+					"BSM check: RPF to BSR %pPAs is not %pPA%%%s",
+					&bsr_addr, &sg->src, ifp->name);
+			pim->bsm_dropped++;
+			return -1;
+		}
+	} else if (if_address_is_local(&sg->grp, PIM_AF, pim->vrf->vrf_id)) {
+		/* Unicast BSM received - if ucast bsm not enabled on
+		 * the interface, drop it
+		 */
+		if (!pim_ifp->ucast_bsm_accept) {
+			if (PIM_DEBUG_BSM)
+				zlog_debug(
+					"%s : Unicast BSM not enabled on interface %s",
+					__func__, ifp->name);
+			pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
+			pim->bsm_dropped++;
+			return -1;
+		}
+
+	} else {
+		/* Neither ALL-PIM-ROUTERS nor one of our own addresses */
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s : Invalid destination address",
+				   __func__);
+		pim->bsm_dropped++;
+		return -1;
+	}
+
+	if (empty_bsm) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug("%s : Empty Pref BSM received", __func__);
+	}
+	/* Parse Update bsm rp table and install/uninstall rp if required */
+	if (!pim_bsm_parse_install_g2rp(
+		    &pim_ifp->pim->global_scope,
+		    (buf + PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN),
+		    (buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN),
+		    frag_tag)) {
+		if (PIM_DEBUG_BSM) {
+			zlog_debug("%s, Parsing BSM failed.", __func__);
+		}
+		pim->bsm_dropped++;
+		return -1;
+	}
+	/* Restart the bootstrap timer */
+	pim_bs_timer_restart(&pim_ifp->pim->global_scope,
+			     PIM_BSR_DEFAULT_TIMEOUT);
+
+	/* If new BSM received, clear the old bsm database */
+	if (pim_ifp->pim->global_scope.bsm_frag_tag != frag_tag) {
+		if (PIM_DEBUG_BSM) {
+			zlog_debug("%s: Current frag tag: %d Frag teg rcvd: %d",
+				   __func__,
+				   pim_ifp->pim->global_scope.bsm_frag_tag,
+				   frag_tag);
+		}
+		pim_bsm_frags_free(&pim_ifp->pim->global_scope);
+		pim_ifp->pim->global_scope.bsm_frag_tag = frag_tag;
+	}
+
+	/* update the scope information from bsm */
+	pim_bsm_update(pim, bsr_addr, bshdr->bsr_prio);
+
+	if (!no_fwd) {
+		/* forward the whole fragment, then cache a copy so it can
+		 * be replayed to neighbors that appear later
+		 */
+		pim_bsm_fwd_whole_sz(pim_ifp->pim, buf, buf_size, sz);
+		bsfrag = XCALLOC(MTYPE_PIM_BSM_FRAG,
+				 sizeof(struct bsm_frag) + buf_size);
+
+		bsfrag->size = buf_size;
+		memcpy(bsfrag->data, buf, buf_size);
+		bsm_frags_add_tail(pim_ifp->pim->global_scope.bsm_frags,
+				   bsfrag);
+	}
+
+	return 0;
+}
diff --git a/pimd/pim_bsm.h b/pimd/pim_bsm.h
new file mode 100644
index 0000000..fb09e3b
--- /dev/null
+++ b/pimd/pim_bsm.h
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * pim_bsm.h: PIM BSM handling related
+ *
+ * Copyright (C) 2018-19 Vmware, Inc.
+ * Saravanan K
+ */
+
+#ifndef __PIM_BSM_H__
+#define __PIM_BSM_H__
+
+#include "if.h"
+#include "vty.h"
+#include "typesafe.h"
+#include "table.h"
+#include "pim_rp.h"
+#include "pim_msg.h"
+
+/* Defines */
+#define PIM_GBL_SZ_ID 0 /* global scope zone id set to 0 */
+#define PIM_BS_TIME 60 /* RFC 5059 - Sec 5 */
+#define PIM_BSR_DEFAULT_TIMEOUT 130 /* RFC 5059 - Sec 5 */
+
+/* These structures are only encoded IPv4 specific */
+#define PIM_BSM_HDR_LEN sizeof(struct bsm_hdr)
+#define PIM_BSM_GRP_LEN sizeof(struct bsmmsg_grpinfo)
+#define PIM_BSM_RP_LEN sizeof(struct bsmmsg_rpinfo)
+
+#define PIM_MIN_BSM_LEN \
+ (PIM_HDR_LEN + PIM_BSM_HDR_LEN + PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN)
+
+/* Datastructures
+ * ==============
+ */
+
+/* Non candidate BSR states (this router is not itself a BSR candidate;
+ * it only tracks which BSMs it will accept, per RFC 5059 section 3.1)
+ */
+enum ncbsr_state {
+	NO_INFO = 0,
+	ACCEPT_ANY,
+	ACCEPT_PREFERRED
+};
+
+PREDECL_DLIST(bsm_frags);
+
+/* BSM scope - bsm processing is per scope */
+struct bsm_scope {
+	int sz_id;		      /* scope zone id */
+	enum ncbsr_state state;	      /* non candidate BSR state */
+	bool accept_nofwd_bsm;	      /* no fwd bsm accepted for scope */
+	pim_addr current_bsr;	      /* current elected BSR for the sz */
+	uint32_t current_bsr_prio;    /* current BSR priority */
+	int64_t current_bsr_first_ts; /* current BSR elected time */
+	int64_t current_bsr_last_ts;  /* Last BSM received from E-BSR */
+	uint16_t bsm_frag_tag;	      /* Last received frag tag from E-BSR */
+	uint8_t hashMasklen;	      /* Mask in hash calc RFC 7761 4.7.2 */
+	struct pim_instance *pim;     /* Back pointer to pim instance */
+
+	/* current set of fragments for forwarding */
+	struct bsm_frags_head bsm_frags[1];
+
+	struct route_table *bsrp_table; /* group2rp mapping rcvd from BSR */
+	struct event *bs_timer;		/* Boot strap timer */
+};
+
+/* BSM packet (= fragment) - this is stored as list in bsm_frags inside scope
+ * This is used for forwarding to new neighbors or restarting mcast routers
+ *
+ * Allocated as sizeof(struct bsm_frag) + size bytes; the packet bytes
+ * live in the trailing flexible array member.
+ */
+struct bsm_frag {
+	struct bsm_frags_item item;
+
+	uint32_t size;	 /* size of the packet */
+	uint8_t data[];	 /* Actual packet (C99 flexible array member) */
+};
+
+DECLARE_DLIST(bsm_frags, struct bsm_frag, item);
+
+PREDECL_SORTLIST_UNIQ(bsm_rpinfos);
+
+/* This is the group node of the bsrp table in scope.
+ * this node maintains the list of rp for the group.
+ */
+struct bsgrp_node {
+	struct prefix group;	/* Group range */
+	struct bsm_scope *scope; /* Back ptr to scope */
+
+	/* RPs advertised by BSR, and temporary list while receiving new set */
+	struct bsm_rpinfos_head bsrp_list[1];
+	struct bsm_rpinfos_head partial_bsrp_list[1];
+
+	/* Total RP - Received RP; when this drops to 0 the partial list
+	 * replaces bsrp_list (see pim_instate_pend_list())
+	 */
+	int pend_rp_cnt;
+	uint16_t frag_tag; /* frag tag to identify the fragment */
+};
+
+/* Items on [partial_]bsrp_list above.
+ * Holds info of each candidate RP received for the bsgrp_node's prefix.
+ */
+struct bsm_rpinfo {
+	struct bsm_rpinfos_item item;
+
+	uint32_t hash;	      /* Hash Value as per RFC 7761 4.7.2 */
+	uint32_t elapse_time; /* upd at expiry of elected RP node */
+	uint16_t rp_prio;     /* RP priority */
+	uint16_t rp_holdtime; /* RP holdtime - g2rp timer value */
+	pim_addr rp_address;  /* RP Address */
+	struct bsgrp_node *bsgrp_node; /* Back ptr to bsgrp_node */
+	struct event *g2rp_timer;      /* Run only for elected RP node */
+};
+
+/* Sort comparator for bsm_rpinfos lists (defined in pim_bsm.c) */
+extern int pim_bsm_rpinfo_cmp(const struct bsm_rpinfo *a,
+			      const struct bsm_rpinfo *b);
+DECLARE_SORTLIST_UNIQ(bsm_rpinfos, struct bsm_rpinfo, item, pim_bsm_rpinfo_cmp);
+
+/* Structures to extract Bootstrap Message header and Grp to RP Mappings
+ * =====================================================================
+ * BSM Format:
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |PIM Ver| Type |N| Reserved | Checksum | PIM HDR
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Fragment Tag | Hash Mask Len | BSR Priority | BS HDR(1)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | BSR Address (Encoded-Unicast format) | BS HDR(2)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Group Address 1 (Encoded-Group format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Count 1 | Frag RP Cnt 1 | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address 1 (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP1 Holdtime | RP1 Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address 2 (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP2 Holdtime | RP2 Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . |
+ * | . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address m (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RPm Holdtime | RPm Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Group Address 2 (Encoded-Group format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . |
+ * | . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Group Address n (Encoded-Group format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Count n | Frag RP Cnt n | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address 1 (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP1 Holdtime | RP1 Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address 2 (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP2 Holdtime | RP2 Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . |
+ * | . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address m (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RPm Holdtime | RPm Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+/* On-the-wire BSM header (follows the PIM header). All multi-byte
+ * fields are in network byte order; struct is packed to match the
+ * wire layout exactly.
+ */
+struct bsm_hdr {
+	uint16_t frag_tag;
+	uint8_t hm_len;
+	uint8_t bsr_prio;
+#if PIM_IPV == 4
+	struct pim_encoded_ipv4_unicast bsr_addr;
+#else
+	struct pim_encoded_ipv6_unicast bsr_addr;
+#endif
+} __attribute__((packed));
+
+/* Wire-format per-group TLV: group range plus RP counts for it */
+struct bsmmsg_grpinfo {
+#if PIM_IPV == 4
+	struct pim_encoded_group_ipv4 group;
+#else
+	struct pim_encoded_group_ipv6 group;
+#endif
+	uint8_t rp_count;
+	uint8_t frag_rp_count;
+	uint16_t reserved;
+} __attribute__((packed));
+
+/* Wire-format per-RP TLV; rp_holdtime is network byte order on the wire */
+struct bsmmsg_rpinfo {
+#if PIM_IPV == 4
+	struct pim_encoded_ipv4_unicast rpaddr;
+#else
+	struct pim_encoded_ipv6_unicast rpaddr;
+#endif
+	uint16_t rp_holdtime;
+	uint8_t rp_pri;
+	uint8_t reserved;
+} __attribute__((packed));
+
+/* API */
+void pim_bsm_proc_init(struct pim_instance *pim);
+void pim_bsm_proc_free(struct pim_instance *pim);
+void pim_bsm_clear(struct pim_instance *pim);
+void pim_bsm_write_config(struct vty *vty, struct interface *ifp);
+int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
+ uint32_t buf_size, bool no_fwd);
+bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp);
+struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
+ struct prefix *grp);
+#endif
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
new file mode 100644
index 0000000..2e90cf9
--- /dev/null
+++ b/pimd/pim_cmd.c
@@ -0,0 +1,6734 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "lib/json.h"
+#include "command.h"
+#include "if.h"
+#include "prefix.h"
+#include "zclient.h"
+#include "plist.h"
+#include "hash.h"
+#include "nexthop.h"
+#include "vrf.h"
+#include "ferr.h"
+
+#include "pimd.h"
+#include "pim_mroute.h"
+#include "pim_cmd.h"
+#include "pim_iface.h"
+#include "pim_vty.h"
+#include "pim_mroute.h"
+#include "pim_str.h"
+#include "pim_igmp.h"
+#include "pim_igmpv3.h"
+#include "pim_sock.h"
+#include "pim_time.h"
+#include "pim_util.h"
+#include "pim_oil.h"
+#include "pim_neighbor.h"
+#include "pim_pim.h"
+#include "pim_ifchannel.h"
+#include "pim_hello.h"
+#include "pim_msg.h"
+#include "pim_upstream.h"
+#include "pim_rpf.h"
+#include "pim_macro.h"
+#include "pim_ssmpingd.h"
+#include "pim_zebra.h"
+#include "pim_static.h"
+#include "pim_rp.h"
+#include "pim_zlookup.h"
+#include "pim_msdp.h"
+#include "pim_ssm.h"
+#include "pim_nht.h"
+#include "pim_bfd.h"
+#include "pim_vxlan.h"
+#include "pim_mlag.h"
+#include "bfd.h"
+#include "pim_bsm.h"
+#include "lib/northbound_cli.h"
+#include "pim_errors.h"
+#include "pim_nb.h"
+#include "pim_addr.h"
+#include "pim_cmd_common.h"
+
+#include "pimd/pim_cmd_clippy.c"
+
+/* CLI "debug" node; config_write callback emits the daemon's
+ * `debug pim ...` configuration lines.
+ */
+static struct cmd_node debug_node = {
+	.name = "debug",
+	.node = DEBUG_NODE,
+	.prompt = "",
+	.config_write = pim_debug_config_write,
+};
+
+/* Resolve the VRF for a show command: use the "NAME" token if present,
+ * otherwise the default VRF. On failure prints an error (or empty JSON
+ * when uj is set) and returns NULL.
+ *
+ * Fix: previously the error message always printed argv[*idx]->arg even
+ * when no "NAME" token was found, dereferencing an index that never
+ * matched and printing an unrelated token.
+ */
+static struct vrf *pim_cmd_lookup_vrf(struct vty *vty, struct cmd_token *argv[],
+				      const int argc, int *idx, bool uj)
+{
+	struct vrf *vrf;
+	bool have_name = argv_find(argv, argc, "NAME", idx);
+
+	if (have_name)
+		vrf = vrf_lookup_by_name(argv[*idx]->arg);
+	else
+		vrf = vrf_lookup_by_id(VRF_DEFAULT);
+
+	if (!vrf) {
+		if (uj)
+			vty_json_empty(vty);
+		else
+			vty_out(vty, "Specified VRF: %s does not exist\n",
+				have_name ? argv[*idx]->arg : "default");
+	}
+
+	return vrf;
+}
+
+/* Print one assert-state table row for interface channel @ch */
+static void pim_show_assert_helper(struct vty *vty,
+				   struct pim_interface *pim_ifp,
+				   struct pim_ifchannel *ch, time_t now)
+{
+	char winner_str[INET_ADDRSTRLEN];
+	char uptime[10];
+	char timer[10];
+	char addrbuf[PREFIX_STRLEN];
+	struct in_addr primary = pim_ifp->primary_address;
+
+	pim_time_uptime(uptime, sizeof(uptime), now - ch->ifassert_creation);
+	pim_time_timer_to_mmss(timer, sizeof(timer), ch->t_ifassert_timer);
+	pim_inet4_dump("<assrt_win?>", ch->ifassert_winner, winner_str,
+		       sizeof(winner_str));
+
+	vty_out(vty, "%-16s %-15s %-15pPAs %-15pPAs %-6s %-15s %-8s %-5s\n",
+		ch->interface->name,
+		inet_ntop(AF_INET, &primary, addrbuf, sizeof(addrbuf)),
+		&ch->sg.src, &ch->sg.grp,
+		pim_ifchannel_ifassert_name(ch->ifassert_state), winner_str,
+		uptime, timer);
+}
+
+/* "show ip pim assert": one row per interface channel with active
+ * assert state.
+ */
+static void pim_show_assert(struct pim_instance *pim, struct vty *vty)
+{
+	struct interface *ifp;
+	time_t now = pim_time_monotonic_sec();
+
+	vty_out(vty,
+		"Interface Address Source Group State Winner Uptime Timer\n");
+
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		struct pim_interface *pim_ifp = ifp->info;
+		struct pim_ifchannel *ch;
+
+		if (!pim_ifp)
+			continue;
+
+		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
+			/* skip channels with no assert info */
+			if (ch->ifassert_state == PIM_IFASSERT_NOINFO)
+				continue;
+
+			pim_show_assert_helper(vty, pim_ifp, ch, now);
+		}
+	}
+}
+
+/* Print one row of the assert-internal table: the four CA/eCA/ATD/eATD
+ * flags for interface channel @ch.
+ */
+static void pim_show_assert_internal_helper(struct vty *vty,
+					    struct pim_interface *pim_ifp,
+					    struct pim_ifchannel *ch)
+{
+	char addrbuf[PREFIX_STRLEN];
+	struct in_addr primary = pim_ifp->primary_address;
+	const char *ca_str =
+		PIM_IF_FLAG_TEST_COULD_ASSERT(ch->flags) ? "yes" : "no";
+	const char *eca_str = pim_macro_ch_could_assert_eval(ch) ? "yes" : "no";
+	const char *atd_str =
+		PIM_IF_FLAG_TEST_ASSERT_TRACKING_DESIRED(ch->flags) ? "yes"
+								    : "no";
+	const char *eatd_str =
+		pim_macro_assert_tracking_desired_eval(ch) ? "yes" : "no";
+
+	vty_out(vty, "%-16s %-15s %-15pPAs %-15pPAs %-3s %-3s %-3s %-4s\n",
+		ch->interface->name,
+		inet_ntop(AF_INET, &primary, addrbuf, sizeof(addrbuf)),
+		&ch->sg.src, &ch->sg.grp, ca_str, eca_str, atd_str, eatd_str);
+}
+
+/* "show ip pim assert-internal": dump CA/eCA/ATD/eATD flags for every
+ * interface channel.
+ */
+static void pim_show_assert_internal(struct pim_instance *pim, struct vty *vty)
+{
+	struct interface *ifp;
+
+	vty_out(vty,
+		"CA: CouldAssert\n"
+		"ECA: Evaluate CouldAssert\n"
+		"ATD: AssertTrackingDesired\n"
+		"eATD: Evaluate AssertTrackingDesired\n\n");
+
+	vty_out(vty,
+		"Interface Address Source Group CA eCA ATD eATD\n");
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		struct pim_interface *pim_ifp = ifp->info;
+		struct pim_ifchannel *ch;
+
+		if (!pim_ifp)
+			continue;
+
+		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb)
+			pim_show_assert_internal_helper(vty, pim_ifp, ch);
+	}
+}
+
+/* Print one row of the assert-metric table: this router's own SPT
+ * assert metric for interface channel @ch.
+ */
+static void pim_show_assert_metric_helper(struct vty *vty,
+					  struct pim_interface *pim_ifp,
+					  struct pim_ifchannel *ch)
+{
+	struct pim_assert_metric metric;
+	char addr_str[INET_ADDRSTRLEN];
+	char ifaddr_buf[PREFIX_STRLEN];
+
+	metric = pim_macro_spt_assert_metric(&ch->upstream->rpf,
+					     pim_ifp->primary_address);
+	pim_inet4_dump("<addr?>", metric.ip_address, addr_str,
+		       sizeof(addr_str));
+
+	vty_out(vty, "%-16s %-15s %-15pPAs %-15pPAs %-3s %4u %6u %-15s\n",
+		ch->interface->name,
+		inet_ntop(AF_INET, &pim_ifp->primary_address, ifaddr_buf,
+			  sizeof(ifaddr_buf)),
+		&ch->sg.src, &ch->sg.grp, metric.rpt_bit_flag ? "yes" : "no",
+		metric.metric_preference, metric.route_metric, addr_str);
+}
+
+/* "show ip pim assert-metric": one row per interface channel */
+static void pim_show_assert_metric(struct pim_instance *pim, struct vty *vty)
+{
+	struct interface *ifp;
+
+	vty_out(vty,
+		"Interface Address Source Group RPT Pref Metric Address \n");
+
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		struct pim_interface *pim_ifp = ifp->info;
+		struct pim_ifchannel *ch;
+
+		if (!pim_ifp)
+			continue;
+
+		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb)
+			pim_show_assert_metric_helper(vty, pim_ifp, ch);
+	}
+}
+
+/* Print one row of the assert winner-metric table for channel @ch.
+ * "INFI" marks the infinite (worst possible) preference/metric.
+ */
+static void pim_show_assert_winner_metric_helper(struct vty *vty,
+						 struct pim_interface *pim_ifp,
+						 struct pim_ifchannel *ch)
+{
+	struct pim_assert_metric *winner = &ch->ifassert_winner_metric;
+	char addr_str[INET_ADDRSTRLEN];
+	char pref_str[16];
+	char metr_str[16];
+	char ifaddr_buf[PREFIX_STRLEN];
+
+	pim_inet4_dump("<addr?>", winner->ip_address, addr_str,
+		       sizeof(addr_str));
+
+	if (winner->metric_preference == PIM_ASSERT_METRIC_PREFERENCE_MAX)
+		snprintf(pref_str, sizeof(pref_str), "INFI");
+	else
+		snprintf(pref_str, sizeof(pref_str), "%4u",
+			 winner->metric_preference);
+
+	if (winner->route_metric == PIM_ASSERT_ROUTE_METRIC_MAX)
+		snprintf(metr_str, sizeof(metr_str), "INFI");
+	else
+		snprintf(metr_str, sizeof(metr_str), "%6u",
+			 winner->route_metric);
+
+	vty_out(vty, "%-16s %-15s %-15pPAs %-15pPAs %-3s %-4s %-6s %-15s\n",
+		ch->interface->name,
+		inet_ntop(AF_INET, &pim_ifp->primary_address, ifaddr_buf,
+			  sizeof(ifaddr_buf)),
+		&ch->sg.src, &ch->sg.grp, winner->rpt_bit_flag ? "yes" : "no",
+		pref_str, metr_str, addr_str);
+}
+
+/* "show ip pim assert-winner-metric": one row per interface channel */
+static void pim_show_assert_winner_metric(struct pim_instance *pim,
+					  struct vty *vty)
+{
+	struct interface *ifp;
+
+	vty_out(vty,
+		"Interface Address Source Group RPT Pref Metric Address \n");
+
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		struct pim_interface *pim_ifp = ifp->info;
+		struct pim_ifchannel *ch;
+
+		if (!pim_ifp)
+			continue;
+
+		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb)
+			pim_show_assert_winner_metric_helper(vty, pim_ifp, ch);
+	}
+}
+
+/* "show ip igmp interface": one row (or JSON object) per IGMP socket
+ * on each PIM-enabled interface, showing querier state and timers.
+ */
+static void igmp_show_interfaces(struct pim_instance *pim, struct vty *vty,
+				 bool uj)
+{
+	struct interface *ifp;
+	time_t now;
+	char buf[PREFIX_STRLEN];
+	json_object *json = NULL;
+	json_object *json_row = NULL;
+
+	now = pim_time_monotonic_sec();
+
+	if (uj)
+		json = json_object_new_object();
+	else
+		vty_out(vty,
+			"Interface State Address V Querier QuerierIp Query Timer Uptime\n");
+
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		struct pim_interface *pim_ifp;
+		struct listnode *sock_node;
+		struct gm_sock *igmp;
+
+		pim_ifp = ifp->info;
+
+		if (!pim_ifp)
+			continue;
+
+		for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_socket_list, sock_node,
+					  igmp)) {
+			char uptime[10];
+			char query_hhmmss[10];
+
+			pim_time_uptime(uptime, sizeof(uptime),
+					now - igmp->sock_creation);
+			pim_time_timer_to_hhmmss(query_hhmmss,
+						 sizeof(query_hhmmss),
+						 igmp->t_igmp_query_timer);
+
+			if (uj) {
+				json_row = json_object_new_object();
+				json_object_pim_ifp_add(json_row, ifp);
+				json_object_string_add(json_row, "upTime",
+						       uptime);
+				json_object_int_add(json_row, "version",
+						    pim_ifp->igmp_version);
+
+				/* a running query timer means we are querier */
+				if (igmp->t_igmp_query_timer) {
+					json_object_boolean_true_add(json_row,
+								     "querier");
+					json_object_string_add(json_row,
+							       "queryTimer",
+							       query_hhmmss);
+				}
+				json_object_string_addf(json_row, "querierIp",
+							"%pI4",
+							&igmp->querier_addr);
+
+				json_object_object_add(json, ifp->name,
+						       json_row);
+
+				/* NOTE(review): mtraceOnly is added to
+				 * json_row after it was attached to json;
+				 * json-c keeps the reference so it still
+				 * shows up - confirm intended ordering.
+				 */
+				if (igmp->mtrace_only) {
+					json_object_boolean_true_add(
+						json_row, "mtraceOnly");
+				}
+			} else {
+				vty_out(vty,
+					"%-16s %5s %15s %d %7s %17pI4 %11s %8s\n",
+					ifp->name,
+					if_is_up(ifp)
+						? (igmp->mtrace_only ? "mtrc"
+								     : "up")
+						: "down",
+					inet_ntop(AF_INET, &igmp->ifaddr, buf,
+						  sizeof(buf)),
+					pim_ifp->igmp_version,
+					igmp->t_igmp_query_timer ? "local"
+								 : "other",
+					&igmp->querier_addr, query_hhmmss,
+					uptime);
+			}
+		}
+	}
+
+	if (uj)
+		vty_json(vty, json);
+}
+
+/* "show ip igmp interface <ifname|detail>": detailed per-socket IGMP
+ * state for one interface (or all, when ifname is "detail"): querier
+ * role, derived RFC 3376 timer values, and interface flags.
+ */
+static void igmp_show_interfaces_single(struct pim_instance *pim,
+					struct vty *vty, const char *ifname,
+					bool uj)
+{
+	struct gm_sock *igmp;
+	struct interface *ifp;
+	struct listnode *sock_node;
+	struct pim_interface *pim_ifp;
+	char uptime[10];
+	char query_hhmmss[10];
+	char other_hhmmss[10];
+	int found_ifname = 0;
+	int sqi;
+	long gmi_msec; /* Group Membership Interval */
+	long lmqt_msec;
+	long ohpi_msec;
+	long oqpi_msec; /* Other Querier Present Interval */
+	long qri_msec;
+	time_t now;
+	int lmqc;
+
+	json_object *json = NULL;
+	json_object *json_row = NULL;
+
+	if (uj)
+		json = json_object_new_object();
+
+	now = pim_time_monotonic_sec();
+
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		pim_ifp = ifp->info;
+
+		if (!pim_ifp)
+			continue;
+
+		/* "detail" matches every interface */
+		if (strcmp(ifname, "detail") && strcmp(ifname, ifp->name))
+			continue;
+
+		for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_socket_list, sock_node,
+					  igmp)) {
+			found_ifname = 1;
+			pim_time_uptime(uptime, sizeof(uptime),
+					now - igmp->sock_creation);
+			pim_time_timer_to_hhmmss(query_hhmmss,
+						 sizeof(query_hhmmss),
+						 igmp->t_igmp_query_timer);
+			pim_time_timer_to_hhmmss(other_hhmmss,
+						 sizeof(other_hhmmss),
+						 igmp->t_other_querier_timer);
+
+			/* derived intervals per RFC 3376 section 8 */
+			gmi_msec = PIM_IGMP_GMI_MSEC(
+				igmp->querier_robustness_variable,
+				igmp->querier_query_interval,
+				pim_ifp->gm_query_max_response_time_dsec);
+
+			sqi = PIM_IGMP_SQI(pim_ifp->gm_default_query_interval);
+
+			oqpi_msec = PIM_IGMP_OQPI_MSEC(
+				igmp->querier_robustness_variable,
+				igmp->querier_query_interval,
+				pim_ifp->gm_query_max_response_time_dsec);
+
+			lmqt_msec = PIM_IGMP_LMQT_MSEC(
+				pim_ifp->gm_specific_query_max_response_time_dsec,
+				pim_ifp->gm_last_member_query_count);
+
+			/* OHPI macro yields deciseconds; convert to msec */
+			ohpi_msec =
+				PIM_IGMP_OHPI_DSEC(
+					igmp->querier_robustness_variable,
+					igmp->querier_query_interval,
+					pim_ifp->gm_query_max_response_time_dsec) *
+				100;
+
+			qri_msec =
+				pim_ifp->gm_query_max_response_time_dsec * 100;
+			lmqc = pim_ifp->gm_last_member_query_count;
+
+			if (uj) {
+				json_row = json_object_new_object();
+				json_object_pim_ifp_add(json_row, ifp);
+				json_object_string_add(json_row, "upTime",
+						       uptime);
+				json_object_string_add(json_row, "querier",
+						       igmp->t_igmp_query_timer
+							       ? "local"
+							       : "other");
+				json_object_string_addf(json_row, "querierIp",
+							"%pI4",
+							&igmp->querier_addr);
+				json_object_int_add(json_row, "queryStartCount",
+						    igmp->startup_query_count);
+				json_object_string_add(json_row,
+						       "queryQueryTimer",
+						       query_hhmmss);
+				json_object_string_add(json_row,
+						       "queryOtherTimer",
+						       other_hhmmss);
+				json_object_int_add(json_row, "version",
+						    pim_ifp->igmp_version);
+				json_object_int_add(
+					json_row,
+					"timerGroupMembershipIntervalMsec",
+					gmi_msec);
+				json_object_int_add(json_row,
+						    "lastMemberQueryCount",
+						    lmqc);
+				json_object_int_add(json_row,
+						    "timerLastMemberQueryMsec",
+						    lmqt_msec);
+				json_object_int_add(
+					json_row,
+					"timerOlderHostPresentIntervalMsec",
+					ohpi_msec);
+				json_object_int_add(
+					json_row,
+					"timerOtherQuerierPresentIntervalMsec",
+					oqpi_msec);
+				json_object_int_add(
+					json_row, "timerQueryInterval",
+					igmp->querier_query_interval);
+				json_object_int_add(
+					json_row,
+					"timerQueryResponseIntervalMsec",
+					qri_msec);
+				json_object_int_add(
+					json_row, "timerRobustnessVariable",
+					igmp->querier_robustness_variable);
+				json_object_int_add(json_row,
+						    "timerStartupQueryInterval",
+						    sqi);
+
+				json_object_object_add(json, ifp->name,
+						       json_row);
+
+				/* NOTE(review): mtraceOnly added after the
+				 * row was attached to json; json-c keeps the
+				 * reference so it still appears - confirm
+				 * intended ordering.
+				 */
+				if (igmp->mtrace_only) {
+					json_object_boolean_true_add(
+						json_row, "mtraceOnly");
+				}
+			} else {
+				vty_out(vty, "Interface : %s\n", ifp->name);
+				vty_out(vty, "State : %s\n",
+					if_is_up(ifp) ? (igmp->mtrace_only ?
+							 "mtrace"
+							 : "up")
+					: "down");
+				vty_out(vty, "Address : %pI4\n",
+					&pim_ifp->primary_address);
+				vty_out(vty, "Uptime : %s\n", uptime);
+				vty_out(vty, "Version : %d\n",
+					pim_ifp->igmp_version);
+				vty_out(vty, "\n");
+				vty_out(vty, "\n");
+
+				vty_out(vty, "Querier\n");
+				vty_out(vty, "-------\n");
+				vty_out(vty, "Querier : %s\n",
+					igmp->t_igmp_query_timer ? "local"
+								 : "other");
+				vty_out(vty, "QuerierIp : %pI4",
+					&igmp->querier_addr);
+				if (pim_ifp->primary_address.s_addr
+				    == igmp->querier_addr.s_addr)
+					vty_out(vty, " (this router)\n");
+				else
+					vty_out(vty, "\n");
+
+				vty_out(vty, "Start Count : %d\n",
+					igmp->startup_query_count);
+				vty_out(vty, "Query Timer : %s\n",
+					query_hhmmss);
+				vty_out(vty, "Other Timer : %s\n",
+					other_hhmmss);
+				vty_out(vty, "\n");
+				vty_out(vty, "\n");
+
+				vty_out(vty, "Timers\n");
+				vty_out(vty, "------\n");
+				vty_out(vty,
+					"Group Membership Interval : %lis\n",
+					gmi_msec / 1000);
+				vty_out(vty,
+					"Last Member Query Count : %d\n",
+					lmqc);
+				vty_out(vty,
+					"Last Member Query Time : %lis\n",
+					lmqt_msec / 1000);
+				vty_out(vty,
+					"Older Host Present Interval : %lis\n",
+					ohpi_msec / 1000);
+				vty_out(vty,
+					"Other Querier Present Interval : %lis\n",
+					oqpi_msec / 1000);
+				vty_out(vty,
+					"Query Interval : %ds\n",
+					igmp->querier_query_interval);
+				vty_out(vty,
+					"Query Response Interval : %lis\n",
+					qri_msec / 1000);
+				vty_out(vty,
+					"Robustness Variable : %d\n",
+					igmp->querier_robustness_variable);
+				vty_out(vty,
+					"Startup Query Interval : %ds\n",
+					sqi);
+				vty_out(vty, "\n");
+				vty_out(vty, "\n");
+
+				pim_print_ifp_flags(vty, ifp);
+			}
+		}
+	}
+
+	if (uj)
+		vty_json(vty, json);
+	else if (!found_ifname)
+		vty_out(vty, "%% No such interface\n");
+}
+
+/* "show ip igmp join": list locally-configured IGMP joins per
+ * interface, as a table or as JSON grouped by interface name.
+ */
+static void igmp_show_interface_join(struct pim_instance *pim, struct vty *vty,
+				     bool uj)
+{
+	struct interface *ifp;
+	time_t now;
+	json_object *json = NULL;
+	json_object *json_iface = NULL;
+	json_object *json_grp = NULL;
+	json_object *json_grp_arr = NULL;
+
+	now = pim_time_monotonic_sec();
+
+	if (uj) {
+		json = json_object_new_object();
+		json_object_string_add(json, "vrf",
+				       vrf_id_to_name(pim->vrf->vrf_id));
+	} else {
+		vty_out(vty,
+			"Interface Address Source Group Socket Uptime \n");
+	}
+
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		struct pim_interface *pim_ifp;
+		struct listnode *join_node;
+		struct gm_join *ij;
+		struct in_addr pri_addr;
+		char pri_addr_str[INET_ADDRSTRLEN];
+
+		pim_ifp = ifp->info;
+
+		if (!pim_ifp)
+			continue;
+
+		/* interface has no configured joins */
+		if (!pim_ifp->gm_join_list)
+			continue;
+
+		pri_addr = pim_find_primary_addr(ifp);
+		pim_inet4_dump("<pri?>", pri_addr, pri_addr_str,
+			       sizeof(pri_addr_str));
+
+		for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_join_list, join_node,
+					  ij)) {
+			char group_str[INET_ADDRSTRLEN];
+			char source_str[INET_ADDRSTRLEN];
+			char uptime[10];
+
+			pim_time_uptime(uptime, sizeof(uptime),
+					now - ij->sock_creation);
+			pim_inet4_dump("<grp?>", ij->group_addr, group_str,
+				       sizeof(group_str));
+			pim_inet4_dump("<src?>", ij->source_addr, source_str,
+				       sizeof(source_str));
+
+			if (uj) {
+				/* reuse the per-interface object if an
+				 * earlier join already created it
+				 */
+				json_object_object_get_ex(json, ifp->name,
+							  &json_iface);
+
+				if (!json_iface) {
+					json_iface = json_object_new_object();
+					json_object_string_add(
+						json_iface, "name", ifp->name);
+					json_object_object_add(json, ifp->name,
+							       json_iface);
+					json_grp_arr = json_object_new_array();
+					json_object_object_add(json_iface,
+							       "groups",
+							       json_grp_arr);
+				}
+
+				json_grp = json_object_new_object();
+				json_object_string_add(json_grp, "source",
+						       source_str);
+				json_object_string_add(json_grp, "group",
+						       group_str);
+				json_object_string_add(json_grp, "primaryAddr",
+						       pri_addr_str);
+				json_object_int_add(json_grp, "sockFd",
+						    ij->sock_fd);
+				json_object_string_add(json_grp, "upTime",
+						       uptime);
+				json_object_array_add(json_grp_arr, json_grp);
+			} else {
+				vty_out(vty,
+					"%-16s %-15s %-15s %-15s %6d %8s\n",
+					ifp->name, pri_addr_str, source_str,
+					group_str, ij->sock_fd, uptime);
+			}
+		} /* for (pim_ifp->gm_join_list) */
+
+	} /* for (iflist) */
+
+	if (uj)
+		vty_json(vty, json);
+}
+
+/*
+ * Aggregate IGMP statistics across all PIM-enabled interfaces of the VRF
+ * (or only @ifname when non-NULL) and print them, either as a single JSON
+ * row keyed by the interface name (or "global") or as plain text.
+ */
+static void igmp_show_statistics(struct pim_instance *pim, struct vty *vty,
+ const char *ifname, bool uj)
+{
+ struct interface *ifp;
+ struct igmp_stats igmp_stats;
+ bool found_ifname = false;
+ json_object *json = NULL;
+
+ igmp_stats_init(&igmp_stats);
+
+ if (uj)
+ json = json_object_new_object();
+
+ /* Accumulate counters from every matching interface into igmp_stats. */
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp;
+ struct listnode *sock_node, *source_node, *group_node;
+ struct gm_sock *igmp;
+ struct gm_group *group;
+ struct gm_source *src;
+
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ continue;
+
+ if (ifname && strcmp(ifname, ifp->name))
+ continue;
+
+ found_ifname = true;
+
+ igmp_stats.joins_failed += pim_ifp->igmp_ifstat_joins_failed;
+ igmp_stats.joins_sent += pim_ifp->igmp_ifstat_joins_sent;
+ igmp_stats.total_groups +=
+ pim_ifp->gm_group_list
+ ? listcount(pim_ifp->gm_group_list)
+ : 0;
+ igmp_stats.peak_groups += pim_ifp->igmp_peak_group_count;
+
+
+ /* Count (S,G) entries; sources with the "any" address are
+ * skipped since they do not represent a real source. */
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, group_node,
+ group)) {
+ for (ALL_LIST_ELEMENTS_RO(group->group_source_list,
+ source_node, src)) {
+ if (pim_addr_is_any(src->source_addr))
+ continue;
+
+ igmp_stats.total_source_groups++;
+ }
+ }
+
+ /* Fold in the per-socket message counters. */
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_socket_list, sock_node,
+ igmp)) {
+ igmp_stats_add(&igmp_stats, &igmp->igmp_stats);
+ }
+ }
+
+ /* No interface matched: emit an empty JSON object (vty_json also
+ * frees it) or a plain-text error. */
+ if (!found_ifname) {
+ if (uj)
+ vty_json(vty, json);
+ else
+ vty_out(vty, "%% No such interface\n");
+ return;
+ }
+
+ if (uj) {
+ json_object *json_row = json_object_new_object();
+
+ json_object_string_add(json_row, "name",
+ ifname ? ifname : "global");
+ json_object_int_add(json_row, "queryV1", igmp_stats.query_v1);
+ json_object_int_add(json_row, "queryV2", igmp_stats.query_v2);
+ json_object_int_add(json_row, "queryV3", igmp_stats.query_v3);
+ json_object_int_add(json_row, "leaveV2", igmp_stats.leave_v2);
+ json_object_int_add(json_row, "reportV1", igmp_stats.report_v1);
+ json_object_int_add(json_row, "reportV2", igmp_stats.report_v2);
+ json_object_int_add(json_row, "reportV3", igmp_stats.report_v3);
+ json_object_int_add(json_row, "mtraceResponse",
+ igmp_stats.mtrace_rsp);
+ json_object_int_add(json_row, "mtraceRequest",
+ igmp_stats.mtrace_req);
+ json_object_int_add(json_row, "unsupported",
+ igmp_stats.unsupported);
+ json_object_int_add(json_row, "totalReceivedMessages",
+ igmp_stats.total_recv_messages);
+ json_object_int_add(json_row, "peakGroups",
+ igmp_stats.peak_groups);
+ json_object_int_add(json_row, "totalGroups",
+ igmp_stats.total_groups);
+ json_object_int_add(json_row, "totalSourceGroups",
+ igmp_stats.total_source_groups);
+ json_object_int_add(json_row, "joinsFailed",
+ igmp_stats.joins_failed);
+ json_object_int_add(json_row, "joinsSent",
+ igmp_stats.joins_sent);
+ json_object_int_add(json_row, "generalQueriesSent",
+ igmp_stats.general_queries_sent);
+ json_object_int_add(json_row, "groupQueriesSent",
+ igmp_stats.group_queries_sent);
+ json_object_object_add(json, ifname ? ifname : "global",
+ json_row);
+ vty_json(vty, json);
+ } else {
+ vty_out(vty, "IGMP statistics\n");
+ vty_out(vty, "Interface : %s\n",
+ ifname ? ifname : "global");
+ vty_out(vty, "V1 query : %u\n",
+ igmp_stats.query_v1);
+ vty_out(vty, "V2 query : %u\n",
+ igmp_stats.query_v2);
+ vty_out(vty, "V3 query : %u\n",
+ igmp_stats.query_v3);
+ vty_out(vty, "V2 leave : %u\n",
+ igmp_stats.leave_v2);
+ vty_out(vty, "V1 report : %u\n",
+ igmp_stats.report_v1);
+ vty_out(vty, "V2 report : %u\n",
+ igmp_stats.report_v2);
+ vty_out(vty, "V3 report : %u\n",
+ igmp_stats.report_v3);
+ vty_out(vty, "mtrace response : %u\n",
+ igmp_stats.mtrace_rsp);
+ vty_out(vty, "mtrace request : %u\n",
+ igmp_stats.mtrace_req);
+ vty_out(vty, "unsupported : %u\n",
+ igmp_stats.unsupported);
+ vty_out(vty, "total received messages : %u\n",
+ igmp_stats.total_recv_messages);
+ vty_out(vty, "joins failed : %u\n",
+ igmp_stats.joins_failed);
+ vty_out(vty, "joins sent : %u\n",
+ igmp_stats.joins_sent);
+ vty_out(vty, "general queries sent : %u\n",
+ igmp_stats.general_queries_sent);
+ vty_out(vty, "group queries sent : %u\n",
+ igmp_stats.group_queries_sent);
+ vty_out(vty, "peak groups : %u\n",
+ igmp_stats.peak_groups);
+ vty_out(vty, "total groups : %u\n",
+ igmp_stats.total_groups);
+ vty_out(vty, "total source groups : %u\n",
+ igmp_stats.total_source_groups);
+ }
+}
+
+/*
+ * Append one JSON object describing an IGMP source (@src) to the
+ * @json_sources array.  @source_str, @mmss (source timer) and @uptime
+ * are pre-formatted strings supplied by the caller.  Ownership of the
+ * new object passes to the array.
+ */
+static void igmp_source_json_helper(struct gm_source *src,
+ json_object *json_sources, char *source_str,
+ char *mmss, char *uptime)
+{
+ json_object *json_source = NULL;
+
+ json_source = json_object_new_object();
+ if (!json_source)
+ return;
+
+ json_object_string_add(json_source, "source", source_str);
+ json_object_string_add(json_source, "timer", mmss);
+ json_object_boolean_add(json_source, "forwarded",
+ IGMP_SOURCE_TEST_FORWARDING(src->source_flags));
+ json_object_string_add(json_source, "uptime", uptime);
+ json_object_array_add(json_sources, json_source);
+}
+
+/*
+ * Print one IGMP group entry for interface @ifp, either into the JSON
+ * tree @json (per-interface object with a "groups" array) or as a text
+ * row on @vty.  With @detail set, per-source rows/objects are emitted
+ * instead of just a source count.
+ */
+static void igmp_group_print(struct interface *ifp, struct vty *vty, bool uj,
+ json_object *json, struct gm_group *grp,
+ time_t now, bool detail)
+{
+ json_object *json_iface = NULL;
+ json_object *json_group = NULL;
+ json_object *json_groups = NULL;
+ char group_str[INET_ADDRSTRLEN];
+ char hhmmss[PIM_TIME_STRLEN];
+ char uptime[PIM_TIME_STRLEN];
+
+ pim_inet4_dump("<group?>", grp->group_addr, group_str,
+ sizeof(group_str));
+ pim_time_timer_to_hhmmss(hhmmss, sizeof(hhmmss), grp->t_group_timer);
+ pim_time_uptime(uptime, sizeof(uptime), now - grp->group_creation);
+
+ if (uj) {
+ /* Lazily create the per-interface object on first group. */
+ json_object_object_get_ex(json, ifp->name, &json_iface);
+ if (!json_iface) {
+ json_iface = json_object_new_object();
+ if (!json_iface)
+ return;
+ json_object_pim_ifp_add(json_iface, ifp);
+ json_object_object_add(json, ifp->name, json_iface);
+ json_groups = json_object_new_array();
+ if (!json_groups)
+ return;
+ json_object_object_add(json_iface, "groups",
+ json_groups);
+ }
+
+ json_object_object_get_ex(json_iface, "groups", &json_groups);
+ if (json_groups) {
+ json_group = json_object_new_object();
+ if (!json_group)
+ return;
+
+ json_object_string_add(json_group, "group", group_str);
+ /* Filter mode only exists for IGMPv3 groups. */
+ if (grp->igmp_version == IGMP_DEFAULT_VERSION)
+ json_object_string_add(
+ json_group, "mode",
+ grp->group_filtermode_isexcl
+ ? "EXCLUDE"
+ : "INCLUDE");
+
+ json_object_string_add(json_group, "timer", hhmmss);
+ json_object_int_add(
+ json_group, "sourcesCount",
+ grp->group_source_list
+ ? listcount(grp->group_source_list)
+ : 0);
+ json_object_int_add(json_group, "version",
+ grp->igmp_version);
+ json_object_string_add(json_group, "uptime", uptime);
+ json_object_array_add(json_groups, json_group);
+
+ if (detail) {
+ struct listnode *srcnode;
+ struct gm_source *src;
+ json_object *json_sources = NULL;
+
+ json_sources = json_object_new_array();
+ if (!json_sources)
+ return;
+
+ json_object_object_add(json_group, "sources",
+ json_sources);
+
+ for (ALL_LIST_ELEMENTS_RO(
+ grp->group_source_list, srcnode,
+ src)) {
+ char source_str[INET_ADDRSTRLEN];
+ char mmss[PIM_TIME_STRLEN];
+ char src_uptime[PIM_TIME_STRLEN];
+
+ pim_inet4_dump(
+ "<source?>", src->source_addr,
+ source_str, sizeof(source_str));
+ pim_time_timer_to_mmss(
+ mmss, sizeof(mmss),
+ src->t_source_timer);
+ pim_time_uptime(
+ src_uptime, sizeof(src_uptime),
+ now - src->source_creation);
+
+ igmp_source_json_helper(
+ src, json_sources, source_str,
+ mmss, src_uptime);
+ }
+ }
+ }
+ } else {
+ if (detail) {
+ struct listnode *srcnode;
+ struct gm_source *src;
+
+ /* Detail mode: one text row per source. */
+ for (ALL_LIST_ELEMENTS_RO(grp->group_source_list,
+ srcnode, src)) {
+ char source_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<source?>", src->source_addr,
+ source_str, sizeof(source_str));
+
+ vty_out(vty,
+ "%-16s %-15s %4s %8s %-15s %d %8s\n",
+ ifp->name, group_str,
+ grp->igmp_version == 3
+ ? (grp->group_filtermode_isexcl
+ ? "EXCL"
+ : "INCL")
+ : "----",
+ hhmmss, source_str, grp->igmp_version,
+ uptime);
+ }
+ return;
+ }
+
+ /* Summary mode: one row per group with a source count. */
+ vty_out(vty, "%-16s %-15s %4s %8s %4d %d %8s\n", ifp->name,
+ group_str,
+ grp->igmp_version == 3
+ ? (grp->group_filtermode_isexcl ? "EXCL"
+ : "INCL")
+ : "----",
+ hhmmss,
+ grp->group_source_list
+ ? listcount(grp->group_source_list)
+ : 0,
+ grp->igmp_version, uptime);
+ }
+}
+
+/*
+ * Show IGMP group state for a single interface @ifname; when @grp_str
+ * is given, show only that group.  JSON (@uj) or text output.
+ */
+static void igmp_show_groups_interface_single(struct pim_instance *pim,
+ struct vty *vty, bool uj,
+ const char *ifname,
+ const char *grp_str, bool detail)
+{
+ struct interface *ifp;
+ time_t now;
+ json_object *json = NULL;
+ struct pim_interface *pim_ifp = NULL;
+ struct gm_group *grp;
+
+ now = pim_time_monotonic_sec();
+
+ if (uj) {
+ json = json_object_new_object();
+ if (!json)
+ return;
+ json_object_int_add(json, "totalGroups", pim->gm_group_count);
+ json_object_int_add(json, "watermarkLimit",
+ pim->gm_watermark_limit);
+ } else {
+ vty_out(vty, "Total IGMP groups: %u\n", pim->gm_group_count);
+ vty_out(vty, "Watermark warn limit(%s): %u\n",
+ pim->gm_watermark_limit ? "Set" : "Not Set",
+ pim->gm_watermark_limit);
+
+ if (!detail)
+ vty_out(vty,
+ "Interface        Group           Mode Timer    Srcs V Uptime\n");
+ else
+ vty_out(vty,
+ "Interface        Group           Mode Timer    Source          V Uptime\n");
+ }
+
+ /* Unknown or non-PIM interface: emit the (header-only) JSON and
+ * return; text mode simply prints nothing below the header. */
+ ifp = if_lookup_by_name(ifname, pim->vrf->vrf_id);
+ if (!ifp) {
+ if (uj)
+ vty_json(vty, json);
+ return;
+ }
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ if (uj)
+ vty_json(vty, json);
+ return;
+ }
+
+ if (grp_str) {
+ struct in_addr group_addr;
+ struct gm_sock *igmp;
+
+ /* Specific group requested: look it up via the IGMP socket
+ * bound to the interface's primary address. */
+ if (inet_pton(AF_INET, grp_str, &group_addr) == 1) {
+ igmp = pim_igmp_sock_lookup_ifaddr(
+ pim_ifp->gm_socket_list,
+ pim_ifp->primary_address);
+ if (igmp) {
+ grp = find_group_by_addr(igmp, group_addr);
+ if (grp)
+ igmp_group_print(ifp, vty, uj, json,
+ grp, now, detail);
+ }
+ }
+ } else {
+ struct listnode *grpnode;
+
+ /* scan igmp groups */
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode, grp))
+ igmp_group_print(ifp, vty, uj, json, grp, now, detail);
+ }
+
+ if (uj) {
+ if (detail)
+ vty_json_no_pretty(vty, json);
+ else
+ vty_json(vty, json);
+ }
+}
+
+/*
+ * Show IGMP group state for every PIM-enabled interface in the VRF;
+ * when @grp_str is given, restrict output to that single group.
+ * JSON (@uj) or text output; @detail adds per-source information.
+ */
+static void igmp_show_groups(struct pim_instance *pim, struct vty *vty, bool uj,
+ const char *grp_str, bool detail)
+{
+ struct interface *ifp;
+ time_t now;
+ json_object *json = NULL;
+
+ now = pim_time_monotonic_sec();
+
+ if (uj) {
+ json = json_object_new_object();
+ if (!json)
+ return;
+ json_object_int_add(json, "totalGroups", pim->gm_group_count);
+ json_object_int_add(json, "watermarkLimit",
+ pim->gm_watermark_limit);
+ } else {
+ vty_out(vty, "Total IGMP groups: %u\n", pim->gm_group_count);
+ vty_out(vty, "Watermark warn limit(%s): %u\n",
+ pim->gm_watermark_limit ? "Set" : "Not Set",
+ pim->gm_watermark_limit);
+ if (!detail)
+ vty_out(vty,
+ "Interface        Group           Mode Timer    Srcs V Uptime\n");
+ else
+ vty_out(vty,
+ "Interface        Group           Mode Timer    Source          V Uptime\n");
+ }
+
+ /* scan interfaces */
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp = ifp->info;
+ struct listnode *grpnode;
+ struct gm_group *grp;
+
+ if (!pim_ifp)
+ continue;
+
+ if (grp_str) {
+ struct in_addr group_addr;
+ struct gm_sock *igmp;
+
+ /* Specific group: resolve it via the socket bound to
+ * this interface's primary address. */
+ if (inet_pton(AF_INET, grp_str, &group_addr) == 1) {
+ igmp = pim_igmp_sock_lookup_ifaddr(
+ pim_ifp->gm_socket_list,
+ pim_ifp->primary_address);
+ if (igmp) {
+ grp = find_group_by_addr(igmp,
+ group_addr);
+ if (grp)
+ igmp_group_print(ifp, vty, uj,
+ json, grp, now,
+ detail);
+ }
+ }
+ } else {
+ /* scan igmp groups */
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list,
+ grpnode, grp))
+ igmp_group_print(ifp, vty, uj, json, grp, now,
+ detail);
+ }
+ } /* scan interfaces */
+
+ if (uj) {
+ if (detail)
+ vty_json_no_pretty(vty, json);
+ else
+ vty_json(vty, json);
+ }
+}
+
+/*
+ * Show, per group, the group-specific query retransmission timer, its
+ * remaining retransmit count, and how many of the group's sources still
+ * have pending retransmissions.  Text output only.
+ */
+static void igmp_show_group_retransmission(struct pim_instance *pim,
+ struct vty *vty)
+{
+ struct interface *ifp;
+
+ vty_out(vty,
+ "Interface        Group           RetTimer Counter RetSrcs\n");
+
+ /* scan interfaces */
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp = ifp->info;
+ struct listnode *grpnode;
+ struct gm_group *grp;
+
+ if (!pim_ifp)
+ continue;
+
+ /* scan igmp groups */
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode,
+ grp)) {
+ char group_str[INET_ADDRSTRLEN];
+ char grp_retr_mmss[10];
+ struct listnode *src_node;
+ struct gm_source *src;
+ int grp_retr_sources = 0;
+
+ pim_inet4_dump("<group?>", grp->group_addr, group_str,
+ sizeof(group_str));
+ pim_time_timer_to_mmss(
+ grp_retr_mmss, sizeof(grp_retr_mmss),
+ grp->t_group_query_retransmit_timer);
+
+
+ /* count group sources with retransmission state
+ */
+ for (ALL_LIST_ELEMENTS_RO(grp->group_source_list,
+ src_node, src)) {
+ if (src->source_query_retransmit_count > 0) {
+ ++grp_retr_sources;
+ }
+ }
+
+ vty_out(vty, "%-16s %-15s %-8s %7d %7d\n", ifp->name,
+ group_str, grp_retr_mmss,
+ grp->group_specific_query_retransmit_count,
+ grp_retr_sources);
+
+ } /* scan igmp groups */
+ } /* scan interfaces */
+}
+
+/*
+ * Print one IGMP source for (@ifp, @group_str): in JSON mode, attach it
+ * to json[ifname][group]["sources"], creating intermediate objects on
+ * demand; in text mode, print one formatted row on @vty.
+ */
+static void igmp_sources_print(struct interface *ifp, char *group_str,
+ struct gm_source *src, time_t now,
+ json_object *json, struct vty *vty, bool uj)
+{
+ json_object *json_iface = NULL;
+ json_object *json_group = NULL;
+ json_object *json_sources = NULL;
+ char source_str[INET_ADDRSTRLEN];
+ char mmss[PIM_TIME_STRLEN];
+ char uptime[PIM_TIME_STRLEN];
+
+ pim_inet4_dump("<source?>", src->source_addr, source_str,
+ sizeof(source_str));
+ pim_time_timer_to_mmss(mmss, sizeof(mmss), src->t_source_timer);
+ pim_time_uptime(uptime, sizeof(uptime), now - src->source_creation);
+
+ if (uj) {
+ /* Lazily build json[ifname] ... */
+ json_object_object_get_ex(json, ifp->name, &json_iface);
+ if (!json_iface) {
+ json_iface = json_object_new_object();
+ if (!json_iface)
+ return;
+ json_object_string_add(json_iface, "name", ifp->name);
+ json_object_object_add(json, ifp->name, json_iface);
+ }
+
+ /* ... then json[ifname][group] with its "sources" array. */
+ json_object_object_get_ex(json_iface, group_str, &json_group);
+ if (!json_group) {
+ json_group = json_object_new_object();
+ if (!json_group)
+ return;
+ json_object_string_add(json_group, "group", group_str);
+ json_object_object_add(json_iface, group_str,
+ json_group);
+ json_sources = json_object_new_array();
+ if (!json_sources)
+ return;
+ json_object_object_add(json_group, "sources",
+ json_sources);
+ }
+
+ json_object_object_get_ex(json_group, "sources", &json_sources);
+ if (json_sources)
+ igmp_source_json_helper(src, json_sources, source_str,
+ mmss, uptime);
+ } else {
+ vty_out(vty, "%-16s %-15s %-15s %5s %3s %8s\n", ifp->name,
+ group_str, source_str, mmss,
+ IGMP_SOURCE_TEST_FORWARDING(src->source_flags) ? "Y"
+ : "N",
+ uptime);
+ }
+}
+
+/*
+ * Show IGMP sources for a single interface @ifname; when @grp_str is
+ * given, only sources of that group.  JSON (@uj) or text output.
+ * All early-exit paths in JSON mode still emit (and free) the json
+ * object via vty_json().
+ */
+static void igmp_show_sources_interface_single(struct pim_instance *pim,
+ struct vty *vty, bool uj,
+ const char *ifname,
+ const char *grp_str)
+{
+ struct interface *ifp;
+ time_t now;
+ json_object *json = NULL;
+ struct pim_interface *pim_ifp;
+ struct gm_group *grp;
+
+ now = pim_time_monotonic_sec();
+
+ if (uj) {
+ json = json_object_new_object();
+ if (!json)
+ return;
+ } else {
+ vty_out(vty,
+ "Interface        Group           Source          Timer Fwd Uptime  \n");
+ }
+
+ ifp = if_lookup_by_name(ifname, pim->vrf->vrf_id);
+ if (!ifp) {
+ if (uj)
+ vty_json(vty, json);
+ return;
+ }
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ if (uj)
+ vty_json(vty, json);
+ return;
+ }
+
+ if (grp_str) {
+ struct in_addr group_addr;
+ struct gm_sock *igmp;
+ struct listnode *srcnode;
+ struct gm_source *src;
+ char group_str[INET_ADDRSTRLEN];
+ int res;
+
+ res = inet_pton(AF_INET, grp_str, &group_addr);
+ if (res <= 0) {
+ if (uj)
+ vty_json(vty, json);
+ return;
+ }
+
+ igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list,
+ pim_ifp->primary_address);
+ if (!igmp) {
+ if (uj)
+ vty_json(vty, json);
+ return;
+ }
+
+ grp = find_group_by_addr(igmp, group_addr);
+ if (!grp) {
+ if (uj)
+ vty_json(vty, json);
+ return;
+ }
+ pim_inet4_dump("<group?>", grp->group_addr, group_str,
+ sizeof(group_str));
+
+ /* scan group sources */
+ for (ALL_LIST_ELEMENTS_RO(grp->group_source_list, srcnode, src))
+ igmp_sources_print(ifp, group_str, src, now, json, vty,
+ uj);
+ } else {
+ struct listnode *grpnode;
+
+ /* scan igmp groups */
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode,
+ grp)) {
+ char group_str[INET_ADDRSTRLEN];
+ struct listnode *srcnode;
+ struct gm_source *src;
+
+ pim_inet4_dump("<group?>", grp->group_addr, group_str,
+ sizeof(group_str));
+
+ /* scan group sources */
+ for (ALL_LIST_ELEMENTS_RO(grp->group_source_list,
+ srcnode, src))
+ igmp_sources_print(ifp, group_str, src, now,
+ json, vty, uj);
+
+ } /* scan igmp groups */
+ }
+
+ if (uj)
+ vty_json(vty, json);
+}
+
+/*
+ * Show IGMP sources for every group on every PIM-enabled interface of
+ * the VRF.  JSON (@uj) or text output.
+ */
+static void igmp_show_sources(struct pim_instance *pim, struct vty *vty,
+ bool uj)
+{
+ struct interface *ifp;
+ time_t now;
+ json_object *json = NULL;
+
+ now = pim_time_monotonic_sec();
+
+ if (uj) {
+ json = json_object_new_object();
+ if (!json)
+ return;
+ } else {
+ vty_out(vty,
+ "Interface        Group           Source          Timer Fwd Uptime\n");
+ }
+
+ /* scan interfaces */
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp = ifp->info;
+ struct listnode *grpnode;
+ struct gm_group *grp;
+
+ if (!pim_ifp)
+ continue;
+
+ /* scan igmp groups */
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode,
+ grp)) {
+ char group_str[INET_ADDRSTRLEN];
+ struct listnode *srcnode;
+ struct gm_source *src;
+
+ pim_inet4_dump("<group?>", grp->group_addr, group_str,
+ sizeof(group_str));
+
+ /* scan group sources */
+ for (ALL_LIST_ELEMENTS_RO(grp->group_source_list,
+ srcnode, src))
+ igmp_sources_print(ifp, group_str, src, now,
+ json, vty, uj);
+ } /* scan igmp groups */
+ } /* scan interfaces */
+
+ if (uj)
+ vty_json(vty, json);
+}
+
+/*
+ * Show, per (interface, group, source), the pending source-specific
+ * query retransmission counter.  Text output only.
+ */
+static void igmp_show_source_retransmission(struct pim_instance *pim,
+ struct vty *vty)
+{
+ struct interface *ifp;
+
+ vty_out(vty,
+ "Interface        Group           Source          Counter\n");
+
+ /* scan interfaces */
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp = ifp->info;
+ struct listnode *grpnode;
+ struct gm_group *grp;
+
+ if (!pim_ifp)
+ continue;
+
+ /* scan igmp groups */
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode,
+ grp)) {
+ char group_str[INET_ADDRSTRLEN];
+ struct listnode *srcnode;
+ struct gm_source *src;
+
+ pim_inet4_dump("<group?>", grp->group_addr, group_str,
+ sizeof(group_str));
+
+ /* scan group sources */
+ for (ALL_LIST_ELEMENTS_RO(grp->group_source_list,
+ srcnode, src)) {
+ char source_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<source?>", src->source_addr,
+ source_str, sizeof(source_str));
+
+ vty_out(vty, "%-16s %-15s %-15s %7d\n",
+ ifp->name, group_str, source_str,
+ src->source_query_retransmit_count);
+
+ } /* scan group sources */
+ } /* scan igmp groups */
+ } /* scan interfaces */
+}
+
+/*
+ * Reset IGMP state on every interface of the VRF: tear down all IGMP
+ * address state first, then re-add addresses (two full passes so that
+ * removal completes everywhere before re-creation starts).
+ */
+static void clear_igmp_interfaces(struct pim_instance *pim)
+{
+ struct interface *ifp;
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp)
+ pim_if_addr_del_all_igmp(ifp);
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp)
+ pim_if_addr_add_all(ifp);
+}
+
+/* Reset both IGMP and PIM interface state for the VRF. */
+static void clear_interfaces(struct pim_instance *pim)
+{
+ clear_igmp_interfaces(pim);
+ clear_pim_interfaces(pim);
+}
+
+/*
+ * Fetch @ifp's PIM interface data into @pim_ifp.  NOTE: on failure this
+ * macro executes `return CMD_WARNING_CONFIG_FAILED` in the *calling*
+ * CLI handler, so it may only be used inside DEFUN/DEFPY bodies.
+ */
+#define PIM_GET_PIM_INTERFACE(pim_ifp, ifp) \
+ pim_ifp = ifp->info; \
+ if (!pim_ifp) { \
+ vty_out(vty, \
+ "%% Enable PIM and/or IGMP on this interface first\n"); \
+ return CMD_WARNING_CONFIG_FAILED; \
+ }
+
+/**
+ * Compatibility function to keep the legacy mesh group CLI behavior:
+ * Delete group when there are no more configurations in it.
+ *
+ * A mesh group is considered empty when it has no remaining member
+ * entries and no source address configured in the candidate config.
+ *
+ * NOTE:
+ * Don't forget to call `nb_cli_apply_changes` after this.
+ */
+static void pim_cli_legacy_mesh_group_behavior(struct vty *vty,
+ const char *gname)
+{
+ const char *vrfname;
+ char xpath_value[XPATH_MAXLEN];
+ char xpath_member_value[XPATH_MAXLEN];
+ const struct lyd_node *member_dnode;
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return;
+
+ /* Get mesh group base XPath. */
+ snprintf(xpath_value, sizeof(xpath_value),
+ FRR_PIM_VRF_XPATH "/msdp-mesh-groups[name='%s']",
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4", gname);
+ /* Group must exists, otherwise just quit. */
+ if (!yang_dnode_exists(vty->candidate_config->dnode, xpath_value))
+ return;
+
+ /* Group members check: if members remain (i.e. the one being
+ * removed is not the last list entry), keep the group. */
+ strlcpy(xpath_member_value, xpath_value, sizeof(xpath_member_value));
+ strlcat(xpath_member_value, "/members", sizeof(xpath_member_value));
+ if (yang_dnode_exists(vty->candidate_config->dnode,
+ xpath_member_value)) {
+ member_dnode = yang_dnode_get(vty->candidate_config->dnode,
+ xpath_member_value);
+ if (!member_dnode || !yang_is_last_list_dnode(member_dnode))
+ return;
+ }
+
+ /* Source address check: */
+ strlcpy(xpath_member_value, xpath_value, sizeof(xpath_member_value));
+ strlcat(xpath_member_value, "/source", sizeof(xpath_member_value));
+ if (yang_dnode_exists(vty->candidate_config->dnode, xpath_member_value))
+ return;
+
+ /* No configurations found: delete it. */
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_DESTROY, NULL);
+}
+
+/* "clear ip interfaces [vrf NAME]": reset IGMP and PIM state on all
+ * interfaces of the VRF. */
+DEFUN (clear_ip_interfaces,
+       clear_ip_interfaces_cmd,
+       "clear ip interfaces [vrf NAME]",
+       CLEAR_STR
+       IP_STR
+       "Reset interfaces\n"
+       VRF_CMD_HELP_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ clear_interfaces(vrf->info);
+
+ return CMD_SUCCESS;
+}
+
+/* "clear ip igmp [vrf NAME] interfaces": reset only IGMP state. */
+DEFUN (clear_ip_igmp_interfaces,
+       clear_ip_igmp_interfaces_cmd,
+       "clear ip igmp [vrf NAME] interfaces",
+       CLEAR_STR
+       IP_STR
+       CLEAR_IP_IGMP_STR
+       VRF_CMD_HELP_STR
+       "Reset IGMP interfaces\n")
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ clear_igmp_interfaces(vrf->info);
+
+ return CMD_SUCCESS;
+}
+
+/* "clear ip pim statistics [vrf NAME]": zero PIM packet counters. */
+DEFPY (clear_ip_pim_statistics,
+       clear_ip_pim_statistics_cmd,
+       "clear ip pim statistics [vrf NAME]$name",
+       CLEAR_STR
+       IP_STR
+       CLEAR_IP_PIM_STR
+       VRF_CMD_HELP_STR
+       "Reset PIM statistics\n")
+{
+ struct vrf *v = pim_cmd_lookup(vty, name);
+
+ if (!v)
+ return CMD_WARNING;
+
+ clear_pim_statistics(v->info);
+
+ return CMD_SUCCESS;
+}
+
+/* "clear ip mroute [vrf NAME]": flush multicast routes in the VRF. */
+DEFPY (clear_ip_mroute,
+       clear_ip_mroute_cmd,
+       "clear ip mroute [vrf NAME]$name",
+       CLEAR_STR
+       IP_STR
+       MROUTE_STR
+       VRF_CMD_HELP_STR)
+{
+ struct vrf *v = pim_cmd_lookup(vty, name);
+
+ if (!v)
+ return CMD_WARNING;
+
+ clear_mroute(v->info);
+
+ return CMD_SUCCESS;
+}
+
+/* "clear ip pim [vrf NAME] interfaces": reset only PIM state.
+ * NOTE: the optional VRF token binds to DEFPY variable `vrf`. */
+DEFPY (clear_ip_pim_interfaces,
+       clear_ip_pim_interfaces_cmd,
+       "clear ip pim [vrf NAME] interfaces",
+       CLEAR_STR
+       IP_STR
+       CLEAR_IP_PIM_STR
+       VRF_CMD_HELP_STR
+       "Reset PIM interfaces\n")
+{
+ struct vrf *v = pim_cmd_lookup(vty, vrf);
+
+ if (!v)
+ return CMD_WARNING;
+
+ clear_pim_interfaces(v->info);
+
+ return CMD_SUCCESS;
+}
+
+/* "clear ip pim [vrf NAME] interface traffic": zero per-interface
+ * protocol packet counters (delegates VRF handling to the helper). */
+DEFPY (clear_ip_pim_interface_traffic,
+       clear_ip_pim_interface_traffic_cmd,
+       "clear ip pim [vrf NAME] interface traffic",
+       CLEAR_STR
+       IP_STR
+       CLEAR_IP_PIM_STR
+       VRF_CMD_HELP_STR
+       "Reset PIM interfaces\n"
+       "Reset Protocol Packet counters\n")
+{
+ return clear_pim_interface_traffic(vrf, vty);
+}
+
+/* "clear ip pim [vrf NAME] oil": trigger a rescan of the output
+ * interface lists. */
+DEFPY (clear_ip_pim_oil,
+       clear_ip_pim_oil_cmd,
+       "clear ip pim [vrf NAME]$name oil",
+       CLEAR_STR
+       IP_STR
+       CLEAR_IP_PIM_STR
+       VRF_CMD_HELP_STR
+       "Rescan PIM OIL (output interface list)\n")
+{
+ struct vrf *v = pim_cmd_lookup(vty, name);
+
+ if (!v)
+ return CMD_WARNING;
+
+ pim_scan_oil(v->info);
+
+ return CMD_SUCCESS;
+}
+
+/* "clear ip pim [vrf NAME] bsr-data": flush learned BSR/BSM state. */
+DEFUN (clear_ip_pim_bsr_db,
+       clear_ip_pim_bsr_db_cmd,
+       "clear ip pim [vrf NAME] bsr-data",
+       CLEAR_STR
+       IP_STR
+       CLEAR_IP_PIM_STR
+       VRF_CMD_HELP_STR
+       "Reset pim bsr data\n")
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ pim_bsm_clear(vrf->info);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip igmp interface": all interfaces, or one interface / detail
+ * view when "detail" or an interface name is supplied. */
+DEFUN (show_ip_igmp_interface,
+       show_ip_igmp_interface_cmd,
+       "show ip igmp [vrf NAME] interface [detail|WORD] [json]",
+       SHOW_STR
+       IP_STR
+       IGMP_STR
+       VRF_CMD_HELP_STR
+       "IGMP interface information\n"
+       "Detailed output\n"
+       "interface name\n"
+       JSON_STR)
+{
+ int idx = 2;
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ if (argv_find(argv, argc, "detail", &idx)
+     || argv_find(argv, argc, "WORD", &idx))
+ igmp_show_interfaces_single(vrf->info, vty, argv[idx]->arg, uj);
+ else
+ igmp_show_interfaces(vrf->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip igmp vrf all interface": iterate every VRF; in JSON mode the
+ * per-VRF outputs are stitched together into one object by hand. */
+DEFUN (show_ip_igmp_interface_vrf_all,
+       show_ip_igmp_interface_vrf_all_cmd,
+       "show ip igmp vrf all interface [detail|WORD] [json]",
+       SHOW_STR
+       IP_STR
+       IGMP_STR
+       VRF_CMD_HELP_STR
+       "IGMP interface information\n"
+       "Detailed output\n"
+       "interface name\n"
+       JSON_STR)
+{
+ int idx = 2;
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf;
+ bool first = true;
+
+ if (uj)
+ vty_out(vty, "{ ");
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ if (uj) {
+ if (!first)
+ vty_out(vty, ", ");
+ vty_out(vty, " \"%s\": ", vrf->name);
+ first = false;
+ } else
+ vty_out(vty, "VRF: %s\n", vrf->name);
+ if (argv_find(argv, argc, "detail", &idx)
+     || argv_find(argv, argc, "WORD", &idx))
+ igmp_show_interfaces_single(vrf->info, vty,
+ argv[idx]->arg, uj);
+ else
+ igmp_show_interfaces(vrf->info, vty, uj);
+ }
+ if (uj)
+ vty_out(vty, "}\n");
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip igmp [vrf NAME] join": list statically configured joins. */
+DEFUN (show_ip_igmp_join,
+       show_ip_igmp_join_cmd,
+       "show ip igmp [vrf NAME] join [json]",
+       SHOW_STR
+       IP_STR
+       IGMP_STR
+       VRF_CMD_HELP_STR
+       "IGMP static join information\n"
+       JSON_STR)
+{
+ int idx = 2;
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ igmp_show_interface_join(vrf->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip igmp vrf all join": static joins across every VRF; JSON
+ * outputs are concatenated into one object by hand. */
+DEFUN (show_ip_igmp_join_vrf_all,
+       show_ip_igmp_join_vrf_all_cmd,
+       "show ip igmp vrf all join [json]",
+       SHOW_STR
+       IP_STR
+       IGMP_STR
+       VRF_CMD_HELP_STR
+       "IGMP static join information\n"
+       JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf;
+ bool first = true;
+
+ if (uj)
+ vty_out(vty, "{ ");
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ if (uj) {
+ if (!first)
+ vty_out(vty, ", ");
+ vty_out(vty, " \"%s\": ", vrf->name);
+ first = false;
+ } else
+ vty_out(vty, "VRF: %s\n", vrf->name);
+ igmp_show_interface_join(vrf->info, vty, uj);
+ }
+ if (uj)
+ vty_out(vty, "}\n");
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip igmp groups": all interfaces, or one interface (optionally
+ * one group) when INTERFACE/GROUP are supplied. */
+DEFPY(show_ip_igmp_groups,
+      show_ip_igmp_groups_cmd,
+      "show ip igmp [vrf NAME$vrf_name] groups [INTERFACE$ifname [GROUP$grp_str]] [detail$detail] [json$json]",
+      SHOW_STR
+      IP_STR
+      IGMP_STR
+      VRF_CMD_HELP_STR
+      IGMP_GROUP_STR
+      "Interface name\n"
+      "Group address\n"
+      "Detailed Information\n"
+      JSON_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, !!json);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ if (ifname)
+ igmp_show_groups_interface_single(vrf->info, vty, !!json,
+ ifname, grp_str, !!detail);
+ else
+ igmp_show_groups(vrf->info, vty, !!json, NULL, !!detail);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip igmp vrf all groups": group state across every VRF; JSON
+ * outputs are concatenated into one object by hand. */
+DEFPY(show_ip_igmp_groups_vrf_all,
+      show_ip_igmp_groups_vrf_all_cmd,
+      "show ip igmp vrf all groups [GROUP$grp_str] [detail$detail] [json$json]",
+      SHOW_STR
+      IP_STR
+      IGMP_STR
+      VRF_CMD_HELP_STR
+      IGMP_GROUP_STR
+      "Group address\n"
+      "Detailed Information\n"
+      JSON_STR)
+{
+ bool uj = !!json;
+ struct vrf *vrf;
+ bool first = true;
+
+ if (uj)
+ vty_out(vty, "{ ");
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ if (uj) {
+ if (!first)
+ vty_out(vty, ", ");
+ vty_out(vty, " \"%s\": ", vrf->name);
+ first = false;
+ } else
+ vty_out(vty, "VRF: %s\n", vrf->name);
+ igmp_show_groups(vrf->info, vty, uj, grp_str, !!detail);
+ }
+ if (uj)
+ vty_out(vty, "}\n");
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip igmp groups retransmissions": pending group-query
+ * retransmission state (text only). */
+DEFUN (show_ip_igmp_groups_retransmissions,
+       show_ip_igmp_groups_retransmissions_cmd,
+       "show ip igmp [vrf NAME] groups retransmissions",
+       SHOW_STR
+       IP_STR
+       IGMP_STR
+       VRF_CMD_HELP_STR
+       IGMP_GROUP_STR
+       "IGMP group retransmissions\n")
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ igmp_show_group_retransmission(vrf->info, vty);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip igmp sources": all interfaces, or one interface (optionally
+ * one group) when INTERFACE/GROUP are supplied. */
+DEFPY(show_ip_igmp_sources,
+      show_ip_igmp_sources_cmd,
+      "show ip igmp [vrf NAME$vrf_name] sources [INTERFACE$ifname [GROUP$grp_str]] [json$json]",
+      SHOW_STR
+      IP_STR
+      IGMP_STR
+      VRF_CMD_HELP_STR
+      IGMP_SOURCE_STR
+      "Interface name\n"
+      "Group address\n"
+      JSON_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, !!json);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ if (ifname)
+ igmp_show_sources_interface_single(vrf->info, vty, !!json,
+ ifname, grp_str);
+ else
+ igmp_show_sources(vrf->info, vty, !!json);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip igmp sources retransmissions": pending source-query
+ * retransmission counters (text only). */
+DEFUN (show_ip_igmp_sources_retransmissions,
+       show_ip_igmp_sources_retransmissions_cmd,
+       "show ip igmp [vrf NAME] sources retransmissions",
+       SHOW_STR
+       IP_STR
+       IGMP_STR
+       VRF_CMD_HELP_STR
+       IGMP_SOURCE_STR
+       "IGMP source retransmissions\n")
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ igmp_show_source_retransmission(vrf->info, vty);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip igmp statistics [interface WORD]": aggregated or
+ * per-interface IGMP counters. */
+DEFUN (show_ip_igmp_statistics,
+       show_ip_igmp_statistics_cmd,
+       "show ip igmp [vrf NAME] statistics [interface WORD] [json]",
+       SHOW_STR
+       IP_STR
+       IGMP_STR
+       VRF_CMD_HELP_STR
+       "IGMP statistics\n"
+       "interface\n"
+       "IGMP interface\n"
+       JSON_STR)
+{
+ int idx = 2;
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ if (argv_find(argv, argc, "WORD", &idx))
+ igmp_show_statistics(vrf->info, vty, argv[idx]->arg, uj);
+ else
+ igmp_show_statistics(vrf->info, vty, NULL, uj);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip pim mlag summary": MLAG session status (daemon/peer/zebra),
+ * role, VTEP addresses, peerlink and message statistics, read from the
+ * global `router` state.  JSON or text output. */
+DEFUN (show_ip_pim_mlag_summary,
+       show_ip_pim_mlag_summary_cmd,
+       "show ip pim mlag summary [json]",
+       SHOW_STR
+       IP_STR
+       PIM_STR
+       "MLAG\n"
+       "status and stats\n"
+       JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ char role_buf[MLAG_ROLE_STRSIZE];
+ char addr_buf[INET_ADDRSTRLEN];
+
+ if (uj) {
+ json_object *json = NULL;
+ json_object *json_stat = NULL;
+
+ json = json_object_new_object();
+ /* Only-true booleans: absent key means "down". */
+ if (router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP)
+ json_object_boolean_true_add(json, "mlagConnUp");
+ if (router->mlag_flags & PIM_MLAGF_PEER_CONN_UP)
+ json_object_boolean_true_add(json, "mlagPeerConnUp");
+ if (router->mlag_flags & PIM_MLAGF_PEER_ZEBRA_UP)
+ json_object_boolean_true_add(json, "mlagPeerZebraUp");
+ json_object_string_add(json, "mlagRole",
+ mlag_role2str(router->mlag_role,
+ role_buf, sizeof(role_buf)));
+ inet_ntop(AF_INET, &router->local_vtep_ip,
+ addr_buf, INET_ADDRSTRLEN);
+ json_object_string_add(json, "localVtepIp", addr_buf);
+ inet_ntop(AF_INET, &router->anycast_vtep_ip,
+ addr_buf, INET_ADDRSTRLEN);
+ json_object_string_add(json, "anycastVtepIp", addr_buf);
+ json_object_string_add(json, "peerlinkRif",
+ router->peerlink_rif);
+
+ json_stat = json_object_new_object();
+ json_object_int_add(json_stat, "mlagConnFlaps",
+ router->mlag_stats.mlagd_session_downs);
+ json_object_int_add(json_stat, "mlagPeerConnFlaps",
+ router->mlag_stats.peer_session_downs);
+ json_object_int_add(json_stat, "mlagPeerZebraFlaps",
+ router->mlag_stats.peer_zebra_downs);
+ json_object_int_add(json_stat, "mrouteAddRx",
+ router->mlag_stats.msg.mroute_add_rx);
+ json_object_int_add(json_stat, "mrouteAddTx",
+ router->mlag_stats.msg.mroute_add_tx);
+ json_object_int_add(json_stat, "mrouteDelRx",
+ router->mlag_stats.msg.mroute_del_rx);
+ json_object_int_add(json_stat, "mrouteDelTx",
+ router->mlag_stats.msg.mroute_del_tx);
+ json_object_int_add(json_stat, "mlagStatusUpdates",
+ router->mlag_stats.msg.mlag_status_updates);
+ json_object_int_add(json_stat, "peerZebraStatusUpdates",
+ router->mlag_stats.msg.peer_zebra_status_updates);
+ json_object_int_add(json_stat, "pimStatusUpdates",
+ router->mlag_stats.msg.pim_status_updates);
+ json_object_int_add(json_stat, "vxlanUpdates",
+ router->mlag_stats.msg.vxlan_updates);
+ json_object_object_add(json, "connStats", json_stat);
+
+ vty_json(vty, json);
+ return CMD_SUCCESS;
+ }
+
+ vty_out(vty, "MLAG daemon connection: %s\n",
+ (router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP)
+ ? "up" : "down");
+ vty_out(vty, "MLAG peer state: %s\n",
+ (router->mlag_flags & PIM_MLAGF_PEER_CONN_UP)
+ ? "up" : "down");
+ vty_out(vty, "Zebra peer state: %s\n",
+ (router->mlag_flags & PIM_MLAGF_PEER_ZEBRA_UP)
+ ? "up" : "down");
+ vty_out(vty, "MLAG role: %s\n",
+ mlag_role2str(router->mlag_role, role_buf, sizeof(role_buf)));
+ inet_ntop(AF_INET, &router->local_vtep_ip,
+ addr_buf, INET_ADDRSTRLEN);
+ vty_out(vty, "Local VTEP IP: %s\n", addr_buf);
+ inet_ntop(AF_INET, &router->anycast_vtep_ip,
+ addr_buf, INET_ADDRSTRLEN);
+ vty_out(vty, "Anycast VTEP IP: %s\n", addr_buf);
+ vty_out(vty, "Peerlink: %s\n", router->peerlink_rif);
+ vty_out(vty, "Session flaps: mlagd: %d mlag-peer: %d zebra-peer: %d\n",
+ router->mlag_stats.mlagd_session_downs,
+ router->mlag_stats.peer_session_downs,
+ router->mlag_stats.peer_zebra_downs);
+ vty_out(vty, "Message Statistics:\n");
+ vty_out(vty, " mroute adds: rx: %d, tx: %d\n",
+ router->mlag_stats.msg.mroute_add_rx,
+ router->mlag_stats.msg.mroute_add_tx);
+ vty_out(vty, " mroute dels: rx: %d, tx: %d\n",
+ router->mlag_stats.msg.mroute_del_rx,
+ router->mlag_stats.msg.mroute_del_tx);
+ vty_out(vty, " peer zebra status updates: %d\n",
+ router->mlag_stats.msg.peer_zebra_status_updates);
+ vty_out(vty, " PIM status updates: %d\n",
+ router->mlag_stats.msg.pim_status_updates);
+ vty_out(vty, " VxLAN updates: %d\n",
+ router->mlag_stats.msg.vxlan_updates);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip pim [vrf NAME] assert" - dump per-interface assert state. */
+DEFUN (show_ip_pim_assert,
+ show_ip_pim_assert_cmd,
+ "show ip pim [vrf NAME] assert",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface assert\n")
+{
+ int idx = 2;
+ struct vrf *vrf;
+
+ vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
+ if (!vrf)
+ return CMD_WARNING;
+
+ pim_show_assert(vrf->info, vty);
+ return CMD_SUCCESS;
+}
+
+DEFUN (show_ip_pim_assert_internal,
+ show_ip_pim_assert_internal_cmd,
+ "show ip pim [vrf NAME] assert-internal",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface internal assert state\n")
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ pim_show_assert_internal(vrf->info, vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFUN (show_ip_pim_assert_metric,
+ show_ip_pim_assert_metric_cmd,
+ "show ip pim [vrf NAME] assert-metric",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface assert metric\n")
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ pim_show_assert_metric(vrf->info, vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFUN (show_ip_pim_assert_winner_metric,
+ show_ip_pim_assert_winner_metric_cmd,
+ "show ip pim [vrf NAME] assert-winner-metric",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface assert winner metric\n")
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ pim_show_assert_winner_metric(vrf->info, vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (show_ip_pim_interface,
+ show_ip_pim_interface_cmd,
+ "show ip pim [mlag$mlag] [vrf NAME] interface [detail|WORD]$interface [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ "MLAG\n"
+ VRF_CMD_HELP_STR
+ "PIM interface information\n"
+ "Detailed output\n"
+ "interface name\n"
+ JSON_STR)
+{
+ return pim_show_interface_cmd_helper(vrf, vty, !!json, !!mlag,
+ interface);
+}
+
+DEFPY (show_ip_pim_interface_vrf_all,
+ show_ip_pim_interface_vrf_all_cmd,
+ "show ip pim [mlag$mlag] vrf all interface [detail|WORD]$interface [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ "MLAG\n"
+ VRF_CMD_HELP_STR
+ "PIM interface information\n"
+ "Detailed output\n"
+ "interface name\n"
+ JSON_STR)
+{
+ return pim_show_interface_vrf_all_cmd_helper(vty, !!json, !!mlag,
+ interface);
+}
+
+DEFPY (show_ip_pim_join,
+ show_ip_pim_join_cmd,
+ "show ip pim [vrf NAME] join [A.B.C.D$s_or_g [A.B.C.D$g]] [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface join information\n"
+ "The Source or Group\n"
+ "The Group\n"
+ JSON_STR)
+{
+ return pim_show_join_cmd_helper(vrf, vty, s_or_g, g, json);
+}
+
+DEFPY (show_ip_pim_join_vrf_all,
+ show_ip_pim_join_vrf_all_cmd,
+ "show ip pim vrf all join [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface join information\n"
+ JSON_STR)
+{
+ return pim_show_join_vrf_all_cmd_helper(vty, json);
+}
+
+DEFPY (show_ip_pim_jp_agg,
+ show_ip_pim_jp_agg_cmd,
+ "show ip pim [vrf NAME] jp-agg",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "join prune aggregation list\n")
+{
+ return pim_show_jp_agg_list_cmd_helper(vrf, vty);
+}
+
+DEFPY (show_ip_pim_local_membership,
+ show_ip_pim_local_membership_cmd,
+ "show ip pim [vrf NAME] local-membership [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface local-membership\n"
+ JSON_STR)
+{
+ return pim_show_membership_cmd_helper(vrf, vty, !!json);
+}
+
+/*
+ * Emit one MLAG-relevant upstream (S,G) entry in detail form.
+ *
+ * When json is non-NULL the entry is added as json[grp_str][src_str];
+ * otherwise a single formatted table row is written to the vty.
+ * "Owners" lists which parties installed the entry: local, peer
+ * and/or interface.
+ * NOTE(review): vrf is unused in this body; kept for interface
+ * symmetry with the other mlag show helpers.
+ */
+static void pim_show_mlag_up_entry_detail(struct vrf *vrf,
+ struct vty *vty,
+ struct pim_upstream *up,
+ char *src_str, char *grp_str,
+ json_object *json)
+{
+ if (json) {
+ json_object *json_row = NULL;
+ json_object *own_list = NULL;
+ json_object *json_group = NULL;
+
+ /* Group objects are shared between entries: reuse if present */
+ json_object_object_get_ex(json, grp_str, &json_group);
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_object_add(json, grp_str,
+ json_group);
+ }
+
+ json_row = json_object_new_object();
+ json_object_string_add(json_row, "source", src_str);
+ json_object_string_add(json_row, "group", grp_str);
+
+ own_list = json_object_new_array();
+ if (pim_up_mlag_is_local(up))
+ json_object_array_add(own_list,
+ json_object_new_string("local"));
+ if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_PEER))
+ json_object_array_add(own_list,
+ json_object_new_string("peer"));
+ if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE))
+ json_object_array_add(
+ own_list, json_object_new_string("Interface"));
+ json_object_object_add(json_row, "owners", own_list);
+
+ json_object_int_add(json_row, "localCost",
+ pim_up_mlag_local_cost(up));
+ json_object_int_add(json_row, "peerCost",
+ pim_up_mlag_peer_cost(up));
+ if (PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags))
+ json_object_boolean_false_add(json_row, "df");
+ else
+ json_object_boolean_true_add(json_row, "df");
+ json_object_object_add(json_group, src_str, json_row);
+ } else {
+ /* worst case is "LPI" plus NUL; 6 bytes is plenty */
+ char own_str[6];
+
+ own_str[0] = '\0';
+ if (pim_up_mlag_is_local(up))
+ strlcat(own_str, "L", sizeof(own_str));
+ if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_PEER))
+ strlcat(own_str, "P", sizeof(own_str));
+ if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE))
+ strlcat(own_str, "I", sizeof(own_str));
+ /* XXX - fixup, print paragraph output */
+ /* peer-cost is unsigned; print with %u like
+ * pim_show_mlag_up_vrf() does (was "%-10d").
+ */
+ vty_out(vty,
+ "%-15s %-15s %-6s %-11u %-10u %2s\n",
+ src_str, grp_str, own_str,
+ pim_up_mlag_local_cost(up),
+ pim_up_mlag_peer_cost(up),
+ PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags)
+ ? "n" : "y");
+ }
+}
+
+/*
+ * Detail dump of MLAG-relevant upstream entries, filtered by address.
+ *
+ * With both src_or_group and group given, only the exact (S,G) match is
+ * printed; with only src_or_group, any entry whose source OR group
+ * matches it is printed.  uj selects JSON output.
+ */
+static void pim_show_mlag_up_detail(struct vrf *vrf,
+ struct vty *vty, const char *src_or_group,
+ const char *group, bool uj)
+{
+ char src_str[PIM_ADDRSTRLEN];
+ char grp_str[PIM_ADDRSTRLEN];
+ struct pim_upstream *up;
+ struct pim_instance *pim = vrf->info;
+ json_object *json = NULL;
+
+ if (uj)
+ json = json_object_new_object();
+ else
+ vty_out(vty,
+ "Source Group Owner Local-cost Peer-cost DF\n");
+
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ /* only entries owned by MLAG (local/peer/interface) qualify */
+ if (!(up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_PEER)
+ && !(up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE)
+ && !pim_up_mlag_is_local(up))
+ continue;
+
+ snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &up->sg.grp);
+ snprintfrr(src_str, sizeof(src_str), "%pPAs", &up->sg.src);
+
+ /* XXX: strcmps are clearly inefficient. we should do uint comps
+ * here instead.
+ */
+ if (group) {
+ if (strcmp(src_str, src_or_group) ||
+ strcmp(grp_str, group))
+ continue;
+ } else {
+ if (strcmp(src_str, src_or_group) &&
+ strcmp(grp_str, src_or_group))
+ continue;
+ }
+ pim_show_mlag_up_entry_detail(vrf, vty, up,
+ src_str, grp_str, json);
+ }
+
+ if (uj)
+ vty_json(vty, json);
+}
+
+/*
+ * Dump all MLAG-relevant upstream entries of one VRF, either as JSON
+ * (json[group][source] rows, including the VRF name) or as a text
+ * table.  Entries not owned by MLAG (local/peer/interface) are skipped.
+ */
+static void pim_show_mlag_up_vrf(struct vrf *vrf, struct vty *vty, bool uj)
+{
+ json_object *json = NULL;
+ json_object *json_row;
+ struct pim_upstream *up;
+ struct pim_instance *pim = vrf->info;
+ json_object *json_group = NULL;
+
+ if (uj) {
+ json = json_object_new_object();
+ } else {
+ vty_out(vty,
+ "Source Group Owner Local-cost Peer-cost DF\n");
+ }
+
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ /* only entries owned by MLAG (local/peer/interface) qualify */
+ if (!(up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_PEER)
+ && !(up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE)
+ && !pim_up_mlag_is_local(up))
+ continue;
+ if (uj) {
+ char src_str[PIM_ADDRSTRLEN];
+ char grp_str[PIM_ADDRSTRLEN];
+ json_object *own_list = NULL;
+
+ snprintfrr(grp_str, sizeof(grp_str), "%pPAs",
+ &up->sg.grp);
+ snprintfrr(src_str, sizeof(src_str), "%pPAs",
+ &up->sg.src);
+
+ /* group objects are shared: reuse if already created */
+ json_object_object_get_ex(json, grp_str, &json_group);
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_object_add(json, grp_str,
+ json_group);
+ }
+
+ json_row = json_object_new_object();
+ json_object_string_add(json_row, "vrf", vrf->name);
+ json_object_string_add(json_row, "source", src_str);
+ json_object_string_add(json_row, "group", grp_str);
+
+ own_list = json_object_new_array();
+ if (pim_up_mlag_is_local(up)) {
+
+ json_object_array_add(own_list,
+ json_object_new_string(
+ "local"));
+ }
+ if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_PEER)) {
+ json_object_array_add(own_list,
+ json_object_new_string(
+ "peer"));
+ }
+ json_object_object_add(json_row, "owners", own_list);
+
+ json_object_int_add(json_row, "localCost",
+ pim_up_mlag_local_cost(up));
+ json_object_int_add(json_row, "peerCost",
+ pim_up_mlag_peer_cost(up));
+ if (PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags))
+ json_object_boolean_false_add(json_row, "df");
+ else
+ json_object_boolean_true_add(json_row, "df");
+ json_object_object_add(json_group, src_str, json_row);
+ } else {
+ /* worst case is "LPI" plus NUL; 6 bytes is plenty */
+ char own_str[6];
+
+ own_str[0] = '\0';
+ if (pim_up_mlag_is_local(up))
+ strlcat(own_str, "L", sizeof(own_str));
+ if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_PEER))
+ strlcat(own_str, "P", sizeof(own_str));
+ if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE))
+ strlcat(own_str, "I", sizeof(own_str));
+ vty_out(vty,
+ "%-15pPAs %-15pPAs %-6s %-11u %-10u %2s\n",
+ &up->sg.src, &up->sg.grp, own_str,
+ pim_up_mlag_local_cost(up),
+ pim_up_mlag_peer_cost(up),
+ PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags)
+ ? "n" : "y");
+ }
+ }
+ if (uj)
+ vty_json(vty, json);
+}
+
+/* Print the legend for the MLAG owner codes (text output only). */
+static void pim_show_mlag_help_string(struct vty *vty, bool uj)
+{
+ if (uj)
+ return;
+
+ vty_out(vty, "Owner codes:\n");
+ vty_out(vty,
+ "L: EVPN-MLAG Entry, I:PIM-MLAG Entry, P: Peer Entry\n");
+}
+
+
+/* "show ip pim [vrf NAME] mlag upstream ..." - MLAG upstream dump,
+ * optionally filtered by one address (source-or-group) or two (S,G).
+ */
+DEFUN(show_ip_pim_mlag_up, show_ip_pim_mlag_up_cmd,
+ "show ip pim [vrf NAME] mlag upstream [A.B.C.D [A.B.C.D]] [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "MLAG\n"
+ "upstream\n"
+ "Unicast or Multicast address\n"
+ "Multicast address\n" JSON_STR)
+{
+ const char *src_or_group = NULL;
+ const char *group = NULL;
+ int idx = 2;
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+
+ if (!vrf || !vrf->info) {
+ vty_out(vty, "%s: VRF or Info missing\n", __func__);
+ return CMD_WARNING;
+ }
+
+ /* drop the trailing "json" token so the "idx + 1 < argc" group
+ * lookup below cannot count it as an argument
+ */
+ if (uj)
+ argc--;
+
+ if (argv_find(argv, argc, "A.B.C.D", &idx)) {
+ src_or_group = argv[idx]->arg;
+ if (idx + 1 < argc)
+ group = argv[idx + 1]->arg;
+ }
+
+ pim_show_mlag_help_string(vty, uj);
+
+ /* with an address filter use the detail path, else the full dump */
+ if (src_or_group)
+ pim_show_mlag_up_detail(vrf, vty, src_or_group, group, uj);
+ else
+ pim_show_mlag_up_vrf(vrf, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+
+DEFUN(show_ip_pim_mlag_up_vrf_all, show_ip_pim_mlag_up_vrf_all_cmd,
+ "show ip pim vrf all mlag upstream [json]",
+ SHOW_STR IP_STR PIM_STR VRF_CMD_HELP_STR
+ "MLAG\n"
+ "upstream\n" JSON_STR)
+{
+ struct vrf *vrf;
+ bool uj = use_json(argc, argv);
+
+ pim_show_mlag_help_string(vty, uj);
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ pim_show_mlag_up_vrf(vrf, vty, uj);
+ }
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (show_ip_pim_neighbor,
+ show_ip_pim_neighbor_cmd,
+ "show ip pim [vrf NAME] neighbor [detail|WORD]$interface [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM neighbor information\n"
+ "Detailed output\n"
+ "Name of interface or neighbor\n"
+ JSON_STR)
+{
+ return pim_show_neighbors_cmd_helper(vrf, vty, json, interface);
+}
+
+DEFPY (show_ip_pim_neighbor_vrf_all,
+ show_ip_pim_neighbor_vrf_all_cmd,
+ "show ip pim vrf all neighbor [detail|WORD]$interface [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM neighbor information\n"
+ "Detailed output\n"
+ "Name of interface or neighbor\n"
+ JSON_STR)
+{
+ return pim_show_neighbors_vrf_all_cmd_helper(vty, json, interface);
+}
+
+DEFPY (show_ip_pim_secondary,
+ show_ip_pim_secondary_cmd,
+ "show ip pim [vrf NAME] secondary",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM neighbor addresses\n")
+{
+ return pim_show_secondary_helper(vrf, vty);
+}
+
+DEFPY (show_ip_pim_state,
+ show_ip_pim_state_cmd,
+ "show ip pim [vrf NAME] state [A.B.C.D$s_or_g [A.B.C.D$g]] [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM state information\n"
+ "Unicast or Multicast address\n"
+ "Multicast address\n"
+ JSON_STR)
+{
+ return pim_show_state_helper(vrf, vty, s_or_g_str, g_str, !!json);
+}
+
+DEFPY (show_ip_pim_state_vrf_all,
+ show_ip_pim_state_vrf_all_cmd,
+ "show ip pim vrf all state [A.B.C.D$s_or_g [A.B.C.D$g]] [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM state information\n"
+ "Unicast or Multicast address\n"
+ "Multicast address\n"
+ JSON_STR)
+{
+ return pim_show_state_vrf_all_helper(vty, s_or_g_str, g_str, !!json);
+}
+
+DEFPY (show_ip_pim_upstream,
+ show_ip_pim_upstream_cmd,
+ "show ip pim [vrf NAME] upstream [A.B.C.D$s_or_g [A.B.C.D$g]] [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM upstream information\n"
+ "The Source or Group\n"
+ "The Group\n"
+ JSON_STR)
+{
+ return pim_show_upstream_helper(vrf, vty, s_or_g, g, !!json);
+}
+
+DEFPY (show_ip_pim_upstream_vrf_all,
+ show_ip_pim_upstream_vrf_all_cmd,
+ "show ip pim vrf all upstream [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM upstream information\n"
+ JSON_STR)
+{
+ return pim_show_upstream_vrf_all_helper(vty, !!json);
+}
+
+DEFPY (show_ip_pim_channel,
+ show_ip_pim_channel_cmd,
+ "show ip pim [vrf NAME] channel [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM downstream channel info\n"
+ JSON_STR)
+{
+ return pim_show_channel_cmd_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ip_pim_upstream_join_desired,
+ show_ip_pim_upstream_join_desired_cmd,
+ "show ip pim [vrf NAME] upstream-join-desired [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM upstream join-desired\n"
+ JSON_STR)
+{
+ return pim_show_upstream_join_desired_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ip_pim_upstream_rpf,
+ show_ip_pim_upstream_rpf_cmd,
+ "show ip pim [vrf NAME] upstream-rpf [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM upstream source rpf\n"
+ JSON_STR)
+{
+ return pim_show_upstream_rpf_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ip_pim_rp,
+ show_ip_pim_rp_cmd,
+ "show ip pim [vrf NAME] rp-info [A.B.C.D/M$group] [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM RP information\n"
+ "Multicast Group range\n"
+ JSON_STR)
+{
+ return pim_show_rp_helper(vrf, vty, group_str, (struct prefix *)group,
+ !!json);
+}
+
+DEFPY (show_ip_pim_rp_vrf_all,
+ show_ip_pim_rp_vrf_all_cmd,
+ "show ip pim vrf all rp-info [A.B.C.D/M$group] [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM RP information\n"
+ "Multicast Group range\n"
+ JSON_STR)
+{
+ return pim_show_rp_vrf_all_helper(vty, group_str,
+ (struct prefix *)group, !!json);
+}
+
+DEFPY (show_ip_pim_rpf,
+ show_ip_pim_rpf_cmd,
+ "show ip pim [vrf NAME] rpf [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM cached source rpf information\n"
+ JSON_STR)
+{
+ return pim_show_rpf_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ip_pim_rpf_vrf_all,
+ show_ip_pim_rpf_vrf_all_cmd,
+ "show ip pim vrf all rpf [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM cached source rpf information\n"
+ JSON_STR)
+{
+ return pim_show_rpf_vrf_all_helper(vty, !!json);
+}
+
+DEFPY (show_ip_pim_nexthop,
+ show_ip_pim_nexthop_cmd,
+ "show ip pim [vrf NAME] nexthop [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM cached nexthop rpf information\n"
+ JSON_STR)
+{
+ return pim_show_nexthop_cmd_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ip_pim_nexthop_lookup,
+ show_ip_pim_nexthop_lookup_cmd,
+ "show ip pim [vrf NAME] nexthop-lookup A.B.C.D$source A.B.C.D$group",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM cached nexthop rpf lookup\n"
+ "Source/RP address\n"
+ "Multicast Group address\n")
+{
+ return pim_show_nexthop_lookup_cmd_helper(vrf, vty, source, group);
+}
+
+DEFPY (show_ip_pim_interface_traffic,
+ show_ip_pim_interface_traffic_cmd,
+ "show ip pim [vrf NAME] interface traffic [WORD$if_name] [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM interface information\n"
+ "Protocol Packet counters\n"
+ "Interface name\n"
+ JSON_STR)
+{
+ return pim_show_interface_traffic_helper(vrf, if_name, vty, !!json);
+}
+
+DEFPY (show_ip_pim_bsm_db,
+ show_ip_pim_bsm_db_cmd,
+ "show ip pim bsm-database [vrf NAME] [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ "PIM cached bsm packets information\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ return pim_show_bsm_db_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ip_pim_bsrp,
+ show_ip_pim_bsrp_cmd,
+ "show ip pim bsrp-info [vrf NAME] [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ "PIM cached group-rp mappings information\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ return pim_show_group_rp_mappings_info_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ip_pim_statistics,
+ show_ip_pim_statistics_cmd,
+ "show ip pim [vrf NAME] statistics [interface WORD$word] [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM statistics\n"
+ INTERFACE_STR
+ "PIM interface\n"
+ JSON_STR)
+{
+ return pim_show_statistics_helper(vrf, vty, word, !!json);
+}
+
+DEFPY (show_ip_multicast,
+ show_ip_multicast_cmd,
+ "show ip multicast [vrf NAME]",
+ SHOW_STR
+ IP_STR
+ "Multicast global information\n"
+ VRF_CMD_HELP_STR)
+{
+ return pim_show_multicast_helper(vrf, vty);
+}
+
+DEFPY (show_ip_multicast_vrf_all,
+ show_ip_multicast_vrf_all_cmd,
+ "show ip multicast vrf all",
+ SHOW_STR
+ IP_STR
+ "Multicast global information\n"
+ VRF_CMD_HELP_STR)
+{
+ return pim_show_multicast_vrf_all_helper(vty);
+}
+
+DEFPY (show_ip_multicast_count,
+ show_ip_multicast_count_cmd,
+ "show ip multicast count [vrf NAME] [json$json]",
+ SHOW_STR
+ IP_STR
+ "Multicast global information\n"
+ "Data packet count\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ return pim_show_multicast_count_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ip_multicast_count_vrf_all,
+ show_ip_multicast_count_vrf_all_cmd,
+ "show ip multicast count vrf all [json$json]",
+ SHOW_STR
+ IP_STR
+ "Multicast global information\n"
+ "Data packet count\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ return pim_show_multicast_count_vrf_all_helper(vty, !!json);
+}
+
+DEFPY (show_ip_mroute,
+ show_ip_mroute_cmd,
+ "show ip mroute [vrf NAME] [A.B.C.D$s_or_g [A.B.C.D$g]] [fill$fill] [json$json]",
+ SHOW_STR
+ IP_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "The Source or Group\n"
+ "The Group\n"
+ "Fill in Assumed data\n"
+ JSON_STR)
+{
+ return pim_show_mroute_helper(vrf, vty, s_or_g, g, !!fill, !!json);
+}
+
+DEFPY (show_ip_mroute_vrf_all,
+ show_ip_mroute_vrf_all_cmd,
+ "show ip mroute vrf all [fill$fill] [json$json]",
+ SHOW_STR
+ IP_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Fill in Assumed data\n"
+ JSON_STR)
+{
+ return pim_show_mroute_vrf_all_helper(vty, !!fill, !!json);
+}
+
+DEFPY (clear_ip_mroute_count,
+ clear_ip_mroute_count_cmd,
+ "clear ip mroute [vrf NAME]$name count",
+ CLEAR_STR
+ IP_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Route and packet count data\n")
+{
+ return clear_ip_mroute_count_command(vty, name);
+}
+
+DEFPY (show_ip_mroute_count,
+ show_ip_mroute_count_cmd,
+ "show ip mroute [vrf NAME] count [json$json]",
+ SHOW_STR
+ IP_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Route and packet count data\n"
+ JSON_STR)
+{
+ return pim_show_mroute_count_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ip_mroute_count_vrf_all,
+ show_ip_mroute_count_vrf_all_cmd,
+ "show ip mroute vrf all count [json$json]",
+ SHOW_STR
+ IP_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Route and packet count data\n"
+ JSON_STR)
+{
+ return pim_show_mroute_count_vrf_all_helper(vty, !!json);
+}
+
+DEFPY (show_ip_mroute_summary,
+ show_ip_mroute_summary_cmd,
+ "show ip mroute [vrf NAME] summary [json$json]",
+ SHOW_STR
+ IP_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Summary of all mroutes\n"
+ JSON_STR)
+{
+ return pim_show_mroute_summary_helper(vrf, vty, !!json);
+}
+
+DEFPY (show_ip_mroute_summary_vrf_all,
+ show_ip_mroute_summary_vrf_all_cmd,
+ "show ip mroute vrf all summary [json$json]",
+ SHOW_STR
+ IP_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Summary of all mroutes\n"
+ JSON_STR)
+{
+ return pim_show_mroute_summary_vrf_all_helper(vty, !!json);
+}
+
+/* "show ip rib [vrf NAME] A.B.C.D" - look up the MRIB nexthop that PIM
+ * would use toward the given unicast address and print one table row.
+ */
+DEFUN (show_ip_rib,
+ show_ip_rib_cmd,
+ "show ip rib [vrf NAME] A.B.C.D",
+ SHOW_STR
+ IP_STR
+ RIB_STR
+ VRF_CMD_HELP_STR
+ "Unicast address\n")
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
+ struct in_addr addr;
+ const char *addr_str;
+ struct pim_nexthop nexthop;
+ int result;
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ memset(&nexthop, 0, sizeof(nexthop));
+ argv_find(argv, argc, "A.B.C.D", &idx);
+ addr_str = argv[idx]->arg;
+ result = inet_pton(AF_INET, addr_str, &addr);
+ if (result <= 0) {
+ /* inet_pton() returns 0 for a malformed string WITHOUT
+ * setting errno, so the old "errno=%d: %s" suffix here
+ * reported a stale errno; just name the bad input.
+ */
+ vty_out(vty, "Bad unicast address %s\n", addr_str);
+ return CMD_WARNING;
+ }
+
+ if (!pim_nexthop_lookup(vrf->info, &nexthop, addr, 0)) {
+ vty_out(vty,
+ "Failure querying RIB nexthop for unicast address %s\n",
+ addr_str);
+ return CMD_WARNING;
+ }
+
+ vty_out(vty,
+ "Address NextHop Interface Metric Preference\n");
+
+ vty_out(vty, "%-15s %-15pPAs %-9s %6d %10d\n", addr_str,
+ &nexthop.mrib_nexthop_addr,
+ nexthop.interface ? nexthop.interface->name : "<ifname?>",
+ nexthop.mrib_route_metric, nexthop.mrib_metric_preference);
+
+ return CMD_SUCCESS;
+}
+
+/* Dump the table of active ssmpingd listener sockets for this pim
+ * instance: source address, fd, bound address/port, uptime and the
+ * number of requests served.
+ */
+static void show_ssmpingd(struct pim_instance *pim, struct vty *vty)
+{
+ struct listnode *node;
+ struct ssmpingd_sock *ss;
+ time_t now;
+
+ vty_out(vty,
+ "Source Socket Address Port Uptime Requests\n");
+
+ if (!pim->ssmpingd_list)
+ return;
+
+ now = pim_time_monotonic_sec();
+
+ for (ALL_LIST_ELEMENTS_RO(pim->ssmpingd_list, node, ss)) {
+ char source_str[INET_ADDRSTRLEN];
+ char ss_uptime[10];
+ /* zero-init: on pim_socket_getsockname() failure we still
+ * print the row below, and previously dumped an
+ * uninitialized bind_addr in that case
+ */
+ struct sockaddr_in bind_addr = {0};
+ socklen_t len = sizeof(bind_addr);
+ char bind_addr_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<src?>", ss->source_addr, source_str,
+ sizeof(source_str));
+
+ if (pim_socket_getsockname(
+ ss->sock_fd, (struct sockaddr *)&bind_addr, &len)) {
+ vty_out(vty,
+ "%% Failure reading socket name for ssmpingd source %s on fd=%d\n",
+ source_str, ss->sock_fd);
+ }
+
+ pim_inet4_dump("<addr?>", bind_addr.sin_addr, bind_addr_str,
+ sizeof(bind_addr_str));
+ pim_time_uptime(ss_uptime, sizeof(ss_uptime),
+ now - ss->creation);
+
+ vty_out(vty, "%-15s %6d %-15s %5d %8s %8lld\n", source_str,
+ ss->sock_fd, bind_addr_str, ntohs(bind_addr.sin_port),
+ ss_uptime, (long long)ss->requests);
+ }
+}
+
+DEFUN (show_ip_ssmpingd,
+ show_ip_ssmpingd_cmd,
+ "show ip ssmpingd [vrf NAME]",
+ SHOW_STR
+ IP_STR
+ SHOW_SSMPINGD_STR
+ VRF_CMD_HELP_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ show_ssmpingd(vrf->info, vty);
+ return CMD_SUCCESS;
+}
+
+DEFUN (ip_pim_spt_switchover_infinity,
+ ip_pim_spt_switchover_infinity_cmd,
+ "ip pim spt-switchover infinity-and-beyond",
+ IP_STR
+ PIM_STR
+ "SPT-Switchover\n"
+ "Never switch to SPT Tree\n")
+{
+ return pim_process_spt_switchover_infinity_cmd(vty);
+}
+
+DEFPY (ip_pim_spt_switchover_infinity_plist,
+ ip_pim_spt_switchover_infinity_plist_cmd,
+ "ip pim spt-switchover infinity-and-beyond prefix-list WORD$plist",
+ IP_STR
+ PIM_STR
+ "SPT-Switchover\n"
+ "Never switch to SPT Tree\n"
+ "Prefix-List to control which groups to switch\n"
+ "Prefix-List name\n")
+{
+ return pim_process_spt_switchover_prefixlist_cmd(vty, plist);
+}
+
+DEFUN (no_ip_pim_spt_switchover_infinity,
+ no_ip_pim_spt_switchover_infinity_cmd,
+ "no ip pim spt-switchover infinity-and-beyond",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "SPT_Switchover\n"
+ "Never switch to SPT Tree\n")
+{
+ return pim_process_no_spt_switchover_cmd(vty);
+}
+
+DEFUN (no_ip_pim_spt_switchover_infinity_plist,
+ no_ip_pim_spt_switchover_infinity_plist_cmd,
+ "no ip pim spt-switchover infinity-and-beyond prefix-list WORD",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "SPT_Switchover\n"
+ "Never switch to SPT Tree\n"
+ "Prefix-List to control which groups to switch\n"
+ "Prefix-List name\n")
+{
+ return pim_process_no_spt_switchover_cmd(vty);
+}
+
+/* Configure (or with "no", remove) the prefix-list restricting which
+ * source addresses PIM register messages are accepted from.
+ */
+DEFPY (pim_register_accept_list,
+ pim_register_accept_list_cmd,
+ "[no] ip pim register-accept-list WORD$word",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Only accept registers from a specific source prefix list\n"
+ "Prefix-List name\n")
+{
+ char xpath[XPATH_MAXLEN];
+ const char *vrfname = pim_cli_get_vrf_name(vty);
+
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH, "frr-pim:pimd",
+ "pim", vrfname, "frr-routing:ipv4");
+ strlcat(xpath, "/register-accept-list", sizeof(xpath));
+
+ /* "no" form destroys the node; plain form sets it to WORD */
+ nb_cli_enqueue_change(vty, xpath, no ? NB_OP_DESTROY : NB_OP_MODIFY,
+ no ? NULL : word);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY (ip_pim_joinprune_time,
+ ip_pim_joinprune_time_cmd,
+ "ip pim join-prune-interval (1-65535)$jpi",
+ IP_STR
+ "pim multicast routing\n"
+ "Join Prune Send Interval\n"
+ "Seconds\n")
+{
+ return pim_process_join_prune_cmd(vty, jpi_str);
+}
+
+DEFUN (no_ip_pim_joinprune_time,
+ no_ip_pim_joinprune_time_cmd,
+ "no ip pim join-prune-interval [(1-65535)]",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Join Prune Send Interval\n"
+ IGNORED_IN_NO_STR)
+{
+ return pim_process_no_join_prune_cmd(vty);
+}
+
+DEFPY (ip_pim_register_suppress,
+ ip_pim_register_suppress_cmd,
+ "ip pim register-suppress-time (1-65535)$rst",
+ IP_STR
+ "pim multicast routing\n"
+ "Register Suppress Timer\n"
+ "Seconds\n")
+{
+ return pim_process_register_suppress_cmd(vty, rst_str);
+}
+
+DEFUN (no_ip_pim_register_suppress,
+ no_ip_pim_register_suppress_cmd,
+ "no ip pim register-suppress-time [(1-65535)]",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Register Suppress Timer\n"
+ IGNORED_IN_NO_STR)
+{
+ return pim_process_no_register_suppress_cmd(vty);
+}
+
+DEFPY (ip_pim_rp_keep_alive,
+ ip_pim_rp_keep_alive_cmd,
+ "ip pim rp keep-alive-timer (1-65535)$kat",
+ IP_STR
+ "pim multicast routing\n"
+ "Rendezvous Point\n"
+ "Keep alive Timer\n"
+ "Seconds\n")
+{
+ return pim_process_rp_kat_cmd(vty, kat_str);
+}
+
+DEFUN (no_ip_pim_rp_keep_alive,
+ no_ip_pim_rp_keep_alive_cmd,
+ "no ip pim rp keep-alive-timer [(1-65535)]",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Rendezvous Point\n"
+ "Keep alive Timer\n"
+ IGNORED_IN_NO_STR)
+{
+ return pim_process_no_rp_kat_cmd(vty);
+}
+
+DEFPY (ip_pim_keep_alive,
+ ip_pim_keep_alive_cmd,
+ "ip pim keep-alive-timer (1-65535)$kat",
+ IP_STR
+ "pim multicast routing\n"
+ "Keep alive Timer\n"
+ "Seconds\n")
+{
+ return pim_process_keepalivetimer_cmd(vty, kat_str);
+}
+
+DEFUN (no_ip_pim_keep_alive,
+ no_ip_pim_keep_alive_cmd,
+ "no ip pim keep-alive-timer [(1-65535)]",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Keep alive Timer\n"
+ IGNORED_IN_NO_STR)
+{
+ return pim_process_no_keepalivetimer_cmd(vty);
+}
+
+DEFPY (ip_pim_packets,
+ ip_pim_packets_cmd,
+ "ip pim packets (1-255)",
+ IP_STR
+ "pim multicast routing\n"
+ "packets to process at one time per fd\n"
+ "Number of packets\n")
+{
+ return pim_process_pim_packet_cmd(vty, packets_str);
+}
+
+DEFUN (no_ip_pim_packets,
+ no_ip_pim_packets_cmd,
+ "no ip pim packets [(1-255)]",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "packets to process at one time per fd\n"
+ IGNORED_IN_NO_STR)
+{
+ return pim_process_no_pim_packet_cmd(vty);
+}
+
+DEFPY (ip_igmp_group_watermark,
+ ip_igmp_group_watermark_cmd,
+ "ip igmp watermark-warn (1-65535)$limit",
+ IP_STR
+ IGMP_STR
+ "Configure group limit for watermark warning\n"
+ "Group count to generate watermark warning\n")
+{
+ PIM_DECLVAR_CONTEXT_VRF(vrf, pim);
+ pim->gm_watermark_limit = limit;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (no_ip_igmp_group_watermark,
+ no_ip_igmp_group_watermark_cmd,
+ "no ip igmp watermark-warn [(1-65535)$limit]",
+ NO_STR
+ IP_STR
+ IGMP_STR
+ "Unconfigure group limit for watermark warning\n"
+ IGNORED_IN_NO_STR)
+{
+ PIM_DECLVAR_CONTEXT_VRF(vrf, pim);
+ pim->gm_watermark_limit = 0;
+
+ return CMD_SUCCESS;
+}
+
+DEFUN (ip_pim_v6_secondary,
+ ip_pim_v6_secondary_cmd,
+ "ip pim send-v6-secondary",
+ IP_STR
+ "pim multicast routing\n"
+ "Send v6 secondary addresses\n")
+{
+ const char *vrfname;
+ char send_v6_secondary_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(send_v6_secondary_xpath, sizeof(send_v6_secondary_xpath),
+ FRR_PIM_VRF_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4");
+ strlcat(send_v6_secondary_xpath, "/send-v6-secondary",
+ sizeof(send_v6_secondary_xpath));
+
+ nb_cli_enqueue_change(vty, send_v6_secondary_xpath, NB_OP_MODIFY,
+ "true");
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN (no_ip_pim_v6_secondary,
+ no_ip_pim_v6_secondary_cmd,
+ "no ip pim send-v6-secondary",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Send v6 secondary addresses\n")
+{
+ const char *vrfname;
+ char send_v6_secondary_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(send_v6_secondary_xpath, sizeof(send_v6_secondary_xpath),
+ FRR_PIM_VRF_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4");
+ strlcat(send_v6_secondary_xpath, "/send-v6-secondary",
+ sizeof(send_v6_secondary_xpath));
+
+ nb_cli_enqueue_change(vty, send_v6_secondary_xpath, NB_OP_MODIFY,
+ "false");
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY (ip_pim_rp,
+ ip_pim_rp_cmd,
+ "ip pim rp A.B.C.D$rp [A.B.C.D/M]$gp",
+ IP_STR
+ "pim multicast routing\n"
+ "Rendezvous Point\n"
+ "ip address of RP\n"
+ "Group Address range to cover\n")
+{
+ const char *group_str = (gp_str) ? gp_str : "224.0.0.0/4";
+
+ return pim_process_rp_cmd(vty, rp_str, group_str);
+}
+
+DEFPY (ip_pim_rp_prefix_list,
+ ip_pim_rp_prefix_list_cmd,
+ "ip pim rp A.B.C.D$rp prefix-list WORD$plist",
+ IP_STR
+ "pim multicast routing\n"
+ "Rendezvous Point\n"
+ "ip address of RP\n"
+ "group prefix-list filter\n"
+ "Name of a prefix-list\n")
+{
+ return pim_process_rp_plist_cmd(vty, rp_str, plist);
+}
+
+DEFPY (no_ip_pim_rp,
+ no_ip_pim_rp_cmd,
+ "no ip pim rp A.B.C.D$rp [A.B.C.D/M]$gp",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Rendezvous Point\n"
+ "ip address of RP\n"
+ "Group Address range to cover\n")
+{
+ const char *group_str = (gp_str) ? gp_str : "224.0.0.0/4";
+
+ return pim_process_no_rp_cmd(vty, rp_str, group_str);
+}
+
+DEFPY (no_ip_pim_rp_prefix_list,
+ no_ip_pim_rp_prefix_list_cmd,
+ "no ip pim rp A.B.C.D$rp prefix-list WORD$plist",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Rendezvous Point\n"
+ "ip address of RP\n"
+ "group prefix-list filter\n"
+ "Name of a prefix-list\n")
+{
+ return pim_process_no_rp_plist_cmd(vty, rp_str, plist);
+}
+
+/* "ip pim ssm prefix-list WORD" - set the prefix-list that defines the
+ * SSM group range via the northbound candidate config.
+ */
+DEFUN (ip_pim_ssm_prefix_list,
+ ip_pim_ssm_prefix_list_cmd,
+ "ip pim ssm prefix-list WORD",
+ IP_STR
+ "pim multicast routing\n"
+ "Source Specific Multicast\n"
+ "group range prefix-list filter\n"
+ "Name of a prefix-list\n")
+{
+ const char *vrfname;
+ char ssm_plist_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(ssm_plist_xpath, sizeof(ssm_plist_xpath), FRR_PIM_VRF_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4");
+ strlcat(ssm_plist_xpath, "/ssm-prefix-list", sizeof(ssm_plist_xpath));
+
+ /* argv[4] is the WORD token (prefix-list name) per the cmd string */
+ nb_cli_enqueue_change(vty, ssm_plist_xpath, NB_OP_MODIFY, argv[4]->arg);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN (no_ip_pim_ssm_prefix_list,
+ no_ip_pim_ssm_prefix_list_cmd,
+ "no ip pim ssm prefix-list",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Source Specific Multicast\n"
+ "group range prefix-list filter\n")
+{
+ /* Unconditionally remove the SSM group-range prefix-list for the
+  * current VRF (no name given; see the WORD variant for the checked
+  * form).
+  */
+ const char *vrfname;
+ char ssm_plist_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(ssm_plist_xpath, sizeof(ssm_plist_xpath),
+ FRR_PIM_VRF_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4");
+ strlcat(ssm_plist_xpath, "/ssm-prefix-list", sizeof(ssm_plist_xpath));
+
+ nb_cli_enqueue_change(vty, ssm_plist_xpath, NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN (no_ip_pim_ssm_prefix_list_name,
+ no_ip_pim_ssm_prefix_list_name_cmd,
+ "no ip pim ssm prefix-list WORD",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Source Specific Multicast\n"
+ "group range prefix-list filter\n"
+ "Name of a prefix-list\n")
+{
+ /* Remove the SSM group-range prefix-list only if the configured name
+  * matches the one given on the command line (argv[5]).
+  */
+ const char *vrfname;
+ const struct lyd_node *ssm_plist_dnode;
+ char ssm_plist_xpath[XPATH_MAXLEN];
+ const char *ssm_plist_name;
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(ssm_plist_xpath, sizeof(ssm_plist_xpath),
+ FRR_PIM_VRF_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4");
+ strlcat(ssm_plist_xpath, "/ssm-prefix-list", sizeof(ssm_plist_xpath));
+ /* Look up the leaf in the candidate config (not the running config)
+  * so uncommitted changes are taken into account.
+  */
+ ssm_plist_dnode = yang_dnode_get(vty->candidate_config->dnode,
+ ssm_plist_xpath);
+
+ if (!ssm_plist_dnode) {
+ vty_out(vty,
+ "%% pim ssm prefix-list %s doesn't exist\n",
+ argv[5]->arg);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ ssm_plist_name = yang_dnode_get_string(ssm_plist_dnode, ".");
+
+ if (ssm_plist_name && !strcmp(ssm_plist_name, argv[5]->arg)) {
+ nb_cli_enqueue_change(vty, ssm_plist_xpath, NB_OP_DESTROY,
+ NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+ }
+
+ /* Leaf exists but holds a different name: refuse the removal. */
+ vty_out(vty, "%% pim ssm prefix-list %s doesn't exist\n", argv[5]->arg);
+
+ return CMD_WARNING_CONFIG_FAILED;
+}
+
+DEFUN (show_ip_pim_ssm_range,
+ show_ip_pim_ssm_range_cmd,
+ "show ip pim [vrf NAME] group-type [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM group type\n"
+ JSON_STR)
+{
+ int idx = 2;
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ ip_pim_ssm_show_group_range(vrf->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+/* Classify a group address string as "SSM", "ASM", "not-multicast" or
+ * "invalid" and print the result, as JSON when requested.
+ */
+static void ip_pim_ssm_show_group_type(struct pim_instance *pim,
+ struct vty *vty, bool uj,
+ const char *group)
+{
+ struct in_addr grp;
+ const char *type_str = "invalid";
+
+ if (inet_pton(AF_INET, group, &grp) > 0) {
+ if (!pim_is_group_224_4(grp))
+ type_str = "not-multicast";
+ else
+ type_str = pim_is_grp_ssm(pim, grp) ? "SSM" : "ASM";
+ }
+
+ if (uj) {
+ json_object *json = json_object_new_object();
+
+ json_object_string_add(json, "groupType", type_str);
+ vty_json(vty, json);
+ } else
+ vty_out(vty, "Group type : %s\n", type_str);
+}
+
+DEFUN (show_ip_pim_group_type,
+ show_ip_pim_group_type_cmd,
+ "show ip pim [vrf NAME] group-type A.B.C.D [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "multicast group type\n"
+ "group address\n"
+ JSON_STR)
+{
+ int idx = 2;
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ argv_find(argv, argc, "A.B.C.D", &idx);
+ ip_pim_ssm_show_group_type(vrf->info, vty, uj, argv[idx]->arg);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (show_ip_pim_bsr,
+ show_ip_pim_bsr_cmd,
+ "show ip pim bsr [vrf NAME] [json$json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ "boot-strap router information\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ return pim_show_bsr_helper(vrf, vty, !!json);
+}
+
+DEFUN (ip_ssmpingd,
+ ip_ssmpingd_cmd,
+ "ip ssmpingd [A.B.C.D]",
+ IP_STR
+ CONF_SSMPINGD_STR
+ "Source address\n")
+{
+ /* Optional source address; argc == 3 means it was supplied, otherwise
+  * default to the wildcard 0.0.0.0.
+  */
+ int idx_ipv4 = 2;
+ const char *src_str = (argc == 3) ? argv[idx_ipv4]->arg : "0.0.0.0";
+
+ return pim_process_ssmpingd_cmd(vty, NB_OP_CREATE, src_str);
+}
+
+DEFUN (no_ip_ssmpingd,
+ no_ip_ssmpingd_cmd,
+ "no ip ssmpingd [A.B.C.D]",
+ NO_STR
+ IP_STR
+ CONF_SSMPINGD_STR
+ "Source address\n")
+{
+ int idx_ipv4 = 3;
+ const char *src_str = (argc == 4) ? argv[idx_ipv4]->arg : "0.0.0.0";
+
+ return pim_process_ssmpingd_cmd(vty, NB_OP_DESTROY, src_str);
+}
+
+DEFUN (ip_pim_ecmp,
+ ip_pim_ecmp_cmd,
+ "ip pim ecmp",
+ IP_STR
+ "pim multicast routing\n"
+ "Enable PIM ECMP \n")
+{
+ const char *vrfname;
+ char ecmp_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(ecmp_xpath, sizeof(ecmp_xpath), FRR_PIM_VRF_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4");
+ strlcat(ecmp_xpath, "/ecmp", sizeof(ecmp_xpath));
+
+ nb_cli_enqueue_change(vty, ecmp_xpath, NB_OP_MODIFY, "true");
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN (no_ip_pim_ecmp,
+ no_ip_pim_ecmp_cmd,
+ "no ip pim ecmp",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Disable PIM ECMP \n")
+{
+ const char *vrfname;
+ char ecmp_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(ecmp_xpath, sizeof(ecmp_xpath), FRR_PIM_VRF_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4");
+ strlcat(ecmp_xpath, "/ecmp", sizeof(ecmp_xpath));
+
+ nb_cli_enqueue_change(vty, ecmp_xpath, NB_OP_MODIFY, "false");
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN (ip_pim_ecmp_rebalance,
+ ip_pim_ecmp_rebalance_cmd,
+ "ip pim ecmp rebalance",
+ IP_STR
+ "pim multicast routing\n"
+ "Enable PIM ECMP \n"
+ "Enable PIM ECMP Rebalance\n")
+{
+ const char *vrfname;
+ char ecmp_xpath[XPATH_MAXLEN];
+ char ecmp_rebalance_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(ecmp_xpath, sizeof(ecmp_xpath), FRR_PIM_VRF_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4");
+ strlcat(ecmp_xpath, "/ecmp", sizeof(ecmp_xpath));
+ snprintf(ecmp_rebalance_xpath, sizeof(ecmp_rebalance_xpath),
+ FRR_PIM_VRF_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4");
+ strlcat(ecmp_rebalance_xpath, "/ecmp-rebalance",
+ sizeof(ecmp_rebalance_xpath));
+
+ nb_cli_enqueue_change(vty, ecmp_xpath, NB_OP_MODIFY, "true");
+ nb_cli_enqueue_change(vty, ecmp_rebalance_xpath, NB_OP_MODIFY, "true");
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN (no_ip_pim_ecmp_rebalance,
+ no_ip_pim_ecmp_rebalance_cmd,
+ "no ip pim ecmp rebalance",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Disable PIM ECMP \n"
+ "Disable PIM ECMP Rebalance\n")
+{
+ const char *vrfname;
+ char ecmp_rebalance_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(ecmp_rebalance_xpath, sizeof(ecmp_rebalance_xpath),
+ FRR_PIM_VRF_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4");
+ strlcat(ecmp_rebalance_xpath, "/ecmp-rebalance",
+ sizeof(ecmp_rebalance_xpath));
+
+ nb_cli_enqueue_change(vty, ecmp_rebalance_xpath, NB_OP_MODIFY, "false");
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN (interface_ip_igmp,
+ interface_ip_igmp_cmd,
+ "ip igmp",
+ IP_STR
+ IFACE_IGMP_STR)
+{
+ /* Enable IGMP on the current interface: set the GMP "enable" leaf
+  * under the interface's IPv4 address-family node.
+  */
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true");
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv4");
+}
+
+DEFUN (interface_no_ip_igmp,
+ interface_no_ip_igmp_cmd,
+ "no ip igmp",
+ NO_STR
+ IP_STR
+ IFACE_IGMP_STR)
+{
+ /* Disable IGMP on the interface.  If PIM is not enabled here either,
+  * tear down the whole per-interface IPv4 PIM address-family node and
+  * the GMP node; otherwise only flip the IGMP "enable" leaf to false so
+  * the PIM configuration survives.
+  */
+ const struct lyd_node *pim_enable_dnode;
+ char pim_if_xpath[XPATH_MAXLEN];
+
+ int printed =
+ snprintf(pim_if_xpath, sizeof(pim_if_xpath),
+ "%s/frr-pim:pim/address-family[address-family='%s']",
+ VTY_CURR_XPATH, "frr-routing:ipv4");
+
+ /* snprintf reports the length it wanted to write; >= buffer size means
+  * the xpath was truncated and must not be used.
+  */
+ if (printed >= (int)(sizeof(pim_if_xpath))) {
+ vty_out(vty, "Xpath too long (%d > %u)", printed + 1,
+ XPATH_MAXLEN);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH,
+ "frr-routing:ipv4");
+ if (!pim_enable_dnode) {
+ /* No PIM node at all: destroy both PIM and GMP subtrees. */
+ nb_cli_enqueue_change(vty, pim_if_xpath, NB_OP_DESTROY, NULL);
+ nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
+ } else {
+ if (!yang_dnode_get_bool(pim_enable_dnode, ".")) {
+ /* PIM node exists but is disabled: same teardown. */
+ nb_cli_enqueue_change(vty, pim_if_xpath, NB_OP_DESTROY,
+ NULL);
+ nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
+ } else
+ /* PIM is active: only switch IGMP off. */
+ nb_cli_enqueue_change(vty, "./enable",
+ NB_OP_MODIFY, "false");
+ }
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv4");
+}
+
+DEFUN (interface_ip_igmp_join,
+ interface_ip_igmp_join_cmd,
+ "ip igmp join A.B.C.D [A.B.C.D]",
+ IP_STR
+ IFACE_IGMP_STR
+ "IGMP join multicast group\n"
+ "Multicast group address\n"
+ "Source address\n")
+{
+ /* Statically join a multicast group on this interface, optionally
+  * source-specific.  argc == 5 means the source token was supplied.
+  */
+ int idx_group = 3;
+ int idx_source = 4;
+ const char *source_str;
+ char xpath[XPATH_MAXLEN];
+
+ if (argc == 5) {
+ source_str = argv[idx_source]->arg;
+
+ /* 0.0.0.0 is reserved as the "any source" wildcard below, so
+  * reject it as an explicit source.
+  */
+ if (strcmp(source_str, "0.0.0.0") == 0) {
+ vty_out(vty, "Bad source address %s\n",
+ argv[idx_source]->arg);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ } else
+ source_str = "0.0.0.0";
+
+ snprintf(xpath, sizeof(xpath), FRR_GMP_JOIN_XPATH,
+ "frr-routing:ipv4", argv[idx_group]->arg, source_str);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN (interface_no_ip_igmp_join,
+ interface_no_ip_igmp_join_cmd,
+ "no ip igmp join A.B.C.D [A.B.C.D]",
+ NO_STR
+ IP_STR
+ IFACE_IGMP_STR
+ "IGMP join multicast group\n"
+ "Multicast group address\n"
+ "Source address\n")
+{
+ int idx_group = 4;
+ int idx_source = 5;
+ const char *source_str;
+ char xpath[XPATH_MAXLEN];
+
+ if (argc == 6) {
+ source_str = argv[idx_source]->arg;
+
+ if (strcmp(source_str, "0.0.0.0") == 0) {
+ vty_out(vty, "Bad source address %s\n",
+ argv[idx_source]->arg);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ } else
+ source_str = "0.0.0.0";
+
+ snprintf(xpath, sizeof(xpath), FRR_GMP_JOIN_XPATH,
+ "frr-routing:ipv4", argv[idx_group]->arg, source_str);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN (interface_ip_igmp_query_interval,
+ interface_ip_igmp_query_interval_cmd,
+ "ip igmp query-interval (1-65535)",
+ IP_STR
+ IFACE_IGMP_STR
+ IFACE_IGMP_QUERY_INTERVAL_STR
+ "Query interval in seconds\n")
+{
+ const struct lyd_node *pim_enable_dnode;
+
+ pim_enable_dnode =
+ yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH,
+ "frr-routing:ipv4");
+ if (!pim_enable_dnode) {
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY,
+ "true");
+ } else {
+ if (!yang_dnode_get_bool(pim_enable_dnode, "."))
+ nb_cli_enqueue_change(vty, "./enable",
+ NB_OP_MODIFY, "true");
+ }
+
+ nb_cli_enqueue_change(vty, "./query-interval", NB_OP_MODIFY,
+ argv[3]->arg);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv4");
+}
+
+DEFUN (interface_no_ip_igmp_query_interval,
+ interface_no_ip_igmp_query_interval_cmd,
+ "no ip igmp query-interval [(1-65535)]",
+ NO_STR
+ IP_STR
+ IFACE_IGMP_STR
+ IFACE_IGMP_QUERY_INTERVAL_STR
+ IGNORED_IN_NO_STR)
+{
+ nb_cli_enqueue_change(vty, "./query-interval", NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv4");
+}
+
+DEFUN (interface_ip_igmp_version,
+ interface_ip_igmp_version_cmd,
+ "ip igmp version (2-3)",
+ IP_STR
+ IFACE_IGMP_STR
+ "IGMP version\n"
+ "IGMP version number\n")
+{
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY,
+ "true");
+ nb_cli_enqueue_change(vty, "./igmp-version", NB_OP_MODIFY,
+ argv[3]->arg);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv4");
+}
+
+DEFUN (interface_no_ip_igmp_version,
+ interface_no_ip_igmp_version_cmd,
+ "no ip igmp version (2-3)",
+ NO_STR
+ IP_STR
+ IFACE_IGMP_STR
+ "IGMP version\n"
+ "IGMP version number\n")
+{
+ nb_cli_enqueue_change(vty, "./igmp-version", NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv4");
+}
+
+DEFPY (interface_ip_igmp_query_max_response_time,
+ interface_ip_igmp_query_max_response_time_cmd,
+ "ip igmp query-max-response-time (1-65535)$qmrt",
+ IP_STR
+ IFACE_IGMP_STR
+ IFACE_IGMP_QUERY_MAX_RESPONSE_TIME_STR
+ "Query response value in deci-seconds\n")
+{
+ return gm_process_query_max_response_time_cmd(vty, qmrt_str);
+}
+
+DEFUN (interface_no_ip_igmp_query_max_response_time,
+ interface_no_ip_igmp_query_max_response_time_cmd,
+ "no ip igmp query-max-response-time [(1-65535)]",
+ NO_STR
+ IP_STR
+ IFACE_IGMP_STR
+ IFACE_IGMP_QUERY_MAX_RESPONSE_TIME_STR
+ IGNORED_IN_NO_STR)
+{
+ return gm_process_no_query_max_response_time_cmd(vty);
+}
+
+DEFUN_HIDDEN (interface_ip_igmp_query_max_response_time_dsec,
+ interface_ip_igmp_query_max_response_time_dsec_cmd,
+ "ip igmp query-max-response-time-dsec (1-65535)",
+ IP_STR
+ IFACE_IGMP_STR
+ IFACE_IGMP_QUERY_MAX_RESPONSE_TIME_DSEC_STR
+ "Query response value in deciseconds\n")
+{
+ const struct lyd_node *pim_enable_dnode;
+
+ pim_enable_dnode =
+ yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH,
+ "frr-routing:ipv4");
+ if (!pim_enable_dnode) {
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY,
+ "true");
+ } else {
+ if (!yang_dnode_get_bool(pim_enable_dnode, "."))
+ nb_cli_enqueue_change(vty, "./enable",
+ NB_OP_MODIFY, "true");
+ }
+
+ nb_cli_enqueue_change(vty, "./query-max-response-time", NB_OP_MODIFY,
+ argv[3]->arg);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv4");
+}
+
+DEFUN_HIDDEN (interface_no_ip_igmp_query_max_response_time_dsec,
+ interface_no_ip_igmp_query_max_response_time_dsec_cmd,
+ "no ip igmp query-max-response-time-dsec [(1-65535)]",
+ NO_STR
+ IP_STR
+ IFACE_IGMP_STR
+ IFACE_IGMP_QUERY_MAX_RESPONSE_TIME_DSEC_STR
+ IGNORED_IN_NO_STR)
+{
+ nb_cli_enqueue_change(vty, "./query-max-response-time", NB_OP_DESTROY,
+ NULL);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv4");
+}
+
+DEFPY (interface_ip_igmp_last_member_query_count,
+ interface_ip_igmp_last_member_query_count_cmd,
+ "ip igmp last-member-query-count (1-255)$lmqc",
+ IP_STR
+ IFACE_IGMP_STR
+ IFACE_IGMP_LAST_MEMBER_QUERY_COUNT_STR
+ "Last member query count\n")
+{
+ return gm_process_last_member_query_count_cmd(vty, lmqc_str);
+}
+
+DEFUN (interface_no_ip_igmp_last_member_query_count,
+ interface_no_ip_igmp_last_member_query_count_cmd,
+ "no ip igmp last-member-query-count [(1-255)]",
+ NO_STR
+ IP_STR
+ IFACE_IGMP_STR
+ IFACE_IGMP_LAST_MEMBER_QUERY_COUNT_STR
+ IGNORED_IN_NO_STR)
+{
+ return gm_process_no_last_member_query_count_cmd(vty);
+}
+
+DEFPY (interface_ip_igmp_last_member_query_interval,
+ interface_ip_igmp_last_member_query_interval_cmd,
+ "ip igmp last-member-query-interval (1-65535)$lmqi",
+ IP_STR
+ IFACE_IGMP_STR
+ IFACE_IGMP_LAST_MEMBER_QUERY_INTERVAL_STR
+ "Last member query interval in deciseconds\n")
+{
+ return gm_process_last_member_query_interval_cmd(vty, lmqi_str);
+}
+
+DEFUN (interface_no_ip_igmp_last_member_query_interval,
+ interface_no_ip_igmp_last_member_query_interval_cmd,
+ "no ip igmp last-member-query-interval [(1-65535)]",
+ NO_STR
+ IP_STR
+ IFACE_IGMP_STR
+ IFACE_IGMP_LAST_MEMBER_QUERY_INTERVAL_STR
+ IGNORED_IN_NO_STR)
+{
+ return gm_process_no_last_member_query_interval_cmd(vty);
+}
+
+DEFUN (interface_ip_pim_drprio,
+ interface_ip_pim_drprio_cmd,
+ "ip pim drpriority (1-4294967295)",
+ IP_STR
+ PIM_STR
+ "Set the Designated Router Election Priority\n"
+ "Value of the new DR Priority\n")
+{
+ int idx_number = 3;
+
+ return pim_process_ip_pim_drprio_cmd(vty, argv[idx_number]->arg);
+}
+
+DEFUN (interface_no_ip_pim_drprio,
+ interface_no_ip_pim_drprio_cmd,
+ "no ip pim drpriority [(1-4294967295)]",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Revert the Designated Router Priority to default\n"
+ "Old Value of the Priority\n")
+{
+ return pim_process_no_ip_pim_drprio_cmd(vty);
+}
+
+DEFPY_HIDDEN (interface_ip_igmp_query_generate,
+ interface_ip_igmp_query_generate_cmd,
+ "ip igmp generate-query-once [version (2-3)]",
+ IP_STR
+ IFACE_IGMP_STR
+ "Generate igmp general query once\n"
+ "IGMP version\n"
+ "IGMP version number\n")
+{
+ /* Hidden operational command: emit one IGMP general query on the
+  * interface.  IGMP is IPv4-only, hence the PIM_IPV guard.
+  */
+#if PIM_IPV == 4
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ int igmp_version;
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!ifp->info) {
+ vty_out(vty, "IGMP/PIM is not enabled on the interface %s\n",
+ ifp->name);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /* It takes the igmp version configured on the interface as default */
+ igmp_version = pim_ifp->igmp_version;
+
+ /* argc > 3 means the optional "version (2-3)" tokens are present;
+  * argv[4] is then the numeric version.
+  */
+ if (argc > 3)
+ igmp_version = atoi(argv[4]->arg);
+
+ igmp_send_query_on_intf(ifp, igmp_version);
+#endif
+ return CMD_SUCCESS;
+}
+
+DEFPY_HIDDEN (pim_test_sg_keepalive,
+ pim_test_sg_keepalive_cmd,
+ "test pim [vrf NAME$name] keepalive-reset A.B.C.D$source A.B.C.D$group",
+ "Test code\n"
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "Reset the Keepalive Timer\n"
+ "The Source we are resetting\n"
+ "The Group we are resetting\n")
+{
+ /* Hidden test command: restart the (S,G) keepalive timer using the
+  * PIM instance's configured keep-alive time.
+  */
+ struct pim_upstream *up;
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ pim_sgaddr sg;
+ const char *vrfname = name ? name : VRF_DEFAULT_NAME;
+
+ sg.src = source;
+ sg.grp = group;
+
+ vrf = vrf_lookup_by_name(vrfname);
+ if (!vrf) {
+ /* Print the resolved name: when "vrf NAME" is omitted, "name"
+  * is NULL and passing it to %s would be undefined behavior.
+  */
+ vty_out(vty, "%% Vrf specified: %s does not exist\n", vrfname);
+ return CMD_WARNING;
+ }
+
+ pim = vrf->info;
+
+ if (!pim) {
+ vty_out(vty, "%% Unable to find pim instance\n");
+ return CMD_WARNING;
+ }
+
+ up = pim_upstream_find(pim, &sg);
+ if (!up) {
+ vty_out(vty, "%% Unable to find %pSG specified\n", &sg);
+ return CMD_WARNING;
+ }
+
+ vty_out(vty, "Setting %pSG to current keep alive time: %d\n", &sg,
+ pim->keep_alive_time);
+ pim_upstream_keep_alive_timer_start(up, pim->keep_alive_time);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (interface_ip_pim_activeactive,
+ interface_ip_pim_activeactive_cmd,
+ "[no$no] ip pim active-active",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Mark interface as Active-Active for MLAG operations, Hidden because not finished yet\n")
+{
+ return pim_process_ip_pim_activeactive_cmd(vty, no);
+}
+
+DEFUN_HIDDEN (interface_ip_pim_ssm,
+ interface_ip_pim_ssm_cmd,
+ "ip pim ssm",
+ IP_STR
+ PIM_STR
+ IFACE_PIM_STR)
+{
+ /* Deprecated/hidden form: enables plain PIM SM on the interface and
+  * warns that an SSM range must be configured separately.
+  */
+ int ret;
+
+ ret = pim_process_ip_pim_cmd(vty);
+
+ /* pim_process_ip_pim_cmd() returns CLI (CMD_*) codes, so compare and
+  * return in the CMD_* namespace rather than mixing in NB_OK (which
+  * only happened to work because both success codes are 0).
+  */
+ if (ret != CMD_SUCCESS)
+ return ret;
+
+ vty_out(vty,
+ "WARN: Enabled PIM SM on interface; configure PIM SSM range if needed\n");
+
+ return CMD_SUCCESS;
+}
+
+DEFUN_HIDDEN (interface_ip_pim_sm,
+ interface_ip_pim_sm_cmd,
+ "ip pim sm",
+ IP_STR
+ PIM_STR
+ IFACE_PIM_SM_STR)
+{
+ return pim_process_ip_pim_cmd(vty);
+}
+
+DEFPY (interface_ip_pim,
+ interface_ip_pim_cmd,
+ "ip pim [passive$passive]",
+ IP_STR
+ PIM_STR
+ "Disable exchange of protocol packets\n")
+{
+ int ret;
+
+ ret = pim_process_ip_pim_cmd(vty);
+
+ if (ret != NB_OK)
+ return ret;
+
+ if (passive)
+ return pim_process_ip_pim_passive_cmd(vty, true);
+
+ return CMD_SUCCESS;
+}
+
+DEFUN_HIDDEN (interface_no_ip_pim_ssm,
+ interface_no_ip_pim_ssm_cmd,
+ "no ip pim ssm",
+ NO_STR
+ IP_STR
+ PIM_STR
+ IFACE_PIM_STR)
+{
+ return pim_process_no_ip_pim_cmd(vty);
+}
+
+DEFUN_HIDDEN (interface_no_ip_pim_sm,
+ interface_no_ip_pim_sm_cmd,
+ "no ip pim sm",
+ NO_STR
+ IP_STR
+ PIM_STR
+ IFACE_PIM_SM_STR)
+{
+ return pim_process_no_ip_pim_cmd(vty);
+}
+
+DEFPY (interface_no_ip_pim,
+ interface_no_ip_pim_cmd,
+ "no ip pim [passive$passive]",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Disable exchange of protocol packets\n")
+{
+ if (passive)
+ return pim_process_ip_pim_passive_cmd(vty, false);
+
+ return pim_process_no_ip_pim_cmd(vty);
+}
+
+/* boundaries */
+DEFUN(interface_ip_pim_boundary_oil,
+ interface_ip_pim_boundary_oil_cmd,
+ "ip multicast boundary oil WORD",
+ IP_STR
+ "Generic multicast configuration options\n"
+ "Define multicast boundary\n"
+ "Filter OIL by group using prefix list\n"
+ "Prefix list to filter OIL with\n")
+{
+ return pim_process_ip_pim_boundary_oil_cmd(vty, argv[4]->arg);
+}
+
+DEFUN(interface_no_ip_pim_boundary_oil,
+ interface_no_ip_pim_boundary_oil_cmd,
+ "no ip multicast boundary oil [WORD]",
+ NO_STR
+ IP_STR
+ "Generic multicast configuration options\n"
+ "Define multicast boundary\n"
+ "Filter OIL by group using prefix list\n"
+ "Prefix list to filter OIL with\n")
+{
+ return pim_process_no_ip_pim_boundary_oil_cmd(vty);
+}
+
+DEFUN (interface_ip_mroute,
+ interface_ip_mroute_cmd,
+ "ip mroute INTERFACE A.B.C.D [A.B.C.D]",
+ IP_STR
+ "Add multicast route\n"
+ "Outgoing interface name\n"
+ "Group address\n"
+ "Source address\n")
+{
+ /* Static mroute: argv[2] is the OIF, argv[3] the group, and the
+  * optional argv[4] the source (0.0.0.0 = any source when omitted).
+  */
+ int idx_interface = 2;
+ int idx_ipv4 = 3;
+ const char *source_str;
+
+ if (argc == (idx_ipv4 + 1))
+ source_str = "0.0.0.0";
+ else
+ source_str = argv[idx_ipv4 + 1]->arg;
+
+ return pim_process_ip_mroute_cmd(vty, argv[idx_interface]->arg,
+ argv[idx_ipv4]->arg, source_str);
+}
+
+DEFUN (interface_no_ip_mroute,
+ interface_no_ip_mroute_cmd,
+ "no ip mroute INTERFACE A.B.C.D [A.B.C.D]",
+ NO_STR
+ IP_STR
+ "Add multicast route\n"
+ "Outgoing interface name\n"
+ "Group Address\n"
+ "Source Address\n")
+{
+ int idx_interface = 3;
+ int idx_ipv4 = 4;
+ const char *source_str;
+
+ if (argc == (idx_ipv4 + 1))
+ source_str = "0.0.0.0";
+ else
+ source_str = argv[idx_ipv4 + 1]->arg;
+
+ return pim_process_no_ip_mroute_cmd(vty, argv[idx_interface]->arg,
+ argv[idx_ipv4]->arg, source_str);
+}
+
+DEFUN (interface_ip_pim_hello,
+ interface_ip_pim_hello_cmd,
+ "ip pim hello (1-65535) [(1-65535)]",
+ IP_STR
+ PIM_STR
+ IFACE_PIM_HELLO_STR
+ IFACE_PIM_HELLO_TIME_STR
+ IFACE_PIM_HELLO_HOLD_STR)
+{
+ int idx_time = 3;
+ int idx_hold = 4;
+
+ if (argc == idx_hold + 1)
+ return pim_process_ip_pim_hello_cmd(vty, argv[idx_time]->arg,
+ argv[idx_hold]->arg);
+
+ else
+ return pim_process_ip_pim_hello_cmd(vty, argv[idx_time]->arg,
+ NULL);
+}
+
+DEFUN (interface_no_ip_pim_hello,
+ interface_no_ip_pim_hello_cmd,
+ "no ip pim hello [(1-65535) [(1-65535)]]",
+ NO_STR
+ IP_STR
+ PIM_STR
+ IFACE_PIM_HELLO_STR
+ IGNORED_IN_NO_STR
+ IGNORED_IN_NO_STR)
+{
+ return pim_process_no_ip_pim_hello_cmd(vty);
+}
+
+DEFUN (debug_igmp,
+ debug_igmp_cmd,
+ "debug igmp",
+ DEBUG_STR
+ DEBUG_IGMP_STR)
+{
+ /* Umbrella toggle: turn on all three IGMP (GM) debug categories. */
+ PIM_DO_DEBUG_GM_EVENTS;
+ PIM_DO_DEBUG_GM_PACKETS;
+ PIM_DO_DEBUG_GM_TRACE;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_igmp,
+ no_debug_igmp_cmd,
+ "no debug igmp",
+ NO_STR
+ DEBUG_STR
+ DEBUG_IGMP_STR)
+{
+ /* Umbrella toggle: turn off all three IGMP (GM) debug categories. */
+ PIM_DONT_DEBUG_GM_EVENTS;
+ PIM_DONT_DEBUG_GM_PACKETS;
+ PIM_DONT_DEBUG_GM_TRACE;
+ return CMD_SUCCESS;
+}
+
+
+DEFUN (debug_igmp_events,
+ debug_igmp_events_cmd,
+ "debug igmp events",
+ DEBUG_STR
+ DEBUG_IGMP_STR
+ DEBUG_IGMP_EVENTS_STR)
+{
+ PIM_DO_DEBUG_GM_EVENTS;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_igmp_events,
+ no_debug_igmp_events_cmd,
+ "no debug igmp events",
+ NO_STR
+ DEBUG_STR
+ DEBUG_IGMP_STR
+ DEBUG_IGMP_EVENTS_STR)
+{
+ PIM_DONT_DEBUG_GM_EVENTS;
+ return CMD_SUCCESS;
+}
+
+
+DEFUN (debug_igmp_packets,
+ debug_igmp_packets_cmd,
+ "debug igmp packets",
+ DEBUG_STR
+ DEBUG_IGMP_STR
+ DEBUG_IGMP_PACKETS_STR)
+{
+ PIM_DO_DEBUG_GM_PACKETS;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_igmp_packets,
+ no_debug_igmp_packets_cmd,
+ "no debug igmp packets",
+ NO_STR
+ DEBUG_STR
+ DEBUG_IGMP_STR
+ DEBUG_IGMP_PACKETS_STR)
+{
+ PIM_DONT_DEBUG_GM_PACKETS;
+ return CMD_SUCCESS;
+}
+
+
+DEFUN (debug_igmp_trace,
+ debug_igmp_trace_cmd,
+ "debug igmp trace",
+ DEBUG_STR
+ DEBUG_IGMP_STR
+ DEBUG_IGMP_TRACE_STR)
+{
+ PIM_DO_DEBUG_GM_TRACE;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_igmp_trace,
+ no_debug_igmp_trace_cmd,
+ "no debug igmp trace",
+ NO_STR
+ DEBUG_STR
+ DEBUG_IGMP_STR
+ DEBUG_IGMP_TRACE_STR)
+{
+ PIM_DONT_DEBUG_GM_TRACE;
+ return CMD_SUCCESS;
+}
+
+
+DEFUN (debug_igmp_trace_detail,
+ debug_igmp_trace_detail_cmd,
+ "debug igmp trace detail",
+ DEBUG_STR
+ DEBUG_IGMP_STR
+ DEBUG_IGMP_TRACE_STR
+ "detailed\n")
+{
+ PIM_DO_DEBUG_GM_TRACE_DETAIL;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_igmp_trace_detail,
+ no_debug_igmp_trace_detail_cmd,
+ "no debug igmp trace detail",
+ NO_STR
+ DEBUG_STR
+ DEBUG_IGMP_STR
+ DEBUG_IGMP_TRACE_STR
+ "detailed\n")
+{
+ PIM_DONT_DEBUG_GM_TRACE_DETAIL;
+ return CMD_SUCCESS;
+}
+
+
+DEFUN (debug_mroute,
+ debug_mroute_cmd,
+ "debug mroute",
+ DEBUG_STR
+ DEBUG_MROUTE_STR)
+{
+ PIM_DO_DEBUG_MROUTE;
+ return CMD_SUCCESS;
+}
+
+DEFUN (debug_mroute_detail,
+ debug_mroute_detail_cmd,
+ "debug mroute detail",
+ DEBUG_STR
+ DEBUG_MROUTE_STR
+ "detailed\n")
+{
+ PIM_DO_DEBUG_MROUTE_DETAIL;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_mroute,
+ no_debug_mroute_cmd,
+ "no debug mroute",
+ NO_STR
+ DEBUG_STR
+ DEBUG_MROUTE_STR)
+{
+ PIM_DONT_DEBUG_MROUTE;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_mroute_detail,
+ no_debug_mroute_detail_cmd,
+ "no debug mroute detail",
+ NO_STR
+ DEBUG_STR
+ DEBUG_MROUTE_STR
+ "detailed\n")
+{
+ PIM_DONT_DEBUG_MROUTE_DETAIL;
+ return CMD_SUCCESS;
+}
+
+DEFUN (debug_pim_static,
+ debug_pim_static_cmd,
+ "debug pim static",
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_STATIC_STR)
+{
+ PIM_DO_DEBUG_STATIC;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_pim_static,
+ no_debug_pim_static_cmd,
+ "no debug pim static",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_STATIC_STR)
+{
+ PIM_DONT_DEBUG_STATIC;
+ return CMD_SUCCESS;
+}
+
+
+DEFPY (debug_pim,
+ debug_pim_cmd,
+ "[no] debug pim",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR)
+{
+ /* Dispatch the aggregate "debug pim" knob based on the [no] prefix. */
+ return no ? pim_no_debug_pim_cmd() : pim_debug_pim_cmd();
+}
+
+DEFPY (debug_pim_nht,
+ debug_pim_nht_cmd,
+ "[no] debug pim nht",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ "Nexthop Tracking\n")
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_NHT;
+ else
+ PIM_DONT_DEBUG_PIM_NHT;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pim_nht_det,
+ debug_pim_nht_det_cmd,
+ "[no] debug pim nht detail",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ "Nexthop Tracking\n"
+ "Detailed Information\n")
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_NHT_DETAIL;
+ else
+ PIM_DONT_DEBUG_PIM_NHT_DETAIL;
+ return CMD_SUCCESS;
+}
+
+DEFUN (debug_pim_nht_rp,
+ debug_pim_nht_rp_cmd,
+ "debug pim nht rp",
+ DEBUG_STR
+ DEBUG_PIM_STR
+ "Nexthop Tracking\n"
+ "RP Nexthop Tracking\n")
+{
+ PIM_DO_DEBUG_PIM_NHT_RP;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_pim_nht_rp,
+ no_debug_pim_nht_rp_cmd,
+ "no debug pim nht rp",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ "Nexthop Tracking\n"
+ "RP Nexthop Tracking\n")
+{
+ PIM_DONT_DEBUG_PIM_NHT_RP;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pim_events,
+ debug_pim_events_cmd,
+ "[no] debug pim events",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_EVENTS_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_EVENTS;
+ else
+ PIM_DONT_DEBUG_PIM_EVENTS;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pim_packets,
+ debug_pim_packets_cmd,
+ "[no] debug pim packets [<hello$hello|joins$joins|register$registers>]",
+ NO_STR DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_PACKETS_STR
+ DEBUG_PIM_HELLO_PACKETS_STR
+ DEBUG_PIM_J_P_PACKETS_STR
+ DEBUG_PIM_PIM_REG_PACKETS_STR)
+{
+ if (!no)
+ return pim_debug_pim_packets_cmd(hello, joins, registers, vty);
+ else
+ return pim_no_debug_pim_packets_cmd(hello, joins, registers,
+ vty);
+}
+
+DEFPY (debug_pim_packetdump_send,
+ debug_pim_packetdump_send_cmd,
+ "[no] debug pim packet-dump send",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_PACKETDUMP_STR
+ DEBUG_PIM_PACKETDUMP_SEND_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_PACKETDUMP_SEND;
+ else
+ PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pim_packetdump_recv,
+ debug_pim_packetdump_recv_cmd,
+ "[no] debug pim packet-dump receive",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_PACKETDUMP_STR
+ DEBUG_PIM_PACKETDUMP_RECV_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_PACKETDUMP_RECV;
+ else
+ PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pim_trace,
+ debug_pim_trace_cmd,
+ "[no] debug pim trace",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_TRACE_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_TRACE;
+ else
+ PIM_DONT_DEBUG_PIM_TRACE;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pim_trace_detail,
+ debug_pim_trace_detail_cmd,
+ "[no] debug pim trace detail",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_TRACE_STR
+ "Detailed Information\n")
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_TRACE_DETAIL;
+ else
+ PIM_DONT_DEBUG_PIM_TRACE_DETAIL;
+ return CMD_SUCCESS;
+}
+
+DEFUN (debug_ssmpingd,
+ debug_ssmpingd_cmd,
+ "debug ssmpingd",
+ DEBUG_STR
+ DEBUG_SSMPINGD_STR)
+{
+ PIM_DO_DEBUG_SSMPINGD;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_ssmpingd,
+ no_debug_ssmpingd_cmd,
+ "no debug ssmpingd",
+ NO_STR
+ DEBUG_STR
+ DEBUG_SSMPINGD_STR)
+{
+ PIM_DONT_DEBUG_SSMPINGD;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pim_zebra,
+ debug_pim_zebra_cmd,
+ "[no] debug pim zebra",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_ZEBRA_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_ZEBRA;
+ else
+ PIM_DONT_DEBUG_ZEBRA;
+ return CMD_SUCCESS;
+}
+
+DEFUN(debug_pim_mlag, debug_pim_mlag_cmd, "debug pim mlag",
+ DEBUG_STR DEBUG_PIM_STR DEBUG_PIM_MLAG_STR)
+{
+ PIM_DO_DEBUG_MLAG;
+ return CMD_SUCCESS;
+}
+
+DEFUN(no_debug_pim_mlag, no_debug_pim_mlag_cmd, "no debug pim mlag",
+ NO_STR DEBUG_STR DEBUG_PIM_STR DEBUG_PIM_MLAG_STR)
+{
+ PIM_DONT_DEBUG_MLAG;
+ return CMD_SUCCESS;
+}
+
+DEFUN (debug_pim_vxlan,
+ debug_pim_vxlan_cmd,
+ "debug pim vxlan",
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_VXLAN_STR)
+{
+ PIM_DO_DEBUG_VXLAN;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_pim_vxlan,
+ no_debug_pim_vxlan_cmd,
+ "no debug pim vxlan",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_VXLAN_STR)
+{
+ PIM_DONT_DEBUG_VXLAN;
+ return CMD_SUCCESS;
+}
+
+DEFUN (debug_msdp,
+ debug_msdp_cmd,
+ "debug msdp",
+ DEBUG_STR
+ DEBUG_MSDP_STR)
+{
+ PIM_DO_DEBUG_MSDP_EVENTS;
+ PIM_DO_DEBUG_MSDP_PACKETS;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_msdp,
+ no_debug_msdp_cmd,
+ "no debug msdp",
+ NO_STR
+ DEBUG_STR
+ DEBUG_MSDP_STR)
+{
+ PIM_DONT_DEBUG_MSDP_EVENTS;
+ PIM_DONT_DEBUG_MSDP_PACKETS;
+ return CMD_SUCCESS;
+}
+
+DEFUN (debug_msdp_events,
+ debug_msdp_events_cmd,
+ "debug msdp events",
+ DEBUG_STR
+ DEBUG_MSDP_STR
+ DEBUG_MSDP_EVENTS_STR)
+{
+ PIM_DO_DEBUG_MSDP_EVENTS;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_msdp_events,
+ no_debug_msdp_events_cmd,
+ "no debug msdp events",
+ NO_STR
+ DEBUG_STR
+ DEBUG_MSDP_STR
+ DEBUG_MSDP_EVENTS_STR)
+{
+ PIM_DONT_DEBUG_MSDP_EVENTS;
+ return CMD_SUCCESS;
+}
+
+DEFUN (debug_msdp_packets,
+ debug_msdp_packets_cmd,
+ "debug msdp packets",
+ DEBUG_STR
+ DEBUG_MSDP_STR
+ DEBUG_MSDP_PACKETS_STR)
+{
+ PIM_DO_DEBUG_MSDP_PACKETS;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_msdp_packets,
+ no_debug_msdp_packets_cmd,
+ "no debug msdp packets",
+ NO_STR
+ DEBUG_STR
+ DEBUG_MSDP_STR
+ DEBUG_MSDP_PACKETS_STR)
+{
+ PIM_DONT_DEBUG_MSDP_PACKETS;
+ return CMD_SUCCESS;
+}
+
+DEFUN (debug_mtrace,
+ debug_mtrace_cmd,
+ "debug mtrace",
+ DEBUG_STR
+ DEBUG_MTRACE_STR)
+{
+ PIM_DO_DEBUG_MTRACE;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_mtrace,
+ no_debug_mtrace_cmd,
+ "no debug mtrace",
+ NO_STR
+ DEBUG_STR
+ DEBUG_MTRACE_STR)
+{
+ PIM_DONT_DEBUG_MTRACE;
+ return CMD_SUCCESS;
+}
+
+DEFUN (debug_bsm,
+ debug_bsm_cmd,
+ "debug pim bsm",
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_BSM_STR)
+{
+ PIM_DO_DEBUG_BSM;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_bsm,
+ no_debug_bsm_cmd,
+ "no debug pim bsm",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_BSM_STR)
+{
+ PIM_DONT_DEBUG_BSM;
+ return CMD_SUCCESS;
+}
+
+
+DEFUN_NOSH (show_debugging_pim,
+ show_debugging_pim_cmd,
+ "show debugging [pim]",
+ SHOW_STR
+ DEBUG_STR
+ PIM_STR)
+{
+ vty_out(vty, "PIM debugging status\n");
+
+ pim_debug_config_write(vty);
+
+ cmd_show_lib_debugs(vty);
+ return CMD_SUCCESS;
+}
+
+DEFUN (interface_pim_use_source,
+ interface_pim_use_source_cmd,
+ "ip pim use-source A.B.C.D",
+ IP_STR
+ PIM_STR
+ "Configure primary IP address\n"
+ "source ip address\n")
+{
+ nb_cli_enqueue_change(vty, "./use-source", NB_OP_MODIFY, argv[3]->arg);
+
+ return nb_cli_apply_changes(vty,
+ FRR_PIM_INTERFACE_XPATH,
+ "frr-routing:ipv4");
+}
+
+DEFUN (interface_no_pim_use_source,
+ interface_no_pim_use_source_cmd,
+ "no ip pim use-source [A.B.C.D]",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Delete source IP address\n"
+ "source ip address\n")
+{
+ nb_cli_enqueue_change(vty, "./use-source", NB_OP_MODIFY, "0.0.0.0");
+
+ return nb_cli_apply_changes(vty,
+ FRR_PIM_INTERFACE_XPATH,
+ "frr-routing:ipv4");
+}
+
+DEFPY (ip_pim_bfd,
+ ip_pim_bfd_cmd,
+ "ip pim bfd [profile BFDPROF$prof]",
+ IP_STR
+ PIM_STR
+ "Enables BFD support\n"
+ "Use BFD profile\n"
+ "Use BFD profile name\n")
+{
+ /* Enable BFD for PIM neighbors on this interface, optionally bound to
+  * a named BFD profile.  If neither IGMP nor PIM is enabled yet, enable
+  * PIM implicitly so the BFD session has something to track.
+  */
+ const struct lyd_node *igmp_enable_dnode;
+
+ igmp_enable_dnode =
+ yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_GMP_ENABLE_XPATH, VTY_CURR_XPATH,
+ "frr-routing:ipv4");
+ if (!igmp_enable_dnode)
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+ else {
+ if (!yang_dnode_get_bool(igmp_enable_dnode, "."))
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+ }
+
+ /* Create the bfd container; profile is a child leaf, set only when
+  * supplied on the command line.
+  */
+ nb_cli_enqueue_change(vty, "./bfd", NB_OP_CREATE, NULL);
+ if (prof)
+ nb_cli_enqueue_change(vty, "./bfd/profile", NB_OP_MODIFY, prof);
+
+ return nb_cli_apply_changes(vty,
+ FRR_PIM_INTERFACE_XPATH,
+ "frr-routing:ipv4");
+}
+
+/* Detach the BFD profile from this interface's PIM BFD session; the
+ * session itself stays configured. */
+DEFPY(no_ip_pim_bfd_profile, no_ip_pim_bfd_profile_cmd,
+ "no ip pim bfd profile [BFDPROF]",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Enables BFD support\n"
+ "Disable BFD profile\n"
+ "BFD Profile name\n")
+{
+ nb_cli_enqueue_change(vty, "./bfd/profile", NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty,
+ FRR_PIM_INTERFACE_XPATH,
+ "frr-routing:ipv4");
+}
+
+/* Remove BFD tracking entirely by destroying the ./bfd container. */
+DEFUN (no_ip_pim_bfd,
+ no_ip_pim_bfd_cmd,
+ "no ip pim bfd",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Disables BFD support\n")
+{
+ nb_cli_enqueue_change(vty, "./bfd", NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty,
+ FRR_PIM_INTERFACE_XPATH,
+ "frr-routing:ipv4");
+}
+
+/* Per-interface BSM / unicast-BSM enablement.  These four commands are
+ * thin wrappers delegating to the address-family-neutral handlers in
+ * pim_cmd_common.c. */
+DEFUN (ip_pim_bsm,
+ ip_pim_bsm_cmd,
+ "ip pim bsm",
+ IP_STR
+ PIM_STR
+ "Enable BSM support on the interface\n")
+{
+ return pim_process_bsm_cmd(vty);
+}
+/* Disable BSM processing on the interface. */
+DEFUN (no_ip_pim_bsm,
+ no_ip_pim_bsm_cmd,
+ "no ip pim bsm",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Enable BSM support on the interface\n")
+{
+ return pim_process_no_bsm_cmd(vty);
+}
+
+/* Allow sending/accepting unicast BSM on the interface. */
+DEFUN (ip_pim_ucast_bsm,
+ ip_pim_ucast_bsm_cmd,
+ "ip pim unicast-bsm",
+ IP_STR
+ PIM_STR
+ "Accept/Send unicast BSM on the interface\n")
+{
+ return pim_process_unicast_bsm_cmd(vty);
+}
+
+/* Disallow unicast BSM on the interface. */
+DEFUN (no_ip_pim_ucast_bsm,
+ no_ip_pim_ucast_bsm_cmd,
+ "no ip pim unicast-bsm",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Accept/Send unicast BSM on the interface\n")
+{
+ return pim_process_no_unicast_bsm_cmd(vty);
+}
+
+/* Legacy "ip pim bfd <mult> <rx> <tx>" form.  When the integrated BFD
+ * daemon is built, timer parameters live in BFD profiles, so this
+ * variant is kept only as a hidden compatibility command. */
+#if HAVE_BFDD > 0
+DEFUN_HIDDEN (
+ ip_pim_bfd_param,
+ ip_pim_bfd_param_cmd,
+ "ip pim bfd (2-255) (1-65535) (1-65535)",
+ IP_STR
+ PIM_STR
+ "Enables BFD support\n"
+ "Detect Multiplier\n"
+ "Required min receive interval\n"
+ "Desired min transmit interval\n")
+#else
+ DEFUN(
+ ip_pim_bfd_param,
+ ip_pim_bfd_param_cmd,
+ "ip pim bfd (2-255) (1-65535) (1-65535)",
+ IP_STR
+ PIM_STR
+ "Enables BFD support\n"
+ "Detect Multiplier\n"
+ "Required min receive interval\n"
+ "Desired min transmit interval\n")
+#endif /* HAVE_BFDD */
+{
+ /* argv slots: 3 = detect multiplier, 4 = min-rx, 5 = min-tx. */
+ int idx_number = 3;
+ int idx_number_2 = 4;
+ int idx_number_3 = 5;
+ const struct lyd_node *igmp_enable_dnode;
+
+ /* Implicitly enable PIM on the interface unless already on. */
+ igmp_enable_dnode =
+ yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_GMP_ENABLE_XPATH, VTY_CURR_XPATH,
+ "frr-routing:ipv4");
+ if (!igmp_enable_dnode)
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+ else {
+ if (!yang_dnode_get_bool(igmp_enable_dnode, "."))
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+ }
+
+ nb_cli_enqueue_change(vty, "./bfd", NB_OP_CREATE, NULL);
+ nb_cli_enqueue_change(vty, "./bfd/min-rx-interval", NB_OP_MODIFY,
+ argv[idx_number_2]->arg);
+ nb_cli_enqueue_change(vty, "./bfd/min-tx-interval", NB_OP_MODIFY,
+ argv[idx_number_3]->arg);
+ nb_cli_enqueue_change(vty, "./bfd/detect_mult", NB_OP_MODIFY,
+ argv[idx_number]->arg);
+
+ return nb_cli_apply_changes(vty,
+ FRR_PIM_INTERFACE_XPATH, "frr-routing:ipv4");
+}
+
+/* Without bfdd the timer parameters on the "no" form are accepted and
+ * ignored; it behaves exactly like plain "no ip pim bfd". */
+#if HAVE_BFDD == 0
+ALIAS(no_ip_pim_bfd, no_ip_pim_bfd_param_cmd,
+ "no ip pim bfd (2-255) (1-65535) (1-65535)",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Enables BFD support\n"
+ "Detect Multiplier\n"
+ "Required min receive interval\n"
+ "Desired min transmit interval\n")
+#endif /* !HAVE_BFDD */
+
+/* Configure an MSDP peer in the current VRF: sets the source-ip leaf of
+ * the /msdp-peer list entry keyed by the peer address (creating the
+ * entry implicitly). */
+DEFPY(ip_msdp_peer, ip_msdp_peer_cmd,
+      "ip msdp peer A.B.C.D$peer source A.B.C.D$source",
+      IP_STR
+      CFG_MSDP_STR
+      "Configure MSDP peer\n"
+      "Peer IP address\n"
+      "Source address for TCP connection\n"
+      "Local IP address\n")
+{
+	const char *vrfname;
+	char temp_xpath[XPATH_MAXLEN];
+	char msdp_peer_source_xpath[XPATH_MAXLEN];
+
+	vrfname = pim_cli_get_vrf_name(vty);
+	if (vrfname == NULL)
+		return CMD_WARNING_CONFIG_FAILED;
+
+	/* Build the absolute xpath of the peer's source-ip leaf. */
+	snprintf(msdp_peer_source_xpath, sizeof(msdp_peer_source_xpath),
+		 FRR_PIM_VRF_XPATH, "frr-pim:pimd", "pim", vrfname,
+		 "frr-routing:ipv4");
+	snprintf(temp_xpath, sizeof(temp_xpath),
+		 "/msdp-peer[peer-ip='%s']/source-ip", peer_str);
+	strlcat(msdp_peer_source_xpath, temp_xpath,
+		sizeof(msdp_peer_source_xpath));
+
+	nb_cli_enqueue_change(vty, msdp_peer_source_xpath, NB_OP_MODIFY,
+			      source_str);
+
+	/* The enqueued xpath is already absolute and this is a VRF-level
+	 * command, so apply with a NULL base instead of anchoring on the
+	 * interface xpath (which only makes sense in interface context). */
+	return nb_cli_apply_changes(vty, NULL);
+}
+
+/* Configure the VRF-wide MSDP timers.  Keepalive/hold-time are always
+ * set; connection-retry is set when given, otherwise reset to its
+ * default by destroying the leaf. */
+DEFPY(ip_msdp_timers, ip_msdp_timers_cmd,
+      "ip msdp timers (1-65535)$keepalive (1-65535)$holdtime [(1-65535)$connretry]",
+      IP_STR
+      CFG_MSDP_STR
+      "MSDP timers configuration\n"
+      "Keep alive period (in seconds)\n"
+      "Hold time period (in seconds)\n"
+      "Connection retry period (in seconds)\n")
+{
+	const char *vrfname;
+
+	vrfname = pim_cli_get_vrf_name(vty);
+	if (vrfname == NULL)
+		return CMD_WARNING_CONFIG_FAILED;
+
+	nb_cli_enqueue_change(vty, "./hold-time", NB_OP_MODIFY, holdtime_str);
+	nb_cli_enqueue_change(vty, "./keep-alive", NB_OP_MODIFY,
+			      keepalive_str);
+	if (connretry_str)
+		nb_cli_enqueue_change(vty, "./connection-retry", NB_OP_MODIFY,
+				      connretry_str);
+	else
+		nb_cli_enqueue_change(vty, "./connection-retry", NB_OP_DESTROY,
+				      NULL);
+
+	/* Propagate the commit result instead of unconditionally reporting
+	 * success, so validation/apply failures reach the operator (this
+	 * matches every other command in this file). */
+	return nb_cli_apply_changes(vty, FRR_PIM_MSDP_XPATH, "frr-pim:pimd",
+				    "pim", vrfname, "frr-routing:ipv4");
+}
+
+/* Reset all MSDP timers to defaults.  The optional numeric arguments are
+ * accepted for CLI symmetry but ignored. */
+DEFPY(no_ip_msdp_timers, no_ip_msdp_timers_cmd,
+      "no ip msdp timers [(1-65535) (1-65535) [(1-65535)]]",
+      NO_STR
+      IP_STR
+      CFG_MSDP_STR
+      "MSDP timers configuration\n"
+      IGNORED_IN_NO_STR
+      IGNORED_IN_NO_STR
+      IGNORED_IN_NO_STR)
+{
+	const char *vrfname;
+
+	vrfname = pim_cli_get_vrf_name(vty);
+	if (vrfname == NULL)
+		return CMD_WARNING_CONFIG_FAILED;
+
+	nb_cli_enqueue_change(vty, "./hold-time", NB_OP_DESTROY, NULL);
+	nb_cli_enqueue_change(vty, "./keep-alive", NB_OP_DESTROY, NULL);
+	nb_cli_enqueue_change(vty, "./connection-retry", NB_OP_DESTROY, NULL);
+
+	/* Return the commit result rather than discarding it, so a failed
+	 * apply is reported to the operator. */
+	return nb_cli_apply_changes(vty, FRR_PIM_MSDP_XPATH, "frr-pim:pimd",
+				    "pim", vrfname, "frr-routing:ipv4");
+}
+
+/* Remove an MSDP peer: destroy the whole /msdp-peer list entry keyed by
+ * the given peer address in the current VRF. */
+DEFUN (no_ip_msdp_peer,
+ no_ip_msdp_peer_cmd,
+ "no ip msdp peer A.B.C.D",
+ NO_STR
+ IP_STR
+ CFG_MSDP_STR
+ "Delete MSDP peer\n"
+ "peer ip address\n")
+{
+	const char *vrfname;
+	char msdp_peer_xpath[XPATH_MAXLEN];
+
+	vrfname = pim_cli_get_vrf_name(vty);
+	if (vrfname == NULL)
+		return CMD_WARNING_CONFIG_FAILED;
+
+	/* Compose the absolute peer xpath in a single format pass;
+	 * adjacent string literals concatenate at compile time. */
+	snprintf(msdp_peer_xpath, sizeof(msdp_peer_xpath),
+		 FRR_PIM_VRF_XPATH "/msdp-peer[peer-ip='%s']",
+		 "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4",
+		 argv[4]->arg);
+
+	nb_cli_enqueue_change(vty, msdp_peer_xpath, NB_OP_DESTROY, NULL);
+
+	return nb_cli_apply_changes(vty, NULL);
+}
+
+/* Add a member address to an MSDP mesh group, creating the group entry
+ * itself on first use. */
+DEFPY(ip_msdp_mesh_group_member,
+      ip_msdp_mesh_group_member_cmd,
+      "ip msdp mesh-group WORD$gname member A.B.C.D$maddr",
+      IP_STR
+      CFG_MSDP_STR
+      "Configure MSDP mesh-group\n"
+      "Mesh group name\n"
+      "Mesh group member\n"
+      "Peer IP address\n")
+{
+	const char *vrfname;
+	char group_xpath[XPATH_MAXLEN];
+	char member_xpath[XPATH_MAXLEN];
+
+	vrfname = pim_cli_get_vrf_name(vty);
+	if (vrfname == NULL)
+		return CMD_WARNING_CONFIG_FAILED;
+
+	/* Ensure the mesh group node exists. */
+	snprintf(group_xpath, sizeof(group_xpath),
+		 FRR_PIM_VRF_XPATH "/msdp-mesh-groups[name='%s']",
+		 "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4", gname);
+	nb_cli_enqueue_change(vty, group_xpath, NB_OP_CREATE, NULL);
+
+	/* Then add the member list entry beneath it. */
+	snprintf(member_xpath, sizeof(member_xpath),
+		 "%s/members[address='%s']", group_xpath, maddr_str);
+	nb_cli_enqueue_change(vty, member_xpath, NB_OP_CREATE, NULL);
+
+	return nb_cli_apply_changes(vty, NULL);
+}
+
+/* Remove one member from an MSDP mesh group.  Validates that both the
+ * group and the member exist in the candidate config before enqueueing
+ * the destroy, then lets the legacy-behavior helper drop the whole group
+ * if this was its last member. */
+DEFPY(no_ip_msdp_mesh_group_member,
+ no_ip_msdp_mesh_group_member_cmd,
+ "no ip msdp mesh-group WORD$gname member A.B.C.D$maddr",
+ NO_STR
+ IP_STR
+ CFG_MSDP_STR
+ "Delete MSDP mesh-group member\n"
+ "Mesh group name\n"
+ "Mesh group member\n"
+ "Peer IP address\n")
+{
+ const char *vrfname;
+ char xpath_value[XPATH_MAXLEN];
+ char xpath_member_value[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ /* Get mesh group base XPath. */
+ snprintf(xpath_value, sizeof(xpath_value),
+ FRR_PIM_VRF_XPATH "/msdp-mesh-groups[name='%s']",
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4", gname);
+
+ if (!yang_dnode_exists(vty->candidate_config->dnode, xpath_value)) {
+ vty_out(vty, "%% mesh-group does not exist\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /* Remove mesh group member. */
+ strlcpy(xpath_member_value, xpath_value, sizeof(xpath_member_value));
+ strlcat(xpath_member_value, "/members[address='",
+ sizeof(xpath_member_value));
+ strlcat(xpath_member_value, maddr_str, sizeof(xpath_member_value));
+ strlcat(xpath_member_value, "']", sizeof(xpath_member_value));
+ if (!yang_dnode_exists(vty->candidate_config->dnode,
+ xpath_member_value)) {
+ vty_out(vty, "%% mesh-group member does not exist\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ nb_cli_enqueue_change(vty, xpath_member_value, NB_OP_DESTROY, NULL);
+
+ /*
+ * If this is the last member, then we must remove the group altogether
+ * to not break legacy CLI behaviour.
+ */
+ pim_cli_legacy_mesh_group_behavior(vty, gname);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* Set the local source address for an MSDP mesh group, creating the
+ * group entry itself on first use. */
+DEFPY(ip_msdp_mesh_group_source,
+      ip_msdp_mesh_group_source_cmd,
+      "ip msdp mesh-group WORD$gname source A.B.C.D$saddr",
+      IP_STR
+      CFG_MSDP_STR
+      "Configure MSDP mesh-group\n"
+      "Mesh group name\n"
+      "Mesh group local address\n"
+      "Source IP address for the TCP connection\n")
+{
+	const char *vrfname;
+	char group_xpath[XPATH_MAXLEN];
+	char source_xpath[XPATH_MAXLEN];
+
+	vrfname = pim_cli_get_vrf_name(vty);
+	if (vrfname == NULL)
+		return CMD_WARNING_CONFIG_FAILED;
+
+	/* Ensure the mesh group node exists. */
+	snprintf(group_xpath, sizeof(group_xpath),
+		 FRR_PIM_VRF_XPATH "/msdp-mesh-groups[name='%s']",
+		 "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4", gname);
+	nb_cli_enqueue_change(vty, group_xpath, NB_OP_CREATE, NULL);
+
+	/* Then set its source leaf. */
+	snprintf(source_xpath, sizeof(source_xpath), "%s/source",
+		 group_xpath);
+	nb_cli_enqueue_change(vty, source_xpath, NB_OP_MODIFY, saddr_str);
+
+	return nb_cli_apply_changes(vty, NULL);
+}
+
+/* Unset the mesh group's local source address, then let the legacy
+ * helper remove the group entirely if nothing else remains in it. */
+DEFPY(no_ip_msdp_mesh_group_source,
+ no_ip_msdp_mesh_group_source_cmd,
+ "no ip msdp mesh-group WORD$gname source [A.B.C.D]",
+ NO_STR
+ IP_STR
+ CFG_MSDP_STR
+ "Delete MSDP mesh-group source\n"
+ "Mesh group name\n"
+ "Mesh group source\n"
+ "Mesh group local address\n")
+{
+ const char *vrfname;
+ char xpath_value[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ /* Get mesh group base XPath. */
+ snprintf(xpath_value, sizeof(xpath_value),
+ FRR_PIM_VRF_XPATH "/msdp-mesh-groups[name='%s']",
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4", gname);
+ /* NOTE(review): NB_OP_CREATE inside a "no" command — presumably to
+ * guarantee the group node exists before destroying ./source;
+ * confirm this is intended rather than an existence check. */
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_CREATE, NULL);
+
+ /* Create mesh group source. */
+ strlcat(xpath_value, "/source", sizeof(xpath_value));
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_DESTROY, NULL);
+
+ /*
+ * If this is the last member, then we must remove the group altogether
+ * to not break legacy CLI behaviour.
+ */
+ pim_cli_legacy_mesh_group_behavior(vty, gname);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* Delete an entire MSDP mesh group.  Silently succeeds when the group is
+ * not configured (idempotent "no"). */
+DEFPY(no_ip_msdp_mesh_group,
+ no_ip_msdp_mesh_group_cmd,
+ "no ip msdp mesh-group WORD$gname",
+ NO_STR
+ IP_STR
+ CFG_MSDP_STR
+ "Delete MSDP mesh-group\n"
+ "Mesh group name\n")
+{
+ const char *vrfname;
+ char xpath_value[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ /* Get mesh group base XPath. */
+ snprintf(xpath_value, sizeof(xpath_value),
+ FRR_PIM_VRF_XPATH "/msdp-mesh-groups[name='%s']",
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4", gname);
+ if (!yang_dnode_exists(vty->candidate_config->dnode, xpath_value))
+ return CMD_SUCCESS;
+
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_DESTROY, NULL);
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* Render one MSDP mesh group (name, source, member table) either as
+ * plain text to the vty or, when 'json' is non-NULL, as a nested JSON
+ * object added under the group's name. */
+static void ip_msdp_show_mesh_group(struct vty *vty, struct pim_msdp_mg *mg,
+ struct json_object *json)
+{
+ struct listnode *mbrnode;
+ struct pim_msdp_mg_mbr *mbr;
+ char mbr_str[INET_ADDRSTRLEN];
+ char src_str[INET_ADDRSTRLEN];
+ char state_str[PIM_MSDP_STATE_STRLEN];
+ enum pim_msdp_peer_state state;
+ json_object *json_mg_row = NULL;
+ json_object *json_members = NULL;
+ json_object *json_row = NULL;
+
+ pim_inet4_dump("<source?>", mg->src_ip, src_str, sizeof(src_str));
+ if (json) {
+ /* currently there is only one mesh group but we should still
+ * make
+ * it a dict with mg-name as key */
+ json_mg_row = json_object_new_object();
+ json_object_string_add(json_mg_row, "name",
+ mg->mesh_group_name);
+ json_object_string_add(json_mg_row, "source", src_str);
+ } else {
+ vty_out(vty, "Mesh group : %s\n", mg->mesh_group_name);
+ vty_out(vty, " Source : %s\n", src_str);
+ vty_out(vty, " Member State\n");
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(mg->mbr_list, mbrnode, mbr)) {
+ pim_inet4_dump("<mbr?>", mbr->mbr_ip, mbr_str, sizeof(mbr_str));
+ /* A member without an active peer session shows as DISABLED. */
+ if (mbr->mp) {
+ state = mbr->mp->state;
+ } else {
+ state = PIM_MSDP_DISABLED;
+ }
+ pim_msdp_state_dump(state, state_str, sizeof(state_str));
+ if (json) {
+ json_row = json_object_new_object();
+ json_object_string_add(json_row, "member", mbr_str);
+ json_object_string_add(json_row, "state", state_str);
+ /* "members" sub-object is created lazily on first
+ * member. */
+ if (!json_members) {
+ json_members = json_object_new_object();
+ json_object_object_add(json_mg_row, "members",
+ json_members);
+ }
+ json_object_object_add(json_members, mbr_str, json_row);
+ } else {
+ vty_out(vty, " %-15s %11s\n", mbr_str, state_str);
+ }
+ }
+
+ if (json)
+ json_object_object_add(json, mg->mesh_group_name, json_mg_row);
+}
+
+/* "show ip msdp [vrf NAME] mesh-group [json]": dump every configured
+ * mesh group in one VRF. */
+DEFUN (show_ip_msdp_mesh_group,
+ show_ip_msdp_mesh_group_cmd,
+ "show ip msdp [vrf NAME] mesh-group [json]",
+ SHOW_STR
+ IP_STR
+ MSDP_STR
+ VRF_CMD_HELP_STR
+ "MSDP mesh-group information\n"
+ JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ int idx = 2;
+ struct pim_msdp_mg *mg;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+ struct pim_instance *pim;
+ struct json_object *json = NULL;
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ pim = vrf->info;
+ /* Quick case: list is empty. */
+ if (SLIST_EMPTY(&pim->msdp.mglist)) {
+ if (uj)
+ vty_out(vty, "{}\n");
+
+ return CMD_SUCCESS;
+ }
+
+ if (uj)
+ json = json_object_new_object();
+
+ SLIST_FOREACH (mg, &pim->msdp.mglist, mg_entry)
+ ip_msdp_show_mesh_group(vty, mg, json);
+
+ if (uj)
+ vty_json(vty, json);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip msdp vrf all mesh-group [json]": same as above, iterated over
+ * every VRF; in JSON mode each VRF gets its own sub-object. */
+DEFUN (show_ip_msdp_mesh_group_vrf_all,
+ show_ip_msdp_mesh_group_vrf_all_cmd,
+ "show ip msdp vrf all mesh-group [json]",
+ SHOW_STR
+ IP_STR
+ MSDP_STR
+ VRF_CMD_HELP_STR
+ "MSDP mesh-group information\n"
+ JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ struct json_object *json = NULL, *vrf_json = NULL;
+ struct pim_instance *pim;
+ struct pim_msdp_mg *mg;
+ struct vrf *vrf;
+
+ if (uj)
+ json = json_object_new_object();
+
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ if (uj) {
+ /* Ownership of vrf_json transfers to json here. */
+ vrf_json = json_object_new_object();
+ json_object_object_add(json, vrf->name, vrf_json);
+ } else
+ vty_out(vty, "VRF: %s\n", vrf->name);
+
+ pim = vrf->info;
+ SLIST_FOREACH (mg, &pim->msdp.mglist, mg_entry)
+ ip_msdp_show_mesh_group(vty, mg, vrf_json);
+ }
+
+ if (uj)
+ vty_json(vty, json);
+
+
+ return CMD_SUCCESS;
+}
+
+/* Render a one-line summary (addresses, state, uptime, SA count) for
+ * every MSDP peer in the instance; JSON when 'uj' is set. */
+static void ip_msdp_show_peers(struct pim_instance *pim, struct vty *vty,
+			       bool uj)
+{
+	struct listnode *node;
+	struct pim_msdp_peer *peer;
+	char addr_buf[INET_ADDRSTRLEN];
+	char local_buf[INET_ADDRSTRLEN];
+	char state_buf[PIM_MSDP_STATE_STRLEN];
+	char up_buf[PIM_MSDP_UPTIME_STRLEN];
+	json_object *json = NULL;
+	json_object *json_row;
+
+	if (uj)
+		json = json_object_new_object();
+	else
+		vty_out(vty,
+			"Peer Local State Uptime SaCnt\n");
+
+	for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, node, peer)) {
+		/* Uptime is only meaningful for established sessions. */
+		if (peer->state == PIM_MSDP_ESTABLISHED)
+			pim_time_uptime(up_buf, sizeof(up_buf),
+					pim_time_monotonic_sec()
+						- peer->uptime);
+		else
+			strlcpy(up_buf, "-", sizeof(up_buf));
+
+		pim_inet4_dump("<peer?>", peer->peer, addr_buf,
+			       sizeof(addr_buf));
+		pim_inet4_dump("<local?>", peer->local, local_buf,
+			       sizeof(local_buf));
+		pim_msdp_state_dump(peer->state, state_buf,
+				    sizeof(state_buf));
+
+		if (!uj) {
+			vty_out(vty, "%-15s %15s %11s %8s %6d\n", addr_buf,
+				local_buf, state_buf, up_buf, peer->sa_cnt);
+			continue;
+		}
+
+		json_row = json_object_new_object();
+		json_object_string_add(json_row, "peer", addr_buf);
+		json_object_string_add(json_row, "local", local_buf);
+		json_object_string_add(json_row, "state", state_buf);
+		json_object_string_add(json_row, "upTime", up_buf);
+		json_object_int_add(json_row, "saCount", peer->sa_cnt);
+		json_object_object_add(json, addr_buf, json_row);
+	}
+
+	if (uj)
+		vty_json(vty, json);
+}
+
+/* Detailed per-peer MSDP dump.  'peer' is either the literal string
+ * "detail" (show every peer) or a dotted-quad address (show only the
+ * matching peer).  Output is JSON when 'uj' is set, otherwise a
+ * multi-line text record per peer. */
+static void ip_msdp_show_peers_detail(struct pim_instance *pim, struct vty *vty,
+ const char *peer, bool uj)
+{
+ struct listnode *mpnode;
+ struct pim_msdp_peer *mp;
+ char peer_str[INET_ADDRSTRLEN];
+ char local_str[INET_ADDRSTRLEN];
+ char state_str[PIM_MSDP_STATE_STRLEN];
+ char timebuf[PIM_MSDP_UPTIME_STRLEN];
+ char katimer[PIM_MSDP_TIMER_STRLEN];
+ char crtimer[PIM_MSDP_TIMER_STRLEN];
+ char holdtimer[PIM_MSDP_TIMER_STRLEN];
+ int64_t now;
+ json_object *json = NULL;
+ json_object *json_row = NULL;
+
+ if (uj) {
+ json = json_object_new_object();
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, mpnode, mp)) {
+ pim_inet4_dump("<peer?>", mp->peer, peer_str, sizeof(peer_str));
+ /* Skip non-matching peers unless "detail" (show all). */
+ if (strcmp(peer, "detail") && strcmp(peer, peer_str))
+ continue;
+
+ /* Uptime is only meaningful once the session is up. */
+ if (mp->state == PIM_MSDP_ESTABLISHED) {
+ now = pim_time_monotonic_sec();
+ pim_time_uptime(timebuf, sizeof(timebuf),
+ now - mp->uptime);
+ } else {
+ strlcpy(timebuf, "-", sizeof(timebuf));
+ }
+ pim_inet4_dump("<local?>", mp->local, local_str,
+ sizeof(local_str));
+ pim_msdp_state_dump(mp->state, state_str, sizeof(state_str));
+ pim_time_timer_to_hhmmss(katimer, sizeof(katimer),
+ mp->ka_timer);
+ pim_time_timer_to_hhmmss(crtimer, sizeof(crtimer),
+ mp->cr_timer);
+ pim_time_timer_to_hhmmss(holdtimer, sizeof(holdtimer),
+ mp->hold_timer);
+
+ if (uj) {
+ json_row = json_object_new_object();
+ json_object_string_add(json_row, "peer", peer_str);
+ json_object_string_add(json_row, "local", local_str);
+ /* Mesh-group name is only set for mesh members. */
+ if (mp->flags & PIM_MSDP_PEERF_IN_GROUP)
+ json_object_string_add(json_row,
+ "meshGroupName",
+ mp->mesh_group_name);
+ json_object_string_add(json_row, "state", state_str);
+ json_object_string_add(json_row, "upTime", timebuf);
+ json_object_string_add(json_row, "keepAliveTimer",
+ katimer);
+ json_object_string_add(json_row, "connRetryTimer",
+ crtimer);
+ json_object_string_add(json_row, "holdTimer",
+ holdtimer);
+ json_object_string_add(json_row, "lastReset",
+ mp->last_reset);
+ json_object_int_add(json_row, "connAttempts",
+ mp->conn_attempts);
+ json_object_int_add(json_row, "establishedChanges",
+ mp->est_flaps);
+ json_object_int_add(json_row, "saCount", mp->sa_cnt);
+ json_object_int_add(json_row, "kaSent", mp->ka_tx_cnt);
+ json_object_int_add(json_row, "kaRcvd", mp->ka_rx_cnt);
+ json_object_int_add(json_row, "saSent", mp->sa_tx_cnt);
+ json_object_int_add(json_row, "saRcvd", mp->sa_rx_cnt);
+ json_object_object_add(json, peer_str, json_row);
+ } else {
+ vty_out(vty, "Peer : %s\n", peer_str);
+ vty_out(vty, " Local : %s\n", local_str);
+ if (mp->flags & PIM_MSDP_PEERF_IN_GROUP)
+ vty_out(vty, " Mesh Group : %s\n",
+ mp->mesh_group_name);
+ vty_out(vty, " State : %s\n", state_str);
+ vty_out(vty, " Uptime : %s\n", timebuf);
+
+ vty_out(vty, " Keepalive Timer : %s\n", katimer);
+ vty_out(vty, " Conn Retry Timer : %s\n", crtimer);
+ vty_out(vty, " Hold Timer : %s\n", holdtimer);
+ vty_out(vty, " Last Reset : %s\n",
+ mp->last_reset);
+ vty_out(vty, " Conn Attempts : %d\n",
+ mp->conn_attempts);
+ vty_out(vty, " Established Changes : %d\n",
+ mp->est_flaps);
+ vty_out(vty, " SA Count : %d\n",
+ mp->sa_cnt);
+ vty_out(vty, " Statistics :\n");
+ vty_out(vty,
+ " Sent Rcvd\n");
+ vty_out(vty, " Keepalives : %10d %10d\n",
+ mp->ka_tx_cnt, mp->ka_rx_cnt);
+ vty_out(vty, " SAs : %10d %10d\n",
+ mp->sa_tx_cnt, mp->sa_rx_cnt);
+ vty_out(vty, "\n");
+ }
+ }
+
+ if (uj)
+ vty_json(vty, json);
+}
+
+/* "show ip msdp [vrf NAME] peer [detail|A.B.C.D] [json]": summary table
+ * by default, detailed records when "detail" or a specific peer address
+ * is given. */
+DEFUN (show_ip_msdp_peer_detail,
+ show_ip_msdp_peer_detail_cmd,
+ "show ip msdp [vrf NAME] peer [detail|A.B.C.D] [json]",
+ SHOW_STR
+ IP_STR
+ MSDP_STR
+ VRF_CMD_HELP_STR
+ "MSDP peer information\n"
+ "Detailed output\n"
+ "peer ip address\n"
+ JSON_STR)
+{
+	bool uj = use_json(argc, argv);
+	int idx = 2;
+	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+	char *arg = NULL;
+
+	if (!vrf)
+		return CMD_WARNING;
+
+	/* "detail" selects all peers; an address selects one. */
+	if (argv_find(argv, argc, "detail", &idx))
+		arg = argv[idx]->text;
+	else if (argv_find(argv, argc, "A.B.C.D", &idx))
+		arg = argv[idx]->arg;
+
+	/* Pass the selector we just computed (previously the code
+	 * re-derived it from argv[idx]->arg, leaving 'arg' a dead store
+	 * and coupling the call to the last argv_find index). */
+	if (arg)
+		ip_msdp_show_peers_detail(vrf->info, vty, arg, uj);
+	else
+		ip_msdp_show_peers(vrf->info, vty, uj);
+
+	return CMD_SUCCESS;
+}
+
+/* Per-VRF variant of the peer show command.  In JSON mode the outer
+ * object is hand-assembled with vty_out braces/commas while each VRF's
+ * body is emitted by the helpers' own vty_json calls. */
+DEFUN (show_ip_msdp_peer_detail_vrf_all,
+ show_ip_msdp_peer_detail_vrf_all_cmd,
+ "show ip msdp vrf all peer [detail|A.B.C.D] [json]",
+ SHOW_STR
+ IP_STR
+ MSDP_STR
+ VRF_CMD_HELP_STR
+ "MSDP peer information\n"
+ "Detailed output\n"
+ "peer ip address\n"
+ JSON_STR)
+{
+ int idx = 2;
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf;
+ bool first = true;
+
+ if (uj)
+ vty_out(vty, "{ ");
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ if (uj) {
+ if (!first)
+ vty_out(vty, ", ");
+ vty_out(vty, " \"%s\": ", vrf->name);
+ first = false;
+ } else
+ vty_out(vty, "VRF: %s\n", vrf->name);
+ /* Same detail/address selection as the single-VRF command. */
+ if (argv_find(argv, argc, "detail", &idx)
+ || argv_find(argv, argc, "A.B.C.D", &idx))
+ ip_msdp_show_peers_detail(vrf->info, vty,
+ argv[idx]->arg, uj);
+ else
+ ip_msdp_show_peers(vrf->info, vty, uj);
+ }
+ if (uj)
+ vty_out(vty, "}\n");
+
+ return CMD_SUCCESS;
+}
+
+/* One-line-per-entry dump of the MSDP SA (source-active) cache.  RP and
+ * SPT columns only apply to entries learned from a peer; locally
+ * originated entries show "-" there. */
+static void ip_msdp_show_sa(struct pim_instance *pim, struct vty *vty, bool uj)
+{
+ struct listnode *sanode;
+ struct pim_msdp_sa *sa;
+ char rp_str[INET_ADDRSTRLEN];
+ char timebuf[PIM_MSDP_UPTIME_STRLEN];
+ char spt_str[8];
+ char local_str[8];
+ int64_t now;
+ json_object *json = NULL;
+ json_object *json_group = NULL;
+ json_object *json_row = NULL;
+
+ if (uj) {
+ json = json_object_new_object();
+ } else {
+ vty_out(vty,
+ "Source Group RP Local SPT Uptime\n");
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, sanode, sa)) {
+ now = pim_time_monotonic_sec();
+ pim_time_uptime(timebuf, sizeof(timebuf), now - sa->uptime);
+ if (sa->flags & PIM_MSDP_SAF_PEER) {
+ pim_inet4_dump("<rp?>", sa->rp, rp_str, sizeof(rp_str));
+ /* sa->up set means the SPT has been joined. */
+ if (sa->up) {
+ strlcpy(spt_str, "yes", sizeof(spt_str));
+ } else {
+ strlcpy(spt_str, "no", sizeof(spt_str));
+ }
+ } else {
+ strlcpy(rp_str, "-", sizeof(rp_str));
+ strlcpy(spt_str, "-", sizeof(spt_str));
+ }
+ if (sa->flags & PIM_MSDP_SAF_LOCAL) {
+ strlcpy(local_str, "yes", sizeof(local_str));
+ } else {
+ strlcpy(local_str, "no", sizeof(local_str));
+ }
+ if (uj) {
+ char src_str[PIM_ADDRSTRLEN];
+ char grp_str[PIM_ADDRSTRLEN];
+
+ snprintfrr(grp_str, sizeof(grp_str), "%pPAs",
+ &sa->sg.grp);
+ snprintfrr(src_str, sizeof(src_str), "%pPAs",
+ &sa->sg.src);
+
+ /* Rows are grouped as json[group][source]. */
+ json_object_object_get_ex(json, grp_str, &json_group);
+
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_object_add(json, grp_str,
+ json_group);
+ }
+
+ json_row = json_object_new_object();
+ json_object_string_add(json_row, "source", src_str);
+ json_object_string_add(json_row, "group", grp_str);
+ json_object_string_add(json_row, "rp", rp_str);
+ json_object_string_add(json_row, "local", local_str);
+ json_object_string_add(json_row, "sptSetup", spt_str);
+ json_object_string_add(json_row, "upTime", timebuf);
+ json_object_object_add(json_group, src_str, json_row);
+ } else {
+ vty_out(vty, "%-15pPAs %15pPAs %15s %5c %3c %8s\n",
+ &sa->sg.src, &sa->sg.grp, rp_str, local_str[0],
+ spt_str[0], timebuf);
+ }
+ }
+
+ if (uj)
+ vty_json(vty, json);
+}
+
+/* Render one SA cache entry in detailed form.  src_str/grp_str are the
+ * pre-formatted source/group addresses; in JSON mode the row is added
+ * into the caller-owned 'json' under json[grp_str][src_str]. */
+static void ip_msdp_show_sa_entry_detail(struct pim_msdp_sa *sa,
+ const char *src_str,
+ const char *grp_str, struct vty *vty,
+ bool uj, json_object *json)
+{
+ char rp_str[INET_ADDRSTRLEN];
+ char peer_str[INET_ADDRSTRLEN];
+ char timebuf[PIM_MSDP_UPTIME_STRLEN];
+ char spt_str[8];
+ char local_str[8];
+ char statetimer[PIM_MSDP_TIMER_STRLEN];
+ int64_t now;
+ json_object *json_group = NULL;
+ json_object *json_row = NULL;
+
+ now = pim_time_monotonic_sec();
+ pim_time_uptime(timebuf, sizeof(timebuf), now - sa->uptime);
+ /* RP/peer/SPT only apply to entries learned from a peer. */
+ if (sa->flags & PIM_MSDP_SAF_PEER) {
+ pim_inet4_dump("<rp?>", sa->rp, rp_str, sizeof(rp_str));
+ pim_inet4_dump("<peer?>", sa->peer, peer_str, sizeof(peer_str));
+ if (sa->up) {
+ strlcpy(spt_str, "yes", sizeof(spt_str));
+ } else {
+ strlcpy(spt_str, "no", sizeof(spt_str));
+ }
+ } else {
+ strlcpy(rp_str, "-", sizeof(rp_str));
+ strlcpy(peer_str, "-", sizeof(peer_str));
+ strlcpy(spt_str, "-", sizeof(spt_str));
+ }
+ if (sa->flags & PIM_MSDP_SAF_LOCAL) {
+ strlcpy(local_str, "yes", sizeof(local_str));
+ } else {
+ strlcpy(local_str, "no", sizeof(local_str));
+ }
+ pim_time_timer_to_hhmmss(statetimer, sizeof(statetimer),
+ sa->sa_state_timer);
+ if (uj) {
+ json_object_object_get_ex(json, grp_str, &json_group);
+
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_object_add(json, grp_str, json_group);
+ }
+
+ json_row = json_object_new_object();
+ json_object_string_add(json_row, "source", src_str);
+ json_object_string_add(json_row, "group", grp_str);
+ json_object_string_add(json_row, "rp", rp_str);
+ json_object_string_add(json_row, "local", local_str);
+ json_object_string_add(json_row, "sptSetup", spt_str);
+ json_object_string_add(json_row, "upTime", timebuf);
+ json_object_string_add(json_row, "stateTimer", statetimer);
+ json_object_object_add(json_group, src_str, json_row);
+ } else {
+ vty_out(vty, "SA : %s\n", sa->sg_str);
+ vty_out(vty, " RP : %s\n", rp_str);
+ vty_out(vty, " Peer : %s\n", peer_str);
+ vty_out(vty, " Local : %s\n", local_str);
+ vty_out(vty, " SPT Setup : %s\n", spt_str);
+ vty_out(vty, " Uptime : %s\n", timebuf);
+ vty_out(vty, " State Timer : %s\n", statetimer);
+ vty_out(vty, "\n");
+ }
+}
+
+/* Detailed dump of every entry in the MSDP SA cache. */
+static void ip_msdp_show_sa_detail(struct pim_instance *pim, struct vty *vty,
+				   bool uj)
+{
+	struct pim_msdp_sa *sa;
+	struct listnode *node;
+	json_object *json = uj ? json_object_new_object() : NULL;
+
+	for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, node, sa)) {
+		char src[PIM_ADDRSTRLEN];
+		char grp[PIM_ADDRSTRLEN];
+
+		snprintfrr(grp, sizeof(grp), "%pPAs", &sa->sg.grp);
+		snprintfrr(src, sizeof(src), "%pPAs", &sa->sg.src);
+
+		ip_msdp_show_sa_entry_detail(sa, src, grp, vty, uj, json);
+	}
+
+	if (json)
+		vty_json(vty, json);
+}
+
+/* "show ip msdp [vrf NAME] sa detail [json]": detailed SA cache dump for
+ * one VRF. */
+DEFUN (show_ip_msdp_sa_detail,
+ show_ip_msdp_sa_detail_cmd,
+ "show ip msdp [vrf NAME] sa detail [json]",
+ SHOW_STR
+ IP_STR
+ MSDP_STR
+ VRF_CMD_HELP_STR
+ "MSDP active-source information\n"
+ "Detailed output\n"
+ JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ ip_msdp_show_sa_detail(vrf->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+/* Same, iterated over all VRFs; outer JSON object hand-assembled with
+ * vty_out braces/commas around the helper's per-VRF output. */
+DEFUN (show_ip_msdp_sa_detail_vrf_all,
+ show_ip_msdp_sa_detail_vrf_all_cmd,
+ "show ip msdp vrf all sa detail [json]",
+ SHOW_STR
+ IP_STR
+ MSDP_STR
+ VRF_CMD_HELP_STR
+ "MSDP active-source information\n"
+ "Detailed output\n"
+ JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf;
+ bool first = true;
+
+ if (uj)
+ vty_out(vty, "{ ");
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ if (uj) {
+ if (!first)
+ vty_out(vty, ", ");
+ vty_out(vty, " \"%s\": ", vrf->name);
+ first = false;
+ } else
+ vty_out(vty, "VRF: %s\n", vrf->name);
+ ip_msdp_show_sa_detail(vrf->info, vty, uj);
+ }
+ if (uj)
+ vty_out(vty, "}\n");
+
+ return CMD_SUCCESS;
+}
+
+/* Detailed dump of the SA cache entries whose source OR group address
+ * matches 'addr' (a pre-formatted dotted quad). */
+static void ip_msdp_show_sa_addr(struct pim_instance *pim, struct vty *vty,
+				 const char *addr, bool uj)
+{
+	struct pim_msdp_sa *sa;
+	struct listnode *node;
+	json_object *json = uj ? json_object_new_object() : NULL;
+
+	for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, node, sa)) {
+		char src[PIM_ADDRSTRLEN];
+		char grp[PIM_ADDRSTRLEN];
+
+		snprintfrr(grp, sizeof(grp), "%pPAs", &sa->sg.grp);
+		snprintfrr(src, sizeof(src), "%pPAs", &sa->sg.src);
+
+		/* Filter: addr must equal the source or the group. */
+		if (strcmp(addr, src) != 0 && strcmp(addr, grp) != 0)
+			continue;
+
+		ip_msdp_show_sa_entry_detail(sa, src, grp, vty, uj, json);
+	}
+
+	if (json)
+		vty_json(vty, json);
+}
+
+/* Detailed dump of the single SA cache entry whose source AND group both
+ * match the given pre-formatted addresses. */
+static void ip_msdp_show_sa_sg(struct pim_instance *pim, struct vty *vty,
+ const char *src, const char *grp, bool uj)
+{
+ struct listnode *sanode;
+ struct pim_msdp_sa *sa;
+ json_object *json = NULL;
+
+ if (uj) {
+ json = json_object_new_object();
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, sanode, sa)) {
+ char src_str[PIM_ADDRSTRLEN];
+ char grp_str[PIM_ADDRSTRLEN];
+
+ snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &sa->sg.grp);
+ snprintfrr(src_str, sizeof(src_str), "%pPAs", &sa->sg.src);
+
+ /* Both source and group must match. */
+ if (!strcmp(src, src_str) && !strcmp(grp, grp_str)) {
+ ip_msdp_show_sa_entry_detail(sa, src_str, grp_str, vty,
+ uj, json);
+ }
+ }
+
+ if (uj)
+ vty_json(vty, json);
+}
+
+/* "show ip msdp [vrf NAME] sa [A.B.C.D [A.B.C.D]] [json]": no address =
+ * full summary; one address = entries matching source-or-group; two
+ * addresses = exact (S,G) entry. */
+DEFUN (show_ip_msdp_sa_sg,
+ show_ip_msdp_sa_sg_cmd,
+ "show ip msdp [vrf NAME] sa [A.B.C.D [A.B.C.D]] [json]",
+ SHOW_STR
+ IP_STR
+ MSDP_STR
+ VRF_CMD_HELP_STR
+ "MSDP active-source information\n"
+ "source or group ip\n"
+ "group ip\n"
+ JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf;
+ int idx = 2;
+
+ vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ /* First A.B.C.D token (if any); idx++ moves past it so the second
+ * argv_find looks for a following group address. */
+ char *src_ip = argv_find(argv, argc, "A.B.C.D", &idx) ? argv[idx++]->arg
+ : NULL;
+ char *grp_ip = idx < argc && argv_find(argv, argc, "A.B.C.D", &idx)
+ ? argv[idx]->arg
+ : NULL;
+
+ if (src_ip && grp_ip)
+ ip_msdp_show_sa_sg(vrf->info, vty, src_ip, grp_ip, uj);
+ else if (src_ip)
+ ip_msdp_show_sa_addr(vrf->info, vty, src_ip, uj);
+ else
+ ip_msdp_show_sa(vrf->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+/* All-VRF variant of the SA show; address parsing done once, then the
+ * chosen helper runs per VRF inside a hand-assembled JSON wrapper. */
+DEFUN (show_ip_msdp_sa_sg_vrf_all,
+ show_ip_msdp_sa_sg_vrf_all_cmd,
+ "show ip msdp vrf all sa [A.B.C.D [A.B.C.D]] [json]",
+ SHOW_STR
+ IP_STR
+ MSDP_STR
+ VRF_CMD_HELP_STR
+ "MSDP active-source information\n"
+ "source or group ip\n"
+ "group ip\n"
+ JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf;
+ bool first = true;
+ int idx = 2;
+
+ char *src_ip = argv_find(argv, argc, "A.B.C.D", &idx) ? argv[idx++]->arg
+ : NULL;
+ char *grp_ip = idx < argc && argv_find(argv, argc, "A.B.C.D", &idx)
+ ? argv[idx]->arg
+ : NULL;
+
+ if (uj)
+ vty_out(vty, "{ ");
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ if (uj) {
+ if (!first)
+ vty_out(vty, ", ");
+ vty_out(vty, " \"%s\": ", vrf->name);
+ first = false;
+ } else
+ vty_out(vty, "VRF: %s\n", vrf->name);
+
+ if (src_ip && grp_ip)
+ ip_msdp_show_sa_sg(vrf->info, vty, src_ip, grp_ip, uj);
+ else if (src_ip)
+ ip_msdp_show_sa_addr(vrf->info, vty, src_ip, uj);
+ else
+ ip_msdp_show_sa(vrf->info, vty, uj);
+ }
+ if (uj)
+ vty_out(vty, "}\n");
+
+ return CMD_SUCCESS;
+}
+
+/* Context threaded through the VXLAN SG cache walk callbacks. */
+struct pim_sg_cache_walk_data {
+ struct vty *vty;
+ json_object *json; /* NULL for plain-text output */
+ json_object *json_group; /* per-group bucket, created lazily */
+ struct in_addr addr; /* filter address, valid when addr_match */
+ bool addr_match; /* when true, only entries matching addr are shown */
+};
+
+/* Render one VXLAN SG cache entry (addresses, iif, oif, installed flag),
+ * honoring the walk context's optional address filter. */
+static void pim_show_vxlan_sg_entry(struct pim_vxlan_sg *vxlan_sg,
+ struct pim_sg_cache_walk_data *cwd)
+{
+ struct vty *vty = cwd->vty;
+ json_object *json = cwd->json;
+ json_object *json_row;
+ bool installed = (vxlan_sg->up) ? true : false;
+ const char *iif_name = vxlan_sg->iif?vxlan_sg->iif->name:"-";
+ const char *oif_name;
+
+ /* OIF depends on whether this is an origination or termination
+ * mroute. */
+ if (pim_vxlan_is_orig_mroute(vxlan_sg))
+ oif_name = vxlan_sg->orig_oif?vxlan_sg->orig_oif->name:"";
+ else
+ oif_name = vxlan_sg->term_oif?vxlan_sg->term_oif->name:"";
+
+ /* Filter: skip entries where neither src nor grp equals addr. */
+ if (cwd->addr_match && pim_addr_cmp(vxlan_sg->sg.src, cwd->addr) &&
+ pim_addr_cmp(vxlan_sg->sg.grp, cwd->addr)) {
+ return;
+ }
+ if (json) {
+ char src_str[PIM_ADDRSTRLEN];
+ char grp_str[PIM_ADDRSTRLEN];
+
+ snprintfrr(grp_str, sizeof(grp_str), "%pPAs",
+ &vxlan_sg->sg.grp);
+ snprintfrr(src_str, sizeof(src_str), "%pPAs",
+ &vxlan_sg->sg.src);
+
+ json_object_object_get_ex(json, grp_str, &cwd->json_group);
+
+ if (!cwd->json_group) {
+ cwd->json_group = json_object_new_object();
+ json_object_object_add(json, grp_str,
+ cwd->json_group);
+ }
+
+ json_row = json_object_new_object();
+ json_object_string_add(json_row, "source", src_str);
+ json_object_string_add(json_row, "group", grp_str);
+ json_object_string_add(json_row, "input", iif_name);
+ json_object_string_add(json_row, "output", oif_name);
+ if (installed)
+ json_object_boolean_true_add(json_row, "installed");
+ else
+ json_object_boolean_false_add(json_row, "installed");
+ json_object_object_add(cwd->json_group, src_str, json_row);
+ } else {
+ vty_out(vty, "%-15pPAs %-15pPAs %-15s %-15s %-5s\n",
+ &vxlan_sg->sg.src, &vxlan_sg->sg.grp, iif_name,
+ oif_name, installed ? "I" : "");
+ }
+}
+
+/* hash_iterate() adapter: unwrap the bucket and forward to the
+ * common per-entry printer. */
+static void pim_show_vxlan_sg_hash_entry(struct hash_bucket *bucket, void *arg)
+{
+	struct pim_vxlan_sg *vxlan_sg = bucket->data;
+	struct pim_sg_cache_walk_data *cwd = arg;
+
+	pim_show_vxlan_sg_entry(vxlan_sg, cwd);
+}
+
+/* Show every cached VxLAN (S,G) entry for this PIM instance. */
+static void pim_show_vxlan_sg(struct pim_instance *pim,
+			      struct vty *vty, bool uj)
+{
+	struct pim_sg_cache_walk_data cwd;
+	json_object *json = NULL;
+
+	if (!uj) {
+		vty_out(vty, "Codes: I -> installed\n");
+		vty_out(vty,
+			"Source Group Input Output Flags\n");
+	} else
+		json = json_object_new_object();
+
+	memset(&cwd, 0, sizeof(cwd));
+	cwd.vty = vty;
+	cwd.json = json;
+	hash_iterate(pim->vxlan.sg_hash, pim_show_vxlan_sg_hash_entry, &cwd);
+
+	/* json is non-NULL exactly when uj was requested */
+	if (json)
+		vty_json(vty, json);
+}
+
+/* Show only the VxLAN (S,G) entries whose source or group matches
+ * addr_str (dotted-quad). */
+static void pim_show_vxlan_sg_match_addr(struct pim_instance *pim,
+					 struct vty *vty, char *addr_str,
+					 bool uj)
+{
+	struct pim_sg_cache_walk_data cwd;
+	json_object *json = NULL;
+
+	memset(&cwd, 0, sizeof(cwd));
+	/* NOTE(review): inet_pton() returns 0 on malformed input without
+	 * setting errno, so the errno printed below may be stale — confirm. */
+	if (inet_pton(AF_INET, addr_str, &cwd.addr) <= 0) {
+		vty_out(vty, "Bad address %s: errno=%d: %s\n", addr_str,
+			errno, safe_strerror(errno));
+		return;
+	}
+
+	if (!uj) {
+		vty_out(vty, "Codes: I -> installed\n");
+		vty_out(vty,
+			"Source Group Input Output Flags\n");
+	} else
+		json = json_object_new_object();
+
+	cwd.vty = vty;
+	cwd.json = json;
+	cwd.addr_match = true;
+	hash_iterate(pim->vxlan.sg_hash, pim_show_vxlan_sg_hash_entry, &cwd);
+
+	if (json)
+		vty_json(vty, json);
+}
+
+/* Show a single VxLAN (S,G) entry identified by exact source and group
+ * addresses (dotted-quad strings).  Prints nothing (or an empty JSON
+ * object) when no matching entry exists. */
+static void pim_show_vxlan_sg_one(struct pim_instance *pim,
+ struct vty *vty, char *src_str, char *grp_str,
+ bool uj)
+{
+ json_object *json = NULL;
+ pim_sgaddr sg;
+ int result = 0;
+ struct pim_vxlan_sg *vxlan_sg;
+ const char *iif_name;
+ bool installed;
+ const char *oif_name;
+
+ /* NOTE(review): inet_pton() returns 0 for bad format without setting
+ * errno, so the errno in these messages may be stale — confirm. */
+ result = inet_pton(AF_INET, src_str, &sg.src);
+ if (result <= 0) {
+ vty_out(vty, "Bad src address %s: errno=%d: %s\n", src_str,
+ errno, safe_strerror(errno));
+ return;
+ }
+ result = inet_pton(AF_INET, grp_str, &sg.grp);
+ if (result <= 0) {
+ vty_out(vty, "Bad grp address %s: errno=%d: %s\n", grp_str,
+ errno, safe_strerror(errno));
+ return;
+ }
+
+ if (uj)
+ json = json_object_new_object();
+
+ vxlan_sg = pim_vxlan_sg_find(pim, &sg);
+ if (vxlan_sg) {
+ installed = (vxlan_sg->up) ? true : false;
+ iif_name = vxlan_sg->iif?vxlan_sg->iif->name:"-";
+
+ /* Output interface depends on whether this is an origination
+ * or a termination mroute. */
+ if (pim_vxlan_is_orig_mroute(vxlan_sg))
+ oif_name =
+ vxlan_sg->orig_oif?vxlan_sg->orig_oif->name:"";
+ else
+ oif_name =
+ vxlan_sg->term_oif?vxlan_sg->term_oif->name:"";
+
+ if (uj) {
+ json_object_string_add(json, "source", src_str);
+ json_object_string_add(json, "group", grp_str);
+ json_object_string_add(json, "input", iif_name);
+ json_object_string_add(json, "output", oif_name);
+ if (installed)
+ json_object_boolean_true_add(json, "installed");
+ else
+ json_object_boolean_false_add(json,
+ "installed");
+ } else {
+ vty_out(vty, "SG : %s\n", vxlan_sg->sg_str);
+ vty_out(vty, " Input : %s\n", iif_name);
+ vty_out(vty, " Output : %s\n", oif_name);
+ vty_out(vty, " installed : %s\n",
+ installed?"yes":"no");
+ }
+ }
+
+ /* vty_json() consumes (frees) json */
+ if (uj)
+ vty_json(vty, json);
+}
+
+DEFUN (show_ip_pim_vxlan_sg,
+       show_ip_pim_vxlan_sg_cmd,
+       "show ip pim [vrf NAME] vxlan-groups [A.B.C.D [A.B.C.D]] [json]",
+       SHOW_STR
+       IP_STR
+       PIM_STR
+       VRF_CMD_HELP_STR
+       "VxLAN BUM groups\n"
+       "source or group ip\n"
+       "group ip\n"
+       JSON_STR)
+{
+	bool uj = use_json(argc, argv);
+	int idx = 2;
+	struct vrf *vrf;
+	char *src_ip = NULL;
+	char *grp_ip = NULL;
+
+	vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+	if (!vrf)
+		return CMD_WARNING;
+
+	/* First A.B.C.D (if any) selects the source, second the group. */
+	if (argv_find(argv, argc, "A.B.C.D", &idx))
+		src_ip = argv[idx++]->arg;
+	if (idx < argc && argv_find(argv, argc, "A.B.C.D", &idx))
+		grp_ip = argv[idx]->arg;
+
+	if (!src_ip)
+		pim_show_vxlan_sg(vrf->info, vty, uj);
+	else if (!grp_ip)
+		pim_show_vxlan_sg_match_addr(vrf->info, vty, src_ip, uj);
+	else
+		pim_show_vxlan_sg_one(vrf->info, vty, src_ip, grp_ip, uj);
+
+	return CMD_SUCCESS;
+}
+
+/* Dump the global VxLAN work list (entries pending processing). */
+static void pim_show_vxlan_sg_work(struct pim_instance *pim,
+				   struct vty *vty, bool uj)
+{
+	struct pim_sg_cache_walk_data cwd;
+	struct pim_vxlan_sg *vxlan_sg;
+	struct listnode *node;
+	json_object *json = NULL;
+
+	if (!uj) {
+		vty_out(vty, "Codes: I -> installed\n");
+		vty_out(vty,
+			"Source Group Input Flags\n");
+	} else
+		json = json_object_new_object();
+
+	memset(&cwd, 0, sizeof(cwd));
+	cwd.vty = vty;
+	cwd.json = json;
+	for (ALL_LIST_ELEMENTS_RO(pim_vxlan_p->work_list, node, vxlan_sg))
+		pim_show_vxlan_sg_entry(vxlan_sg, &cwd);
+
+	if (json)
+		vty_json(vty, json);
+}
+
+DEFUN_HIDDEN (show_ip_pim_vxlan_sg_work,
+              show_ip_pim_vxlan_sg_work_cmd,
+              "show ip pim [vrf NAME] vxlan-work [json]",
+              SHOW_STR
+              IP_STR
+              PIM_STR
+              VRF_CMD_HELP_STR
+              "VxLAN work list\n"
+              JSON_STR)
+{
+	bool uj = use_json(argc, argv);
+	int idx = 2;
+	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+
+	if (vrf == NULL)
+		return CMD_WARNING;
+
+	pim_show_vxlan_sg_work(vrf->info, vty, uj);
+	return CMD_SUCCESS;
+}
+
+DEFUN_HIDDEN (no_ip_pim_mlag,
+              no_ip_pim_mlag_cmd,
+              "no ip pim mlag",
+              NO_STR
+              IP_STR
+              PIM_STR
+              "MLAG\n")
+{
+	char xpath[XPATH_MAXLEN];
+
+	/* Destroy the whole mlag container under the default-VRF
+	 * IPv4 PIM node. */
+	snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH, "frr-pim:pimd",
+		 "pim", "default", "frr-routing:ipv4");
+	strlcat(xpath, "/mlag", sizeof(xpath));
+	nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+	return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN_HIDDEN (ip_pim_mlag,
+              ip_pim_mlag_cmd,
+              "ip pim mlag INTERFACE role [primary|secondary] state [up|down] addr A.B.C.D",
+              IP_STR
+              PIM_STR
+              "MLAG\n"
+              "peerlink sub interface\n"
+              "MLAG role\n"
+              "MLAG role primary\n"
+              "MLAG role secondary\n"
+              "peer session state\n"
+              "peer session state up\n"
+              "peer session state down\n"
+              "configure PIP\n"
+              "unique ip address\n")
+{
+	int idx;
+	char mlag_peerlink_rif_xpath[XPATH_MAXLEN];
+	char mlag_my_role_xpath[XPATH_MAXLEN];
+	char mlag_peer_state_xpath[XPATH_MAXLEN];
+	char mlag_reg_address_xpath[XPATH_MAXLEN];
+
+	/* peerlink-rif: INTERFACE argument (token 3) */
+	snprintf(mlag_peerlink_rif_xpath, sizeof(mlag_peerlink_rif_xpath),
+		 FRR_PIM_VRF_XPATH,
+		 "frr-pim:pimd", "pim", "default", "frr-routing:ipv4");
+	strlcat(mlag_peerlink_rif_xpath, "/mlag/peerlink-rif",
+		sizeof(mlag_peerlink_rif_xpath));
+
+	idx = 3;
+	nb_cli_enqueue_change(vty, mlag_peerlink_rif_xpath, NB_OP_MODIFY,
+			      argv[idx]->arg);
+
+	/* my-role: "primary" or "secondary" (token 5) */
+	snprintf(mlag_my_role_xpath, sizeof(mlag_my_role_xpath),
+		 FRR_PIM_VRF_XPATH,
+		 "frr-pim:pimd", "pim", "default", "frr-routing:ipv4");
+	strlcat(mlag_my_role_xpath, "/mlag/my-role",
+		sizeof(mlag_my_role_xpath));
+
+	idx += 2;
+	if (!strcmp(argv[idx]->arg, "primary")) {
+		nb_cli_enqueue_change(vty, mlag_my_role_xpath, NB_OP_MODIFY,
+				      "MLAG_ROLE_PRIMARY");
+
+	} else if (!strcmp(argv[idx]->arg, "secondary")) {
+		nb_cli_enqueue_change(vty, mlag_my_role_xpath, NB_OP_MODIFY,
+				      "MLAG_ROLE_SECONDARY");
+
+	} else {
+		vty_out(vty, "unknown MLAG role %s\n", argv[idx]->arg);
+		return CMD_WARNING;
+	}
+
+	/* peer-state: "up" maps to true, "down" to false (token 7) */
+	snprintf(mlag_peer_state_xpath, sizeof(mlag_peer_state_xpath),
+		 FRR_PIM_VRF_XPATH,
+		 "frr-pim:pimd", "pim", "default", "frr-routing:ipv4");
+	strlcat(mlag_peer_state_xpath, "/mlag/peer-state",
+		sizeof(mlag_peer_state_xpath));
+
+	idx += 2;
+	if (!strcmp(argv[idx]->arg, "up")) {
+		nb_cli_enqueue_change(vty, mlag_peer_state_xpath, NB_OP_MODIFY,
+				      "true");
+
+	} else if (!strcmp(argv[idx]->arg, "down")) {
+		/* BUGFIX: this branch previously tested
+		 * `strcmp(argv[idx]->arg, "down")` (missing '!'), which
+		 * rejected the literal "down" with "unknown MLAG state"
+		 * while accepting any other token as down. */
+		nb_cli_enqueue_change(vty, mlag_peer_state_xpath, NB_OP_MODIFY,
+				      "false");
+
+	} else {
+		vty_out(vty, "unknown MLAG state %s\n", argv[idx]->arg);
+		return CMD_WARNING;
+	}
+
+	/* reg-address: PIP address (token 9) */
+	snprintf(mlag_reg_address_xpath, sizeof(mlag_reg_address_xpath),
+		 FRR_PIM_VRF_XPATH,
+		 "frr-pim:pimd", "pim", "default", "frr-routing:ipv4");
+	strlcat(mlag_reg_address_xpath, "/mlag/reg-address",
+		sizeof(mlag_reg_address_xpath));
+
+	idx += 2;
+	nb_cli_enqueue_change(vty, mlag_reg_address_xpath, NB_OP_MODIFY,
+			      argv[idx]->arg);
+
+	return nb_cli_apply_changes(vty, NULL);
+}
+
+/* Register every PIM/IGMP/MSDP CLI command with the command tree.
+ * Called once at daemon startup. */
+void pim_cmd_init(void)
+{
+ if_cmd_init(pim_interface_config_write);
+
+ install_node(&debug_node);
+
+ install_element(ENABLE_NODE, &pim_test_sg_keepalive_cmd);
+
+ /* Global / per-VRF PIM configuration commands. */
+ install_element(CONFIG_NODE, &ip_pim_rp_cmd);
+ install_element(VRF_NODE, &ip_pim_rp_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_rp_cmd);
+ install_element(VRF_NODE, &no_ip_pim_rp_cmd);
+ install_element(CONFIG_NODE, &ip_pim_rp_prefix_list_cmd);
+ install_element(VRF_NODE, &ip_pim_rp_prefix_list_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_rp_prefix_list_cmd);
+ install_element(VRF_NODE, &no_ip_pim_rp_prefix_list_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_ssm_prefix_list_cmd);
+ install_element(VRF_NODE, &no_ip_pim_ssm_prefix_list_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_ssm_prefix_list_name_cmd);
+ install_element(VRF_NODE, &no_ip_pim_ssm_prefix_list_name_cmd);
+ install_element(CONFIG_NODE, &ip_pim_ssm_prefix_list_cmd);
+ install_element(VRF_NODE, &ip_pim_ssm_prefix_list_cmd);
+ install_element(CONFIG_NODE, &ip_pim_register_suppress_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_register_suppress_cmd);
+ install_element(CONFIG_NODE, &ip_pim_spt_switchover_infinity_cmd);
+ install_element(VRF_NODE, &ip_pim_spt_switchover_infinity_cmd);
+ install_element(CONFIG_NODE, &ip_pim_spt_switchover_infinity_plist_cmd);
+ install_element(VRF_NODE, &ip_pim_spt_switchover_infinity_plist_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_spt_switchover_infinity_cmd);
+ install_element(VRF_NODE, &no_ip_pim_spt_switchover_infinity_cmd);
+ install_element(CONFIG_NODE,
+ &no_ip_pim_spt_switchover_infinity_plist_cmd);
+ install_element(VRF_NODE, &no_ip_pim_spt_switchover_infinity_plist_cmd);
+ install_element(CONFIG_NODE, &pim_register_accept_list_cmd);
+ install_element(VRF_NODE, &pim_register_accept_list_cmd);
+ install_element(CONFIG_NODE, &ip_pim_joinprune_time_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_joinprune_time_cmd);
+ install_element(CONFIG_NODE, &ip_pim_keep_alive_cmd);
+ install_element(VRF_NODE, &ip_pim_keep_alive_cmd);
+ install_element(CONFIG_NODE, &ip_pim_rp_keep_alive_cmd);
+ install_element(VRF_NODE, &ip_pim_rp_keep_alive_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_keep_alive_cmd);
+ install_element(VRF_NODE, &no_ip_pim_keep_alive_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_rp_keep_alive_cmd);
+ install_element(VRF_NODE, &no_ip_pim_rp_keep_alive_cmd);
+ install_element(CONFIG_NODE, &ip_pim_packets_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_packets_cmd);
+ install_element(CONFIG_NODE, &ip_pim_v6_secondary_cmd);
+ install_element(VRF_NODE, &ip_pim_v6_secondary_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_v6_secondary_cmd);
+ install_element(VRF_NODE, &no_ip_pim_v6_secondary_cmd);
+ install_element(CONFIG_NODE, &ip_ssmpingd_cmd);
+ install_element(VRF_NODE, &ip_ssmpingd_cmd);
+ install_element(CONFIG_NODE, &no_ip_ssmpingd_cmd);
+ install_element(VRF_NODE, &no_ip_ssmpingd_cmd);
+ install_element(CONFIG_NODE, &ip_msdp_peer_cmd);
+ install_element(VRF_NODE, &ip_msdp_peer_cmd);
+ install_element(CONFIG_NODE, &no_ip_msdp_peer_cmd);
+ install_element(VRF_NODE, &no_ip_msdp_peer_cmd);
+ install_element(CONFIG_NODE, &ip_pim_ecmp_cmd);
+ install_element(VRF_NODE, &ip_pim_ecmp_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_ecmp_cmd);
+ install_element(VRF_NODE, &no_ip_pim_ecmp_cmd);
+ install_element(CONFIG_NODE, &ip_pim_ecmp_rebalance_cmd);
+ install_element(VRF_NODE, &ip_pim_ecmp_rebalance_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_ecmp_rebalance_cmd);
+ install_element(VRF_NODE, &no_ip_pim_ecmp_rebalance_cmd);
+ install_element(CONFIG_NODE, &ip_pim_mlag_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_mlag_cmd);
+ install_element(CONFIG_NODE, &ip_igmp_group_watermark_cmd);
+ install_element(VRF_NODE, &ip_igmp_group_watermark_cmd);
+ install_element(CONFIG_NODE, &no_ip_igmp_group_watermark_cmd);
+ install_element(VRF_NODE, &no_ip_igmp_group_watermark_cmd);
+
+ /* Per-interface IGMP and PIM configuration commands. */
+ install_element(INTERFACE_NODE, &interface_ip_igmp_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ip_igmp_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_igmp_join_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ip_igmp_join_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_igmp_version_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ip_igmp_version_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_igmp_query_interval_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_no_ip_igmp_query_interval_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_ip_igmp_query_max_response_time_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_no_ip_igmp_query_max_response_time_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_ip_igmp_query_max_response_time_dsec_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_no_ip_igmp_query_max_response_time_dsec_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_ip_igmp_last_member_query_count_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_no_ip_igmp_last_member_query_count_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_ip_igmp_last_member_query_interval_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_no_ip_igmp_last_member_query_interval_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_pim_activeactive_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_pim_ssm_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ip_pim_ssm_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_pim_sm_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ip_pim_sm_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_pim_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ip_pim_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_pim_drprio_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ip_pim_drprio_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_pim_hello_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ip_pim_hello_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_pim_boundary_oil_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ip_pim_boundary_oil_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_igmp_query_generate_cmd);
+
+ // Static mroutes NEB
+ install_element(INTERFACE_NODE, &interface_ip_mroute_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ip_mroute_cmd);
+
+ /* Operational "show" commands. */
+ install_element(VIEW_NODE, &show_ip_igmp_interface_cmd);
+ install_element(VIEW_NODE, &show_ip_igmp_interface_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_igmp_join_cmd);
+ install_element(VIEW_NODE, &show_ip_igmp_join_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_igmp_groups_cmd);
+ install_element(VIEW_NODE, &show_ip_igmp_groups_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_igmp_groups_retransmissions_cmd);
+ install_element(VIEW_NODE, &show_ip_igmp_sources_cmd);
+ install_element(VIEW_NODE, &show_ip_igmp_sources_retransmissions_cmd);
+ install_element(VIEW_NODE, &show_ip_igmp_statistics_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_assert_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_assert_internal_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_assert_metric_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_assert_winner_metric_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_interface_traffic_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_interface_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_interface_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_join_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_join_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_jp_agg_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_local_membership_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_mlag_summary_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_mlag_up_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_mlag_up_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_neighbor_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_neighbor_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_rpf_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_rpf_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_secondary_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_state_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_state_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_upstream_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_upstream_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_channel_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_upstream_join_desired_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_upstream_rpf_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_rp_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_rp_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_bsr_cmd);
+ install_element(VIEW_NODE, &show_ip_multicast_cmd);
+ install_element(VIEW_NODE, &show_ip_multicast_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_multicast_count_cmd);
+ install_element(VIEW_NODE, &show_ip_multicast_count_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_mroute_cmd);
+ install_element(VIEW_NODE, &show_ip_mroute_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_mroute_count_cmd);
+ install_element(VIEW_NODE, &show_ip_mroute_count_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_mroute_summary_cmd);
+ install_element(VIEW_NODE, &show_ip_mroute_summary_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_rib_cmd);
+ install_element(VIEW_NODE, &show_ip_ssmpingd_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_nexthop_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_nexthop_lookup_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_bsrp_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_bsm_db_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_statistics_cmd);
+
+ /* Operational "clear" commands. */
+ install_element(ENABLE_NODE, &clear_ip_mroute_count_cmd);
+ install_element(ENABLE_NODE, &clear_ip_interfaces_cmd);
+ install_element(ENABLE_NODE, &clear_ip_igmp_interfaces_cmd);
+ install_element(ENABLE_NODE, &clear_ip_mroute_cmd);
+ install_element(ENABLE_NODE, &clear_ip_pim_interfaces_cmd);
+ install_element(ENABLE_NODE, &clear_ip_pim_interface_traffic_cmd);
+ install_element(ENABLE_NODE, &clear_ip_pim_oil_cmd);
+ install_element(ENABLE_NODE, &clear_ip_pim_statistics_cmd);
+ install_element(ENABLE_NODE, &clear_ip_pim_bsr_db_cmd);
+
+ install_element(ENABLE_NODE, &show_debugging_pim_cmd);
+
+ /* Debug toggles (runtime, ENABLE node). */
+ install_element(ENABLE_NODE, &debug_igmp_cmd);
+ install_element(ENABLE_NODE, &no_debug_igmp_cmd);
+ install_element(ENABLE_NODE, &debug_igmp_events_cmd);
+ install_element(ENABLE_NODE, &no_debug_igmp_events_cmd);
+ install_element(ENABLE_NODE, &debug_igmp_packets_cmd);
+ install_element(ENABLE_NODE, &no_debug_igmp_packets_cmd);
+ install_element(ENABLE_NODE, &debug_igmp_trace_cmd);
+ install_element(ENABLE_NODE, &no_debug_igmp_trace_cmd);
+ install_element(ENABLE_NODE, &debug_igmp_trace_detail_cmd);
+ install_element(ENABLE_NODE, &no_debug_igmp_trace_detail_cmd);
+ install_element(ENABLE_NODE, &debug_mroute_cmd);
+ install_element(ENABLE_NODE, &debug_mroute_detail_cmd);
+ install_element(ENABLE_NODE, &no_debug_mroute_cmd);
+ install_element(ENABLE_NODE, &no_debug_mroute_detail_cmd);
+ install_element(ENABLE_NODE, &debug_pim_static_cmd);
+ install_element(ENABLE_NODE, &no_debug_pim_static_cmd);
+ install_element(ENABLE_NODE, &debug_pim_cmd);
+ install_element(ENABLE_NODE, &debug_pim_nht_cmd);
+ install_element(ENABLE_NODE, &debug_pim_nht_det_cmd);
+ install_element(ENABLE_NODE, &debug_pim_nht_rp_cmd);
+ install_element(ENABLE_NODE, &no_debug_pim_nht_rp_cmd);
+ install_element(ENABLE_NODE, &debug_pim_events_cmd);
+ install_element(ENABLE_NODE, &debug_pim_packets_cmd);
+ install_element(ENABLE_NODE, &debug_pim_packetdump_send_cmd);
+ install_element(ENABLE_NODE, &debug_pim_packetdump_recv_cmd);
+ install_element(ENABLE_NODE, &debug_pim_trace_cmd);
+ install_element(ENABLE_NODE, &debug_pim_trace_detail_cmd);
+ install_element(ENABLE_NODE, &debug_ssmpingd_cmd);
+ install_element(ENABLE_NODE, &no_debug_ssmpingd_cmd);
+ install_element(ENABLE_NODE, &debug_pim_zebra_cmd);
+ install_element(ENABLE_NODE, &debug_pim_mlag_cmd);
+ install_element(ENABLE_NODE, &no_debug_pim_mlag_cmd);
+ install_element(ENABLE_NODE, &debug_pim_vxlan_cmd);
+ install_element(ENABLE_NODE, &no_debug_pim_vxlan_cmd);
+ install_element(ENABLE_NODE, &debug_msdp_cmd);
+ install_element(ENABLE_NODE, &no_debug_msdp_cmd);
+ install_element(ENABLE_NODE, &debug_msdp_events_cmd);
+ install_element(ENABLE_NODE, &no_debug_msdp_events_cmd);
+ install_element(ENABLE_NODE, &debug_msdp_packets_cmd);
+ install_element(ENABLE_NODE, &no_debug_msdp_packets_cmd);
+ install_element(ENABLE_NODE, &debug_mtrace_cmd);
+ install_element(ENABLE_NODE, &no_debug_mtrace_cmd);
+ install_element(ENABLE_NODE, &debug_bsm_cmd);
+ install_element(ENABLE_NODE, &no_debug_bsm_cmd);
+
+ /* Same debug toggles on CONFIG node so they can be saved to config. */
+ install_element(CONFIG_NODE, &debug_igmp_cmd);
+ install_element(CONFIG_NODE, &no_debug_igmp_cmd);
+ install_element(CONFIG_NODE, &debug_igmp_events_cmd);
+ install_element(CONFIG_NODE, &no_debug_igmp_events_cmd);
+ install_element(CONFIG_NODE, &debug_igmp_packets_cmd);
+ install_element(CONFIG_NODE, &no_debug_igmp_packets_cmd);
+ install_element(CONFIG_NODE, &debug_igmp_trace_cmd);
+ install_element(CONFIG_NODE, &no_debug_igmp_trace_cmd);
+ install_element(CONFIG_NODE, &debug_igmp_trace_detail_cmd);
+ install_element(CONFIG_NODE, &no_debug_igmp_trace_detail_cmd);
+ install_element(CONFIG_NODE, &debug_mroute_cmd);
+ install_element(CONFIG_NODE, &debug_mroute_detail_cmd);
+ install_element(CONFIG_NODE, &no_debug_mroute_cmd);
+ install_element(CONFIG_NODE, &no_debug_mroute_detail_cmd);
+ install_element(CONFIG_NODE, &debug_pim_static_cmd);
+ install_element(CONFIG_NODE, &no_debug_pim_static_cmd);
+ install_element(CONFIG_NODE, &debug_pim_cmd);
+ install_element(CONFIG_NODE, &debug_pim_nht_cmd);
+ install_element(CONFIG_NODE, &debug_pim_nht_det_cmd);
+ install_element(CONFIG_NODE, &debug_pim_nht_rp_cmd);
+ install_element(CONFIG_NODE, &no_debug_pim_nht_rp_cmd);
+ install_element(CONFIG_NODE, &debug_pim_events_cmd);
+ install_element(CONFIG_NODE, &debug_pim_packets_cmd);
+ install_element(CONFIG_NODE, &debug_pim_packetdump_send_cmd);
+ install_element(CONFIG_NODE, &debug_pim_packetdump_recv_cmd);
+ install_element(CONFIG_NODE, &debug_pim_trace_cmd);
+ install_element(CONFIG_NODE, &debug_pim_trace_detail_cmd);
+ install_element(CONFIG_NODE, &debug_ssmpingd_cmd);
+ install_element(CONFIG_NODE, &no_debug_ssmpingd_cmd);
+ install_element(CONFIG_NODE, &debug_pim_zebra_cmd);
+ install_element(CONFIG_NODE, &debug_pim_mlag_cmd);
+ install_element(CONFIG_NODE, &no_debug_pim_mlag_cmd);
+ install_element(CONFIG_NODE, &debug_pim_vxlan_cmd);
+ install_element(CONFIG_NODE, &no_debug_pim_vxlan_cmd);
+ install_element(CONFIG_NODE, &debug_msdp_cmd);
+ install_element(CONFIG_NODE, &no_debug_msdp_cmd);
+ install_element(CONFIG_NODE, &debug_msdp_events_cmd);
+ install_element(CONFIG_NODE, &no_debug_msdp_events_cmd);
+ install_element(CONFIG_NODE, &debug_msdp_packets_cmd);
+ install_element(CONFIG_NODE, &no_debug_msdp_packets_cmd);
+ install_element(CONFIG_NODE, &debug_mtrace_cmd);
+ install_element(CONFIG_NODE, &no_debug_mtrace_cmd);
+ install_element(CONFIG_NODE, &debug_bsm_cmd);
+ install_element(CONFIG_NODE, &no_debug_bsm_cmd);
+
+ /* MSDP configuration and show commands. */
+ install_element(CONFIG_NODE, &ip_msdp_timers_cmd);
+ install_element(VRF_NODE, &ip_msdp_timers_cmd);
+ install_element(CONFIG_NODE, &no_ip_msdp_timers_cmd);
+ install_element(VRF_NODE, &no_ip_msdp_timers_cmd);
+ install_element(CONFIG_NODE, &ip_msdp_mesh_group_member_cmd);
+ install_element(VRF_NODE, &ip_msdp_mesh_group_member_cmd);
+ install_element(CONFIG_NODE, &no_ip_msdp_mesh_group_member_cmd);
+ install_element(VRF_NODE, &no_ip_msdp_mesh_group_member_cmd);
+ install_element(CONFIG_NODE, &ip_msdp_mesh_group_source_cmd);
+ install_element(VRF_NODE, &ip_msdp_mesh_group_source_cmd);
+ install_element(CONFIG_NODE, &no_ip_msdp_mesh_group_source_cmd);
+ install_element(VRF_NODE, &no_ip_msdp_mesh_group_source_cmd);
+ install_element(CONFIG_NODE, &no_ip_msdp_mesh_group_cmd);
+ install_element(VRF_NODE, &no_ip_msdp_mesh_group_cmd);
+ install_element(VIEW_NODE, &show_ip_msdp_peer_detail_cmd);
+ install_element(VIEW_NODE, &show_ip_msdp_peer_detail_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_msdp_sa_detail_cmd);
+ install_element(VIEW_NODE, &show_ip_msdp_sa_detail_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_msdp_sa_sg_cmd);
+ install_element(VIEW_NODE, &show_ip_msdp_sa_sg_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_msdp_mesh_group_cmd);
+ install_element(VIEW_NODE, &show_ip_msdp_mesh_group_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_ssm_range_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_group_type_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_vxlan_sg_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_vxlan_sg_work_cmd);
+ install_element(INTERFACE_NODE, &interface_pim_use_source_cmd);
+ install_element(INTERFACE_NODE, &interface_no_pim_use_source_cmd);
+ /* Install BSM command */
+ install_element(INTERFACE_NODE, &ip_pim_bsm_cmd);
+ install_element(INTERFACE_NODE, &no_ip_pim_bsm_cmd);
+ install_element(INTERFACE_NODE, &ip_pim_ucast_bsm_cmd);
+ install_element(INTERFACE_NODE, &no_ip_pim_ucast_bsm_cmd);
+ /* Install BFD command */
+ install_element(INTERFACE_NODE, &ip_pim_bfd_cmd);
+ install_element(INTERFACE_NODE, &ip_pim_bfd_param_cmd);
+ install_element(INTERFACE_NODE, &no_ip_pim_bfd_profile_cmd);
+ install_element(INTERFACE_NODE, &no_ip_pim_bfd_cmd);
+#if HAVE_BFDD == 0
+ install_element(INTERFACE_NODE, &no_ip_pim_bfd_param_cmd);
+#endif /* !HAVE_BFDD */
+}
diff --git a/pimd/pim_cmd.h b/pimd/pim_cmd.h
new file mode 100644
index 0000000..d39d77c
--- /dev/null
+++ b/pimd/pim_cmd.h
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_CMD_H
+#define PIM_CMD_H
+
+/* Shared CLI help strings for the PIM/IGMP/MSDP commands. */
+#define PIM_STR "PIM information\n"
+#define IGMP_STR "IGMP information\n"
+#define IGMP_GROUP_STR "IGMP groups information\n"
+#define IGMP_SOURCE_STR "IGMP sources information\n"
+#define CONF_SSMPINGD_STR "Enable ssmpingd operation\n"
+#define SHOW_SSMPINGD_STR "ssmpingd operation\n"
+#define IFACE_PIM_STR "Enable PIM SSM operation\n"
+#define IFACE_PIM_SM_STR "Enable PIM SM operation\n"
+#define IFACE_PIM_HELLO_STR "Hello Interval\n"
+#define IFACE_PIM_HELLO_TIME_STR "Time in seconds for Hello Interval\n"
+#define IFACE_PIM_HELLO_HOLD_STR "Time in seconds for Hold Interval\n"
+#define IFACE_IGMP_STR "Enable IGMP operation\n"
+#define IFACE_IGMP_QUERY_INTERVAL_STR "IGMP host query interval\n"
+#define IFACE_IGMP_QUERY_MAX_RESPONSE_TIME_STR "IGMP max query response value (seconds)\n"
+#define IFACE_IGMP_QUERY_MAX_RESPONSE_TIME_DSEC_STR "IGMP max query response value (deciseconds)\n"
+#define IFACE_IGMP_LAST_MEMBER_QUERY_INTERVAL_STR "IGMP last member query interval\n"
+#define IFACE_IGMP_LAST_MEMBER_QUERY_COUNT_STR "IGMP last member query count\n"
+/* Help strings for the "debug ..." command family. */
+#define DEBUG_IGMP_STR "IGMP protocol activity\n"
+#define DEBUG_IGMP_EVENTS_STR "IGMP protocol events\n"
+#define DEBUG_IGMP_PACKETS_STR "IGMP protocol packets\n"
+#define DEBUG_IGMP_TRACE_STR "IGMP internal daemon activity\n"
+#define DEBUG_MROUTE_STR "PIM interaction with kernel MFC cache\n"
+#define DEBUG_STATIC_STR "PIM Static Multicast Route activity\n"
+#define DEBUG_PIM_STR "PIM protocol activity\n"
+#define DEBUG_PIM_EVENTS_STR "PIM protocol events\n"
+#define DEBUG_PIM_PACKETS_STR "PIM protocol packets\n"
+#define DEBUG_PIM_HELLO_PACKETS_STR "PIM Hello protocol packets\n"
+#define DEBUG_PIM_J_P_PACKETS_STR "PIM Join/Prune protocol packets\n"
+#define DEBUG_PIM_PIM_REG_PACKETS_STR "PIM Register/Reg-Stop protocol packets\n"
+#define DEBUG_PIM_PACKETDUMP_STR "PIM packet dump\n"
+#define DEBUG_PIM_PACKETDUMP_SEND_STR "Dump sent packets\n"
+#define DEBUG_PIM_PACKETDUMP_RECV_STR "Dump received packets\n"
+#define DEBUG_PIM_TRACE_STR "PIM internal daemon activity\n"
+#define DEBUG_PIM_ZEBRA_STR "ZEBRA protocol activity\n"
+#define DEBUG_PIM_MLAG_STR "PIM Mlag activity\n"
+#define DEBUG_PIM_VXLAN_STR "PIM VxLAN events\n"
+#define DEBUG_SSMPINGD_STR "ssmpingd activity\n"
+#define CLEAR_IP_IGMP_STR "IGMP clear commands\n"
+#define CLEAR_IP_PIM_STR "PIM clear commands\n"
+#define MROUTE_STR "IP multicast routing table\n"
+#define RIB_STR "IP unicast routing table\n"
+#define CFG_MSDP_STR "Configure multicast source discovery protocol\n"
+#define MSDP_STR "MSDP information\n"
+#define DEBUG_MSDP_STR "MSDP protocol activity\n"
+#define DEBUG_MSDP_EVENTS_STR "MSDP protocol events\n"
+#define DEBUG_MSDP_INTERNAL_STR "MSDP protocol internal\n"
+#define DEBUG_MSDP_PACKETS_STR "MSDP protocol packets\n"
+#define DEBUG_MTRACE_STR "Mtrace protocol activity\n"
+#define DEBUG_PIM_BSM_STR "BSR message processing activity\n"
+
+
+/* Register all PIM CLI commands; called once at startup. */
+void pim_cmd_init(void);
+
+/* Buffer size for short human-readable uptime/timer strings. */
+#define PIM_TIME_STRLEN 10
+#endif /* PIM_CMD_H */
diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c
new file mode 100644
index 0000000..c3eb49d
--- /dev/null
+++ b/pimd/pim_cmd_common.c
@@ -0,0 +1,5695 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for IPv6 FRR
+ * Copyright (C) 2022 Vmware, Inc.
+ * Mobashshera Rasool <mrasool@vmware.com>
+ */
+
+#include <zebra.h>
+
+#include "lib/json.h"
+#include "command.h"
+#include "if.h"
+#include "prefix.h"
+#include "zclient.h"
+#include "plist.h"
+#include "hash.h"
+#include "nexthop.h"
+#include "vrf.h"
+#include "ferr.h"
+#include "lib/srcdest_table.h"
+#include "lib/linklist.h"
+#include "termtable.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_vty.h"
+#include "lib/northbound_cli.h"
+#include "pim_errors.h"
+#include "pim_nb.h"
+#include "pim_mroute.h"
+#include "pim_cmd.h"
+#include "pim6_cmd.h"
+#include "pim_cmd_common.h"
+#include "pim_time.h"
+#include "pim_zebra.h"
+#include "pim_zlookup.h"
+#include "pim_iface.h"
+#include "pim_macro.h"
+#include "pim_neighbor.h"
+#include "pim_nht.h"
+#include "pim_sock.h"
+#include "pim_ssm.h"
+#include "pim_static.h"
+#include "pim_addr.h"
+#include "pim_static.h"
+#include "pim_util.h"
+#include "pim6_mld.h"
+
+/**
+ * Get current node VRF name.
+ *
+ * NOTE:
+ * In case of failure it will print error message to user.
+ *
+ * \returns name or NULL if failed to get VRF.
+ */
+/**
+ * Get current node VRF name.
+ *
+ * NOTE:
+ * In case of failure it will print error message to user.
+ *
+ * \returns name or NULL if failed to get VRF.  Returns the default VRF
+ * name when the CLI session is not nested inside a "vrf" block.
+ */
+const char *pim_cli_get_vrf_name(struct vty *vty)
+{
+ const struct lyd_node *vrf_node;
+
+ /* Not inside any VRF context. */
+ if (vty->xpath_index == 0)
+ return VRF_DEFAULT_NAME;
+
+ /* Resolve the VRF node the CLI session is currently nested under. */
+ vrf_node = yang_dnode_get(vty->candidate_config->dnode, VTY_CURR_XPATH);
+ if (vrf_node == NULL) {
+ vty_out(vty, "%% Failed to get vrf dnode in configuration\n");
+ return NULL;
+ }
+
+ return yang_dnode_get_string(vrf_node, "./name");
+}
+
+/* CLI handler: set the PIM join/prune interval (string value 'jpi_str')
+ * on the per-AF router node via the northbound layer. */
+int pim_process_join_prune_cmd(struct vty *vty, const char *jpi_str)
+{
+ char xpath[XPATH_MAXLEN];
+
+ snprintf(xpath, sizeof(xpath), FRR_PIM_ROUTER_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(xpath, "/join-prune-interval", sizeof(xpath));
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, jpi_str);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: restore the default join/prune interval by destroying
+ * the configured node. */
+int pim_process_no_join_prune_cmd(struct vty *vty)
+{
+ char xpath[XPATH_MAXLEN];
+
+ snprintf(xpath, sizeof(xpath), FRR_PIM_ROUTER_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(xpath, "/join-prune-interval", sizeof(xpath));
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: configure SPT switchover "infinity" for the current VRF —
+ * sets spt-action to PIM_SPT_INFINITY and removes any previously
+ * configured prefix-list restriction. */
+int pim_process_spt_switchover_infinity_cmd(struct vty *vty)
+{
+ const char *vrfname;
+ char spt_plist_xpath[XPATH_MAXLEN];
+ char spt_action_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(spt_plist_xpath, sizeof(spt_plist_xpath),
+ FRR_PIM_VRF_XPATH, "frr-pim:pimd", "pim", vrfname,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(spt_plist_xpath, "/spt-switchover/spt-infinity-prefix-list",
+ sizeof(spt_plist_xpath));
+
+ snprintf(spt_action_xpath, sizeof(spt_action_xpath),
+ FRR_PIM_VRF_XPATH, "frr-pim:pimd", "pim", vrfname,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(spt_action_xpath, "/spt-switchover/spt-action",
+ sizeof(spt_action_xpath));
+
+ /* Only destroy the prefix-list leaf if it actually exists in the
+ * candidate configuration. */
+ if (yang_dnode_exists(vty->candidate_config->dnode, spt_plist_xpath))
+ nb_cli_enqueue_change(vty, spt_plist_xpath, NB_OP_DESTROY,
+ NULL);
+ nb_cli_enqueue_change(vty, spt_action_xpath, NB_OP_MODIFY,
+ "PIM_SPT_INFINITY");
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: configure SPT switchover "infinity" limited to groups
+ * matching prefix-list 'plist' in the current VRF. */
+int pim_process_spt_switchover_prefixlist_cmd(struct vty *vty,
+ const char *plist)
+{
+ const char *vrfname;
+ char spt_plist_xpath[XPATH_MAXLEN];
+ char spt_action_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(spt_plist_xpath, sizeof(spt_plist_xpath),
+ FRR_PIM_VRF_XPATH, "frr-pim:pimd", "pim", vrfname,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(spt_plist_xpath, "/spt-switchover/spt-infinity-prefix-list",
+ sizeof(spt_plist_xpath));
+
+ snprintf(spt_action_xpath, sizeof(spt_action_xpath),
+ FRR_PIM_VRF_XPATH, "frr-pim:pimd", "pim", vrfname,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(spt_action_xpath, "/spt-switchover/spt-action",
+ sizeof(spt_action_xpath));
+
+ /* Both the action and its prefix-list restriction are set together. */
+ nb_cli_enqueue_change(vty, spt_action_xpath, NB_OP_MODIFY,
+ "PIM_SPT_INFINITY");
+ nb_cli_enqueue_change(vty, spt_plist_xpath, NB_OP_MODIFY,
+ plist);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: revert SPT switchover to the immediate default — delete
+ * the prefix-list leaf and set spt-action back to PIM_SPT_IMMEDIATE. */
+int pim_process_no_spt_switchover_cmd(struct vty *vty)
+{
+ const char *vrfname;
+ char spt_plist_xpath[XPATH_MAXLEN];
+ char spt_action_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(spt_plist_xpath, sizeof(spt_plist_xpath),
+ FRR_PIM_VRF_XPATH, "frr-pim:pimd", "pim", vrfname,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(spt_plist_xpath, "/spt-switchover/spt-infinity-prefix-list",
+ sizeof(spt_plist_xpath));
+
+ snprintf(spt_action_xpath, sizeof(spt_action_xpath),
+ FRR_PIM_VRF_XPATH, "frr-pim:pimd", "pim", vrfname,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(spt_action_xpath, "/spt-switchover/spt-action",
+ sizeof(spt_action_xpath));
+
+ nb_cli_enqueue_change(vty, spt_plist_xpath, NB_OP_DESTROY, NULL);
+ nb_cli_enqueue_change(vty, spt_action_xpath, NB_OP_MODIFY,
+ "PIM_SPT_IMMEDIATE");
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: set the router-level "packets" leaf (string 'packet')
+ * for this address family. */
+int pim_process_pim_packet_cmd(struct vty *vty, const char *packet)
+{
+ char xpath[XPATH_MAXLEN];
+
+ snprintf(xpath, sizeof(xpath), FRR_PIM_ROUTER_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(xpath, "/packets", sizeof(xpath));
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, packet);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: restore the default "packets" value by destroying the
+ * configured leaf. */
+int pim_process_no_pim_packet_cmd(struct vty *vty)
+{
+ char xpath[XPATH_MAXLEN];
+
+ snprintf(xpath, sizeof(xpath), FRR_PIM_ROUTER_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(xpath, "/packets", sizeof(xpath));
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: set the per-VRF keep-alive timer to 'kat' (seconds, as a
+ * string) via the northbound layer. */
+int pim_process_keepalivetimer_cmd(struct vty *vty, const char *kat)
+{
+ const char *vrfname;
+ char ka_timer_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(ka_timer_xpath, sizeof(ka_timer_xpath), FRR_PIM_VRF_XPATH,
+ "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
+ strlcat(ka_timer_xpath, "/keep-alive-timer", sizeof(ka_timer_xpath));
+
+ nb_cli_enqueue_change(vty, ka_timer_xpath, NB_OP_MODIFY,
+ kat);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: restore the per-VRF keep-alive timer default by
+ * destroying the configured leaf. */
+int pim_process_no_keepalivetimer_cmd(struct vty *vty)
+{
+ const char *vrfname;
+ char ka_timer_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(ka_timer_xpath, sizeof(ka_timer_xpath), FRR_PIM_VRF_XPATH,
+ "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
+ strlcat(ka_timer_xpath, "/keep-alive-timer", sizeof(ka_timer_xpath));
+
+ nb_cli_enqueue_change(vty, ka_timer_xpath, NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: set the per-VRF RP keep-alive timer to 'rpkat' (seconds,
+ * as a string). */
+int pim_process_rp_kat_cmd(struct vty *vty, const char *rpkat)
+{
+ const char *vrfname;
+ char rp_ka_timer_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(rp_ka_timer_xpath, sizeof(rp_ka_timer_xpath),
+ FRR_PIM_VRF_XPATH, "frr-pim:pimd", "pim", vrfname,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(rp_ka_timer_xpath, "/rp-keep-alive-timer",
+ sizeof(rp_ka_timer_xpath));
+
+ nb_cli_enqueue_change(vty, rp_ka_timer_xpath, NB_OP_MODIFY,
+ rpkat);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: revert the RP keep-alive timer to its derived default.
+ * Rather than destroying the node, the default is recomputed from the
+ * currently configured register-suppress time (RFC 4601 formula:
+ * 3 * Register_Suppression_Time + Register_Probe_Time) and written back
+ * as an explicit value, clamped to the uint16 leaf range. */
+int pim_process_no_rp_kat_cmd(struct vty *vty)
+{
+ const char *vrfname;
+ char rp_ka_timer[6];
+ char rp_ka_timer_xpath[XPATH_MAXLEN];
+ uint v;
+ char rs_timer_xpath[XPATH_MAXLEN];
+
+ snprintf(rs_timer_xpath, sizeof(rs_timer_xpath),
+ FRR_PIM_ROUTER_XPATH, FRR_PIM_AF_XPATH_VAL);
+ strlcat(rs_timer_xpath, "/register-suppress-time",
+ sizeof(rs_timer_xpath));
+
+ /* RFC4601 */
+ v = yang_dnode_get_uint16(vty->candidate_config->dnode, "%s",
+ rs_timer_xpath);
+ v = 3 * v + PIM_REGISTER_PROBE_TIME_DEFAULT;
+ /* Clamp so the value still fits the uint16 YANG leaf. */
+ if (v > UINT16_MAX)
+ v = UINT16_MAX;
+ snprintf(rp_ka_timer, sizeof(rp_ka_timer), "%u", v);
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(rp_ka_timer_xpath, sizeof(rp_ka_timer_xpath),
+ FRR_PIM_VRF_XPATH, "frr-pim:pimd", "pim", vrfname,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(rp_ka_timer_xpath, "/rp-keep-alive-timer",
+ sizeof(rp_ka_timer_xpath));
+
+ nb_cli_enqueue_change(vty, rp_ka_timer_xpath, NB_OP_MODIFY,
+ rp_ka_timer);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: set the router-level register-suppress time to 'rst'
+ * (seconds, as a string). */
+int pim_process_register_suppress_cmd(struct vty *vty, const char *rst)
+{
+ char xpath[XPATH_MAXLEN];
+
+ snprintf(xpath, sizeof(xpath), FRR_PIM_ROUTER_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(xpath, "/register-suppress-time", sizeof(xpath));
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, rst);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: restore the default register-suppress time by destroying
+ * the configured leaf. */
+int pim_process_no_register_suppress_cmd(struct vty *vty)
+{
+ char xpath[XPATH_MAXLEN];
+
+ snprintf(xpath, sizeof(xpath), FRR_PIM_ROUTER_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(xpath, "/register-suppress-time", sizeof(xpath));
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: enable PIM on the current interface ("ip pim" /
+ * "ipv6 pim"). */
+int pim_process_ip_pim_cmd(struct vty *vty)
+{
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY, "true");
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/* CLI handler: enable or disable PIM passive mode on the current
+ * interface according to 'enable'. */
+int pim_process_ip_pim_passive_cmd(struct vty *vty, bool enable)
+{
+ if (enable)
+ nb_cli_enqueue_change(vty, "./pim-passive-enable", NB_OP_MODIFY,
+ "true");
+ else
+ nb_cli_enqueue_change(vty, "./pim-passive-enable", NB_OP_MODIFY,
+ "false");
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/* CLI handler: disable PIM on the current interface.  If the GM
+ * (IGMP/MLD) side is not enabled either, the whole per-interface
+ * multicast configuration subtree is removed; otherwise only
+ * pim-enable is flipped to false so the GM config survives. */
+int pim_process_no_ip_pim_cmd(struct vty *vty)
+{
+ const struct lyd_node *mld_enable_dnode;
+ char mld_if_xpath[XPATH_MAXLEN];
+
+ int printed =
+ snprintf(mld_if_xpath, sizeof(mld_if_xpath),
+ "%s/frr-gmp:gmp/address-family[address-family='%s']",
+ VTY_CURR_XPATH, FRR_PIM_AF_XPATH_VAL);
+
+ if (printed >= (int)(sizeof(mld_if_xpath))) {
+ vty_out(vty, "Xpath too long (%d > %u)", printed + 1,
+ XPATH_MAXLEN);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /* Check whether IGMP/MLD is enabled on this interface. */
+ mld_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_GMP_ENABLE_XPATH, VTY_CURR_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+
+ if (!mld_enable_dnode) {
+ /* No GM config at all: delete the GM subtree and the PIM
+ * interface node itself. */
+ nb_cli_enqueue_change(vty, mld_if_xpath, NB_OP_DESTROY, NULL);
+ nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
+ } else {
+ if (!yang_dnode_get_bool(mld_enable_dnode, ".")) {
+ /* GM present but disabled: same full cleanup. */
+ nb_cli_enqueue_change(vty, mld_if_xpath, NB_OP_DESTROY,
+ NULL);
+ nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
+ } else
+ /* GM active: only turn PIM off. */
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "false");
+ }
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/* CLI handler: set the interface DR priority ('drpriority_str'). */
+int pim_process_ip_pim_drprio_cmd(struct vty *vty, const char *drpriority_str)
+{
+ nb_cli_enqueue_change(vty, "./dr-priority", NB_OP_MODIFY,
+ drpriority_str);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/* CLI handler: restore the default DR priority on the interface. */
+int pim_process_no_ip_pim_drprio_cmd(struct vty *vty)
+{
+ nb_cli_enqueue_change(vty, "./dr-priority", NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/* CLI handler: set the PIM hello interval (and optional holdtime) on the
+ * interface.  As a side effect, PIM is implicitly enabled when the GM
+ * (IGMP/MLD) side is absent or disabled, so the hello config takes
+ * effect. */
+int pim_process_ip_pim_hello_cmd(struct vty *vty, const char *hello_str,
+ const char *hold_str)
+{
+ const struct lyd_node *mld_enable_dnode;
+
+ mld_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_GMP_ENABLE_XPATH, VTY_CURR_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+
+ if (!mld_enable_dnode) {
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+ } else {
+ if (!yang_dnode_get_bool(mld_enable_dnode, "."))
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+ }
+
+ nb_cli_enqueue_change(vty, "./hello-interval", NB_OP_MODIFY, hello_str);
+
+ /* holdtime is optional; only set it when the user supplied one. */
+ if (hold_str)
+ nb_cli_enqueue_change(vty, "./hello-holdtime", NB_OP_MODIFY,
+ hold_str);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/* CLI handler: restore default hello interval and holdtime on the
+ * interface. */
+int pim_process_no_ip_pim_hello_cmd(struct vty *vty)
+{
+ nb_cli_enqueue_change(vty, "./hello-interval", NB_OP_DESTROY, NULL);
+ nb_cli_enqueue_change(vty, "./hello-holdtime", NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/* CLI handler: toggle MLAG active-active on the interface.  'no' is
+ * non-NULL for the "no ..." command form; enabling also turns PIM on. */
+int pim_process_ip_pim_activeactive_cmd(struct vty *vty, const char *no)
+{
+ if (no)
+ nb_cli_enqueue_change(vty, "./active-active", NB_OP_MODIFY,
+ "false");
+ else {
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+
+ nb_cli_enqueue_change(vty, "./active-active", NB_OP_MODIFY,
+ "true");
+ }
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/* CLI handler: attach multicast boundary prefix-list 'oil' to the
+ * interface. */
+int pim_process_ip_pim_boundary_oil_cmd(struct vty *vty, const char *oil)
+{
+ nb_cli_enqueue_change(vty, "./multicast-boundary-oil", NB_OP_MODIFY,
+ oil);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/* CLI handler: remove the multicast boundary prefix-list from the
+ * interface. */
+int pim_process_no_ip_pim_boundary_oil_cmd(struct vty *vty)
+{
+ nb_cli_enqueue_change(vty, "./multicast-boundary-oil", NB_OP_DESTROY,
+ NULL);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/* CLI handler: add a static mroute (group, optional source) with 'oif'
+ * set to 'interface'.  A missing source means "any source" and is
+ * substituted with the wildcard address in the keyed xpath. */
+int pim_process_ip_mroute_cmd(struct vty *vty, const char *interface,
+ const char *group_str, const char *source_str)
+{
+ nb_cli_enqueue_change(vty, "./oif", NB_OP_MODIFY, interface);
+
+ if (!source_str) {
+ char buf[SRCDEST2STR_BUFFER];
+
+ /* NOTE(review): formats "::" unconditionally via AF_INET6,
+ * even though this file is shared between the IPv4 and IPv6
+ * builds — confirm the northbound key parsing accepts this
+ * for PIM_IPV == 4. */
+ inet_ntop(AF_INET6, &in6addr_any, buf, sizeof(buf));
+ return nb_cli_apply_changes(vty, FRR_PIM_MROUTE_XPATH,
+ FRR_PIM_AF_XPATH_VAL, buf,
+ group_str);
+ }
+
+ return nb_cli_apply_changes(vty, FRR_PIM_MROUTE_XPATH,
+ FRR_PIM_AF_XPATH_VAL, source_str,
+ group_str);
+}
+
+/* CLI handler: delete a static mroute; mirrors pim_process_ip_mroute_cmd
+ * key construction (missing source becomes the wildcard address). */
+int pim_process_no_ip_mroute_cmd(struct vty *vty, const char *interface,
+ const char *group_str, const char *source_str)
+{
+ nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
+
+ if (!source_str) {
+ char buf[SRCDEST2STR_BUFFER];
+
+ inet_ntop(AF_INET6, &in6addr_any, buf, sizeof(buf));
+ return nb_cli_apply_changes(vty, FRR_PIM_MROUTE_XPATH,
+ FRR_PIM_AF_XPATH_VAL, buf,
+ group_str);
+ }
+
+ return nb_cli_apply_changes(vty, FRR_PIM_MROUTE_XPATH,
+ FRR_PIM_AF_XPATH_VAL, source_str,
+ group_str);
+}
+
+/* CLI handler: configure a static RP ('rp_str') for group range
+ * 'group_str'.  Validates that the group prefix is canonical (host bits
+ * clear), that the RP address parses for this AF and is neither the
+ * unspecified nor a multicast address (nor IPv6 link-local), then adds
+ * the group to the RP's group-list via the northbound layer. */
+int pim_process_rp_cmd(struct vty *vty, const char *rp_str,
+ const char *group_str)
+{
+ const char *vrfname;
+ char rp_group_xpath[XPATH_MAXLEN];
+ int result = 0;
+ struct prefix group;
+ pim_addr rp_addr;
+
+ result = str2prefix(group_str, &group);
+ if (result) {
+ struct prefix temp;
+
+ /* Reject prefixes with host bits set (mask inconsistent). */
+ prefix_copy(&temp, &group);
+ apply_mask(&temp);
+ if (!prefix_same(&group, &temp)) {
+ vty_out(vty, "%% Inconsistent address and mask: %s\n",
+ group_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ }
+
+ if (!result) {
+ vty_out(vty, "%% Bad group address specified: %s\n", group_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ result = inet_pton(PIM_AF, rp_str, &rp_addr);
+ if (result <= 0) {
+ vty_out(vty, "%% Bad RP address specified: %s\n", rp_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /* An RP must be a specific unicast address. */
+ if (pim_addr_is_any(rp_addr) || pim_addr_is_multicast(rp_addr)) {
+ vty_out(vty, "%% Bad RP address specified: %s\n", rp_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+#if PIM_IPV == 6
+ /* Link-local addresses are not routable RP candidates. */
+ if (IN6_IS_ADDR_LINKLOCAL(&rp_addr)) {
+ vty_out(vty, "%% Bad RP address specified: %s\n", rp_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+#endif
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(rp_group_xpath, sizeof(rp_group_xpath),
+ FRR_PIM_STATIC_RP_XPATH, "frr-pim:pimd", "pim", vrfname,
+ FRR_PIM_AF_XPATH_VAL, rp_str);
+ strlcat(rp_group_xpath, "/group-list", sizeof(rp_group_xpath));
+
+ nb_cli_enqueue_change(vty, rp_group_xpath, NB_OP_CREATE, group_str);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: remove group range 'group_str' from static RP 'rp_str'.
+ * If the group being removed is the RP's last group-list entry, the
+ * whole RP node is destroyed instead of leaving an empty RP behind. */
+int pim_process_no_rp_cmd(struct vty *vty, const char *rp_str,
+ const char *group_str)
+{
+ char group_list_xpath[XPATH_MAXLEN];
+ char group_xpath[XPATH_MAXLEN];
+ char rp_xpath[XPATH_MAXLEN];
+ int printed;
+ const char *vrfname;
+ const struct lyd_node *group_dnode;
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(rp_xpath, sizeof(rp_xpath), FRR_PIM_STATIC_RP_XPATH,
+ "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL, rp_str);
+
+ printed = snprintf(group_list_xpath, sizeof(group_list_xpath),
+ "%s/group-list", rp_xpath);
+
+ if (printed >= (int)(sizeof(group_list_xpath))) {
+ vty_out(vty, "Xpath too long (%d > %u)", printed + 1,
+ XPATH_MAXLEN);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /* Predicate selects the exact leaf-list entry for this group. */
+ printed = snprintf(group_xpath, sizeof(group_xpath), "%s[.='%s']",
+ group_list_xpath, group_str);
+
+ if (printed >= (int)(sizeof(group_xpath))) {
+ vty_out(vty, "Xpath too long (%d > %u)", printed + 1,
+ XPATH_MAXLEN);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ group_dnode = yang_dnode_get(vty->candidate_config->dnode, group_xpath);
+ if (!group_dnode) {
+ vty_out(vty, "%% Unable to find specified RP\n");
+ return NB_OK;
+ }
+
+ if (yang_is_last_list_dnode(group_dnode))
+ nb_cli_enqueue_change(vty, rp_xpath, NB_OP_DESTROY, NULL);
+ else
+ nb_cli_enqueue_change(vty, group_list_xpath, NB_OP_DESTROY,
+ group_str);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: attach prefix-list 'prefix_list' to static RP 'rp_str'
+ * (prefix-list form of the static RP configuration). */
+int pim_process_rp_plist_cmd(struct vty *vty, const char *rp_str,
+ const char *prefix_list)
+{
+ const char *vrfname;
+ char rp_plist_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(rp_plist_xpath, sizeof(rp_plist_xpath),
+ FRR_PIM_STATIC_RP_XPATH, "frr-pim:pimd", "pim", vrfname,
+ FRR_PIM_AF_XPATH_VAL, rp_str);
+ strlcat(rp_plist_xpath, "/prefix-list", sizeof(rp_plist_xpath));
+
+ nb_cli_enqueue_change(vty, rp_plist_xpath, NB_OP_MODIFY, prefix_list);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* CLI handler: remove the prefix-list form of static RP 'rp_str'.  The
+ * RP node is destroyed only when it exists AND its configured
+ * prefix-list matches the one the user named; otherwise an error is
+ * reported and nothing changes. */
+int pim_process_no_rp_plist_cmd(struct vty *vty, const char *rp_str,
+ const char *prefix_list)
+{
+ char rp_xpath[XPATH_MAXLEN];
+ char plist_xpath[XPATH_MAXLEN];
+ const char *vrfname;
+ const struct lyd_node *plist_dnode;
+ const char *plist;
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(rp_xpath, sizeof(rp_xpath), FRR_PIM_STATIC_RP_XPATH,
+ "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL, rp_str);
+
+ snprintf(plist_xpath, sizeof(plist_xpath), FRR_PIM_STATIC_RP_XPATH,
+ "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL, rp_str);
+ strlcat(plist_xpath, "/prefix-list", sizeof(plist_xpath));
+
+ plist_dnode = yang_dnode_get(vty->candidate_config->dnode, plist_xpath);
+ if (!plist_dnode) {
+ vty_out(vty, "%% Unable to find specified RP\n");
+ return NB_OK;
+ }
+
+ /* Only delete when the configured prefix-list name matches. */
+ plist = yang_dnode_get_string(plist_dnode, "%s", plist_xpath);
+ if (strcmp(prefix_list, plist)) {
+ vty_out(vty, "%% Unable to find specified RP\n");
+ return NB_OK;
+ }
+
+ nb_cli_enqueue_change(vty, rp_xpath, NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* Return true when 'item' matches the (S,G) filter 'match'; an "any"
+ * address in either field of 'match' acts as a wildcard. */
+bool pim_sgaddr_match(pim_sgaddr item, pim_sgaddr match)
+{
+ return (pim_addr_is_any(match.grp) ||
+ !pim_addr_cmp(match.grp, item.grp)) &&
+ (pim_addr_is_any(match.src) ||
+ !pim_addr_cmp(match.src, item.src));
+}
+
+/* Populate 'json' with the common per-interface attributes (name, state,
+ * primary address, ifindex) plus boolean flag keys that are only emitted
+ * when set.  Assumes ifp->info (the PIM interface data) is non-NULL. */
+void json_object_pim_ifp_add(struct json_object *json, struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+
+ pim_ifp = ifp->info;
+ json_object_string_add(json, "name", ifp->name);
+ json_object_string_add(json, "state", if_is_up(ifp) ? "up" : "down");
+ json_object_string_addf(json, "address", "%pPA",
+ &pim_ifp->primary_address);
+ json_object_int_add(json, "index", ifp->ifindex);
+
+ if (if_is_multicast(ifp))
+ json_object_boolean_true_add(json, "flagMulticast");
+
+ if (if_is_broadcast(ifp))
+ json_object_boolean_true_add(json, "flagBroadcast");
+
+ if (ifp->flags & IFF_ALLMULTI)
+ json_object_boolean_true_add(json, "flagAllMulticast");
+
+ if (ifp->flags & IFF_PROMISC)
+ json_object_boolean_true_add(json, "flagPromiscuous");
+
+ if (PIM_IF_IS_DELETED(ifp))
+ json_object_boolean_true_add(json, "flagDeleted");
+
+ if (pim_if_lan_delay_enabled(ifp))
+ json_object_boolean_true_add(json, "lanDelayEnabled");
+}
+
+/* Print a plain-text "Flags" section for interface 'ifp' to the vty
+ * (yes/no per attribute), used by the non-JSON show output. */
+void pim_print_ifp_flags(struct vty *vty, struct interface *ifp)
+{
+ vty_out(vty, "Flags\n");
+ vty_out(vty, "-----\n");
+ vty_out(vty, "All Multicast : %s\n",
+ (ifp->flags & IFF_ALLMULTI) ? "yes" : "no");
+ vty_out(vty, "Broadcast : %s\n",
+ if_is_broadcast(ifp) ? "yes" : "no");
+ vty_out(vty, "Deleted : %s\n",
+ PIM_IF_IS_DELETED(ifp) ? "yes" : "no");
+ vty_out(vty, "Interface Index : %d\n", ifp->ifindex);
+ vty_out(vty, "Multicast : %s\n",
+ if_is_multicast(ifp) ? "yes" : "no");
+ vty_out(vty, "Promiscuous : %s\n",
+ (ifp->flags & IFF_PROMISC) ? "yes" : "no");
+ vty_out(vty, "\n");
+ vty_out(vty, "\n");
+}
+
+/* Dump every PIM_UPSTREAM_FLAG_MASK_* bit of 'up->flags' into 'json' as
+ * an explicitly-named boolean key (false keys are emitted too, unlike
+ * json_object_pim_ifp_add). */
+void json_object_pim_upstream_add(json_object *json, struct pim_upstream *up)
+{
+ json_object_boolean_add(
+ json, "drJoinDesired",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED));
+ json_object_boolean_add(
+ json, "drJoinDesiredUpdated",
+ CHECK_FLAG(up->flags,
+ PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED_UPDATED));
+ json_object_boolean_add(
+ json, "firstHopRouter",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_FHR));
+ json_object_boolean_add(
+ json, "sourceIgmp",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_IGMP));
+ json_object_boolean_add(
+ json, "sourcePim",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_PIM));
+ json_object_boolean_add(
+ json, "sourceStream",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_STREAM));
+ /* XXX: need to print ths flag in the plain text display as well */
+ json_object_boolean_add(
+ json, "sourceMsdp",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_MSDP));
+ json_object_boolean_add(
+ json, "sendSGRptPrune",
+ CHECK_FLAG(up->flags,
+ PIM_UPSTREAM_FLAG_MASK_SEND_SG_RPT_PRUNE));
+ json_object_boolean_add(
+ json, "lastHopRouter",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_LHR));
+ json_object_boolean_add(
+ json, "disableKATExpiry",
+ CHECK_FLAG(up->flags,
+ PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY));
+ json_object_boolean_add(
+ json, "staticIncomingInterface",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_STATIC_IIF));
+ json_object_boolean_add(
+ json, "allowIncomingInterfaceinOil",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL));
+ json_object_boolean_add(
+ json, "noPimRegistrationData",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA));
+ json_object_boolean_add(
+ json, "forcePimRegistration",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG));
+ json_object_boolean_add(
+ json, "sourceVxlanOrigination",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG));
+ json_object_boolean_add(
+ json, "sourceVxlanTermination",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM));
+ json_object_boolean_add(
+ json, "mlagVxlan",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN));
+ json_object_boolean_add(
+ json, "mlagNonDesignatedForwarder",
+ CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_MLAG_NON_DF));
+}
+
+/* Copy a short label for 'join_state' ("NotJ"/"J"/"Unk") into
+ * 'state_str' and return it, for compact table output. */
+static const char *
+pim_upstream_state2brief_str(enum pim_upstream_state join_state,
+ char *state_str, size_t state_str_len)
+{
+ switch (join_state) {
+ case PIM_UPSTREAM_NOTJOINED:
+ strlcpy(state_str, "NotJ", state_str_len);
+ break;
+ case PIM_UPSTREAM_JOINED:
+ strlcpy(state_str, "J", state_str_len);
+ break;
+ default:
+ strlcpy(state_str, "Unk", state_str_len);
+ }
+ return state_str;
+}
+
+/* Copy a short label for register state 'reg_state' into 'state_str'
+ * and return it; join-pending and prune both map to "RegP". */
+static const char *pim_reg_state2brief_str(enum pim_reg_state reg_state,
+ char *state_str,
+ size_t state_str_len)
+{
+ switch (reg_state) {
+ case PIM_REG_NOINFO:
+ strlcpy(state_str, "RegNI", state_str_len);
+ break;
+ case PIM_REG_JOIN:
+ strlcpy(state_str, "RegJ", state_str_len);
+ break;
+ case PIM_REG_JOIN_PENDING:
+ case PIM_REG_PRUNE:
+ strlcpy(state_str, "RegP", state_str_len);
+ break;
+ }
+ return state_str;
+}
+
+/* Emit the RPF-cache refresh statistics for 'pim' either into 'json'
+ * (when non-NULL) or as plain text to the vty.  'now' is monotonic time
+ * used to format the "last refresh" uptime. */
+void pim_show_rpf_refresh_stats(struct vty *vty, struct pim_instance *pim,
+ time_t now, json_object *json)
+{
+ char refresh_uptime[10];
+
+ pim_time_uptime_begin(refresh_uptime, sizeof(refresh_uptime), now,
+ pim->rpf_cache_refresh_last);
+
+ if (json) {
+ /* The refresh delay is a router-global setting; the rest are
+ * per-instance counters. */
+ json_object_int_add(json, "rpfCacheRefreshDelayMsecs",
+ router->rpf_cache_refresh_delay_msec);
+ json_object_int_add(
+ json, "rpfCacheRefreshTimer",
+ pim_time_timer_remain_msec(pim->rpf_cache_refresher));
+ json_object_int_add(json, "rpfCacheRefreshRequests",
+ pim->rpf_cache_refresh_requests);
+ json_object_int_add(json, "rpfCacheRefreshEvents",
+ pim->rpf_cache_refresh_events);
+ json_object_string_add(json, "rpfCacheRefreshLast",
+ refresh_uptime);
+ json_object_int_add(json, "nexthopLookups",
+ pim->nexthop_lookups);
+ json_object_int_add(json, "nexthopLookupsAvoided",
+ pim->nexthop_lookups_avoided);
+ } else {
+ vty_out(vty,
+ "RPF Cache Refresh Delay: %ld msecs\n"
+ "RPF Cache Refresh Timer: %ld msecs\n"
+ "RPF Cache Refresh Requests: %lld\n"
+ "RPF Cache Refresh Events: %lld\n"
+ "RPF Cache Refresh Last: %s\n"
+ "Nexthop Lookups: %lld\n"
+ "Nexthop Lookups Avoided: %lld\n",
+ router->rpf_cache_refresh_delay_msec,
+ pim_time_timer_remain_msec(pim->rpf_cache_refresher),
+ (long long)pim->rpf_cache_refresh_requests,
+ (long long)pim->rpf_cache_refresh_events,
+ refresh_uptime, (long long)pim->nexthop_lookups,
+ (long long)pim->nexthop_lookups_avoided);
+ }
+}
+
+/* "show ... rpf" implementation: print refresh stats, then one entry per
+ * upstream (S,G) with its RPF interface, RPF address, RIB nexthop and
+ * metric/preference — nested JSON objects keyed group->source when
+ * 'json' is set, otherwise a ttable. */
+void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
+{
+ struct pim_upstream *up;
+ time_t now = pim_time_monotonic_sec();
+ struct ttable *tt = NULL;
+ char *table = NULL;
+ json_object *json_group = NULL;
+ json_object *json_row = NULL;
+
+ pim_show_rpf_refresh_stats(vty, pim, now, json);
+
+ if (!json) {
+ vty_out(vty, "\n");
+
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(
+ tt,
+ "Source|Group|RpfIface|RpfAddress|RibNextHop|Metric|Pref");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
+
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ const char *rpf_ifname;
+ struct pim_rpf *rpf = &up->rpf;
+
+ rpf_ifname =
+ rpf->source_nexthop.interface ? rpf->source_nexthop
+ .interface->name
+ : "<ifname?>";
+
+ if (json) {
+ char grp_str[PIM_ADDRSTRLEN];
+ char src_str[PIM_ADDRSTRLEN];
+
+ snprintfrr(grp_str, sizeof(grp_str), "%pPAs",
+ &up->sg.grp);
+ snprintfrr(src_str, sizeof(src_str), "%pPAs",
+ &up->sg.src);
+
+ /* Group objects are shared across sources; create on
+ * first use. */
+ json_object_object_get_ex(json, grp_str, &json_group);
+
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_object_add(json, grp_str,
+ json_group);
+ }
+
+ json_row = json_object_new_object();
+ json_object_string_add(json_row, "source", src_str);
+ json_object_string_add(json_row, "group", grp_str);
+ json_object_string_add(json_row, "rpfInterface",
+ rpf_ifname);
+ json_object_string_addf(json_row, "rpfAddress", "%pPA",
+ &rpf->rpf_addr);
+ json_object_string_addf(
+ json_row, "ribNexthop", "%pPAs",
+ &rpf->source_nexthop.mrib_nexthop_addr);
+ json_object_int_add(
+ json_row, "routeMetric",
+ rpf->source_nexthop.mrib_route_metric);
+ json_object_int_add(
+ json_row, "routePreference",
+ rpf->source_nexthop.mrib_metric_preference);
+ json_object_object_add(json_group, src_str, json_row);
+
+ } else {
+ ttable_add_row(
+ tt, "%pPAs|%pPAs|%s|%pPA|%pPAs|%d|%d",
+ &up->sg.src, &up->sg.grp, rpf_ifname,
+ &rpf->rpf_addr,
+ &rpf->source_nexthop.mrib_nexthop_addr,
+ rpf->source_nexthop.mrib_route_metric,
+ rpf->source_nexthop.mrib_metric_preference);
+ }
+ }
+ /* Dump the generated table. */
+ if (!json) {
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
+}
+
+/* "show ... neighbor secondary" implementation: for every PIM-enabled
+ * interface with an open PIM socket, print one table row per secondary
+ * address (prefix) advertised by each neighbor. */
+void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty)
+{
+ struct interface *ifp;
+ struct ttable *tt = NULL;
+ char *table = NULL;
+
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Interface|Address|Neighbor|Secondary");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp;
+ pim_addr ifaddr;
+ struct listnode *neighnode;
+ struct pim_neighbor *neigh;
+
+ pim_ifp = ifp->info;
+
+ /* Skip interfaces without PIM state or an active socket. */
+ if (!pim_ifp)
+ continue;
+
+ if (pim_ifp->pim_sock_fd < 0)
+ continue;
+
+ ifaddr = pim_ifp->primary_address;
+
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, neighnode,
+ neigh)) {
+ struct listnode *prefix_node;
+ struct prefix *p;
+
+ /* Neighbors with no secondary addresses are omitted. */
+ if (!neigh->prefix_list)
+ continue;
+
+ for (ALL_LIST_ELEMENTS_RO(neigh->prefix_list,
+ prefix_node, p))
+ ttable_add_row(tt, "%s|%pPAs|%pPAs|%pFX",
+ ifp->name, &ifaddr,
+ &neigh->source_addr, p);
+ }
+ }
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+}
+
+void pim_show_state(struct pim_instance *pim, struct vty *vty,
+ const char *src_or_group, const char *group,
+ json_object *json)
+{
+ struct channel_oil *c_oil;
+#if PIM_IPV != 4
+ struct ttable *tt = NULL;
+ char *table = NULL;
+#endif
+ char flag[50];
+ json_object *json_group = NULL;
+ json_object *json_ifp_in = NULL;
+ json_object *json_ifp_out = NULL;
+ json_object *json_source = NULL;
+ time_t now;
+ int first_oif;
+
+ now = pim_time_monotonic_sec();
+
+ if (!json) {
+ vty_out(vty,
+ "Codes: J -> Pim Join, I -> " GM " Report, S -> Source, * -> Inherited from (*,G), V -> VxLAN, M -> Muted\n");
+#if PIM_IPV == 4
+ vty_out(vty,
+ "Active Source Group RPT IIF OIL\n");
+#else
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Active|Source|Group|RPT|IIF|OIL");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+#endif
+ }
+
+ frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) {
+ char src_str[PIM_ADDRSTRLEN];
+ char grp_str[PIM_ADDRSTRLEN];
+ char in_ifname[INTERFACE_NAMSIZ + 1];
+ char out_ifname[INTERFACE_NAMSIZ + 1];
+ int oif_vif_index;
+ struct interface *ifp_in;
+ bool isRpt;
+
+ first_oif = 1;
+
+ if ((c_oil->up &&
+ PIM_UPSTREAM_FLAG_TEST_USE_RPT(c_oil->up->flags)) ||
+ pim_addr_is_any(*oil_origin(c_oil)))
+ isRpt = true;
+ else
+ isRpt = false;
+
+ snprintfrr(grp_str, sizeof(grp_str), "%pPAs",
+ oil_mcastgrp(c_oil));
+ snprintfrr(src_str, sizeof(src_str), "%pPAs",
+ oil_origin(c_oil));
+ ifp_in = pim_if_find_by_vif_index(pim, *oil_incoming_vif(c_oil));
+
+ if (ifp_in)
+ strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname));
+ else
+ strlcpy(in_ifname, "<iif?>", sizeof(in_ifname));
+
+ if (src_or_group) {
+ if (strcmp(src_or_group, src_str) &&
+ strcmp(src_or_group, grp_str))
+ continue;
+
+ if (group && strcmp(group, grp_str))
+ continue;
+ }
+
+ if (json) {
+
+ /* Find the group, create it if it doesn't exist */
+ json_object_object_get_ex(json, grp_str, &json_group);
+
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_object_add(json, grp_str,
+ json_group);
+ }
+
+ /* Find the source nested under the group, create it if
+ * it doesn't exist
+ */
+ json_object_object_get_ex(json_group, src_str,
+ &json_source);
+
+ if (!json_source) {
+ json_source = json_object_new_object();
+ json_object_object_add(json_group, src_str,
+ json_source);
+ }
+
+ /* Find the inbound interface nested under the source,
+ * create it if it doesn't exist
+ */
+ json_object_object_get_ex(json_source, in_ifname,
+ &json_ifp_in);
+
+ if (!json_ifp_in) {
+ json_ifp_in = json_object_new_object();
+ json_object_object_add(json_source, in_ifname,
+ json_ifp_in);
+ json_object_int_add(json_source, "installed",
+ c_oil->installed);
+ json_object_boolean_add(json_source, "isRpt",
+ isRpt);
+ json_object_int_add(json_source, "refCount",
+ c_oil->oil_ref_count);
+ json_object_int_add(json_source, "oilListSize",
+ c_oil->oil_size);
+ json_object_int_add(
+ json_source, "oilRescan",
+ c_oil->oil_inherited_rescan);
+ json_object_int_add(json_source, "lastUsed",
+ c_oil->cc.lastused);
+ json_object_int_add(json_source, "packetCount",
+ c_oil->cc.pktcnt);
+ json_object_int_add(json_source, "byteCount",
+ c_oil->cc.bytecnt);
+ json_object_int_add(json_source,
+ "wrongInterface",
+ c_oil->cc.wrong_if);
+ }
+ } else
+#if PIM_IPV == 4
+ vty_out(vty, "%-6d %-15pPAs %-15pPAs %-3s %-16s ",
+ c_oil->installed, oil_origin(c_oil),
+ oil_mcastgrp(c_oil), isRpt ? "y" : "n",
+ in_ifname);
+#else
+ /* Add a new row for c_oil with no OIF */
+ ttable_add_row(tt, "%d|%pPAs|%pPAs|%s|%s|%c",
+ c_oil->installed, oil_origin(c_oil),
+ oil_mcastgrp(c_oil), isRpt ? "y" : "n",
+ in_ifname, ' ');
+#endif
+
+ for (oif_vif_index = 0; oif_vif_index < MAXVIFS;
+ ++oif_vif_index) {
+ struct interface *ifp_out;
+ char oif_uptime[10];
+ int ttl;
+
+ ttl = oil_if_has(c_oil, oif_vif_index);
+ if (ttl < 1)
+ continue;
+
+ ifp_out = pim_if_find_by_vif_index(pim, oif_vif_index);
+ pim_time_uptime(
+ oif_uptime, sizeof(oif_uptime),
+ now - c_oil->oif_creation[oif_vif_index]);
+
+ if (ifp_out)
+ strlcpy(out_ifname, ifp_out->name,
+ sizeof(out_ifname));
+ else
+ strlcpy(out_ifname, "<oif?>",
+ sizeof(out_ifname));
+
+ if (json) {
+ json_ifp_out = json_object_new_object();
+ json_object_string_add(json_ifp_out, "source",
+ src_str);
+ json_object_string_add(json_ifp_out, "group",
+ grp_str);
+ json_object_string_add(json_ifp_out,
+ "inboundInterface",
+ in_ifname);
+ json_object_string_add(json_ifp_out,
+ "outboundInterface",
+ out_ifname);
+ json_object_int_add(json_ifp_out, "installed",
+ c_oil->installed);
+
+ json_object_object_add(json_ifp_in, out_ifname,
+ json_ifp_out);
+ } else {
+ flag[0] = '\0';
+ snprintf(flag, sizeof(flag), "(%c%c%c%c%c)",
+ (c_oil->oif_flags[oif_vif_index] &
+ PIM_OIF_FLAG_PROTO_GM)
+ ? 'I'
+ : ' ',
+ (c_oil->oif_flags[oif_vif_index] &
+ PIM_OIF_FLAG_PROTO_PIM)
+ ? 'J'
+ : ' ',
+ (c_oil->oif_flags[oif_vif_index] &
+ PIM_OIF_FLAG_PROTO_VXLAN)
+ ? 'V'
+ : ' ',
+ (c_oil->oif_flags[oif_vif_index] &
+ PIM_OIF_FLAG_PROTO_STAR)
+ ? '*'
+ : ' ',
+ (c_oil->oif_flags[oif_vif_index] &
+ PIM_OIF_FLAG_MUTE)
+ ? 'M'
+ : ' ');
+
+ if (first_oif) {
+ first_oif = 0;
+#if PIM_IPV == 4
+ vty_out(vty, "%s%s", out_ifname, flag);
+#else
+ /* OIF found.
+ * Delete the existing row for c_oil,
+ * with no OIF.
+ * Add a new row for c_oil with OIF and
+ * flag.
+ */
+ ttable_del_row(tt, tt->nrows - 1);
+ ttable_add_row(
+ tt, "%d|%pPAs|%pPAs|%s|%s|%s%s",
+ c_oil->installed,
+ oil_origin(c_oil),
+ oil_mcastgrp(c_oil),
+ isRpt ? "y" : "n", in_ifname,
+ out_ifname, flag);
+#endif
+ } else {
+#if PIM_IPV == 4
+ vty_out(vty, ", %s%s", out_ifname,
+ flag);
+#else
+ ttable_add_row(tt,
+ "%c|%c|%c|%c|%c|%s%s",
+ ' ', ' ', ' ', ' ', ' ',
+ out_ifname, flag);
+#endif
+ }
+ }
+ }
+#if PIM_IPV == 4
+ if (!json)
+ vty_out(vty, "\n");
+#endif
+ }
+
+ /* Dump the generated table. */
+ if (!json) {
+#if PIM_IPV == 4
+ vty_out(vty, "\n");
+#else
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+#endif
+ }
+}
+
+/* pim statistics - just adding only bsm related now.
+ * We can continue to add all pim related stats here.
+ */
+void pim_show_statistics(struct pim_instance *pim, struct vty *vty,
+ const char *ifname, bool uj)
+{
+ json_object *json = NULL;
+ struct interface *ifp;
+
+ if (uj) {
+ json = json_object_new_object();
+ json_object_int_add(json, "bsmRx", pim->bsm_rcvd);
+ json_object_int_add(json, "bsmTx", pim->bsm_sent);
+ json_object_int_add(json, "bsmDropped", pim->bsm_dropped);
+ } else {
+ vty_out(vty, "BSM Statistics :\n");
+ vty_out(vty, "----------------\n");
+ vty_out(vty, "Number of Received BSMs : %" PRIu64 "\n",
+ pim->bsm_rcvd);
+ vty_out(vty, "Number of Forwared BSMs : %" PRIu64 "\n",
+ pim->bsm_sent);
+ vty_out(vty, "Number of Dropped BSMs : %" PRIu64 "\n",
+ pim->bsm_dropped);
+ }
+
+ vty_out(vty, "\n");
+
+ /* scan interfaces */
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (ifname && strcmp(ifname, ifp->name))
+ continue;
+
+ if (!pim_ifp)
+ continue;
+
+ if (!uj) {
+ vty_out(vty, "Interface : %s\n", ifp->name);
+ vty_out(vty, "-------------------\n");
+ vty_out(vty,
+ "Number of BSMs dropped due to config miss : %u\n",
+ pim_ifp->pim_ifstat_bsm_cfg_miss);
+ vty_out(vty, "Number of unicast BSMs dropped : %u\n",
+ pim_ifp->pim_ifstat_ucast_bsm_cfg_miss);
+ vty_out(vty,
+ "Number of BSMs dropped due to invalid scope zone : %u\n",
+ pim_ifp->pim_ifstat_bsm_invalid_sz);
+ } else {
+
+ json_object *json_row = NULL;
+
+ json_row = json_object_new_object();
+
+ json_object_string_add(json_row, "If Name", ifp->name);
+ json_object_int_add(json_row, "bsmDroppedConfig",
+ pim_ifp->pim_ifstat_bsm_cfg_miss);
+ json_object_int_add(
+ json_row, "bsmDroppedUnicast",
+ pim_ifp->pim_ifstat_ucast_bsm_cfg_miss);
+ json_object_int_add(json_row,
+ "bsmDroppedInvalidScopeZone",
+ pim_ifp->pim_ifstat_bsm_invalid_sz);
+ json_object_object_add(json, ifp->name, json_row);
+ }
+ vty_out(vty, "\n");
+ }
+
+ if (uj)
+ vty_json(vty, json);
+}
+
/*
 * Display PIM upstream (S,G) state ("show ip pim upstream").
 *
 * pim:  instance whose upstream RB-tree is walked
 * vty:  output target for the text table
 * sg:   (S,G) filter applied via pim_sgaddr_match(); wildcard fields match all
 * json: when non-NULL, results are nested as group -> source objects and no
 *       text table is produced
 */
void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
		       pim_sgaddr *sg, json_object *json)
{
	struct pim_upstream *up;
	struct ttable *tt = NULL;
	char *table = NULL;
	time_t now;
	json_object *json_group = NULL;
	json_object *json_row = NULL;

	now = pim_time_monotonic_sec();

	if (!json) {
		/* Prepare table. */
		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
		ttable_add_row(
			tt,
			"Iif|Source|Group|State|Uptime|JoinTimer|RSTimer|KATimer|RefCnt");
		tt->style.cell.rpad = 2;
		tt->style.corner = '+';
		ttable_restyle(tt);
	}

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		char uptime[10];
		char join_timer[10];
		char rs_timer[10];
		char ka_timer[10];
		char msdp_reg_timer[10];
		char state_str[PIM_REG_STATE_STR_LEN];

		if (!pim_sgaddr_match(up->sg, *sg))
			continue;

		pim_time_uptime(uptime, sizeof(uptime),
				now - up->state_transition);
		pim_time_timer_to_hhmmss(join_timer, sizeof(join_timer),
					 up->t_join_timer);

		/*
		 * If the upstream is not dummy and it has a J/P timer for the
		 * neighbor display that
		 */
		if (!up->t_join_timer && up->rpf.source_nexthop.interface) {
			struct pim_neighbor *nbr;

			nbr = pim_neighbor_find(
				up->rpf.source_nexthop.interface,
				up->rpf.rpf_addr, false);
			if (nbr)
				pim_time_timer_to_hhmmss(join_timer,
							 sizeof(join_timer),
							 nbr->jp_timer);
		}

		pim_time_timer_to_hhmmss(rs_timer, sizeof(rs_timer),
					 up->t_rs_timer);
		pim_time_timer_to_hhmmss(ka_timer, sizeof(ka_timer),
					 up->t_ka_timer);
		pim_time_timer_to_hhmmss(msdp_reg_timer, sizeof(msdp_reg_timer),
					 up->t_msdp_reg_timer);

		pim_upstream_state2brief_str(up->join_state, state_str,
					     sizeof(state_str));
		/* Append the register sub-state (e.g. "J,RegP") when the
		 * register machine holds state.
		 */
		if (up->reg_state != PIM_REG_NOINFO) {
			char tmp_str[PIM_REG_STATE_STR_LEN];
			char tmp[sizeof(state_str) + 1];

			snprintf(tmp, sizeof(tmp), ",%s",
				 pim_reg_state2brief_str(up->reg_state, tmp_str,
							 sizeof(tmp_str)));
			strlcat(state_str, tmp, sizeof(state_str));
		}

		if (json) {
			char grp_str[PIM_ADDRSTRLEN];
			char src_str[PIM_ADDRSTRLEN];

			snprintfrr(grp_str, sizeof(grp_str), "%pPAs",
				   &up->sg.grp);
			snprintfrr(src_str, sizeof(src_str), "%pPAs",
				   &up->sg.src);

			/* One object per group; create it on first use. */
			json_object_object_get_ex(json, grp_str, &json_group);

			if (!json_group) {
				json_group = json_object_new_object();
				json_object_object_add(json, grp_str,
						       json_group);
			}

			json_row = json_object_new_object();
			json_object_pim_upstream_add(json_row, up);
			json_object_string_add(
				json_row, "inboundInterface",
				up->rpf.source_nexthop.interface
				? up->rpf.source_nexthop.interface->name
				: "Unknown");

			/*
			 * The RPF address we use is slightly different
			 * based upon what we are looking up.
			 * If we have a S, list that unless
			 * we are the FHR, else we just put
			 * the RP as the rpfAddress
			 */
			if (up->flags & PIM_UPSTREAM_FLAG_MASK_FHR ||
			    pim_addr_is_any(up->sg.src)) {
				struct pim_rpf *rpg;

				rpg = RP(pim, up->sg.grp);
				json_object_string_addf(json_row, "rpfAddress",
							"%pPA", &rpg->rpf_addr);
			} else {
				json_object_string_add(json_row, "rpfAddress",
						       src_str);
			}

			json_object_string_add(json_row, "source", src_str);
			json_object_string_add(json_row, "group", grp_str);
			json_object_string_add(json_row, "state", state_str);
			json_object_string_add(
				json_row, "joinState",
				pim_upstream_state2str(up->join_state));
			json_object_string_add(
				json_row, "regState",
				pim_reg_state2str(up->reg_state, state_str,
						  sizeof(state_str)));
			json_object_string_add(json_row, "upTime", uptime);
			json_object_string_add(json_row, "joinTimer",
					       join_timer);
			json_object_string_add(json_row, "resetTimer",
					       rs_timer);
			json_object_string_add(json_row, "keepaliveTimer",
					       ka_timer);
			json_object_string_add(json_row, "msdpRegTimer",
					       msdp_reg_timer);
			json_object_int_add(json_row, "refCount",
					    up->ref_count);
			json_object_int_add(json_row, "sptBit", up->sptbit);
			json_object_object_add(json_group, src_str, json_row);
		} else {
			ttable_add_row(tt,
				       "%s|%pPAs|%pPAs|%s|%s|%s|%s|%s|%d",
				       up->rpf.source_nexthop.interface
				       ? up->rpf.source_nexthop.interface->name
				       : "Unknown",
				       &up->sg.src, &up->sg.grp, state_str, uptime,
				       join_timer, rs_timer, ka_timer, up->ref_count);
		}
	}
	/* Dump the generated table. */
	if (!json) {
		table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
		ttable_del(tt);
	}
}
+
+static void pim_show_join_desired_helper(struct pim_instance *pim,
+ struct vty *vty,
+ struct pim_upstream *up,
+ json_object *json, bool uj,
+ struct ttable *tt)
+{
+ json_object *json_group = NULL;
+ json_object *json_row = NULL;
+
+ if (uj) {
+ char grp_str[PIM_ADDRSTRLEN];
+ char src_str[PIM_ADDRSTRLEN];
+
+ snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &up->sg.grp);
+ snprintfrr(src_str, sizeof(src_str), "%pPAs", &up->sg.src);
+
+ json_object_object_get_ex(json, grp_str, &json_group);
+
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_object_add(json, grp_str, json_group);
+ }
+
+ json_row = json_object_new_object();
+ json_object_pim_upstream_add(json_row, up);
+ json_object_string_add(json_row, "source", src_str);
+ json_object_string_add(json_row, "group", grp_str);
+
+ if (pim_upstream_evaluate_join_desired(pim, up))
+ json_object_boolean_true_add(json_row,
+ "evaluateJoinDesired");
+
+ json_object_object_add(json_group, src_str, json_row);
+
+ } else {
+ ttable_add_row(tt, "%pPAs|%pPAs|%s", &up->sg.src, &up->sg.grp,
+ pim_upstream_evaluate_join_desired(pim, up)
+ ? "yes"
+ : "no");
+ }
+}
+
+void pim_show_join_desired(struct pim_instance *pim, struct vty *vty, bool uj)
+{
+ struct pim_upstream *up;
+ struct ttable *tt = NULL;
+ char *table = NULL;
+
+ json_object *json = NULL;
+
+ if (uj)
+ json = json_object_new_object();
+ else {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Source|Group|EvalJD");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
+
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ /* scan all interfaces */
+ pim_show_join_desired_helper(pim, vty, up, json, uj, tt);
+ }
+
+ if (uj)
+ vty_json(vty, json);
+ else {
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
+}
+
/*
 * Display the RPF information of every upstream entry
 * ("show ip pim upstream-rpf"): RPF interface, RIB next-hop and RPF
 * neighbor address, as a text table or group -> source JSON nesting.
 */
void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj)
{
	struct pim_upstream *up;
	struct ttable *tt = NULL;
	char *table = NULL;
	json_object *json = NULL;
	json_object *json_group = NULL;
	json_object *json_row = NULL;

	if (uj)
		json = json_object_new_object();
	else {
		/* Prepare table. */
		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
		ttable_add_row(tt,
			       "Source|Group|RpfIface|RibNextHop|RpfAddress");
		tt->style.cell.rpad = 2;
		tt->style.corner = '+';
		ttable_restyle(tt);
	}

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		struct pim_rpf *rpf;
		const char *rpf_ifname;

		rpf = &up->rpf;

		/* Upstreams with an unresolved RPF have no interface yet. */
		rpf_ifname =
			rpf->source_nexthop.interface ? rpf->source_nexthop
							.interface->name
						      : "<ifname?>";

		if (uj) {
			char grp_str[PIM_ADDRSTRLEN];
			char src_str[PIM_ADDRSTRLEN];

			snprintfrr(grp_str, sizeof(grp_str), "%pPAs",
				   &up->sg.grp);
			snprintfrr(src_str, sizeof(src_str), "%pPAs",
				   &up->sg.src);
			/* One object per group; create it on first use. */
			json_object_object_get_ex(json, grp_str, &json_group);

			if (!json_group) {
				json_group = json_object_new_object();
				json_object_object_add(json, grp_str,
						       json_group);
			}

			json_row = json_object_new_object();
			json_object_pim_upstream_add(json_row, up);
			json_object_string_add(json_row, "source", src_str);
			json_object_string_add(json_row, "group", grp_str);
			json_object_string_add(json_row, "rpfInterface",
					       rpf_ifname);
			json_object_string_addf(
				json_row, "ribNexthop", "%pPAs",
				&rpf->source_nexthop.mrib_nexthop_addr);
			json_object_string_addf(json_row, "rpfAddress", "%pPA",
						&rpf->rpf_addr);
			json_object_object_add(json_group, src_str, json_row);
		} else {
			ttable_add_row(tt, "%pPAs|%pPAs|%s|%pPA|%pPA",
				       &up->sg.src, &up->sg.grp, rpf_ifname,
				       &rpf->source_nexthop.mrib_nexthop_addr,
				       &rpf->rpf_addr);
		}
	}

	if (uj)
		vty_json(vty, json);
	else {
		/* Dump the generated table. */
		table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
		ttable_del(tt);
	}
}
+
/*
 * Emit one interface channel (S,G) entry for "show ip pim join".
 *
 * When json is non-NULL the entry is nested interface -> group -> source;
 * otherwise a row is appended to the text table tt.  now is the current
 * monotonic time used to derive the channel uptime.
 */
static void pim_show_join_helper(struct pim_interface *pim_ifp,
				 struct pim_ifchannel *ch, json_object *json,
				 time_t now, struct ttable *tt)
{
	json_object *json_iface = NULL;
	json_object *json_row = NULL;
	json_object *json_grp = NULL;
	pim_addr ifaddr;
	char uptime[10];
	char expire[10];
	char prune[10];

	ifaddr = pim_ifp->primary_address;

	pim_time_uptime_begin(uptime, sizeof(uptime), now, ch->ifjoin_creation);
	pim_time_timer_to_mmss(expire, sizeof(expire),
			       ch->t_ifjoin_expiry_timer);
	pim_time_timer_to_mmss(prune, sizeof(prune),
			       ch->t_ifjoin_prune_pending_timer);

	if (json) {
		char ch_grp_str[PIM_ADDRSTRLEN];

		/* Reuse the per-interface object created by an earlier
		 * channel on the same interface, if any.
		 */
		json_object_object_get_ex(json, ch->interface->name,
					  &json_iface);

		if (!json_iface) {
			json_iface = json_object_new_object();
			json_object_pim_ifp_add(json_iface, ch->interface);
			json_object_object_add(json, ch->interface->name,
					       json_iface);
		}

		json_row = json_object_new_object();
		json_object_string_addf(json_row, "source", "%pPAs",
					&ch->sg.src);
		json_object_string_addf(json_row, "group", "%pPAs",
					&ch->sg.grp);
		json_object_string_add(json_row, "upTime", uptime);
		json_object_string_add(json_row, "expire", expire);
		json_object_string_add(json_row, "prune", prune);
		json_object_string_add(
			json_row, "channelJoinName",
			pim_ifchannel_ifjoin_name(ch->ifjoin_state, ch->flags));
		if (PIM_IF_FLAG_TEST_S_G_RPT(ch->flags))
			json_object_int_add(json_row, "sgRpt", 1);
		if (PIM_IF_FLAG_TEST_PROTO_PIM(ch->flags))
			json_object_int_add(json_row, "protocolPim", 1);
		if (PIM_IF_FLAG_TEST_PROTO_IGMP(ch->flags))
			json_object_int_add(json_row, "protocolIgmp", 1);
		snprintfrr(ch_grp_str, sizeof(ch_grp_str), "%pPAs",
			   &ch->sg.grp);
		/* Sources nest under a per-group object keyed by the group
		 * address; create the group object on first use.
		 */
		json_object_object_get_ex(json_iface, ch_grp_str, &json_grp);
		if (!json_grp) {
			json_grp = json_object_new_object();
			json_object_object_addf(json_grp, json_row, "%pPAs",
						&ch->sg.src);
			json_object_object_addf(json_iface, json_grp, "%pPAs",
						&ch->sg.grp);
		} else
			json_object_object_addf(json_grp, json_row, "%pPAs",
						&ch->sg.src);
	} else {
		ttable_add_row(
			tt, "%s|%pPAs|%pPAs|%pPAs|%s|%s|%s|%s",
			ch->interface->name, &ifaddr, &ch->sg.src, &ch->sg.grp,
			pim_ifchannel_ifjoin_name(ch->ifjoin_state, ch->flags),
			uptime, expire, prune);
	}
}
+
+int pim_show_join_cmd_helper(const char *vrf, struct vty *vty, pim_addr s_or_g,
+ pim_addr g, const char *json)
+{
+ pim_sgaddr sg = {};
+ struct vrf *v;
+ struct pim_instance *pim;
+ json_object *json_parent = NULL;
+
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v) {
+ vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf);
+ return CMD_WARNING;
+ }
+ pim = v->info;
+
+ if (!pim) {
+ vty_out(vty, "%% Unable to find pim instance\n");
+ return CMD_WARNING;
+ }
+
+ if (!pim_addr_is_any(s_or_g)) {
+ if (!pim_addr_is_any(g)) {
+ sg.src = s_or_g;
+ sg.grp = g;
+ } else
+ sg.grp = s_or_g;
+ }
+
+ if (json)
+ json_parent = json_object_new_object();
+
+ pim_show_join(pim, vty, &sg, json_parent);
+
+ if (json)
+ vty_json(vty, json_parent);
+
+ return CMD_SUCCESS;
+}
+
+int pim_show_join_vrf_all_cmd_helper(struct vty *vty, const char *json)
+{
+ pim_sgaddr sg = {0};
+ struct vrf *vrf_struct;
+ json_object *json_parent = NULL;
+ json_object *json_vrf = NULL;
+
+ if (json)
+ json_parent = json_object_new_object();
+
+ RB_FOREACH (vrf_struct, vrf_name_head, &vrfs_by_name) {
+ if (!json_parent)
+ vty_out(vty, "VRF: %s\n", vrf_struct->name);
+ else
+ json_vrf = json_object_new_object();
+ pim_show_join(vrf_struct->info, vty, &sg, json_vrf);
+
+ if (json)
+ json_object_object_add(json_parent, vrf_struct->name,
+ json_vrf);
+ }
+ if (json)
+ vty_json(vty, json_parent);
+
+ return CMD_WARNING;
+}
+
+void pim_show_join(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
+ json_object *json)
+{
+ struct pim_interface *pim_ifp;
+ struct pim_ifchannel *ch;
+ struct interface *ifp;
+ time_t now;
+ struct ttable *tt = NULL;
+ char *table = NULL;
+
+ now = pim_time_monotonic_sec();
+
+ if (!json) {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(
+ tt,
+ "Interface|Address|Source|Group|State|Uptime|Expire|Prune");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ pim_ifp = ifp->info;
+ if (!pim_ifp)
+ continue;
+
+ RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
+ if (!pim_sgaddr_match(ch->sg, *sg))
+ continue;
+
+ pim_show_join_helper(pim_ifp, ch, json, now, tt);
+ } /* scan interface channels */
+ }
+ /* Dump the generated table. */
+ if (!json) {
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
+}
+
+static void pim_show_jp_agg_helper(struct interface *ifp,
+ struct pim_neighbor *neigh,
+ struct pim_upstream *up, int is_join,
+ struct ttable *tt)
+{
+ ttable_add_row(tt, "%s|%pPAs|%pPAs|%pPAs|%s", ifp->name,
+ &neigh->source_addr, &up->sg.src, &up->sg.grp,
+ is_join ? "J" : "P");
+}
+
+int pim_show_jp_agg_list_cmd_helper(const char *vrf, struct vty *vty)
+{
+ struct vrf *v;
+ struct pim_instance *pim;
+
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v) {
+ vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf);
+ return CMD_WARNING;
+ }
+ pim = v->info;
+
+ if (!pim) {
+ vty_out(vty, "%% Unable to find pim instance\n");
+ return CMD_WARNING;
+ }
+
+ pim_show_jp_agg_list(pim, vty);
+
+ return CMD_SUCCESS;
+}
+
/*
 * Dump the per-neighbor Join/Prune aggregation lists for every PIM
 * interface in the instance ("show ip pim jp-agg").  Text-table output
 * only; nesting is interface -> neighbor -> aggregated group -> source.
 */
void pim_show_jp_agg_list(struct pim_instance *pim, struct vty *vty)
{
	struct interface *ifp;
	struct pim_interface *pim_ifp;
	struct listnode *n_node;
	struct pim_neighbor *neigh;
	struct listnode *jag_node;
	struct pim_jp_agg_group *jag;
	struct listnode *js_node;
	struct pim_jp_sources *js;
	struct ttable *tt;
	char *table;

	/* Prepare table. */
	tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
	ttable_add_row(tt, "Interface|RPF Nbr|Source|Group|State");
	tt->style.cell.rpad = 2;
	tt->style.corner = '+';
	ttable_restyle(tt);

	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		pim_ifp = ifp->info;
		if (!pim_ifp)
			continue;

		for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, n_node,
					  neigh)) {
			for (ALL_LIST_ELEMENTS_RO(neigh->upstream_jp_agg,
						  jag_node, jag)) {
				for (ALL_LIST_ELEMENTS_RO(jag->sources, js_node,
							  js)) {
					pim_show_jp_agg_helper(ifp, neigh,
							       js->up,
							       js->is_join, tt);
				}
			}
		}
	}

	/* Dump the generated table. */
	table = ttable_dump(tt, "\n");
	vty_out(vty, "%s\n", table);
	XFREE(MTYPE_TMP, table);
	ttable_del(tt);
}
+
+int pim_show_membership_cmd_helper(const char *vrf, struct vty *vty, bool uj)
+{
+ struct vrf *v;
+
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v)
+ return CMD_WARNING;
+
+ pim_show_membership(v->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+static void pim_show_membership_helper(struct vty *vty,
+ struct pim_interface *pim_ifp,
+ struct pim_ifchannel *ch,
+ struct json_object *json)
+{
+ json_object *json_iface = NULL;
+ json_object *json_row = NULL;
+
+ json_object_object_get_ex(json, ch->interface->name, &json_iface);
+ if (!json_iface) {
+ json_iface = json_object_new_object();
+ json_object_pim_ifp_add(json_iface, ch->interface);
+ json_object_object_add(json, ch->interface->name, json_iface);
+ }
+
+ json_row = json_object_new_object();
+ json_object_string_addf(json_row, "source", "%pPAs", &ch->sg.src);
+ json_object_string_addf(json_row, "group", "%pPAs", &ch->sg.grp);
+ json_object_string_add(json_row, "localMembership",
+ ch->local_ifmembership == PIM_IFMEMBERSHIP_NOINFO
+ ? "NOINFO"
+ : "INCLUDE");
+ json_object_object_addf(json_iface, json_row, "%pPAs", &ch->sg.grp);
+}
+
/*
 * Display local multicast membership ("show ip pim local-membership").
 *
 * The JSON representation is always built first by the helper; in text
 * mode (uj == false) that JSON tree is then traversed to render the table
 * and freed afterwards.
 */
void pim_show_membership(struct pim_instance *pim, struct vty *vty, bool uj)
{
	struct pim_interface *pim_ifp;
	struct pim_ifchannel *ch;
	struct interface *ifp;
	enum json_type type;
	json_object *json = NULL;
	json_object *json_tmp = NULL;
	struct ttable *tt = NULL;
	char *table = NULL;

	json = json_object_new_object();

	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		pim_ifp = ifp->info;
		if (!pim_ifp)
			continue;

		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
			pim_show_membership_helper(vty, pim_ifp, ch, json);
		} /* scan interface channels */
	}

	if (uj) {
		vty_json(vty, json);
	} else {
		/* Prepare table. */
		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
		ttable_add_row(tt, "Interface|Address|Source|Group|Membership");
		tt->style.cell.rpad = 2;
		tt->style.corner = '+';
		ttable_restyle(tt);

		/*
		 * Example of the json data we are traversing
		 *
		 * {
		 *   "swp3":{
		 *     "name":"swp3",
		 *     "state":"up",
		 *     "address":"10.1.20.1",
		 *     "index":5,
		 *     "flagMulticast":true,
		 *     "flagBroadcast":true,
		 *     "lanDelayEnabled":true,
		 *     "226.10.10.10":{
		 *       "source":"*",
		 *       "group":"226.10.10.10",
		 *       "localMembership":"INCLUDE"
		 *     }
		 *   }
		 * }
		 */

		/* foreach interface */
		json_object_object_foreach(json, key, val)
		{

			/* Find all of the keys where the val is an object. In
			 * the example
			 * above the only one is 226.10.10.10
			 */
			json_object_object_foreach(val, if_field_key,
						   if_field_val)
			{
				type = json_object_get_type(if_field_val);

				if (type == json_type_object) {
					const char *address, *source,
						*localMembership;

					/* "address" lives on the interface
					 * object, the rest on the group
					 * sub-object.
					 */
					json_object_object_get_ex(
						val, "address", &json_tmp);
					address = json_object_get_string(
						json_tmp);

					json_object_object_get_ex(if_field_val,
								  "source",
								  &json_tmp);
					source = json_object_get_string(
						json_tmp);

					json_object_object_get_ex(
						if_field_val, "localMembership",
						&json_tmp);
					localMembership =
						json_object_get_string(
							json_tmp);

					ttable_add_row(tt, "%s|%s|%s|%s|%s",
						       key, address, source,
						       if_field_key,
						       localMembership);
				}
			}
		}
		json_object_free(json);
		/* Dump the generated table. */
		table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
		ttable_del(tt);
	}
}
+
/*
 * Emit one ifchannel row for "show ip pim channel": the per-channel
 * assert/join/include macro evaluations plus the upstream join-desired
 * state, nested group -> source in JSON mode or appended to tt otherwise.
 */
static void pim_show_channel_helper(struct pim_instance *pim,
				    struct pim_interface *pim_ifp,
				    struct pim_ifchannel *ch, json_object *json,
				    bool uj, struct ttable *tt)
{
	struct pim_upstream *up = ch->upstream;
	json_object *json_group = NULL;
	json_object *json_row = NULL;

	if (uj) {
		char grp_str[PIM_ADDRSTRLEN];

		snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &up->sg.grp);
		/* One object per group; create it on first use. */
		json_object_object_get_ex(json, grp_str, &json_group);

		if (!json_group) {
			json_group = json_object_new_object();
			json_object_object_add(json, grp_str, json_group);
		}

		json_row = json_object_new_object();
		json_object_pim_upstream_add(json_row, up);
		json_object_string_add(json_row, "interface",
				       ch->interface->name);
		json_object_string_addf(json_row, "source", "%pPAs",
					&up->sg.src);
		json_object_string_addf(json_row, "group", "%pPAs",
					&up->sg.grp);

		/* Booleans are only emitted when true. */
		if (pim_macro_ch_lost_assert(ch))
			json_object_boolean_true_add(json_row, "lostAssert");

		if (pim_macro_chisin_joins(ch))
			json_object_boolean_true_add(json_row, "joins");

		if (pim_macro_chisin_pim_include(ch))
			json_object_boolean_true_add(json_row, "pimInclude");

		if (pim_upstream_evaluate_join_desired(pim, up))
			json_object_boolean_true_add(json_row,
						     "evaluateJoinDesired");

		json_object_object_addf(json_group, json_row, "%pPAs",
					&up->sg.src);

	} else {
		ttable_add_row(tt, "%s|%pPAs|%pPAs|%s|%s|%s|%s|%s",
			       ch->interface->name, &up->sg.src, &up->sg.grp,
			       pim_macro_ch_lost_assert(ch) ? "yes" : "no",
			       pim_macro_chisin_joins(ch) ? "yes" : "no",
			       pim_macro_chisin_pim_include(ch) ? "yes" : "no",
			       PIM_UPSTREAM_FLAG_TEST_DR_JOIN_DESIRED(up->flags)
				       ? "yes"
				       : "no",
			       pim_upstream_evaluate_join_desired(pim, up)
				       ? "yes"
				       : "no");
	}
}
+
+void pim_show_channel(struct pim_instance *pim, struct vty *vty, bool uj)
+{
+ struct pim_interface *pim_ifp;
+ struct pim_ifchannel *ch;
+ struct interface *ifp;
+ struct ttable *tt = NULL;
+ json_object *json = NULL;
+ char *table = NULL;
+
+ if (uj)
+ json = json_object_new_object();
+ else {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(
+ tt,
+ "Interface|Source|Group|LostAssert|Joins|PimInclude|JoinDesired|EvalJD");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
+
+ /* scan per-interface (S,G) state */
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ pim_ifp = ifp->info;
+ if (!pim_ifp)
+ continue;
+
+ RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
+ /* scan all interfaces */
+ pim_show_channel_helper(pim, pim_ifp, ch, json, uj, tt);
+ }
+ }
+
+ if (uj)
+ vty_json(vty, json);
+ else {
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
+}
+
+int pim_show_channel_cmd_helper(const char *vrf, struct vty *vty, bool uj)
+{
+ struct vrf *v;
+
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v)
+ return CMD_WARNING;
+
+ pim_show_channel(v->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+int pim_show_interface_cmd_helper(const char *vrf, struct vty *vty, bool uj,
+ bool mlag, const char *interface)
+{
+ struct vrf *v;
+ json_object *json_parent = NULL;
+
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v)
+ return CMD_WARNING;
+
+ if (uj)
+ json_parent = json_object_new_object();
+
+ if (interface)
+ pim_show_interfaces_single(v->info, vty, interface, mlag,
+ json_parent);
+ else
+ pim_show_interfaces(v->info, vty, mlag, json_parent);
+
+ if (uj)
+ vty_json(vty, json_parent);
+
+ return CMD_SUCCESS;
+}
+
+int pim_show_interface_vrf_all_cmd_helper(struct vty *vty, bool uj, bool mlag,
+ const char *interface)
+{
+ struct vrf *v;
+ json_object *json_parent = NULL;
+ json_object *json_vrf = NULL;
+
+ if (uj)
+ json_parent = json_object_new_object();
+
+ RB_FOREACH (v, vrf_name_head, &vrfs_by_name) {
+ if (!uj)
+ vty_out(vty, "VRF: %s\n", v->name);
+ else
+ json_vrf = json_object_new_object();
+
+ if (interface)
+ pim_show_interfaces_single(v->info, vty, interface,
+ mlag, json_vrf);
+ else
+ pim_show_interfaces(v->info, vty, mlag, json_vrf);
+
+ if (uj)
+ json_object_object_add(json_parent, v->name, json_vrf);
+ }
+ if (uj)
+ vty_json(vty, json_parent);
+
+ return CMD_SUCCESS;
+}
+
+void pim_show_interfaces(struct pim_instance *pim, struct vty *vty, bool mlag,
+ json_object *json)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ struct pim_upstream *up;
+ int fhr = 0;
+ int pim_nbrs = 0;
+ int pim_ifchannels = 0;
+ bool uj = true;
+ struct ttable *tt = NULL;
+ char *table = NULL;
+ json_object *json_row = NULL;
+ json_object *json_tmp;
+
+ if (!json) {
+ uj = false;
+ json = json_object_new_object();
+ }
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ continue;
+
+ if (mlag == true && pim_ifp->activeactive == false)
+ continue;
+
+ pim_nbrs = pim_ifp->pim_neighbor_list->count;
+ pim_ifchannels = pim_if_ifchannel_count(pim_ifp);
+ fhr = 0;
+
+ frr_each (rb_pim_upstream, &pim->upstream_head, up)
+ if (ifp == up->rpf.source_nexthop.interface)
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_FHR)
+ fhr++;
+
+ json_row = json_object_new_object();
+ json_object_pim_ifp_add(json_row, ifp);
+ json_object_int_add(json_row, "pimNeighbors", pim_nbrs);
+ json_object_int_add(json_row, "pimIfChannels", pim_ifchannels);
+ json_object_int_add(json_row, "firstHopRouterCount", fhr);
+ json_object_string_addf(json_row, "pimDesignatedRouter",
+ "%pPAs", &pim_ifp->pim_dr_addr);
+
+ if (!pim_addr_cmp(pim_ifp->pim_dr_addr,
+ pim_ifp->primary_address))
+ json_object_boolean_true_add(
+ json_row, "pimDesignatedRouterLocal");
+
+ json_object_object_add(json, ifp->name, json_row);
+ }
+
+ if (!uj) {
+
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(
+ tt,
+ "Interface|State|Address|PIM Nbrs|PIM DR|FHR|IfChannels");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+
+ json_object_object_foreach(json, key, val)
+ {
+ const char *state, *address, *pimdr;
+ int neighbors, firsthpr, pimifchnl;
+
+ json_object_object_get_ex(val, "state", &json_tmp);
+ state = json_object_get_string(json_tmp);
+
+ json_object_object_get_ex(val, "address", &json_tmp);
+ address = json_object_get_string(json_tmp);
+
+ json_object_object_get_ex(val, "pimNeighbors",
+ &json_tmp);
+ neighbors = json_object_get_int(json_tmp);
+
+ if (json_object_object_get_ex(
+ val, "pimDesignatedRouterLocal",
+ &json_tmp)) {
+ pimdr = "local";
+ } else {
+ json_object_object_get_ex(
+ val, "pimDesignatedRouter", &json_tmp);
+ pimdr = json_object_get_string(json_tmp);
+ }
+
+ json_object_object_get_ex(val, "firstHopRouter",
+ &json_tmp);
+ firsthpr = json_object_get_int(json_tmp);
+
+ json_object_object_get_ex(val, "pimIfChannels",
+ &json_tmp);
+ pimifchnl = json_object_get_int(json_tmp);
+
+ ttable_add_row(tt, "%s|%s|%s|%d|%s|%d|%d", key, state,
+ address, neighbors, pimdr, firsthpr,
+ pimifchnl);
+ }
+ json_object_free(json);
+
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+
+ ttable_del(tt);
+ }
+}
+
+/*
+ * Show detailed PIM state for a single interface, or for every PIM
+ * interface when @ifname is the literal string "detail".
+ *
+ * pim    - PIM instance whose VRF's interfaces are scanned
+ * vty    - output terminal (used when @json is NULL)
+ * mlag   - when true, only interfaces with active-active (MLAG) enabled
+ *          are shown
+ * json   - when non-NULL, results are added here keyed by interface
+ *          name instead of being printed to @vty
+ */
+void pim_show_interfaces_single(struct pim_instance *pim, struct vty *vty,
+ const char *ifname, bool mlag,
+ json_object *json)
+{
+ pim_addr ifaddr;
+ struct interface *ifp;
+ struct listnode *neighnode;
+ struct pim_interface *pim_ifp;
+ struct pim_neighbor *neigh;
+ struct pim_upstream *up;
+ time_t now;
+ char dr_str[PIM_ADDRSTRLEN];
+ char dr_uptime[10];
+ char expire[10];
+ char grp_str[PIM_ADDRSTRLEN];
+ char hello_period[10];
+ char hello_timer[10];
+ char neigh_src_str[PIM_ADDRSTRLEN];
+ char src_str[PIM_ADDRSTRLEN];
+ char stat_uptime[10];
+ char uptime[10];
+ int found_ifname = 0;
+ int print_header;
+ json_object *json_row = NULL;
+ json_object *json_pim_neighbor = NULL;
+ json_object *json_pim_neighbors = NULL;
+ json_object *json_group = NULL;
+ json_object *json_group_source = NULL;
+ json_object *json_fhr_sources = NULL;
+ struct pim_secondary_addr *sec_addr;
+ struct listnode *sec_node;
+
+ now = pim_time_monotonic_sec();
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ pim_ifp = ifp->info;
+
+ /* Skip interfaces that have no PIM configuration at all. */
+ if (!pim_ifp)
+ continue;
+
+ if (mlag == true && pim_ifp->activeactive == false)
+ continue;
+
+ /* "detail" acts as a wildcard matching every interface. */
+ if (strcmp(ifname, "detail") && strcmp(ifname, ifp->name))
+ continue;
+
+ found_ifname = 1;
+ ifaddr = pim_ifp->primary_address;
+ snprintfrr(dr_str, sizeof(dr_str), "%pPAs",
+ &pim_ifp->pim_dr_addr);
+ pim_time_uptime_begin(dr_uptime, sizeof(dr_uptime), now,
+ pim_ifp->pim_dr_election_last);
+ pim_time_timer_to_hhmmss(hello_timer, sizeof(hello_timer),
+ pim_ifp->t_pim_hello_timer);
+ pim_time_mmss(hello_period, sizeof(hello_period),
+ pim_ifp->pim_hello_period);
+ pim_time_uptime(stat_uptime, sizeof(stat_uptime),
+ now - pim_ifp->pim_ifstat_start);
+
+ if (json) {
+ json_row = json_object_new_object();
+ json_object_pim_ifp_add(json_row, ifp);
+
+ if (!pim_addr_is_any(pim_ifp->update_source)) {
+ json_object_string_addf(
+ json_row, "useSource", "%pPAs",
+ &pim_ifp->update_source);
+ }
+ if (pim_ifp->sec_addr_list) {
+ json_object *sec_list = NULL;
+
+ sec_list = json_object_new_array();
+ for (ALL_LIST_ELEMENTS_RO(
+ pim_ifp->sec_addr_list, sec_node,
+ sec_addr)) {
+ json_object_array_add(
+ sec_list,
+ json_object_new_stringf(
+ "%pFXh",
+ &sec_addr->addr));
+ }
+ json_object_object_add(json_row,
+ "secondaryAddressList",
+ sec_list);
+ }
+
+ if (pim_ifp->pim_passive_enable)
+ json_object_boolean_true_add(json_row,
+ "passive");
+
+ /* PIM neighbors */
+ if (pim_ifp->pim_neighbor_list->count) {
+ json_pim_neighbors = json_object_new_object();
+
+ for (ALL_LIST_ELEMENTS_RO(
+ pim_ifp->pim_neighbor_list,
+ neighnode, neigh)) {
+ json_pim_neighbor =
+ json_object_new_object();
+ snprintfrr(neigh_src_str,
+ sizeof(neigh_src_str),
+ "%pPAs",
+ &neigh->source_addr);
+ pim_time_uptime(uptime, sizeof(uptime),
+ now - neigh->creation);
+ pim_time_timer_to_hhmmss(
+ expire, sizeof(expire),
+ neigh->t_expire_timer);
+
+ json_object_string_add(
+ json_pim_neighbor, "address",
+ neigh_src_str);
+ json_object_string_add(
+ json_pim_neighbor, "upTime",
+ uptime);
+ json_object_string_add(
+ json_pim_neighbor, "holdtime",
+ expire);
+
+ json_object_object_add(
+ json_pim_neighbors,
+ neigh_src_str,
+ json_pim_neighbor);
+ }
+
+ json_object_object_add(json_row, "neighbors",
+ json_pim_neighbors);
+ }
+
+ json_object_string_add(json_row, "drAddress", dr_str);
+ json_object_int_add(json_row, "drPriority",
+ pim_ifp->pim_dr_priority);
+ json_object_string_add(json_row, "drUptime", dr_uptime);
+ json_object_int_add(json_row, "drElections",
+ pim_ifp->pim_dr_election_count);
+ json_object_int_add(json_row, "drChanges",
+ pim_ifp->pim_dr_election_changes);
+
+ /*
+ * FHR: list (S,G) pairs for which this router is the
+ * first-hop router and whose RPF nexthop uses this
+ * interface.  Grouped by group address.
+ */
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ if (ifp != up->rpf.source_nexthop.interface)
+ continue;
+
+ if (!(up->flags & PIM_UPSTREAM_FLAG_MASK_FHR))
+ continue;
+
+ if (!json_fhr_sources)
+ json_fhr_sources =
+ json_object_new_object();
+
+ snprintfrr(grp_str, sizeof(grp_str), "%pPAs",
+ &up->sg.grp);
+ snprintfrr(src_str, sizeof(src_str), "%pPAs",
+ &up->sg.src);
+ pim_time_uptime(uptime, sizeof(uptime),
+ now - up->state_transition);
+
+ /*
+ * Does this group live in json_fhr_sources?
+ * If not create it.
+ */
+ json_object_object_get_ex(json_fhr_sources,
+ grp_str, &json_group);
+
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_object_add(json_fhr_sources,
+ grp_str,
+ json_group);
+ }
+
+ json_group_source = json_object_new_object();
+ json_object_string_add(json_group_source,
+ "source", src_str);
+ json_object_string_add(json_group_source,
+ "group", grp_str);
+ json_object_string_add(json_group_source,
+ "upTime", uptime);
+ json_object_object_add(json_group, src_str,
+ json_group_source);
+ }
+
+ if (json_fhr_sources) {
+ json_object_object_add(json_row,
+ "firstHopRouter",
+ json_fhr_sources);
+ }
+
+ json_object_int_add(json_row, "helloPeriod",
+ pim_ifp->pim_hello_period);
+ json_object_int_add(json_row, "holdTime",
+ PIM_IF_DEFAULT_HOLDTIME(pim_ifp));
+ json_object_string_add(json_row, "helloTimer",
+ hello_timer);
+ json_object_string_add(json_row, "helloStatStart",
+ stat_uptime);
+ json_object_int_add(json_row, "helloReceived",
+ pim_ifp->pim_ifstat_hello_recv);
+ json_object_int_add(json_row, "helloReceivedFailed",
+ pim_ifp->pim_ifstat_hello_recvfail);
+ json_object_int_add(json_row, "helloSend",
+ pim_ifp->pim_ifstat_hello_sent);
+ /*
+ * NOTE(review): key casing is inconsistent with
+ * "helloReceivedFailed" above, but changing it would
+ * break existing JSON consumers.
+ */
+ json_object_int_add(json_row, "hellosendFailed",
+ pim_ifp->pim_ifstat_hello_sendfail);
+ json_object_int_add(json_row, "helloGenerationId",
+ pim_ifp->pim_generation_id);
+
+ json_object_int_add(
+ json_row, "effectivePropagationDelay",
+ pim_if_effective_propagation_delay_msec(ifp));
+ json_object_int_add(
+ json_row, "effectiveOverrideInterval",
+ pim_if_effective_override_interval_msec(ifp));
+ json_object_int_add(
+ json_row, "joinPruneOverrideInterval",
+ pim_if_jp_override_interval_msec(ifp));
+
+ json_object_int_add(
+ json_row, "propagationDelay",
+ pim_ifp->pim_propagation_delay_msec);
+ json_object_int_add(
+ json_row, "propagationDelayHighest",
+ pim_ifp->pim_neighbors_highest_propagation_delay_msec);
+ json_object_int_add(
+ json_row, "overrideInterval",
+ pim_ifp->pim_override_interval_msec);
+ json_object_int_add(
+ json_row, "overrideIntervalHighest",
+ pim_ifp->pim_neighbors_highest_override_interval_msec);
+ if (pim_ifp->bsm_enable)
+ json_object_boolean_true_add(json_row,
+ "bsmEnabled");
+ if (pim_ifp->ucast_bsm_accept)
+ json_object_boolean_true_add(json_row,
+ "ucastBsmEnabled");
+ json_object_object_add(json, ifp->name, json_row);
+
+ } else {
+ /* Plain-text output mirrors the JSON content above. */
+ vty_out(vty, "Interface : %s\n", ifp->name);
+ vty_out(vty, "State : %s\n",
+ if_is_up(ifp) ? "up" : "down");
+ if (!pim_addr_is_any(pim_ifp->update_source)) {
+ vty_out(vty, "Use Source : %pPAs\n",
+ &pim_ifp->update_source);
+ }
+ if (pim_ifp->sec_addr_list) {
+ vty_out(vty, "Address : %pPAs (primary)\n",
+ &ifaddr);
+ for (ALL_LIST_ELEMENTS_RO(
+ pim_ifp->sec_addr_list, sec_node,
+ sec_addr))
+ vty_out(vty, " %pFX\n",
+ &sec_addr->addr);
+ } else {
+ vty_out(vty, "Address : %pPAs\n", &ifaddr);
+ }
+
+ if (pim_ifp->pim_passive_enable)
+ vty_out(vty, "Passive : %s\n",
+ (pim_ifp->pim_passive_enable) ? "yes"
+ : "no");
+
+ vty_out(vty, "\n");
+
+ /* PIM neighbors */
+ print_header = 1;
+
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list,
+ neighnode, neigh)) {
+
+ /* Emit the section header once, lazily. */
+ if (print_header) {
+ vty_out(vty, "PIM Neighbors\n");
+ vty_out(vty, "-------------\n");
+ print_header = 0;
+ }
+
+ snprintfrr(neigh_src_str, sizeof(neigh_src_str),
+ "%pPAs", &neigh->source_addr);
+ pim_time_uptime(uptime, sizeof(uptime),
+ now - neigh->creation);
+ pim_time_timer_to_hhmmss(expire, sizeof(expire),
+ neigh->t_expire_timer);
+ vty_out(vty,
+ "%-15s : up for %s, holdtime expires in %s\n",
+ neigh_src_str, uptime, expire);
+ }
+
+ if (!print_header) {
+ vty_out(vty, "\n");
+ vty_out(vty, "\n");
+ }
+
+ vty_out(vty, "Designated Router\n");
+ vty_out(vty, "-----------------\n");
+ vty_out(vty, "Address : %s\n", dr_str);
+ vty_out(vty, "Priority : %u(%d)\n",
+ pim_ifp->pim_dr_priority,
+ pim_ifp->pim_dr_num_nondrpri_neighbors);
+ vty_out(vty, "Uptime : %s\n", dr_uptime);
+ vty_out(vty, "Elections : %d\n",
+ pim_ifp->pim_dr_election_count);
+ vty_out(vty, "Changes : %d\n",
+ pim_ifp->pim_dr_election_changes);
+ vty_out(vty, "\n");
+ vty_out(vty, "\n");
+
+ /* FHR */
+ print_header = 1;
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ if (!up->rpf.source_nexthop.interface)
+ continue;
+
+ if (strcmp(ifp->name,
+ up->rpf.source_nexthop
+ .interface->name) != 0)
+ continue;
+
+ if (!(up->flags & PIM_UPSTREAM_FLAG_MASK_FHR))
+ continue;
+
+ if (print_header) {
+ vty_out(vty,
+ "FHR - First Hop Router\n");
+ vty_out(vty,
+ "----------------------\n");
+ print_header = 0;
+ }
+
+ pim_time_uptime(uptime, sizeof(uptime),
+ now - up->state_transition);
+ vty_out(vty,
+ "%pPAs : %pPAs is a source, uptime is %s\n",
+ &up->sg.grp, &up->sg.src, uptime);
+ }
+
+ if (!print_header) {
+ vty_out(vty, "\n");
+ vty_out(vty, "\n");
+ }
+
+ vty_out(vty, "Hellos\n");
+ vty_out(vty, "------\n");
+ vty_out(vty, "Period : %d\n",
+ pim_ifp->pim_hello_period);
+ vty_out(vty, "HoldTime : %d\n",
+ PIM_IF_DEFAULT_HOLDTIME(pim_ifp));
+ vty_out(vty, "Timer : %s\n", hello_timer);
+ vty_out(vty, "StatStart : %s\n", stat_uptime);
+ vty_out(vty, "Receive : %d\n",
+ pim_ifp->pim_ifstat_hello_recv);
+ vty_out(vty, "Receive Failed : %d\n",
+ pim_ifp->pim_ifstat_hello_recvfail);
+ vty_out(vty, "Send : %d\n",
+ pim_ifp->pim_ifstat_hello_sent);
+ vty_out(vty, "Send Failed : %d\n",
+ pim_ifp->pim_ifstat_hello_sendfail);
+ vty_out(vty, "Generation ID : %08x\n",
+ pim_ifp->pim_generation_id);
+ vty_out(vty, "\n");
+ vty_out(vty, "\n");
+
+ pim_print_ifp_flags(vty, ifp);
+
+ vty_out(vty, "Join Prune Interval\n");
+ vty_out(vty, "-------------------\n");
+ vty_out(vty, "LAN Delay : %s\n",
+ pim_if_lan_delay_enabled(ifp) ? "yes" : "no");
+ vty_out(vty, "Effective Propagation Delay : %d msec\n",
+ pim_if_effective_propagation_delay_msec(ifp));
+ vty_out(vty, "Effective Override Interval : %d msec\n",
+ pim_if_effective_override_interval_msec(ifp));
+ vty_out(vty, "Join Prune Override Interval : %d msec\n",
+ pim_if_jp_override_interval_msec(ifp));
+ vty_out(vty, "\n");
+ vty_out(vty, "\n");
+
+ vty_out(vty, "LAN Prune Delay\n");
+ vty_out(vty, "---------------\n");
+ vty_out(vty, "Propagation Delay : %d msec\n",
+ pim_ifp->pim_propagation_delay_msec);
+ vty_out(vty, "Propagation Delay (Highest) : %d msec\n",
+ pim_ifp->pim_neighbors_highest_propagation_delay_msec);
+ vty_out(vty, "Override Interval : %d msec\n",
+ pim_ifp->pim_override_interval_msec);
+ vty_out(vty, "Override Interval (Highest) : %d msec\n",
+ pim_ifp->pim_neighbors_highest_override_interval_msec);
+ vty_out(vty, "\n");
+ vty_out(vty, "\n");
+
+ vty_out(vty, "BSM Status\n");
+ vty_out(vty, "----------\n");
+ vty_out(vty, "Bsm Enabled : %s\n",
+ pim_ifp->bsm_enable ? "yes" : "no");
+ vty_out(vty, "Unicast Bsm Enabled : %s\n",
+ pim_ifp->ucast_bsm_accept ? "yes" : "no");
+ vty_out(vty, "\n");
+ vty_out(vty, "\n");
+ }
+ }
+
+ if (!found_ifname && !json)
+ vty_out(vty, "%% No such interface\n");
+}
+
+/*
+ * Show the configured SSM group range for @pim: the prefix-list name if
+ * one is set, otherwise the standard SSM range.  @uj selects JSON output.
+ */
+void ip_pim_ssm_show_group_range(struct pim_instance *pim, struct vty *vty,
+ bool uj)
+{
+ struct pim_ssm *ssm = pim->ssm_info;
+ const char *range_str =
+ ssm->plist_name ? ssm->plist_name : PIM_SSM_STANDARD_RANGE;
+
+ if (uj) {
+ json_object *json;
+
+ json = json_object_new_object();
+ json_object_string_add(json, "ssmGroups", range_str);
+ /* vty_json() takes ownership of and frees the object. */
+ vty_json(vty, json);
+ } else
+ vty_out(vty, "SSM group range : %s\n", range_str);
+}
+
+/* Context passed to the text-mode nexthop-cache hash walker. */
+struct vty_pnc_cache_walk_data {
+ struct vty *vty;
+ struct pim_instance *pim;
+};
+
+/* Context passed to the JSON-mode nexthop-cache hash walker. */
+struct json_pnc_cache_walk_data {
+ json_object *json_obj;
+ struct pim_instance *pim;
+};
+
+/*
+ * hash_walk() callback: print one nexthop-cache entry (one RPF address
+ * and all of its resolved nexthops) as a text table on the vty.
+ */
+static int pim_print_vty_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg)
+{
+ struct pim_nexthop_cache *pnc = bucket->data;
+ struct vty_pnc_cache_walk_data *cwd = arg;
+ struct vty *vty = cwd->vty;
+ struct pim_instance *pim = cwd->pim;
+ struct nexthop *nh_node = NULL;
+ ifindex_t first_ifindex;
+ struct interface *ifp = NULL;
+ struct ttable *tt = NULL;
+ char *table = NULL;
+
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Address|Interface|Nexthop");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+
+ for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
+ first_ifindex = nh_node->ifindex;
+
+ /* The interface may have gone away; print "NULL" then. */
+ ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
+
+#if PIM_IPV == 4
+ ttable_add_row(tt, "%pPA|%s|%pI4", &pnc->rpf.rpf_addr,
+ ifp ? ifp->name : "NULL", &nh_node->gate.ipv4);
+#else
+ ttable_add_row(tt, "%pPA|%s|%pI6", &pnc->rpf.rpf_addr,
+ ifp ? ifp->name : "NULL", &nh_node->gate.ipv6);
+#endif
+ }
+ /* Dump the generated table. */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+
+ return CMD_SUCCESS;
+}
+
+/*
+ * hash_walk() callback: add one nexthop-cache entry (one RPF address
+ * and all of its resolved nexthops) to cwd->json_obj, keyed by the
+ * address, with the nexthops collected in a "nexthops" array.
+ *
+ * Fixes versus the previous revision:
+ *  - pim_enable and pim_ifp are now reset for every nexthop; before,
+ *    once one hop's interface was PIM-enabled, every following hop was
+ *    reported "pimEnabled": true (and a NULL ifp reused a stale
+ *    pim_ifp from the previous iteration).
+ *  - when a row for the address already exists, the existing
+ *    "nexthops" array is looked up instead of relying on a json_arr
+ *    value left over from an earlier iteration (stale or NULL).
+ */
+static int pim_print_json_pnc_cache_walkcb(struct hash_bucket *backet,
+ void *arg)
+{
+ struct pim_nexthop_cache *pnc = backet->data;
+ struct json_pnc_cache_walk_data *cwd = arg;
+ struct pim_instance *pim = cwd->pim;
+ struct nexthop *nh_node = NULL;
+ ifindex_t first_ifindex;
+ struct interface *ifp = NULL;
+ char addr_str[PIM_ADDRSTRLEN];
+ json_object *json_row = NULL;
+ json_object *json_ifp = NULL;
+ json_object *json_arr = NULL;
+ struct pim_interface *pim_ifp = NULL;
+
+ for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
+ /* Evaluate PIM state per nexthop, not cumulatively. */
+ bool pim_enable = false;
+
+ first_ifindex = nh_node->ifindex;
+ ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
+ snprintfrr(addr_str, sizeof(addr_str), "%pPA",
+ &pnc->rpf.rpf_addr);
+ json_object_object_get_ex(cwd->json_obj, addr_str, &json_row);
+ if (!json_row) {
+ json_row = json_object_new_object();
+ json_object_string_addf(json_row, "address", "%pPA",
+ &pnc->rpf.rpf_addr);
+ json_object_object_addf(cwd->json_obj, json_row, "%pPA",
+ &pnc->rpf.rpf_addr);
+ json_arr = json_object_new_array();
+ json_object_object_add(json_row, "nexthops", json_arr);
+ } else {
+ /* Reuse the array attached when the row was made. */
+ json_object_object_get_ex(json_row, "nexthops",
+ &json_arr);
+ }
+ json_ifp = json_object_new_object();
+ json_object_string_add(json_ifp, "interface",
+ ifp ? ifp->name : "NULL");
+
+ /* NULL when the ifindex no longer resolves. */
+ pim_ifp = ifp ? ifp->info : NULL;
+
+ if (pim_ifp && pim_ifp->pim_enable)
+ pim_enable = true;
+
+ json_object_boolean_add(json_ifp, "pimEnabled", pim_enable);
+#if PIM_IPV == 4
+ json_object_string_addf(json_ifp, "nexthop", "%pI4",
+ &nh_node->gate.ipv4);
+#else
+ json_object_string_addf(json_ifp, "nexthop", "%pI6",
+ &nh_node->gate.ipv6);
+#endif
+ json_object_array_add(json_arr, json_ifp);
+ }
+ return CMD_SUCCESS;
+}
+
+/*
+ * CLI helper: perform an on-demand ECMP nexthop lookup for (source,
+ * group) in the named VRF (default VRF when @vrf is NULL) and print the
+ * chosen nexthop/interface.  Returns CMD_WARNING on invalid input,
+ * otherwise CMD_SUCCESS (also when the lookup itself fails, after
+ * printing a diagnostic).
+ */
+int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
+ pim_addr source, pim_addr group)
+{
+ int result = 0;
+ pim_addr vif_source;
+ struct prefix grp;
+ struct pim_nexthop nexthop;
+ struct vrf *v;
+
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v)
+ return CMD_WARNING;
+
+#if PIM_IPV == 4
+ /* IPv4 only: the source must NOT be a multicast (224/4) address
+ * and the group MUST be one.
+ */
+ if (pim_is_group_224_4(source)) {
+ vty_out(vty,
+ "Invalid argument. Expected Valid Source Address.\n");
+ return CMD_WARNING;
+ }
+
+ if (!pim_is_group_224_4(group)) {
+ vty_out(vty,
+ "Invalid argument. Expected Valid Multicast Group Address.\n");
+ return CMD_WARNING;
+ }
+#endif
+
+ if (!pim_rp_set_upstream_addr(v->info, &vif_source, source, group))
+ return CMD_SUCCESS;
+
+ pim_addr_to_prefix(&grp, group);
+ memset(&nexthop, 0, sizeof(nexthop));
+
+ result =
+ pim_ecmp_nexthop_lookup(v->info, &nexthop, vif_source, &grp, 0);
+
+ if (!result) {
+ vty_out(vty,
+ "Nexthop Lookup failed, no usable routes returned.\n");
+ return CMD_SUCCESS;
+ }
+
+ vty_out(vty, "Group %pFXh --- Nexthop %pPAs Interface %s\n", &grp,
+ &nexthop.mrib_nexthop_addr, nexthop.interface->name);
+
+ return CMD_SUCCESS;
+}
+
+/*
+ * CLI helper: resolve the VRF name (default VRF when @vrf is NULL) and
+ * dump the PIM nexthop cache; @uj selects JSON output.
+ */
+int pim_show_nexthop_cmd_helper(const char *vrf, struct vty *vty, bool uj)
+{
+ struct vrf *v;
+
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v)
+ return CMD_WARNING;
+
+ pim_show_nexthop(v->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+/*
+ * Walk the RPF nexthop cache of @pim and dump every entry, either as a
+ * JSON document (@uj true) or as text tables preceded by the entry
+ * count.
+ */
+void pim_show_nexthop(struct pim_instance *pim, struct vty *vty, bool uj)
+{
+ struct vty_pnc_cache_walk_data cwd;
+ struct json_pnc_cache_walk_data jcwd;
+
+ cwd.vty = vty;
+ cwd.pim = pim;
+ jcwd.pim = pim;
+
+ if (uj) {
+ jcwd.json_obj = json_object_new_object();
+ } else {
+ vty_out(vty, "Number of registered addresses: %lu\n",
+ pim->rpf_hash->count);
+ }
+
+ if (uj) {
+ hash_walk(pim->rpf_hash, pim_print_json_pnc_cache_walkcb,
+ &jcwd);
+ /* vty_json() takes ownership of and frees json_obj. */
+ vty_json(vty, jcwd.json_obj);
+ } else
+ hash_walk(pim->rpf_hash, pim_print_vty_pnc_cache_walkcb, &cwd);
+}
+
+/*
+ * CLI helper for "show ... pim neighbor": look up the VRF (default VRF
+ * when @vrf is NULL) and show PIM neighbors.  @json non-NULL selects
+ * JSON output; @interface non-NULL restricts to one interface (or, in
+ * the detailed path, a specific neighbor).
+ */
+int pim_show_neighbors_cmd_helper(const char *vrf, struct vty *vty,
+ const char *json, const char *interface)
+{
+ struct vrf *v;
+ json_object *json_parent = NULL;
+
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v)
+ return CMD_WARNING;
+
+ if (json)
+ json_parent = json_object_new_object();
+
+ if (interface)
+ pim_show_neighbors_single(v->info, vty, interface, json_parent);
+ else
+ pim_show_neighbors(v->info, vty, json_parent);
+
+ if (json)
+ vty_json(vty, json_parent);
+
+ return CMD_SUCCESS;
+}
+
+/*
+ * CLI helper for "show ... pim vrf all neighbor": iterate every VRF and
+ * show its PIM neighbors.  In JSON mode the per-VRF objects are nested
+ * under the VRF name in a single parent document.
+ */
+int pim_show_neighbors_vrf_all_cmd_helper(struct vty *vty, const char *json,
+ const char *interface)
+{
+ struct vrf *v;
+ json_object *json_parent = NULL;
+ json_object *json_vrf = NULL;
+
+ if (json)
+ json_parent = json_object_new_object();
+ RB_FOREACH (v, vrf_name_head, &vrfs_by_name) {
+ if (!json)
+ vty_out(vty, "VRF: %s\n", v->name);
+ else
+ json_vrf = json_object_new_object();
+
+ if (interface)
+ pim_show_neighbors_single(v->info, vty, interface,
+ json_vrf)
+ else
+ pim_show_neighbors(v->info, vty, json_vrf);
+
+ if (json)
+ json_object_object_add(json_parent, v->name, json_vrf);
+ }
+ if (json)
+ vty_json(vty, json_parent);
+
+ return CMD_SUCCESS;
+}
+
+/*
+ * Show detailed information about PIM neighbors.  @neighbor may be an
+ * interface name, a neighbor IP address, or the literal string "detail"
+ * (match everything).  Output goes into @json when non-NULL, otherwise
+ * to @vty as text.
+ */
+void pim_show_neighbors_single(struct pim_instance *pim, struct vty *vty,
+ const char *neighbor, json_object *json)
+{
+ struct listnode *neighnode;
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ struct pim_neighbor *neigh;
+ time_t now;
+ int found_neighbor = 0;
+ int option_address_list;
+ int option_dr_priority;
+ int option_generation_id;
+ int option_holdtime;
+ int option_lan_prune_delay;
+ int option_t_bit;
+ char uptime[10];
+ char expire[10];
+ char neigh_src_str[PIM_ADDRSTRLEN];
+
+ json_object *json_ifp = NULL;
+ json_object *json_row = NULL;
+
+ now = pim_time_monotonic_sec();
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ continue;
+
+ /* Skip interfaces whose PIM socket is not open. */
+ if (pim_ifp->pim_sock_fd < 0)
+ continue;
+
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, neighnode,
+ neigh)) {
+ snprintfrr(neigh_src_str, sizeof(neigh_src_str),
+ "%pPAs", &neigh->source_addr);
+
+ /*
+ * The user can specify either the interface name or the
+ * PIM neighbor IP.
+ * If this pim_ifp matches neither then skip.
+ */
+ if (strcmp(neighbor, "detail") &&
+ strcmp(neighbor, ifp->name) &&
+ strcmp(neighbor, neigh_src_str))
+ continue;
+
+ found_neighbor = 1;
+ pim_time_uptime(uptime, sizeof(uptime),
+ now - neigh->creation);
+ pim_time_timer_to_hhmmss(expire, sizeof(expire),
+ neigh->t_expire_timer);
+
+ /* Decode which PIM Hello options the neighbor sent. */
+ option_address_list = 0;
+ option_dr_priority = 0;
+ option_generation_id = 0;
+ option_holdtime = 0;
+ option_lan_prune_delay = 0;
+ option_t_bit = 0;
+
+ if (PIM_OPTION_IS_SET(neigh->hello_options,
+ PIM_OPTION_MASK_ADDRESS_LIST))
+ option_address_list = 1;
+
+ if (PIM_OPTION_IS_SET(neigh->hello_options,
+ PIM_OPTION_MASK_DR_PRIORITY))
+ option_dr_priority = 1;
+
+ if (PIM_OPTION_IS_SET(neigh->hello_options,
+ PIM_OPTION_MASK_GENERATION_ID))
+ option_generation_id = 1;
+
+ if (PIM_OPTION_IS_SET(neigh->hello_options,
+ PIM_OPTION_MASK_HOLDTIME))
+ option_holdtime = 1;
+
+ if (PIM_OPTION_IS_SET(neigh->hello_options,
+ PIM_OPTION_MASK_LAN_PRUNE_DELAY))
+ option_lan_prune_delay = 1;
+
+ if (PIM_OPTION_IS_SET(
+ neigh->hello_options,
+ PIM_OPTION_MASK_CAN_DISABLE_JOIN_SUPPRESSION))
+ option_t_bit = 1;
+
+ if (json) {
+
+ /* Does this ifp live in json? If not create it
+ */
+ json_object_object_get_ex(json, ifp->name,
+ &json_ifp);
+
+ if (!json_ifp) {
+ json_ifp = json_object_new_object();
+ json_object_pim_ifp_add(json_ifp, ifp);
+ json_object_object_add(json, ifp->name,
+ json_ifp);
+ }
+
+ json_row = json_object_new_object();
+ json_object_string_add(json_row, "interface",
+ ifp->name);
+ json_object_string_add(json_row, "address",
+ neigh_src_str);
+ json_object_string_add(json_row, "upTime",
+ uptime);
+ json_object_string_add(json_row, "holdtime",
+ expire);
+ json_object_int_add(json_row, "drPriority",
+ neigh->dr_priority);
+ json_object_int_add(json_row, "generationId",
+ neigh->generation_id);
+
+ if (option_address_list)
+ json_object_boolean_true_add(
+ json_row,
+ "helloOptionAddressList");
+
+ if (option_dr_priority)
+ json_object_boolean_true_add(
+ json_row,
+ "helloOptionDrPriority");
+
+ if (option_generation_id)
+ json_object_boolean_true_add(
+ json_row,
+ "helloOptionGenerationId");
+
+ if (option_holdtime)
+ json_object_boolean_true_add(
+ json_row,
+ "helloOptionHoldtime");
+
+ if (option_lan_prune_delay)
+ json_object_boolean_true_add(
+ json_row,
+ "helloOptionLanPruneDelay");
+
+ if (option_t_bit)
+ json_object_boolean_true_add(
+ json_row, "helloOptionTBit");
+
+ json_object_object_add(json_ifp, neigh_src_str,
+ json_row);
+
+ } else {
+ vty_out(vty, "Interface : %s\n", ifp->name);
+ vty_out(vty, "Neighbor : %s\n", neigh_src_str);
+ vty_out(vty,
+ " Uptime : %s\n",
+ uptime);
+ vty_out(vty,
+ " Holdtime : %s\n",
+ expire);
+ vty_out(vty,
+ " DR Priority : %d\n",
+ neigh->dr_priority);
+ vty_out(vty,
+ " Generation ID : %08x\n",
+ neigh->generation_id);
+ vty_out(vty,
+ " Override Interval (msec) : %d\n",
+ neigh->override_interval_msec);
+ vty_out(vty,
+ " Propagation Delay (msec) : %d\n",
+ neigh->propagation_delay_msec);
+ vty_out(vty,
+ " Hello Option - Address List : %s\n",
+ option_address_list ? "yes" : "no");
+ vty_out(vty,
+ " Hello Option - DR Priority : %s\n",
+ option_dr_priority ? "yes" : "no");
+ vty_out(vty,
+ " Hello Option - Generation ID : %s\n",
+ option_generation_id ? "yes" : "no");
+ vty_out(vty,
+ " Hello Option - Holdtime : %s\n",
+ option_holdtime ? "yes" : "no");
+ vty_out(vty,
+ " Hello Option - LAN Prune Delay : %s\n",
+ option_lan_prune_delay ? "yes" : "no");
+ vty_out(vty,
+ " Hello Option - T-bit : %s\n",
+ option_t_bit ? "yes" : "no");
+ /*
+ * NOTE(review): json_ifp is NULL (or stale)
+ * on this text path; bfd_sess_show() appears
+ * to tolerate that and print text — confirm.
+ */
+ bfd_sess_show(vty, json_ifp,
+ neigh->bfd_session);
+ vty_out(vty, "\n");
+ }
+ }
+ }
+
+ if (!found_neighbor && !json)
+ vty_out(vty, "%% No such interface or neighbor\n");
+}
+
+/*
+ * Show a one-line-per-neighbor summary of all PIM neighbors, either as
+ * a text table or, when @json is non-NULL, nested per interface name.
+ */
+void pim_show_neighbors(struct pim_instance *pim, struct vty *vty,
+ json_object *json)
+{
+ struct listnode *neighnode;
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ struct pim_neighbor *neigh;
+ struct ttable *tt = NULL;
+ char *table = NULL;
+ time_t now;
+ char uptime[10];
+ char expire[10];
+ char neigh_src_str[PIM_ADDRSTRLEN];
+ json_object *json_ifp_rows = NULL;
+ json_object *json_row = NULL;
+
+ now = pim_time_monotonic_sec();
+
+ if (!json) {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Interface|Neighbor|Uptime|Holdtime|DR Pri");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ continue;
+
+ /* Skip interfaces whose PIM socket is not open. */
+ if (pim_ifp->pim_sock_fd < 0)
+ continue;
+
+ if (json)
+ json_ifp_rows = json_object_new_object();
+
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, neighnode,
+ neigh)) {
+ snprintfrr(neigh_src_str, sizeof(neigh_src_str),
+ "%pPAs", &neigh->source_addr);
+ pim_time_uptime(uptime, sizeof(uptime),
+ now - neigh->creation);
+ pim_time_timer_to_hhmmss(expire, sizeof(expire),
+ neigh->t_expire_timer);
+
+ if (json) {
+ json_row = json_object_new_object();
+ json_object_string_add(json_row, "interface",
+ ifp->name);
+ json_object_string_add(json_row, "neighbor",
+ neigh_src_str);
+ json_object_string_add(json_row, "upTime",
+ uptime);
+ json_object_string_add(json_row, "holdTime",
+ expire);
+ json_object_int_add(json_row, "holdTimeMax",
+ neigh->holdtime);
+ json_object_int_add(json_row, "drPriority",
+ neigh->dr_priority);
+ json_object_object_add(json_ifp_rows,
+ neigh_src_str, json_row);
+
+ } else {
+ ttable_add_row(tt, "%s|%pPAs|%s|%s|%d",
+ ifp->name, &neigh->source_addr,
+ uptime, expire,
+ neigh->dr_priority);
+ }
+ }
+
+ if (json) {
+ /* Ownership moves to @json; reset for next ifp. */
+ json_object_object_add(json, ifp->name, json_ifp_rows);
+ json_ifp_rows = NULL;
+ }
+ }
+ /* Dump the generated table. */
+ if (!json) {
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
+}
+
+/*
+ * Northbound CLI handler: set the IGMP/MLD query max response time.
+ * Also enqueues "./enable true" if the protocol is not already enabled
+ * in the candidate config for this interface.
+ */
+int gm_process_query_max_response_time_cmd(struct vty *vty,
+ const char *qmrt_str)
+{
+ const struct lyd_node *pim_enable_dnode;
+
+ pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+
+ if (!pim_enable_dnode) {
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true");
+ } else {
+ if (!yang_dnode_get_bool(pim_enable_dnode, "."))
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY,
+ "true");
+ }
+
+ nb_cli_enqueue_change(vty, "./query-max-response-time", NB_OP_MODIFY,
+ qmrt_str);
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/*
+ * Northbound CLI handler: reset query max response time to its default
+ * by destroying the candidate-config node.
+ */
+int gm_process_no_query_max_response_time_cmd(struct vty *vty)
+{
+ nb_cli_enqueue_change(vty, "./query-max-response-time", NB_OP_DESTROY,
+ NULL);
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/*
+ * Northbound CLI handler: set the IGMP/MLD last-member query count.
+ * The value is stored in the "robustness-variable" YANG leaf — in this
+ * model the LMQC is represented by the robustness variable.  Enables
+ * the protocol first if it is not enabled in the candidate config.
+ */
+int gm_process_last_member_query_count_cmd(struct vty *vty,
+ const char *lmqc_str)
+{
+ const struct lyd_node *pim_enable_dnode;
+
+ pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+ if (!pim_enable_dnode) {
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true");
+ } else {
+ if (!yang_dnode_get_bool(pim_enable_dnode, "."))
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY,
+ "true");
+ }
+
+ nb_cli_enqueue_change(vty, "./robustness-variable", NB_OP_MODIFY,
+ lmqc_str);
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/*
+ * Northbound CLI handler: reset last-member query count (stored as the
+ * robustness variable) to its default.
+ */
+int gm_process_no_last_member_query_count_cmd(struct vty *vty)
+{
+ nb_cli_enqueue_change(vty, "./robustness-variable", NB_OP_DESTROY,
+ NULL);
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/*
+ * Northbound CLI handler: set the IGMP/MLD last-member query interval.
+ * Enables the protocol first if it is not enabled in the candidate
+ * config for this interface.
+ */
+int gm_process_last_member_query_interval_cmd(struct vty *vty,
+ const char *lmqi_str)
+{
+ const struct lyd_node *pim_enable_dnode;
+
+ pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+ if (!pim_enable_dnode) {
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true");
+ } else {
+ if (!yang_dnode_get_bool(pim_enable_dnode, "."))
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY,
+ "true");
+ }
+
+ nb_cli_enqueue_change(vty, "./last-member-query-interval", NB_OP_MODIFY,
+ lmqi_str);
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/*
+ * Northbound CLI handler: reset the last-member query interval to its
+ * default.
+ */
+int gm_process_no_last_member_query_interval_cmd(struct vty *vty)
+{
+ nb_cli_enqueue_change(vty, "./last-member-query-interval",
+ NB_OP_DESTROY, NULL);
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/*
+ * Northbound CLI handler: create or destroy (per @operation) an
+ * ssmpingd source IP entry in the current VRF's PIM candidate config.
+ */
+int pim_process_ssmpingd_cmd(struct vty *vty, enum nb_operation operation,
+ const char *src_str)
+{
+ const char *vrfname;
+ char ssmpingd_ip_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ /* Build the absolute xpath for this VRF's ssm-pingd leaf-list. */
+ snprintf(ssmpingd_ip_xpath, sizeof(ssmpingd_ip_xpath),
+ FRR_PIM_VRF_XPATH, "frr-pim:pimd", "pim", vrfname,
+ FRR_PIM_AF_XPATH_VAL);
+ strlcat(ssmpingd_ip_xpath, "/ssm-pingd-source-ip",
+ sizeof(ssmpingd_ip_xpath));
+
+ nb_cli_enqueue_change(vty, ssmpingd_ip_xpath, operation, src_str);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/*
+ * Northbound CLI handler: enable Bootstrap Message (BSM) processing on
+ * the interface, first enabling PIM itself in the candidate config if
+ * it is not already enabled.
+ */
+int pim_process_bsm_cmd(struct vty *vty)
+{
+ const struct lyd_node *gm_enable_dnode;
+
+ gm_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_GMP_ENABLE_XPATH, VTY_CURR_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+ if (!gm_enable_dnode)
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+ else {
+ if (!yang_dnode_get_bool(gm_enable_dnode, "."))
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+ }
+
+ nb_cli_enqueue_change(vty, "./bsm", NB_OP_MODIFY, "true");
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/* Northbound CLI handler: disable BSM processing on the interface. */
+int pim_process_no_bsm_cmd(struct vty *vty)
+{
+ nb_cli_enqueue_change(vty, "./bsm", NB_OP_MODIFY, "false");
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/*
+ * Northbound CLI handler: accept unicast BSM on the interface, first
+ * enabling PIM itself in the candidate config if needed.
+ */
+int pim_process_unicast_bsm_cmd(struct vty *vty)
+{
+ const struct lyd_node *gm_enable_dnode;
+
+ gm_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_GMP_ENABLE_XPATH, VTY_CURR_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+ if (!gm_enable_dnode)
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+ else {
+ if (!yang_dnode_get_bool(gm_enable_dnode, "."))
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+ }
+
+ nb_cli_enqueue_change(vty, "./unicast-bsm", NB_OP_MODIFY, "true");
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/* Northbound CLI handler: stop accepting unicast BSM on the interface. */
+int pim_process_no_unicast_bsm_cmd(struct vty *vty)
+{
+ nb_cli_enqueue_change(vty, "./unicast-bsm", NB_OP_MODIFY, "false");
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+/*
+ * Print last-event timestamps (relative to @now) and event counters for
+ * OIL rescans and MFC add/delete operations.
+ */
+static void show_scan_oil_stats(struct pim_instance *pim, struct vty *vty,
+ time_t now)
+{
+ char uptime_scan_oil[10];
+ char uptime_mroute_add[10];
+ char uptime_mroute_del[10];
+
+ pim_time_uptime_begin(uptime_scan_oil, sizeof(uptime_scan_oil), now,
+ pim->scan_oil_last);
+ pim_time_uptime_begin(uptime_mroute_add, sizeof(uptime_mroute_add), now,
+ pim->mroute_add_last);
+ pim_time_uptime_begin(uptime_mroute_del, sizeof(uptime_mroute_del), now,
+ pim->mroute_del_last);
+
+ vty_out(vty,
+ "Scan OIL - Last: %s Events: %lld\n"
+ "MFC Add - Last: %s Events: %lld\n"
+ "MFC Del - Last: %s Events: %lld\n",
+ uptime_scan_oil, (long long)pim->scan_oil_events,
+ uptime_mroute_add, (long long)pim->mroute_add_events,
+ uptime_mroute_del, (long long)pim->mroute_del_events);
+}
+
+/*
+ * Show kernel multicast-forwarding packet/byte counters per interface,
+ * queried via SIOCGETVIFCNT (IPv4) or SIOCGETMIFCNT_IN6 (IPv6) on the
+ * mroute socket.  Output as JSON when @json is non-NULL, otherwise as a
+ * text table.  An ioctl failure logs a warning and the (zeroed)
+ * counters are printed anyway.
+ */
+void show_multicast_interfaces(struct pim_instance *pim, struct vty *vty,
+ json_object *json)
+{
+ struct interface *ifp;
+ struct ttable *tt = NULL;
+ char *table = NULL;
+ json_object *json_row = NULL;
+
+ vty_out(vty, "\n");
+
+ if (!json) {
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(
+ tt,
+ "Interface|Address|ifi|Vif|PktsIn|PktsOut|BytesIn|BytesOut");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ }
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp;
+#if PIM_IPV == 4
+ struct sioc_vif_req vreq;
+#else
+ struct sioc_mif_req6 vreq;
+#endif
+
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ continue;
+
+ memset(&vreq, 0, sizeof(vreq));
+#if PIM_IPV == 4
+ vreq.vifi = pim_ifp->mroute_vif_index;
+ if (ioctl(pim->mroute_socket, SIOCGETVIFCNT, &vreq)) {
+ zlog_warn(
+ "ioctl(SIOCGETVIFCNT=%lu) failure for interface %s vif_index=%d: errno=%d: %s",
+ (unsigned long)SIOCGETVIFCNT, ifp->name,
+ pim_ifp->mroute_vif_index, errno,
+ safe_strerror(errno));
+ }
+#else
+ vreq.mifi = pim_ifp->mroute_vif_index;
+ if (ioctl(pim->mroute_socket, SIOCGETMIFCNT_IN6, &vreq)) {
+ zlog_warn(
+ "ioctl(SIOCGETMIFCNT_IN6=%lu) failure for interface %s vif_index=%d: errno=%d: %s",
+ (unsigned long)SIOCGETMIFCNT_IN6, ifp->name,
+ pim_ifp->mroute_vif_index, errno,
+ safe_strerror(errno));
+ }
+#endif
+
+ if (json) {
+ json_row = json_object_new_object();
+ json_object_string_add(json_row, "name", ifp->name);
+ json_object_string_add(json_row, "state",
+ if_is_up(ifp) ? "up" : "down");
+ json_object_string_addf(json_row, "address", "%pPA",
+ &pim_ifp->primary_address);
+ json_object_int_add(json_row, "ifIndex", ifp->ifindex);
+ json_object_int_add(json_row, "vif",
+ pim_ifp->mroute_vif_index);
+ json_object_int_add(json_row, "pktsIn",
+ (unsigned long)vreq.icount);
+ json_object_int_add(json_row, "pktsOut",
+ (unsigned long)vreq.ocount);
+ json_object_int_add(json_row, "bytesIn",
+ (unsigned long)vreq.ibytes);
+ json_object_int_add(json_row, "bytesOut",
+ (unsigned long)vreq.obytes);
+ json_object_object_add(json, ifp->name, json_row);
+ } else {
+ ttable_add_row(tt, "%s|%pPAs|%d|%d|%lu|%lu|%lu|%lu",
+ ifp->name, &pim_ifp->primary_address,
+ ifp->ifindex, pim_ifp->mroute_vif_index,
+ (unsigned long)vreq.icount,
+ (unsigned long)vreq.ocount,
+ (unsigned long)vreq.ibytes,
+ (unsigned long)vreq.obytes);
+ }
+ }
+ /* Dump the generated table. */
+ if (!json) {
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ ttable_del(tt);
+ }
+}
+
+/*
+ * Implement "show ip multicast": MLAG role, socket descriptors, global
+ * timers, RPF refresh stats, scan/MFC stats and per-interface counters.
+ */
+void pim_cmd_show_ip_multicast_helper(struct pim_instance *pim, struct vty *vty)
+{
+ struct vrf *vrf = pim->vrf;
+ time_t now = pim_time_monotonic_sec();
+ char uptime[10];
+ char mlag_role[80];
+
+ /*
+ * NOTE(review): this reassignment is redundant if vrf->info is
+ * always the @pim passed in — confirm, then drop.
+ */
+ pim = vrf->info;
+
+ vty_out(vty, "Router MLAG Role: %s\n",
+ mlag_role2str(router->mlag_role, mlag_role, sizeof(mlag_role)));
+ vty_out(vty, "Mroute socket descriptor:");
+
+ vty_out(vty, " %d(%s)\n", pim->mroute_socket, vrf->name);
+ vty_out(vty, "PIM Register socket descriptor:");
+ vty_out(vty, " %d(%s)\n", pim->reg_sock, vrf->name);
+
+ pim_time_uptime(uptime, sizeof(uptime),
+ now - pim->mroute_socket_creation);
+ vty_out(vty, "Mroute socket uptime: %s\n", uptime);
+
+ vty_out(vty, "\n");
+
+ pim_zebra_zclient_update(vty);
+ pim_zlookup_show_ip_multicast(vty);
+
+ vty_out(vty, "\n");
+ vty_out(vty, "Maximum highest VifIndex: %d\n", PIM_MAX_USABLE_VIFS);
+
+ vty_out(vty, "\n");
+ vty_out(vty, "Upstream Join Timer: %d secs\n", router->t_periodic);
+ vty_out(vty, "Join/Prune Holdtime: %d secs\n", PIM_JP_HOLDTIME);
+ vty_out(vty, "PIM ECMP: %s\n", pim->ecmp_enable ? "Enable" : "Disable");
+ vty_out(vty, "PIM ECMP Rebalance: %s\n",
+ pim->ecmp_rebalance_enable ? "Enable" : "Disable");
+
+ vty_out(vty, "\n");
+
+ pim_show_rpf_refresh_stats(vty, pim, now, NULL);
+
+ vty_out(vty, "\n");
+
+ show_scan_oil_stats(pim, vty, now);
+
+ show_multicast_interfaces(pim, vty, NULL);
+}
+
+/*
+ * "show ip mroute" worker: dump the multicast routing table for one pim
+ * instance.  Dynamic (PIM/IGMP-learned) entries come from the channel
+ * oil RB-tree, static entries from pim->static_routes.  'sg' optionally
+ * filters by group and/or source (any-address means "no filter");
+ * 'fill' repeats the source/group columns on every OIF row instead of
+ * blanking them after the first row of an entry.  Output goes into a
+ * ttable when 'json' is NULL, otherwise into JSON nested group->source.
+ */
+void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
+		 bool fill, json_object *json)
+{
+	struct listnode *node;
+	struct channel_oil *c_oil;
+	struct static_route *s_route;
+	struct ttable *tt = NULL;
+	char *table = NULL;
+	time_t now;
+	json_object *json_group = NULL;
+	json_object *json_source = NULL;
+	json_object *json_oil = NULL;
+	json_object *json_ifp_out = NULL;
+	int found_oif;
+	int first;
+	char grp_str[PIM_ADDRSTRLEN];
+	char src_str[PIM_ADDRSTRLEN];
+	char in_ifname[INTERFACE_NAMSIZ + 1];
+	char out_ifname[INTERFACE_NAMSIZ + 1];
+	int oif_vif_index;
+	struct interface *ifp_in;
+	char proto[100];
+	char state_str[PIM_REG_STATE_STR_LEN];
+	char mroute_uptime[10];
+
+	if (!json) {
+		vty_out(vty, "IP Multicast Routing Table\n");
+		vty_out(vty, "Flags: S - Sparse, C - Connected, P - Pruned\n");
+		vty_out(vty,
+			"       R - SGRpt Pruned, F - Register flag, T - SPT-bit set\n");
+
+		/* Prepare table. */
+		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+		ttable_add_row(
+			tt, "Source|Group|Flags|Proto|Input|Output|TTL|Uptime");
+		tt->style.cell.rpad = 2;
+		tt->style.corner = '+';
+		ttable_restyle(tt);
+	}
+
+	now = pim_time_monotonic_sec();
+
+	/* print list of PIM and IGMP routes */
+	frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) {
+		found_oif = 0;
+		first = 1;
+		if (!c_oil->installed)
+			continue;
+
+		/* apply the optional (S,G) filter */
+		if (!pim_addr_is_any(sg->grp) &&
+		    pim_addr_cmp(sg->grp, *oil_mcastgrp(c_oil)))
+			continue;
+		if (!pim_addr_is_any(sg->src) &&
+		    pim_addr_cmp(sg->src, *oil_origin(c_oil)))
+			continue;
+
+		snprintfrr(grp_str, sizeof(grp_str), "%pPAs",
+			   oil_mcastgrp(c_oil));
+		snprintfrr(src_str, sizeof(src_str), "%pPAs",
+			   oil_origin(c_oil));
+
+		/* build the flags column ("S" plus any of C/R/F/T/P) */
+		strlcpy(state_str, "S", sizeof(state_str));
+		/* When a non DR receives a igmp join, it creates a (*,G)
+		 * channel_oil without any upstream creation
+		 */
+		if (c_oil->up) {
+			if (PIM_UPSTREAM_FLAG_TEST_SRC_IGMP(c_oil->up->flags))
+				strlcat(state_str, "C", sizeof(state_str));
+			if (pim_upstream_is_sg_rpt(c_oil->up))
+				strlcat(state_str, "R", sizeof(state_str));
+			if (PIM_UPSTREAM_FLAG_TEST_FHR(c_oil->up->flags))
+				strlcat(state_str, "F", sizeof(state_str));
+			if (c_oil->up->sptbit == PIM_UPSTREAM_SPTBIT_TRUE)
+				strlcat(state_str, "T", sizeof(state_str));
+		}
+		if (pim_channel_oil_empty(c_oil))
+			strlcat(state_str, "P", sizeof(state_str));
+
+		ifp_in = pim_if_find_by_vif_index(pim, *oil_incoming_vif(c_oil));
+
+		if (ifp_in)
+			strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname));
+		else
+			strlcpy(in_ifname, "<iif?>", sizeof(in_ifname));
+
+
+		pim_time_uptime(mroute_uptime, sizeof(mroute_uptime),
+				now - c_oil->mroute_creation);
+
+		if (json) {
+
+			/* Find the group, create it if it doesn't exist */
+			json_object_object_get_ex(json, grp_str, &json_group);
+
+			if (!json_group) {
+				json_group = json_object_new_object();
+				json_object_object_add(json, grp_str,
+						       json_group);
+			}
+
+			/* Find the source nested under the group, create it if
+			 * it doesn't exist
+			 */
+			json_object_object_get_ex(json_group, src_str,
+						  &json_source);
+
+			if (!json_source) {
+				json_source = json_object_new_object();
+				json_object_object_add(json_group, src_str,
+						       json_source);
+			}
+
+			/* Find the inbound interface nested under the source,
+			 * create it if it doesn't exist
+			 */
+			json_object_string_add(json_source, "source", src_str);
+			json_object_string_add(json_source, "group", grp_str);
+			json_object_int_add(json_source, "installed",
+					    c_oil->installed);
+			json_object_int_add(json_source, "refCount",
+					    c_oil->oil_ref_count);
+			json_object_int_add(json_source, "oilSize",
+					    c_oil->oil_size);
+			json_object_int_add(json_source, "oilInheritedRescan",
+					    c_oil->oil_inherited_rescan);
+			json_object_string_add(json_source, "iif", in_ifname);
+			json_object_string_add(json_source, "upTime",
+					       mroute_uptime);
+			json_oil = NULL;
+		}
+
+		/* one output row per outgoing interface in the oil */
+		for (oif_vif_index = 0; oif_vif_index < MAXVIFS;
+		     ++oif_vif_index) {
+			struct interface *ifp_out;
+			int ttl;
+
+			ttl = oil_if_has(c_oil, oif_vif_index);
+			if (ttl < 1)
+				continue;
+
+			/* do not display muted OIFs */
+			if (c_oil->oif_flags[oif_vif_index] & PIM_OIF_FLAG_MUTE)
+				continue;
+
+			/* skip iif==oif unless explicitly allowed */
+			if (*oil_incoming_vif(c_oil) == oif_vif_index &&
+			    !pim_mroute_allow_iif_in_oil(c_oil, oif_vif_index))
+				continue;
+
+			ifp_out = pim_if_find_by_vif_index(pim, oif_vif_index);
+			found_oif = 1;
+
+			if (ifp_out)
+				strlcpy(out_ifname, ifp_out->name,
+					sizeof(out_ifname));
+			else
+				strlcpy(out_ifname, "<oif?>",
+					sizeof(out_ifname));
+
+			if (json) {
+				json_ifp_out = json_object_new_object();
+				json_object_string_add(json_ifp_out, "source",
+						       src_str);
+				json_object_string_add(json_ifp_out, "group",
+						       grp_str);
+
+				if (c_oil->oif_flags[oif_vif_index] &
+				    PIM_OIF_FLAG_PROTO_PIM)
+					json_object_boolean_true_add(
+						json_ifp_out, "protocolPim");
+
+				/* both #if arms are a single statement, so the
+				 * unbraced if above stays correct either way
+				 */
+				if (c_oil->oif_flags[oif_vif_index] &
+				    PIM_OIF_FLAG_PROTO_GM)
+#if PIM_IPV == 4
+					json_object_boolean_true_add(
+						json_ifp_out, "protocolIgmp");
+#else
+					json_object_boolean_true_add(
+						json_ifp_out, "protocolMld");
+#endif
+
+				if (c_oil->oif_flags[oif_vif_index] &
+				    PIM_OIF_FLAG_PROTO_VXLAN)
+					json_object_boolean_true_add(
+						json_ifp_out, "protocolVxlan");
+
+				if (c_oil->oif_flags[oif_vif_index] &
+				    PIM_OIF_FLAG_PROTO_STAR)
+					json_object_boolean_true_add(
+						json_ifp_out,
+						"protocolInherited");
+
+				json_object_string_add(json_ifp_out,
+						       "inboundInterface",
+						       in_ifname);
+				json_object_int_add(json_ifp_out, "iVifI",
+						    *oil_incoming_vif(c_oil));
+				json_object_string_add(json_ifp_out,
+						       "outboundInterface",
+						       out_ifname);
+				json_object_int_add(json_ifp_out, "oVifI",
+						    oif_vif_index);
+				json_object_int_add(json_ifp_out, "ttl", ttl);
+				json_object_string_add(json_ifp_out, "upTime",
+						       mroute_uptime);
+				json_object_string_add(json_source, "flags",
+						       state_str);
+				if (!json_oil) {
+					json_oil = json_object_new_object();
+					json_object_object_add(json_source,
+							       "oil", json_oil);
+				}
+				json_object_object_add(json_oil, out_ifname,
+						       json_ifp_out);
+			} else {
+				/* later matches overwrite earlier ones; only
+				 * one protocol label is shown per OIF
+				 */
+				proto[0] = '\0';
+				if (c_oil->oif_flags[oif_vif_index] &
+				    PIM_OIF_FLAG_PROTO_PIM) {
+					strlcpy(proto, "PIM", sizeof(proto));
+				}
+
+				if (c_oil->oif_flags[oif_vif_index] &
+				    PIM_OIF_FLAG_PROTO_GM) {
+#if PIM_IPV == 4
+					strlcpy(proto, "IGMP", sizeof(proto));
+#else
+					strlcpy(proto, "MLD", sizeof(proto));
+#endif
+				}
+
+				if (c_oil->oif_flags[oif_vif_index] &
+				    PIM_OIF_FLAG_PROTO_VXLAN) {
+					strlcpy(proto, "VxLAN", sizeof(proto));
+				}
+
+				if (c_oil->oif_flags[oif_vif_index] &
+				    PIM_OIF_FLAG_PROTO_STAR) {
+					strlcpy(proto, "STAR", sizeof(proto));
+				}
+
+				ttable_add_row(tt, "%s|%s|%s|%s|%s|%s|%d|%s",
+					       src_str, grp_str, state_str,
+					       proto, in_ifname, out_ifname,
+					       ttl, mroute_uptime);
+
+				/* NOTE(review): unlike the static-route loop
+				 * below, 'fill' is not checked here before
+				 * blanking the repeated columns -- confirm
+				 * this asymmetry is intentional
+				 */
+				if (first) {
+					src_str[0] = '\0';
+					grp_str[0] = '\0';
+					in_ifname[0] = '\0';
+					state_str[0] = '\0';
+					mroute_uptime[0] = '\0';
+					first = 0;
+				}
+			}
+		}
+
+		/* entry with no (displayable) OIF: still print one row */
+		if (!json && !found_oif) {
+			ttable_add_row(tt, "%pPAs|%pPAs|%s|%s|%s|%s|%d|%s",
+				       oil_origin(c_oil), oil_mcastgrp(c_oil),
+				       state_str, "none", in_ifname, "none", 0,
+				       "--:--:--");
+		}
+	}
+
+	/* Print list of static routes */
+	for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) {
+		first = 1;
+
+		if (!s_route->c_oil.installed)
+			continue;
+
+		snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &s_route->group);
+		snprintfrr(src_str, sizeof(src_str), "%pPAs", &s_route->source);
+		ifp_in = pim_if_find_by_vif_index(pim, s_route->iif);
+		found_oif = 0;
+
+		if (ifp_in)
+			strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname));
+		else
+			strlcpy(in_ifname, "<iif?>", sizeof(in_ifname));
+
+		if (json) {
+
+			/* Find the group, create it if it doesn't exist */
+			json_object_object_get_ex(json, grp_str, &json_group);
+
+			if (!json_group) {
+				json_group = json_object_new_object();
+				json_object_object_add(json, grp_str,
+						       json_group);
+			}
+
+			/* Find the source nested under the group, create it if
+			 * it doesn't exist
+			 */
+			json_object_object_get_ex(json_group, src_str,
+						  &json_source);
+
+			if (!json_source) {
+				json_source = json_object_new_object();
+				json_object_object_add(json_group, src_str,
+						       json_source);
+			}
+
+			json_object_string_add(json_source, "iif", in_ifname);
+			json_oil = NULL;
+		} else {
+			/* static entries always carry the same proto label */
+			strlcpy(proto, "STATIC", sizeof(proto));
+		}
+
+		for (oif_vif_index = 0; oif_vif_index < MAXVIFS;
+		     ++oif_vif_index) {
+			struct interface *ifp_out;
+			char oif_uptime[10];
+			int ttl;
+
+			ttl = s_route->oif_ttls[oif_vif_index];
+			if (ttl < 1)
+				continue;
+
+			ifp_out = pim_if_find_by_vif_index(pim, oif_vif_index);
+			pim_time_uptime(
+				oif_uptime, sizeof(oif_uptime),
+				now - s_route->c_oil
+					      .oif_creation[oif_vif_index]);
+			found_oif = 1;
+
+			if (ifp_out)
+				strlcpy(out_ifname, ifp_out->name,
+					sizeof(out_ifname));
+			else
+				strlcpy(out_ifname, "<oif?>",
+					sizeof(out_ifname));
+
+			if (json) {
+				json_ifp_out = json_object_new_object();
+				json_object_string_add(json_ifp_out, "source",
+						       src_str);
+				json_object_string_add(json_ifp_out, "group",
+						       grp_str);
+				json_object_boolean_true_add(json_ifp_out,
+							     "protocolStatic");
+				json_object_string_add(json_ifp_out,
+						       "inboundInterface",
+						       in_ifname);
+				json_object_int_add(json_ifp_out, "iVifI",
+						    *oil_incoming_vif(
+							    &s_route->c_oil));
+				json_object_string_add(json_ifp_out,
+						       "outboundInterface",
+						       out_ifname);
+				json_object_int_add(json_ifp_out, "oVifI",
+						    oif_vif_index);
+				json_object_int_add(json_ifp_out, "ttl", ttl);
+				json_object_string_add(json_ifp_out, "upTime",
+						       oif_uptime);
+				if (!json_oil) {
+					json_oil = json_object_new_object();
+					json_object_object_add(json_source,
+							       "oil", json_oil);
+				}
+				json_object_object_add(json_oil, out_ifname,
+						       json_ifp_out);
+			} else {
+				ttable_add_row(
+					tt, "%pPAs|%pPAs|%s|%s|%s|%s|%d|%s",
+					&s_route->source, &s_route->group, "-",
+					proto, in_ifname, out_ifname, ttl,
+					oif_uptime);
+				if (first && !fill) {
+					src_str[0] = '\0';
+					grp_str[0] = '\0';
+					in_ifname[0] = '\0';
+					first = 0;
+				}
+			}
+		}
+
+		if (!json && !found_oif) {
+			ttable_add_row(tt, "%pPAs|%pPAs|%s|%s|%s|%s|%d|%s",
+				       &s_route->source, &s_route->group, "-",
+				       proto, in_ifname, "none", 0, "--:--:--");
+		}
+	}
+	/* Dump the generated table. */
+	if (!json) {
+		table = ttable_dump(tt, "\n");
+		vty_out(vty, "%s\n", table);
+		XFREE(MTYPE_TMP, table);
+		ttable_del(tt);
+	}
+}
+
+/*
+ * Emit forwarding counters (lastused/packets/bytes/wrongif) for one
+ * installed channel oil: either as a JSON object nested group->source,
+ * or as a single row appended to the caller's table.  Not-installed
+ * entries are skipped silently.
+ */
+static void show_mroute_count_per_channel_oil(struct channel_oil *c_oil,
+					      json_object *json,
+					      struct ttable *tt)
+{
+	json_object *grp_json = NULL;
+	json_object *src_json;
+
+	if (!c_oil->installed)
+		return;
+
+	/* refresh c_oil->cc from the kernel before reporting */
+	pim_mroute_update_counters(c_oil);
+
+	if (!json) {
+		/* table rows show deltas since the last "clear" */
+		ttable_add_row(tt, "%pPAs|%pPAs|%llu|%ld|%ld|%ld",
+			       oil_origin(c_oil), oil_mcastgrp(c_oil),
+			       c_oil->cc.lastused / 100,
+			       c_oil->cc.pktcnt - c_oil->cc.origpktcnt,
+			       c_oil->cc.bytecnt - c_oil->cc.origbytecnt,
+			       c_oil->cc.wrong_if - c_oil->cc.origwrong_if);
+		return;
+	}
+
+	char group_str[PIM_ADDRSTRLEN];
+	char source_str[PIM_ADDRSTRLEN];
+
+	snprintfrr(group_str, sizeof(group_str), "%pPAs",
+		   oil_mcastgrp(c_oil));
+	snprintfrr(source_str, sizeof(source_str), "%pPAs",
+		   oil_origin(c_oil));
+
+	/* group object is shared between sources; create on first use */
+	json_object_object_get_ex(json, group_str, &grp_json);
+	if (!grp_json) {
+		grp_json = json_object_new_object();
+		json_object_object_add(json, group_str, grp_json);
+	}
+
+	src_json = json_object_new_object();
+	json_object_object_add(grp_json, source_str, src_json);
+	json_object_int_add(src_json, "lastUsed", c_oil->cc.lastused / 100);
+	json_object_int_add(src_json, "packets", c_oil->cc.pktcnt);
+	json_object_int_add(src_json, "bytes", c_oil->cc.bytecnt);
+	json_object_int_add(src_json, "wrongIf", c_oil->cc.wrong_if);
+}
+
+/*
+ * "show ip mroute count" worker: print kernel counters for every
+ * dynamic and static mroute of this pim instance.  Text output is
+ * collected in a ttable; JSON output is filled in directly by the
+ * per-oil helper.
+ */
+void show_mroute_count(struct pim_instance *pim, struct vty *vty,
+		       json_object *json)
+{
+	struct ttable *tbl = NULL;
+	struct channel_oil *oil;
+	struct static_route *srt;
+	struct listnode *node;
+	char *rendered;
+
+	if (json == NULL) {
+		vty_out(vty, "\n");
+
+		/* Prepare table. */
+		tbl = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+		ttable_add_row(tbl,
+			       "Source|Group|LastUsed|Packets|Bytes|WrongIf");
+		tbl->style.cell.rpad = 2;
+		tbl->style.corner = '+';
+		ttable_restyle(tbl);
+	}
+
+	/* dynamic (PIM/IGMP) entries first, static entries after */
+	frr_each (rb_pim_oil, &pim->channel_oil_head, oil)
+		show_mroute_count_per_channel_oil(oil, json, tbl);
+
+	for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, srt))
+		show_mroute_count_per_channel_oil(&srt->c_oil, json, tbl);
+
+	/* Dump the generated table. */
+	if (json == NULL) {
+		rendered = ttable_dump(tbl, "\n");
+		vty_out(vty, "%s\n", rendered);
+		XFREE(MTYPE_TMP, rendered);
+		ttable_del(tbl);
+	}
+}
+
+/*
+ * "show ip mroute summary" worker: classify every dynamic and static
+ * mroute as (*,G) vs (S,G) and installed (hw) vs not-installed (sw),
+ * then print the installed/total tallies as text or JSON.
+ */
+void show_mroute_summary(struct pim_instance *pim, struct vty *vty,
+			 json_object *json)
+{
+	uint32_t starg_sw_mroute_cnt = 0;
+	uint32_t sg_sw_mroute_cnt = 0;
+	uint32_t starg_hw_mroute_cnt = 0;
+	uint32_t sg_hw_mroute_cnt = 0;
+	struct static_route *s_route;
+	struct channel_oil *c_oil;
+	struct listnode *node;
+	json_object *json_starg;
+	json_object *json_sg;
+	uint32_t *bucket;
+
+	if (!json)
+		vty_out(vty, "Mroute Type Installed/Total\n");
+
+	/* pick the counter each entry belongs to, then bump it */
+	frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) {
+		if (c_oil->installed)
+			bucket = pim_addr_is_any(*oil_origin(c_oil))
+					 ? &starg_hw_mroute_cnt
+					 : &sg_hw_mroute_cnt;
+		else
+			bucket = pim_addr_is_any(*oil_origin(c_oil))
+					 ? &starg_sw_mroute_cnt
+					 : &sg_sw_mroute_cnt;
+		(*bucket)++;
+	}
+
+	for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) {
+		struct channel_oil *oil = &s_route->c_oil;
+
+		if (oil->installed)
+			bucket = pim_addr_is_any(*oil_origin(oil))
+					 ? &starg_hw_mroute_cnt
+					 : &sg_hw_mroute_cnt;
+		else
+			bucket = pim_addr_is_any(*oil_origin(oil))
+					 ? &starg_sw_mroute_cnt
+					 : &sg_sw_mroute_cnt;
+		(*bucket)++;
+	}
+
+	if (!json) {
+		vty_out(vty, "%-20s %u/%u\n", "(*, G)", starg_hw_mroute_cnt,
+			starg_sw_mroute_cnt + starg_hw_mroute_cnt);
+		vty_out(vty, "%-20s %u/%u\n", "(S, G)", sg_hw_mroute_cnt,
+			sg_sw_mroute_cnt + sg_hw_mroute_cnt);
+		vty_out(vty, "------\n");
+		vty_out(vty, "%-20s %u/%u\n", "Total",
+			(starg_hw_mroute_cnt + sg_hw_mroute_cnt),
+			(starg_sw_mroute_cnt + starg_hw_mroute_cnt +
+			 sg_sw_mroute_cnt + sg_hw_mroute_cnt));
+		return;
+	}
+
+	/* (*,G) route details */
+	json_starg = json_object_new_object();
+	json_object_object_add(json, "wildcardGroup", json_starg);
+
+	json_object_int_add(json_starg, "installed", starg_hw_mroute_cnt);
+	json_object_int_add(json_starg, "total",
+			    starg_sw_mroute_cnt + starg_hw_mroute_cnt);
+
+	/* (S, G) route details */
+	json_sg = json_object_new_object();
+	json_object_object_add(json, "sourceGroup", json_sg);
+
+	json_object_int_add(json_sg, "installed", sg_hw_mroute_cnt);
+	json_object_int_add(json_sg, "total",
+			    sg_sw_mroute_cnt + sg_hw_mroute_cnt);
+
+	json_object_int_add(json, "totalNumOfInstalledMroutes",
+			    starg_hw_mroute_cnt + sg_hw_mroute_cnt);
+	json_object_int_add(json, "totalNumOfMroutes",
+			    starg_sw_mroute_cnt + starg_hw_mroute_cnt +
+				    sg_sw_mroute_cnt + sg_hw_mroute_cnt);
+}
+
+/*
+ * "clear ip mroute count": snapshot the current kernel counters of
+ * every installed dynamic and static mroute into the orig* fields, so
+ * subsequent "show ip mroute count" displays deltas from this point.
+ */
+int clear_ip_mroute_count_command(struct vty *vty, const char *name)
+{
+	struct vrf *v = pim_cmd_lookup(vty, name);
+	struct static_route *s_route;
+	struct channel_oil *oil;
+	struct pim_instance *pim;
+	struct listnode *node;
+
+	if (v == NULL)
+		return CMD_WARNING;
+
+	pim = v->info;
+
+	frr_each (rb_pim_oil, &pim->channel_oil_head, oil) {
+		if (!oil->installed)
+			continue;
+
+		pim_mroute_update_counters(oil);
+		oil->cc.origpktcnt = oil->cc.pktcnt;
+		oil->cc.origbytecnt = oil->cc.bytecnt;
+		oil->cc.origwrong_if = oil->cc.wrong_if;
+	}
+
+	for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) {
+		struct channel_oil *soil = &s_route->c_oil;
+
+		if (!soil->installed)
+			continue;
+
+		pim_mroute_update_counters(soil);
+
+		soil->cc.origpktcnt = soil->cc.pktcnt;
+		soil->cc.origbytecnt = soil->cc.bytecnt;
+		soil->cc.origwrong_if = soil->cc.wrong_if;
+	}
+	return CMD_SUCCESS;
+}
+
+/*
+ * Resolve a VRF for a CLI command: by name when one was given, the
+ * default VRF otherwise.  On failure a warning is printed to the vty
+ * and NULL is returned.
+ */
+struct vrf *pim_cmd_lookup(struct vty *vty, const char *name)
+{
+	struct vrf *vrf = name ? vrf_lookup_by_name(name)
+			       : vrf_lookup_by_id(VRF_DEFAULT);
+
+	if (vrf == NULL)
+		vty_out(vty, "Specified VRF: %s does not exist\n", name);
+
+	return vrf;
+}
+
+/*
+ * "clear ip mroute" worker: tear down all multicast forwarding state of
+ * a pim instance -- every per-interface ifchannel, every IGMP/MLD group
+ * and finally every upstream entry.
+ */
+void clear_mroute(struct pim_instance *pim)
+{
+	struct pim_upstream *up;
+	struct interface *ifp;
+
+	/* scan interfaces */
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		struct pim_interface *pim_ifp = ifp->info;
+		struct pim_ifchannel *ch;
+
+		/* interface not enabled for pim */
+		if (!pim_ifp)
+			continue;
+
+		/* deleting all ifchannels; always re-read the root since
+		 * pim_ifchannel_delete() removes it from the tree
+		 */
+		while (!RB_EMPTY(pim_ifchannel_rb, &pim_ifp->ifchannel_rb)) {
+			ch = RB_ROOT(pim_ifchannel_rb, &pim_ifp->ifchannel_rb);
+
+			pim_ifchannel_delete(ch);
+		}
+
+#if PIM_IPV == 4
+		/* clean up all igmp groups; drain from the head because
+		 * igmp_group_delete() unlinks the node
+		 */
+		struct gm_group *grp;
+
+		if (pim_ifp->gm_group_list) {
+			while (pim_ifp->gm_group_list->count) {
+				grp = listnode_head(pim_ifp->gm_group_list);
+				igmp_group_delete(grp);
+			}
+		}
+#else
+		/* IPv6: MLD state hangs off a single gm_if; presumably
+		 * gm_group_delete() drops all groups on it -- see pim6_mld
+		 */
+		struct gm_if *gm_ifp;
+
+		gm_ifp = pim_ifp->mld;
+		if (gm_ifp)
+			gm_group_delete(gm_ifp);
+#endif
+	}
+
+	/* clean up all upstreams*/
+	while ((up = rb_pim_upstream_first(&pim->upstream_head)))
+		pim_upstream_del(pim, up, __func__);
+}
+
+/*
+ * Reset the instance-wide BSM counters and the per-interface BSM error
+ * counters of a pim instance.
+ */
+void clear_pim_statistics(struct pim_instance *pim)
+{
+	struct interface *ifp;
+
+	/* instance-level bootstrap message counters */
+	pim->bsm_rcvd = 0;
+	pim->bsm_sent = 0;
+	pim->bsm_dropped = 0;
+
+	/* per-interface BSM drop/miss counters */
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		struct pim_interface *pif = ifp->info;
+
+		if (pif == NULL)
+			continue;
+
+		pif->pim_ifstat_bsm_cfg_miss = 0;
+		pif->pim_ifstat_ucast_bsm_cfg_miss = 0;
+		pif->pim_ifstat_bsm_invalid_sz = 0;
+	}
+}
+
+/*
+ * "clear ip pim interface traffic": zero every per-interface PIM (and,
+ * for IPv4, IGMP) protocol packet counter in the given VRF.
+ */
+int clear_pim_interface_traffic(const char *vrf, struct vty *vty)
+{
+	struct vrf *v = pim_cmd_lookup(vty, vrf);
+	struct interface *ifp;
+
+	if (v == NULL)
+		return CMD_WARNING;
+
+	FOR_ALL_INTERFACES (v, ifp) {
+		struct pim_interface *pif = ifp->info;
+
+		if (pif == NULL)
+			continue;
+
+		/* hello */
+		pif->pim_ifstat_hello_recv = 0;
+		pif->pim_ifstat_hello_sent = 0;
+		/* join / prune */
+		pif->pim_ifstat_join_recv = 0;
+		pif->pim_ifstat_join_send = 0;
+		pif->pim_ifstat_prune_recv = 0;
+		pif->pim_ifstat_prune_send = 0;
+		/* register / register-stop */
+		pif->pim_ifstat_reg_recv = 0;
+		pif->pim_ifstat_reg_send = 0;
+		pif->pim_ifstat_reg_stop_recv = 0;
+		pif->pim_ifstat_reg_stop_send = 0;
+		/* assert and bootstrap */
+		pif->pim_ifstat_assert_recv = 0;
+		pif->pim_ifstat_assert_send = 0;
+		pif->pim_ifstat_bsm_rx = 0;
+		pif->pim_ifstat_bsm_tx = 0;
+#if PIM_IPV == 4
+		pif->igmp_ifstat_joins_sent = 0;
+		pif->igmp_ifstat_joins_failed = 0;
+		pif->igmp_peak_group_count = 0;
+#endif
+	}
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "debug pim": turn on the core PIM debug categories (events, packets,
+ * trace) plus MSDP, BSM and VXLAN debugging in one shot.
+ */
+int pim_debug_pim_cmd(void)
+{
+	PIM_DO_DEBUG_PIM_EVENTS;
+	PIM_DO_DEBUG_PIM_PACKETS;
+	PIM_DO_DEBUG_PIM_TRACE;
+	PIM_DO_DEBUG_MSDP_EVENTS;
+	PIM_DO_DEBUG_MSDP_PACKETS;
+	PIM_DO_DEBUG_BSM;
+	PIM_DO_DEBUG_VXLAN;
+	return CMD_SUCCESS;
+}
+
+/*
+ * "no debug pim": clear the categories enabled by pim_debug_pim_cmd()
+ * and additionally the packet-dump flags (which have no counterpart in
+ * the enable path).
+ */
+int pim_no_debug_pim_cmd(void)
+{
+	PIM_DONT_DEBUG_PIM_EVENTS;
+	PIM_DONT_DEBUG_PIM_PACKETS;
+	PIM_DONT_DEBUG_PIM_TRACE;
+	PIM_DONT_DEBUG_MSDP_EVENTS;
+	PIM_DONT_DEBUG_MSDP_PACKETS;
+
+	PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
+	PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
+	PIM_DONT_DEBUG_BSM;
+	PIM_DONT_DEBUG_VXLAN;
+	return CMD_SUCCESS;
+}
+
+/*
+ * "debug pim packets [hello|joins|register]": enable debugging for the
+ * selected packet sub-type, or for all PIM packets when no sub-type was
+ * given.  Exactly one category is enabled per invocation; hello wins
+ * over joins, which wins over registers.
+ */
+int pim_debug_pim_packets_cmd(const char *hello, const char *joins,
+			      const char *registers, struct vty *vty)
+{
+	if (hello) {
+		PIM_DO_DEBUG_PIM_HELLO;
+		vty_out(vty, "PIM Hello debugging is on\n");
+		return CMD_SUCCESS;
+	}
+
+	if (joins) {
+		PIM_DO_DEBUG_PIM_J_P;
+		vty_out(vty, "PIM Join/Prune debugging is on\n");
+		return CMD_SUCCESS;
+	}
+
+	if (registers) {
+		PIM_DO_DEBUG_PIM_REG;
+		vty_out(vty, "PIM Register debugging is on\n");
+		return CMD_SUCCESS;
+	}
+
+	PIM_DO_DEBUG_PIM_PACKETS;
+	vty_out(vty, "PIM Packet debugging is on\n");
+	return CMD_SUCCESS;
+}
+
+/*
+ * "no debug pim packets [hello|joins|register]": disable debugging for
+ * the selected packet sub-type, or for all PIM packets when no sub-type
+ * was given.  Mirrors pim_debug_pim_packets_cmd().
+ */
+int pim_no_debug_pim_packets_cmd(const char *hello, const char *joins,
+				 const char *registers, struct vty *vty)
+{
+	if (hello) {
+		PIM_DONT_DEBUG_PIM_HELLO;
+		vty_out(vty, "PIM Hello debugging is off\n");
+		return CMD_SUCCESS;
+	}
+
+	if (joins) {
+		PIM_DONT_DEBUG_PIM_J_P;
+		vty_out(vty, "PIM Join/Prune debugging is off\n");
+		return CMD_SUCCESS;
+	}
+
+	if (registers) {
+		PIM_DONT_DEBUG_PIM_REG;
+		vty_out(vty, "PIM Register debugging is off\n");
+		return CMD_SUCCESS;
+	}
+
+	PIM_DONT_DEBUG_PIM_PACKETS;
+	vty_out(vty, "PIM Packet debugging is off\n");
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip pim rpf [vrf X]": resolve the vrf (defaulting to the default
+ * VRF), then delegate to pim_show_rpf(), wrapping the output in a JSON
+ * root object when requested.
+ */
+int pim_show_rpf_helper(const char *vrf, struct vty *vty, bool json)
+{
+	json_object *json_root = NULL;
+	struct pim_instance *instance;
+	struct vrf *lookup;
+
+	lookup = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+	if (lookup == NULL)
+		return CMD_WARNING;
+
+	instance = lookup->info;
+	if (instance == NULL) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	if (json)
+		json_root = json_object_new_object();
+
+	pim_show_rpf(instance, vty, json_root);
+
+	/* vty_json() prints and frees the root object */
+	if (json)
+		vty_json(vty, json_root);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip pim vrf all rpf": run pim_show_rpf() for every configured
+ * VRF, either printing a "VRF:" banner per instance or collecting one
+ * JSON sub-object per VRF under a shared root.
+ */
+int pim_show_rpf_vrf_all_helper(struct vty *vty, bool json)
+{
+	json_object *root = NULL;
+	json_object *vrf_json = NULL;
+	struct vrf *vrf_iter;
+
+	if (json)
+		root = json_object_new_object();
+
+	RB_FOREACH (vrf_iter, vrf_name_head, &vrfs_by_name) {
+		if (json)
+			vrf_json = json_object_new_object();
+		else
+			vty_out(vty, "VRF: %s\n", vrf_iter->name);
+
+		pim_show_rpf(vrf_iter->info, vty, vrf_json);
+
+		if (json)
+			json_object_object_add(root, vrf_iter->name,
+					       vrf_json);
+	}
+
+	if (json)
+		vty_json(vty, root);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip pim rp-info [group] [vrf X]": optionally narrow the display
+ * to a group prefix (masked copy of 'group'), then delegate to
+ * pim_rp_show_information().
+ */
+int pim_show_rp_helper(const char *vrf, struct vty *vty, const char *group_str,
+		       const struct prefix *group, bool json)
+{
+	json_object *json_root = NULL;
+	struct prefix *range = NULL;
+	struct pim_instance *instance;
+	struct vrf *lookup;
+
+	lookup = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+	if (lookup == NULL)
+		return CMD_WARNING;
+
+	instance = lookup->info;
+	if (instance == NULL) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	/* a group argument turns into a masked prefix filter */
+	if (group_str) {
+		range = prefix_new();
+		prefix_copy(range, group);
+		apply_mask(range);
+	}
+
+	if (json)
+		json_root = json_object_new_object();
+
+	pim_rp_show_information(instance, range, vty, json_root);
+
+	if (json)
+		vty_json(vty, json_root);
+
+	prefix_free(&range);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip pim vrf all rp-info [group]": as pim_show_rp_helper() but
+ * iterating every VRF, with per-VRF banners (text) or per-VRF JSON
+ * sub-objects.
+ */
+int pim_show_rp_vrf_all_helper(struct vty *vty, const char *group_str,
+			       const struct prefix *group, bool json)
+{
+	json_object *root = NULL;
+	json_object *vrf_json = NULL;
+	struct prefix *range = NULL;
+	struct vrf *vrf_iter;
+
+	/* a group argument turns into a masked prefix filter */
+	if (group_str) {
+		range = prefix_new();
+		prefix_copy(range, group);
+		apply_mask(range);
+	}
+
+	if (json)
+		root = json_object_new_object();
+
+	RB_FOREACH (vrf_iter, vrf_name_head, &vrfs_by_name) {
+		if (json)
+			vrf_json = json_object_new_object();
+		else
+			vty_out(vty, "VRF: %s\n", vrf_iter->name);
+
+		pim_rp_show_information(vrf_iter->info, range, vty, vrf_json);
+
+		if (json)
+			json_object_object_add(root, vrf_iter->name,
+					       vrf_json);
+	}
+
+	if (json)
+		vty_json(vty, root);
+
+	prefix_free(&range);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip pim secondary [vrf X]": resolve the vrf and print the
+ * secondary addresses of all pim neighbors.
+ */
+int pim_show_secondary_helper(const char *vrf, struct vty *vty)
+{
+	struct pim_instance *instance;
+	struct vrf *lookup;
+
+	lookup = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+	if (lookup == NULL)
+		return CMD_WARNING;
+
+	instance = lookup->info;
+	if (instance == NULL) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	pim_show_neighbors_secondary(instance, vty);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip pim statistics [WORD] [vrf X]": resolve the vrf and print
+ * instance statistics, optionally filtered by interface name 'word'
+ * (NULL means no filter).
+ *
+ * Fix: collapsed the redundant "if (word) f(..., word, ...) else
+ * f(..., NULL, ...)" -- both branches pass the same value, since word
+ * is NULL exactly when the else branch ran.
+ */
+int pim_show_statistics_helper(const char *vrf, struct vty *vty,
+			       const char *word, bool uj)
+{
+	struct pim_instance *pim;
+	struct vrf *v;
+
+	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+	if (!v)
+		return CMD_WARNING;
+
+	pim = v->info;
+
+	if (!pim) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	/* word == NULL already means "no filter" downstream */
+	pim_show_statistics(pim, vty, word, uj);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip pim upstream [S|G [G]] [vrf X]": build the (S,G) filter from
+ * the one- or two-address CLI form (a single address is a group) and
+ * delegate to pim_show_upstream().
+ */
+int pim_show_upstream_helper(const char *vrf, struct vty *vty, pim_addr s_or_g,
+			     pim_addr g, bool json)
+{
+	json_object *json_root = NULL;
+	pim_sgaddr sg = {0};
+	struct pim_instance *instance;
+	struct vrf *lookup;
+
+	lookup = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+	if (lookup == NULL) {
+		vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf);
+		return CMD_WARNING;
+	}
+
+	instance = lookup->info;
+	if (instance == NULL) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	if (json)
+		json_root = json_object_new_object();
+
+	/* one address given -> group filter; two -> (S,G) filter */
+	if (!pim_addr_is_any(s_or_g)) {
+		if (pim_addr_is_any(g)) {
+			sg.grp = s_or_g;
+		} else {
+			sg.src = s_or_g;
+			sg.grp = g;
+		}
+	}
+
+	pim_show_upstream(instance, vty, &sg, json_root);
+
+	if (json)
+		vty_json(vty, json_root);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip pim vrf all upstream": run pim_show_upstream() with no
+ * (S,G) filter for every configured VRF.
+ */
+int pim_show_upstream_vrf_all_helper(struct vty *vty, bool json)
+{
+	json_object *root = NULL;
+	json_object *vrf_json = NULL;
+	pim_sgaddr sg = {0};
+	struct vrf *vrf_iter;
+
+	if (json)
+		root = json_object_new_object();
+
+	RB_FOREACH (vrf_iter, vrf_name_head, &vrfs_by_name) {
+		if (json)
+			vrf_json = json_object_new_object();
+		else
+			vty_out(vty, "VRF: %s\n", vrf_iter->name);
+
+		pim_show_upstream(vrf_iter->info, vty, &sg, vrf_json);
+
+		if (json)
+			json_object_object_add(root, vrf_iter->name,
+					       vrf_json);
+	}
+
+	if (json)
+		vty_json(vty, root);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip pim upstream-join-desired [vrf X]": resolve the vrf and
+ * print the join-desired evaluation for every upstream.
+ */
+int pim_show_upstream_join_desired_helper(const char *vrf, struct vty *vty,
+					  bool uj)
+{
+	struct pim_instance *instance;
+	struct vrf *lookup;
+
+	lookup = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+	if (lookup == NULL)
+		return CMD_WARNING;
+
+	instance = lookup->info;
+	if (instance == NULL) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	pim_show_join_desired(instance, vty, uj);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip pim upstream-rpf [vrf X]": resolve the vrf and print the
+ * RPF information of every upstream.
+ */
+int pim_show_upstream_rpf_helper(const char *vrf, struct vty *vty, bool uj)
+{
+	struct pim_instance *instance;
+	struct vrf *lookup;
+
+	lookup = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+	if (lookup == NULL)
+		return CMD_WARNING;
+
+	instance = lookup->info;
+	if (instance == NULL) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	pim_show_upstream_rpf(instance, vty, uj);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip pim state [S|G [G]] [vrf X]": resolve the vrf and delegate
+ * to pim_show_state() with the raw address strings as filters.
+ */
+int pim_show_state_helper(const char *vrf, struct vty *vty,
+			  const char *s_or_g_str, const char *g_str, bool json)
+{
+	json_object *json_root = NULL;
+	struct pim_instance *instance;
+	struct vrf *lookup;
+
+	lookup = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+	if (lookup == NULL)
+		return CMD_WARNING;
+
+	instance = lookup->info;
+	if (instance == NULL) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	if (json)
+		json_root = json_object_new_object();
+
+	pim_show_state(instance, vty, s_or_g_str, g_str, json_root);
+
+	if (json)
+		vty_json(vty, json_root);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip pim vrf all state [S|G [G]]": run pim_show_state() for every
+ * configured VRF with the same address-string filters.
+ */
+int pim_show_state_vrf_all_helper(struct vty *vty, const char *s_or_g_str,
+				  const char *g_str, bool json)
+{
+	json_object *root = NULL;
+	json_object *vrf_json = NULL;
+	struct vrf *vrf_iter;
+
+	if (json)
+		root = json_object_new_object();
+
+	RB_FOREACH (vrf_iter, vrf_name_head, &vrfs_by_name) {
+		if (json)
+			vrf_json = json_object_new_object();
+		else
+			vty_out(vty, "VRF: %s\n", vrf_iter->name);
+
+		pim_show_state(vrf_iter->info, vty, s_or_g_str, g_str,
+			       vrf_json);
+
+		if (json)
+			json_object_object_add(root, vrf_iter->name,
+					       vrf_json);
+	}
+
+	if (json)
+		vty_json(vty, root);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip multicast [vrf X]": resolve the vrf and print the global
+ * multicast state of its pim instance.
+ */
+int pim_show_multicast_helper(const char *vrf, struct vty *vty)
+{
+	struct pim_instance *instance;
+	struct vrf *lookup;
+
+	lookup = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+	if (lookup == NULL)
+		return CMD_WARNING;
+
+	instance = lookup->info;
+	if (instance == NULL) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	pim_cmd_show_ip_multicast_helper(instance, vty);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip multicast vrf all": print the global multicast state of
+ * every configured VRF, each preceded by a "VRF:" banner.
+ */
+int pim_show_multicast_vrf_all_helper(struct vty *vty)
+{
+	struct vrf *vrf_iter;
+
+	RB_FOREACH (vrf_iter, vrf_name_head, &vrfs_by_name) {
+		vty_out(vty, "VRF: %s\n", vrf_iter->name);
+		pim_cmd_show_ip_multicast_helper(vrf_iter->info, vty);
+	}
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip multicast count [vrf X]": resolve the vrf and print the
+ * per-interface multicast packet/byte counters.
+ */
+int pim_show_multicast_count_helper(const char *vrf, struct vty *vty, bool json)
+{
+	json_object *json_root = NULL;
+	struct pim_instance *instance;
+	struct vrf *lookup;
+
+	lookup = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+	if (lookup == NULL)
+		return CMD_WARNING;
+
+	instance = lookup->info;
+	if (instance == NULL) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	if (json)
+		json_root = json_object_new_object();
+
+	show_multicast_interfaces(instance, vty, json_root);
+
+	if (json)
+		vty_json(vty, json_root);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip multicast count vrf all": print per-interface multicast
+ * counters for every configured VRF.
+ */
+int pim_show_multicast_count_vrf_all_helper(struct vty *vty, bool json)
+{
+	json_object *root = NULL;
+	json_object *vrf_json = NULL;
+	struct vrf *vrf_iter;
+
+	if (json)
+		root = json_object_new_object();
+
+	RB_FOREACH (vrf_iter, vrf_name_head, &vrfs_by_name) {
+		if (json)
+			vrf_json = json_object_new_object();
+		else
+			vty_out(vty, "VRF: %s\n", vrf_iter->name);
+
+		show_multicast_interfaces(vrf_iter->info, vty, vrf_json);
+
+		if (json)
+			json_object_object_add(root, vrf_iter->name,
+					       vrf_json);
+	}
+
+	if (json)
+		vty_json(vty, root);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip mroute [S|G [G]] [fill] [vrf X]": build the (S,G) filter
+ * from the one- or two-address CLI form (a single address is a group)
+ * and delegate to show_mroute().
+ */
+int pim_show_mroute_helper(const char *vrf, struct vty *vty, pim_addr s_or_g,
+			   pim_addr g, bool fill, bool json)
+{
+	json_object *json_root = NULL;
+	pim_sgaddr sg = {0};
+	struct pim_instance *instance;
+	struct vrf *lookup;
+
+	lookup = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+	if (lookup == NULL)
+		return CMD_WARNING;
+
+	instance = lookup->info;
+	if (instance == NULL) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	if (json)
+		json_root = json_object_new_object();
+
+	/* one address given -> group filter; two -> (S,G) filter */
+	if (!pim_addr_is_any(s_or_g)) {
+		if (pim_addr_is_any(g)) {
+			sg.grp = s_or_g;
+		} else {
+			sg.src = s_or_g;
+			sg.grp = g;
+		}
+	}
+
+	show_mroute(instance, vty, &sg, fill, json_root);
+
+	if (json)
+		vty_json(vty, json_root);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip mroute vrf all [fill]": dump the full multicast routing
+ * table (no (S,G) filter) for every configured VRF.
+ */
+int pim_show_mroute_vrf_all_helper(struct vty *vty, bool fill, bool json)
+{
+	json_object *root = NULL;
+	json_object *vrf_json = NULL;
+	pim_sgaddr sg = {0};
+	struct vrf *vrf_iter;
+
+	if (json)
+		root = json_object_new_object();
+
+	RB_FOREACH (vrf_iter, vrf_name_head, &vrfs_by_name) {
+		if (json)
+			vrf_json = json_object_new_object();
+		else
+			vty_out(vty, "VRF: %s\n", vrf_iter->name);
+
+		show_mroute(vrf_iter->info, vty, &sg, fill, vrf_json);
+
+		if (json)
+			json_object_object_add(root, vrf_iter->name,
+					       vrf_json);
+	}
+
+	if (json)
+		vty_json(vty, root);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip mroute count [vrf X]": resolve the vrf and print kernel
+ * counters for every mroute of its pim instance.
+ */
+int pim_show_mroute_count_helper(const char *vrf, struct vty *vty, bool json)
+{
+	json_object *json_root = NULL;
+	struct pim_instance *instance;
+	struct vrf *lookup;
+
+	lookup = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+	if (lookup == NULL)
+		return CMD_WARNING;
+
+	instance = lookup->info;
+	if (instance == NULL) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	if (json)
+		json_root = json_object_new_object();
+
+	show_mroute_count(instance, vty, json_root);
+
+	if (json)
+		vty_json(vty, json_root);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip mroute count vrf all": print kernel mroute counters for
+ * every configured VRF.
+ */
+int pim_show_mroute_count_vrf_all_helper(struct vty *vty, bool json)
+{
+	json_object *root = NULL;
+	json_object *vrf_json = NULL;
+	struct vrf *vrf_iter;
+
+	if (json)
+		root = json_object_new_object();
+
+	RB_FOREACH (vrf_iter, vrf_name_head, &vrfs_by_name) {
+		if (json)
+			vrf_json = json_object_new_object();
+		else
+			vty_out(vty, "VRF: %s\n", vrf_iter->name);
+
+		show_mroute_count(vrf_iter->info, vty, vrf_json);
+
+		if (json)
+			json_object_object_add(root, vrf_iter->name,
+					       vrf_json);
+	}
+
+	if (json)
+		vty_json(vty, root);
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show ip mroute summary [vrf X]": resolve the vrf and print the
+ * installed/total mroute tallies for its pim instance.
+ */
+int pim_show_mroute_summary_helper(const char *vrf, struct vty *vty, bool json)
+{
+	json_object *json_root = NULL;
+	struct pim_instance *instance;
+	struct vrf *lookup;
+
+	lookup = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+	if (lookup == NULL)
+		return CMD_WARNING;
+
+	instance = lookup->info;
+	if (instance == NULL) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	if (json)
+		json_root = json_object_new_object();
+
+	show_mroute_summary(instance, vty, json_root);
+
+	if (json)
+		vty_json(vty, json_root);
+
+	return CMD_SUCCESS;
+}
+
+/* "show ip mroute vrf all summary": per-VRF summary of mroute entries. */
+int pim_show_mroute_summary_vrf_all_helper(struct vty *vty, bool json)
+{
+	struct vrf *vrf;
+	json_object *json_parent = NULL;
+
+	if (json)
+		json_parent = json_object_new_object();
+
+	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+		json_object *json_vrf = NULL;
+
+		if (json)
+			json_vrf = json_object_new_object();
+		else
+			vty_out(vty, "VRF: %s\n", vrf->name);
+
+		show_mroute_summary(vrf->info, vty, json_vrf);
+
+		if (json)
+			json_object_object_add(json_parent, vrf->name,
+					       json_vrf);
+	}
+
+	if (json)
+		vty_json(vty, json_parent);
+
+	return CMD_SUCCESS;
+}
+
+/* "show ip pim interface traffic": print Rx/Tx counters for every PIM
+ * control message type (hello/join/prune/register/register-stop/assert/
+ * BSM) on all PIM-enabled interfaces of this instance, as a text table
+ * or, when uj is set, as one JSON object per interface keyed by name.
+ */
+void pim_show_interface_traffic(struct pim_instance *pim, struct vty *vty,
+				bool uj)
+{
+	struct interface *ifp = NULL;
+	struct pim_interface *pim_ifp = NULL;
+	json_object *json = NULL;
+	json_object *json_row = NULL;
+
+	if (uj)
+		json = json_object_new_object();
+	else {
+		/* Two header rows plus a separator line for the table. */
+		vty_out(vty, "\n");
+		vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s%-17s\n",
+			"Interface", " HELLO", " JOIN",
+			" PRUNE", " REGISTER", "REGISTER-STOP",
+			" ASSERT", " BSM");
+		vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s%-17s\n", "",
+			" Rx/Tx", " Rx/Tx", " Rx/Tx",
+			" Rx/Tx", " Rx/Tx", " Rx/Tx", " Rx/Tx");
+		vty_out(vty,
+			"---------------------------------------------------------------------------------------------------------------\n");
+	}
+
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		pim_ifp = ifp->info;
+
+		/* Skip interfaces that are not PIM-enabled. */
+		if (!pim_ifp)
+			continue;
+
+		if (uj) {
+			json_row = json_object_new_object();
+			json_object_pim_ifp_add(json_row, ifp);
+			json_object_int_add(json_row, "helloRx",
+					    pim_ifp->pim_ifstat_hello_recv);
+			json_object_int_add(json_row, "helloTx",
+					    pim_ifp->pim_ifstat_hello_sent);
+			json_object_int_add(json_row, "joinRx",
+					    pim_ifp->pim_ifstat_join_recv);
+			json_object_int_add(json_row, "joinTx",
+					    pim_ifp->pim_ifstat_join_send);
+			json_object_int_add(json_row, "pruneRx",
+					    pim_ifp->pim_ifstat_prune_recv);
+			json_object_int_add(json_row, "pruneTx",
+					    pim_ifp->pim_ifstat_prune_send);
+			json_object_int_add(json_row, "registerRx",
+					    pim_ifp->pim_ifstat_reg_recv);
+			json_object_int_add(json_row, "registerTx",
+					    pim_ifp->pim_ifstat_reg_send);
+			json_object_int_add(json_row, "registerStopRx",
+					    pim_ifp->pim_ifstat_reg_stop_recv);
+			json_object_int_add(json_row, "registerStopTx",
+					    pim_ifp->pim_ifstat_reg_stop_send);
+			json_object_int_add(json_row, "assertRx",
+					    pim_ifp->pim_ifstat_assert_recv);
+			json_object_int_add(json_row, "assertTx",
+					    pim_ifp->pim_ifstat_assert_send);
+			json_object_int_add(json_row, "bsmRx",
+					    pim_ifp->pim_ifstat_bsm_rx);
+			json_object_int_add(json_row, "bsmTx",
+					    pim_ifp->pim_ifstat_bsm_tx);
+			json_object_object_add(json, ifp->name, json_row);
+		} else {
+			/* BSM counters are 64-bit; all others are 32-bit. */
+			vty_out(vty,
+				"%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7" PRIu64
+				"/%-7" PRIu64 "\n",
+				ifp->name, pim_ifp->pim_ifstat_hello_recv,
+				pim_ifp->pim_ifstat_hello_sent,
+				pim_ifp->pim_ifstat_join_recv,
+				pim_ifp->pim_ifstat_join_send,
+				pim_ifp->pim_ifstat_prune_recv,
+				pim_ifp->pim_ifstat_prune_send,
+				pim_ifp->pim_ifstat_reg_recv,
+				pim_ifp->pim_ifstat_reg_send,
+				pim_ifp->pim_ifstat_reg_stop_recv,
+				pim_ifp->pim_ifstat_reg_stop_send,
+				pim_ifp->pim_ifstat_assert_recv,
+				pim_ifp->pim_ifstat_assert_send,
+				pim_ifp->pim_ifstat_bsm_rx,
+				pim_ifp->pim_ifstat_bsm_tx);
+		}
+	}
+	if (uj)
+		vty_json(vty, json);
+}
+
+/* Like pim_show_interface_traffic() but restricted to the interface named
+ * by ifname; prints "%% No such interface" in text mode when no
+ * PIM-enabled interface with that name exists.
+ */
+void pim_show_interface_traffic_single(struct pim_instance *pim,
+				       struct vty *vty, const char *ifname,
+				       bool uj)
+{
+	struct interface *ifp = NULL;
+	struct pim_interface *pim_ifp = NULL;
+	json_object *json = NULL;
+	json_object *json_row = NULL;
+	uint8_t found_ifname = 0;
+
+	if (uj)
+		json = json_object_new_object();
+	else {
+		/* Table header; column widths differ slightly from the
+		 * all-interfaces variant.
+		 */
+		vty_out(vty, "\n");
+		vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s%-17s\n",
+			"Interface", " HELLO", " JOIN", " PRUNE",
+			" REGISTER", " REGISTER-STOP", " ASSERT",
+			" BSM");
+		vty_out(vty, "%-14s%-18s%-17s%-17s%-17s%-17s%-17s%-17s\n", "",
+			" Rx/Tx", " Rx/Tx", " Rx/Tx", " Rx/Tx",
+			" Rx/Tx", " Rx/Tx", " Rx/Tx");
+		vty_out(vty,
+			"-------------------------------------------------------------------------------------------------------------------------------\n");
+	}
+
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		/* Only the requested interface is reported. */
+		if (strcmp(ifname, ifp->name))
+			continue;
+
+		pim_ifp = ifp->info;
+
+		/* Name matched but PIM is not enabled on it. */
+		if (!pim_ifp)
+			continue;
+
+		found_ifname = 1;
+		if (uj) {
+			json_row = json_object_new_object();
+			json_object_pim_ifp_add(json_row, ifp);
+			json_object_int_add(json_row, "helloRx",
+					    pim_ifp->pim_ifstat_hello_recv);
+			json_object_int_add(json_row, "helloTx",
+					    pim_ifp->pim_ifstat_hello_sent);
+			json_object_int_add(json_row, "joinRx",
+					    pim_ifp->pim_ifstat_join_recv);
+			json_object_int_add(json_row, "joinTx",
+					    pim_ifp->pim_ifstat_join_send);
+			json_object_int_add(json_row, "pruneRx",
+					    pim_ifp->pim_ifstat_prune_recv);
+			json_object_int_add(json_row, "pruneTx",
+					    pim_ifp->pim_ifstat_prune_send);
+			json_object_int_add(json_row, "registerRx",
+					    pim_ifp->pim_ifstat_reg_recv);
+			json_object_int_add(json_row, "registerTx",
+					    pim_ifp->pim_ifstat_reg_send);
+			json_object_int_add(json_row, "registerStopRx",
+					    pim_ifp->pim_ifstat_reg_stop_recv);
+			json_object_int_add(json_row, "registerStopTx",
+					    pim_ifp->pim_ifstat_reg_stop_send);
+			json_object_int_add(json_row, "assertRx",
+					    pim_ifp->pim_ifstat_assert_recv);
+			json_object_int_add(json_row, "assertTx",
+					    pim_ifp->pim_ifstat_assert_send);
+			json_object_int_add(json_row, "bsmRx",
+					    pim_ifp->pim_ifstat_bsm_rx);
+			json_object_int_add(json_row, "bsmTx",
+					    pim_ifp->pim_ifstat_bsm_tx);
+
+			json_object_object_add(json, ifp->name, json_row);
+		} else {
+			/* BSM counters are 64-bit; all others are 32-bit. */
+			vty_out(vty,
+				"%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7" PRIu64
+				"/%-7" PRIu64 "\n",
+				ifp->name, pim_ifp->pim_ifstat_hello_recv,
+				pim_ifp->pim_ifstat_hello_sent,
+				pim_ifp->pim_ifstat_join_recv,
+				pim_ifp->pim_ifstat_join_send,
+				pim_ifp->pim_ifstat_prune_recv,
+				pim_ifp->pim_ifstat_prune_send,
+				pim_ifp->pim_ifstat_reg_recv,
+				pim_ifp->pim_ifstat_reg_send,
+				pim_ifp->pim_ifstat_reg_stop_recv,
+				pim_ifp->pim_ifstat_reg_stop_send,
+				pim_ifp->pim_ifstat_assert_recv,
+				pim_ifp->pim_ifstat_assert_send,
+				pim_ifp->pim_ifstat_bsm_rx,
+				pim_ifp->pim_ifstat_bsm_tx);
+		}
+	}
+	if (uj)
+		vty_json(vty, json);
+	else if (!found_ifname)
+		vty_out(vty, "%% No such interface\n");
+}
+
+/* CLI entry for "show ip pim interface traffic [WORD]": resolve the VRF
+ * (NULL means the default VRF), validate its pim instance and dispatch
+ * to the single-interface or all-interfaces display routine.
+ */
+int pim_show_interface_traffic_helper(const char *vrf, const char *if_name,
+				      struct vty *vty, bool uj)
+{
+	struct pim_instance *pim;
+	struct vrf *v;
+
+	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+	if (!v)
+		return CMD_WARNING;
+
+	pim = v->info;
+
+	if (!pim) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	/* Pass the NULL-checked instance instead of re-reading v->info. */
+	if (if_name)
+		pim_show_interface_traffic_single(pim, vty, if_name, uj);
+	else
+		pim_show_interface_traffic(pim, vty, uj);
+
+	return CMD_SUCCESS;
+}
+
+/* Drop all PIM neighbors on every PIM-enabled interface of @pim. */
+void clear_pim_interfaces(struct pim_instance *pim)
+{
+	struct interface *ifp;
+
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		if (!ifp->info)
+			continue;
+
+		pim_neighbor_delete_all(ifp, "interface cleared");
+	}
+}
+
+/* "show ip pim bsr": display this router's view of the elected bootstrap
+ * router for the global scope zone — BSR address, priority, current BSM
+ * fragment tag, election FSM state, uptime and time since the last BSM.
+ */
+void pim_show_bsr(struct pim_instance *pim, struct vty *vty, bool uj)
+{
+	char uptime[10];
+	char last_bsm_seen[10];
+	time_t now;
+	char bsr_state[20];
+	json_object *json = NULL;
+
+	/* No BSR elected yet: the stored timestamps are formatted as-is
+	 * rather than relative to the current time.
+	 */
+	if (pim_addr_is_any(pim->global_scope.current_bsr)) {
+		pim_time_uptime(uptime, sizeof(uptime),
+				pim->global_scope.current_bsr_first_ts);
+		pim_time_uptime(last_bsm_seen, sizeof(last_bsm_seen),
+				pim->global_scope.current_bsr_last_ts);
+	}
+
+	else {
+		/* BSR known: show elapsed time since first/last BSM. */
+		now = pim_time_monotonic_sec();
+		pim_time_uptime(uptime, sizeof(uptime),
+				(now - pim->global_scope.current_bsr_first_ts));
+		pim_time_uptime(last_bsm_seen, sizeof(last_bsm_seen),
+				now - pim->global_scope.current_bsr_last_ts);
+	}
+
+	/* Translate the BSR election state into a printable label. */
+	switch (pim->global_scope.state) {
+	case NO_INFO:
+		strlcpy(bsr_state, "NO_INFO", sizeof(bsr_state));
+		break;
+	case ACCEPT_ANY:
+		strlcpy(bsr_state, "ACCEPT_ANY", sizeof(bsr_state));
+		break;
+	case ACCEPT_PREFERRED:
+		strlcpy(bsr_state, "ACCEPT_PREFERRED", sizeof(bsr_state));
+		break;
+	default:
+		/* Unknown/unhandled state: print an empty label. */
+		strlcpy(bsr_state, "", sizeof(bsr_state));
+	}
+
+
+	if (uj) {
+		json = json_object_new_object();
+		json_object_string_addf(json, "bsr", "%pPA",
+					&pim->global_scope.current_bsr);
+		json_object_int_add(json, "priority",
+				    pim->global_scope.current_bsr_prio);
+		json_object_int_add(json, "fragmentTag",
+				    pim->global_scope.bsm_frag_tag);
+		json_object_string_add(json, "state", bsr_state);
+		json_object_string_add(json, "upTime", uptime);
+		json_object_string_add(json, "lastBsmSeen", last_bsm_seen);
+	}
+
+	else {
+		vty_out(vty, "PIMv2 Bootstrap information\n");
+		vty_out(vty, "Current preferred BSR address: %pPA\n",
+			&pim->global_scope.current_bsr);
+		vty_out(vty,
+			"Priority Fragment-Tag State UpTime\n");
+		vty_out(vty, " %-12d %-12d %-13s %7s\n",
+			pim->global_scope.current_bsr_prio,
+			pim->global_scope.bsm_frag_tag, bsr_state, uptime);
+		vty_out(vty, "Last BSM seen: %s\n", last_bsm_seen);
+	}
+
+	if (uj)
+		vty_json(vty, json);
+}
+
+/* CLI entry for "show ip pim bsr": resolve the VRF (NULL means the
+ * default VRF), validate its pim instance and display BSR information.
+ */
+int pim_show_bsr_helper(const char *vrf, struct vty *vty, bool uj)
+{
+	struct pim_instance *pim;
+	struct vrf *v;
+
+	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+	if (!v)
+		return CMD_WARNING;
+
+	pim = pim_get_pim_instance(v->vrf_id);
+
+	if (!pim) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	/* Pass the instance we just looked up and NULL-checked; the
+	 * previous code re-read v->info here, bypassing the check above.
+	 */
+	pim_show_bsr(pim, vty, uj);
+
+	return CMD_SUCCESS;
+}
+
+/* Display the group-to-RP mappings learned via BSR: for every group
+ * prefix in the global-scope BSRP table, print the active RP list and
+ * the pending (partial) RP list, as text tables or as JSON.
+ */
+static void pim_show_group_rp_mappings_info(struct pim_instance *pim,
+					    struct vty *vty, bool uj)
+{
+	struct bsgrp_node *bsgrp;
+	struct bsm_rpinfo *bsm_rp;
+	struct route_node *rn;
+	json_object *json = NULL;
+	json_object *json_group = NULL;
+	json_object *json_row = NULL;
+	struct ttable *tt = NULL;
+
+	if (uj) {
+		json = json_object_new_object();
+		json_object_string_addf(json, "BSR Address", "%pPA",
+					&pim->global_scope.current_bsr);
+	} else
+		vty_out(vty, "BSR Address %pPA\n",
+			&pim->global_scope.current_bsr);
+
+	/* Walk every group prefix known to the global scope zone. */
+	for (rn = route_top(pim->global_scope.bsrp_table); rn;
+	     rn = route_next(rn)) {
+		bsgrp = (struct bsgrp_node *)rn->info;
+
+		if (!bsgrp)
+			continue;
+
+		char grp_str[PREFIX_STRLEN];
+
+		prefix2str(&bsgrp->group, grp_str, sizeof(grp_str));
+
+		if (uj) {
+			/* Reuse the per-group object if it already exists. */
+			json_object_object_get_ex(json, grp_str, &json_group);
+			if (!json_group) {
+				json_group = json_object_new_object();
+				json_object_object_add(json, grp_str,
+						       json_group);
+			}
+		} else {
+			vty_out(vty, "Group Address %pFX\n", &bsgrp->group);
+			vty_out(vty, "--------------------------\n");
+			/* Prepare table. */
+			tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+			ttable_add_row(tt, "Rp Address|priority|Holdtime|Hash");
+			tt->style.cell.rpad = 2;
+			tt->style.corner = '+';
+			ttable_restyle(tt);
+
+			/* Section marker row for the active RP list. */
+			ttable_add_row(tt, "%s|%c|%c|%c", "(ACTIVE)", ' ', ' ',
+				       ' ');
+		}
+
+		/* Active (installed) RPs for this group. */
+		frr_each (bsm_rpinfos, bsgrp->bsrp_list, bsm_rp) {
+			if (uj) {
+				json_row = json_object_new_object();
+				json_object_string_addf(json_row, "Rp Address",
+							"%pPA",
+							&bsm_rp->rp_address);
+				json_object_int_add(json_row, "Rp HoldTime",
+						    bsm_rp->rp_holdtime);
+				json_object_int_add(json_row, "Rp Priority",
+						    bsm_rp->rp_prio);
+				json_object_int_add(json_row, "Hash Val",
+						    bsm_rp->hash);
+				json_object_object_addf(json_group, json_row,
+							"%pPA",
+							&bsm_rp->rp_address);
+
+			} else {
+				ttable_add_row(
+					tt, "%pPA|%u|%u|%u",
+					&bsm_rp->rp_address, bsm_rp->rp_prio,
+					bsm_rp->rp_holdtime, bsm_rp->hash);
+			}
+		}
+		/* Dump the generated table. */
+		if (tt) {
+			char *table = NULL;
+
+			table = ttable_dump(tt, "\n");
+			vty_out(vty, "%s\n", table);
+			XFREE(MTYPE_TMP, table);
+			ttable_del(tt);
+			/* Reset so the pending section below knows whether
+			 * it allocated a fresh table.
+			 */
+			tt = NULL;
+		}
+		if (!bsm_rpinfos_count(bsgrp->bsrp_list) && !uj)
+			vty_out(vty, "Active List is empty.\n");
+
+		if (uj) {
+			json_object_int_add(json_group, "Pending RP count",
+					    bsgrp->pend_rp_cnt);
+		} else {
+			vty_out(vty, "(PENDING)\n");
+			vty_out(vty, "Pending RP count :%d\n",
+				bsgrp->pend_rp_cnt);
+			/* Only build a table when there is something to
+			 * show; otherwise tt stays NULL and the dump below
+			 * is skipped.
+			 */
+			if (bsgrp->pend_rp_cnt) {
+				/* Prepare table. */
+				tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+				ttable_add_row(
+					tt,
+					"Rp Address|priority|Holdtime|Hash");
+				tt->style.cell.rpad = 2;
+				tt->style.corner = '+';
+				ttable_restyle(tt);
+			}
+		}
+
+		/* Pending RPs received in partial BSM fragments. */
+		frr_each (bsm_rpinfos, bsgrp->partial_bsrp_list, bsm_rp) {
+			if (uj) {
+				json_row = json_object_new_object();
+				json_object_string_addf(json_row, "Rp Address",
+							"%pPA",
+							&bsm_rp->rp_address);
+				json_object_int_add(json_row, "Rp HoldTime",
+						    bsm_rp->rp_holdtime);
+				json_object_int_add(json_row, "Rp Priority",
+						    bsm_rp->rp_prio);
+				json_object_int_add(json_row, "Hash Val",
+						    bsm_rp->hash);
+				json_object_object_addf(json_group, json_row,
+							"%pPA",
+							&bsm_rp->rp_address);
+			} else {
+				ttable_add_row(
+					tt, "%pPA|%u|%u|%u",
+					&bsm_rp->rp_address, bsm_rp->rp_prio,
+					bsm_rp->rp_holdtime, bsm_rp->hash);
+			}
+		}
+		/* Dump the generated table. */
+		if (tt) {
+			char *table = NULL;
+
+			table = ttable_dump(tt, "\n");
+			vty_out(vty, "%s\n", table);
+			XFREE(MTYPE_TMP, table);
+			ttable_del(tt);
+		}
+		if (!bsm_rpinfos_count(bsgrp->partial_bsrp_list) && !uj)
+			vty_out(vty, "Partial List is empty\n");
+
+		if (!uj)
+			vty_out(vty, "\n");
+	}
+
+	if (uj)
+		vty_json(vty, json);
+}
+
+/* CLI entry for "show ip pim bsrp-info": resolve the VRF (NULL means the
+ * default VRF), validate its pim instance and show group-to-RP mappings.
+ */
+int pim_show_group_rp_mappings_info_helper(const char *vrf, struct vty *vty,
+					   bool uj)
+{
+	struct pim_instance *pim;
+	struct vrf *v;
+
+	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+	if (!v)
+		return CMD_WARNING;
+
+	pim = v->info;
+
+	if (!pim) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	/* Pass the NULL-checked instance instead of re-reading v->info. */
+	pim_show_group_rp_mappings_info(pim, vty, uj);
+
+	return CMD_SUCCESS;
+}
+
+/* Display the bsm database details: decode every cached BSM fragment of
+ * the global scope zone directly from its raw wire buffer (PIM header,
+ * BSM header, then per-group blocks each followed by its RP entries) and
+ * print the contents as text or JSON.
+ */
+static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
+{
+	int count = 0;
+	int fragment = 1;
+	struct bsm_frag *bsfrag;
+	json_object *json = NULL;
+	json_object *json_group = NULL;
+	json_object *json_row = NULL;
+
+	count = bsm_frags_count(pim->global_scope.bsm_frags);
+
+	if (uj) {
+		json = json_object_new_object();
+		json_object_int_add(json, "Number of the fragments", count);
+	} else {
+		vty_out(vty, "Scope Zone: Global\n");
+		vty_out(vty, "Number of the fragments: %d\n", count);
+		vty_out(vty, "\n");
+	}
+
+	frr_each (bsm_frags, pim->global_scope.bsm_frags, bsfrag) {
+		char grp_str[PREFIX_STRLEN];
+		struct bsmmsg_grpinfo *group;
+		struct bsmmsg_rpinfo *bsm_rpinfo;
+		struct prefix grp;
+		struct bsm_hdr *hdr;
+		pim_addr bsr_addr;
+		uint32_t offset = 0;
+		uint8_t *buf;
+		uint32_t len = 0;
+		uint32_t frag_rp_cnt = 0;
+
+		buf = bsfrag->data;
+		len = bsfrag->size;
+
+		/* skip pim header */
+		buf += PIM_MSG_HEADER_LEN;
+		len -= PIM_MSG_HEADER_LEN;
+
+		hdr = (struct bsm_hdr *)buf;
+		/* NB: bshdr->bsr_addr.addr is packed/unaligned => memcpy */
+		memcpy(&bsr_addr, &hdr->bsr_addr.addr, sizeof(bsr_addr));
+
+		/* BSM starts with bsr header */
+		buf += sizeof(struct bsm_hdr);
+		len -= sizeof(struct bsm_hdr);
+
+		if (uj) {
+			/* NOTE(review): these keys are re-added per fragment
+			 * on the same top-level object, so later fragments
+			 * overwrite earlier ones — confirm intended.
+			 */
+			json_object_string_addf(json, "BSR address", "%pPA",
+						&bsr_addr);
+			json_object_int_add(json, "BSR priority",
+					    hdr->bsr_prio);
+			json_object_int_add(json, "Hashmask Length",
+					    hdr->hm_len);
+			json_object_int_add(json, "Fragment Tag",
+					    ntohs(hdr->frag_tag));
+		} else {
+			vty_out(vty, "BSM Fragment : %d\n", fragment);
+			vty_out(vty, "------------------\n");
+			vty_out(vty, "%-15s %-15s %-15s %-15s\n", "BSR-Address",
+				"BSR-Priority", "Hashmask-len", "Fragment-Tag");
+			vty_out(vty, "%-15pPA %-15d %-15d %-15d\n", &bsr_addr,
+				hdr->bsr_prio, hdr->hm_len,
+				ntohs(hdr->frag_tag));
+		}
+
+		/* NOTE(review): emitted unconditionally, i.e. also in JSON
+		 * mode where it lands outside the JSON blob — verify.
+		 */
+		vty_out(vty, "\n");
+
+		/* Walk group blocks until the fragment is exhausted. */
+		while (offset < len) {
+			group = (struct bsmmsg_grpinfo *)buf;
+
+			/* grp.family is only assigned for the two known
+			 * address families; presumably the BSM was validated
+			 * on receive — confirm upstream parsing.
+			 */
+			if (group->group.family == PIM_MSG_ADDRESS_FAMILY_IPV4)
+				grp.family = AF_INET;
+			else if (group->group.family ==
+				 PIM_MSG_ADDRESS_FAMILY_IPV6)
+				grp.family = AF_INET6;
+
+			grp.prefixlen = group->group.mask;
+#if PIM_IPV == 4
+			grp.u.prefix4 = group->group.addr;
+#else
+			grp.u.prefix6 = group->group.addr;
+#endif
+
+			prefix2str(&grp, grp_str, sizeof(grp_str));
+
+			buf += sizeof(struct bsmmsg_grpinfo);
+			offset += sizeof(struct bsmmsg_grpinfo);
+
+			if (uj) {
+				/* Reuse the per-group object across
+				 * fragments if it already exists.
+				 */
+				json_object_object_get_ex(json, grp_str,
+							  &json_group);
+				if (!json_group) {
+					json_group = json_object_new_object();
+					json_object_int_add(json_group,
+							    "Rp Count",
+							    group->rp_count);
+					json_object_int_add(
+						json_group, "Fragment Rp count",
+						group->frag_rp_count);
+					json_object_object_add(json, grp_str,
+							       json_group);
+				}
+			} else {
+				vty_out(vty, "Group : %s\n", grp_str);
+				vty_out(vty, "-------------------\n");
+				vty_out(vty, "Rp Count:%d\n", group->rp_count);
+				vty_out(vty, "Fragment Rp Count : %d\n",
+					group->frag_rp_count);
+			}
+
+			frag_rp_cnt = group->frag_rp_count;
+
+			/* No RP entries follow this group block. */
+			if (!frag_rp_cnt)
+				continue;
+
+			if (!uj)
+				vty_out(vty,
+					"RpAddress HoldTime Priority\n");
+
+			/* Decode the RP entries belonging to this group. */
+			while (frag_rp_cnt--) {
+				pim_addr rp_addr;
+
+				bsm_rpinfo = (struct bsmmsg_rpinfo *)buf;
+				/* unaligned, again */
+				memcpy(&rp_addr, &bsm_rpinfo->rpaddr.addr,
+				       sizeof(rp_addr));
+
+				buf += sizeof(struct bsmmsg_rpinfo);
+				offset += sizeof(struct bsmmsg_rpinfo);
+
+				if (uj) {
+					json_row = json_object_new_object();
+					json_object_string_addf(
+						json_row, "Rp Address", "%pPA",
+						&rp_addr);
+					json_object_int_add(
+						json_row, "Rp HoldTime",
+						ntohs(bsm_rpinfo->rp_holdtime));
+					json_object_int_add(json_row,
+							    "Rp Priority",
+							    bsm_rpinfo->rp_pri);
+					json_object_object_addf(
+						json_group, json_row, "%pPA",
+						&rp_addr);
+				} else {
+					vty_out(vty, "%-15pPA %-12d %d\n",
+						&rp_addr,
+						ntohs(bsm_rpinfo->rp_holdtime),
+						bsm_rpinfo->rp_pri);
+				}
+			}
+			vty_out(vty, "\n");
+		}
+
+		fragment++;
+	}
+
+	if (uj)
+		vty_json(vty, json);
+}
+
+/* CLI entry for "show ip pim bsm-database": resolve the VRF (NULL means
+ * the default VRF), validate its pim instance and dump the BSM database.
+ */
+int pim_show_bsm_db_helper(const char *vrf, struct vty *vty, bool uj)
+{
+	struct pim_instance *pim;
+	struct vrf *v;
+
+	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+	if (!v)
+		return CMD_WARNING;
+
+	pim = v->info;
+
+	if (!pim) {
+		vty_out(vty, "%% Unable to find pim instance\n");
+		return CMD_WARNING;
+	}
+
+	/* Pass the NULL-checked instance instead of re-reading v->info. */
+	pim_show_bsm_db(pim, vty, uj);
+
+	return CMD_SUCCESS;
+}
diff --git a/pimd/pim_cmd_common.h b/pimd/pim_cmd_common.h
new file mode 100644
index 0000000..e30203f
--- /dev/null
+++ b/pimd/pim_cmd_common.h
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for IPv6 FRR
+ * Copyright (C) 2022 Vmware, Inc.
+ * Mobashshera Rasool <mrasool@vmware.com>
+ */
+#ifndef PIM_CMD_COMMON_H
+#define PIM_CMD_COMMON_H
+
+struct pim_upstream;
+struct pim_instance;
+
+const char *pim_cli_get_vrf_name(struct vty *vty);
+int pim_process_join_prune_cmd(struct vty *vty, const char *jpi_str);
+int pim_process_no_join_prune_cmd(struct vty *vty);
+int pim_process_spt_switchover_infinity_cmd(struct vty *vty);
+int pim_process_spt_switchover_prefixlist_cmd(struct vty *vty,
+ const char *plist);
+int pim_process_no_spt_switchover_cmd(struct vty *vty);
+int pim_process_pim_packet_cmd(struct vty *vty, const char *packet);
+int pim_process_no_pim_packet_cmd(struct vty *vty);
+int pim_process_keepalivetimer_cmd(struct vty *vty, const char *kat);
+int pim_process_no_keepalivetimer_cmd(struct vty *vty);
+int pim_process_rp_kat_cmd(struct vty *vty, const char *rpkat);
+int pim_process_no_rp_kat_cmd(struct vty *vty);
+int pim_process_register_suppress_cmd(struct vty *vty, const char *rst);
+int pim_process_no_register_suppress_cmd(struct vty *vty);
+int pim_process_rp_cmd(struct vty *vty, const char *rp_str,
+ const char *group_str);
+int pim_process_no_rp_cmd(struct vty *vty, const char *rp_str,
+ const char *group_str);
+int pim_process_rp_plist_cmd(struct vty *vty, const char *rp_str,
+ const char *prefix_list);
+int pim_process_no_rp_plist_cmd(struct vty *vty, const char *rp_str,
+ const char *prefix_list);
+
+int pim_process_ip_pim_cmd(struct vty *vty);
+int pim_process_no_ip_pim_cmd(struct vty *vty);
+int pim_process_ip_pim_passive_cmd(struct vty *vty, bool enable);
+int pim_process_ip_pim_drprio_cmd(struct vty *vty, const char *drpriority_str);
+int pim_process_no_ip_pim_drprio_cmd(struct vty *vty);
+int pim_process_ip_pim_hello_cmd(struct vty *vty, const char *hello_str,
+ const char *hold_str);
+int pim_process_no_ip_pim_hello_cmd(struct vty *vty);
+int pim_process_ip_pim_activeactive_cmd(struct vty *vty, const char *no);
+int pim_process_ip_pim_boundary_oil_cmd(struct vty *vty, const char *oil);
+int pim_process_no_ip_pim_boundary_oil_cmd(struct vty *vty);
+int pim_process_ip_mroute_cmd(struct vty *vty, const char *interface,
+ const char *group_str, const char *source_str);
+int pim_process_no_ip_mroute_cmd(struct vty *vty, const char *interface,
+ const char *group_str, const char *src_str);
+int pim_process_bsm_cmd(struct vty *vty);
+int pim_process_no_bsm_cmd(struct vty *vty);
+int pim_process_unicast_bsm_cmd(struct vty *vty);
+int pim_process_no_unicast_bsm_cmd(struct vty *vty);
+void json_object_pim_upstream_add(json_object *json, struct pim_upstream *up);
+void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json);
+void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty);
+void pim_show_state(struct pim_instance *pim, struct vty *vty,
+ const char *src_or_group, const char *group,
+ json_object *json);
+void pim_show_statistics(struct pim_instance *pim, struct vty *vty,
+ const char *ifname, bool uj);
+void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
+ pim_sgaddr *sg, json_object *json);
+void pim_show_join_desired(struct pim_instance *pim, struct vty *vty, bool uj);
+void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj);
+void pim_show_rpf_refresh_stats(struct vty *vty, struct pim_instance *pim,
+ time_t now, json_object *json);
+bool pim_sgaddr_match(pim_sgaddr item, pim_sgaddr match);
+void json_object_pim_ifp_add(struct json_object *json, struct interface *ifp);
+void pim_print_ifp_flags(struct vty *vty, struct interface *ifp);
+void json_object_pim_upstream_add(json_object *json, struct pim_upstream *up);
+int pim_show_join_cmd_helper(const char *vrf, struct vty *vty, pim_addr s_or_g,
+ pim_addr g, const char *json);
+int pim_show_join_vrf_all_cmd_helper(struct vty *vty, const char *json);
+void pim_show_join(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
+ json_object *json);
+int pim_show_jp_agg_list_cmd_helper(const char *vrf, struct vty *vty);
+void pim_show_jp_agg_list(struct pim_instance *pim, struct vty *vty);
+int pim_show_membership_cmd_helper(const char *vrf, struct vty *vty, bool uj);
+void pim_show_membership(struct pim_instance *pim, struct vty *vty, bool uj);
+void pim_show_channel(struct pim_instance *pim, struct vty *vty, bool uj);
+int pim_show_channel_cmd_helper(const char *vrf, struct vty *vty, bool uj);
+int pim_show_interface_cmd_helper(const char *vrf, struct vty *vty, bool uj,
+ bool mlag, const char *interface);
+int pim_show_interface_vrf_all_cmd_helper(struct vty *vty, bool uj, bool mlag,
+ const char *interface);
+void pim_show_interfaces(struct pim_instance *pim, struct vty *vty, bool mlag,
+ json_object *json);
+void pim_show_interfaces_single(struct pim_instance *pim, struct vty *vty,
+ const char *ifname, bool mlag,
+ json_object *json);
+void ip_pim_ssm_show_group_range(struct pim_instance *pim, struct vty *vty,
+ bool uj);
+int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
+ pim_addr source, pim_addr group);
+int pim_show_nexthop_cmd_helper(const char *vrf, struct vty *vty, bool uj);
+void pim_show_nexthop(struct pim_instance *pim, struct vty *vty, bool uj);
+int pim_show_neighbors_cmd_helper(const char *vrf, struct vty *vty,
+ const char *json, const char *interface);
+int pim_show_neighbors_vrf_all_cmd_helper(struct vty *vty, const char *json,
+ const char *interface);
+void pim_show_neighbors_single(struct pim_instance *pim, struct vty *vty,
+ const char *neighbor, json_object *json);
+void pim_show_neighbors(struct pim_instance *pim, struct vty *vty,
+ json_object *json);
+int pim_show_group_rp_mappings_info_helper(const char *vrf, struct vty *vty,
+ bool uj);
+int pim_show_bsm_db_helper(const char *vrf, struct vty *vty, bool uj);
+int gm_process_query_max_response_time_cmd(struct vty *vty,
+ const char *qmrt_str);
+int gm_process_no_query_max_response_time_cmd(struct vty *vty);
+int gm_process_last_member_query_count_cmd(struct vty *vty,
+ const char *lmqc_str);
+int gm_process_no_last_member_query_count_cmd(struct vty *vty);
+int gm_process_last_member_query_interval_cmd(struct vty *vty,
+ const char *lmqi_str);
+int gm_process_no_last_member_query_interval_cmd(struct vty *vty);
+int pim_process_ssmpingd_cmd(struct vty *vty, enum nb_operation operation,
+ const char *src_str);
+void pim_cmd_show_ip_multicast_helper(struct pim_instance *pim,
+ struct vty *vty);
+void show_multicast_interfaces(struct pim_instance *pim, struct vty *vty,
+ json_object *json);
+void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
+ bool fill, json_object *json);
+void show_mroute_count(struct pim_instance *pim, struct vty *vty,
+ json_object *json);
+void show_mroute_summary(struct pim_instance *pim, struct vty *vty,
+ json_object *json);
+int clear_ip_mroute_count_command(struct vty *vty, const char *name);
+struct vrf *pim_cmd_lookup(struct vty *vty, const char *name);
+void clear_mroute(struct pim_instance *pim);
+void clear_pim_statistics(struct pim_instance *pim);
+int clear_pim_interface_traffic(const char *vrf, struct vty *vty);
+int pim_debug_pim_cmd(void);
+int pim_no_debug_pim_cmd(void);
+int pim_debug_pim_packets_cmd(const char *hello, const char *joins,
+ const char *registers, struct vty *vty);
+int pim_no_debug_pim_packets_cmd(const char *hello, const char *joins,
+ const char *registers, struct vty *vty);
+int pim_show_rpf_helper(const char *vrf, struct vty *vty, bool json);
+int pim_show_rpf_vrf_all_helper(struct vty *vty, bool json);
+int pim_show_rp_helper(const char *vrf, struct vty *vty, const char *group_str,
+ const struct prefix *group, bool json);
+int pim_show_rp_vrf_all_helper(struct vty *vty, const char *group_str,
+ const struct prefix *group, bool json);
+int pim_show_secondary_helper(const char *vrf, struct vty *vty);
+int pim_show_statistics_helper(const char *vrf, struct vty *vty,
+ const char *word, bool uj);
+int pim_show_upstream_helper(const char *vrf, struct vty *vty, pim_addr s_or_g,
+ pim_addr g, bool json);
+int pim_show_upstream_vrf_all_helper(struct vty *vty, bool json);
+int pim_show_upstream_join_desired_helper(const char *vrf, struct vty *vty,
+ bool uj);
+int pim_show_upstream_rpf_helper(const char *vrf, struct vty *vty, bool uj);
+int pim_show_state_helper(const char *vrf, struct vty *vty,
+ const char *s_or_g_str, const char *g_str, bool json);
+int pim_show_state_vrf_all_helper(struct vty *vty, const char *s_or_g_str,
+ const char *g_str, bool json);
+int pim_show_multicast_helper(const char *vrf, struct vty *vty);
+int pim_show_multicast_vrf_all_helper(struct vty *vty);
+int pim_show_multicast_count_helper(const char *vrf, struct vty *vty,
+ bool json);
+int pim_show_multicast_count_vrf_all_helper(struct vty *vty, bool json);
+int pim_show_mroute_helper(const char *vrf, struct vty *vty, pim_addr s_or_g,
+ pim_addr g, bool fill, bool json);
+int pim_show_mroute_vrf_all_helper(struct vty *vty, bool fill, bool json);
+int pim_show_mroute_count_helper(const char *vrf, struct vty *vty, bool json);
+int pim_show_mroute_count_vrf_all_helper(struct vty *vty, bool json);
+int pim_show_mroute_summary_helper(const char *vrf, struct vty *vty, bool json);
+int pim_show_mroute_summary_vrf_all_helper(struct vty *vty, bool json);
+
+void pim_show_interface_traffic_single(struct pim_instance *pim,
+ struct vty *vty, const char *ifname,
+ bool uj);
+void pim_show_interface_traffic(struct pim_instance *pim, struct vty *vty,
+ bool uj);
+int pim_show_interface_traffic_helper(const char *vrf, const char *if_name,
+ struct vty *vty, bool uj);
+void clear_pim_interfaces(struct pim_instance *pim);
+void pim_show_bsr(struct pim_instance *pim, struct vty *vty, bool uj);
+int pim_show_bsr_helper(const char *vrf, struct vty *vty, bool uj);
+/*
+ * Special Macro to allow us to get the correct pim_instance;
+ */
+#define PIM_DECLVAR_CONTEXT_VRF(vrfptr, pimptr) \
+ VTY_DECLVAR_CONTEXT_VRF(vrfptr); \
+ struct pim_instance *pimptr = vrfptr->info; \
+ MACRO_REQUIRE_SEMICOLON() /* end */
+
+#endif /* PIM_CMD_COMMON_H */
diff --git a/pimd/pim_errors.c b/pimd/pim_errors.c
new file mode 100644
index 0000000..1f98cec
--- /dev/null
+++ b/pimd/pim_errors.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM-specific error messages.
+ * Copyright (C) 2018 Cumulus Networks, Inc.
+ * Donald Sharp
+ */
+
+#include <zebra.h>
+
+#include "lib/ferr.h"
+#include "pim_errors.h"
+
+/* clang-format off */
+/* Reference table of pimd-specific error codes with operator-facing
+ * descriptions and suggestions; terminated by the END_FERR sentinel.
+ */
+static struct log_ref ferr_pim_err[] = {
+	{
+		.code = EC_PIM_MSDP_PACKET,
+		.title = "PIM MSDP Packet Error",
+		.description = "PIM has received a packet from a peer that does not correctly decode",
+		.suggestion = "Check MSDP peer and ensure it is correctly working"
+	},
+	{
+		.code = EC_PIM_CONFIG,
+		.title = "PIM Configuration Error",
+		.description = "PIM has detected a configuration error",
+		.suggestion = "Ensure the configuration is correct and apply correct configuration"
+	},
+	{
+		.code = END_FERR,
+	}
+};
+/* clang-format on */
+
+/* Register pimd's error code table with the libfrr error subsystem. */
+void pim_error_init(void)
+{
+	log_ref_add(ferr_pim_err);
+}
diff --git a/pimd/pim_errors.h b/pimd/pim_errors.h
new file mode 100644
index 0000000..7de35f0
--- /dev/null
+++ b/pimd/pim_errors.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM-specific error messages.
+ * Copyright (C) 2018 Cumulus Networks, Inc.
+ * Donald Sharp
+ */
+
+#ifndef __PIM_ERRORS_H__
+#define __PIM_ERRORS_H__
+
+#include "lib/ferr.h"
+
+enum pim_log_refs {
+ EC_PIM_MSDP_PACKET = PIM_FERR_START,
+ EC_PIM_CONFIG,
+};
+
+extern void pim_error_init(void);
+
+#endif
diff --git a/pimd/pim_hello.c b/pimd/pim_hello.c
new file mode 100644
index 0000000..978607d
--- /dev/null
+++ b/pimd/pim_hello.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "if.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_pim.h"
+#include "pim_str.h"
+#include "pim_tlv.h"
+#include "pim_util.h"
+#include "pim_hello.h"
+#include "pim_iface.h"
+#include "pim_neighbor.h"
+#include "pim_upstream.h"
+#include "pim_bsm.h"
+
+/* Debug trace: log receipt of a hello from src on interface ifp. */
+static void on_trace(const char *label, struct interface *ifp, pim_addr src)
+{
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: from %pPAs on %s", label, &src, ifp->name);
+}
+
+/* Log a boolean hello option value, but only when 'isset' says the
+ * option was actually present in the received hello. */
+static void tlv_trace_bool(const char *label, const char *tlv_name,
+ const char *ifname, pim_addr src_addr, int isset,
+ int value)
+{
+ if (isset)
+ zlog_debug(
+ "%s: PIM hello option from %pPAs on interface %s: %s=%d",
+ label, &src_addr, ifname, tlv_name, value);
+}
+
+/* Log a 16-bit hello option value when present. */
+static void tlv_trace_uint16(const char *label, const char *tlv_name,
+ const char *ifname, pim_addr src_addr, int isset,
+ uint16_t value)
+{
+ if (isset)
+ zlog_debug(
+ "%s: PIM hello option from %pPAs on interface %s: %s=%u",
+ label, &src_addr, ifname, tlv_name, value);
+}
+
+/* Log a 32-bit hello option value (decimal) when present. */
+static void tlv_trace_uint32(const char *label, const char *tlv_name,
+ const char *ifname, pim_addr src_addr, int isset,
+ uint32_t value)
+{
+ if (isset)
+ zlog_debug(
+ "%s: PIM hello option from %pPAs on interface %s: %s=%u",
+ label, &src_addr, ifname, tlv_name, value);
+}
+
+/* Log a 32-bit hello option value (hex, e.g. Generation ID) when present. */
+static void tlv_trace_uint32_hex(const char *label, const char *tlv_name,
+ const char *ifname, pim_addr src_addr,
+ int isset, uint32_t value)
+{
+ if (isset)
+ zlog_debug(
+ "%s: PIM hello option from %pPAs on interface %s: %s=%08x",
+ label, &src_addr, ifname, tlv_name, value);
+}
+
+/* Log the parsed secondary address list (size -1 if list is NULL). */
+static void tlv_trace_list(const char *label, const char *tlv_name,
+ const char *ifname, pim_addr src_addr, int isset,
+ struct list *addr_list)
+{
+ if (isset)
+ zlog_debug(
+ "%s: PIM hello option from %pPAs on interface %s: %s size=%d list=%p",
+ label, &src_addr, ifname, tlv_name,
+ addr_list ? ((int)listcount(addr_list)) : -1,
+ (void *)addr_list);
+}
+
+/* Free the parsed secondary-address list, if one was allocated.
+ * Only usable inside pim_hello_recv() (captures its local variable). */
+#define FREE_ADDR_LIST \
+ if (hello_option_addr_list) { \
+ list_delete(&hello_option_addr_list); \
+ }
+
+/* Error-exit helper for pim_hello_recv(): free the list, then return. */
+#define FREE_ADDR_LIST_THEN_RETURN(code) \
+ { \
+ FREE_ADDR_LIST \
+ return (code); \
+ }
+
+/*
+ * Receive and process a PIM Hello message.
+ *
+ * tlv_buf/tlv_buf_size cover the option TLVs that follow the PIM
+ * header.  Parses the known options, then either (a) creates a new
+ * neighbor, (b) replaces the neighbor on Generation-ID mismatch, or
+ * (c) updates the existing neighbor in place.
+ *
+ * Returns 0 on success, a negative code on parse/allocation failure.
+ * Ownership of hello_option_addr_list passes to the neighbor on the
+ * success paths; on the error paths it is freed here.
+ */
+int pim_hello_recv(struct interface *ifp, pim_addr src_addr, uint8_t *tlv_buf,
+ int tlv_buf_size)
+{
+ struct pim_interface *pim_ifp;
+ struct pim_neighbor *neigh;
+ uint8_t *tlv_curr;
+ uint8_t *tlv_pastend;
+ pim_hello_options hello_options =
+ 0; /* bit array recording options found */
+ uint16_t hello_option_holdtime = 0;
+ uint16_t hello_option_propagation_delay = 0;
+ uint16_t hello_option_override_interval = 0;
+ uint32_t hello_option_dr_priority = 0;
+ uint32_t hello_option_generation_id = 0;
+ struct list *hello_option_addr_list = 0;
+
+ if (PIM_DEBUG_PIM_HELLO)
+ on_trace(__func__, ifp, src_addr);
+
+ pim_ifp = ifp->info;
+ assert(pim_ifp);
+
+ /* passive interfaces never process received PIM messages */
+ if (pim_ifp->pim_passive_enable) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "skip receiving PIM message on passive interface %s",
+ ifp->name);
+ return 0;
+ }
+
+ ++pim_ifp->pim_ifstat_hello_recv;
+
+ /*
+ Parse PIM hello TLVs
+ */
+ assert(tlv_buf_size >= 0);
+ tlv_curr = tlv_buf;
+ tlv_pastend = tlv_buf + tlv_buf_size;
+
+ while (tlv_curr < tlv_pastend) {
+ uint16_t option_type;
+ uint16_t option_len;
+ int remain = tlv_pastend - tlv_curr;
+
+ /* need at least type+length fields before reading them */
+ if (remain < PIM_TLV_MIN_SIZE) {
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug(
+ "%s: short PIM hello TLV size=%d < min=%d from %pPAs on interface %s",
+ __func__, remain, PIM_TLV_MIN_SIZE,
+ &src_addr, ifp->name);
+ FREE_ADDR_LIST_THEN_RETURN(-1);
+ }
+
+ option_type = PIM_TLV_GET_TYPE(tlv_curr);
+ tlv_curr += PIM_TLV_TYPE_SIZE;
+ option_len = PIM_TLV_GET_LENGTH(tlv_curr);
+ tlv_curr += PIM_TLV_LENGTH_SIZE;
+
+ /* advertised value length must fit in the remaining buffer */
+ if ((tlv_curr + option_len) > tlv_pastend) {
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug(
+ "%s: long PIM hello TLV type=%d length=%d > left=%td from %pPAs on interface %s",
+ __func__, option_type, option_len,
+ tlv_pastend - tlv_curr, &src_addr,
+ ifp->name);
+ FREE_ADDR_LIST_THEN_RETURN(-2);
+ }
+
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug(
+ "%s: parse left_size=%d: PIM hello TLV type=%d length=%d from %pPAs on %s",
+ __func__, remain, option_type, option_len,
+ &src_addr, ifp->name);
+
+ switch (option_type) {
+ case PIM_MSG_OPTION_TYPE_HOLDTIME:
+ if (pim_tlv_parse_holdtime(ifp->name, src_addr,
+ &hello_options,
+ &hello_option_holdtime,
+ option_len, tlv_curr)) {
+ FREE_ADDR_LIST_THEN_RETURN(-3);
+ }
+ break;
+ case PIM_MSG_OPTION_TYPE_LAN_PRUNE_DELAY:
+ if (pim_tlv_parse_lan_prune_delay(
+ ifp->name, src_addr, &hello_options,
+ &hello_option_propagation_delay,
+ &hello_option_override_interval, option_len,
+ tlv_curr)) {
+ FREE_ADDR_LIST_THEN_RETURN(-4);
+ }
+ break;
+ case PIM_MSG_OPTION_TYPE_DR_PRIORITY:
+ if (pim_tlv_parse_dr_priority(ifp->name, src_addr,
+ &hello_options,
+ &hello_option_dr_priority,
+ option_len, tlv_curr)) {
+ FREE_ADDR_LIST_THEN_RETURN(-5);
+ }
+ break;
+ case PIM_MSG_OPTION_TYPE_GENERATION_ID:
+ if (pim_tlv_parse_generation_id(
+ ifp->name, src_addr, &hello_options,
+ &hello_option_generation_id, option_len,
+ tlv_curr)) {
+ FREE_ADDR_LIST_THEN_RETURN(-6);
+ }
+ break;
+ case PIM_MSG_OPTION_TYPE_ADDRESS_LIST:
+ if (pim_tlv_parse_addr_list(ifp->name, src_addr,
+ &hello_options,
+ &hello_option_addr_list,
+ option_len, tlv_curr)) {
+ /* NOTE(review): unlike the other failure
+ * paths this returns without FREE_ADDR_LIST;
+ * presumably pim_tlv_parse_addr_list frees
+ * the list itself on failure -- confirm */
+ return -7;
+ }
+ break;
+ case PIM_MSG_OPTION_TYPE_DM_STATE_REFRESH:
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug(
+ "%s: ignoring PIM hello dense-mode state refresh TLV option type=%d length=%d from %pPAs on interface %s",
+ __func__, option_type, option_len,
+ &src_addr, ifp->name);
+ break;
+ default:
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug(
+ "%s: ignoring unknown PIM hello TLV type=%d length=%d from %pPAs on interface %s",
+ __func__, option_type, option_len,
+ &src_addr, ifp->name);
+ }
+
+ tlv_curr += option_len;
+ }
+
+ /*
+ Check received PIM hello options
+ */
+
+ if (PIM_DEBUG_PIM_HELLO) {
+ tlv_trace_uint16(__func__, "holdtime", ifp->name, src_addr,
+ PIM_OPTION_IS_SET(hello_options,
+ PIM_OPTION_MASK_HOLDTIME),
+ hello_option_holdtime);
+ tlv_trace_uint16(
+ __func__, "propagation_delay", ifp->name, src_addr,
+ PIM_OPTION_IS_SET(hello_options,
+ PIM_OPTION_MASK_LAN_PRUNE_DELAY),
+ hello_option_propagation_delay);
+ tlv_trace_uint16(
+ __func__, "override_interval", ifp->name, src_addr,
+ PIM_OPTION_IS_SET(hello_options,
+ PIM_OPTION_MASK_LAN_PRUNE_DELAY),
+ hello_option_override_interval);
+ tlv_trace_bool(
+ __func__, "can_disable_join_suppression", ifp->name,
+ src_addr,
+ PIM_OPTION_IS_SET(hello_options,
+ PIM_OPTION_MASK_LAN_PRUNE_DELAY),
+ PIM_OPTION_IS_SET(
+ hello_options,
+ PIM_OPTION_MASK_CAN_DISABLE_JOIN_SUPPRESSION));
+ tlv_trace_uint32(__func__, "dr_priority", ifp->name, src_addr,
+ PIM_OPTION_IS_SET(hello_options,
+ PIM_OPTION_MASK_DR_PRIORITY),
+ hello_option_dr_priority);
+ tlv_trace_uint32_hex(
+ __func__, "generation_id", ifp->name, src_addr,
+ PIM_OPTION_IS_SET(hello_options,
+ PIM_OPTION_MASK_GENERATION_ID),
+ hello_option_generation_id);
+ tlv_trace_list(__func__, "address_list", ifp->name, src_addr,
+ PIM_OPTION_IS_SET(hello_options,
+ PIM_OPTION_MASK_ADDRESS_LIST),
+ hello_option_addr_list);
+ }
+
+ /* missing holdtime is only logged, not treated as an error */
+ if (!PIM_OPTION_IS_SET(hello_options, PIM_OPTION_MASK_HOLDTIME)) {
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug(
+ "%s: PIM hello missing holdtime from %pPAs on interface %s",
+ __func__, &src_addr, ifp->name);
+ }
+
+ /*
+ New neighbor?
+ */
+
+ neigh = pim_neighbor_find(ifp, src_addr, false);
+ if (!neigh) {
+ /* Add as new neighbor */
+
+ neigh = pim_neighbor_add(
+ ifp, src_addr, hello_options, hello_option_holdtime,
+ hello_option_propagation_delay,
+ hello_option_override_interval,
+ hello_option_dr_priority, hello_option_generation_id,
+ hello_option_addr_list, PIM_NEIGHBOR_SEND_DELAY);
+ if (!neigh) {
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_warn(
+ "%s: failure creating PIM neighbor %pPAs on interface %s",
+ __func__, &src_addr, ifp->name);
+ FREE_ADDR_LIST_THEN_RETURN(-8);
+ }
+ /* Forward BSM if required */
+ if (!pim_bsm_new_nbr_fwd(neigh, ifp)) {
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug(
+ "%s: forwarding bsm to new nbr failed",
+ __func__);
+ }
+
+ /* actual addr list has been saved under neighbor */
+ return 0;
+ }
+
+ /*
+ Received generation ID ?
+ */
+
+ if (PIM_OPTION_IS_SET(hello_options, PIM_OPTION_MASK_GENERATION_ID)) {
+ /* GenID mismatch ? */
+ if (!PIM_OPTION_IS_SET(neigh->hello_options,
+ PIM_OPTION_MASK_GENERATION_ID)
+ || (hello_option_generation_id != neigh->generation_id)) {
+ /* GenID mismatch, then replace neighbor */
+
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug(
+ "%s: GenId mismatch new=%08x old=%08x: replacing neighbor %pPAs on %s",
+ __func__, hello_option_generation_id,
+ neigh->generation_id, &src_addr,
+ ifp->name);
+
+ /* neighbor restarted: refresh RPF state keyed on it */
+ pim_upstream_rpf_genid_changed(pim_ifp->pim,
+ neigh->source_addr);
+
+ pim_neighbor_delete(ifp, neigh, "GenID mismatch");
+ neigh = pim_neighbor_add(ifp, src_addr, hello_options,
+ hello_option_holdtime,
+ hello_option_propagation_delay,
+ hello_option_override_interval,
+ hello_option_dr_priority,
+ hello_option_generation_id,
+ hello_option_addr_list,
+ PIM_NEIGHBOR_SEND_NOW);
+ if (!neigh) {
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug(
+ "%s: failure re-creating PIM neighbor %pPAs on interface %s",
+ __func__, &src_addr, ifp->name);
+ FREE_ADDR_LIST_THEN_RETURN(-9);
+ }
+ /* Forward BSM if required */
+ if (!pim_bsm_new_nbr_fwd(neigh, ifp)) {
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug(
+ "%s: forwarding bsm to new nbr failed",
+ __func__);
+ }
+ /* actual addr list is saved under neighbor */
+ return 0;
+
+ } /* GenId mismatch: replace neighbor */
+
+ } /* GenId received */
+
+ /*
+ Update existing neighbor
+ */
+
+ pim_neighbor_update(neigh, hello_options, hello_option_holdtime,
+ hello_option_dr_priority, hello_option_addr_list);
+ /* actual addr list is saved under neighbor */
+ return 0;
+}
+
+/*
+ * Build the option TLVs of a PIM Hello into tlv_buf.
+ *
+ * Appends Holdtime, LAN Prune Delay (with the T bit set when join
+ * suppression can be disabled), DR Priority, Generation ID and the
+ * secondary address list(s); for IPv4 builds a v6 secondary list is
+ * added as well when configured.
+ *
+ * Returns the number of bytes written, or a negative code when an
+ * option did not fit into tlv_buf_size.
+ */
+int pim_hello_build_tlv(struct interface *ifp, uint8_t *tlv_buf,
+ int tlv_buf_size, uint16_t holdtime,
+ uint32_t dr_priority, uint32_t generation_id,
+ uint16_t propagation_delay, uint16_t override_interval,
+ int can_disable_join_suppression)
+{
+ uint8_t *curr = tlv_buf;
+ uint8_t *pastend = tlv_buf + tlv_buf_size;
+ uint8_t *tmp;
+#if PIM_IPV == 4
+ struct pim_interface *pim_ifp = ifp->info;
+ struct pim_instance *pim = pim_ifp->pim;
+#endif
+
+ /*
+ * Append options
+ */
+
+ /* Holdtime */
+ curr = pim_tlv_append_uint16(curr, pastend,
+ PIM_MSG_OPTION_TYPE_HOLDTIME, holdtime);
+ if (!curr) {
+ if (PIM_DEBUG_PIM_HELLO) {
+ zlog_debug(
+ "%s: could not set PIM hello Holdtime option for interface %s",
+ __func__, ifp->name);
+ }
+ return -1;
+ }
+
+ /* LAN Prune Delay */
+ tmp = pim_tlv_append_2uint16(curr, pastend,
+ PIM_MSG_OPTION_TYPE_LAN_PRUNE_DELAY,
+ propagation_delay, override_interval);
+ if (!tmp) {
+ if (PIM_DEBUG_PIM_HELLO) {
+ zlog_debug(
+ "%s: could not set PIM LAN Prune Delay option for interface %s",
+ __func__, ifp->name);
+ }
+ return -1;
+ }
+ /* T bit lives in the MSB of the first value byte, i.e. 4 bytes
+ * past the TLV start (after the 2-byte type + 2-byte length) */
+ if (can_disable_join_suppression) {
+ *(curr + 4) |= 0x80; /* enable T bit */
+ }
+ curr = tmp;
+
+ /* DR Priority */
+ curr = pim_tlv_append_uint32(
+ curr, pastend, PIM_MSG_OPTION_TYPE_DR_PRIORITY, dr_priority);
+ if (!curr) {
+ if (PIM_DEBUG_PIM_HELLO) {
+ zlog_debug(
+ "%s: could not set PIM hello DR Priority option for interface %s",
+ __func__, ifp->name);
+ }
+ return -2;
+ }
+
+ /* Generation ID */
+ curr = pim_tlv_append_uint32(curr, pastend,
+ PIM_MSG_OPTION_TYPE_GENERATION_ID,
+ generation_id);
+ if (!curr) {
+ if (PIM_DEBUG_PIM_HELLO) {
+ zlog_debug(
+ "%s: could not set PIM hello Generation ID option for interface %s",
+ __func__, ifp->name);
+ }
+ return -3;
+ }
+
+ /* Secondary Address List */
+ if (ifp->connected->count) {
+ curr = pim_tlv_append_addrlist_ucast(curr, pastend, ifp,
+ PIM_AF);
+ if (!curr) {
+ if (PIM_DEBUG_PIM_HELLO) {
+ zlog_debug(
+ "%s: could not set PIM hello %s Secondary Address List option for interface %s",
+ __func__, PIM_AF_NAME, ifp->name);
+ }
+ return -4;
+ }
+#if PIM_IPV == 4
+ if (pim->send_v6_secondary) {
+ curr = pim_tlv_append_addrlist_ucast(curr, pastend, ifp,
+ AF_INET6);
+ if (!curr) {
+ if (PIM_DEBUG_PIM_HELLO) {
+ zlog_debug(
+ "%s: could not sent PIM hello v6 secondary Address List option for interface %s",
+ __func__, ifp->name);
+ }
+ return -4;
+ }
+ }
+#endif
+ }
+
+ return curr - tlv_buf;
+}
+
+/*
+ RFC 4601: 4.3.1. Sending Hello Messages
+
+ Thus, if a router needs to send a Join/Prune or Assert message on an
+ interface on which it has not yet sent a Hello message with the
+ currently configured IP address, then it MUST immediately send the
+ relevant Hello message without waiting for the Hello Timer to
+ expire, followed by the Join/Prune or Assert message.
+*/
+/* Ensure a hello has been sent on ifp; no-op if the HELLO_SENT flag
+ * is already set, otherwise send one immediately and restart the
+ * hello timer. */
+void pim_hello_require(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+
+ assert(ifp);
+
+ pim_ifp = ifp->info;
+
+ assert(pim_ifp);
+
+ if (PIM_IF_FLAG_TEST_HELLO_SENT(pim_ifp->flags))
+ return;
+
+ pim_hello_restart_now(ifp); /* Send hello and restart timer */
+}
diff --git a/pimd/pim_hello.h b/pimd/pim_hello.h
new file mode 100644
index 0000000..0e57c8f
--- /dev/null
+++ b/pimd/pim_hello.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_HELLO_H
+#define PIM_HELLO_H
+
+#include <zebra.h>
+
+#include "if.h"
+
+/* Parse a received hello's TLVs and create/replace/update the
+ * neighbor; returns 0 on success, negative on failure. */
+int pim_hello_recv(struct interface *ifp, pim_addr src_addr, uint8_t *tlv_buf,
+ int tlv_buf_size);
+
+/* Build hello option TLVs into tlv_buf; returns bytes written or a
+ * negative code if an option did not fit. */
+int pim_hello_build_tlv(struct interface *ifp, uint8_t *tlv_buf,
+ int tlv_buf_size, uint16_t holdtime,
+ uint32_t dr_priority, uint32_t generation_id,
+ uint16_t propagation_delay, uint16_t override_interval,
+ int can_disable_join_suppression);
+
+/* Send a hello immediately if none has been sent yet on ifp. */
+void pim_hello_require(struct interface *ifp);
+
+#endif /* PIM_HELLO_H */
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
new file mode 100644
index 0000000..5fa4715
--- /dev/null
+++ b/pimd/pim_iface.c
@@ -0,0 +1,1834 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "if.h"
+#include "log.h"
+#include "vty.h"
+#include "memory.h"
+#include "prefix.h"
+#include "vrf.h"
+#include "linklist.h"
+#include "plist.h"
+#include "hash.h"
+#include "ferr.h"
+#include "network.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_zebra.h"
+#include "pim_iface.h"
+#include "pim_igmp.h"
+#include "pim_mroute.h"
+#include "pim_oil.h"
+#include "pim_str.h"
+#include "pim_pim.h"
+#include "pim_neighbor.h"
+#include "pim_ifchannel.h"
+#include "pim_sock.h"
+#include "pim_time.h"
+#include "pim_ssmpingd.h"
+#include "pim_rp.h"
+#include "pim_nht.h"
+#include "pim_jp_agg.h"
+#include "pim_igmp_join.h"
+#include "pim_vxlan.h"
+
+#include "pim6_mld.h"
+
+static void pim_if_gm_join_del_all(struct interface *ifp);
+
+static int gm_join_sock(const char *ifname, ifindex_t ifindex,
+ pim_addr group_addr, pim_addr source_addr,
+ struct pim_interface *pim_ifp);
+
+/* Per-instance interface init: mark every VIF index slot unused. */
+void pim_if_init(struct pim_instance *pim)
+{
+ int i;
+
+ for (i = 0; i < MAXVIFS; i++)
+ pim->iface_vif_index[i] = 0;
+}
+
+/* Instance shutdown: tear down PIM state on every interface of the
+ * VRF that has it enabled. */
+void pim_if_terminate(struct pim_instance *pim)
+{
+ struct interface *ifp;
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ continue;
+
+ pim_if_delete(ifp);
+ }
+ return;
+}
+
+/* Destructor for entries of pim_ifp->sec_addr_list. */
+static void pim_sec_addr_free(struct pim_secondary_addr *sec_addr)
+{
+ XFREE(MTYPE_PIM_SEC_ADDR, sec_addr);
+}
+
+/* Sort comparator for the secondary address list: IPv4 entries order
+ * before IPv6; within a family, numeric address order.
+ * Marked unused because it is only referenced conditionally. */
+__attribute__((unused))
+static int pim_sec_addr_comp(const void *p1, const void *p2)
+{
+ const struct pim_secondary_addr *sec1 = p1;
+ const struct pim_secondary_addr *sec2 = p2;
+
+ if (sec1->addr.family == AF_INET && sec2->addr.family == AF_INET6)
+ return -1;
+
+ if (sec1->addr.family == AF_INET6 && sec2->addr.family == AF_INET)
+ return 1;
+
+ if (sec1->addr.family == AF_INET) {
+ if (ntohl(sec1->addr.u.prefix4.s_addr)
+ < ntohl(sec2->addr.u.prefix4.s_addr))
+ return -1;
+
+ if (ntohl(sec1->addr.u.prefix4.s_addr)
+ > ntohl(sec2->addr.u.prefix4.s_addr))
+ return 1;
+ } else {
+ return memcmp(&sec1->addr.u.prefix6, &sec2->addr.u.prefix6,
+ sizeof(struct in6_addr));
+ }
+
+ return 0;
+}
+
+/*
+ * Allocate and attach per-interface PIM state (ifp->info).
+ *
+ * gm / pim select whether IGMP/MLD and PIM are enabled; ispimreg and
+ * is_vxlan_term are passed through to pim_if_add_vif().  The caller
+ * must not have attached ifp->info already.
+ */
+struct pim_interface *pim_if_new(struct interface *ifp, bool gm, bool pim,
+ bool ispimreg, bool is_vxlan_term)
+{
+ struct pim_interface *pim_ifp;
+
+ assert(ifp);
+ assert(!ifp->info);
+
+ pim_ifp = XCALLOC(MTYPE_PIM_INTERFACE, sizeof(*pim_ifp));
+
+ pim_ifp->pim = ifp->vrf->info;
+ pim_ifp->mroute_vif_index = -1;
+
+ /* group-management (IGMP/MLD) defaults */
+ pim_ifp->igmp_version = IGMP_DEFAULT_VERSION;
+ pim_ifp->mld_version = MLD_DEFAULT_VERSION;
+ pim_ifp->gm_default_robustness_variable =
+ GM_DEFAULT_ROBUSTNESS_VARIABLE;
+ pim_ifp->gm_default_query_interval = GM_GENERAL_QUERY_INTERVAL;
+ pim_ifp->gm_query_max_response_time_dsec =
+ GM_QUERY_MAX_RESPONSE_TIME_DSEC;
+ pim_ifp->gm_specific_query_max_response_time_dsec =
+ GM_SPECIFIC_QUERY_MAX_RESPONSE_TIME_DSEC;
+ pim_ifp->gm_last_member_query_count = GM_DEFAULT_ROBUSTNESS_VARIABLE;
+
+ /* BSM config on interface: true by default */
+ pim_ifp->bsm_enable = true;
+ pim_ifp->ucast_bsm_accept = true;
+ pim_ifp->am_i_dr = false;
+
+ /*
+ RFC 3376: 8.3. Query Response Interval
+ The number of seconds represented by the [Query Response Interval]
+ must be less than the [Query Interval].
+ */
+ assert(pim_ifp->gm_query_max_response_time_dsec <
+ pim_ifp->gm_default_query_interval);
+
+ pim_ifp->pim_enable = pim;
+ pim_ifp->pim_passive_enable = false;
+ pim_ifp->gm_enable = gm;
+
+ pim_ifp->gm_join_list = NULL;
+ pim_ifp->pim_neighbor_list = NULL;
+ pim_ifp->upstream_switch_list = NULL;
+ pim_ifp->pim_generation_id = 0;
+
+ /* list of struct gm_sock */
+ pim_igmp_if_init(pim_ifp, ifp);
+
+ /* list of struct pim_neighbor */
+ pim_ifp->pim_neighbor_list = list_new();
+ pim_ifp->pim_neighbor_list->del = (void (*)(void *))pim_neighbor_free;
+
+ pim_ifp->upstream_switch_list = list_new();
+ pim_ifp->upstream_switch_list->del =
+ (void (*)(void *))pim_jp_agg_group_list_free;
+ pim_ifp->upstream_switch_list->cmp = pim_jp_agg_group_list_cmp;
+
+ pim_ifp->sec_addr_list = list_new();
+ pim_ifp->sec_addr_list->del = (void (*)(void *))pim_sec_addr_free;
+ pim_ifp->sec_addr_list->cmp =
+ (int (*)(void *, void *))pim_sec_addr_comp;
+
+ pim_ifp->activeactive = false;
+
+ RB_INIT(pim_ifchannel_rb, &pim_ifp->ifchannel_rb);
+
+ ifp->info = pim_ifp;
+
+ pim_sock_reset(ifp);
+
+ pim_if_add_vif(ifp, ispimreg, is_vxlan_term);
+ pim_ifp->pim->mcast_if_count++;
+
+ return pim_ifp;
+}
+
+/* Tear down and free the per-interface PIM state attached to ifp;
+ * inverse of pim_if_new().  Clears ifp->info on return. */
+void pim_if_delete(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+
+ assert(ifp);
+ pim_ifp = ifp->info;
+ assert(pim_ifp);
+
+ pim_ifp->pim->mcast_if_count--;
+ if (pim_ifp->gm_join_list) {
+ pim_if_gm_join_del_all(ifp);
+ }
+
+ pim_ifchannel_delete_all(ifp);
+#if PIM_IPV == 4
+ igmp_sock_delete_all(ifp);
+#endif
+ if (pim_ifp->pim_sock_fd >= 0)
+ pim_sock_delete(ifp, "Interface removed from configuration");
+
+ pim_if_del_vif(ifp);
+
+ pim_igmp_if_fini(pim_ifp);
+
+ list_delete(&pim_ifp->pim_neighbor_list);
+ list_delete(&pim_ifp->upstream_switch_list);
+ list_delete(&pim_ifp->sec_addr_list);
+
+ if (pim_ifp->bfd_config.profile)
+ XFREE(MTYPE_TMP, pim_ifp->bfd_config.profile);
+
+ XFREE(MTYPE_PIM_INTERFACE, pim_ifp->boundary_oil_plist);
+ XFREE(MTYPE_PIM_INTERFACE, pim_ifp);
+
+ ifp->info = NULL;
+}
+
+/* Re-evaluate the could_assert predicate on every ifchannel of ifp. */
+void pim_if_update_could_assert(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+ struct pim_ifchannel *ch;
+
+ pim_ifp = ifp->info;
+ assert(pim_ifp);
+
+ RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
+ pim_ifchannel_update_could_assert(ch);
+ }
+}
+
+/* Recompute this router's assert metric on every ifchannel of ifp. */
+static void pim_if_update_my_assert_metric(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+ struct pim_ifchannel *ch;
+
+ pim_ifp = ifp->info;
+ assert(pim_ifp);
+
+ RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
+ pim_ifchannel_update_my_assert_metric(ch);
+ }
+}
+
+/* React to a change of the interface's primary address: rerun DR
+ * election and the state that depends on it, then send a fresh hello
+ * with the new address (RFC 4601 4.3.1 item 2). */
+static void pim_addr_change(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+
+ pim_ifp = ifp->info;
+ assert(pim_ifp);
+
+ pim_if_dr_election(ifp); /* router's own DR Priority (addr) changes --
+ Done TODO T30 */
+ pim_if_update_join_desired(pim_ifp); /* depends on DR */
+ pim_if_update_could_assert(ifp); /* depends on DR */
+ pim_if_update_my_assert_metric(ifp); /* depends on could_assert */
+ pim_if_update_assert_tracking_desired(
+ ifp); /* depends on DR, join_desired */
+
+ /*
+ RFC 4601: 4.3.1. Sending Hello Messages
+
+ 1) Before an interface goes down or changes primary IP address, a
+ Hello message with a zero HoldTime should be sent immediately
+ (with the old IP address if the IP address changed).
+ -- Done at the caller of the function as new ip already updated here
+
+ 2) After an interface has changed its IP address, it MUST send a
+ Hello message with its new IP address.
+ -- DONE below
+
+ 3) If an interface changes one of its secondary IP addresses, a
+ Hello message with an updated Address_List option and a non-zero
+ HoldTime should be sent immediately.
+ -- FIXME See TODO T31
+ */
+ PIM_IF_FLAG_UNSET_HELLO_SENT(pim_ifp->flags);
+ if (pim_ifp->pim_sock_fd < 0)
+ return;
+ pim_hello_restart_now(ifp); /* send hello and restart timer */
+}
+
+/* Detect whether ifp's primary address changed (or force it to ANY).
+ * On change: first send a zero-holdtime hello with the OLD address
+ * (RFC 4601 4.3.1 item 1), then record the new one.
+ * Returns nonzero iff the address changed. */
+static int detect_primary_address_change(struct interface *ifp,
+ int force_prim_as_any,
+ const char *caller)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+ pim_addr new_prim_addr;
+ int changed;
+
+ if (force_prim_as_any)
+ new_prim_addr = PIMADDR_ANY;
+ else
+ new_prim_addr = pim_find_primary_addr(ifp);
+
+ changed = pim_addr_cmp(new_prim_addr, pim_ifp->primary_address);
+
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: old=%pPA new=%pPA on interface %s: %s",
+ __func__, &pim_ifp->primary_address, &new_prim_addr,
+ ifp->name, changed ? "changed" : "unchanged");
+
+ if (changed) {
+ /* Before updating pim_ifp send Hello time with 0 hold time */
+ if (pim_ifp->pim_enable) {
+ pim_hello_send(ifp, 0 /* zero-sec holdtime */);
+ }
+ pim_ifp->primary_address = new_prim_addr;
+ }
+
+ return changed;
+}
+
+/* Find the secondary-address entry matching prefix addr, or NULL. */
+static struct pim_secondary_addr *
+pim_sec_addr_find(struct pim_interface *pim_ifp, struct prefix *addr)
+{
+ struct pim_secondary_addr *sec_addr;
+ struct listnode *node;
+
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
+ if (prefix_cmp(&sec_addr->addr, addr) == 0) {
+ return sec_addr;
+ }
+ }
+
+ return NULL;
+}
+
+/* Unlink and free one secondary-address entry. */
+static void pim_sec_addr_del(struct pim_interface *pim_ifp,
+ struct pim_secondary_addr *sec_addr)
+{
+ listnode_delete(pim_ifp->sec_addr_list, sec_addr);
+ pim_sec_addr_free(sec_addr);
+}
+
+/* Add addr to the secondary list (or refresh an existing entry by
+ * clearing its STALE flag).  Returns 1 if a new entry was added. */
+static int pim_sec_addr_add(struct pim_interface *pim_ifp, struct prefix *addr)
+{
+ int changed = 0;
+ struct pim_secondary_addr *sec_addr;
+
+ sec_addr = pim_sec_addr_find(pim_ifp, addr);
+ if (sec_addr) {
+ sec_addr->flags &= ~PIM_SEC_ADDRF_STALE;
+ return changed;
+ }
+
+ sec_addr = XCALLOC(MTYPE_PIM_SEC_ADDR, sizeof(*sec_addr));
+
+ changed = 1;
+ sec_addr->addr = *addr;
+ listnode_add_sort(pim_ifp->sec_addr_list, sec_addr);
+
+ return changed;
+}
+
+/* Flush the whole secondary list; returns 1 if anything was removed. */
+static int pim_sec_addr_del_all(struct pim_interface *pim_ifp)
+{
+ int changed = 0;
+
+ if (!list_isempty(pim_ifp->sec_addr_list)) {
+ changed = 1;
+ /* remove all nodes and free up the list itself */
+ list_delete_all_node(pim_ifp->sec_addr_list);
+ }
+
+ return changed;
+}
+
+/* Re-sync the secondary list with ifp->connected using a mark-and-
+ * sweep: mark all entries STALE, re-add/refresh from the current
+ * connected addresses, then drop entries still marked STALE.
+ * Returns 1 if the list changed. */
+static int pim_sec_addr_update(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+ struct connected *ifc;
+ struct listnode *node;
+ struct listnode *nextnode;
+ struct pim_secondary_addr *sec_addr;
+ int changed = 0;
+
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
+ sec_addr->flags |= PIM_SEC_ADDRF_STALE;
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, ifc)) {
+ pim_addr addr = pim_addr_from_prefix(ifc->address);
+
+ if (pim_addr_is_any(addr))
+ continue;
+
+ if (!pim_addr_cmp(addr, pim_ifp->primary_address)) {
+ /* don't add the primary address into the secondary
+ * address list */
+ continue;
+ }
+
+ if (pim_sec_addr_add(pim_ifp, ifc->address)) {
+ changed = 1;
+ }
+ }
+
+ /* Drop stale entries */
+ for (ALL_LIST_ELEMENTS(pim_ifp->sec_addr_list, node, nextnode,
+ sec_addr)) {
+ if (sec_addr->flags & PIM_SEC_ADDRF_STALE) {
+ pim_sec_addr_del(pim_ifp, sec_addr);
+ changed = 1;
+ }
+ }
+
+ return changed;
+}
+
+/* Re-evaluate the secondary address list (or flush it entirely when
+ * the primary is being forced to ANY).  Returns 1 on change. */
+static int detect_secondary_address_change(struct interface *ifp,
+ int force_prim_as_any,
+ const char *caller)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+ int changed = 0;
+
+ if (force_prim_as_any) {
+ /* if primary address is being forced to zero just flush the
+ * secondary address list */
+ changed = pim_sec_addr_del_all(pim_ifp);
+ } else {
+ /* re-evaluate the secondary address list */
+ changed = pim_sec_addr_update(ifp);
+ }
+
+ return changed;
+}
+
+/* Check both primary and secondary addresses of ifp; if either
+ * changed and PIM is enabled, run the full pim_addr_change() path. */
+static void detect_address_change(struct interface *ifp, int force_prim_as_any,
+ const char *caller)
+{
+ int changed = 0;
+ struct pim_interface *pim_ifp;
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp)
+ return;
+
+ if (detect_primary_address_change(ifp, force_prim_as_any, caller)) {
+ changed = 1;
+ }
+
+ if (detect_secondary_address_change(ifp, force_prim_as_any, caller)) {
+ changed = 1;
+ }
+
+
+ if (changed) {
+ if (!pim_ifp->pim_enable) {
+ return;
+ }
+
+ pim_addr_change(ifp);
+ }
+
+ /* XXX: if we have unnumbered interfaces we need to run detect address
+ * address change on all of them when the lo address changes */
+}
+
+/* Set the configured hello/update source address for ifp.
+ * Returns PIM_SUCCESS, PIM_UPDATE_SOURCE_DUP if unchanged, or
+ * PIM_IFACE_NOT_FOUND when PIM is not attached to ifp. */
+int pim_update_source_set(struct interface *ifp, pim_addr source)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp) {
+ return PIM_IFACE_NOT_FOUND;
+ }
+
+ if (!pim_addr_cmp(pim_ifp->update_source, source)) {
+ return PIM_UPDATE_SOURCE_DUP;
+ }
+
+ pim_ifp->update_source = source;
+ detect_address_change(ifp, 0 /* force_prim_as_any */, __func__);
+
+ return PIM_SUCCESS;
+}
+
+/*
+ * Handle a connected address being added on an interface.
+ *
+ * Tracks IPv6 link-local lowest/highest, reruns address-change
+ * detection, (IPv4) creates/refreshes the IGMP socket and replays
+ * static joins, opens the PIM socket if the interface now has a
+ * primary address, re-registers pending RNH entries, and finally
+ * allocates a VIF if none exists yet.
+ */
+void pim_if_addr_add(struct connected *ifc)
+{
+ struct pim_interface *pim_ifp;
+ struct interface *ifp;
+ bool vxlan_term;
+
+ assert(ifc);
+
+ ifp = ifc->ifp;
+ assert(ifp);
+ pim_ifp = ifp->info;
+ if (!pim_ifp)
+ return;
+
+ if (!if_is_operative(ifp))
+ return;
+
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: %s ifindex=%d connected IP address %pFX %s",
+ __func__, ifp->name, ifp->ifindex, ifc->address,
+ CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY)
+ ? "secondary"
+ : "primary");
+#if PIM_IPV != 4
+ /* track lowest/highest link-local for MLD querier election */
+ if (IN6_IS_ADDR_LINKLOCAL(&ifc->address->u.prefix6) ||
+ IN6_IS_ADDR_LOOPBACK(&ifc->address->u.prefix6)) {
+ if (IN6_IS_ADDR_UNSPECIFIED(&pim_ifp->ll_lowest))
+ pim_ifp->ll_lowest = ifc->address->u.prefix6;
+ else if (IPV6_ADDR_CMP(&ifc->address->u.prefix6,
+ &pim_ifp->ll_lowest) < 0)
+ pim_ifp->ll_lowest = ifc->address->u.prefix6;
+
+ if (IPV6_ADDR_CMP(&ifc->address->u.prefix6,
+ &pim_ifp->ll_highest) > 0)
+ pim_ifp->ll_highest = ifc->address->u.prefix6;
+
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug(
+ "%s: new link-local %pI6, lowest now %pI6, highest %pI6",
+ ifc->ifp->name, &ifc->address->u.prefix6,
+ &pim_ifp->ll_lowest, &pim_ifp->ll_highest);
+ }
+#endif
+
+ detect_address_change(ifp, 0, __func__);
+
+ // if (ifc->address->family != AF_INET)
+ // return;
+
+#if PIM_IPV == 4
+ struct in_addr ifaddr = ifc->address->u.prefix4;
+
+ if (pim_ifp->gm_enable) {
+ struct gm_sock *igmp;
+
+ /* lookup IGMP socket */
+ igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list,
+ ifaddr);
+ if (!igmp) {
+ /* if addr new, add IGMP socket */
+ if (ifc->address->family == AF_INET)
+ pim_igmp_sock_add(pim_ifp->gm_socket_list,
+ ifaddr, ifp, false);
+ } else if (igmp->mtrace_only) {
+ /* upgrade a mtrace-only socket to a full IGMP one */
+ igmp_sock_delete(igmp);
+ pim_igmp_sock_add(pim_ifp->gm_socket_list, ifaddr, ifp,
+ false);
+ }
+
+ /* Replay Static IGMP groups */
+ if (pim_ifp->gm_join_list) {
+ struct listnode *node;
+ struct listnode *nextnode;
+ struct gm_join *ij;
+ int join_fd;
+
+ for (ALL_LIST_ELEMENTS(pim_ifp->gm_join_list, node,
+ nextnode, ij)) {
+ /* Close socket and reopen with Source and Group
+ */
+ close(ij->sock_fd);
+ join_fd = gm_join_sock(
+ ifp->name, ifp->ifindex, ij->group_addr,
+ ij->source_addr, pim_ifp);
+ if (join_fd < 0) {
+ char group_str[INET_ADDRSTRLEN];
+ char source_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<grp?>", ij->group_addr,
+ group_str,
+ sizeof(group_str));
+ pim_inet4_dump(
+ "<src?>", ij->source_addr,
+ source_str, sizeof(source_str));
+ zlog_warn(
+ "%s: gm_join_sock() failure for IGMP group %s source %s on interface %s",
+ __func__, group_str, source_str,
+ ifp->name);
+ /* warning only */
+ } else
+ ij->sock_fd = join_fd;
+ }
+ }
+ } /* igmp */
+ else {
+ struct gm_sock *igmp;
+
+ /* lookup IGMP socket */
+ igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list,
+ ifaddr);
+ if (ifc->address->family == AF_INET) {
+ if (igmp)
+ igmp_sock_delete(igmp);
+ /* if addr new, add IGMP socket */
+ pim_igmp_sock_add(pim_ifp->gm_socket_list, ifaddr, ifp,
+ true);
+ }
+ } /* igmp mtrace only */
+#endif
+
+ if (pim_ifp->pim_enable) {
+
+ if (!pim_addr_is_any(pim_ifp->primary_address)) {
+
+ /* Interface has a valid socket ? */
+ if (pim_ifp->pim_sock_fd < 0) {
+ if (pim_sock_add(ifp)) {
+ zlog_warn(
+ "Failure creating PIM socket for interface %s",
+ ifp->name);
+ }
+ }
+ struct pim_nexthop_cache *pnc = NULL;
+ struct pim_rpf rpf;
+ struct zclient *zclient = NULL;
+
+ zclient = pim_zebra_zclient_get();
+ /* RP config might come prior to (local RP's interface)
+ IF UP event.
+ In this case, pnc would not have pim enabled
+ nexthops.
+ Once Interface is UP and pim info is available,
+ reregister
+ with RNH address to receive update and add the
+ interface as nexthop. */
+ memset(&rpf, 0, sizeof(struct pim_rpf));
+ rpf.rpf_addr = pim_addr_from_prefix(ifc->address);
+ pnc = pim_nexthop_cache_find(pim_ifp->pim, &rpf);
+ if (pnc)
+ pim_sendmsg_zebra_rnh(pim_ifp->pim, zclient,
+ pnc,
+ ZEBRA_NEXTHOP_REGISTER);
+ }
+ } /* pim */
+
+ /*
+ PIM or IGMP is enabled on interface, and there is at least one
+ address assigned, then try to create a vif_index.
+ */
+ if (pim_ifp->mroute_vif_index < 0) {
+ vxlan_term = pim_vxlan_is_term_dev_cfg(pim_ifp->pim, ifp);
+ pim_if_add_vif(ifp, false, vxlan_term);
+ }
+ gm_ifp_update(ifp);
+ pim_ifchannel_scan_forward_start(ifp);
+}
+
+/* IPv4 only: drop the IGMP socket bound to the removed address. */
+static void pim_if_addr_del_igmp(struct connected *ifc)
+{
+#if PIM_IPV == 4
+ struct pim_interface *pim_ifp = ifc->ifp->info;
+ struct gm_sock *igmp;
+ struct in_addr ifaddr;
+
+ if (ifc->address->family != AF_INET) {
+ /* non-IPv4 address */
+ return;
+ }
+
+ if (!pim_ifp) {
+ /* IGMP not enabled on interface */
+ return;
+ }
+
+ ifaddr = ifc->address->u.prefix4;
+
+ /* lookup IGMP socket */
+ igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list, ifaddr);
+ if (igmp) {
+ /* if addr found, del IGMP socket */
+ igmp_sock_delete(igmp);
+ }
+#endif
+}
+
+/* Close the PIM socket when the interface lost its last (primary)
+ * address; no-op while a valid primary address remains. */
+static void pim_if_addr_del_pim(struct connected *ifc)
+{
+ struct pim_interface *pim_ifp = ifc->ifp->info;
+
+ if (ifc->address->family != PIM_AF) {
+ /* non-IPv4 address */
+ return;
+ }
+
+ if (!pim_ifp) {
+ /* PIM not enabled on interface */
+ return;
+ }
+
+ if (!pim_addr_is_any(pim_ifp->primary_address)) {
+ /* Interface keeps a valid primary address */
+ return;
+ }
+
+ if (pim_ifp->pim_sock_fd < 0) {
+ /* Interface does not hold a valid socket any longer */
+ return;
+ }
+
+ /*
+ pim_sock_delete() closes the socket, stops read and timer threads,
+ and kills all neighbors.
+ */
+ pim_sock_delete(ifc->ifp,
+ "last address has been removed from interface");
+}
+
+/* Handle a connected address being removed: (IPv6) recompute the
+ * link-local lowest/highest from the remaining addresses, rerun
+ * address-change detection, then tear down IGMP/PIM sockets that
+ * depended on the removed address. */
+void pim_if_addr_del(struct connected *ifc, int force_prim_as_any)
+{
+ struct interface *ifp;
+
+ assert(ifc);
+ ifp = ifc->ifp;
+ assert(ifp);
+
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: %s ifindex=%d disconnected IP address %pFX %s",
+ __func__, ifp->name, ifp->ifindex, ifc->address,
+ CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY)
+ ? "secondary"
+ : "primary");
+
+#if PIM_IPV == 6
+ struct pim_interface *pim_ifp = ifc->ifp->info;
+
+ if (pim_ifp &&
+ (!IPV6_ADDR_CMP(&ifc->address->u.prefix6, &pim_ifp->ll_lowest) ||
+ !IPV6_ADDR_CMP(&ifc->address->u.prefix6, &pim_ifp->ll_highest))) {
+ struct listnode *cnode;
+ struct connected *cc;
+
+ /* seed lowest with all-ones so any real address replaces it */
+ memset(&pim_ifp->ll_lowest, 0xff, sizeof(pim_ifp->ll_lowest));
+ memset(&pim_ifp->ll_highest, 0, sizeof(pim_ifp->ll_highest));
+
+ for (ALL_LIST_ELEMENTS_RO(ifc->ifp->connected, cnode, cc)) {
+ if (!IN6_IS_ADDR_LINKLOCAL(&cc->address->u.prefix6) &&
+ !IN6_IS_ADDR_LOOPBACK(&cc->address->u.prefix6))
+ continue;
+
+ if (IPV6_ADDR_CMP(&cc->address->u.prefix6,
+ &pim_ifp->ll_lowest) < 0)
+ pim_ifp->ll_lowest = cc->address->u.prefix6;
+ if (IPV6_ADDR_CMP(&cc->address->u.prefix6,
+ &pim_ifp->ll_highest) > 0)
+ pim_ifp->ll_highest = cc->address->u.prefix6;
+ }
+
+ /* no link-local left at all: reset lowest back to zero */
+ if (pim_ifp->ll_lowest.s6_addr[0] == 0xff)
+ memset(&pim_ifp->ll_lowest, 0,
+ sizeof(pim_ifp->ll_lowest));
+
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug(
+ "%s: removed link-local %pI6, lowest now %pI6, highest %pI6",
+ ifc->ifp->name, &ifc->address->u.prefix6,
+ &pim_ifp->ll_lowest, &pim_ifp->ll_highest);
+
+ gm_ifp_update(ifp);
+ }
+#endif
+
+ detect_address_change(ifp, force_prim_as_any, __func__);
+
+ pim_if_addr_del_igmp(ifc);
+ pim_if_addr_del_pim(ifc);
+}
+
+/*
+ * Add all connected addresses of an interface to PIM/IGMP, then make
+ * sure a VIF exists and RP state is re-evaluated.  Handles the
+ * v6-only (unnumbered) case by opening the PIM socket directly.
+ */
+void pim_if_addr_add_all(struct interface *ifp)
+{
+	struct connected *ifc;
+	struct listnode *node;
+	struct listnode *nextnode;
+	int v4_addrs = 0;
+	int v6_addrs = 0;
+	struct pim_interface *pim_ifp = ifp->info;
+	bool vxlan_term;
+
+
+	/* PIM/IGMP enabled ? */
+	if (!pim_ifp)
+		return;
+
+	for (ALL_LIST_ELEMENTS(ifp->connected, node, nextnode, ifc)) {
+		struct prefix *p = ifc->address;
+
+		/* NOTE: any non-AF_INET family is counted as v6 here */
+		if (p->family != AF_INET)
+			v6_addrs++;
+		else
+			v4_addrs++;
+		pim_if_addr_add(ifc);
+	}
+
+	/* v6-only interface with PIM enabled but no socket yet: open the
+	 * PIM socket now (nothing above will have done it).
+	 */
+	if (!v4_addrs && v6_addrs && !if_is_loopback(ifp) &&
+	    pim_ifp->pim_enable && !pim_addr_is_any(pim_ifp->primary_address) &&
+	    pim_ifp->pim_sock_fd < 0 && pim_sock_add(ifp)) {
+		/* Interface has a valid primary address ? */
+		/* Interface has a valid socket ? */
+		zlog_warn("Failure creating PIM socket for interface %s",
+			  ifp->name);
+	}
+	/*
+	 * PIM or IGMP/MLD is enabled on interface, and there is at least one
+	 * address assigned, then try to create a vif_index.
+	 */
+	if (pim_ifp->mroute_vif_index < 0) {
+		vxlan_term = pim_vxlan_is_term_dev_cfg(pim_ifp->pim, ifp);
+		pim_if_add_vif(ifp, false, vxlan_term);
+	}
+	gm_ifp_update(ifp);
+	pim_ifchannel_scan_forward_start(ifp);
+
+	pim_rp_setup(pim_ifp->pim);
+	pim_rp_check_on_if_add(pim_ifp);
+}
+
+/*
+ * Remove every connected address (in PIM's address family) from
+ * PIM/IGMP on this interface, then re-evaluate RP state for the
+ * instance.  Uses force_prim_as_any so the primary address is cleared.
+ */
+void pim_if_addr_del_all(struct interface *ifp)
+{
+	struct connected *ifc;
+	struct listnode *node;
+	struct listnode *nextnode;
+	struct pim_instance *pim;
+
+	pim = ifp->vrf->info;
+	if (!pim)
+		return;
+
+	/* PIM/IGMP enabled ? */
+	if (!ifp->info)
+		return;
+
+	for (ALL_LIST_ELEMENTS(ifp->connected, node, nextnode, ifc)) {
+		struct prefix *p = ifc->address;
+
+		if (p->family != PIM_AF)
+			continue;
+
+		pim_if_addr_del(ifc, 1 /* force_prim_as_any=true */);
+	}
+
+	pim_rp_setup(pim);
+	pim_i_am_rp_re_evaluate(pim);
+}
+
+/*
+ * Close the IGMP sockets for every IPv4 connected address on this
+ * interface (AF_INET is correct here: IGMP is v4-only, matching
+ * pim_if_addr_del_igmp()).
+ */
+void pim_if_addr_del_all_igmp(struct interface *ifp)
+{
+	struct connected *ifc;
+	struct listnode *node;
+	struct listnode *nextnode;
+
+	/* PIM/IGMP enabled ? */
+	if (!ifp->info)
+		return;
+
+	for (ALL_LIST_ELEMENTS(ifp->connected, node, nextnode, ifc)) {
+		struct prefix *p = ifc->address;
+
+		if (p->family != AF_INET)
+			continue;
+
+		pim_if_addr_del_igmp(ifc);
+	}
+}
+
+/*
+ * Determine the interface's primary PIM address.
+ * Precedence: statically configured update_source; then (IPv6) the
+ * highest link-local; then (IPv4) the first non-secondary connected
+ * address, or a promoted secondary, or - for unnumbered v6-only
+ * interfaces - the loopback/VRF device's address.  Returns PIMADDR_ANY
+ * if nothing usable is found.
+ */
+pim_addr pim_find_primary_addr(struct interface *ifp)
+{
+	struct connected *ifc;
+	struct listnode *node;
+	struct pim_interface *pim_ifp = ifp->info;
+
+	if (pim_ifp && !pim_addr_is_any(pim_ifp->update_source))
+		return pim_ifp->update_source;
+
+#if PIM_IPV == 6
+	if (pim_ifp && !pim_addr_is_any(pim_ifp->ll_highest))
+		return pim_ifp->ll_highest;
+
+	/* no cached highest link-local: scan the connected list */
+	pim_addr best_addr = PIMADDR_ANY;
+
+	for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, ifc)) {
+		pim_addr addr;
+
+		if (ifc->address->family != AF_INET6)
+			continue;
+
+		addr = pim_addr_from_prefix(ifc->address);
+		if (!IN6_IS_ADDR_LINKLOCAL(&addr))
+			continue;
+		if (pim_addr_cmp(addr, best_addr) > 0)
+			best_addr = addr;
+	}
+
+	return best_addr;
+#else
+	int v4_addrs = 0;
+	int v6_addrs = 0;
+	struct connected *promote_ifc = NULL;
+
+	for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, ifc)) {
+		switch (ifc->address->family) {
+		case AF_INET:
+			v4_addrs++;
+			break;
+		case AF_INET6:
+			v6_addrs++;
+			break;
+		default:
+			continue;
+		}
+
+		if (ifc->address->family != PIM_AF)
+			continue;
+
+		/* remember a secondary in case no primary exists */
+		if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY)) {
+			promote_ifc = ifc;
+			continue;
+		}
+
+		return pim_addr_from_prefix(ifc->address);
+	}
+
+
+	/* Promote the new primary address. */
+	if (v4_addrs && promote_ifc) {
+		UNSET_FLAG(promote_ifc->flags, ZEBRA_IFA_SECONDARY);
+		return pim_addr_from_prefix(promote_ifc->address);
+	}
+
+	/*
+	 * If we have no v4_addrs and v6 is configured
+	 * We probably are using unnumbered
+	 * So let's grab the loopbacks v4 address
+	 * and use that as the primary address
+	 */
+	if (!v4_addrs && v6_addrs) {
+		struct interface *lo_ifp;
+
+		/* TODO(review): revisit VRF handling here (original note) */
+		if (ifp->vrf->vrf_id == VRF_DEFAULT)
+			lo_ifp = if_lookup_by_name("lo", ifp->vrf->vrf_id);
+		else
+			lo_ifp = if_lookup_by_name(ifp->vrf->name,
+						   ifp->vrf->vrf_id);
+
+		if (lo_ifp && (lo_ifp != ifp))
+			return pim_find_primary_addr(lo_ifp);
+	}
+	return PIMADDR_ANY;
+#endif
+}
+
+/*
+ * Pick the next free VIF index for an interface.  Index 0 is reserved
+ * for the pimreg VIF; returns MAXVIFS when the table is full (caller
+ * must treat that as an error).
+ */
+static int pim_iface_next_vif_index(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+	struct pim_instance *pim = pim_ifp->pim;
+	int i;
+
+	/*
+	 * The pimreg vif is always going to be in index 0
+	 * of the table.
+	 */
+	if (ifp->ifindex == PIM_OIF_PIM_REGISTER_VIF)
+		return 0;
+
+	for (i = 1; i < MAXVIFS; i++) {
+		if (pim->iface_vif_index[i] == 0)
+			return i;
+	}
+	return MAXVIFS;
+}
+
+/*
+ pim_if_add_vif() uses ifindex as vif_index
+
+ see also pim_if_find_vifindex_by_ifindex()
+ */
+/*
+ * Create a multicast VIF for the interface in the kernel mroute table.
+ * Returns 0 on success; negative codes: -1 VIF already exists, -2 bad
+ * ifindex, -3 VIF table full, -4 no primary address (IPv4 builds only),
+ * -5 kernel add failed.
+ */
+int pim_if_add_vif(struct interface *ifp, bool ispimreg, bool is_vxlan_term)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+	pim_addr ifaddr;
+	unsigned char flags = 0;
+
+	assert(pim_ifp);
+
+	if (pim_ifp->mroute_vif_index > 0) {
+		zlog_warn("%s: vif_index=%d > 0 on interface %s ifindex=%d",
+			  __func__, pim_ifp->mroute_vif_index, ifp->name,
+			  ifp->ifindex);
+		return -1;
+	}
+
+	if (ifp->ifindex < 0) {
+		zlog_warn("%s: ifindex=%d < 0 on interface %s", __func__,
+			  ifp->ifindex, ifp->name);
+		return -2;
+	} else if ((ifp->ifindex == PIM_OIF_PIM_REGISTER_VIF) &&
+		   ((strncmp(ifp->name, "pimreg", 6)) &&
+		    (strncmp(ifp->name, "pim6reg", 7)))) {
+		/* register-VIF ifindex on something not named pim(6)reg */
+		zlog_warn("%s: ifindex=%d on interface %s", __func__,
+			  ifp->ifindex, ifp->name);
+		return -2;
+	}
+
+	ifaddr = pim_ifp->primary_address;
+#if PIM_IPV != 6
+	/* IPv6 API is always by interface index */
+	if (!ispimreg && !is_vxlan_term && pim_addr_is_any(ifaddr)) {
+		zlog_warn(
+			"%s: could not get address for interface %s ifindex=%d",
+			__func__, ifp->name, ifp->ifindex);
+		return -4;
+	}
+#endif
+
+	pim_ifp->mroute_vif_index = pim_iface_next_vif_index(ifp);
+
+	if (pim_ifp->mroute_vif_index >= MAXVIFS) {
+		zlog_warn(
+			"%s: Attempting to configure more than MAXVIFS=%d on pim enabled interface %s",
+			__func__, MAXVIFS, ifp->name);
+		return -3;
+	}
+
+	if (ifp->ifindex == PIM_OIF_PIM_REGISTER_VIF)
+		flags = VIFF_REGISTER;
+#ifdef VIFF_USE_IFINDEX
+	else
+		flags = VIFF_USE_IFINDEX;
+#endif
+
+	if (pim_mroute_add_vif(ifp, ifaddr, flags)) {
+		/* pim_mroute_add_vif reported error */
+		return -5;
+	}
+
+	pim_ifp->pim->iface_vif_index[pim_ifp->mroute_vif_index] = 1;
+
+	if (!ispimreg)
+		gm_ifp_update(ifp);
+
+	/* if the device qualifies as pim_vxlan iif/oif update vxlan entries */
+	pim_vxlan_add_vif(ifp);
+	return 0;
+}
+
+/*
+ * Remove the interface's kernel VIF and release its index.
+ * Returns 0 on success, -1 if the interface holds no VIF.
+ */
+int pim_if_del_vif(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+
+	if (pim_ifp->mroute_vif_index < 1) {
+		zlog_warn("%s: vif_index=%d < 1 on interface %s ifindex=%d",
+			  __func__, pim_ifp->mroute_vif_index, ifp->name,
+			  ifp->ifindex);
+		return -1;
+	}
+
+	/* if the device was a pim_vxlan iif/oif update vxlan mroute entries */
+	pim_vxlan_del_vif(ifp);
+
+	gm_ifp_teardown(ifp);
+
+	pim_mroute_del_vif(ifp);
+
+	/*
+	 Update vif_index
+	 */
+	pim_ifp->pim->iface_vif_index[pim_ifp->mroute_vif_index] = 0;
+
+	pim_ifp->mroute_vif_index = -1;
+	return 0;
+}
+
+// DBS - VRF Revisit
+/*
+ * Linear scan of the instance's interfaces for the one holding the
+ * given VIF index; returns NULL if not found.
+ */
+struct interface *pim_if_find_by_vif_index(struct pim_instance *pim,
+					   ifindex_t vif_index)
+{
+	struct interface *ifp;
+
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		if (ifp->info) {
+			struct pim_interface *pim_ifp;
+			pim_ifp = ifp->info;
+
+			if (vif_index == pim_ifp->mroute_vif_index)
+				return ifp;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ pim_if_add_vif() uses ifindex as vif_index
+ */
+/* Map a kernel ifindex to the PIM VIF index; -1 if the interface is
+ * unknown or has no PIM state.
+ */
+int pim_if_find_vifindex_by_ifindex(struct pim_instance *pim, ifindex_t ifindex)
+{
+	struct pim_interface *pim_ifp;
+	struct interface *ifp;
+
+	ifp = if_lookup_by_index(ifindex, pim->vrf->vrf_id);
+	if (!ifp || !ifp->info)
+		return -1;
+	pim_ifp = ifp->info;
+
+	return pim_ifp->mroute_vif_index;
+}
+
+/*
+ * LAN prune delay is usable only when every neighbor on the interface
+ * advertised the LAN Prune Delay option (counter of non-advertisers
+ * is zero).
+ */
+int pim_if_lan_delay_enabled(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+	assert(pim_ifp->pim_number_of_nonlandelay_neighbors >= 0);
+
+	return pim_ifp->pim_number_of_nonlandelay_neighbors == 0;
+}
+
+/*
+ * Effective propagation delay: the highest value advertised by the
+ * neighbors when LAN delay is enabled on the link, otherwise the
+ * protocol default.
+ */
+uint16_t pim_if_effective_propagation_delay_msec(struct interface *ifp)
+{
+	if (pim_if_lan_delay_enabled(ifp)) {
+		struct pim_interface *pim_ifp;
+		pim_ifp = ifp->info;
+		return pim_ifp->pim_neighbors_highest_propagation_delay_msec;
+	} else {
+		return PIM_DEFAULT_PROPAGATION_DELAY_MSEC;
+	}
+}
+
+/*
+ * Effective override interval: the highest value advertised by the
+ * neighbors when LAN delay is enabled on the link, otherwise the
+ * protocol default.
+ */
+uint16_t pim_if_effective_override_interval_msec(struct interface *ifp)
+{
+	if (pim_if_lan_delay_enabled(ifp)) {
+		struct pim_interface *pim_ifp;
+		pim_ifp = ifp->info;
+		return pim_ifp->pim_neighbors_highest_override_interval_msec;
+	} else {
+		return PIM_DEFAULT_OVERRIDE_INTERVAL_MSEC;
+	}
+}
+
+/*
+ * Random override timer value: uniform in
+ * [0, effective_override_interval_msec].
+ */
+int pim_if_t_override_msec(struct interface *ifp)
+{
+	int effective_override_interval_msec;
+	int t_override_msec;
+
+	effective_override_interval_msec =
+		pim_if_effective_override_interval_msec(ifp);
+
+	t_override_msec =
+		frr_weak_random() % (effective_override_interval_msec + 1);
+
+	return t_override_msec;
+}
+
+/*
+ * J/P override interval = propagation delay + override interval.
+ * NOTE(review): sum of two uint16_t values is returned as uint16_t and
+ * could wrap if both are near the maximum - confirm configured ranges
+ * prevent this.
+ */
+uint16_t pim_if_jp_override_interval_msec(struct interface *ifp)
+{
+	return pim_if_effective_propagation_delay_msec(ifp)
+	       + pim_if_effective_override_interval_msec(ifp);
+}
+
+/*
+ RFC 4601: 4.1.6. State Summarization Macros
+
+ The function NBR( I, A ) uses information gathered through PIM Hello
+ messages to map the IP address A of a directly connected PIM
+ neighbor router on interface I to the primary IP address of the same
+ router (Section 4.3.4). The primary IP address of a neighbor is the
+ address that it uses as the source of its PIM Hello messages.
+*/
+/* Look up a neighbor on this interface by primary or secondary
+ * address; returns NULL if PIM is disabled or no neighbor matches.
+ */
+struct pim_neighbor *pim_if_find_neighbor(struct interface *ifp, pim_addr addr)
+{
+	struct listnode *neighnode;
+	struct pim_neighbor *neigh;
+	struct pim_interface *pim_ifp;
+	struct prefix p;
+
+	assert(ifp);
+
+	pim_ifp = ifp->info;
+	if (!pim_ifp) {
+		zlog_warn("%s: multicast not enabled on interface %s", __func__,
+			  ifp->name);
+		return 0;
+	}
+
+	pim_addr_to_prefix(&p, addr);
+
+	for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, neighnode,
+				  neigh)) {
+
+		/* primary address ? */
+		if (!pim_addr_cmp(neigh->source_addr, addr))
+			return neigh;
+
+		/* secondary address ? */
+		if (pim_neighbor_find_secondary(neigh, &p))
+			return neigh;
+	}
+
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug(
+			"%s: neighbor not found for address %pPA on interface %s",
+			__func__, &addr, ifp->name);
+
+	return NULL;
+}
+
+/*
+ * Join suppression timer in msec, or 0 when suppression is disabled
+ * on this interface.
+ */
+long pim_if_t_suppressed_msec(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+	long t_suppressed_msec;
+	uint32_t ramount = 0;
+
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+
+	/* join suppression disabled ? */
+	if (pim_ifp->pim_can_disable_join_suppression)
+		return 0;
+
+	/* t_suppressed = t_periodic * rand(1.1, 1.4) */
+	/* ramount is the factor scaled by 1000: uniform in [1100, 1400] */
+	ramount = 1100 + (frr_weak_random() % (1400 - 1100 + 1));
+	t_suppressed_msec = router->t_periodic * ramount;
+
+	return t_suppressed_msec;
+}
+
+/* Free one static IGMP/MLD join entry (does not close its socket). */
+static void gm_join_free(struct gm_join *ij)
+{
+	XFREE(MTYPE_PIM_IGMP_JOIN, ij);
+}
+
+/*
+ * Find the static join entry matching (source, group) exactly;
+ * returns NULL when absent.
+ */
+static struct gm_join *gm_join_find(struct list *join_list, pim_addr group_addr,
+				    pim_addr source_addr)
+{
+	struct listnode *node;
+	struct gm_join *ij;
+
+	assert(join_list);
+
+	for (ALL_LIST_ELEMENTS_RO(join_list, node, ij)) {
+		/* pim_addr_cmp() == 0 means equal */
+		if ((!pim_addr_cmp(group_addr, ij->group_addr)) &&
+		    (!pim_addr_cmp(source_addr, ij->source_addr)))
+			return ij;
+	}
+
+	return 0;
+}
+
+/*
+ * Open a raw IGMP/MLD socket and join (source, group) on the given
+ * interface.  Returns the socket fd, or a negative value on failure
+ * (stats counters updated either way).
+ */
+static int gm_join_sock(const char *ifname, ifindex_t ifindex,
+			pim_addr group_addr, pim_addr source_addr,
+			struct pim_interface *pim_ifp)
+{
+	int join_fd;
+
+	pim_ifp->igmp_ifstat_joins_sent++;
+
+	join_fd = pim_socket_raw(IPPROTO_GM);
+	if (join_fd < 0) {
+		pim_ifp->igmp_ifstat_joins_failed++;
+		return -1;
+	}
+
+	if (pim_gm_join_source(join_fd, ifindex, group_addr, source_addr)) {
+		zlog_warn(
+			"%s: setsockopt(fd=%d) failure for " GM
+			" group %pPAs source %pPAs ifindex %d on interface %s: errno=%d: %s",
+			__func__, join_fd, &group_addr, &source_addr, ifindex,
+			ifname, errno, safe_strerror(errno));
+
+		pim_ifp->igmp_ifstat_joins_failed++;
+
+		close(join_fd);
+		return -2;
+	}
+
+	return join_fd;
+}
+
+/*
+ * Allocate and register a new static join entry on the interface,
+ * opening its kernel socket.  Returns NULL if the socket could not be
+ * set up.
+ */
+static struct gm_join *gm_join_new(struct interface *ifp, pim_addr group_addr,
+				   pim_addr source_addr)
+{
+	struct pim_interface *pim_ifp;
+	struct gm_join *ij;
+	int join_fd;
+
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+
+	join_fd = gm_join_sock(ifp->name, ifp->ifindex, group_addr, source_addr,
+			       pim_ifp);
+	if (join_fd < 0) {
+		zlog_warn("%s: gm_join_sock() failure for " GM
+			  " group %pPAs source %pPAs on interface %s",
+			  __func__, &group_addr, &source_addr, ifp->name);
+		return 0;
+	}
+
+	ij = XCALLOC(MTYPE_PIM_IGMP_JOIN, sizeof(*ij));
+
+	ij->sock_fd = join_fd;
+	ij->group_addr = group_addr;
+	ij->source_addr = source_addr;
+	ij->sock_creation = pim_time_monotonic_sec();
+
+	listnode_add(pim_ifp->gm_join_list, ij);
+
+	return ij;
+}
+
+/*
+ * Configure a static IGMP/MLD join for (source, group) on the
+ * interface; idempotent if the join already exists.
+ * NOTE(review): gm_join_new() failure (NULL) is ignored and ferr_ok()
+ * is still returned - confirm this best-effort behavior is intended.
+ */
+ferr_r pim_if_gm_join_add(struct interface *ifp, pim_addr group_addr,
+			  pim_addr source_addr)
+{
+	struct pim_interface *pim_ifp;
+	struct gm_join *ij;
+
+	pim_ifp = ifp->info;
+	if (!pim_ifp) {
+		return ferr_cfg_invalid("multicast not enabled on interface %s",
+					ifp->name);
+	}
+
+	/* join list is created lazily on first static join */
+	if (!pim_ifp->gm_join_list) {
+		pim_ifp->gm_join_list = list_new();
+		pim_ifp->gm_join_list->del = (void (*)(void *))gm_join_free;
+	}
+
+	ij = gm_join_find(pim_ifp->gm_join_list, group_addr, source_addr);
+
+	/* This interface has already been configured to join this IGMP/MLD
+	 * group
+	 */
+	if (ij) {
+		return ferr_ok();
+	}
+
+	(void)gm_join_new(ifp, group_addr, source_addr);
+
+	if (PIM_DEBUG_GM_EVENTS) {
+		zlog_debug(
+			"%s: issued static " GM
+			" join for channel (S,G)=(%pPA,%pPA) on interface %s",
+			__func__, &source_addr, &group_addr, ifp->name);
+	}
+
+	return ferr_ok();
+}
+
+/*
+ * Remove a static IGMP/MLD join: close its socket, unlink and free the
+ * entry, and drop the join list once it empties.  Returns 0 on
+ * success, negative codes on lookup failure.
+ */
+int pim_if_gm_join_del(struct interface *ifp, pim_addr group_addr,
+		       pim_addr source_addr)
+{
+	struct pim_interface *pim_ifp;
+	struct gm_join *ij;
+
+	pim_ifp = ifp->info;
+	if (!pim_ifp) {
+		zlog_warn("%s: multicast not enabled on interface %s", __func__,
+			  ifp->name);
+		return -1;
+	}
+
+	if (!pim_ifp->gm_join_list) {
+		zlog_warn("%s: no " GM " join on interface %s", __func__,
+			  ifp->name);
+		return -2;
+	}
+
+	ij = gm_join_find(pim_ifp->gm_join_list, group_addr, source_addr);
+	if (!ij) {
+		zlog_warn("%s: could not find " GM
+			  " group %pPAs source %pPAs on interface %s",
+			  __func__, &group_addr, &source_addr, ifp->name);
+		return -3;
+	}
+
+	if (close(ij->sock_fd)) {
+		zlog_warn(
+			"%s: failure closing sock_fd=%d for " GM
+			" group %pPAs source %pPAs on interface %s: errno=%d: %s",
+			__func__, ij->sock_fd, &group_addr, &source_addr,
+			ifp->name, errno, safe_strerror(errno));
+		/* warning only */
+	}
+	listnode_delete(pim_ifp->gm_join_list, ij);
+	gm_join_free(ij);
+	/* tear down the list when the last join is gone */
+	if (listcount(pim_ifp->gm_join_list) < 1) {
+		list_delete(&pim_ifp->gm_join_list);
+		pim_ifp->gm_join_list = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Remove every static IGMP/MLD join on the interface.  Uses the
+ * deletion-safe list iterator since pim_if_gm_join_del() unlinks
+ * entries (and may free the list itself on the last one).
+ */
+__attribute__((unused))
+static void pim_if_gm_join_del_all(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+	struct listnode *node;
+	struct listnode *nextnode;
+	struct gm_join *ij;
+
+	pim_ifp = ifp->info;
+	if (!pim_ifp) {
+		zlog_warn("%s: multicast not enabled on interface %s", __func__,
+			  ifp->name);
+		return;
+	}
+
+	if (!pim_ifp->gm_join_list)
+		return;
+
+	for (ALL_LIST_ELEMENTS(pim_ifp->gm_join_list, node, nextnode, ij))
+		pim_if_gm_join_del(ifp, ij->group_addr, ij->source_addr);
+}
+
+/*
+ RFC 4601
+
+ Transitions from "I am Assert Loser" State
+
+ Current Winner's GenID Changes or NLT Expires
+
+ The Neighbor Liveness Timer associated with the current winner
+ expires or we receive a Hello message from the current winner
+ reporting a different GenID from the one it previously reported.
+ This indicates that the current winner's interface or router has
+ gone down (and may have come back up), and so we must assume it no
+ longer knows it was the winner.
+ */
+/* Run assert action A5 on every (S,G,I) channel whose assert winner
+ * was the neighbor that just went down.
+ */
+void pim_if_assert_on_neighbor_down(struct interface *ifp, pim_addr neigh_addr)
+{
+	struct pim_interface *pim_ifp;
+	struct pim_ifchannel *ch;
+
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+
+	RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
+		/* Is (S,G,I) assert loser ? */
+		if (ch->ifassert_state != PIM_IFASSERT_I_AM_LOSER)
+			continue;
+		/* Dead neighbor was winner ? */
+		if (pim_addr_cmp(ch->ifassert_winner, neigh_addr))
+			continue;
+
+		assert_action_a5(ch);
+	}
+}
+
+/*
+ * Recompute JoinDesired for every upstream reachable from this
+ * interface's channels.  A two-pass flag scheme ensures each shared
+ * upstream is evaluated only once even if several channels point at it.
+ */
+void pim_if_update_join_desired(struct pim_interface *pim_ifp)
+{
+	struct pim_ifchannel *ch;
+
+	/* clear off flag from interface's upstreams */
+	RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
+		PIM_UPSTREAM_FLAG_UNSET_DR_JOIN_DESIRED_UPDATED(
+			ch->upstream->flags);
+	}
+
+	/* scan per-interface (S,G,I) state on this I interface */
+	RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
+		struct pim_upstream *up = ch->upstream;
+
+		/* already handled via another channel of the same upstream */
+		if (PIM_UPSTREAM_FLAG_TEST_DR_JOIN_DESIRED_UPDATED(up->flags))
+			continue;
+
+		/* update join_desired for the global (S,G) state */
+		pim_upstream_update_join_desired(pim_ifp->pim, up);
+		PIM_UPSTREAM_FLAG_SET_DR_JOIN_DESIRED_UPDATED(up->flags);
+	}
+}
+
+/*
+ * Re-evaluate AssertTrackingDesired for every channel on the
+ * interface; silently ignores interfaces without PIM state.
+ */
+void pim_if_update_assert_tracking_desired(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+	struct pim_ifchannel *ch;
+
+	pim_ifp = ifp->info;
+	if (!pim_ifp)
+		return;
+
+	RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
+		pim_ifchannel_update_assert_tracking_desired(ch);
+	}
+}
+
+/*
+ * PIM wants to have an interface pointer for everything it does.
+ * The pimreg is a special interface that we have that is not
+ * quite an interface but a VIF is created for it.
+ */
+void pim_if_create_pimreg(struct pim_instance *pim)
+{
+	char pimreg_name[INTERFACE_NAMSIZ];
+
+	if (!pim->regiface) {
+		/* non-default VRFs get the kernel table id appended */
+		if (pim->vrf->vrf_id == VRF_DEFAULT)
+			strlcpy(pimreg_name, PIMREG, sizeof(pimreg_name));
+		else
+			snprintf(pimreg_name, sizeof(pimreg_name), PIMREG "%u",
+				 pim->vrf->data.l.table_id);
+
+		pim->regiface = if_get_by_name(pimreg_name, pim->vrf->vrf_id,
+					       pim->vrf->name);
+		pim->regiface->ifindex = PIM_OIF_PIM_REGISTER_VIF;
+
+		/*
+		 * The pimreg interface might have been removed from
+		 * the kernel with the VRF's deletion.  It must be
+		 * recreated, so delete the old one first.
+		 */
+		if (pim->regiface->info)
+			pim_if_delete(pim->regiface);
+
+		pim_if_new(pim->regiface, false, false, true,
+			   false /*vxlan_term*/);
+
+		/*
+		 * On vrf moves we delete the interface if there
+		 * is nothing going on with it. We cannot have
+		 * the pimregiface deleted.
+		 */
+		pim->regiface->configured = true;
+
+	}
+}
+
+/*
+ * If src falls inside one of the interface's connected prefixes (or
+ * matches a point-to-point peer's destination), return the matching
+ * *local* connected prefix; NULL otherwise.
+ */
+struct prefix *pim_if_connected_to_source(struct interface *ifp, pim_addr src)
+{
+	struct listnode *cnode;
+	struct connected *c;
+	struct prefix p;
+
+	if (!ifp)
+		return NULL;
+
+	pim_addr_to_prefix(&p, src);
+
+	for (ALL_LIST_ELEMENTS_RO(ifp->connected, cnode, c)) {
+		if (c->address->family != PIM_AF)
+			continue;
+		if (prefix_match(c->address, &p))
+			return c->address;
+		if (CONNECTED_PEER(c) && prefix_match(c->destination, &p))
+			/* this is not a typo, on PtP we need to return the
+			 * *local* address that lines up with src.
+			 */
+			return c->address;
+	}
+
+	return NULL;
+}
+
+/* Thin predicate wrapper: true when the interface is a VRF device. */
+bool pim_if_is_vrf_device(struct interface *ifp)
+{
+	if (if_is_vrf(ifp))
+		return true;
+
+	return false;
+}
+
+/* Count the (S,G,I) channels on this interface (O(n) tree walk). */
+int pim_if_ifchannel_count(struct pim_interface *pim_ifp)
+{
+	struct pim_ifchannel *ch;
+	int count = 0;
+
+	RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
+		count++;
+	}
+
+	return count;
+}
+
+/*
+ * Zebra callback: an interface was created (or moved into this VRF).
+ * If it is already operative, bring up its addresses and make sure the
+ * pimreg device exists; for VRF devices (IPv4 builds), always open a
+ * PIM socket; register a vxlan termination device when named so.
+ */
+static int pim_ifp_create(struct interface *ifp)
+{
+	struct pim_instance *pim;
+
+	pim = ifp->vrf->info;
+	if (PIM_DEBUG_ZEBRA) {
+		zlog_debug(
+			"%s: %s index %d vrf %s(%u) flags %ld metric %d mtu %d operative %d",
+			__func__, ifp->name, ifp->ifindex, ifp->vrf->name,
+			ifp->vrf->vrf_id, (long)ifp->flags, ifp->metric,
+			ifp->mtu, if_is_operative(ifp));
+	}
+
+	if (if_is_operative(ifp)) {
+		struct pim_interface *pim_ifp;
+
+		pim_ifp = ifp->info;
+		/*
+		 * If we have a pim_ifp already and this is an if_add
+		 * that means that we probably have a vrf move event
+		 * If that is the case, set the proper vrfness.
+		 */
+		if (pim_ifp)
+			pim_ifp->pim = pim;
+		pim_if_addr_add_all(ifp);
+
+		/*
+		 * Due to ordering issues based upon when
+		 * a command is entered we should ensure that
+		 * the pim reg is created for this vrf if we
+		 * have configuration for it already.
+		 *
+		 * this is a no-op if it's already been done.
+		 */
+		pim_if_create_pimreg(pim);
+	}
+
+#if PIM_IPV == 4
+	/*
+	 * If we are a vrf device that is up, open up the pim_socket for
+	 * listening
+	 * to incoming pim messages irrelevant if the user has configured us
+	 * for pim or not.
+	 */
+	if (pim_if_is_vrf_device(ifp)) {
+		struct pim_interface *pim_ifp;
+
+		if (!ifp->info) {
+			pim_ifp = pim_if_new(ifp, false, false, false,
+					     false /*vxlan_term*/);
+			ifp->info = pim_ifp;
+		}
+
+		pim_sock_add(ifp);
+	}
+
+	/* sizeof() includes the NUL, so this is an exact name match */
+	if (!strncmp(ifp->name, PIM_VXLAN_TERM_DEV_NAME,
+		     sizeof(PIM_VXLAN_TERM_DEV_NAME))) {
+		if (pim->mcast_if_count < MAXVIFS)
+			pim_vxlan_add_term_dev(pim, ifp);
+		else
+			/* fixed typo: "termimation" -> "termination" */
+			zlog_warn(
+				"%s: Cannot enable pim on %s. MAXVIFS(%d) reached. Deleting and readding the vxlan termination device after unconfiguring pim from other interfaces may succeed.",
+				__func__, ifp->name, MAXVIFS);
+	}
+#endif
+
+	return 0;
+}
+
+/*
+ * Zebra callback: interface came up.  Re-binds the pim_ifp to this
+ * VRF's instance, (re)adds all addresses, and - for pimreg<table_id>
+ * devices - enslaves them to the matching VRF master interface.
+ */
+static int pim_ifp_up(struct interface *ifp)
+{
+	uint32_t table_id;
+	struct pim_interface *pim_ifp;
+	struct pim_instance *pim;
+
+	if (PIM_DEBUG_ZEBRA) {
+		zlog_debug(
+			"%s: %s index %d vrf %s(%u) flags %ld metric %d mtu %d operative %d",
+			__func__, ifp->name, ifp->ifindex, ifp->vrf->name,
+			ifp->vrf->vrf_id, (long)ifp->flags, ifp->metric,
+			ifp->mtu, if_is_operative(ifp));
+	}
+
+	pim = ifp->vrf->info;
+
+	pim_ifp = ifp->info;
+	/*
+	 * If we have a pim_ifp already and this is an if_add
+	 * that means that we probably have a vrf move event
+	 * If that is the case, set the proper vrfness.
+	 */
+	if (pim_ifp)
+		pim_ifp->pim = pim;
+
+	/*
+	  pim_if_addr_add_all() suffices for bringing up both IGMP and
+	  PIM
+	*/
+	pim_if_addr_add_all(ifp);
+
+	/*
+	 * If we have a pimreg device callback and it's for a specific
+	 * table set the master appropriately
+	 */
+	if (sscanf(ifp->name, "" PIMREG "%" SCNu32, &table_id) == 1) {
+		struct vrf *vrf;
+		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+			if ((table_id == vrf->data.l.table_id)
+			    && (ifp->vrf->vrf_id != vrf->vrf_id)) {
+				struct interface *master = if_lookup_by_name(
+					vrf->name, vrf->vrf_id);
+
+				if (!master) {
+					zlog_debug(
+						"%s: Unable to find Master interface for %s",
+						__func__, vrf->name);
+					return 0;
+				}
+				pim_zebra_interface_set_master(master, ifp);
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * Zebra callback: interface went down.  Tears down channels,
+ * addresses, the PIM socket, the VIF, and resets interface stats.
+ */
+static int pim_ifp_down(struct interface *ifp)
+{
+	if (PIM_DEBUG_ZEBRA) {
+		zlog_debug(
+			"%s: %s index %d vrf %s(%u) flags %ld metric %d mtu %d operative %d",
+			__func__, ifp->name, ifp->ifindex, ifp->vrf->name,
+			ifp->vrf->vrf_id, (long)ifp->flags, ifp->metric,
+			ifp->mtu, if_is_operative(ifp));
+	}
+
+	if (!if_is_operative(ifp)) {
+		pim_ifchannel_delete_all(ifp);
+		/*
+		  pim_if_addr_del_all() suffices for shutting down IGMP,
+		  but not for shutting down PIM
+		*/
+		pim_if_addr_del_all(ifp);
+
+		/*
+		  pim_sock_delete() closes the socket, stops read and timer
+		  threads,
+		  and kills all neighbors.
+		*/
+		if (ifp->info) {
+			pim_sock_delete(ifp, "link down");
+		}
+	}
+
+	if (ifp->info) {
+		pim_if_del_vif(ifp);
+		pim_ifstat_reset(ifp);
+	}
+
+	return 0;
+}
+
+/*
+ * Zebra callback: interface is being destroyed.  Removes remaining
+ * addresses and, for IPv4, unregisters a vxlan termination device.
+ */
+static int pim_ifp_destroy(struct interface *ifp)
+{
+	if (PIM_DEBUG_ZEBRA) {
+		zlog_debug(
+			"%s: %s index %d vrf %s(%u) flags %ld metric %d mtu %d operative %d",
+			__func__, ifp->name, ifp->ifindex, ifp->vrf->name,
+			ifp->vrf->vrf_id, (long)ifp->flags, ifp->metric,
+			ifp->mtu, if_is_operative(ifp));
+	}
+
+	if (!if_is_operative(ifp))
+		pim_if_addr_del_all(ifp);
+
+#if PIM_IPV == 4
+	struct pim_instance *pim;
+
+	pim = ifp->vrf->info;
+	if (pim && pim->vxlan.term_if == ifp)
+		pim_vxlan_del_term_dev(pim);
+#endif
+
+	return 0;
+}
+
+/* if_add hook: nothing to do at interface creation time. */
+static int pim_if_new_hook(struct interface *ifp)
+{
+	return 0;
+}
+
+/* if_del hook: drop PIM state if the interface carried any. */
+static int pim_if_delete_hook(struct interface *ifp)
+{
+	if (ifp->info)
+		pim_if_delete(ifp);
+
+	return 0;
+}
+
+/* Register interface lifecycle hooks and zebra interface callbacks. */
+void pim_iface_init(void)
+{
+	hook_register_prio(if_add, 0, pim_if_new_hook);
+	hook_register_prio(if_del, 0, pim_if_delete_hook);
+
+	if_zapi_callbacks(pim_ifp_create, pim_ifp_up, pim_ifp_down,
+			  pim_ifp_destroy);
+}
+
+/*
+ * Clear local membership state on the interface - but only when at
+ * least one of PIM or IGMP/MLD has been disabled (both still enabled
+ * means membership is still valid).
+ */
+static void pim_if_membership_clear(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+
+	if (pim_ifp->pim_enable && pim_ifp->gm_enable)
+		return;
+
+	pim_ifchannel_membership_clear(ifp);
+}
+
+/*
+ * Unconfigure PIM on an interface: clear membership, kill the socket
+ * and neighbors, update upstream nexthops, and release the interface
+ * state entirely if IGMP/MLD is not enabled either.
+ */
+void pim_pim_interface_delete(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+
+	if (!pim_ifp)
+		return;
+
+	pim_ifp->pim_enable = false;
+
+	pim_if_membership_clear(ifp);
+
+	/*
+	 * pim_sock_delete() removes all neighbors from
+	 * pim_ifp->pim_neighbor_list.
+	 */
+	pim_sock_delete(ifp, "pim unconfigured on interface");
+	pim_upstream_nh_if_update(pim_ifp->pim, ifp);
+
+	if (!pim_ifp->gm_enable) {
+		pim_if_addr_del_all(ifp);
+		pim_if_delete(ifp);
+	}
+}
+
+/*
+ * Unconfigure IGMP (v4) / MLD (v6) on an interface, and release the
+ * interface state entirely if PIM is not enabled either.
+ */
+void pim_gm_interface_delete(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+
+	if (!pim_ifp)
+		return;
+
+	pim_ifp->gm_enable = false;
+
+	pim_if_membership_clear(ifp);
+
+#if PIM_IPV == 4
+	igmp_sock_delete_all(ifp);
+#else
+	gm_ifp_teardown(ifp);
+#endif
+
+	if (!pim_ifp->pim_enable)
+		pim_if_delete(ifp);
+}
diff --git a/pimd/pim_iface.h b/pimd/pim_iface.h
new file mode 100644
index 0000000..0312f71
--- /dev/null
+++ b/pimd/pim_iface.h
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_IFACE_H
+#define PIM_IFACE_H
+
+#include <zebra.h>
+
+#include "if.h"
+#include "vty.h"
+#include "vrf.h"
+#include "zclient.h"
+#include "ferr.h"
+
+#include "pim_igmp.h"
+#include "pim_upstream.h"
+#include "bfd.h"
+#include "pim_str.h"
+
+#define PIM_IF_IS_DELETED(ifp) ((ifp)->ifindex == IFINDEX_INTERNAL)
+
+#define PIM_I_am_DR(pim_ifp) \
+ !pim_addr_cmp((pim_ifp)->pim_dr_addr, (pim_ifp)->primary_address)
+#define PIM_I_am_DualActive(pim_ifp) (pim_ifp)->activeactive == true
+
+/* Macros for interface flags */
+
+/*
+ * PIM needs to know if hello is required to send before other PIM messages
+ * like Join, prune, assert would go out
+ */
+#define PIM_IF_FLAG_HELLO_SENT (1 << 0)
+
+#define PIM_IF_FLAG_TEST_HELLO_SENT(flags) ((flags)&PIM_IF_FLAG_HELLO_SENT)
+
+#define PIM_IF_FLAG_SET_HELLO_SENT(flags) ((flags) |= PIM_IF_FLAG_HELLO_SENT)
+
+#define PIM_IF_FLAG_UNSET_HELLO_SENT(flags) ((flags) &= ~PIM_IF_FLAG_HELLO_SENT)
+
+struct pim_iface_upstream_switch {
+ pim_addr address;
+ struct list *us;
+};
+
+enum pim_secondary_addr_flags {
+ PIM_SEC_ADDRF_NONE = 0,
+ PIM_SEC_ADDRF_STALE = (1 << 0)
+};
+
+struct pim_secondary_addr {
+ struct prefix addr;
+ enum pim_secondary_addr_flags flags;
+};
+
+struct gm_if;
+
+/*
+ * Per-interface PIM + IGMP/MLD state, hung off struct interface->info.
+ */
+struct pim_interface {
+	bool pim_enable : 1;
+	bool pim_can_disable_join_suppression : 1;
+	bool pim_passive_enable : 1;
+
+	bool gm_enable : 1;
+
+	ifindex_t mroute_vif_index;
+	struct pim_instance *pim;
+
+#if PIM_IPV == 6
+	/* link-locals: MLD uses lowest addr, PIM uses highest... */
+	pim_addr ll_lowest;
+	pim_addr ll_highest;
+#endif
+
+	pim_addr primary_address; /* remember addr to detect change */
+	struct list *sec_addr_list; /* list of struct pim_secondary_addr */
+	pim_addr update_source;	  /* user can statically set the primary
+				   * address of the interface */
+
+	int igmp_version;	  /* IGMP version */
+	int mld_version;
+	int gm_default_robustness_variable;  /* IGMP or MLD QRV */
+	int gm_default_query_interval;	     /* IGMP or MLD secs between general
+						queries */
+	int gm_query_max_response_time_dsec; /* IGMP or MLD Max Response Time in
+						dsecs for general queries */
+	int gm_specific_query_max_response_time_dsec; /* IGMP or MLD Max
+							 Response Time in dsecs
+							 called as last member
+							 query interval, defines
+							 the maximum response
+							 time advertised in IGMP
+							 group-specific
+							 queries */
+	int gm_last_member_query_count;		      /* IGMP or MLD last member
+							 query count
+						       */
+	struct list *gm_socket_list; /* list of struct IGMP or MLD sock */
+	struct list *gm_join_list;   /* list of struct IGMP or MLD join */
+	struct list *gm_group_list;  /* list of struct IGMP or MLD group */
+	struct hash *gm_group_hash;
+
+	struct gm_if *mld;
+
+	int pim_sock_fd;		/* PIM socket file descriptor */
+	struct event *t_pim_sock_read;	/* thread for reading PIM socket */
+	int64_t pim_sock_creation;	/* timestamp of PIM socket creation */
+
+	struct event *t_pim_hello_timer;
+	int pim_hello_period;
+	int pim_default_holdtime;
+	int pim_triggered_hello_delay;
+	uint32_t pim_generation_id;
+	uint16_t pim_propagation_delay_msec; /* config */
+	uint16_t pim_override_interval_msec; /* config */
+	struct list *pim_neighbor_list;	     /* list of struct pim_neighbor */
+	struct list *upstream_switch_list;
+	struct pim_ifchannel_rb ifchannel_rb;
+
+	/* neighbors without lan_delay */
+	int pim_number_of_nonlandelay_neighbors;
+	uint16_t pim_neighbors_highest_propagation_delay_msec;
+	uint16_t pim_neighbors_highest_override_interval_msec;
+
+	/* DR Election */
+	int64_t pim_dr_election_last; /* timestamp */
+	int pim_dr_election_count;
+	int pim_dr_election_changes;
+	pim_addr pim_dr_addr;
+	uint32_t pim_dr_priority;	   /* config */
+	int pim_dr_num_nondrpri_neighbors; /* neighbors without dr_pri */
+
+	/* boundary prefix-list */
+	char *boundary_oil_plist;
+
+	/* Turn on Active-Active for this interface */
+	bool activeactive;
+	bool am_i_dr;
+
+	int64_t pim_ifstat_start; /* start timestamp for stats */
+	uint64_t pim_ifstat_bsm_rx;
+	uint64_t pim_ifstat_bsm_tx;
+	uint32_t pim_ifstat_hello_sent;
+	uint32_t pim_ifstat_hello_sendfail;
+	uint32_t pim_ifstat_hello_recv;
+	uint32_t pim_ifstat_hello_recvfail;
+	uint32_t pim_ifstat_join_recv;
+	uint32_t pim_ifstat_join_send;
+	uint32_t pim_ifstat_prune_recv;
+	uint32_t pim_ifstat_prune_send;
+	uint32_t pim_ifstat_reg_recv;
+	uint32_t pim_ifstat_reg_send;
+	uint32_t pim_ifstat_reg_stop_recv;
+	uint32_t pim_ifstat_reg_stop_send;
+	uint32_t pim_ifstat_assert_recv;
+	uint32_t pim_ifstat_assert_send;
+	uint32_t pim_ifstat_bsm_cfg_miss;
+	uint32_t pim_ifstat_ucast_bsm_cfg_miss;
+	uint32_t pim_ifstat_bsm_invalid_sz;
+	uint8_t flags; /* PIM_IF_FLAG_* bits */
+	bool bsm_enable; /* bsm processing enable */
+	bool ucast_bsm_accept; /* ucast bsm processing */
+
+	uint32_t igmp_ifstat_joins_sent;
+	uint32_t igmp_ifstat_joins_failed;
+	uint32_t igmp_peak_group_count;
+
+	/* per-interface BFD session configuration */
+	struct {
+		bool enabled;
+		uint32_t min_rx;
+		uint32_t min_tx;
+		uint8_t detection_multiplier;
+		char *profile;
+	} bfd_config;
+};
+
+/*
+ * if default_holdtime is set (>= 0), use it;
+ * otherwise default_holdtime is 3.5 * hello_period
+ */
+#define PIM_IF_DEFAULT_HOLDTIME(pim_ifp) \
+ (((pim_ifp)->pim_default_holdtime < 0) \
+ ? ((pim_ifp)->pim_hello_period * 7 / 2) \
+ : ((pim_ifp)->pim_default_holdtime))
+
+void pim_if_init(struct pim_instance *pim);
+void pim_if_terminate(struct pim_instance *pim);
+
+struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
+ bool ispimreg, bool is_vxlan_term);
+void pim_if_delete(struct interface *ifp);
+void pim_if_addr_add(struct connected *ifc);
+void pim_if_addr_del(struct connected *ifc, int force_prim_as_any);
+void pim_if_addr_add_all(struct interface *ifp);
+void pim_if_addr_del_all(struct interface *ifp);
+void pim_if_addr_del_all_igmp(struct interface *ifp);
+
+int pim_if_add_vif(struct interface *ifp, bool ispimreg, bool is_vxlan_term);
+int pim_if_del_vif(struct interface *ifp);
+void pim_if_add_vif_all(struct pim_instance *pim);
+void pim_if_del_vif_all(struct pim_instance *pim);
+
+struct interface *pim_if_find_by_vif_index(struct pim_instance *pim,
+ ifindex_t vif_index);
+int pim_if_find_vifindex_by_ifindex(struct pim_instance *pim,
+ ifindex_t ifindex);
+
+int pim_if_lan_delay_enabled(struct interface *ifp);
+uint16_t pim_if_effective_propagation_delay_msec(struct interface *ifp);
+uint16_t pim_if_effective_override_interval_msec(struct interface *ifp);
+uint16_t pim_if_jp_override_interval_msec(struct interface *ifp);
+struct pim_neighbor *pim_if_find_neighbor(struct interface *ifp, pim_addr addr);
+
+long pim_if_t_suppressed_msec(struct interface *ifp);
+int pim_if_t_override_msec(struct interface *ifp);
+
+pim_addr pim_find_primary_addr(struct interface *ifp);
+
+ferr_r pim_if_gm_join_add(struct interface *ifp, pim_addr group_addr,
+ pim_addr source_addr);
+int pim_if_gm_join_del(struct interface *ifp, pim_addr group_addr,
+ pim_addr source_addr);
+
+void pim_if_update_could_assert(struct interface *ifp);
+
+void pim_if_assert_on_neighbor_down(struct interface *ifp, pim_addr neigh_addr);
+
+void pim_if_rpf_interface_changed(struct interface *old_rpf_ifp,
+ struct pim_upstream *up);
+
+void pim_if_update_join_desired(struct pim_interface *pim_ifp);
+
+void pim_if_update_assert_tracking_desired(struct interface *ifp);
+
+void pim_if_create_pimreg(struct pim_instance *pim);
+
+struct prefix *pim_if_connected_to_source(struct interface *ifp, pim_addr src);
+int pim_update_source_set(struct interface *ifp, pim_addr source);
+
+bool pim_if_is_vrf_device(struct interface *ifp);
+
+int pim_if_ifchannel_count(struct pim_interface *pim_ifp);
+
+void pim_iface_init(void);
+void pim_pim_interface_delete(struct interface *ifp);
+void pim_gm_interface_delete(struct interface *ifp);
+
+#endif /* PIM_IFACE_H */
diff --git a/pimd/pim_ifchannel.c b/pimd/pim_ifchannel.c
new file mode 100644
index 0000000..da55189
--- /dev/null
+++ b/pimd/pim_ifchannel.c
@@ -0,0 +1,1494 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "linklist.h"
+#include "frrevent.h"
+#include "memory.h"
+#include "if.h"
+#include "vrf.h"
+#include "hash.h"
+#include "jhash.h"
+#include "prefix.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_str.h"
+#include "pim_iface.h"
+#include "pim_ifchannel.h"
+#include "pim_zebra.h"
+#include "pim_time.h"
+#include "pim_msg.h"
+#include "pim_pim.h"
+#include "pim_join.h"
+#include "pim_rpf.h"
+#include "pim_macro.h"
+#include "pim_oil.h"
+#include "pim_upstream.h"
+#include "pim_ssm.h"
+#include "pim_rp.h"
+#include "pim_mlag.h"
+
+RB_GENERATE(pim_ifchannel_rb, pim_ifchannel, pim_ifp_rb, pim_ifchannel_compare);
+
+int pim_ifchannel_compare(const struct pim_ifchannel *ch1,
+ const struct pim_ifchannel *ch2)
+{
+ struct pim_interface *pim_ifp1;
+ struct pim_interface *pim_ifp2;
+
+ pim_ifp1 = ch1->interface->info;
+ pim_ifp2 = ch2->interface->info;
+
+ if (pim_ifp1->mroute_vif_index < pim_ifp2->mroute_vif_index)
+ return -1;
+
+ if (pim_ifp1->mroute_vif_index > pim_ifp2->mroute_vif_index)
+ return 1;
+
+ return pim_sgaddr_cmp(ch1->sg, ch2->sg);
+}
+
+/*
+ * A (*,G) or a (*,*) is going away
+ * remove the parent pointer from
+ * those pointing at us
+ */
+static void pim_ifchannel_remove_children(struct pim_ifchannel *ch)
+{
+ struct pim_ifchannel *child;
+
+ if (!ch->sources)
+ return;
+
+ while (!list_isempty(ch->sources)) {
+ child = listnode_head(ch->sources);
+ child->parent = NULL;
+ listnode_delete(ch->sources, child);
+ }
+}
+
+/*
+ * A (*,G) or a (*,*) is being created
+ * find all the children that would point
+ * at us.
+ */
+static void pim_ifchannel_find_new_children(struct pim_ifchannel *ch)
+{
+ struct pim_interface *pim_ifp = ch->interface->info;
+ struct pim_ifchannel *child;
+
+ // Basic Sanity that we are not being silly
+ if (!pim_addr_is_any(ch->sg.src) && !pim_addr_is_any(ch->sg.grp))
+ return;
+
+ if (pim_addr_is_any(ch->sg.src) && pim_addr_is_any(ch->sg.grp))
+ return;
+
+ RB_FOREACH (child, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
+ if (!pim_addr_is_any(ch->sg.grp) &&
+ !pim_addr_cmp(child->sg.grp, ch->sg.grp) && (child != ch)) {
+ child->parent = ch;
+ listnode_add_sort(ch->sources, child);
+ }
+ }
+}
+
+void pim_ifchannel_delete(struct pim_ifchannel *ch)
+{
+ struct pim_interface *pim_ifp;
+ struct pim_upstream *up;
+
+ pim_ifp = ch->interface->info;
+
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: ifchannel entry %s(%s) del start", __func__,
+ ch->sg_str, ch->interface->name);
+
+ if (PIM_I_am_DualActive(pim_ifp)) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(
+ "%s: if-chnanel-%s is deleted from a Dual active Interface",
+ __func__, ch->sg_str);
+ /* Post Delete only if it is the last Dual-active Interface */
+ if (ch->upstream->dualactive_ifchannel_count == 1) {
+ pim_mlag_up_local_del(pim_ifp->pim, ch->upstream);
+ PIM_UPSTREAM_FLAG_UNSET_MLAG_INTERFACE(
+ ch->upstream->flags);
+ }
+ ch->upstream->dualactive_ifchannel_count--;
+ }
+
+ if (ch->upstream->channel_oil) {
+ uint32_t mask = PIM_OIF_FLAG_PROTO_PIM;
+ if (ch->upstream->flags & PIM_UPSTREAM_FLAG_MASK_SRC_IGMP)
+ mask |= PIM_OIF_FLAG_PROTO_GM;
+
+ /*
+ * A S,G RPT channel can have an empty oil, we also
+ * need to take into account the fact that a ifchannel
+ * might have been suppressing a *,G ifchannel from
+ * being inherited. So let's figure out what
+ * needs to be done here
+ */
+ if (!pim_addr_is_any(ch->sg.src) && ch->parent &&
+ pim_upstream_evaluate_join_desired_interface(
+ ch->upstream, ch, ch->parent))
+ pim_channel_add_oif(ch->upstream->channel_oil,
+ ch->interface,
+ PIM_OIF_FLAG_PROTO_STAR,
+ __func__);
+
+ pim_channel_del_oif(ch->upstream->channel_oil,
+ ch->interface, mask, __func__);
+ /*
+ * Do we have any S,G's that are inheriting?
+ * Nuke from on high too.
+ */
+ if (ch->upstream->sources) {
+ struct pim_upstream *child;
+ struct listnode *up_node;
+
+ for (ALL_LIST_ELEMENTS_RO(ch->upstream->sources,
+ up_node, child))
+ pim_channel_del_inherited_oif(
+ child->channel_oil,
+ ch->interface,
+ __func__);
+ }
+ }
+
+ /*
+ * When this channel is removed
+ * we need to find all our children
+ * and make sure our pointers are fixed
+ */
+ pim_ifchannel_remove_children(ch);
+
+ if (ch->sources)
+ list_delete(&ch->sources);
+
+ listnode_delete(ch->upstream->ifchannels, ch);
+
+ up = ch->upstream;
+
+ /* upstream is common across ifchannels, check if upstream's
+ ifchannel list is empty before deleting upstream_del
+ ref count will take care of it.
+ */
+ if (ch->upstream->ref_count > 0)
+ up = pim_upstream_del(pim_ifp->pim, ch->upstream, __func__);
+
+ else {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug(
+ "%s: Avoiding deletion of upstream with ref_count %d from ifchannel(%s): %s",
+ __func__, ch->upstream->ref_count,
+ ch->interface->name, ch->sg_str);
+ }
+
+ ch->upstream = NULL;
+
+ EVENT_OFF(ch->t_ifjoin_expiry_timer);
+ EVENT_OFF(ch->t_ifjoin_prune_pending_timer);
+ EVENT_OFF(ch->t_ifassert_timer);
+
+ if (ch->parent) {
+ listnode_delete(ch->parent->sources, ch);
+ ch->parent = NULL;
+ }
+
+ RB_REMOVE(pim_ifchannel_rb, &pim_ifp->ifchannel_rb, ch);
+
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: ifchannel entry %s(%s) is deleted ", __func__,
+ ch->sg_str, ch->interface->name);
+
+ XFREE(MTYPE_PIM_IFCHANNEL, ch);
+
+ if (up)
+ pim_upstream_update_join_desired(pim_ifp->pim, up);
+}
+
+void pim_ifchannel_delete_all(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+ struct pim_ifchannel *ch;
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp)
+ return;
+
+ while (!RB_EMPTY(pim_ifchannel_rb, &pim_ifp->ifchannel_rb)) {
+ ch = RB_ROOT(pim_ifchannel_rb, &pim_ifp->ifchannel_rb);
+
+ pim_ifchannel_ifjoin_switch(__func__, ch, PIM_IFJOIN_NOINFO);
+ pim_ifchannel_delete(ch);
+ }
+}
+
+void delete_on_noinfo(struct pim_ifchannel *ch)
+{
+ if (ch->local_ifmembership == PIM_IFMEMBERSHIP_NOINFO
+ && ch->ifjoin_state == PIM_IFJOIN_NOINFO
+ && ch->t_ifjoin_expiry_timer == NULL)
+ pim_ifchannel_delete(ch);
+}
+
/*
 * Switch the downstream per-interface join state machine of "ch" to
 * new_state and apply the transition's side effects.
 *
 * For a (*,G) channel the inherited outgoing interface of every (S,G)
 * child upstream is updated: removed when we drop to NOINFO, (re)added
 * when we move to JOIN.  Any transition to or from NOINFO stamps
 * ifjoin_creation (for uptime display) and re-evaluates JoinDesired,
 * CouldAssert and AssertTrackingDesired.
 */
void pim_ifchannel_ifjoin_switch(const char *caller, struct pim_ifchannel *ch,
				 enum pim_ifjoin_state new_state)
{
	enum pim_ifjoin_state old_state = ch->ifjoin_state;
	struct pim_interface *pim_ifp = ch->interface->info;
	struct pim_ifchannel *child_ch;

	if (PIM_DEBUG_PIM_EVENTS)
		zlog_debug(
			"PIM_IFCHANNEL(%s): %s is switching from %s to %s",
			ch->interface->name, ch->sg_str,
			pim_ifchannel_ifjoin_name(ch->ifjoin_state, ch->flags),
			pim_ifchannel_ifjoin_name(new_state, 0));


	/* No-op transition: just log and bail. */
	if (old_state == new_state) {
		if (PIM_DEBUG_PIM_EVENTS) {
			zlog_debug(
				"%s called by %s: non-transition on state %d (%s)",
				__func__, caller, new_state,
				pim_ifchannel_ifjoin_name(new_state, 0));
		}
		return;
	}

	ch->ifjoin_state = new_state;

	/* (*,G) channel: propagate the change to inheriting (S,G)s. */
	if (pim_addr_is_any(ch->sg.src)) {
		struct pim_upstream *up = ch->upstream;
		struct pim_upstream *child;
		struct listnode *up_node;

		if (up) {
			if (ch->ifjoin_state == PIM_IFJOIN_NOINFO) {
				for (ALL_LIST_ELEMENTS_RO(up->sources, up_node,
							  child)) {
					struct channel_oil *c_oil =
						child->channel_oil;

					if (PIM_DEBUG_PIM_TRACE)
						zlog_debug(
							"%s %s: Prune(S,G)=%s from %s",
							__FILE__, __func__,
							child->sg_str,
							up->sg_str);
					if (!c_oil)
						continue;

					/*
					 * If the S,G has no if channel and the
					 * c_oil still
					 * has output here then the *,G was
					 * supplying the implied
					 * if channel. So remove it.
					 */
					if (oil_if_has(c_oil,
						       pim_ifp->mroute_vif_index))
						pim_channel_del_inherited_oif(
							c_oil, ch->interface,
							__func__);
				}
			}
			if (ch->ifjoin_state == PIM_IFJOIN_JOIN) {
				for (ALL_LIST_ELEMENTS_RO(up->sources, up_node,
							  child)) {
					if (PIM_DEBUG_PIM_TRACE)
						zlog_debug(
							"%s %s: Join(S,G)=%s from %s",
							__FILE__, __func__,
							child->sg_str,
							up->sg_str);

					/* check if the channel can be
					 * inherited into the SG's OIL
					 */
					child_ch = pim_ifchannel_find(
						ch->interface,
						&child->sg);
					if (pim_upstream_eval_inherit_if(
						    child, child_ch, ch)) {
						pim_channel_add_oif(
							child->channel_oil,
							ch->interface,
							PIM_OIF_FLAG_PROTO_STAR,
							__func__);
						pim_upstream_update_join_desired(
							pim_ifp->pim, child);
					}
				}
			}
		}
	}
	/* Transition to/from NOINFO ? */
	if ((old_state == PIM_IFJOIN_NOINFO)
	    || (new_state == PIM_IFJOIN_NOINFO)) {

		if (PIM_DEBUG_PIM_EVENTS) {
			zlog_debug("PIM_IFCHANNEL_%s: (S,G)=%s on interface %s",
				   ((new_state == PIM_IFJOIN_NOINFO) ? "DOWN"
								     : "UP"),
				   ch->sg_str, ch->interface->name);
		}

		/*
		  Record uptime of state transition to/from NOINFO
		*/
		ch->ifjoin_creation = pim_time_monotonic_sec();

		pim_upstream_update_join_desired(pim_ifp->pim, ch->upstream);
		pim_ifchannel_update_could_assert(ch);
		pim_ifchannel_update_assert_tracking_desired(ch);
	}
}
+
+const char *pim_ifchannel_ifjoin_name(enum pim_ifjoin_state ifjoin_state,
+ int flags)
+{
+ switch (ifjoin_state) {
+ case PIM_IFJOIN_NOINFO:
+ if (PIM_IF_FLAG_TEST_S_G_RPT(flags))
+ return "SGRpt(NI)";
+ else
+ return "NOINFO";
+ case PIM_IFJOIN_JOIN:
+ return "JOIN";
+ case PIM_IFJOIN_PRUNE:
+ if (PIM_IF_FLAG_TEST_S_G_RPT(flags))
+ return "SGRpt(P)";
+ else
+ return "PRUNE";
+ case PIM_IFJOIN_PRUNE_PENDING:
+ if (PIM_IF_FLAG_TEST_S_G_RPT(flags))
+ return "SGRpt(PP)";
+ else
+ return "PRUNEP";
+ case PIM_IFJOIN_PRUNE_TMP:
+ if (PIM_IF_FLAG_TEST_S_G_RPT(flags))
+ return "SGRpt(P')";
+ else
+ return "PRUNET";
+ case PIM_IFJOIN_PRUNE_PENDING_TMP:
+ if (PIM_IF_FLAG_TEST_S_G_RPT(flags))
+ return "SGRpt(PP')";
+ else
+ return "PRUNEPT";
+ }
+
+ return "ifjoin_bad_state";
+}
+
+const char *pim_ifchannel_ifassert_name(enum pim_ifassert_state ifassert_state)
+{
+ switch (ifassert_state) {
+ case PIM_IFASSERT_NOINFO:
+ return "NOINFO";
+ case PIM_IFASSERT_I_AM_WINNER:
+ return "WINNER";
+ case PIM_IFASSERT_I_AM_LOSER:
+ return "LOSER";
+ }
+
+ return "ifassert_bad_state";
+}
+
/*
  RFC 4601: 4.6.5.  Assert State Macros

  AssertWinner(S,G,I) defaults to NULL and AssertWinnerMetric(S,G,I)
  defaults to Infinity when in the NoInfo state.
*/
void reset_ifassert_state(struct pim_ifchannel *ch)
{
	/* Cancel any pending assert timer, then fall back to the RFC
	 * defaults above: NoInfo state, no winner, infinite metric. */
	EVENT_OFF(ch->t_ifassert_timer);

	pim_ifassert_winner_set(ch, PIM_IFASSERT_NOINFO, PIMADDR_ANY,
				router->infinite_assert_metric);
}
+
+struct pim_ifchannel *pim_ifchannel_find(struct interface *ifp, pim_sgaddr *sg)
+{
+ struct pim_interface *pim_ifp;
+ struct pim_ifchannel *ch;
+ struct pim_ifchannel lookup;
+
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp) {
+ zlog_warn("%s: (S,G)=%pSG: multicast not enabled on interface %s",
+ __func__, sg, ifp->name);
+ return NULL;
+ }
+
+ lookup.sg = *sg;
+ lookup.interface = ifp;
+ ch = RB_FIND(pim_ifchannel_rb, &pim_ifp->ifchannel_rb, &lookup);
+
+ return ch;
+}
+
+static void ifmembership_set(struct pim_ifchannel *ch,
+ enum pim_ifmembership membership)
+{
+ struct pim_interface *pim_ifp = ch->interface->info;
+
+ if (ch->local_ifmembership == membership)
+ return;
+
+ if (PIM_DEBUG_PIM_EVENTS) {
+ zlog_debug("%s: (S,G)=%s membership now is %s on interface %s",
+ __func__, ch->sg_str,
+ membership == PIM_IFMEMBERSHIP_INCLUDE ? "INCLUDE"
+ : "NOINFO",
+ ch->interface->name);
+ }
+
+ ch->local_ifmembership = membership;
+
+ pim_upstream_update_join_desired(pim_ifp->pim, ch->upstream);
+ pim_ifchannel_update_could_assert(ch);
+ pim_ifchannel_update_assert_tracking_desired(ch);
+}
+
+
+void pim_ifchannel_membership_clear(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+ struct pim_ifchannel *ch;
+
+ pim_ifp = ifp->info;
+ assert(pim_ifp);
+
+ RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb)
+ ifmembership_set(ch, PIM_IFMEMBERSHIP_NOINFO);
+}
+
+void pim_ifchannel_delete_on_noinfo(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+ struct pim_ifchannel *ch, *ch_tmp;
+
+ pim_ifp = ifp->info;
+ assert(pim_ifp);
+
+ RB_FOREACH_SAFE (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb, ch_tmp)
+ delete_on_noinfo(ch);
+}
+
+/*
+ * For a given Interface, if we are given a S,G
+ * Find the *,G (If we have it).
+ * If we are passed a *,G, find the *,* ifchannel
+ * if we have it.
+ */
+static struct pim_ifchannel *pim_ifchannel_find_parent(struct pim_ifchannel *ch)
+{
+ pim_sgaddr parent_sg = ch->sg;
+ struct pim_ifchannel *parent = NULL;
+
+ // (S,G)
+ if (!pim_addr_is_any(parent_sg.src) &&
+ !pim_addr_is_any(parent_sg.grp)) {
+ parent_sg.src = PIMADDR_ANY;
+ parent = pim_ifchannel_find(ch->interface, &parent_sg);
+
+ if (parent)
+ listnode_add(parent->sources, ch);
+ return parent;
+ }
+
+ return NULL;
+}
+
+struct pim_ifchannel *pim_ifchannel_add(struct interface *ifp, pim_sgaddr *sg,
+ uint8_t source_flags, int up_flags)
+{
+ struct pim_interface *pim_ifp;
+ struct pim_ifchannel *ch;
+ struct pim_upstream *up;
+
+ ch = pim_ifchannel_find(ifp, sg);
+ if (ch) {
+ if (up_flags == PIM_UPSTREAM_FLAG_MASK_SRC_PIM)
+ PIM_IF_FLAG_SET_PROTO_PIM(ch->flags);
+
+ if (up_flags == PIM_UPSTREAM_FLAG_MASK_SRC_IGMP)
+ PIM_IF_FLAG_SET_PROTO_IGMP(ch->flags);
+
+ ch->upstream->flags |= up_flags;
+
+ return ch;
+ }
+
+ pim_ifp = ifp->info;
+
+ ch = XCALLOC(MTYPE_PIM_IFCHANNEL, sizeof(*ch));
+
+ ch->flags = 0;
+ if ((source_flags & PIM_ENCODE_RPT_BIT)
+ && !(source_flags & PIM_ENCODE_WC_BIT))
+ PIM_IF_FLAG_SET_S_G_RPT(ch->flags);
+
+ ch->interface = ifp;
+ ch->sg = *sg;
+ snprintfrr(ch->sg_str, sizeof(ch->sg_str), "%pSG", sg);
+ ch->parent = pim_ifchannel_find_parent(ch);
+ if (pim_addr_is_any(ch->sg.src)) {
+ ch->sources = list_new();
+ ch->sources->cmp =
+ (int (*)(void *, void *))pim_ifchannel_compare;
+ } else
+ ch->sources = NULL;
+
+ pim_ifchannel_find_new_children(ch);
+ ch->local_ifmembership = PIM_IFMEMBERSHIP_NOINFO;
+
+ ch->ifjoin_state = PIM_IFJOIN_NOINFO;
+ ch->t_ifjoin_expiry_timer = NULL;
+ ch->t_ifjoin_prune_pending_timer = NULL;
+ ch->ifjoin_creation = 0;
+
+ RB_INSERT(pim_ifchannel_rb, &pim_ifp->ifchannel_rb, ch);
+
+ up = pim_upstream_add(pim_ifp->pim, sg, NULL, up_flags, __func__, ch);
+
+ ch->upstream = up;
+
+ listnode_add_sort(up->ifchannels, ch);
+
+ ch->ifassert_my_metric = pim_macro_ch_my_assert_metric_eval(ch);
+ ch->ifassert_winner_metric = pim_macro_ch_my_assert_metric_eval(ch);
+
+ ch->ifassert_winner = PIMADDR_ANY;
+
+ /* Assert state */
+ ch->t_ifassert_timer = NULL;
+ ch->ifassert_state = PIM_IFASSERT_NOINFO;
+ reset_ifassert_state(ch);
+ if (pim_macro_ch_could_assert_eval(ch))
+ PIM_IF_FLAG_SET_COULD_ASSERT(ch->flags);
+ else
+ PIM_IF_FLAG_UNSET_COULD_ASSERT(ch->flags);
+
+ if (pim_macro_assert_tracking_desired_eval(ch))
+ PIM_IF_FLAG_SET_ASSERT_TRACKING_DESIRED(ch->flags);
+ else
+ PIM_IF_FLAG_UNSET_ASSERT_TRACKING_DESIRED(ch->flags);
+
+ /*
+ * advertise MLAG Data to MLAG peer
+ */
+ if (PIM_I_am_DualActive(pim_ifp)) {
+ up->dualactive_ifchannel_count++;
+ /* Sync once for upstream */
+ if (up->dualactive_ifchannel_count == 1) {
+ PIM_UPSTREAM_FLAG_SET_MLAG_INTERFACE(up->flags);
+ pim_mlag_up_local_add(pim_ifp->pim, up);
+ }
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(
+ "%s: New Dual active if-chnanel is added to upstream:%s count:%d, flags:0x%x",
+ __func__, up->sg_str,
+ up->dualactive_ifchannel_count, up->flags);
+ }
+
+ if (up_flags == PIM_UPSTREAM_FLAG_MASK_SRC_PIM)
+ PIM_IF_FLAG_SET_PROTO_PIM(ch->flags);
+
+ if (up_flags == PIM_UPSTREAM_FLAG_MASK_SRC_IGMP)
+ PIM_IF_FLAG_SET_PROTO_IGMP(ch->flags);
+
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: ifchannel %s(%s) is created ", __func__,
+ ch->sg_str, ch->interface->name);
+
+ return ch;
+}
+
/*
 * Take the downstream state machine to NoInfo: stop forwarding on the
 * channel, drop the PIM protocol flags, then delete the channel if
 * nothing else (e.g. local membership) keeps it alive.  "ch" may be
 * freed by the time this returns.
 */
static void ifjoin_to_noinfo(struct pim_ifchannel *ch)
{
	pim_ifchannel_ifjoin_switch(__func__, ch, PIM_IFJOIN_NOINFO);
	pim_forward_stop(ch);

	PIM_UPSTREAM_FLAG_UNSET_SRC_PIM(ch->upstream->flags);

	PIM_IF_FLAG_UNSET_PROTO_PIM(ch->flags);

	delete_on_noinfo(ch);
}
+
+static void on_ifjoin_expiry_timer(struct event *t)
+{
+ struct pim_ifchannel *ch;
+
+ ch = EVENT_ARG(t);
+
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: ifchannel %s expiry timer", __func__,
+ ch->sg_str);
+
+ ifjoin_to_noinfo(ch);
+ /* ch may have been deleted */
+}
+
/*
 * Prune-Pending Timer callback.
 *
 * For a normal (S,G)/(*,G) prune: optionally send a PruneEcho (when
 * more than one neighbor is on the LAN) and move to NoInfo.  For an
 * S,G,rpt prune: move to Prune state, trigger an SGRpt message toward
 * the RP and make sure the (S,G) mroute is installed (possibly with an
 * empty olist) so traffic is dropped.  "ch" may be deleted by the time
 * this returns.
 */
static void on_ifjoin_prune_pending_timer(struct event *t)
{
	struct pim_ifchannel *ch;
	int send_prune_echo; /* boolean */
	struct interface *ifp;
	struct pim_interface *pim_ifp;

	ch = EVENT_ARG(t);

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: IFCHANNEL%pSG %s Prune Pending Timer Popped",
			   __func__, &ch->sg,
			   pim_ifchannel_ifjoin_name(ch->ifjoin_state, ch->flags));

	if (ch->ifjoin_state == PIM_IFJOIN_PRUNE_PENDING) {
		ifp = ch->interface;
		pim_ifp = ifp->info;
		if (!PIM_IF_FLAG_TEST_S_G_RPT(ch->flags)) {
			/* Send PruneEcho(S,G) ? */
			send_prune_echo =
				(listcount(pim_ifp->pim_neighbor_list) > 1);

			if (send_prune_echo) {
				struct pim_rpf rpf;

				rpf.source_nexthop.interface = ifp;
				rpf.rpf_addr = pim_ifp->primary_address;
				pim_jp_agg_single_upstream_send(
					&rpf, ch->upstream, 0);
			}

			ifjoin_to_noinfo(ch);
		} else {
			/* If SGRpt flag is set on ifchannel, Trigger SGRpt
			 * message on RP path upon prune timer expiry.
			 */
			ch->ifjoin_state = PIM_IFJOIN_PRUNE;
			struct pim_upstream *parent =
				ch->upstream->parent;

			pim_upstream_update_join_desired(pim_ifp->pim,
							 ch->upstream);

			pim_jp_agg_single_upstream_send(&parent->rpf,
							parent, true);
			/*
			 * SGRpt prune pending expiry has to install
			 * SG entry with empty olist to drop the SG
			 * traffic incase no other intf exists.
			 * On that scenario, SG entry wouldn't have
			 * got installed until Prune pending timer
			 * expired. So install now.
			 */
			pim_channel_del_oif(
				ch->upstream->channel_oil, ifp,
				PIM_OIF_FLAG_PROTO_STAR, __func__);
			pim_channel_del_oif(ch->upstream->channel_oil, ifp,
					    PIM_OIF_FLAG_PROTO_PIM, __func__);
			if (!ch->upstream->channel_oil->installed)
				pim_upstream_mroute_add(
					ch->upstream->channel_oil,
					__func__);
		}
		/* from here ch may have been deleted */
	}
}
+
/*
 * A Join/Prune was received whose Upstream Neighbor Address is NOT our
 * own primary address on recv_ifp.  If our upstream (S,G) entry is in
 * Joined state and the message was directed at our RPF'(S,G) neighbor,
 * react per the PIM-SM join suppression / prune override rules:
 * suppress our own Join when a Join is seen, or shorten the join timer
 * to t_override when a Prune is seen.
 */
static void check_recv_upstream(int is_join, struct interface *recv_ifp,
				pim_addr upstream, pim_sgaddr *sg,
				uint8_t source_flags, int holdtime)
{
	struct pim_upstream *up;
	struct pim_interface *pim_ifp = recv_ifp->info;
	pim_addr rpf_addr;

	/* Upstream (S,G) in Joined state ? */
	up = pim_upstream_find(pim_ifp->pim, sg);
	if (!up)
		return;
	if (up->join_state != PIM_UPSTREAM_JOINED)
		return;

	/* Upstream (S,G) in Joined state */

	if (pim_rpf_addr_is_inaddr_any(&up->rpf)) {
		/* RPF'(S,G) not found */
		zlog_warn("%s %s: RPF'%s not found", __FILE__, __func__,
			  up->sg_str);
		return;
	}

	rpf_addr = up->rpf.rpf_addr;

	/* upstream directed to RPF'(S,G) ? */
	if (pim_addr_cmp(upstream, rpf_addr)) {
		zlog_warn(
			"%s %s: (S,G)=%s upstream=%pPAs not directed to RPF'(S,G)=%pPAs on interface %s",
			__FILE__, __func__, up->sg_str, &upstream, &rpf_addr,
			recv_ifp->name);
		return;
	}
	/* upstream directed to RPF'(S,G) */

	if (is_join) {
		/* Join(S,G) to RPF'(S,G) */
		pim_upstream_join_suppress(up, up->rpf.rpf_addr, holdtime);
		return;
	}

	/* Prune to RPF'(S,G) */

	if (source_flags & PIM_RPT_BIT_MASK) {
		if (source_flags & PIM_WILDCARD_BIT_MASK) {
			/* Prune(*,G) to RPF'(S,G) */
			pim_upstream_join_timer_decrease_to_t_override(
				"Prune(*,G)", up);
			return;
		}

		/* Prune(S,G,rpt) to RPF'(S,G) */
		pim_upstream_join_timer_decrease_to_t_override("Prune(S,G,rpt)",
							       up);
		return;
	}

	/* Prune(S,G) to RPF'(S,G) */
	pim_upstream_join_timer_decrease_to_t_override("Prune(S,G)", up);
}
+
+static int nonlocal_upstream(int is_join, struct interface *recv_ifp,
+ pim_addr upstream, pim_sgaddr *sg,
+ uint8_t source_flags, uint16_t holdtime)
+{
+ struct pim_interface *recv_pim_ifp;
+ int is_local; /* boolean */
+
+ recv_pim_ifp = recv_ifp->info;
+ assert(recv_pim_ifp);
+
+ is_local = !pim_addr_cmp(upstream, recv_pim_ifp->primary_address);
+
+ if (is_local)
+ return 0;
+
+ if (PIM_DEBUG_PIM_TRACE_DETAIL)
+ zlog_warn(
+ "%s: recv %s (S,G)=%pSG to non-local upstream=%pPAs on %s",
+ __func__, is_join ? "join" : "prune", sg, &upstream,
+ recv_ifp->name);
+
+ /*
+ * Since recv upstream addr was not directed to our primary
+ * address, check if we should react to it in any way.
+ */
+ check_recv_upstream(is_join, recv_ifp, upstream, sg, source_flags,
+ holdtime);
+
+ return 1; /* non-local */
+}
+
+static void pim_ifchannel_ifjoin_handler(struct pim_ifchannel *ch,
+ struct pim_interface *pim_ifp)
+{
+ pim_ifchannel_ifjoin_switch(__func__, ch, PIM_IFJOIN_JOIN);
+ PIM_IF_FLAG_UNSET_S_G_RPT(ch->flags);
+ /* check if the interface qualifies as an immediate
+ * OIF
+ */
+ if (pim_upstream_evaluate_join_desired_interface(
+ ch->upstream, ch,
+ NULL /*starch*/)) {
+ pim_channel_add_oif(ch->upstream->channel_oil,
+ ch->interface,
+ PIM_OIF_FLAG_PROTO_PIM,
+ __func__);
+ pim_upstream_update_join_desired(pim_ifp->pim,
+ ch->upstream);
+ }
+}
+
+
/*
 * Process a received Join addressed to our primary address on ifp
 * (joins to another router's address are filtered/handled by
 * nonlocal_upstream()).
 *
 * Implements the downstream per-interface (S,G) state machine
 * transitions for a received Join, plus assert action A5 when we are
 * currently Assert Loser (RFC 4601 4.6.1).  The Expiry Timer is
 * (re)started from the message holdtime unless holdtime is 0xFFFF,
 * which means "never expire".
 */
void pim_ifchannel_join_add(struct interface *ifp, pim_addr neigh_addr,
			    pim_addr upstream, pim_sgaddr *sg,
			    uint8_t source_flags, uint16_t holdtime)
{
	struct pim_interface *pim_ifp;
	struct pim_ifchannel *ch;

	if (nonlocal_upstream(1 /* join */, ifp, upstream, sg, source_flags,
			      holdtime)) {
		return;
	}

	/* Find-or-create the channel (and its upstream). */
	ch = pim_ifchannel_add(ifp, sg, source_flags,
			       PIM_UPSTREAM_FLAG_MASK_SRC_PIM);

	/*
	  RFC 4601: 4.6.1.  (S,G) Assert Message State Machine

	  Transitions from "I am Assert Loser" State

	  Receive Join(S,G) on Interface I

	  We receive a Join(S,G) that has the Upstream Neighbor Address
	  field set to my primary IP address on interface I.  The action is
	  to transition to NoInfo state, delete this (S,G) assert state
	  (Actions A5 below), and allow the normal PIM Join/Prune mechanisms
	  to operate.

	  Notice: The nonlocal_upstream() test above ensures the upstream
	  address of the join message is our primary address.
	 */
	if (ch->ifassert_state == PIM_IFASSERT_I_AM_LOSER) {
		zlog_warn("%s: Assert Loser recv Join%s from %pPA on %s",
			  __func__, ch->sg_str, &neigh_addr, ifp->name);

		assert_action_a5(ch);
	}

	pim_ifp = ifp->info;
	assert(pim_ifp);

	switch (ch->ifjoin_state) {
	case PIM_IFJOIN_NOINFO:
		/* NoInfo -> Join: start forwarding if in the olist, and
		 * mark ourselves LHR when the parent allows it. */
		pim_ifchannel_ifjoin_switch(__func__, ch, PIM_IFJOIN_JOIN);
		if (pim_macro_chisin_oiflist(ch)) {
			pim_upstream_inherited_olist(pim_ifp->pim,
						     ch->upstream);
			pim_forward_start(ch);
		}
		/*
		 * If we are going to be a LHR, we need to note it
		 */
		if (ch->upstream->parent &&
		    (PIM_UPSTREAM_FLAG_TEST_CAN_BE_LHR(
			    ch->upstream->parent->flags))
		    && !(ch->upstream->flags
			 & PIM_UPSTREAM_FLAG_MASK_SRC_LHR)) {
			pim_upstream_ref(ch->upstream,
					 PIM_UPSTREAM_FLAG_MASK_SRC_LHR,
					 __func__);
			pim_upstream_keep_alive_timer_start(
				ch->upstream, pim_ifp->pim->keep_alive_time);
		}
		break;
	case PIM_IFJOIN_JOIN:
		assert(!ch->t_ifjoin_prune_pending_timer);

		/*
		  In the JOIN state ch->t_ifjoin_expiry_timer may be NULL due to
		  a
		  previously received join message with holdtime=0xFFFF.
		 */
		if (ch->t_ifjoin_expiry_timer) {
			unsigned long remain = event_timer_remain_second(
				ch->t_ifjoin_expiry_timer);
			if (remain > holdtime) {
				/*
				  RFC 4601: 4.5.3.  Receiving (S,G) Join/Prune
				  Messages

				  Transitions from Join State

				  The (S,G) downstream state machine on
				  interface I remains in
				  Join state, and the Expiry Timer (ET) is
				  restarted, set to
				  maximum of its current value and the HoldTime
				  from the
				  triggering Join/Prune message.

				  Conclusion: Do not change the ET if the
				  current value is
				  higher than the received join holdtime.
				 */
				return;
			}
		}
		EVENT_OFF(ch->t_ifjoin_expiry_timer);
		break;
	case PIM_IFJOIN_PRUNE:
		/* SGRpt Join cancels the (S,G,rpt) prune entirely. */
		if (source_flags & PIM_ENCODE_RPT_BIT) {
			pim_ifchannel_ifjoin_switch(__func__, ch,
						    PIM_IFJOIN_NOINFO);
			EVENT_OFF(ch->t_ifjoin_expiry_timer);
			delete_on_noinfo(ch);
			return;
		} else
			pim_ifchannel_ifjoin_handler(ch, pim_ifp);
		break;
	case PIM_IFJOIN_PRUNE_PENDING:
		/*
		 * Transitions from Prune-Pending State (Receive Join)
		 * RFC 7761 Sec 4.5.2:
		 * The (S,G) downstream state machine on interface I
		 * transitions to the Join state.  The Prune-Pending Timer is
		 * canceled (without triggering an expiry event).  The
		 * Expiry Timer (ET) is restarted and is then set to the
		 * maximum of its current value and the HoldTime from the
		 * triggering Join/Prune message.
		 */
		EVENT_OFF(ch->t_ifjoin_prune_pending_timer);

		/* Check if SGRpt join Received */
		if ((source_flags & PIM_ENCODE_RPT_BIT) &&
		    !pim_addr_is_any(sg->src)) {
			/*
			 * Transitions from Prune-Pending State (Rcv SGRpt Join)
			 * RFC 7761 Sec 4.5.3:
			 * The (S,G,rpt) downstream state machine on interface
			 * I transitions to the NoInfo state.The ET and PPT are
			 * cancelled.
			 */
			EVENT_OFF(ch->t_ifjoin_expiry_timer);
			pim_ifchannel_ifjoin_switch(__func__, ch,
						    PIM_IFJOIN_NOINFO);
			return;
		}

		pim_ifchannel_ifjoin_handler(ch, pim_ifp);

		if (ch->t_ifjoin_expiry_timer) {
			unsigned long remain = event_timer_remain_second(
				ch->t_ifjoin_expiry_timer);

			if (remain > holdtime)
				return;
		}
		EVENT_OFF(ch->t_ifjoin_expiry_timer);

		break;
	case PIM_IFJOIN_PRUNE_TMP:
		break;
	case PIM_IFJOIN_PRUNE_PENDING_TMP:
		break;
	}

	/* holdtime 0xFFFF means "never expire": leave ET stopped. */
	if (holdtime != 0xFFFF) {
		event_add_timer(router->master, on_ifjoin_expiry_timer, ch,
				holdtime, &ch->t_ifjoin_expiry_timer);
	}
}
+
/*
 * Process a received Prune addressed to our primary address on ifp.
 *
 * Implements the downstream per-interface (S,G)/(S,G,rpt) state
 * machine transitions for a received Prune (RFC 7761 4.5.2 / 4.5.3):
 * start the Prune-Pending Timer (J/P override interval with multiple
 * neighbors, zero otherwise) and manage the Expiry Timer from the
 * message holdtime.
 */
void pim_ifchannel_prune(struct interface *ifp, pim_addr upstream,
			 pim_sgaddr *sg, uint8_t source_flags,
			 uint16_t holdtime)
{
	struct pim_ifchannel *ch;
	struct pim_interface *pim_ifp;
	int jp_override_interval_msec;

	if (nonlocal_upstream(0 /* prune */, ifp, upstream, sg, source_flags,
			      holdtime)) {
		return;
	}

	/* A non-RPT prune with no existing channel is meaningless. */
	ch = pim_ifchannel_find(ifp, sg);
	if (!ch && !(source_flags & PIM_ENCODE_RPT_BIT)) {
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug("%s: Received prune with no relevant ifchannel %s%pSG state: %d",
				   __func__, ifp->name, sg,
				   source_flags);
		return;
	}

	ch = pim_ifchannel_add(ifp, sg, source_flags,
			       PIM_UPSTREAM_FLAG_MASK_SRC_PIM);

	pim_ifp = ifp->info;

	switch (ch->ifjoin_state) {
	case PIM_IFJOIN_NOINFO:
		/* Only an (S,G,rpt) prune creates state from NoInfo. */
		if (source_flags & PIM_ENCODE_RPT_BIT) {
			if (!(source_flags & PIM_ENCODE_WC_BIT))
				PIM_IF_FLAG_SET_S_G_RPT(ch->flags);

			ch->ifjoin_state = PIM_IFJOIN_PRUNE_PENDING;
			if (listcount(pim_ifp->pim_neighbor_list) > 1)
				jp_override_interval_msec =
					pim_if_jp_override_interval_msec(ifp);
			else
				jp_override_interval_msec =
					0; /* schedule to expire immediately */
			/* If we called ifjoin_prune() directly instead, care
			   should
			   be taken not to use "ch" afterwards since it would be
			   deleted. */

			EVENT_OFF(ch->t_ifjoin_prune_pending_timer);
			EVENT_OFF(ch->t_ifjoin_expiry_timer);
			event_add_timer_msec(router->master,
					     on_ifjoin_prune_pending_timer, ch,
					     jp_override_interval_msec,
					     &ch->t_ifjoin_prune_pending_timer);
			event_add_timer(router->master, on_ifjoin_expiry_timer,
					ch, holdtime,
					&ch->t_ifjoin_expiry_timer);
			pim_upstream_update_join_desired(pim_ifp->pim,
							 ch->upstream);
		}
		break;
	case PIM_IFJOIN_PRUNE_PENDING:
		/* nothing to do */
		break;
	case PIM_IFJOIN_JOIN:
		/*
		 * The (S,G) downstream state machine on interface I
		 * transitions to the Prune-Pending state.  The
		 * Prune-Pending Timer is started.  It is set to the
		 * J/P_Override_Interval(I) if the router has more than one
		 * neighbor on that interface; otherwise, it is set to zero,
		 * causing it to expire immediately.
		 */

		pim_ifchannel_ifjoin_switch(__func__, ch,
					    PIM_IFJOIN_PRUNE_PENDING);

		if (listcount(pim_ifp->pim_neighbor_list) > 1)
			jp_override_interval_msec =
				pim_if_jp_override_interval_msec(ifp);
		else
			jp_override_interval_msec =
				0; /* schedule to expire immediately */
		/* If we called ifjoin_prune() directly instead, care should
		   be taken not to use "ch" afterwards since it would be
		   deleted. */
		EVENT_OFF(ch->t_ifjoin_prune_pending_timer);
		event_add_timer_msec(router->master,
				     on_ifjoin_prune_pending_timer, ch,
				     jp_override_interval_msec,
				     &ch->t_ifjoin_prune_pending_timer);
		break;
	case PIM_IFJOIN_PRUNE:
		if (source_flags & PIM_ENCODE_RPT_BIT) {
			EVENT_OFF(ch->t_ifjoin_prune_pending_timer);
			/*
			 * While in Prune State, Receive SGRpt Prune.
			 * RFC 7761 Sec 4.5.3:
			 * The (S,G,rpt) downstream state machine on interface I
			 * remains in Prune state.  The Expiry Timer (ET) is
			 * restarted and is then set to the maximum of its
			 * current value and the HoldTime from the triggering
			 * Join/Prune message.
			 */
			if (ch->t_ifjoin_expiry_timer) {
				unsigned long rem = event_timer_remain_second(
					ch->t_ifjoin_expiry_timer);

				if (rem > holdtime)
					return;
				EVENT_OFF(ch->t_ifjoin_expiry_timer);
			}

			event_add_timer(router->master, on_ifjoin_expiry_timer,
					ch, holdtime,
					&ch->t_ifjoin_expiry_timer);
		}
		break;
	case PIM_IFJOIN_PRUNE_TMP:
		if (source_flags & PIM_ENCODE_RPT_BIT) {
			ch->ifjoin_state = PIM_IFJOIN_PRUNE;
			EVENT_OFF(ch->t_ifjoin_expiry_timer);
			event_add_timer(router->master, on_ifjoin_expiry_timer,
					ch, holdtime,
					&ch->t_ifjoin_expiry_timer);
		}
		break;
	case PIM_IFJOIN_PRUNE_PENDING_TMP:
		if (source_flags & PIM_ENCODE_RPT_BIT) {
			ch->ifjoin_state = PIM_IFJOIN_PRUNE_PENDING;
			EVENT_OFF(ch->t_ifjoin_expiry_timer);
			event_add_timer(router->master, on_ifjoin_expiry_timer,
					ch, holdtime,
					&ch->t_ifjoin_expiry_timer);
		}
		break;
	}
}
+
/*
 * Register a local (IGMP/MLD, or VXLAN-termination) membership for
 * (S,G) on ifp.
 *
 * Creates (or refreshes) the ifchannel and sets its membership to
 * INCLUDE.  For a (*,G) membership, the channel is additionally
 * inherited into the olist of every (S,G) child with a known iif, and
 * the register interface is plumbed for SPT-infinity handling.
 *
 * Returns 1 on success (including the ignored (*,G)-on-SSM case),
 * 0 when PIM is not enabled/configured on the interface.
 */
int pim_ifchannel_local_membership_add(struct interface *ifp, pim_sgaddr *sg,
				       bool is_vxlan)
{
	struct pim_ifchannel *ch, *starch;
	struct pim_interface *pim_ifp;
	struct pim_instance *pim;
	int up_flags;

	/* PIM enabled on interface? */
	pim_ifp = ifp->info;
	if (!pim_ifp) {
		if (PIM_DEBUG_EVENTS)
			zlog_debug("%s:%pSG Expected pim interface setup for %s",
				   __func__, sg, ifp->name);
		return 0;
	}

	if (!pim_ifp->pim_enable) {
		if (PIM_DEBUG_EVENTS)
			zlog_debug("%s:%pSG PIM is not configured on this interface %s",
				   __func__, sg, ifp->name);
		return 0;
	}

	pim = pim_ifp->pim;

	/* skip (*,G) ch creation if G is of type SSM */
	if (pim_addr_is_any(sg->src)) {
		if (pim_is_grp_ssm(pim, sg->grp)) {
			if (PIM_DEBUG_PIM_EVENTS)
				zlog_debug("%s: local membership (S,G)=%pSG ignored as group is SSM",
					   __func__, sg);
			return 1;
		}
	}

	/* vxlan term mroutes use ipmr-lo as local member to
	 * pull down multicast vxlan tunnel traffic
	 */
	up_flags = is_vxlan ? PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM :
		PIM_UPSTREAM_FLAG_MASK_SRC_IGMP;
	ch = pim_ifchannel_add(ifp, sg, 0, up_flags);

	ifmembership_set(ch, PIM_IFMEMBERSHIP_INCLUDE);

	if (pim_addr_is_any(sg->src)) {
		struct pim_upstream *up = pim_upstream_find(pim, sg);
		struct pim_upstream *child;
		struct listnode *up_node;

		starch = ch;

		/* Inherit the new (*,G) OIF into each (S,G) child. */
		for (ALL_LIST_ELEMENTS_RO(up->sources, up_node, child)) {
			if (PIM_DEBUG_EVENTS)
				zlog_debug("%s %s: IGMP (S,G)=%s(%s) from %s",
					   __FILE__, __func__, child->sg_str,
					   ifp->name, up->sg_str);

			if (!child->rpf.source_nexthop.interface) {
				/* when iif unknown, do not inherit */
				if (PIM_DEBUG_EVENTS)
					zlog_debug(
						"Skipped (S,G)=%s(%s) from %s: no iif",
						child->sg_str, ifp->name,
						up->sg_str);
				continue;
			}

			ch = pim_ifchannel_find(ifp, &child->sg);
			if (pim_upstream_evaluate_join_desired_interface(
				    child, ch, starch)) {
				pim_channel_add_oif(child->channel_oil, ifp,
						    PIM_OIF_FLAG_PROTO_STAR,
						    __func__);
				pim_upstream_update_join_desired(pim, child);
			}
		}

		/* SPT-infinity: keep pulling traffic via the RP (register
		 * interface), unless the group is denied by the configured
		 * prefix-list. */
		if (pim->spt.switchover == PIM_SPT_INFINITY) {
			if (pim->spt.plist) {
				struct prefix_list *plist = prefix_list_lookup(
					AFI_IP, pim->spt.plist);
				struct prefix g;

				pim_addr_to_prefix(&g, up->sg.grp);
				if (prefix_list_apply_ext(plist, NULL, &g,
							  true) ==
				    PREFIX_DENY) {
					pim_channel_add_oif(
						up->channel_oil, pim->regiface,
						PIM_OIF_FLAG_PROTO_GM,
						__func__);
				}
			}
		} else
			pim_channel_add_oif(up->channel_oil, pim->regiface,
					    PIM_OIF_FLAG_PROTO_GM, __func__);
	}

	return 1;
}
+
/*
 * Remove the local (IGMP/MLD) membership for (S,G) on ifp.
 *
 * Sets the channel's membership back to NOINFO, and for a (*,G)
 * membership also withdraws the inherited OIF from every (S,G) child
 * that no longer wants it.  Finally the channel is deleted if nothing
 * else keeps it alive.
 */
void pim_ifchannel_local_membership_del(struct interface *ifp, pim_sgaddr *sg)
{
	struct pim_ifchannel *starch, *ch, *orig;
	struct pim_interface *pim_ifp;

	/* PIM enabled on interface? */
	pim_ifp = ifp->info;
	if (!pim_ifp)
		return;
	if (!pim_ifp->pim_enable)
		return;

	orig = ch = pim_ifchannel_find(ifp, sg);
	if (!ch)
		return;
	ifmembership_set(ch, PIM_IFMEMBERSHIP_NOINFO);

	if (pim_addr_is_any(sg->src)) {
		struct pim_upstream *up = pim_upstream_find(pim_ifp->pim, sg);
		struct pim_upstream *child;
		struct listnode *up_node, *up_nnode;

		starch = ch;

		for (ALL_LIST_ELEMENTS(up->sources, up_node, up_nnode, child)) {
			struct channel_oil *c_oil = child->channel_oil;
			struct pim_ifchannel *chchannel =
				pim_ifchannel_find(ifp, &child->sg);

			pim_ifp = ifp->info;

			if (PIM_DEBUG_EVENTS)
				zlog_debug("%s %s: Prune(S,G)=%s(%s) from %s",
					   __FILE__, __func__, up->sg_str,
					   ifp->name, child->sg_str);

			ch = pim_ifchannel_find(ifp, &child->sg);
			/*
			 * If the S,G has no if channel and the c_oil still
			 * has output here then the *,G was supplying the
			 * implied
			 * if channel. So remove it.
			 */
			if (!pim_upstream_evaluate_join_desired_interface(
				    child, ch, starch) ||
			    (!chchannel &&
			     oil_if_has(c_oil, pim_ifp->mroute_vif_index))) {
				pim_channel_del_inherited_oif(c_oil, ifp,
							      __func__);
			}

			/* Child node removal/ref count-- will happen as part of
			 * parent' delete_no_info */
		}
	}

	/* Resetting the IGMP flags here */
	if (orig->upstream)
		PIM_UPSTREAM_FLAG_UNSET_SRC_IGMP(orig->upstream->flags);

	PIM_IF_FLAG_UNSET_PROTO_IGMP(orig->flags);

	delete_on_noinfo(orig);
}
+
+/* Re-evaluate CouldAssert(S,G,I) and run the assert state machine
+ * transitions on a change (RFC 4601 assert machinery).
+ */
+void pim_ifchannel_update_could_assert(struct pim_ifchannel *ch)
+{
+	int was_could_assert;
+	int now_could_assert;
+
+	was_could_assert =
+		PIM_FORCE_BOOLEAN(PIM_IF_FLAG_TEST_COULD_ASSERT(ch->flags));
+	now_could_assert =
+		PIM_FORCE_BOOLEAN(pim_macro_ch_could_assert_eval(ch));
+
+	/* No transition: nothing to update */
+	if (was_could_assert == now_could_assert)
+		return;
+
+	if (PIM_DEBUG_PIM_EVENTS)
+		zlog_debug("%s: CouldAssert(%pPAs,%pPAs,%s) changed from %d to %d",
+			   __func__, &ch->sg.src, &ch->sg.grp,
+			   ch->interface->name, was_could_assert,
+			   now_could_assert);
+
+	if (now_could_assert) {
+		/* CouldAssert(S,G,I) switched from false to true */
+		PIM_IF_FLAG_SET_COULD_ASSERT(ch->flags);
+	} else {
+		/* CouldAssert(S,G,I) switched from true to false */
+		PIM_IF_FLAG_UNSET_COULD_ASSERT(ch->flags);
+
+		/* Losing CouldAssert while we are assert winner forces
+		 * assert state machine action A4.
+		 */
+		if (ch->ifassert_state == PIM_IFASSERT_I_AM_WINNER)
+			assert_action_a4(ch);
+	}
+
+	/* CouldAssert feeds into my_assert_metric; recompute it */
+	pim_ifchannel_update_my_assert_metric(ch);
+}
+
+/*
+  my_assert_metric may be affected by:
+
+  CouldAssert(S,G)
+  pim_ifp->primary_address
+  rpf->source_nexthop.mrib_metric_preference;
+  rpf->source_nexthop.mrib_route_metric;
+ */
+void pim_ifchannel_update_my_assert_metric(struct pim_ifchannel *ch)
+{
+	struct pim_assert_metric updated_metric;
+
+	updated_metric = pim_macro_ch_my_assert_metric_eval(ch);
+
+	/* Unchanged metric: nothing to do */
+	if (pim_assert_metric_match(&updated_metric, &ch->ifassert_my_metric))
+		return;
+
+	if (PIM_DEBUG_PIM_EVENTS)
+		zlog_debug(
+			"%s: my_assert_metric(%pPAs,%pPAs,%s) changed from %u,%u,%u,%pPAs to %u,%u,%u,%pPAs",
+			__func__, &ch->sg.src, &ch->sg.grp, ch->interface->name,
+			ch->ifassert_my_metric.rpt_bit_flag,
+			ch->ifassert_my_metric.metric_preference,
+			ch->ifassert_my_metric.route_metric,
+			&ch->ifassert_my_metric.ip_address,
+			updated_metric.rpt_bit_flag,
+			updated_metric.metric_preference,
+			updated_metric.route_metric,
+			&updated_metric.ip_address);
+
+	ch->ifassert_my_metric = updated_metric;
+
+	/* If our metric now beats the current assert winner's, trigger
+	 * assert state machine action A5.
+	 */
+	if (pim_assert_metric_better(&ch->ifassert_my_metric,
+				     &ch->ifassert_winner_metric))
+		assert_action_a5(ch);
+}
+
+/* Re-evaluate AssertTrackingDesired(S,G,I) and run the assert state
+ * machine transition on a change.
+ */
+void pim_ifchannel_update_assert_tracking_desired(struct pim_ifchannel *ch)
+{
+	int was_atd;
+	int now_atd;
+
+	was_atd = PIM_FORCE_BOOLEAN(
+		PIM_IF_FLAG_TEST_ASSERT_TRACKING_DESIRED(ch->flags));
+	now_atd =
+		PIM_FORCE_BOOLEAN(pim_macro_assert_tracking_desired_eval(ch));
+
+	/* No transition: nothing to update */
+	if (was_atd == now_atd)
+		return;
+
+	if (PIM_DEBUG_PIM_EVENTS)
+		zlog_debug(
+			"%s: AssertTrackingDesired(%pPAs,%pPAs,%s) changed from %d to %d",
+			__func__, &ch->sg.src, &ch->sg.grp, ch->interface->name,
+			was_atd, now_atd);
+
+	if (now_atd) {
+		/* AssertTrackingDesired(S,G,I) switched from false to true */
+		PIM_IF_FLAG_SET_ASSERT_TRACKING_DESIRED(ch->flags);
+	} else {
+		/* AssertTrackingDesired(S,G,I) switched from true to false */
+		PIM_IF_FLAG_UNSET_ASSERT_TRACKING_DESIRED(ch->flags);
+
+		/* Losing tracking-desired while we are assert loser triggers
+		 * assert state machine action A5.
+		 */
+		if (ch->ifassert_state == PIM_IFASSERT_I_AM_LOSER)
+			assert_action_a5(ch);
+	}
+}
+
+/*
+ * If we have a new pim interface, check to
+ * see if any of the pre-existing channels have
+ * their upstream out that way and turn on forwarding
+ * for that ifchannel then.
+ */
+void pim_ifchannel_scan_forward_start(struct interface *new_ifp)
+{
+	struct pim_interface *new_pim_ifp = new_ifp->info;
+	struct pim_instance *pim = new_pim_ifp->pim;
+	struct interface *ifp;
+
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		struct pim_interface *scan_pim_ifp = ifp->info;
+		struct pim_ifchannel *ch;
+
+		/* Skip non-PIM interfaces and the new interface itself */
+		if (!scan_pim_ifp || scan_pim_ifp == new_pim_ifp)
+			continue;
+
+		RB_FOREACH (ch, pim_ifchannel_rb,
+			    &scan_pim_ifp->ifchannel_rb) {
+			struct pim_upstream *up;
+
+			if (ch->ifjoin_state != PIM_IFJOIN_JOIN)
+				continue;
+
+			/* No mroute installed yet and RPF now points out of
+			 * the new interface: begin forwarding.
+			 */
+			up = ch->upstream;
+			if (!up->channel_oil &&
+			    up->rpf.source_nexthop.interface == new_ifp)
+				pim_forward_start(ch);
+		}
+	}
+}
+
+/*
+ * Downstream per-interface (S,G,rpt) state machine
+ * states that we need to move (S,G,rpt) items
+ * into different states at the start of the
+ * reception of a *,G join as well, when
+ * we get End of Message
+ */
+void pim_ifchannel_set_star_g_join_state(struct pim_ifchannel *ch, int eom,
+					 uint8_t join)
+{
+	bool send_upstream_starg = false;
+	struct pim_ifchannel *child;
+	struct listnode *ch_node, *nch_node;
+	struct pim_instance *pim =
+		((struct pim_interface *)ch->interface->info)->pim;
+	struct pim_upstream *starup = ch->upstream;
+
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug(
+			"%s: %s %s eom: %d join %u", __func__,
+			pim_ifchannel_ifjoin_name(ch->ifjoin_state, ch->flags),
+			ch->sg_str, eom, join);
+	/* Nothing to do when the (*,G) channel has no (S,G) children */
+	if (!ch->sources)
+		return;
+
+	for (ALL_LIST_ELEMENTS(ch->sources, ch_node, nch_node, child)) {
+		/* Only (S,G,rpt) children participate in this machine */
+		if (!PIM_IF_FLAG_TEST_S_G_RPT(child->flags))
+			continue;
+
+		switch (child->ifjoin_state) {
+		case PIM_IFJOIN_NOINFO:
+		case PIM_IFJOIN_JOIN:
+			break;
+		case PIM_IFJOIN_PRUNE:
+			/* Start of a *,G join (!eom): park in the TMP state
+			 * until End of Message tells us whether the prune
+			 * was re-sent in this compound message.
+			 */
+			if (!eom)
+				child->ifjoin_state = PIM_IFJOIN_PRUNE_TMP;
+			break;
+		case PIM_IFJOIN_PRUNE_PENDING:
+			if (!eom)
+				child->ifjoin_state =
+					PIM_IFJOIN_PRUNE_PENDING_TMP;
+			break;
+		case PIM_IFJOIN_PRUNE_TMP:
+		case PIM_IFJOIN_PRUNE_PENDING_TMP:
+			/* TMP states are resolved only at End of Message */
+			if (!eom)
+				break;
+
+			/* Prune was not refreshed: tear (S,G,rpt) down */
+			if (child->ifjoin_state == PIM_IFJOIN_PRUNE_PENDING_TMP)
+				EVENT_OFF(child->t_ifjoin_prune_pending_timer);
+			EVENT_OFF(child->t_ifjoin_expiry_timer);
+
+			PIM_IF_FLAG_UNSET_S_G_RPT(child->flags);
+			child->ifjoin_state = PIM_IFJOIN_NOINFO;
+
+			/* On the RP, restore the inherited oif if the child
+			 * still has downstream interest.
+			 */
+			if ((I_am_RP(pim, child->sg.grp)) &&
+			    (!pim_upstream_empty_inherited_olist(
+				    child->upstream))) {
+				pim_channel_add_oif(
+					child->upstream->channel_oil,
+					ch->interface, PIM_OIF_FLAG_PROTO_STAR,
+					__func__);
+				pim_upstream_update_join_desired(
+					pim, child->upstream);
+			}
+			send_upstream_starg = true;
+
+			delete_on_noinfo(child);
+			break;
+		}
+	}
+
+	/* Trigger an immediate (*,G) join/prune refresh upstream */
+	if (send_upstream_starg)
+		pim_jp_agg_single_upstream_send(&starup->rpf, starup, true);
+}
diff --git a/pimd/pim_ifchannel.h b/pimd/pim_ifchannel.h
new file mode 100644
index 0000000..4b0ff95
--- /dev/null
+++ b/pimd/pim_ifchannel.h
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_IFCHANNEL_H
+#define PIM_IFCHANNEL_H
+
+#include <zebra.h>
+
+#include "if.h"
+#include "prefix.h"
+
+#include "pim_assert.h"
+
+struct pim_ifchannel;
+#include "pim_upstream.h"
+
+/* Local (IGMP/MLD) membership tracking result for (S,G) on an interface */
+enum pim_ifmembership { PIM_IFMEMBERSHIP_NOINFO, PIM_IFMEMBERSHIP_INCLUDE };
+
+/* Downstream per-interface (S,G) join/prune states (RFC 4601 sec 4.1.4).
+ * The _TMP states are transient markers used while parsing a compound
+ * (*,G) join/prune message; see pim_ifchannel_set_star_g_join_state().
+ */
+enum pim_ifjoin_state {
+	PIM_IFJOIN_NOINFO,
+	PIM_IFJOIN_JOIN,
+	PIM_IFJOIN_PRUNE,
+	PIM_IFJOIN_PRUNE_PENDING,
+	PIM_IFJOIN_PRUNE_TMP,
+	PIM_IFJOIN_PRUNE_PENDING_TMP,
+};
+
+/*
+ Flag to detect change in CouldAssert(S,G,I)
+*/
+#define PIM_IF_FLAG_MASK_COULD_ASSERT (1 << 0)
+#define PIM_IF_FLAG_TEST_COULD_ASSERT(flags) ((flags) & PIM_IF_FLAG_MASK_COULD_ASSERT)
+#define PIM_IF_FLAG_SET_COULD_ASSERT(flags) ((flags) |= PIM_IF_FLAG_MASK_COULD_ASSERT)
+#define PIM_IF_FLAG_UNSET_COULD_ASSERT(flags) ((flags) &= ~PIM_IF_FLAG_MASK_COULD_ASSERT)
+/*
+ Flag to detect change in AssertTrackingDesired(S,G,I)
+*/
+#define PIM_IF_FLAG_MASK_ASSERT_TRACKING_DESIRED (1 << 1)
+#define PIM_IF_FLAG_TEST_ASSERT_TRACKING_DESIRED(flags) ((flags) & PIM_IF_FLAG_MASK_ASSERT_TRACKING_DESIRED)
+#define PIM_IF_FLAG_SET_ASSERT_TRACKING_DESIRED(flags) ((flags) |= PIM_IF_FLAG_MASK_ASSERT_TRACKING_DESIRED)
+#define PIM_IF_FLAG_UNSET_ASSERT_TRACKING_DESIRED(flags) ((flags) &= ~PIM_IF_FLAG_MASK_ASSERT_TRACKING_DESIRED)
+
+/*
+ * Flag to tell us if the ifchannel is (S,G,rpt)
+ */
+#define PIM_IF_FLAG_MASK_S_G_RPT (1 << 2)
+#define PIM_IF_FLAG_TEST_S_G_RPT(flags) ((flags) & PIM_IF_FLAG_MASK_S_G_RPT)
+#define PIM_IF_FLAG_SET_S_G_RPT(flags) ((flags) |= PIM_IF_FLAG_MASK_S_G_RPT)
+#define PIM_IF_FLAG_UNSET_S_G_RPT(flags) ((flags) &= ~PIM_IF_FLAG_MASK_S_G_RPT)
+
+/*
+ * Flag to tell us if the ifchannel is proto PIM
+ */
+#define PIM_IF_FLAG_MASK_PROTO_PIM (1 << 3)
+#define PIM_IF_FLAG_TEST_PROTO_PIM(flags) ((flags)&PIM_IF_FLAG_MASK_PROTO_PIM)
+#define PIM_IF_FLAG_SET_PROTO_PIM(flags) ((flags) |= PIM_IF_FLAG_MASK_PROTO_PIM)
+#define PIM_IF_FLAG_UNSET_PROTO_PIM(flags) \
+ ((flags) &= ~PIM_IF_FLAG_MASK_PROTO_PIM)
+/*
+ * Flag to tell us if the ifchannel is proto IGMP
+ */
+#define PIM_IF_FLAG_MASK_PROTO_IGMP (1 << 4)
+#define PIM_IF_FLAG_TEST_PROTO_IGMP(flags) ((flags)&PIM_IF_FLAG_MASK_PROTO_IGMP)
+#define PIM_IF_FLAG_SET_PROTO_IGMP(flags) \
+ ((flags) |= PIM_IF_FLAG_MASK_PROTO_IGMP)
+#define PIM_IF_FLAG_UNSET_PROTO_IGMP(flags) \
+ ((flags) &= ~PIM_IF_FLAG_MASK_PROTO_IGMP)
+/*
+  Per-interface (S,G) state
+*/
+struct pim_ifchannel {
+	RB_ENTRY(rb_ifchannel) pim_ifp_rb;
+
+	/* NOTE(review): presumably the (*,G) parent of an (S,G) entry —
+	 * confirm against pim_ifchannel.c */
+	struct pim_ifchannel *parent;
+	/* (S,G) children of a (*,G) entry; walked by
+	 * pim_ifchannel_set_star_g_join_state() */
+	struct list *sources;
+	pim_sgaddr sg;
+	char sg_str[PIM_SG_LEN];
+	struct interface *interface; /* backpointer to interface */
+	uint32_t flags;		     /* PIM_IF_FLAG_* bits above */
+
+	/* IGMPv3 determined interface has local members for (S,G) ? */
+	enum pim_ifmembership local_ifmembership;
+
+	/* Per-interface (S,G) Join/Prune State (Section 4.1.4 of RFC4601) */
+	enum pim_ifjoin_state ifjoin_state;
+	struct event *t_ifjoin_expiry_timer;
+	struct event *t_ifjoin_prune_pending_timer;
+	int64_t ifjoin_creation; /* Record uptime of ifjoin state */
+
+	/* Per-interface (S,G) Assert State (Section 4.6.1 of RFC4601) */
+	enum pim_ifassert_state ifassert_state;
+	struct event *t_ifassert_timer;
+	pim_addr ifassert_winner;
+	struct pim_assert_metric ifassert_winner_metric;
+	int64_t ifassert_creation; /* Record uptime of ifassert state */
+	struct pim_assert_metric ifassert_my_metric;
+
+	/* Upstream (S,G) state */
+	struct pim_upstream *upstream;
+};
+
+RB_HEAD(pim_ifchannel_rb, pim_ifchannel);
+RB_PROTOTYPE(pim_ifchannel_rb, pim_ifchannel, pim_ifp_rb,
+ pim_ifchannel_compare);
+
+void pim_ifchannel_delete(struct pim_ifchannel *ch);
+void pim_ifchannel_delete_all(struct interface *ifp);
+void pim_ifchannel_membership_clear(struct interface *ifp);
+void pim_ifchannel_delete_on_noinfo(struct interface *ifp);
+struct pim_ifchannel *pim_ifchannel_find(struct interface *ifp, pim_sgaddr *sg);
+struct pim_ifchannel *pim_ifchannel_add(struct interface *ifp, pim_sgaddr *sg,
+ uint8_t ch_flags, int up_flags);
+void pim_ifchannel_join_add(struct interface *ifp, pim_addr neigh_addr,
+ pim_addr upstream, pim_sgaddr *sg,
+ uint8_t source_flags, uint16_t holdtime);
+void pim_ifchannel_prune(struct interface *ifp, pim_addr upstream,
+ pim_sgaddr *sg, uint8_t source_flags,
+ uint16_t holdtime);
+int pim_ifchannel_local_membership_add(struct interface *ifp, pim_sgaddr *sg,
+ bool is_vxlan);
+void pim_ifchannel_local_membership_del(struct interface *ifp, pim_sgaddr *sg);
+
+void pim_ifchannel_ifjoin_switch(const char *caller, struct pim_ifchannel *ch,
+ enum pim_ifjoin_state new_state);
+const char *pim_ifchannel_ifjoin_name(enum pim_ifjoin_state ifjoin_state,
+ int flags);
+const char *pim_ifchannel_ifassert_name(enum pim_ifassert_state ifassert_state);
+
+int pim_ifchannel_isin_oiflist(struct pim_ifchannel *ch);
+
+void reset_ifassert_state(struct pim_ifchannel *ch);
+
+void pim_ifchannel_update_could_assert(struct pim_ifchannel *ch);
+void pim_ifchannel_update_my_assert_metric(struct pim_ifchannel *ch);
+void pim_ifchannel_update_assert_tracking_desired(struct pim_ifchannel *ch);
+
+void pim_ifchannel_scan_forward_start(struct interface *new_ifp);
+void pim_ifchannel_set_star_g_join_state(struct pim_ifchannel *ch, int eom,
+ uint8_t join);
+
+int pim_ifchannel_compare(const struct pim_ifchannel *ch1,
+ const struct pim_ifchannel *ch2);
+
+void delete_on_noinfo(struct pim_ifchannel *ch);
+#endif /* PIM_IFCHANNEL_H */
diff --git a/pimd/pim_igmp.c b/pimd/pim_igmp.c
new file mode 100644
index 0000000..063ba6e
--- /dev/null
+++ b/pimd/pim_igmp.c
@@ -0,0 +1,1537 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "memory.h"
+#include "prefix.h"
+#include "if.h"
+#include "hash.h"
+#include "jhash.h"
+#include "lib_errors.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_igmp.h"
+#include "pim_igmpv2.h"
+#include "pim_igmpv3.h"
+#include "pim_igmp_mtrace.h"
+#include "pim_iface.h"
+#include "pim_sock.h"
+#include "pim_mroute.h"
+#include "pim_str.h"
+#include "pim_util.h"
+#include "pim_time.h"
+#include "pim_ssm.h"
+#include "pim_tib.h"
+
+static void group_timer_off(struct gm_group *group);
+static void pim_igmp_general_query(struct event *t);
+
+/* Begin forwarding for the any-source (*,G) pseudo-source of a group in
+ * EXCLUDE {empty} mode.
+ */
+void igmp_anysource_forward_start(struct pim_instance *pim,
+				  struct gm_group *group)
+{
+	struct in_addr any_src = {.s_addr = 0};
+	struct gm_source *source;
+
+	/* Any source (*,G) is forwarded only if mode is EXCLUDE {empty} */
+	assert(group->group_filtermode_isexcl);
+	assert(listcount(group->group_source_list) < 1);
+
+	source = igmp_get_source_by_addr(group, any_src, NULL);
+	if (!source) {
+		zlog_warn("%s: Failure to create * source", __func__);
+		return;
+	}
+
+	igmp_source_forward_start(pim, source);
+}
+
+/* Stop forwarding for the any-source (*,G) pseudo-source, if present */
+void igmp_anysource_forward_stop(struct gm_group *group)
+{
+	struct in_addr any_src = {.s_addr = 0};
+	struct gm_source *source;
+
+	source = igmp_find_source_by_addr(group, any_src);
+	if (source)
+		igmp_source_forward_stop(source);
+}
+
+/* Re-check one IGMP source against the group's current SSM/ASM range:
+ * (*,G) state is invalid in the SSM range, (S,G) state is invalid in the
+ * ASM range; start/stop forwarding accordingly.
+ */
+static void igmp_source_forward_reevaluate_one(struct pim_instance *pim,
+					       struct gm_source *source,
+					       int is_grp_ssm)
+{
+	struct gm_group *group = source->source_group;
+	bool forwarding = IGMP_SOURCE_TEST_FORWARDING(source->source_flags);
+	bool src_is_any = pim_addr_is_any(source->source_addr);
+	pim_sgaddr sg;
+
+	memset(&sg, 0, sizeof(sg));
+	sg.src = source->source_addr;
+	sg.grp = group->group_addr;
+
+	if (forwarding) {
+		/* Tear down state that no longer matches the range */
+		if (src_is_any && is_grp_ssm) {
+			if (PIM_DEBUG_PIM_EVENTS)
+				zlog_debug("local membership del for %pSG as G is now SSM",
+					   &sg);
+			igmp_source_forward_stop(source);
+		} else if (!src_is_any && !is_grp_ssm) {
+			if (PIM_DEBUG_PIM_EVENTS)
+				zlog_debug("local membership del for %pSG as G is now ASM",
+					   &sg);
+			igmp_source_forward_stop(source);
+		}
+	} else if (!src_is_any && is_grp_ssm) {
+		/* (S,G) state becomes valid again now that G is SSM */
+		if (PIM_DEBUG_PIM_EVENTS)
+			zlog_debug("local membership add for %pSG as G is now SSM",
+				   &sg);
+		igmp_source_forward_start(pim, source);
+	}
+}
+
+/* Re-check every IGMP group/source on every interface against the current
+ * SSM range configuration and fix up forwarding state accordingly.
+ */
+void igmp_source_forward_reevaluate_all(struct pim_instance *pim)
+{
+	struct interface *ifp;
+
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		struct pim_interface *pim_ifp = ifp->info;
+		struct listnode *grpnode, *grp_nextnode;
+		struct gm_group *grp;
+		struct pim_ifchannel *ch, *ch_temp;
+
+		if (!pim_ifp)
+			continue;
+
+		/* scan igmp groups; safe iteration since groups in the SSM
+		 * range with EXCLUDE mode are deleted inside the loop */
+		for (ALL_LIST_ELEMENTS(pim_ifp->gm_group_list, grpnode,
+				       grp_nextnode, grp)) {
+			struct listnode *srcnode;
+			struct gm_source *src;
+			int is_grp_ssm;
+
+			/*
+			 * RFC 4604
+			 * section 2.2.1
+			 * EXCLUDE mode does not apply to SSM addresses,
+			 * and an SSM-aware router will ignore
+			 * MODE_IS_EXCLUDE and CHANGE_TO_EXCLUDE_MODE
+			 * requests in the SSM range.
+			 */
+			is_grp_ssm = pim_is_grp_ssm(pim, grp->group_addr);
+			if (is_grp_ssm && grp->group_filtermode_isexcl) {
+				igmp_group_delete(grp);
+			} else {
+				/* scan group sources */
+				for (ALL_LIST_ELEMENTS_RO(
+					     grp->group_source_list, srcnode,
+					     src)) {
+					igmp_source_forward_reevaluate_one(
+						pim, src, is_grp_ssm);
+				} /* scan group sources */
+			}
+		} /* scan igmp groups */
+
+		/* drop (*,G) ifchannels for groups that moved into the SSM
+		 * range; SAFE variant since entries are deleted in-loop */
+		RB_FOREACH_SAFE (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb,
+				 ch_temp) {
+			if (pim_is_grp_ssm(pim, ch->sg.grp)) {
+				if (pim_addr_is_any(ch->sg.src))
+					pim_ifchannel_delete(ch);
+			}
+		}
+	} /* scan interfaces */
+}
+
+/* Install forwarding (TIB join) for an IGMP source, unless it is an
+ * (S,G) in the ASM range or forwarding is already active.
+ */
+void igmp_source_forward_start(struct pim_instance *pim,
+			       struct gm_source *source)
+{
+	struct gm_group *group = source->source_group;
+	pim_sgaddr sg;
+
+	memset(&sg, 0, sizeof(sg));
+	sg.src = source->source_addr;
+	sg.grp = group->group_addr;
+
+	if (PIM_DEBUG_GM_TRACE)
+		zlog_debug("%s: (S,G)=%pSG oif=%s fwd=%d", __func__, &sg,
+			   group->interface->name,
+			   IGMP_SOURCE_TEST_FORWARDING(source->source_flags));
+
+	/*
+	 * PIM state should not be allowed for ASM group with valid source
+	 * address.
+	 */
+	if (!pim_is_grp_ssm(pim, group->group_addr) &&
+	    !pim_addr_is_any(source->source_addr)) {
+		zlog_warn(
+			"%s: (S,G)=%pSG ASM range having source address, not allowed to create PIM state",
+			__func__, &sg);
+		return;
+	}
+
+	/* Prevent IGMP interface from installing multicast route multiple
+	   times */
+	if (IGMP_SOURCE_TEST_FORWARDING(source->source_flags))
+		return;
+
+	if (tib_sg_gm_join(pim, sg, group->interface,
+			   &source->source_channel_oil))
+		IGMP_SOURCE_DO_FORWARDING(source->source_flags);
+}
+
+/*
+  igmp_source_forward_stop: stop forwarding, but keep the source
+  igmp_source_delete: stop forwarding, and delete the source
+ */
+void igmp_source_forward_stop(struct gm_source *source)
+{
+	struct gm_group *group = source->source_group;
+	struct pim_interface *pim_oif = group->interface->info;
+	pim_sgaddr sg;
+
+	memset(&sg, 0, sizeof(sg));
+	sg.src = source->source_addr;
+	sg.grp = group->group_addr;
+
+	if (PIM_DEBUG_GM_TRACE)
+		zlog_debug("%s: (S,G)=%pSG oif=%s fwd=%d", __func__, &sg,
+			   group->interface->name,
+			   IGMP_SOURCE_TEST_FORWARDING(source->source_flags));
+
+	/* Prevent IGMP interface from removing multicast route multiple
+	   times */
+	if (!IGMP_SOURCE_TEST_FORWARDING(source->source_flags))
+		return;
+
+	tib_sg_gm_prune(pim_oif->pim, sg, group->interface,
+			&source->source_channel_oil);
+	IGMP_SOURCE_DONT_FORWARDING(source->source_flags);
+}
+
+/* This socket is used for TXing IGMP packets only, IGMP RX happens
+ * in pim_mroute_msg()
+ */
+
+/* Resolve group_str to an address and join it on fd.
+ * Returns 1 when the join succeeded, 0 otherwise (parse or join failure).
+ */
+static int igmp_sock_join_group(int fd, struct in_addr ifaddr,
+				struct interface *ifp,
+				struct pim_interface *pim_ifp,
+				const char *group_str)
+{
+	struct in_addr group;
+
+	if (!inet_aton(group_str, &group)) {
+		zlog_warn(
+			"%s %s: IGMP socket fd=%d interface %pI4: could not solve %s to group address: errno=%d: %s",
+			__FILE__, __func__, fd, &ifaddr, group_str, errno,
+			safe_strerror(errno));
+		return 0;
+	}
+
+	return pim_socket_join(fd, group, ifaddr, ifp->ifindex, pim_ifp) == 0;
+}
+
+static int igmp_sock_open(struct in_addr ifaddr, struct interface *ifp)
+{
+	int fd;
+	int join = 0;
+	struct pim_interface *pim_ifp = ifp->info;
+
+	fd = pim_socket_mcast(IPPROTO_IGMP, ifaddr, ifp, 1);
+	if (fd < 0)
+		return -1;
+
+	join += igmp_sock_join_group(fd, ifaddr, ifp, pim_ifp,
+				     PIM_ALL_ROUTERS);
+
+	/*
+	  IGMP routers periodically send IGMP general queries to
+	  AllSystems=224.0.0.1
+	  IGMP routers must receive general queries for querier election.
+	 */
+	join += igmp_sock_join_group(fd, ifaddr, ifp, pim_ifp,
+				     PIM_ALL_SYSTEMS);
+
+	join += igmp_sock_join_group(fd, ifaddr, ifp, pim_ifp,
+				     PIM_ALL_IGMP_ROUTERS);
+
+	/* At least one group must be joined for the socket to be useful */
+	if (!join) {
+		flog_err_sys(
+			EC_LIB_SOCKET,
+			"IGMP socket fd=%d could not join any group on interface address %pI4",
+			fd, &ifaddr);
+		close(fd);
+		fd = -1;
+	}
+
+	return fd;
+}
+
+#undef IGMP_SOCK_DUMP
+
+#ifdef IGMP_SOCK_DUMP
+/* Debug-only helper: dump every IGMP socket in the array.  Compiled out
+ * by the #undef above.  NOTE(review): the caller in
+ * pim_igmp_sock_lookup_ifaddr() passes a struct list *, while this
+ * signature expects array_t * — confirm before re-enabling.
+ */
+static void igmp_sock_dump(array_t *igmp_sock_array)
+{
+	int size = array_size(igmp_sock_array);
+	for (int i = 0; i < size; ++i) {
+
+		struct gm_sock *igmp = array_get(igmp_sock_array, i);
+
+		zlog_debug("%s %s: [%d/%d] igmp_addr=%pI4 fd=%d", __FILE__,
+			   __func__, i, size, &igmp->ifaddr,
+			   igmp->fd);
+	}
+}
+#endif
+
+/* Find the IGMP socket bound to ifaddr in the given list, or NULL */
+struct gm_sock *pim_igmp_sock_lookup_ifaddr(struct list *igmp_sock_list,
+					    struct in_addr ifaddr)
+{
+	struct gm_sock *igmp;
+	struct listnode *node;
+
+#ifdef IGMP_SOCK_DUMP
+	igmp_sock_dump(igmp_sock_list);
+#endif
+
+	/* Linear scan: the per-interface socket list is small */
+	for (ALL_LIST_ELEMENTS_RO(igmp_sock_list, node, igmp)) {
+		if (igmp->ifaddr.s_addr == ifaddr.s_addr)
+			return igmp;
+	}
+
+	return NULL;
+}
+
+/* Other-Querier-Present timer fired: the previously elected querier went
+ * silent, so this router resumes querier duties (RFC 2236 sec 7).
+ */
+static void pim_igmp_other_querier_expire(struct event *t)
+{
+	struct gm_sock *igmp;
+
+	igmp = EVENT_ARG(t);
+
+	/* We were a non-querier, so our own periodic general-query timer
+	 * must not be running */
+	assert(!igmp->t_igmp_query_timer);
+
+	if (PIM_DEBUG_GM_TRACE) {
+		char ifaddr_str[INET_ADDRSTRLEN];
+		pim_inet4_dump("<ifaddr?>", igmp->ifaddr, ifaddr_str,
+			       sizeof(ifaddr_str));
+		zlog_debug("%s: Querier %s resuming", __func__, ifaddr_str);
+	}
+	/* Mark the interface address as querier address */
+	igmp->querier_addr = igmp->ifaddr;
+
+	/*
+	  We are the current querier, then
+	  re-start sending general queries.
+	  RFC 2236 - sec 7 Other Querier
+	  present timer expired (Send General
+	  Query, Set Gen. Query. timer)
+	 */
+	pim_igmp_general_query(t);
+}
+
+/* (Re)arm the Other-Querier-Present timer after hearing a query from a
+ * better (lower-addressed) querier.  If we were the querier, this also
+ * stops our own periodic general-query timer (RFC 3376 sec 8.5).
+ */
+void pim_igmp_other_querier_timer_on(struct gm_sock *igmp)
+{
+	long other_querier_present_interval_msec;
+	struct pim_interface *pim_ifp;
+
+	assert(igmp);
+	assert(igmp->interface);
+	assert(igmp->interface->info);
+
+	pim_ifp = igmp->interface->info;
+
+	if (igmp->t_other_querier_timer) {
+		/*
+		  There is other querier present already,
+		  then reset the other-querier-present timer.
+		 */
+
+		if (PIM_DEBUG_GM_TRACE) {
+			char ifaddr_str[INET_ADDRSTRLEN];
+			pim_inet4_dump("<ifaddr?>", igmp->ifaddr, ifaddr_str,
+				       sizeof(ifaddr_str));
+			zlog_debug(
+				"Querier %s resetting TIMER event for Other-Querier-Present",
+				ifaddr_str);
+		}
+		EVENT_OFF(igmp->t_other_querier_timer);
+	} else {
+		/*
+		  We are the current querier, then stop sending general
+		  queries:
+		  igmp->t_igmp_query_timer = NULL;
+		 */
+		pim_igmp_general_query_off(igmp);
+	}
+
+	/*
+	  Since this socket is starting the other-querier-present timer,
+	  there should not be periodic query timer for this socket.
+	 */
+	assert(!igmp->t_igmp_query_timer);
+
+	/*
+	  RFC 3376: 8.5. Other Querier Present Interval
+
+	  The Other Querier Present Interval is the length of time that must
+	  pass before a multicast router decides that there is no longer
+	  another multicast router which should be the querier.  This value
+	  MUST be ((the Robustness Variable) times (the Query Interval)) plus
+	  (one half of one Query Response Interval).
+
+	  other_querier_present_interval_msec = \
+	    igmp->querier_robustness_variable * \
+	    1000 * igmp->querier_query_interval + \
+	    100 * (pim_ifp->query_max_response_time_dsec >> 1);
+	 */
+	other_querier_present_interval_msec = PIM_IGMP_OQPI_MSEC(
+		igmp->querier_robustness_variable, igmp->querier_query_interval,
+		pim_ifp->gm_query_max_response_time_dsec);
+
+	if (PIM_DEBUG_GM_TRACE) {
+		char ifaddr_str[INET_ADDRSTRLEN];
+		pim_inet4_dump("<ifaddr?>", igmp->ifaddr, ifaddr_str,
+			       sizeof(ifaddr_str));
+		zlog_debug(
+			"Querier %s scheduling %ld.%03ld sec TIMER event for Other-Querier-Present",
+			ifaddr_str, other_querier_present_interval_msec / 1000,
+			other_querier_present_interval_msec % 1000);
+	}
+
+	/* On expiry we take over as querier; see
+	 * pim_igmp_other_querier_expire() */
+	event_add_timer_msec(router->master, pim_igmp_other_querier_expire,
+			     igmp, other_querier_present_interval_msec,
+			     &igmp->t_other_querier_timer);
+}
+
+/* Cancel the Other-Querier-Present timer, if running */
+void pim_igmp_other_querier_timer_off(struct gm_sock *igmp)
+{
+	assert(igmp);
+
+	/* Only log when a timer was actually pending */
+	if (PIM_DEBUG_GM_TRACE && igmp->t_other_querier_timer) {
+		char ifaddr_str[INET_ADDRSTRLEN];
+
+		pim_inet4_dump("<ifaddr?>", igmp->ifaddr, ifaddr_str,
+			       sizeof(ifaddr_str));
+		zlog_debug(
+			"IGMP querier %s fd=%d cancelling other-querier-present TIMER event on %s",
+			ifaddr_str, igmp->fd, igmp->interface->name);
+	}
+
+	EVENT_OFF(igmp->t_other_querier_timer);
+}
+
+/* Verify the IGMP message checksum; returns 0 when valid, -1 otherwise.
+ * Note: zeroes the checksum field in the buffer before recomputing.
+ */
+int igmp_validate_checksum(char *igmp_msg, int igmp_msg_len)
+{
+	uint16_t received;
+	uint16_t computed;
+
+	IGMP_GET_INT16((unsigned char *)(igmp_msg + IGMP_CHECKSUM_OFFSET),
+		       received);
+
+	/* Clear the checksum field */
+	memset(igmp_msg + IGMP_CHECKSUM_OFFSET, 0, 2);
+
+	computed = ntohs(in_cksum(igmp_msg, igmp_msg_len));
+	if (computed != received) {
+		zlog_warn("Invalid checksum received %x, calculated %x",
+			  received, computed);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Common RX handling for IGMP v1/v2/v3 queries: sanity checks, Rx stats,
+ * version-mismatch warning, and querier election (RFC 3376 sec 6.6.2).
+ * Only v3 queries receive further per-message processing.
+ */
+static int igmp_recv_query(struct gm_sock *igmp, int query_version,
+			   int max_resp_code, struct in_addr from,
+			   const char *from_str, char *igmp_msg,
+			   int igmp_msg_len)
+{
+	struct interface *ifp;
+	struct pim_interface *pim_ifp;
+	struct in_addr group_addr;
+
+	/* mtrace-only sockets do not participate in querier election */
+	if (igmp->mtrace_only)
+		return 0;
+
+	memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr));
+
+	ifp = igmp->interface;
+	pim_ifp = ifp->info;
+
+	if (igmp_validate_checksum(igmp_msg, igmp_msg_len) == -1) {
+		zlog_warn(
+			"Recv IGMP query v%d from %s on %s with invalid checksum",
+			query_version, from_str, ifp->name);
+		return -1;
+	}
+
+	/* Queries must originate from a directly connected neighbor */
+	if (!pim_if_connected_to_source(ifp, from)) {
+		if (PIM_DEBUG_GM_PACKETS)
+			zlog_debug("Recv IGMP query on interface: %s from a non-connected source: %s",
+				   ifp->name, from_str);
+		return 0;
+	}
+
+	/* Ignore our own looped-back queries */
+	if (if_address_is_local(&from, AF_INET, ifp->vrf->vrf_id)) {
+		if (PIM_DEBUG_GM_PACKETS)
+			zlog_debug("Recv IGMP query on interface: %s from ourself %s",
+				   ifp->name, from_str);
+		return 0;
+	}
+
+	/* Collecting IGMP Rx stats */
+	switch (query_version) {
+	case 1:
+		igmp->igmp_stats.query_v1++;
+		break;
+	case 2:
+		igmp->igmp_stats.query_v2++;
+		break;
+	case 3:
+		igmp->igmp_stats.query_v3++;
+		break;
+	default:
+		igmp->igmp_stats.unsupported++;
+	}
+
+	/*
+	 * RFC 3376 defines some guidelines on operating in backwards
+	 * compatibility with older versions of IGMP but there are some gaps in
+	 * the logic:
+	 *
+	 * - once we drop from say version 3 to version 2 we will never go back
+	 *   to version 3 even if the node that TXed an IGMP v2 query upgrades
+	 *   to v3
+	 *
+	 * - The node with the lowest IP is the querier so we will only know to
+	 *   drop from v3 to v2 if the node that is the querier is also the one
+	 *   that is running igmp v2.  If a non-querier only supports igmp v2
+	 *   we will have no way of knowing.
+	 *
+	 * For now we will simplify things and inform the user that they need to
+	 * configure all PIM routers to use the same version of IGMP.
+	 */
+	if (query_version != pim_ifp->igmp_version) {
+		zlog_warn(
+			"Recv IGMP query v%d from %s on %s but we are using v%d, please configure all PIM routers on this subnet to use the same IGMP version",
+			query_version, from_str, ifp->name,
+			pim_ifp->igmp_version);
+		return 0;
+	}
+
+	if (PIM_DEBUG_GM_PACKETS) {
+		char group_str[INET_ADDRSTRLEN];
+		pim_inet4_dump("<group?>", group_addr, group_str,
+			       sizeof(group_str));
+		zlog_debug("Recv IGMP query v%d from %s on %s for group %s",
+			   query_version, from_str, ifp->name, group_str);
+	}
+
+	/*
+	  RFC 3376: 6.6.2. Querier Election
+
+	  When a router receives a query with a lower IP address, it sets
+	  the Other-Querier-Present timer to Other Querier Present Interval
+	  and ceases to send queries on the network if it was the previously
+	  elected querier.
+	 */
+	if (ntohl(from.s_addr) < ntohl(igmp->ifaddr.s_addr)) {
+
+		/* As per RFC 2236 section 3:
+		 * When a Querier receives a Leave Group message for a group
+		 * that has group members on the reception interface, it sends
+		 * [Last Member Query Count] Group-Specific Queries every [Last
+		 * Member Query Interval] to the group being left.  These
+		 * Group-Specific Queries have their Max Response time set to
+		 * [Last Member Query Interval].  If no Reports are received
+		 * after the response time of the last query expires, the
+		 * routers assume that the group has no local members, as above.
+		 * Any Querier to non-Querier transition is ignored during this
+		 * time; the same router keeps sending the Group-Specific
+		 * Queries.
+		 */
+		const struct gm_group *group;
+		const struct listnode *grpnode;
+
+		/* Any group with a pending group-specific retransmit means we
+		 * keep querier role for now (early return). */
+		for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode,
+					  group)) {
+			if (!group->t_group_query_retransmit_timer)
+				continue;
+
+			if (PIM_DEBUG_GM_TRACE)
+				zlog_debug(
+					"%s: lower address query packet from %s is ignored when last member query interval timer is running",
+					ifp->name, from_str);
+			return 0;
+		}
+
+		if (PIM_DEBUG_GM_TRACE) {
+			char ifaddr_str[INET_ADDRSTRLEN];
+			pim_inet4_dump("<ifaddr?>", igmp->ifaddr, ifaddr_str,
+				       sizeof(ifaddr_str));
+			zlog_debug(
+				"%s: local address %s (%u) lost querier election to %s (%u)",
+				ifp->name, ifaddr_str,
+				ntohl(igmp->ifaddr.s_addr), from_str,
+				ntohl(from.s_addr));
+		}
+		/* Reset the other querier timer only if query is received from
+		 * the previously elected querier or a better new querier
+		 * This will make sure that non-querier elects the new querier
+		 * whose ip address is higher than the old querier
+		 * in case the old querier goes down via other querier present
+		 * timer expiry
+		 */
+		if (ntohl(from.s_addr) <= ntohl(igmp->querier_addr.s_addr)) {
+			igmp->querier_addr.s_addr = from.s_addr;
+			pim_igmp_other_querier_timer_on(igmp);
+		}
+	}
+
+	/* IGMP version 3 is the only one where we process the RXed query */
+	if (query_version == 3) {
+		igmp_v3_recv_query(igmp, from_str, igmp_msg);
+	}
+
+	return 0;
+}
+
+/* Trace helper: log caller label, source address and interface */
+static void on_trace(const char *label, struct interface *ifp,
+		     struct in_addr from)
+{
+	char from_str[INET_ADDRSTRLEN];
+
+	if (!PIM_DEBUG_GM_TRACE)
+		return;
+
+	pim_inet4_dump("<from?>", from, from_str, sizeof(from_str));
+	zlog_debug("%s: from %s on %s", label, from_str, ifp->name);
+}
+
+/* Handle an IGMPv1 membership report: validate size/checksum, bump Rx
+ * stats, and create the group as INCLUDE {empty} if it does not exist.
+ */
+static int igmp_v1_recv_report(struct gm_sock *igmp, struct in_addr from,
+			       const char *from_str, char *igmp_msg,
+			       int igmp_msg_len)
+{
+	struct interface *ifp = igmp->interface;
+	struct in_addr group_addr;
+	struct gm_group *group;
+
+	on_trace(__func__, igmp->interface, from);
+
+	if (igmp->mtrace_only)
+		return 0;
+
+	/* v1/v2 reports have a fixed 8-byte size */
+	if (igmp_msg_len != IGMP_V12_MSG_SIZE) {
+		zlog_warn(
+			"Recv IGMP report v1 from %s on %s: size=%d other than correct=%d",
+			from_str, ifp->name, igmp_msg_len, IGMP_V12_MSG_SIZE);
+		return -1;
+	}
+
+	if (igmp_validate_checksum(igmp_msg, igmp_msg_len) == -1) {
+		zlog_warn(
+			"Recv IGMP report v1 from %s on %s with invalid checksum",
+			from_str, ifp->name);
+		return -1;
+	}
+
+	/* Collecting IGMP Rx stats */
+	igmp->igmp_stats.report_v1++;
+
+	if (PIM_DEBUG_GM_TRACE)
+		zlog_warn("%s %s: FIXME WRITEME", __FILE__, __func__);
+
+	memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr));
+
+	if (pim_is_group_filtered(ifp->info, &group_addr))
+		return -1;
+
+	/* non-existent group is created as INCLUDE {empty} */
+	group = igmp_add_group_by_addr(igmp, group_addr);
+	if (!group)
+		return -1;
+
+	group->last_igmp_v1_report_dsec = pim_time_monotonic_dsec();
+
+	return 0;
+}
+
+/* Validate the IP header of a received IGMP packet and report the IP
+ * header length through *hlen.  Returns false on any malformed field.
+ */
+bool pim_igmp_verify_header(struct ip *ip_hdr, size_t len, size_t *hlen)
+{
+	size_t ip_hlen; /* ip header length in bytes */
+	int igmp_msg_len;
+	char *igmp_msg;
+	int msg_type;
+
+	if (len < sizeof(*ip_hdr)) {
+		zlog_warn("IGMP packet size=%zu shorter than minimum=%zu", len,
+			  sizeof(*ip_hdr));
+		return false;
+	}
+
+	/* ip_hl gives length in 4-byte words */
+	ip_hlen = ip_hdr->ip_hl << 2;
+	*hlen = ip_hlen;
+
+	if (ip_hlen > len) {
+		zlog_warn(
+			"IGMP packet header claims size %zu, but we only have %zu bytes",
+			ip_hlen, len);
+		return false;
+	}
+
+	igmp_msg = (char *)ip_hdr + ip_hlen;
+	igmp_msg_len = len - ip_hlen;
+	msg_type = *igmp_msg;
+
+	if (igmp_msg_len < PIM_IGMP_MIN_LEN) {
+		zlog_warn("IGMP message size=%d shorter than minimum=%d",
+			  igmp_msg_len, PIM_IGMP_MIN_LEN);
+		return false;
+	}
+
+	/* mtrace packets may legitimately arrive with ttl != 1; everything
+	 * else must be link-local scoped (ttl == 1).
+	 */
+	if (msg_type != PIM_IGMP_MTRACE_RESPONSE &&
+	    msg_type != PIM_IGMP_MTRACE_QUERY_REQUEST &&
+	    ip_hdr->ip_ttl != 1) {
+		zlog_warn(
+			"Recv IGMP packet with invalid ttl=%u, discarding the packet",
+			ip_hdr->ip_ttl);
+		return false;
+	}
+
+	return true;
+}
+
/*
 * Entry point for a received IGMP packet (buf/len hold the full IP
 * datagram).
 *
 * Verifies the IP header, classifies the IGMP message by its type
 * octet, and dispatches to the per-type handler.  Returns the
 * handler's result, or -1 for a bad header / unsupported type.
 */
int pim_igmp_packet(struct gm_sock *igmp, char *buf, size_t len)
{
	struct ip *ip_hdr = (struct ip *)buf;
	size_t ip_hlen; /* ip header length in bytes */
	char *igmp_msg;
	int igmp_msg_len;
	int msg_type;
	char from_str[INET_ADDRSTRLEN];
	char to_str[INET_ADDRSTRLEN];

	if (!pim_igmp_verify_header(ip_hdr, len, &ip_hlen))
		return -1;

	igmp_msg = buf + ip_hlen;
	igmp_msg_len = len - ip_hlen;
	msg_type = *igmp_msg;

	pim_inet4_dump("<src?>", ip_hdr->ip_src, from_str, sizeof(from_str));
	pim_inet4_dump("<dst?>", ip_hdr->ip_dst, to_str, sizeof(to_str));

	if (PIM_DEBUG_GM_PACKETS) {
		zlog_debug(
			"Recv IGMP packet from %s to %s on %s: size=%zu ttl=%d msg_type=%d msg_size=%d",
			from_str, to_str, igmp->interface->name, len, ip_hdr->ip_ttl,
			msg_type, igmp_msg_len);
	}

	switch (msg_type) {
	case PIM_IGMP_MEMBERSHIP_QUERY: {
		int max_resp_code = igmp_msg[1];
		int query_version;

		/*
		  RFC 3376: 7.1. Query Version Distinctions
		  IGMPv1 Query: length = 8 octets AND Max Resp Code field is
		  zero
		  IGMPv2 Query: length = 8 octets AND Max Resp Code field is
		  non-zero
		  IGMPv3 Query: length >= 12 octets
		*/

		if (igmp_msg_len == 8) {
			query_version = max_resp_code ? 2 : 1;
		} else if (igmp_msg_len >= 12) {
			query_version = 3;
		} else {
			zlog_warn("Unknown IGMP query version");
			return -1;
		}

		return igmp_recv_query(igmp, query_version, max_resp_code,
				       ip_hdr->ip_src, from_str, igmp_msg,
				       igmp_msg_len);
	}

	case PIM_IGMP_V3_MEMBERSHIP_REPORT:
		return igmp_v3_recv_report(igmp, ip_hdr->ip_src, from_str,
					   igmp_msg, igmp_msg_len);

	case PIM_IGMP_V2_MEMBERSHIP_REPORT:
		return igmp_v2_recv_report(igmp, ip_hdr->ip_src, from_str,
					   igmp_msg, igmp_msg_len);

	case PIM_IGMP_V1_MEMBERSHIP_REPORT:
		return igmp_v1_recv_report(igmp, ip_hdr->ip_src, from_str,
					   igmp_msg, igmp_msg_len);

	case PIM_IGMP_V2_LEAVE_GROUP:
		return igmp_v2_recv_leave(igmp, ip_hdr, from_str, igmp_msg,
					  igmp_msg_len);

	case PIM_IGMP_MTRACE_RESPONSE:
		return igmp_mtrace_recv_response(igmp, ip_hdr, ip_hdr->ip_src,
						 from_str, igmp_msg,
						 igmp_msg_len);
	case PIM_IGMP_MTRACE_QUERY_REQUEST:
		return igmp_mtrace_recv_qry_req(igmp, ip_hdr, ip_hdr->ip_src,
						from_str, igmp_msg,
						igmp_msg_len);
	}

	zlog_warn("Ignoring unsupported IGMP message type: %d", msg_type);

	/* Collecting IGMP Rx stats */
	igmp->igmp_stats.unsupported++;

	return -1;
}
+
/*
 * Schedule the next IGMP general query on this socket.
 *
 * While startup_query_count > 0 the socket is in startup mode and uses
 * the Startup Query Interval (RFC 3376 8.6: 1/4 of the Query Interval),
 * except that the very first query after configuration goes out after
 * only 1 second.  Outside startup mode the regular querier query
 * interval (QQI) applies.
 */
void pim_igmp_general_query_on(struct gm_sock *igmp)
{
	struct pim_interface *pim_ifp;
	int startup_mode;
	int query_interval;

	/*
	  Since this socket is starting as querier,
	  there should not exist a timer for other-querier-present.
	*/
	assert(!igmp->t_other_querier_timer);
	pim_ifp = igmp->interface->info;
	assert(pim_ifp);

	/*
	  RFC 3376: 8.6. Startup Query Interval

	  The Startup Query Interval is the interval between General Queries
	  sent by a Querier on startup.  Default: 1/4 the Query Interval.
	  The first one should be sent out immediately instead of 125/4
	  seconds from now.
	*/
	startup_mode = igmp->startup_query_count > 0;
	if (startup_mode) {
		/*
		 * If this is the first time we are sending a query on a
		 * newly configured igmp interface send it out in 1 second
		 * just to give the entire world a tiny bit of time to settle
		 * else the query interval is:
		 * query_interval = pim_ifp->gm_default_query_interval >> 2;
		 */
		if (igmp->startup_query_count ==
		    igmp->querier_robustness_variable)
			query_interval = 1;
		else
			query_interval = PIM_IGMP_SQI(
				pim_ifp->gm_default_query_interval);

		--igmp->startup_query_count;
	} else {
		query_interval = igmp->querier_query_interval;
	}

	if (PIM_DEBUG_GM_TRACE) {
		char ifaddr_str[INET_ADDRSTRLEN];
		pim_inet4_dump("<ifaddr?>", igmp->ifaddr, ifaddr_str,
			       sizeof(ifaddr_str));
		zlog_debug(
			"Querier %s scheduling %d-second (%s) TIMER event for IGMP query on fd=%d",
			ifaddr_str, query_interval,
			startup_mode ? "startup" : "non-startup", igmp->fd);
	}
	event_add_timer(router->master, pim_igmp_general_query, igmp,
			query_interval, &igmp->t_igmp_query_timer);
}
+
+void pim_igmp_general_query_off(struct gm_sock *igmp)
+{
+ assert(igmp);
+
+ if (PIM_DEBUG_GM_TRACE) {
+ if (igmp->t_igmp_query_timer) {
+ char ifaddr_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<ifaddr?>", igmp->ifaddr, ifaddr_str,
+ sizeof(ifaddr_str));
+ zlog_debug(
+ "IGMP querier %s fd=%d cancelling query TIMER event on %s",
+ ifaddr_str, igmp->fd, igmp->interface->name);
+ }
+ }
+ EVENT_OFF(igmp->t_igmp_query_timer);
+}
+
/*
 * Timer handler: issue an IGMP general query on the socket and
 * reschedule the next one.
 *
 * The query buffer is sized by the configured IGMP version (v3 queries
 * can carry a source list, so they need the large write buffer).
 */
static void pim_igmp_general_query(struct event *t)
{
	struct gm_sock *igmp;
	struct in_addr dst_addr;
	struct in_addr group_addr;
	struct pim_interface *pim_ifp;
	int query_buf_size;

	igmp = EVENT_ARG(t);

	assert(igmp->interface);
	assert(igmp->interface->info);

	pim_ifp = igmp->interface->info;

	if (pim_ifp->igmp_version == 3) {
		query_buf_size = PIM_IGMP_BUFSIZE_WRITE;
	} else {
		query_buf_size = IGMP_V12_MSG_SIZE;
	}

	char query_buf[query_buf_size];

	/*
	  RFC3376: 4.1.12. IP Destination Addresses for Queries

	  In IGMPv3, General Queries are sent with an IP destination address
	  of 224.0.0.1, the all-systems multicast address.  Group-Specific
	  and Group-and-Source-Specific Queries are sent with an IP
	  destination address equal to the multicast address of interest.
	*/

	dst_addr.s_addr = htonl(INADDR_ALLHOSTS_GROUP);
	group_addr.s_addr = PIM_NET_INADDR_ANY;

	if (PIM_DEBUG_GM_TRACE) {
		char querier_str[INET_ADDRSTRLEN];
		char dst_str[INET_ADDRSTRLEN];
		pim_inet4_dump("<querier?>", igmp->ifaddr, querier_str,
			       sizeof(querier_str));
		pim_inet4_dump("<dst?>", dst_addr, dst_str, sizeof(dst_str));
		zlog_debug("Querier %s issuing IGMP general query to %s on %s",
			   querier_str, dst_str, igmp->interface->name);
	}

	igmp_send_query(pim_ifp->igmp_version, 0 /* igmp_group */, query_buf,
			sizeof(query_buf), 0 /* num_sources */, dst_addr,
			group_addr, pim_ifp->gm_query_max_response_time_dsec,
			1 /* s_flag: always set for general queries */, igmp);

	/* re-arm the timer for the next general query */
	pim_igmp_general_query_on(igmp);
}
+
+static void sock_close(struct gm_sock *igmp)
+{
+ pim_igmp_other_querier_timer_off(igmp);
+ pim_igmp_general_query_off(igmp);
+
+ if (PIM_DEBUG_GM_TRACE_DETAIL) {
+ if (igmp->t_igmp_read) {
+ zlog_debug(
+ "Cancelling READ event on IGMP socket %pI4 fd=%d on interface %s",
+ &igmp->ifaddr, igmp->fd,
+ igmp->interface->name);
+ }
+ }
+ EVENT_OFF(igmp->t_igmp_read);
+
+ if (close(igmp->fd)) {
+ flog_err(
+ EC_LIB_SOCKET,
+ "Failure closing IGMP socket %pI4 fd=%d on interface %s: errno=%d: %s",
+ &igmp->ifaddr, igmp->fd,
+ igmp->interface->name, errno, safe_strerror(errno));
+ }
+
+ if (PIM_DEBUG_GM_TRACE_DETAIL) {
+ zlog_debug("Deleted IGMP socket %pI4 fd=%d on interface %s",
+ &igmp->ifaddr, igmp->fd,
+ igmp->interface->name);
+ }
+}
+
+void igmp_startup_mode_on(struct gm_sock *igmp)
+{
+ struct pim_interface *pim_ifp;
+
+ pim_ifp = igmp->interface->info;
+
+ /*
+ RFC 3376: 8.7. Startup Query Count
+
+ The Startup Query Count is the number of Queries sent out on
+ startup, separated by the Startup Query Interval. Default: the
+ Robustness Variable.
+ */
+ igmp->startup_query_count = igmp->querier_robustness_variable;
+
+ /*
+ Since we're (re)starting, reset QQI to default Query Interval
+ */
+ igmp->querier_query_interval = pim_ifp->gm_default_query_interval;
+}
+
/* Release a group record: drop its source list (each source is freed
 * via the list's del callback) and free the group itself. */
static void igmp_group_free(struct gm_group *group)
{
	list_delete(&group->group_source_list);

	XFREE(MTYPE_PIM_IGMP_GROUP, group);
}
+
/*
 * Account for a newly added IGMP group (the caller has already inserted
 * it into pim_ifp->gm_group_list).
 *
 * Bumps the per-VRF group count and warns when it reaches the
 * configured watermark; the == comparison makes the warning fire only
 * when the limit is first hit (and presumably never when the limit is
 * 0, i.e. unset -- TODO confirm where gm_watermark_limit is set).
 * Also tracks the per-interface peak group count.
 */
static void igmp_group_count_incr(struct pim_interface *pim_ifp)
{
	uint32_t group_count = listcount(pim_ifp->gm_group_list);

	++pim_ifp->pim->gm_group_count;
	if (pim_ifp->pim->gm_group_count == pim_ifp->pim->gm_watermark_limit) {
		zlog_warn(
			"IGMP group count reached watermark limit: %u(vrf: %s)",
			pim_ifp->pim->gm_group_count,
			VRF_LOGNAME(pim_ifp->pim->vrf));
	}

	if (pim_ifp->igmp_peak_group_count < group_count)
		pim_ifp->igmp_peak_group_count = group_count;
}
+
+static void igmp_group_count_decr(struct pim_interface *pim_ifp)
+{
+ if (pim_ifp->pim->gm_group_count == 0) {
+ zlog_warn("Cannot decrement igmp group count below 0(vrf: %s)",
+ VRF_LOGNAME(pim_ifp->pim->vrf));
+ return;
+ }
+
+ --pim_ifp->pim->gm_group_count;
+}
+
/*
 * Remove a group record from its interface and free it.
 *
 * Deletes all sources first, cancels the group's retransmit and
 * filter-mode timers, then unlinks the group from the interface's list
 * and hash before freeing it.
 */
void igmp_group_delete(struct gm_group *group)
{
	struct listnode *src_node;
	struct listnode *src_nextnode;
	struct gm_source *src;
	struct pim_interface *pim_ifp = group->interface->info;

	if (PIM_DEBUG_GM_TRACE) {
		char group_str[INET_ADDRSTRLEN];
		pim_inet4_dump("<group?>", group->group_addr, group_str,
			       sizeof(group_str));
		zlog_debug("Deleting IGMP group %s from interface %s",
			   group_str, group->interface->name);
	}

	for (ALL_LIST_ELEMENTS(group->group_source_list, src_node, src_nextnode,
			       src)) {
		igmp_source_delete(src);
	}

	EVENT_OFF(group->t_group_query_retransmit_timer);

	group_timer_off(group);
	igmp_group_count_decr(pim_ifp);
	listnode_delete(pim_ifp->gm_group_list, group);
	hash_release(pim_ifp->gm_group_hash, group);

	igmp_group_free(group);
}
+
/* Delete a group that is in INCLUDE mode with an empty source list --
 * the "non-existent" state of RFC 3376 -- asserting that this is
 * really the case. */
void igmp_group_delete_empty_include(struct gm_group *group)
{
	assert(!group->group_filtermode_isexcl);
	assert(!listcount(group->group_source_list));

	igmp_group_delete(group);
}
+
/* Free a gm_sock; all of its events must already be cancelled. */
void igmp_sock_free(struct gm_sock *igmp)
{
	assert(!igmp->t_igmp_read);
	assert(!igmp->t_igmp_query_timer);
	assert(!igmp->t_other_querier_timer);

	XFREE(MTYPE_PIM_IGMP_SOCKET, igmp);
}
+
/*
 * Close and free one IGMP socket and unlink it from its interface.
 * When the last socket on the interface goes away, the remaining group
 * state on the interface is reset as well.
 */
void igmp_sock_delete(struct gm_sock *igmp)
{
	struct pim_interface *pim_ifp;

	/* cancel events and close the fd before freeing the gm_sock */
	sock_close(igmp);

	pim_ifp = igmp->interface->info;

	listnode_delete(pim_ifp->gm_socket_list, igmp);

	igmp_sock_free(igmp);

	if (!listcount(pim_ifp->gm_socket_list))
		pim_igmp_if_reset(pim_ifp);
}
+
+void igmp_sock_delete_all(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+ struct listnode *igmp_node, *igmp_nextnode;
+ struct gm_sock *igmp;
+
+ pim_ifp = ifp->info;
+
+ for (ALL_LIST_ELEMENTS(pim_ifp->gm_socket_list, igmp_node,
+ igmp_nextnode, igmp)) {
+ igmp_sock_delete(igmp);
+ }
+}
+
+static unsigned int igmp_group_hash_key(const void *arg)
+{
+ const struct gm_group *group = arg;
+
+ return jhash_1word(group->group_addr.s_addr, 0);
+}
+
+static bool igmp_group_hash_equal(const void *arg1, const void *arg2)
+{
+ const struct gm_group *g1 = (const struct gm_group *)arg1;
+ const struct gm_group *g2 = (const struct gm_group *)arg2;
+
+ if (g1->group_addr.s_addr == g2->group_addr.s_addr)
+ return true;
+
+ return false;
+}
+
+void pim_igmp_if_init(struct pim_interface *pim_ifp, struct interface *ifp)
+{
+ char hash_name[64];
+
+ pim_ifp->gm_socket_list = list_new();
+ pim_ifp->gm_socket_list->del = (void (*)(void *))igmp_sock_free;
+
+ pim_ifp->gm_group_list = list_new();
+ pim_ifp->gm_group_list->del = (void (*)(void *))igmp_group_free;
+
+ snprintf(hash_name, sizeof(hash_name), "IGMP %s hash", ifp->name);
+ pim_ifp->gm_group_hash = hash_create(igmp_group_hash_key,
+ igmp_group_hash_equal, hash_name);
+}
+
+void pim_igmp_if_reset(struct pim_interface *pim_ifp)
+{
+ struct listnode *grp_node, *grp_nextnode;
+ struct gm_group *grp;
+
+ for (ALL_LIST_ELEMENTS(pim_ifp->gm_group_list, grp_node, grp_nextnode,
+ grp)) {
+ igmp_group_delete(grp);
+ }
+}
+
/* Tear down the per-interface IGMP state created by pim_igmp_if_init();
 * all groups must be gone (pim_igmp_if_reset ensures it) before the
 * containers are destroyed. */
void pim_igmp_if_fini(struct pim_interface *pim_ifp)
{
	pim_igmp_if_reset(pim_ifp);

	assert(pim_ifp->gm_group_list);
	assert(!listcount(pim_ifp->gm_group_list));

	list_delete(&pim_ifp->gm_group_list);
	hash_free(pim_ifp->gm_group_hash);

	list_delete(&pim_ifp->gm_socket_list);
}
+
/*
 * Allocate and initialize a gm_sock for fd/ifaddr on ifp.
 *
 * mtrace-only sockets are returned without querier state; regular
 * sockets enter startup-querier mode and immediately start scheduling
 * general queries.
 */
static struct gm_sock *igmp_sock_new(int fd, struct in_addr ifaddr,
				     struct interface *ifp, int mtrace_only)
{
	struct pim_interface *pim_ifp;
	struct gm_sock *igmp;

	pim_ifp = ifp->info;

	if (PIM_DEBUG_GM_TRACE) {
		zlog_debug(
			"Creating IGMP socket fd=%d for address %pI4 on interface %s",
			fd, &ifaddr, ifp->name);
	}

	igmp = XCALLOC(MTYPE_PIM_IGMP_SOCKET, sizeof(*igmp));

	igmp->fd = fd;
	igmp->interface = ifp;
	igmp->ifaddr = ifaddr;
	igmp->querier_addr = ifaddr;
	igmp->t_igmp_read = NULL;
	igmp->t_igmp_query_timer = NULL;
	igmp->t_other_querier_timer = NULL; /* no other querier present */
	igmp->querier_robustness_variable =
		pim_ifp->gm_default_robustness_variable;
	igmp->sock_creation = pim_time_monotonic_sec();

	igmp_stats_init(&igmp->igmp_stats);

	if (mtrace_only) {
		igmp->mtrace_only = mtrace_only;
		return igmp;
	}

	igmp->mtrace_only = false;

	/*
	  igmp_startup_mode_on() will reset QQI:

	  igmp->querier_query_interval = pim_ifp->gm_default_query_interval;
	*/
	igmp_startup_mode_on(igmp);
	pim_igmp_general_query_on(igmp);

	return igmp;
}
+
+static void igmp_read_on(struct gm_sock *igmp);
+
/*
 * READ event handler for an IGMP socket.
 *
 * Drains every pending datagram from the socket, then re-arms the read
 * event.  The payload read here is discarded; actual IGMP processing
 * presumably happens on a different receive path (e.g. the mroute
 * upcall) -- NOTE(review): confirm against the rest of pimd.
 */
static void pim_igmp_read(struct event *t)
{
	uint8_t buf[10000];
	struct gm_sock *igmp = (struct gm_sock *)EVENT_ARG(t);
	struct sockaddr_storage from;
	struct sockaddr_storage to;
	socklen_t fromlen = sizeof(from);
	socklen_t tolen = sizeof(to);
	ifindex_t ifindex = -1;
	int len;

	while (1) {
		len = pim_socket_recvfromto(igmp->fd, buf, sizeof(buf), &from,
					    &fromlen, &to, &tolen, &ifindex);
		if (len < 0) {
			if (errno == EINTR)
				continue;
			/* socket drained: stop reading, keep event armed */
			if (errno == EWOULDBLOCK || errno == EAGAIN)
				break;

			goto done;
		}
	}

done:
	igmp_read_on(igmp);
}
+
+static void igmp_read_on(struct gm_sock *igmp)
+{
+
+ if (PIM_DEBUG_GM_TRACE_DETAIL) {
+ zlog_debug("Scheduling READ event on IGMP socket fd=%d",
+ igmp->fd);
+ }
+ event_add_read(router->master, pim_igmp_read, igmp, igmp->fd,
+ &igmp->t_igmp_read);
+}
+
+struct gm_sock *pim_igmp_sock_add(struct list *igmp_sock_list,
+ struct in_addr ifaddr, struct interface *ifp,
+ bool mtrace_only)
+{
+ struct gm_sock *igmp;
+ struct sockaddr_in sin;
+ int fd;
+
+ fd = igmp_sock_open(ifaddr, ifp);
+ if (fd < 0) {
+ zlog_warn("Could not open IGMP socket for %pI4 on %s",
+ &ifaddr, ifp->name);
+ return NULL;
+ }
+
+ sin.sin_family = AF_INET;
+ sin.sin_addr = ifaddr;
+ sin.sin_port = 0;
+ if (bind(fd, (struct sockaddr *) &sin, sizeof(sin)) != 0) {
+ zlog_warn("Could not bind IGMP socket for %pI4 on %s: %s(%d)",
+ &ifaddr, ifp->name, strerror(errno), errno);
+ close(fd);
+
+ return NULL;
+ }
+
+ igmp = igmp_sock_new(fd, ifaddr, ifp, mtrace_only);
+
+ igmp_read_on(igmp);
+
+ listnode_add(igmp_sock_list, igmp);
+
+#ifdef IGMP_SOCK_DUMP
+ igmp_sock_dump(igmp_sock_array);
+#endif
+
+ return igmp;
+}
+
/*
  RFC 3376: 6.5. Switching Router Filter-Modes

  When a router's filter-mode for a group is EXCLUDE and the group
  timer expires, the router filter-mode for the group transitions to
  INCLUDE.

  A router uses source records with running source timers as its state
  for the switch to a filter-mode of INCLUDE.  If there are any source
  records with source timers greater than zero (i.e., requested to be
  forwarded), a router switches to filter-mode of INCLUDE using those
  source records.  Source records whose timers are zero (from the
  previous EXCLUDE mode) are deleted.
 */
/* Group timer expiry handler: flip the group from EXCLUDE to INCLUDE,
 * stop (*,G) forwarding, drop expired sources and delete the group if
 * no sources remain. */
static void igmp_group_timer(struct event *t)
{
	struct gm_group *group;

	group = EVENT_ARG(t);

	if (PIM_DEBUG_GM_TRACE) {
		char group_str[INET_ADDRSTRLEN];
		pim_inet4_dump("<group?>", group->group_addr, group_str,
			       sizeof(group_str));
		zlog_debug("%s: Timer for group %s on interface %s", __func__,
			   group_str, group->interface->name);
	}

	assert(group->group_filtermode_isexcl);

	group->group_filtermode_isexcl = 0;

	/* Any source (*,G) is forwarded only if mode is EXCLUDE {empty} */
	igmp_anysource_forward_stop(group);

	igmp_source_delete_expired(group->group_source_list);

	assert(!group->group_filtermode_isexcl);

	/*
	  RFC 3376: 6.2.2. Definition of Group Timers

	  If there are no more source records for the group, delete group
	  record.
	*/
	if (listcount(group->group_source_list) < 1) {
		igmp_group_delete_empty_include(group);
	}
}
+
+static void group_timer_off(struct gm_group *group)
+{
+ if (!group->t_group_timer)
+ return;
+
+ if (PIM_DEBUG_GM_TRACE) {
+ char group_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<group?>", group->group_addr, group_str,
+ sizeof(group_str));
+ zlog_debug("Cancelling TIMER event for group %s on %s",
+ group_str, group->interface->name);
+ }
+ EVENT_OFF(group->t_group_timer);
+}
+
/*
 * (Re)start the group filter-mode timer with the given interval in
 * milliseconds.  Only valid for a group in EXCLUDE mode: the timer
 * marks when the group falls back to INCLUDE (RFC 3376 6.2.2).
 */
void igmp_group_timer_on(struct gm_group *group, long interval_msec,
			 const char *ifname)
{
	group_timer_off(group);

	if (PIM_DEBUG_GM_EVENTS) {
		char group_str[INET_ADDRSTRLEN];
		pim_inet4_dump("<group?>", group->group_addr, group_str,
			       sizeof(group_str));
		zlog_debug(
			"Scheduling %ld.%03ld sec TIMER event for group %s on %s",
			interval_msec / 1000, interval_msec % 1000, group_str,
			ifname);
	}

	/*
	  RFC 3376: 6.2.2. Definition of Group Timers

	  The group timer is only used when a group is in EXCLUDE mode and
	  it represents the time for the *filter-mode* of the group to
	  expire and switch to INCLUDE mode.
	*/
	assert(group->group_filtermode_isexcl);

	event_add_timer_msec(router->master, igmp_group_timer, group,
			     interval_msec, &group->t_group_timer);
}
+
+struct gm_group *find_group_by_addr(struct gm_sock *igmp,
+ struct in_addr group_addr)
+{
+ struct gm_group lookup;
+ struct pim_interface *pim_ifp = igmp->interface->info;
+
+ lookup.group_addr.s_addr = group_addr.s_addr;
+
+ return hash_lookup(pim_ifp->gm_group_hash, &lookup);
+}
+
/*
 * Find or create the group record for group_addr on this socket's
 * interface.
 *
 * Returns the existing record if present.  Refuses addresses outside
 * 224.0.0.0/4 and link-local groups inside 224.0.0.0/24 (returns
 * NULL).  A new group starts in INCLUDE mode with an empty source
 * list, per the RFC 3376 5.1 "non-existent" state.
 */
struct gm_group *igmp_add_group_by_addr(struct gm_sock *igmp,
					struct in_addr group_addr)
{
	struct gm_group *group;
	struct pim_interface *pim_ifp = igmp->interface->info;

	group = find_group_by_addr(igmp, group_addr);
	if (group) {
		return group;
	}

	if (!pim_is_group_224_4(group_addr)) {
		zlog_warn("%s: Group Specified is not part of 224.0.0.0/4",
			  __func__);
		return NULL;
	}

	if (pim_is_group_224_0_0_0_24(group_addr)) {
		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(
				"%s: Group specified %pI4 is part of 224.0.0.0/24",
				__func__, &group_addr);
		return NULL;
	}
	/*
	  Non-existant group is created as INCLUDE {empty}:

	  RFC 3376 - 5.1. Action on Change of Interface State

	  If no interface state existed for that multicast address before
	  the change (i.e., the change consisted of creating a new
	  per-interface record), or if no state exists after the change
	  (i.e., the change consisted of deleting a per-interface record),
	  then the "non-existent" state is considered to have a filter mode
	  of INCLUDE and an empty source list.
	*/

	group = XCALLOC(MTYPE_PIM_IGMP_GROUP, sizeof(*group));

	group->group_source_list = list_new();
	group->group_source_list->del = (void (*)(void *))igmp_source_free;

	group->t_group_timer = NULL;
	group->t_group_query_retransmit_timer = NULL;
	group->group_specific_query_retransmit_count = 0;
	group->group_addr = group_addr;
	group->interface = igmp->interface;
	group->last_igmp_v1_report_dsec = -1;
	group->last_igmp_v2_report_dsec = -1;
	group->group_creation = pim_time_monotonic_sec();
	group->igmp_version = IGMP_DEFAULT_VERSION;

	/* initialize new group as INCLUDE {empty} */
	group->group_filtermode_isexcl = 0; /* 0=INCLUDE, 1=EXCLUDE */

	/* insert into list and hash before the group count is bumped */
	listnode_add(pim_ifp->gm_group_list, group);
	group = hash_get(pim_ifp->gm_group_hash, group, hash_alloc_intern);

	if (PIM_DEBUG_GM_TRACE) {
		char group_str[INET_ADDRSTRLEN];
		pim_inet4_dump("<group?>", group->group_addr, group_str,
			       sizeof(group_str));
		zlog_debug(
			"Creating new IGMP group %s on socket %d interface %s",
			group_str, igmp->fd, igmp->interface->name);
	}

	igmp_group_count_incr(pim_ifp);

	/*
	  RFC 3376: 6.2.2. Definition of Group Timers

	  The group timer is only used when a group is in EXCLUDE mode and
	  it represents the time for the *filter-mode* of the group to
	  expire and switch to INCLUDE mode.
	*/
	assert(!group->group_filtermode_isexcl); /* INCLUDE mode */
	assert(!group->t_group_timer);		 /* group timer == 0 */

	/* Any source (*,G) is forwarded only if mode is EXCLUDE {empty} */
	igmp_anysource_forward_stop(group);

	return group;
}
+
/*
 * Transmit an IGMP query (general or group-specific) on the socket.
 *
 * Updates the sent-query statistics, then hands off to the v3 or v2
 * sender depending on igmp_version.  Note: for any other version
 * value (e.g. 1), only the statistic is updated and nothing is sent.
 */
void igmp_send_query(int igmp_version, struct gm_group *group, char *query_buf,
		     int query_buf_size, int num_sources,
		     struct in_addr dst_addr, struct in_addr group_addr,
		     int query_max_response_time_dsec, uint8_t s_flag,
		     struct gm_sock *igmp)
{
	if (pim_addr_is_any(group_addr) &&
	    ntohl(dst_addr.s_addr) == INADDR_ALLHOSTS_GROUP)
		igmp->igmp_stats.general_queries_sent++;
	else if (group)
		igmp->igmp_stats.group_queries_sent++;

	if (igmp_version == 3) {
		igmp_v3_send_query(group, igmp->fd, igmp->interface->name,
				   query_buf, query_buf_size, num_sources,
				   dst_addr, group_addr,
				   query_max_response_time_dsec, s_flag,
				   igmp->querier_robustness_variable,
				   igmp->querier_query_interval);
	} else if (igmp_version == 2) {
		igmp_v2_send_query(group, igmp->fd, igmp->interface->name,
				   query_buf, dst_addr, group_addr,
				   query_max_response_time_dsec);
	}
}
+
/*
 * Issue an IGMP general query on every IGMP socket of the interface.
 *
 * igmp_ver 0 defaults to 2.  Version 3 uses the larger write buffer
 * since v3 queries may carry a source list.
 */
void igmp_send_query_on_intf(struct interface *ifp, int igmp_ver)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct listnode *sock_node = NULL;
	struct gm_sock *igmp = NULL;
	struct in_addr dst_addr;
	struct in_addr group_addr;
	int query_buf_size;

	if (!igmp_ver)
		igmp_ver = 2;

	if (igmp_ver == 3)
		query_buf_size = PIM_IGMP_BUFSIZE_WRITE;
	else
		query_buf_size = IGMP_V12_MSG_SIZE;

	/* general queries go to 224.0.0.1 with a zero group field */
	dst_addr.s_addr = htonl(INADDR_ALLHOSTS_GROUP);
	group_addr.s_addr = PIM_NET_INADDR_ANY;

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug("Issuing general query on request on %s", ifp->name);

	for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_socket_list, sock_node, igmp)) {

		char query_buf[query_buf_size];

		igmp_send_query(
			igmp_ver, 0 /* igmp_group */, query_buf,
			sizeof(query_buf), 0 /* num_sources */, dst_addr,
			group_addr, pim_ifp->gm_query_max_response_time_dsec,
			1 /* s_flag: always set for general queries */, igmp);
	}
}
diff --git a/pimd/pim_igmp.h b/pimd/pim_igmp.h
new file mode 100644
index 0000000..a1f19b3
--- /dev/null
+++ b/pimd/pim_igmp.h
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_IGMP_H
+#define PIM_IGMP_H
+
+#include <netinet/in.h>
+
+#include <zebra.h>
+#include "vty.h"
+#include "linklist.h"
+#include "pim_igmp_stats.h"
+#include "pim_str.h"
+
+/*
+ The following sizes are likely to support
+ any message sent within local MTU.
+*/
+#define PIM_IGMP_BUFSIZE_READ (20000)
+#define PIM_IGMP_BUFSIZE_WRITE (20000)
+
+#define PIM_IGMP_MEMBERSHIP_QUERY (0x11)
+#define PIM_IGMP_V1_MEMBERSHIP_REPORT (0x12)
+#define PIM_IGMP_V2_MEMBERSHIP_REPORT (0x16)
+#define PIM_IGMP_V2_LEAVE_GROUP (0x17)
+#define PIM_IGMP_MTRACE_RESPONSE (0x1E)
+#define PIM_IGMP_MTRACE_QUERY_REQUEST (0x1F)
+#define PIM_IGMP_V3_MEMBERSHIP_REPORT (0x22)
+
+#define IGMP_V3_REPORT_HEADER_SIZE (8)
+#define IGMP_V3_GROUP_RECORD_MIN_SIZE (8)
+#define IGMP_V3_MSG_MIN_SIZE \
+ (IGMP_V3_REPORT_HEADER_SIZE + IGMP_V3_GROUP_RECORD_MIN_SIZE)
+#define IGMP_V12_MSG_SIZE (8)
+
+#define IGMP_V3_GROUP_RECORD_TYPE_OFFSET (0)
+#define IGMP_V3_GROUP_RECORD_AUXDATALEN_OFFSET (1)
+#define IGMP_V3_GROUP_RECORD_NUMSOURCES_OFFSET (2)
+#define IGMP_V3_GROUP_RECORD_GROUP_OFFSET (4)
+#define IGMP_V3_GROUP_RECORD_SOURCE_OFFSET (8)
+#define IGMP_CHECKSUM_OFFSET (2)
+
+#define IGMP_DEFAULT_VERSION (3)
+
+#define IGMP_GET_INT16(ptr, output) \
+ do { \
+ output = *(ptr) << 8; \
+ output |= *((ptr) + 1); \
+ } while (0)
+
/* State for one statically joined (group, source) pair. */
struct gm_join {
	pim_addr group_addr;  /* multicast group joined */
	pim_addr source_addr; /* source for the join */
	int sock_fd;	      /* socket holding the join */
	time_t sock_creation; /* when the join socket was created */
};
+
/* Per-address IGMP socket state, including this socket's querier role. */
struct gm_sock {
	int fd;				    /* the IGMP socket */
	struct interface *interface;	    /* interface the socket is bound on */
	pim_addr ifaddr;		    /* local address the socket is bound to */
	time_t sock_creation;		    /* monotonic creation time */

	struct event *t_igmp_read; /* read: IGMP sockets */
	/* timer: issue IGMP general queries */
	struct event *t_igmp_query_timer;
	struct event *t_other_querier_timer; /* timer: other querier present */
	pim_addr querier_addr; /* IP address of the querier */
	int querier_query_interval; /* QQI */
	int querier_robustness_variable; /* QRV */
	int startup_query_count; /* queries left to send in startup mode */

	bool mtrace_only; /* socket used only for multicast traceroute */

	struct igmp_stats igmp_stats; /* Rx/Tx counters */
};
+
+struct pim_interface;
+
+#if PIM_IPV == 4
+void pim_igmp_if_init(struct pim_interface *pim_ifp, struct interface *ifp);
+void pim_igmp_if_reset(struct pim_interface *pim_ifp);
+void pim_igmp_if_fini(struct pim_interface *pim_ifp);
+
+struct gm_sock *pim_igmp_sock_lookup_ifaddr(struct list *igmp_sock_list,
+ struct in_addr ifaddr);
+struct gm_sock *pim_igmp_sock_add(struct list *igmp_sock_list,
+ struct in_addr ifaddr, struct interface *ifp,
+ bool mtrace_only);
+void igmp_sock_delete(struct gm_sock *igmp);
+void igmp_sock_free(struct gm_sock *igmp);
+void igmp_sock_delete_all(struct interface *ifp);
+int pim_igmp_packet(struct gm_sock *igmp, char *buf, size_t len);
+bool pim_igmp_verify_header(struct ip *ip_hdr, size_t len, size_t *ip_hlen);
+void pim_igmp_general_query_on(struct gm_sock *igmp);
+void pim_igmp_general_query_off(struct gm_sock *igmp);
+void pim_igmp_other_querier_timer_on(struct gm_sock *igmp);
+void pim_igmp_other_querier_timer_off(struct gm_sock *igmp);
+
+int igmp_validate_checksum(char *igmp_msg, int igmp_msg_len);
+
+#else /* PIM_IPV != 4 */
/* No-op stubs so common code compiles when built without IPv4 IGMP
 * support; all of these are deliberately empty. */
static inline void pim_igmp_if_init(struct pim_interface *pim_ifp,
				    struct interface *ifp)
{
}

static inline void pim_igmp_if_fini(struct pim_interface *pim_ifp)
{
}

static inline void pim_igmp_general_query_on(struct gm_sock *igmp)
{
}

static inline void pim_igmp_general_query_off(struct gm_sock *igmp)
{
}

static inline void pim_igmp_other_querier_timer_on(struct gm_sock *igmp)
{
}

static inline void pim_igmp_other_querier_timer_off(struct gm_sock *igmp)
{
}
+#endif /* PIM_IPV == 4 */
+
+#define IGMP_SOURCE_MASK_FORWARDING (1 << 0)
+#define IGMP_SOURCE_MASK_DELETE (1 << 1)
+#define IGMP_SOURCE_MASK_SEND (1 << 2)
+#define IGMP_SOURCE_TEST_FORWARDING(flags) ((flags) & IGMP_SOURCE_MASK_FORWARDING)
+#define IGMP_SOURCE_TEST_DELETE(flags) ((flags) & IGMP_SOURCE_MASK_DELETE)
+#define IGMP_SOURCE_TEST_SEND(flags) ((flags) & IGMP_SOURCE_MASK_SEND)
+#define IGMP_SOURCE_DO_FORWARDING(flags) ((flags) |= IGMP_SOURCE_MASK_FORWARDING)
+#define IGMP_SOURCE_DO_DELETE(flags) ((flags) |= IGMP_SOURCE_MASK_DELETE)
+#define IGMP_SOURCE_DO_SEND(flags) ((flags) |= IGMP_SOURCE_MASK_SEND)
+#define IGMP_SOURCE_DONT_FORWARDING(flags) ((flags) &= ~IGMP_SOURCE_MASK_FORWARDING)
+#define IGMP_SOURCE_DONT_DELETE(flags) ((flags) &= ~IGMP_SOURCE_MASK_DELETE)
+#define IGMP_SOURCE_DONT_SEND(flags) ((flags) &= ~IGMP_SOURCE_MASK_SEND)
+
/* Per-source state within a group record. */
struct gm_source {
	pim_addr source_addr;	      /* the source address */
	struct event *t_source_timer; /* RFC 3376 source timer */
	struct gm_group *source_group; /* back pointer */
	time_t source_creation;	       /* when the source record was created */
	uint32_t source_flags;	       /* IGMP_SOURCE_MASK_* bits */
	struct channel_oil *source_channel_oil; /* forwarding state */

	/*
	  RFC 3376: 6.6.3.2. Building and Sending Group and Source Specific
	  Queries
	*/
	int source_query_retransmit_count;
};
+
/* Per-interface IGMP group membership record. */
struct gm_group {
	/*
	  RFC 3376: 6.2.2. Definition of Group Timers

	  The group timer is only used when a group is in EXCLUDE mode and it
	  represents the time for the *filter-mode* of the group to expire and
	  switch to INCLUDE mode.
	*/
	struct event *t_group_timer;

	/* Shared between group-specific and
	   group-and-source-specific retransmissions */
	struct event *t_group_query_retransmit_timer;

	/* Counter exclusive for group-specific retransmissions
	   (not used by group-and-source-specific retransmissions,
	   since sources have their counters) */
	int group_specific_query_retransmit_count;

	/* compatibility mode - igmp v1, v2 or v3 */
	int igmp_version;
	pim_addr group_addr;
	int group_filtermode_isexcl;    /* 0=INCLUDE, 1=EXCLUDE */
	struct list *group_source_list; /* list of struct gm_source */
	time_t group_creation;		/* when the group record was created */
	struct interface *interface;	/* interface the group lives on */
	int64_t last_igmp_v1_report_dsec; /* -1 until a v1 report is seen */
	int64_t last_igmp_v2_report_dsec; /* -1 until a v2 report is seen */
};
+
+#if PIM_IPV == 4
+struct pim_instance;
+
+void igmp_anysource_forward_start(struct pim_instance *pim,
+ struct gm_group *group);
+void igmp_anysource_forward_stop(struct gm_group *group);
+
+void igmp_source_forward_start(struct pim_instance *pim,
+ struct gm_source *source);
+void igmp_source_forward_stop(struct gm_source *source);
+void igmp_source_forward_reevaluate_all(struct pim_instance *pim);
+
+struct gm_group *find_group_by_addr(struct gm_sock *igmp,
+ struct in_addr group_addr);
+struct gm_group *igmp_add_group_by_addr(struct gm_sock *igmp,
+ struct in_addr group_addr);
+
+struct gm_source *igmp_get_source_by_addr(struct gm_group *group,
+ struct in_addr src_addr,
+ bool *created);
+
+void igmp_group_delete_empty_include(struct gm_group *group);
+
+void igmp_startup_mode_on(struct gm_sock *igmp);
+
+void igmp_group_timer_on(struct gm_group *group, long interval_msec,
+ const char *ifname);
+
+void igmp_send_query(int igmp_version, struct gm_group *group, char *query_buf,
+ int query_buf_size, int num_sources,
+ struct in_addr dst_addr, struct in_addr group_addr,
+ int query_max_response_time_dsec, uint8_t s_flag,
+ struct gm_sock *igmp);
+void igmp_group_delete(struct gm_group *group);
+
+void igmp_send_query_on_intf(struct interface *ifp, int igmp_ver);
+
+#else /* PIM_IPV != 4 */
+static inline void igmp_startup_mode_on(struct gm_sock *igmp)
+{
+}
+#endif /* PIM_IPV != 4 */
+
+#endif /* PIM_IGMP_H */
diff --git a/pimd/pim_igmp_join.h b/pimd/pim_igmp_join.h
new file mode 100644
index 0000000..0e9498c
--- /dev/null
+++ b/pimd/pim_igmp_join.h
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_IGMP_JOIN_H
+#define PIM_IGMP_JOIN_H
+
+#include "pim_addr.h"
+
+/* required headers #include'd by caller */
+
+#ifndef SOL_IP
+#define SOL_IP IPPROTO_IP
+#endif
+
+#ifndef MCAST_JOIN_GROUP
+#define MCAST_JOIN_GROUP 42
+#endif
+
+#ifndef MCAST_JOIN_SOURCE_GROUP
+#define MCAST_JOIN_SOURCE_GROUP 46
+struct group_source_req {
+ uint32_t gsr_interface;
+ struct sockaddr_storage gsr_group;
+ struct sockaddr_storage gsr_source;
+};
+#endif
+
+#if PIM_IPV == 4
+static inline int pim_gm_join_source(int fd, ifindex_t ifindex,
+ pim_addr group_addr, pim_addr source_addr)
+{
+ struct group_source_req req;
+ struct sockaddr_in group = {};
+ struct sockaddr_in source = {};
+
+ memset(&req, 0, sizeof(req));
+
+ group.sin_family = PIM_AF;
+ group.sin_addr = group_addr;
+ group.sin_port = htons(0);
+ memcpy(&req.gsr_group, &group, sizeof(group));
+
+ source.sin_family = PIM_AF;
+ source.sin_addr = source_addr;
+ source.sin_port = htons(0);
+ memcpy(&req.gsr_source, &source, sizeof(source));
+
+ req.gsr_interface = ifindex;
+
+ if (pim_addr_is_any(source_addr))
+ return setsockopt(fd, SOL_IP, MCAST_JOIN_GROUP, &req,
+ sizeof(req));
+ else
+ return setsockopt(fd, SOL_IP, MCAST_JOIN_SOURCE_GROUP, &req,
+ sizeof(req));
+}
+#else /* PIM_IPV != 4*/
/*
 * IPv6 (MLD) variant: join group_addr on ifindex through socket fd;
 * when source_addr is not the unspecified address, a source-specific
 * join is performed.  Returns the setsockopt() result.
 */
static inline int pim_gm_join_source(int fd, ifindex_t ifindex,
				     pim_addr group_addr, pim_addr source_addr)
{
	struct group_source_req req;
	struct sockaddr_in6 group = {};
	struct sockaddr_in6 source = {};

	memset(&req, 0, sizeof(req));

	group.sin6_family = PIM_AF;
	group.sin6_addr = group_addr;
	group.sin6_port = htons(0);
	memcpy(&req.gsr_group, &group, sizeof(group));

	source.sin6_family = PIM_AF;
	source.sin6_addr = source_addr;
	source.sin6_port = htons(0);
	memcpy(&req.gsr_source, &source, sizeof(source));

	req.gsr_interface = ifindex;

	if (pim_addr_is_any(source_addr))
		return setsockopt(fd, SOL_IPV6, MCAST_JOIN_GROUP, &req,
				  sizeof(req));
	else
		return setsockopt(fd, SOL_IPV6, MCAST_JOIN_SOURCE_GROUP, &req,
				  sizeof(req));
}
+#endif /* PIM_IPV != 4*/
+
+#endif /* PIM_IGMP_JOIN_H */
diff --git a/pimd/pim_igmp_mtrace.c b/pimd/pim_igmp_mtrace.c
new file mode 100644
index 0000000..4d3f602
--- /dev/null
+++ b/pimd/pim_igmp_mtrace.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Multicast traceroute for FRRouting
+ * Copyright (C) 2017 Mladen Sablic
+ */
+
+/* based on draft-ietf-idmr-traceroute-ipm-07 */
+
+#include <zebra.h>
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_util.h"
+#include "pim_sock.h"
+#include "pim_rp.h"
+#include "pim_oil.h"
+#include "pim_ifchannel.h"
+#include "pim_macro.h"
+#include "pim_igmp_mtrace.h"
+
+/*
+ * Pick an IPv4 address to represent interface ifp in mtrace responses.
+ *
+ * Preference order: the PIM primary address if PIM is configured on the
+ * interface; otherwise the first non-secondary connected IPv4 address;
+ * otherwise a secondary address; otherwise INADDR_ANY.
+ */
+static struct in_addr mtrace_primary_address(struct interface *ifp)
+{
+ struct connected *ifc;
+ struct listnode *node;
+ struct in_addr any;
+ struct pim_interface *pim_ifp;
+
+ if (ifp->info) {
+ pim_ifp = ifp->info;
+ return pim_ifp->primary_address;
+ }
+
+ any.s_addr = INADDR_ANY;
+
+ for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, ifc)) {
+ struct prefix *p = ifc->address;
+
+ if (p->family != AF_INET)
+ continue;
+
+ if (!CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY))
+ return p->u.prefix4;
+ /* in case no primary found, return a secondary */
+ any = p->u.prefix4;
+ }
+ return any;
+}
+
+/*
+ * Fill in the response block rspp from unicast RPF information toward
+ * the trace's source, used when no multicast state exists for the
+ * group ("weak" forwarding info).  On success *ifpp is set to the
+ * RPF interface toward the source and true is returned; false means
+ * no route/neighbor was found and rspp is left untouched.
+ */
+static bool mtrace_fwd_info_weak(struct pim_instance *pim,
+ struct igmp_mtrace *mtracep,
+ struct igmp_mtrace_rsp *rspp,
+ struct interface **ifpp)
+{
+ struct pim_nexthop nexthop;
+ struct interface *ifp_in;
+ struct in_addr nh_addr;
+
+ nh_addr.s_addr = INADDR_ANY;
+
+ memset(&nexthop, 0, sizeof(nexthop));
+
+ if (!pim_nexthop_lookup(pim, &nexthop, mtracep->src_addr, 1)) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("mtrace not found neighbor");
+ return false;
+ }
+
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("mtrace pim_nexthop_lookup OK");
+
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("mtrace next_hop=%pPAs", &nexthop.mrib_nexthop_addr);
+
+ nh_addr = nexthop.mrib_nexthop_addr;
+
+ ifp_in = nexthop.interface;
+
+ /* return interface for forwarding mtrace packets */
+ *ifpp = ifp_in;
+
+ /* 6.2.2. 4. Fill in the Incoming Interface Address... */
+ rspp->incoming = mtrace_primary_address(ifp_in);
+ rspp->prev_hop = nh_addr;
+ /* packet counts are unknown without multicast state */
+ rspp->in_count = htonl(MTRACE_UNKNOWN_COUNT);
+ rspp->total = htonl(MTRACE_UNKNOWN_COUNT);
+ rspp->rtg_proto = MTRACE_RTG_PROTO_PIM;
+ return true;
+}
+
+/*
+ * Fill in the response block rspp from PIM upstream (S,G) or (*,G)
+ * multicast state for the traced flow.  On success *ifpp is set to the
+ * upstream RPF interface and true is returned; false means no matching
+ * upstream or no RPF interface.
+ */
+static bool mtrace_fwd_info(struct pim_instance *pim,
+ struct igmp_mtrace *mtracep,
+ struct igmp_mtrace_rsp *rspp,
+ struct interface **ifpp)
+{
+ pim_sgaddr sg;
+ struct pim_upstream *up;
+ struct interface *ifp_in;
+ struct in_addr nh_addr;
+ uint32_t total;
+
+ memset(&sg, 0, sizeof(sg));
+ sg.src = mtracep->src_addr;
+ sg.grp = mtracep->grp_addr;
+
+ up = pim_upstream_find(pim, &sg);
+
+ /* no (S,G) state: fall back to (*,G) */
+ if (!up) {
+ sg.src = PIMADDR_ANY;
+ up = pim_upstream_find(pim, &sg);
+ }
+
+ if (!up)
+ return false;
+
+ if (!up->rpf.source_nexthop.interface) {
+ if (PIM_DEBUG_TRACE)
+ zlog_debug("%s: up %s RPF is not present", __func__,
+ up->sg_str);
+ return false;
+ }
+
+ ifp_in = up->rpf.source_nexthop.interface;
+ nh_addr = up->rpf.source_nexthop.mrib_nexthop_addr;
+ total = htonl(MTRACE_UNKNOWN_COUNT);
+
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("fwd_info: upstream next hop=%pI4", &nh_addr);
+
+ /* all on-wire counters in the response are network byte order;
+ * pktcnt is kept in host order, so convert it (was sent raw before)
+ */
+ if (up->channel_oil)
+ total = htonl(up->channel_oil->cc.pktcnt);
+
+ /* return interface for forwarding mtrace packets */
+ *ifpp = ifp_in;
+
+ /* 6.2.2. 4. Fill in the Incoming Interface Address... */
+ rspp->incoming = mtrace_primary_address(ifp_in);
+ rspp->prev_hop = nh_addr;
+ rspp->in_count = htonl(MTRACE_UNKNOWN_COUNT);
+ rspp->total = total;
+ rspp->rtg_proto = MTRACE_RTG_PROTO_PIM;
+
+ /* 6.2.2. 4. Fill in ... S, and Src Mask */
+ if (!pim_addr_is_any(sg.src)) {
+ rspp->s = 1;
+ rspp->src_mask = MTRACE_SRC_MASK_SOURCE;
+ } else {
+ rspp->s = 0;
+ rspp->src_mask = MTRACE_SRC_MASK_GROUP;
+ }
+
+ return true;
+}
+
+/*
+ * Record a forwarding error code in a response block, but only if no
+ * error has been noted yet — the first error encountered wins.
+ */
+static void mtrace_rsp_set_fwd_code(struct igmp_mtrace_rsp *mtrace_rspp,
+ enum mtrace_fwd_code fwd_code)
+{
+ if (mtrace_rspp->fwd_code == MTRACE_FWD_CODE_NO_ERROR)
+ mtrace_rspp->fwd_code = fwd_code;
+}
+
+/*
+ * Initialize a freshly appended response block: zero addresses/flags,
+ * counters set to the "unknown" sentinel (already network byte order),
+ * and no error code.
+ */
+static void mtrace_rsp_init(struct igmp_mtrace_rsp *mtrace_rspp)
+{
+ mtrace_rspp->arrival = 0;
+ mtrace_rspp->incoming.s_addr = INADDR_ANY;
+ mtrace_rspp->outgoing.s_addr = INADDR_ANY;
+ mtrace_rspp->prev_hop.s_addr = INADDR_ANY;
+ mtrace_rspp->in_count = htonl(MTRACE_UNKNOWN_COUNT);
+ mtrace_rspp->out_count = htonl(MTRACE_UNKNOWN_COUNT);
+ mtrace_rspp->total = htonl(MTRACE_UNKNOWN_COUNT);
+ mtrace_rspp->rtg_proto = 0;
+ mtrace_rspp->fwd_ttl = 0;
+ mtrace_rspp->mbz = 0;
+ mtrace_rspp->s = 0;
+ mtrace_rspp->src_mask = 0;
+ mtrace_rspp->fwd_code = MTRACE_FWD_CODE_NO_ERROR;
+}
+
+/*
+ * Log one response block (index rsp) of a received mtrace packet.
+ * qry_id arrives in network byte order and is converted for display.
+ */
+static void mtrace_rsp_debug(uint32_t qry_id, int rsp,
+ struct igmp_mtrace_rsp *mrspp)
+{
+ struct in_addr incoming = mrspp->incoming;
+ struct in_addr outgoing = mrspp->outgoing;
+ struct in_addr prev_hop = mrspp->prev_hop;
+
+ /* format fix: "%ud" printed a stray literal 'd' after the id */
+ zlog_debug(
+ "Rx mt(%d) qid=%u arr=%x in=%pI4 out=%pI4 prev=%pI4 proto=%d fwd=%d",
+ rsp, ntohl(qry_id), mrspp->arrival, &incoming, &outgoing,
+ &prev_hop, mrspp->rtg_proto, mrspp->fwd_code);
+}
+
+/*
+ * Dump a received mtrace packet: fixed header fields plus every
+ * appended response block (via mtrace_rsp_debug).  A trailing partial
+ * response block is reported but the whole blocks are still printed.
+ */
+static void mtrace_debug(struct pim_interface *pim_ifp,
+ struct igmp_mtrace *mtracep, int mtrace_len)
+{
+ struct in_addr ga, sa, da, ra;
+
+ ga = mtracep->grp_addr;
+ sa = mtracep->src_addr;
+ da = mtracep->dst_addr;
+ ra = mtracep->rsp_addr;
+
+ /* format fix: "%ud" printed a stray literal 'd' after the id */
+ zlog_debug(
+ "Rx mtrace packet incoming on %pI4: hops=%d type=%d size=%d, grp=%pI4, src=%pI4, dst=%pI4 rsp=%pI4 ttl=%d qid=%u",
+ &pim_ifp->primary_address, mtracep->hops, mtracep->type,
+ mtrace_len, &ga, &sa, &da, &ra, mtracep->rsp_ttl,
+ ntohl(mtracep->qry_id));
+ if (mtrace_len > (int)sizeof(struct igmp_mtrace)) {
+
+ int i;
+
+ int responses = mtrace_len - sizeof(struct igmp_mtrace);
+
+ if ((responses % sizeof(struct igmp_mtrace_rsp)) != 0)
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug(
+ "Mtrace response block of wrong length");
+
+ responses = responses / sizeof(struct igmp_mtrace_rsp);
+
+ for (i = 0; i < responses; i++)
+ mtrace_rsp_debug(mtracep->qry_id, i, &mtracep->rsp[i]);
+ }
+}
+
+/* 5.1 Query Arrival Time */
+/*
+ * Compute the 32-bit Query Arrival Time field: seconds in the upper 16
+ * bits, fractional seconds (usec scaled to 1/65536s: <<10 then /15625)
+ * in the lower 16.  Returns 0 if the clock cannot be read.
+ *
+ * NOTE(review): the +32384 seconds offset is unexplained and the
+ * original author flagged it as possibly wrong — verify against the
+ * NTP-timestamp definition in draft-ietf-idmr-traceroute-ipm.
+ */
+static uint32_t query_arrival_time(void)
+{
+ struct timeval tv;
+ uint32_t qat;
+
+ if (gettimeofday(&tv, NULL) < 0) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("Query arrival time lookup failed: errno=%d: %s",
+ errno, safe_strerror(errno));
+ return 0;
+ }
+ /* not sure second offset correct, as I get different value */
+ qat = ((tv.tv_sec + 32384) << 16) + ((tv.tv_usec << 10) / 15625);
+
+ return qat;
+}
+
+/*
+ * Transmit an mtrace packet of mtrace_buf_len bytes to dst_addr out of
+ * interface ifp.  A fresh raw IGMP socket is opened, bound to ifp and
+ * closed before returning; group_addr is used only for diagnostics.
+ * Returns 0 on success, -1 on any failure.
+ */
+static int mtrace_send_packet(struct interface *ifp,
+ struct igmp_mtrace *mtracep,
+ size_t mtrace_buf_len, struct in_addr dst_addr,
+ struct in_addr group_addr)
+{
+ struct sockaddr_in to;
+ socklen_t tolen;
+ ssize_t sent;
+ int ret;
+ int fd;
+ uint8_t ttl;
+
+ memset(&to, 0, sizeof(to));
+ to.sin_family = AF_INET;
+ to.sin_addr = dst_addr;
+ tolen = sizeof(to);
+
+ if (PIM_DEBUG_MTRACE) {
+ struct in_addr if_addr;
+ struct in_addr rsp_addr = mtracep->rsp_addr;
+
+ if_addr = mtrace_primary_address(ifp);
+ zlog_debug("Sending mtrace packet to %pI4 on %pI4", &rsp_addr,
+ &if_addr);
+ }
+
+ fd = pim_socket_raw(IPPROTO_IGMP);
+
+ if (fd < 0)
+ return -1;
+
+ ret = pim_socket_bind(fd, ifp);
+
+ if (ret < 0) {
+ ret = -1;
+ goto close_fd;
+ }
+
+ /* multicast destinations need an explicit TTL; link-local scope
+ * traffic must not leave the link (TTL 1)
+ */
+ if (IPV4_CLASS_DE(ntohl(dst_addr.s_addr))) {
+ if (IPV4_MC_LINKLOCAL(ntohl(dst_addr.s_addr))) {
+ ttl = 1;
+ } else {
+ if (mtracep->type == PIM_IGMP_MTRACE_RESPONSE)
+ ttl = mtracep->rsp_ttl;
+ else
+ ttl = 64;
+ }
+ ret = setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl,
+ sizeof(ttl));
+
+ if (ret < 0) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("Failed to set socket multicast TTL");
+ ret = -1;
+ goto close_fd;
+ }
+ }
+
+ sent = sendto(fd, (char *)mtracep, mtrace_buf_len, MSG_DONTWAIT,
+ (struct sockaddr *)&to, tolen);
+
+ if (sent != (ssize_t)mtrace_buf_len) {
+ char dst_str[INET_ADDRSTRLEN];
+ char group_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<dst?>", dst_addr, dst_str, sizeof(dst_str));
+ pim_inet4_dump("<group?>", group_addr, group_str,
+ sizeof(group_str));
+ if (sent < 0) {
+ if (PIM_DEBUG_MTRACE)
+ /* message fix: "on%s" was missing a space */
+ zlog_debug(
+ "Send mtrace request failed for %s on %s: group=%s msg_size=%zd: errno=%d: %s",
+ dst_str, ifp->name, group_str,
+ mtrace_buf_len, errno,
+ safe_strerror(errno));
+ } else {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug(
+ "Send mtrace request failed for %s on %s: group=%s msg_size=%zd: sent=%zd",
+ dst_str, ifp->name, group_str,
+ mtrace_buf_len, sent);
+ }
+ ret = -1;
+ goto close_fd;
+ }
+ ret = 0;
+close_fd:
+ close(fd);
+ return ret;
+}
+
+/*
+ * Unicast-forward a raw mtrace IP packet toward ip_hdr->ip_dst.
+ *
+ * Verifies the IP header checksum, decrements TTL (dropping at TTL<=1),
+ * re-checksums, and sends the packet out of `interface` — or, when
+ * interface is NULL, out of the interface found by a nexthop lookup.
+ * Returns 0 on success, -1 on any validation or send failure.
+ */
+static int mtrace_un_forward_packet(struct pim_instance *pim, struct ip *ip_hdr,
+ struct interface *interface)
+{
+ struct pim_nexthop nexthop;
+ struct sockaddr_in to;
+ struct interface *if_out;
+ socklen_t tolen;
+ int ret;
+ int fd;
+ int sent;
+ uint16_t checksum;
+
+ /* validate the received header checksum before mutating the TTL */
+ checksum = ip_hdr->ip_sum;
+
+ ip_hdr->ip_sum = 0;
+
+ if (checksum != in_cksum(ip_hdr, ip_hdr->ip_hl * 4))
+ return -1;
+
+ if (ip_hdr->ip_ttl-- <= 1)
+ return -1;
+
+ if (interface == NULL) {
+ memset(&nexthop, 0, sizeof(nexthop));
+ if (!pim_nexthop_lookup(pim, &nexthop, ip_hdr->ip_dst, 0)) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug(
+ "Dropping mtrace packet, no route to destination");
+ return -1;
+ }
+
+ if_out = nexthop.interface;
+ } else {
+ if_out = interface;
+ }
+
+ /* recompute checksum over the header with the decremented TTL */
+ ip_hdr->ip_sum = in_cksum(ip_hdr, ip_hdr->ip_hl * 4);
+
+ fd = pim_socket_raw(IPPROTO_RAW);
+
+ if (fd < 0)
+ return -1;
+
+ /* we supply the full IP header ourselves (IP_HDRINCL) */
+ pim_socket_ip_hdr(fd);
+
+ ret = pim_socket_bind(fd, if_out);
+
+ if (ret < 0) {
+ close(fd);
+ return -1;
+ }
+
+ memset(&to, 0, sizeof(to));
+ to.sin_family = AF_INET;
+ to.sin_addr = ip_hdr->ip_dst;
+ tolen = sizeof(to);
+
+ sent = sendto(fd, ip_hdr, ntohs(ip_hdr->ip_len), 0,
+ (struct sockaddr *)&to, tolen);
+
+ close(fd);
+
+ if (sent < 0) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug(
+ "Failed to forward mtrace packet: sendto errno=%d, %s",
+ errno, safe_strerror(errno));
+ return -1;
+ }
+
+ if (PIM_DEBUG_MTRACE) {
+ zlog_debug("Fwd mtrace packet len=%u to %pI4 ttl=%u",
+ ntohs(ip_hdr->ip_len), &ip_hdr->ip_dst,
+ ip_hdr->ip_ttl);
+ }
+
+ return 0;
+}
+
+/*
+ * Multicast-forward a raw mtrace IP packet: look up the (*,G) channel
+ * oil for the destination group and unicast-replicate the packet out of
+ * every interface currently in the olist.  Returns 0 if at least one
+ * replica was sent, -1 if no state exists or every send failed.
+ */
+static int mtrace_mc_forward_packet(struct pim_instance *pim, struct ip *ip_hdr)
+{
+ pim_sgaddr sg;
+ struct channel_oil *c_oil;
+ struct listnode *chnode;
+ struct listnode *chnextnode;
+ struct pim_ifchannel *ch = NULL;
+ int ret = -1;
+
+ memset(&sg, 0, sizeof(sg));
+ sg.grp = ip_hdr->ip_dst;
+
+ c_oil = pim_find_channel_oil(pim, &sg);
+
+ if (c_oil == NULL) {
+ if (PIM_DEBUG_MTRACE) {
+ zlog_debug(
+ "Dropping mtrace multicast packet len=%u to %pI4 ttl=%u",
+ ntohs(ip_hdr->ip_len),
+ &ip_hdr->ip_dst, ip_hdr->ip_ttl);
+ }
+ return -1;
+ }
+ if (c_oil->up == NULL)
+ return -1;
+ if (c_oil->up->ifchannels == NULL)
+ return -1;
+ for (ALL_LIST_ELEMENTS(c_oil->up->ifchannels, chnode, chnextnode, ch)) {
+ if (pim_macro_chisin_oiflist(ch)) {
+ int r;
+
+ r = mtrace_un_forward_packet(pim, ip_hdr,
+ ch->interface);
+ /* success if any outgoing interface accepted it */
+ if (r == 0)
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+
+/*
+ * Forward an mtrace packet not addressed to us: multicast destinations
+ * go through the olist replication path, unicast ones through a plain
+ * nexthop forward.  Returns 0 on success, -1 on failure.
+ */
+static int mtrace_forward_packet(struct pim_instance *pim, struct ip *ip_hdr)
+{
+ if (IPV4_CLASS_DE(ntohl(ip_hdr->ip_dst.s_addr)))
+ return mtrace_mc_forward_packet(pim, ip_hdr);
+ else
+ return mtrace_un_forward_packet(pim, ip_hdr, NULL);
+}
+
+/*
+ * Send an mtrace response whose response address is itself a multicast
+ * group: replicate the response out of every interface in the group's
+ * olist.  Returns 0 if at least one copy was sent, -1 if no channel
+ * oil/upstream state exists or all sends failed.
+ */
+static int mtrace_send_mc_response(struct pim_instance *pim,
+ struct igmp_mtrace *mtracep,
+ size_t mtrace_len)
+{
+ pim_sgaddr sg;
+ struct channel_oil *c_oil;
+ struct listnode *chnode;
+ struct listnode *chnextnode;
+ struct pim_ifchannel *ch = NULL;
+ int ret = -1;
+
+ memset(&sg, 0, sizeof(sg));
+ sg.grp = mtracep->rsp_addr;
+
+ c_oil = pim_find_channel_oil(pim, &sg);
+
+ if (c_oil == NULL) {
+ if (PIM_DEBUG_MTRACE) {
+ struct in_addr rsp_addr = mtracep->rsp_addr;
+
+ zlog_debug(
+ "Dropping mtrace multicast response packet len=%u to %pI4",
+ (unsigned int)mtrace_len, &rsp_addr);
+ }
+ return -1;
+ }
+ if (c_oil->up == NULL)
+ return -1;
+ if (c_oil->up->ifchannels == NULL)
+ return -1;
+ for (ALL_LIST_ELEMENTS(c_oil->up->ifchannels, chnode, chnextnode, ch)) {
+ if (pim_macro_chisin_oiflist(ch)) {
+ int r;
+
+ r = mtrace_send_packet(ch->interface, mtracep,
+ mtrace_len, mtracep->rsp_addr,
+ mtracep->grp_addr);
+ /* success if any outgoing interface accepted it */
+ if (r == 0)
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+/* 6.5 Sending Traceroute Responses */
+/*
+ * Turn the packet into a RESPONSE, recompute its checksum, and route it
+ * back toward the response address: multicast response addresses go via
+ * the local olist (if we are the RP) or toward the group's RP; unicast
+ * ones via a nexthop lookup.  Returns 0 on success, -1 on failure.
+ */
+static int mtrace_send_response(struct pim_instance *pim,
+ struct igmp_mtrace *mtracep, size_t mtrace_len)
+{
+ struct pim_nexthop nexthop;
+
+ mtracep->type = PIM_IGMP_MTRACE_RESPONSE;
+
+ mtracep->checksum = 0;
+ mtracep->checksum = in_cksum((char *)mtracep, mtrace_len);
+
+ if (IPV4_CLASS_DE(ntohl(mtracep->rsp_addr.s_addr))) {
+ struct pim_rpf *p_rpf;
+
+ if (pim_rp_i_am_rp(pim, mtracep->rsp_addr))
+ return mtrace_send_mc_response(pim, mtracep,
+ mtrace_len);
+
+ p_rpf = pim_rp_g(pim, mtracep->rsp_addr);
+
+ if (p_rpf == NULL) {
+ if (PIM_DEBUG_MTRACE) {
+ struct in_addr rsp_addr = mtracep->rsp_addr;
+
+ zlog_debug("mtrace no RP for %pI4", &rsp_addr);
+ }
+ return -1;
+ }
+ nexthop = p_rpf->source_nexthop;
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("mtrace response to RP");
+ } else {
+ memset(&nexthop, 0, sizeof(nexthop));
+ /* TODO: should use unicast rib lookup */
+ if (!pim_nexthop_lookup(pim, &nexthop, mtracep->rsp_addr, 1)) {
+ if (PIM_DEBUG_MTRACE)
+ /* format fix: "%ud" printed a stray 'd' */
+ zlog_debug(
+ "Dropped response qid=%u, no route to response address",
+ mtracep->qry_id);
+ return -1;
+ }
+ }
+
+ return mtrace_send_packet(nexthop.interface, mtracep, mtrace_len,
+ mtracep->rsp_addr, mtracep->grp_addr);
+}
+
+/*
+ * Handle a received mtrace QUERY or REQUEST (6.2 of the draft):
+ * validate, append our own response block, and either forward the
+ * request toward the previous hop or send the accumulated response
+ * back to the requester.  Returns 0/-1 (send result or drop).
+ *
+ * The static qry_id/qry_src pair remembers the last query seen so an
+ * immediate duplicate is dropped; this is per-process, not per-VRF.
+ */
+int igmp_mtrace_recv_qry_req(struct gm_sock *igmp, struct ip *ip_hdr,
+ struct in_addr from, const char *from_str,
+ char *igmp_msg, int igmp_msg_len)
+{
+ static uint32_t qry_id, qry_src;
+ char mtrace_buf[MTRACE_HDR_SIZE + MTRACE_MAX_HOPS * MTRACE_RSP_SIZE];
+ struct interface *ifp;
+ struct interface *out_ifp = NULL;
+ struct pim_interface *pim_ifp;
+ struct pim_instance *pim;
+ struct igmp_mtrace *mtracep;
+ struct igmp_mtrace_rsp *rspp;
+ struct in_addr nh_addr;
+ enum mtrace_fwd_code fwd_code = MTRACE_FWD_CODE_NO_ERROR;
+ size_t r_len;
+ int last_rsp_ind = 0;
+ size_t mtrace_len;
+ uint16_t recv_checksum;
+ uint16_t checksum;
+ bool reached_source;
+ bool fwd_info;
+
+ ifp = igmp->interface;
+ pim_ifp = ifp->info;
+ pim = pim_ifp->pim;
+
+ /*
+ * 6. Router Behaviour
+ * Check if mtrace packet is addressed elsewhere and forward,
+ * if applicable
+ */
+ if (!IPV4_CLASS_DE(ntohl(ip_hdr->ip_dst.s_addr)))
+ if (!if_address_is_local(&ip_hdr->ip_dst, AF_INET,
+ pim->vrf->vrf_id))
+ return mtrace_forward_packet(pim, ip_hdr);
+
+ if (igmp_msg_len < (int)sizeof(struct igmp_mtrace)) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug(
+ "Recv mtrace packet from %s on %s: too short, len=%d, min=%zu",
+ from_str, ifp->name, igmp_msg_len,
+ sizeof(struct igmp_mtrace));
+ return -1;
+ }
+
+ mtracep = (struct igmp_mtrace *)igmp_msg;
+
+ /* verify the IGMP checksum over the whole message */
+ recv_checksum = mtracep->checksum;
+
+ mtracep->checksum = 0;
+
+ checksum = in_cksum(igmp_msg, igmp_msg_len);
+
+ if (recv_checksum != checksum) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug(
+ "Recv mtrace packet from %s on %s: checksum mismatch: received=%x computed=%x",
+ from_str, ifp->name, recv_checksum, checksum);
+ return -1;
+ }
+
+ /* Collecting IGMP Rx stats */
+ igmp->igmp_stats.mtrace_req++;
+
+ if (PIM_DEBUG_MTRACE)
+ mtrace_debug(pim_ifp, mtracep, igmp_msg_len);
+
+ /* subtract header from message length */
+ r_len = igmp_msg_len - sizeof(struct igmp_mtrace);
+
+ /* Classify mtrace packet, check if it is a query */
+ if (!r_len) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("Received IGMP multicast traceroute query");
+
+ /* 6.1.1 Packet verification */
+ if (!pim_if_connected_to_source(ifp, mtracep->dst_addr)) {
+ if (IPV4_CLASS_DE(ntohl(ip_hdr->ip_dst.s_addr))) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug(
+ "Dropping multicast query on wrong interface");
+ return -1;
+ }
+ /* Unicast query on wrong interface */
+ fwd_code = MTRACE_FWD_CODE_WRONG_IF;
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("Multicast query on wrong interface");
+ }
+ if (qry_id == mtracep->qry_id && qry_src == from.s_addr) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug(
+ "Dropping multicast query with duplicate source and id");
+ return -1;
+ }
+ qry_id = mtracep->qry_id;
+ qry_src = from.s_addr;
+ }
+ /* if response fields length is equal to a whole number of responses */
+ else if ((r_len % sizeof(struct igmp_mtrace_rsp)) == 0) {
+ r_len = igmp_msg_len - sizeof(struct igmp_mtrace);
+
+ if (r_len != 0)
+ last_rsp_ind = r_len / sizeof(struct igmp_mtrace_rsp);
+ if (last_rsp_ind > MTRACE_MAX_HOPS) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("Mtrace request of excessive size");
+ return -1;
+ }
+ } else {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug(
+ "Recv mtrace packet from %s on %s: invalid length %d",
+ from_str, ifp->name, igmp_msg_len);
+ return -1;
+ }
+
+ /* 6.2.1 Packet Verification - drop not link-local multicast */
+ if (IPV4_CLASS_DE(ntohl(ip_hdr->ip_dst.s_addr))
+ && !IPV4_MC_LINKLOCAL(ntohl(ip_hdr->ip_dst.s_addr))) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug(
+ "Recv mtrace packet from %s on %s: not link-local multicast %pI4",
+ from_str, ifp->name, &ip_hdr->ip_dst);
+ return -1;
+ }
+
+ /* 6.2.2. Normal Processing */
+
+ /* 6.2.2. 1. If there is room in the current buffer? */
+
+ if (last_rsp_ind == MTRACE_MAX_HOPS) {
+ /* ...there was no room... */
+ mtracep->rsp[MTRACE_MAX_HOPS - 1].fwd_code =
+ MTRACE_FWD_CODE_NO_SPACE;
+ return mtrace_send_response(pim_ifp->pim, mtracep,
+ igmp_msg_len);
+ }
+
+ /* ...insert new response block... */
+
+ /* calculate new mtrace length with extra response */
+ mtrace_len = igmp_msg_len + sizeof(struct igmp_mtrace_rsp);
+
+ /* copy received query/request */
+ memcpy(mtrace_buf, igmp_msg, igmp_msg_len);
+
+ /* repoint mtracep pointer to copy */
+ mtracep = (struct igmp_mtrace *)mtrace_buf;
+
+ /* pointer for extra response field to be filled in */
+ rspp = &mtracep->rsp[last_rsp_ind];
+
+ /* initialize extra response field */
+ mtrace_rsp_init(rspp);
+
+ /* carry over any error noted when receiving the query */
+ rspp->fwd_code = fwd_code;
+
+ /* ...and fill in Query Arrival Time... */
+ rspp->arrival = htonl(query_arrival_time());
+ rspp->outgoing = pim_ifp->primary_address;
+ rspp->out_count = htonl(MTRACE_UNKNOWN_COUNT);
+ rspp->fwd_ttl = 1;
+
+ /* 6.2.2. 2. Attempt to determine the forwarding information... */
+
+ if (mtracep->grp_addr.s_addr != INADDR_ANY)
+ fwd_info = mtrace_fwd_info(pim, mtracep, rspp, &out_ifp);
+ else
+ fwd_info = mtrace_fwd_info_weak(pim, mtracep, rspp, &out_ifp);
+
+ /* 6.2.2 3. If no forwarding information... */
+ if (!fwd_info) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("mtrace not found multicast state");
+ mtrace_rsp_set_fwd_code(rspp, MTRACE_FWD_CODE_NO_ROUTE);
+ /* 6.2.2. 3. forward the packet to requester */
+ return mtrace_send_response(pim, mtracep, mtrace_len);
+ }
+
+ nh_addr = rspp->prev_hop;
+
+ reached_source = false;
+
+ if (nh_addr.s_addr == INADDR_ANY) {
+ /* no pim? i.e. 7.5.3. No Previous Hop */
+ if (!out_ifp->info) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("mtrace not found incoming if w/ pim");
+ mtrace_rsp_set_fwd_code(rspp,
+ MTRACE_FWD_CODE_NO_MULTICAST);
+ return mtrace_send_response(pim, mtracep, mtrace_len);
+ }
+ /* reached source? i.e. 7.5.1 Arriving at source */
+ if (pim_if_connected_to_source(out_ifp, mtracep->src_addr)) {
+ reached_source = true;
+ rspp->prev_hop = mtracep->src_addr;
+ }
+ /*
+ * 6.4 Forwarding Traceroute Requests:
+ * Previous-hop router not known,
+ * packet is sent to an appropriate multicast address
+ */
+ (void)inet_aton(MCAST_ALL_ROUTERS, &nh_addr);
+ }
+
+ /* 6.2.2 8. If this router is the Rendez-vous Point */
+ if (mtracep->grp_addr.s_addr != INADDR_ANY &&
+ pim_rp_i_am_rp(pim, mtracep->grp_addr)) {
+ mtrace_rsp_set_fwd_code(rspp, MTRACE_FWD_CODE_REACHED_RP);
+ /* 7.7.1. PIM-SM ...RP has not performed source-specific join */
+ if (rspp->src_mask == MTRACE_SRC_MASK_GROUP)
+ return mtrace_send_response(pim, mtracep, mtrace_len);
+ }
+
+ /*
+ * 6.4 Forwarding Traceroute Requests: the number of response
+ * blocks exceeds number of responses, so forward to the requester.
+ */
+ if (mtracep->hops <= (last_rsp_ind + 1))
+ return mtrace_send_response(pim, mtracep, mtrace_len);
+
+ /* 7.5.1. Arriving at source: terminate trace */
+ if (reached_source)
+ return mtrace_send_response(pim, mtracep, mtrace_len);
+
+ mtracep->checksum = 0;
+
+ mtracep->checksum = in_cksum(mtrace_buf, mtrace_len);
+
+ /* 6.4 Forwarding Traceroute Requests: response blocks less than req. */
+ return mtrace_send_packet(out_ifp, mtracep, mtrace_len, nh_addr,
+ mtracep->grp_addr);
+}
+
+/* 6.3. Traceroute responses */
+/*
+ * Handle a received mtrace RESPONSE: validate length and checksum,
+ * count it, drop an immediate duplicate (same query id and response
+ * destination as the previous packet — per-process static state), and
+ * forward the packet toward its destination.  Returns forward result
+ * or -1 on drop.
+ */
+int igmp_mtrace_recv_response(struct gm_sock *igmp, struct ip *ip_hdr,
+ struct in_addr from, const char *from_str,
+ char *igmp_msg, int igmp_msg_len)
+{
+ static uint32_t qry_id, rsp_dst;
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ struct pim_instance *pim;
+ struct igmp_mtrace *mtracep;
+ uint16_t recv_checksum;
+ uint16_t checksum;
+
+ ifp = igmp->interface;
+ pim_ifp = ifp->info;
+ pim = pim_ifp->pim;
+
+ if (igmp_msg_len < (int)sizeof(struct igmp_mtrace)) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug(
+ "Recv mtrace packet from %s on %s: too short, len=%d, min=%zu",
+ from_str, ifp->name, igmp_msg_len,
+ sizeof(struct igmp_mtrace));
+ return -1;
+ }
+
+ mtracep = (struct igmp_mtrace *)igmp_msg;
+
+ recv_checksum = mtracep->checksum;
+
+ mtracep->checksum = 0;
+
+ checksum = in_cksum(igmp_msg, igmp_msg_len);
+
+ if (recv_checksum != checksum) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug(
+ "Recv mtrace response from %s on %s: checksum mismatch: received=%x computed=%x",
+ from_str, ifp->name, recv_checksum, checksum);
+ return -1;
+ }
+
+ /* restore the checksum so the packet can be forwarded unchanged */
+ mtracep->checksum = checksum;
+
+ /* Collecting IGMP Rx stats */
+ igmp->igmp_stats.mtrace_rsp++;
+
+ if (PIM_DEBUG_MTRACE)
+ mtrace_debug(pim_ifp, mtracep, igmp_msg_len);
+
+ /* Drop duplicate packets */
+ if (qry_id == mtracep->qry_id && rsp_dst == ip_hdr->ip_dst.s_addr) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("duplicate mtrace response packet dropped");
+ return -1;
+ }
+
+ qry_id = mtracep->qry_id;
+ rsp_dst = ip_hdr->ip_dst.s_addr;
+
+ return mtrace_forward_packet(pim, ip_hdr);
+}
diff --git a/pimd/pim_igmp_mtrace.h b/pimd/pim_igmp_mtrace.h
new file mode 100644
index 0000000..bba9c10
--- /dev/null
+++ b/pimd/pim_igmp_mtrace.h
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Multicast traceroute for FRRouting
+ * Copyright (C) 2017 Mladen Sablic
+ */
+
+#ifndef PIM_IGMP_MTRACE_H
+#define PIM_IGMP_MTRACE_H
+
+#include <zebra.h>
+
+#include "pim_igmp.h"
+
+#define MTRACE_MAX_HOPS (255)
+#define MTRACE_UNKNOWN_COUNT (0xffffffff)
+#define MTRACE_SRC_MASK_GROUP (0x3f) /* forwarding on group state (*,G) */
+#define MTRACE_SRC_MASK_SOURCE (0x20) /* i.e. 32 forwarding on (S,G) */
+
+/* Forwarding error codes carried in a response block's fwd_code field;
+ * values >= 0x80 indicate fatal errors (draft-ietf-idmr-traceroute-ipm).
+ */
+enum mtrace_fwd_code {
+ MTRACE_FWD_CODE_NO_ERROR = 0x00,
+ MTRACE_FWD_CODE_WRONG_IF = 0x01,
+ MTRACE_FWD_CODE_PRUNE_SENT = 0x02,
+ MTRACE_FWD_CODE_PRUNE_RCVD = 0x03,
+ MTRACE_FWD_CODE_SCOPED = 0x04,
+ MTRACE_FWD_CODE_NO_ROUTE = 0x05,
+ MTRACE_FWD_CODE_WRONG_LAST_HOP = 0x06,
+ MTRACE_FWD_CODE_NOT_FORWARDING = 0x07,
+ MTRACE_FWD_CODE_REACHED_RP = 0x08,
+ MTRACE_FWD_CODE_RPF_IF = 0x09,
+ MTRACE_FWD_CODE_NO_MULTICAST = 0x0A,
+ MTRACE_FWD_CODE_INFO_HIDDEN = 0x0B,
+ MTRACE_FWD_CODE_NO_SPACE = 0x81,
+ MTRACE_FWD_CODE_OLD_ROUTER = 0x82,
+ MTRACE_FWD_CODE_ADMIN_PROHIB = 0x83
+};
+
+/* Routing protocol identifiers for a response block's rtg_proto field;
+ * this implementation only emits MTRACE_RTG_PROTO_PIM.
+ */
+enum mtrace_rtg_proto {
+ MTRACE_RTG_PROTO_DVMRP = 1,
+ MTRACE_RTG_PROTO_MOSPF = 2,
+ MTRACE_RTG_PROTO_PIM = 3,
+ MTRACE_RTG_PROTO_CBT = 4,
+ MTRACE_RTG_PROTO_PIM_SPECIAL = 5,
+ MTRACE_RTG_PROTO_PIM_STATIC = 6,
+ MTRACE_RTG_PROTO_DVMRP_STATIC = 7,
+ MTRACE_RTG_PROTO_PIM_MBGP = 8,
+ MTRACE_RTG_PROTO_CBT_SPECIAL = 9,
+ MTRACE_RTG_PROTO_CBT_STATIC = 10,
+ MTRACE_RTG_PROTO_PIM_ASSERT = 11,
+};
+
+/* One on-wire mtrace response block appended per hop; multi-byte
+ * counters/timestamps are network byte order.
+ */
+struct igmp_mtrace_rsp {
+ uint32_t arrival; /* query arrival time (NTP-style) */
+ struct in_addr incoming; /* incoming (RPF) interface address */
+ struct in_addr outgoing; /* outgoing interface address */
+ struct in_addr prev_hop; /* previous-hop router address */
+ uint32_t in_count;
+ uint32_t out_count;
+ uint32_t total;
+ uint32_t rtg_proto : 8;
+ uint32_t fwd_ttl : 8;
+ /* little endian order for next three fields */
+ uint32_t src_mask : 6;
+ uint32_t s : 1;
+ uint32_t mbz : 1;
+ uint32_t fwd_code : 8;
+} __attribute__((packed));
+
+/* On-wire mtrace query/request/response header; response blocks are
+ * appended via the trailing flexible array (declared [0], pre-C99 style).
+ */
+struct igmp_mtrace {
+ uint8_t type; /* IGMP type: query/request or response */
+ uint8_t hops; /* max number of hops to trace */
+ uint16_t checksum; /* IGMP checksum over the whole message */
+ struct in_addr grp_addr;
+ struct in_addr src_addr;
+ struct in_addr dst_addr;
+ struct in_addr rsp_addr; /* where to send the response */
+ uint32_t rsp_ttl : 8;
+ uint32_t qry_id : 24; /* query identifier, network order */
+ struct igmp_mtrace_rsp rsp[0];
+} __attribute__((packed));
+
+#define MTRACE_HDR_SIZE (sizeof(struct igmp_mtrace))
+#define MTRACE_RSP_SIZE (sizeof(struct igmp_mtrace_rsp))
+
+int igmp_mtrace_recv_qry_req(struct gm_sock *igmp, struct ip *ip_hdr,
+ struct in_addr from, const char *from_str,
+ char *igmp_msg, int igmp_msg_len);
+
+int igmp_mtrace_recv_response(struct gm_sock *igmp, struct ip *ip_hdr,
+ struct in_addr from, const char *from_str,
+ char *igmp_msg, int igmp_msg_len);
+
+#endif /* PIM_IGMP_MTRACE_H */
diff --git a/pimd/pim_igmp_stats.c b/pimd/pim_igmp_stats.c
new file mode 100644
index 0000000..8ad3fb5
--- /dev/null
+++ b/pimd/pim_igmp_stats.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for FRRouting
+ * Copyright (C) 2018 Mladen Sablic
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pim_igmp_stats.h"
+
+/* Reset all IGMP counters in stats to zero. */
+void igmp_stats_init(struct igmp_stats *stats)
+{
+ memset(stats, 0, sizeof(struct igmp_stats));
+}
+
+/*
+ * Accumulate counters from b into a (a += b); no-op if either pointer
+ * is NULL.  total_recv_messages is derived from b's per-message-type
+ * counters rather than b->total_recv_messages.
+ */
+void igmp_stats_add(struct igmp_stats *a, struct igmp_stats *b)
+{
+ if (!a || !b)
+ return;
+
+ a->query_v1 += b->query_v1;
+ a->query_v2 += b->query_v2;
+ a->query_v3 += b->query_v3;
+ a->report_v1 += b->report_v1;
+ a->report_v2 += b->report_v2;
+ a->report_v3 += b->report_v3;
+ a->leave_v2 += b->leave_v2;
+ a->mtrace_rsp += b->mtrace_rsp;
+ a->mtrace_req += b->mtrace_req;
+ a->unsupported += b->unsupported;
+ a->peak_groups += b->peak_groups;
+ a->total_groups += b->total_groups;
+ a->total_source_groups += b->total_source_groups;
+ a->joins_sent += b->joins_sent;
+ a->joins_failed += b->joins_failed;
+ a->general_queries_sent += b->general_queries_sent;
+ a->group_queries_sent += b->group_queries_sent;
+ a->total_recv_messages += b->query_v1 + b->query_v2 + b->query_v3 +
+ b->report_v1 + b->report_v2 + b->report_v3 +
+ b->leave_v2 + b->mtrace_rsp + b->mtrace_req;
+}
diff --git a/pimd/pim_igmp_stats.h b/pimd/pim_igmp_stats.h
new file mode 100644
index 0000000..a3ce486
--- /dev/null
+++ b/pimd/pim_igmp_stats.h
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for FRRouting
+ * Copyright (C) 2018 Mladen Sablic
+ */
+
+#ifndef PIM_IGMP_STATS_H
+#define PIM_IGMP_STATS_H
+
+#include <zebra.h>
+
+struct igmp_stats {
+ uint32_t query_v1;
+ uint32_t query_v2;
+ uint32_t query_v3;
+ uint32_t report_v1;
+ uint32_t report_v2;
+ uint32_t report_v3;
+ uint32_t leave_v2;
+ uint32_t mtrace_rsp;
+ uint32_t mtrace_req;
+ uint32_t unsupported;
+ uint32_t peak_groups;
+ uint32_t total_groups;
+ uint32_t total_source_groups;
+ uint32_t joins_sent;
+ uint32_t joins_failed;
+ uint32_t general_queries_sent;
+ uint32_t group_queries_sent;
+ uint32_t total_recv_messages;
+};
+
+#if PIM_IPV == 4
+void igmp_stats_init(struct igmp_stats *stats);
+void igmp_stats_add(struct igmp_stats *a, struct igmp_stats *b);
+#else
+static inline void igmp_stats_init(struct igmp_stats *stats)
+{
+}
+
+static inline void igmp_stats_add(struct igmp_stats *a, struct igmp_stats *b)
+{
+}
+#endif
+
+#endif /* PIM_IGMP_STATS_H */
diff --git a/pimd/pim_igmpv2.c b/pimd/pim_igmpv2.c
new file mode 100644
index 0000000..944dffd
--- /dev/null
+++ b/pimd/pim_igmpv2.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2016 Cumulus Networks, Inc.
+ * Daniel Walton
+ */
+
+#include "zebra.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_igmp.h"
+#include "pim_igmpv2.h"
+#include "pim_igmpv3.h"
+#include "pim_ssm.h"
+#include "pim_str.h"
+#include "pim_time.h"
+#include "pim_util.h"
+
+
+/* Trace helper: log the caller's name, sender address, and interface
+ * when group-membership tracing is enabled.
+ */
+static void on_trace(const char *label, struct interface *ifp,
+ struct in_addr from)
+{
+ if (PIM_DEBUG_GM_TRACE) {
+ char from_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<from?>", from, from_str, sizeof(from_str));
+ zlog_debug("%s: from %s on %s", label, from_str, ifp->name);
+ }
+}
+
+/*
+ * Build and send an 8-byte IGMPv2 membership query into query_buf and
+ * out of socket fd to dst_addr.  group_addr is the queried group (zero
+ * for a general query); query_max_response_time_dsec becomes the Max
+ * Response Time field (deciseconds) and must be non-zero, otherwise
+ * the packet would be parsed as IGMPv1.  Failures are logged, not
+ * returned — the function is void.
+ */
+void igmp_v2_send_query(struct gm_group *group, int fd, const char *ifname,
+ char *query_buf, struct in_addr dst_addr,
+ struct in_addr group_addr,
+ int query_max_response_time_dsec)
+{
+ ssize_t msg_size = 8;
+ uint8_t max_resp_code;
+ ssize_t sent;
+ struct sockaddr_in to;
+ socklen_t tolen;
+ uint16_t checksum;
+
+ /* max_resp_code must be non-zero else this will look like an IGMP v1
+ * query */
+ /* RFC 2236: 2.2. , v2's is equal to it */
+ max_resp_code = query_max_response_time_dsec;
+ assert(max_resp_code > 0);
+
+ query_buf[0] = PIM_IGMP_MEMBERSHIP_QUERY;
+ query_buf[1] = max_resp_code;
+ *(uint16_t *)(query_buf + IGMP_CHECKSUM_OFFSET) =
+ 0; /* for computing checksum */
+ memcpy(query_buf + 4, &group_addr, sizeof(struct in_addr));
+
+ checksum = in_cksum(query_buf, msg_size);
+ *(uint16_t *)(query_buf + IGMP_CHECKSUM_OFFSET) = checksum;
+
+ if (PIM_DEBUG_GM_PACKETS) {
+ char dst_str[INET_ADDRSTRLEN];
+ char group_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<dst?>", dst_addr, dst_str, sizeof(dst_str));
+ pim_inet4_dump("<group?>", group_addr, group_str,
+ sizeof(group_str));
+ zlog_debug("Send IGMPv2 QUERY to %s on %s for group %s",
+ dst_str, ifname, group_str);
+ }
+
+ memset(&to, 0, sizeof(to));
+ to.sin_family = AF_INET;
+ to.sin_addr = dst_addr;
+ tolen = sizeof(to);
+
+ sent = sendto(fd, query_buf, msg_size, MSG_DONTWAIT,
+ (struct sockaddr *)&to, tolen);
+ if (sent != (ssize_t)msg_size) {
+ char dst_str[INET_ADDRSTRLEN];
+ char group_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<dst?>", dst_addr, dst_str, sizeof(dst_str));
+ pim_inet4_dump("<group?>", group_addr, group_str,
+ sizeof(group_str));
+ if (sent < 0) {
+ zlog_warn(
+ "Send IGMPv2 QUERY failed due to %s on %s: group=%s msg_size=%zd: errno=%d: %s",
+ dst_str, ifname, group_str, msg_size, errno,
+ safe_strerror(errno));
+ } else {
+ zlog_warn(
+ "Send IGMPv2 QUERY failed due to %s on %s: group=%s msg_size=%zd: sent=%zd",
+ dst_str, ifname, group_str, msg_size, sent);
+ }
+ return;
+ }
+}
+
+/*
+ * Handle a received IGMPv2 membership report: validate checksum,
+ * reject EXCLUDE-equivalent reports for groups in the SSM range
+ * (RFC 4604 2.2.1), and translate into the IGMPv3 IS_EX({}) equivalent
+ * (RFC 3376 7.3.2).  Returns 0 on success/ignored, -1 on drop.
+ */
+int igmp_v2_recv_report(struct gm_sock *igmp, struct in_addr from,
+ const char *from_str, char *igmp_msg, int igmp_msg_len)
+{
+ struct interface *ifp = igmp->interface;
+ struct in_addr group_addr;
+ struct pim_interface *pim_ifp;
+ char group_str[INET_ADDRSTRLEN];
+
+ on_trace(__func__, igmp->interface, from);
+
+ pim_ifp = ifp->info;
+
+ /* mtrace-only sockets do not maintain group state */
+ if (igmp->mtrace_only)
+ return 0;
+
+ /* unexpected size is logged but tolerated; checksum decides */
+ if (igmp_msg_len != IGMP_V12_MSG_SIZE) {
+ if (PIM_DEBUG_GM_PACKETS)
+ zlog_debug(
+ "Recv IGMPv2 REPORT from %s on %s: size=%d other than correct=%d",
+ from_str, ifp->name, igmp_msg_len,
+ IGMP_V12_MSG_SIZE);
+ }
+
+ if (igmp_validate_checksum(igmp_msg, igmp_msg_len) == -1) {
+ zlog_warn(
+ "Recv IGMPv2 REPORT from %s on %s: size=%d with invalid checksum",
+ from_str, ifp->name, igmp_msg_len);
+ return -1;
+ }
+
+ /* Collecting IGMP Rx stats */
+ igmp->igmp_stats.report_v2++;
+
+ memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr));
+
+ if (PIM_DEBUG_GM_PACKETS) {
+ pim_inet4_dump("<dst?>", group_addr, group_str,
+ sizeof(group_str));
+ zlog_debug("Recv IGMPv2 REPORT from %s on %s for %s", from_str,
+ ifp->name, group_str);
+ }
+
+ /*
+ * RFC 4604
+ * section 2.2.1
+ * EXCLUDE mode does not apply to SSM addresses, and an SSM-aware router
+ * will ignore MODE_IS_EXCLUDE and CHANGE_TO_EXCLUDE_MODE requests in
+ * the SSM range.
+ */
+ if (pim_is_grp_ssm(pim_ifp->pim, group_addr)) {
+ if (PIM_DEBUG_GM_PACKETS) {
+ zlog_debug(
+ "Ignoring IGMPv2 group record %pI4 from %s on %s exclude mode in SSM range",
+ &group_addr.s_addr, from_str, ifp->name);
+ }
+ return -1;
+ }
+
+
+ /*
+ * RFC 3376
+ * 7.3.2. In the Presence of Older Version Group Members
+ *
+ * When Group Compatibility Mode is IGMPv2, a router internally
+ * translates the following IGMPv2 messages for that group to their
+ * IGMPv3 equivalents:
+ *
+ * IGMPv2 Message IGMPv3 Equivalent
+ * -------------- -----------------
+ * Report IS_EX( {} )
+ * Leave TO_IN( {} )
+ */
+ igmpv3_report_isex(igmp, from, group_addr, 0, NULL, 1);
+
+ return 0;
+}
+
+/*
+ * Handle a received IGMPv2 leave message: validate checksum, accept
+ * only leaves addressed to ALL-ROUTERS or to the group being left
+ * (RFC 2236 section 9), and translate into the IGMPv3 TO_IN({})
+ * equivalent (RFC 3376 7.3.2).  Returns 0 on success, -1 on drop.
+ */
+int igmp_v2_recv_leave(struct gm_sock *igmp, struct ip *ip_hdr,
+ const char *from_str, char *igmp_msg, int igmp_msg_len)
+{
+ struct interface *ifp = igmp->interface;
+ struct in_addr group_addr;
+ char group_str[INET_ADDRSTRLEN];
+ struct in_addr from = ip_hdr->ip_src;
+
+ on_trace(__func__, igmp->interface, from);
+
+ /* mtrace-only sockets do not maintain group state */
+ if (igmp->mtrace_only)
+ return 0;
+
+ /* unexpected size is logged but tolerated; checksum decides */
+ if (igmp_msg_len != IGMP_V12_MSG_SIZE) {
+ if (PIM_DEBUG_GM_PACKETS)
+ zlog_debug(
+ "Recv IGMPv2 LEAVE from %s on %s: size=%d other than correct=%d",
+ from_str, ifp->name, igmp_msg_len,
+ IGMP_V12_MSG_SIZE);
+ }
+
+ if (igmp_validate_checksum(igmp_msg, igmp_msg_len) == -1) {
+ zlog_warn(
+ "Recv IGMPv2 LEAVE from %s on %s with invalid checksum",
+ from_str, ifp->name);
+ return -1;
+ }
+
+
+ memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr));
+
+ if (PIM_DEBUG_GM_PACKETS) {
+ pim_inet4_dump("<dst?>", group_addr, group_str,
+ sizeof(group_str));
+ zlog_debug("Recv IGMPv2 LEAVE from %s on %s for %s", from_str,
+ ifp->name, group_str);
+ }
+ /*
+ * As per RFC 2236, section 9:
+ Message Type Destination Group
+ ------------ -----------------
+ General Query ALL-SYSTEMS (224.0.0.1)
+ Group-Specific Query The group being queried
+ Membership Report The group being reported
+ Leave Message ALL-ROUTERS (224.0.0.2)
+
+ Note: in older (i.e., non-standard and now obsolete) versions of
+ IGMPv2, hosts send Leave Messages to the group being left. A
+ router SHOULD accept Leave Messages addressed to the group being
+ left in the interests of backwards compatibility with such hosts.
+ In all cases, however, hosts MUST send to the ALL-ROUTERS address
+ to be compliant with this specification.
+ */
+ if ((ntohl(ip_hdr->ip_dst.s_addr) != INADDR_ALLRTRS_GROUP)
+ && (ip_hdr->ip_dst.s_addr != group_addr.s_addr)) {
+ if (PIM_DEBUG_GM_EVENTS)
+ zlog_debug(
+ "IGMPv2 Leave message is ignored since received on address other than ALL-ROUTERS or Group-address");
+ return -1;
+ }
+
+ /* Collecting IGMP Rx stats */
+ igmp->igmp_stats.leave_v2++;
+
+ /*
+ * RFC 3376
+ * 7.3.2. In the Presence of Older Version Group Members
+ *
+ * When Group Compatibility Mode is IGMPv2, a router internally
+ * translates the following IGMPv2 messages for that group to their
+ * IGMPv3 equivalents:
+ *
+ * IGMPv2 Message IGMPv3 Equivalent
+ * -------------- -----------------
+ * Report IS_EX( {} )
+ * Leave TO_IN( {} )
+ */
+ igmpv3_report_toin(igmp, from, group_addr, 0, NULL);
+
+ return 0;
+}
diff --git a/pimd/pim_igmpv2.h b/pimd/pim_igmpv2.h
new file mode 100644
index 0000000..e968d83
--- /dev/null
+++ b/pimd/pim_igmpv2.h
@@ -0,0 +1,22 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PIM for Quagga
 * Copyright (C) 2016 Cumulus Networks, Inc.
 * Daniel Walton
 */

#ifndef PIM_IGMPV2_H
#define PIM_IGMPV2_H

/*
 * Build an IGMPv2 query for 'group_addr' into 'query_buf' and transmit
 * it on socket 'fd' to 'dst_addr' (interface 'ifname' is used for
 * logging).  'query_max_response_time_dsec' is the Max Response Time
 * field in deciseconds.
 */
void igmp_v2_send_query(struct gm_group *group, int fd, const char *ifname,
			char *query_buf, struct in_addr dst_addr,
			struct in_addr group_addr,
			int query_max_response_time_dsec);

/*
 * Process a received IGMPv2 Membership Report of 'igmp_msg_len' bytes.
 * Returns 0 on success, -1 on a malformed/rejected packet.
 */
int igmp_v2_recv_report(struct gm_sock *igmp, struct in_addr from,
			const char *from_str, char *igmp_msg, int igmp_msg_len);

/*
 * Process a received IGMPv2 Leave message.  'ip_hdr' is needed to
 * validate the IP destination address (RFC 2236 section 9).
 * Returns 0 on success, -1 on a malformed/rejected packet.
 */
int igmp_v2_recv_leave(struct gm_sock *igmp, struct ip *ip_hdr,
		       const char *from_str, char *igmp_msg, int igmp_msg_len);

#endif /* PIM_IGMPV2_H */
diff --git a/pimd/pim_igmpv3.c b/pimd/pim_igmpv3.c
new file mode 100644
index 0000000..18a9fb7
--- /dev/null
+++ b/pimd/pim_igmpv3.c
@@ -0,0 +1,2028 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+#include "log.h"
+#include "memory.h"
+#include "if.h"
+#include "lib_errors.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_iface.h"
+#include "pim_igmp.h"
+#include "pim_igmpv3.h"
+#include "pim_str.h"
+#include "pim_util.h"
+#include "pim_time.h"
+#include "pim_zebra.h"
+#include "pim_oil.h"
+#include "pim_ssm.h"
+
+static void group_retransmit_timer_on(struct gm_group *group);
+static long igmp_group_timer_remain_msec(struct gm_group *group);
+static long igmp_source_timer_remain_msec(struct gm_source *source);
+static void group_query_send(struct gm_group *group);
+static void source_query_send_by_flag(struct gm_group *group,
+ int num_sources_tosend);
+
+static void on_trace(const char *label, struct interface *ifp,
+ struct in_addr from, struct in_addr group_addr,
+ int num_sources, struct in_addr *sources)
+{
+ if (PIM_DEBUG_GM_TRACE) {
+ char from_str[INET_ADDRSTRLEN];
+ char group_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<from?>", from, from_str, sizeof(from_str));
+ pim_inet4_dump("<group?>", group_addr, group_str,
+ sizeof(group_str));
+
+ zlog_debug("%s: from %s on %s: group=%s sources=%d", label,
+ from_str, ifp->name, group_str, num_sources);
+ }
+}
+
+static inline long igmp_gmi_msec(struct gm_group *group)
+{
+ struct pim_interface *pim_ifp = group->interface->info;
+ struct gm_sock *igmp;
+ struct listnode *sock_node;
+
+ long qrv = 0, qqi = 0;
+
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_socket_list, sock_node, igmp)) {
+ qrv = MAX(qrv, igmp->querier_robustness_variable);
+ qqi = MAX(qqi, igmp->querier_query_interval);
+ }
+ return PIM_IGMP_GMI_MSEC(qrv, qqi,
+ pim_ifp->gm_query_max_response_time_dsec);
+}
+
/*
 * Restart the group (filter-mode) timer to the full Group Membership
 * Interval.  Only legal while the group is in EXCLUDE mode, since the
 * group timer's sole purpose is timing the EXCLUDE->INCLUDE switch
 * (RFC 3376 6.2.2).
 */
void igmp_group_reset_gmi(struct gm_group *group)
{
	long group_membership_interval_msec;
	struct interface *ifp;

	ifp = group->interface;

	/*
	  RFC 3376: 8.4. Group Membership Interval

	  The Group Membership Interval is the amount of time that must pass
	  before a multicast router decides there are no more members of a
	  group or a particular source on a network.

	  This value MUST be ((the Robustness Variable) times (the Query
	  Interval)) plus (one Query Response Interval).

	  group_membership_interval_msec = querier_robustness_variable *
					   (1000 * querier_query_interval) +
					   100 * query_response_interval_dsec;
	*/
	group_membership_interval_msec = igmp_gmi_msec(group);

	if (PIM_DEBUG_GM_TRACE) {
		char group_str[INET_ADDRSTRLEN];
		pim_inet4_dump("<group?>", group->group_addr, group_str,
			       sizeof(group_str));
		zlog_debug(
			"Resetting group %s timer to GMI=%ld.%03ld sec on %s",
			group_str, group_membership_interval_msec / 1000,
			group_membership_interval_msec % 1000, ifp->name);
	}

	/*
	  RFC 3376: 6.2.2. Definition of Group Timers

	  The group timer is only used when a group is in EXCLUDE mode and
	  it represents the time for the *filter-mode* of the group to
	  expire and switch to INCLUDE mode.
	*/
	assert(group->group_filtermode_isexcl);

	igmp_group_timer_on(group, group_membership_interval_msec, ifp->name);
}
+
/*
 * Source timer expiry handler (RFC 3376 6.3).  In EXCLUDE mode the
 * source merely stops being forwarded (record kept); in INCLUDE mode
 * the source record is deleted, and an emptied group is deleted too.
 */
static void igmp_source_timer(struct event *t)
{
	struct gm_source *source;
	struct gm_group *group;

	source = EVENT_ARG(t);

	group = source->source_group;

	if (PIM_DEBUG_GM_TRACE) {
		char group_str[INET_ADDRSTRLEN];
		char source_str[INET_ADDRSTRLEN];
		pim_inet4_dump("<group?>", group->group_addr, group_str,
			       sizeof(group_str));
		pim_inet4_dump("<source?>", source->source_addr, source_str,
			       sizeof(source_str));
		zlog_debug(
			"%s: Source timer expired for group %s source %s on %s",
			__func__, group_str, source_str,
			group->interface->name);
	}

	/*
	  RFC 3376: 6.3. IGMPv3 Source-Specific Forwarding Rules

	  Group
	  Filter-Mode    Source Timer Value    Action
	  -----------    ------------------    ------
	  INCLUDE        TIMER == 0            Suggest to stop forwarding
					       traffic from source and
					       remove source record.  If
					       there are no more source
					       records for the group, delete
					       group record.

	  EXCLUDE        TIMER == 0            Suggest to not forward
					       traffic from source
					       (DO NOT remove record)

	  Source timer switched from (T > 0) to (T == 0): disable forwarding.
	*/

	if (group->group_filtermode_isexcl) {
		/* EXCLUDE mode */

		igmp_source_forward_stop(source);
	} else {
		/* INCLUDE mode */

		/* igmp_source_delete() will stop forwarding source */
		igmp_source_delete(source);

		/*
		  If there are no more source records for the group, delete
		  group
		  record.
		*/
		if (!listcount(group->group_source_list)) {
			igmp_group_delete_empty_include(group);
		}
	}
}
+
+static void source_timer_off(struct gm_group *group, struct gm_source *source)
+{
+ if (!source->t_source_timer)
+ return;
+
+ if (PIM_DEBUG_GM_TRACE) {
+ char group_str[INET_ADDRSTRLEN];
+ char source_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<group?>", group->group_addr, group_str,
+ sizeof(group_str));
+ pim_inet4_dump("<source?>", source->source_addr, source_str,
+ sizeof(source_str));
+ zlog_debug(
+ "Cancelling TIMER event for group %s source %s on %s",
+ group_str, source_str, group->interface->name);
+ }
+
+ EVENT_OFF(source->t_source_timer);
+}
+
+static void igmp_source_timer_on(struct gm_group *group,
+ struct gm_source *source, long interval_msec)
+{
+ source_timer_off(group, source);
+ struct pim_interface *pim_ifp = group->interface->info;
+
+ if (PIM_DEBUG_GM_EVENTS) {
+ char group_str[INET_ADDRSTRLEN];
+ char source_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<group?>", group->group_addr, group_str,
+ sizeof(group_str));
+ pim_inet4_dump("<source?>", source->source_addr, source_str,
+ sizeof(source_str));
+ zlog_debug(
+ "Scheduling %ld.%03ld sec TIMER event for group %s source %s on %s",
+ interval_msec / 1000, interval_msec % 1000, group_str,
+ source_str, group->interface->name);
+ }
+
+ event_add_timer_msec(router->master, igmp_source_timer, source,
+ interval_msec, &source->t_source_timer);
+
+ /*
+ RFC 3376: 6.3. IGMPv3 Source-Specific Forwarding Rules
+
+ Source timer switched from (T == 0) to (T > 0): enable forwarding.
+ */
+ igmp_source_forward_start(pim_ifp->pim, source);
+}
+
/*
 * Restart a source's timer to the full Group Membership Interval,
 * which also (re)enables forwarding for that source via
 * igmp_source_timer_on() (RFC 3376 6.3).
 */
void igmp_source_reset_gmi(struct gm_group *group, struct gm_source *source)
{
	long group_membership_interval_msec;
	struct interface *ifp;

	ifp = group->interface;

	group_membership_interval_msec = igmp_gmi_msec(group);

	if (PIM_DEBUG_GM_TRACE) {
		char group_str[INET_ADDRSTRLEN];
		char source_str[INET_ADDRSTRLEN];

		pim_inet4_dump("<group?>", group->group_addr, group_str,
			       sizeof(group_str));
		pim_inet4_dump("<source?>", source->source_addr, source_str,
			       sizeof(source_str));

		zlog_debug(
			"Resetting source %s timer to GMI=%ld.%03ld sec for group %s on %s",
			source_str, group_membership_interval_msec / 1000,
			group_membership_interval_msec % 1000, group_str,
			ifp->name);
	}

	igmp_source_timer_on(group, source, group_membership_interval_msec);
}
+
+static void source_mark_delete_flag(struct gm_group *group)
+{
+ struct listnode *src_node;
+ struct gm_source *src;
+
+ for (ALL_LIST_ELEMENTS_RO(group->group_source_list, src_node, src)) {
+ IGMP_SOURCE_DO_DELETE(src->source_flags);
+ }
+}
+
+static void source_mark_send_flag(struct gm_group *group)
+{
+ struct listnode *src_node;
+ struct gm_source *src;
+
+ for (ALL_LIST_ELEMENTS_RO(group->group_source_list, src_node, src)) {
+ IGMP_SOURCE_DO_SEND(src->source_flags);
+ }
+}
+
+static int source_mark_send_flag_by_timer(struct gm_group *group)
+{
+ struct listnode *src_node;
+ struct gm_source *src;
+ int num_marked_sources = 0;
+
+ for (ALL_LIST_ELEMENTS_RO(group->group_source_list, src_node, src)) {
+ /* Is source timer running? */
+ if (src->t_source_timer) {
+ IGMP_SOURCE_DO_SEND(src->source_flags);
+ ++num_marked_sources;
+ } else {
+ IGMP_SOURCE_DONT_SEND(src->source_flags);
+ }
+ }
+
+ return num_marked_sources;
+}
+
+static void source_clear_send_flag(struct list *source_list)
+{
+ struct listnode *src_node;
+ struct gm_source *src;
+
+ for (ALL_LIST_ELEMENTS_RO(source_list, src_node, src)) {
+ IGMP_SOURCE_DONT_SEND(src->source_flags);
+ }
+}
+
+/*
+ Any source (*,G) is forwarded only if mode is EXCLUDE {empty}
+*/
+static void group_exclude_fwd_anysrc_ifempty(struct gm_group *group)
+{
+ struct pim_interface *pim_ifp = group->interface->info;
+
+ assert(group->group_filtermode_isexcl);
+
+ if (listcount(group->group_source_list) < 1) {
+ igmp_anysource_forward_start(pim_ifp->pim, group);
+ }
+}
+
/*
 * Release a source record's memory.  The caller is responsible for
 * having stopped the timer and unlinked the record from the group's
 * source list first (see igmp_source_delete()).
 */
void igmp_source_free(struct gm_source *source)
{
	/* make sure there is no source timer running */
	assert(!source->t_source_timer);

	XFREE(MTYPE_PIM_IGMP_GROUP_SOURCE, source);
}
+
+/*
+ igmp_source_delete: stop forwarding, and delete the source
+ igmp_source_forward_stop: stop forwarding, but keep the source
+*/
+void igmp_source_delete(struct gm_source *source)
+{
+ struct gm_group *group;
+ struct in_addr src;
+
+ group = source->source_group;
+
+ if (PIM_DEBUG_GM_TRACE) {
+ char group_str[INET_ADDRSTRLEN];
+ char source_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<group?>", group->group_addr, group_str,
+ sizeof(group_str));
+ pim_inet4_dump("<source?>", source->source_addr, source_str,
+ sizeof(source_str));
+ zlog_debug(
+ "Deleting IGMP source %s for group %s from interface %s c_oil ref_count %d",
+ source_str, group_str, group->interface->name,
+ source->source_channel_oil
+ ? source->source_channel_oil->oil_ref_count
+ : 0);
+ }
+
+ source_timer_off(group, source);
+ igmp_source_forward_stop(source);
+ source->source_channel_oil = NULL;
+
+ /* sanity check that forwarding has been disabled */
+ if (IGMP_SOURCE_TEST_FORWARDING(source->source_flags)) {
+ char group_str[INET_ADDRSTRLEN];
+ char source_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<group?>", group->group_addr, group_str,
+ sizeof(group_str));
+ pim_inet4_dump("<source?>", source->source_addr, source_str,
+ sizeof(source_str));
+ zlog_warn(
+ "%s: forwarding=ON(!) IGMP source %s for group %s from interface %s",
+ __func__, source_str, group_str,
+ group->interface->name);
+ /* warning only */
+ }
+
+ /*
+ notice that listnode_delete() can't be moved
+ into igmp_source_free() because the later is
+ called by list_delete_all_node()
+ */
+ listnode_delete(group->group_source_list, source);
+
+ src.s_addr = source->source_addr.s_addr;
+ igmp_source_free(source);
+
+ /* Group source list is empty and current source is * then
+ *,G group going away so do not trigger start */
+ if (group->group_filtermode_isexcl
+ && (listcount(group->group_source_list) != 0)
+ && src.s_addr != INADDR_ANY) {
+ group_exclude_fwd_anysrc_ifempty(group);
+ }
+}
+
+static void source_delete_by_flag(struct list *source_list)
+{
+ struct listnode *src_node;
+ struct listnode *src_nextnode;
+ struct gm_source *src;
+
+ for (ALL_LIST_ELEMENTS(source_list, src_node, src_nextnode, src))
+ if (IGMP_SOURCE_TEST_DELETE(src->source_flags))
+ igmp_source_delete(src);
+}
+
+void igmp_source_delete_expired(struct list *source_list)
+{
+ struct listnode *src_node;
+ struct listnode *src_nextnode;
+ struct gm_source *src;
+
+ for (ALL_LIST_ELEMENTS(source_list, src_node, src_nextnode, src))
+ if (!src->t_source_timer)
+ igmp_source_delete(src);
+}
+
+struct gm_source *igmp_find_source_by_addr(struct gm_group *group,
+ struct in_addr src_addr)
+{
+ struct listnode *src_node;
+ struct gm_source *src;
+
+ for (ALL_LIST_ELEMENTS_RO(group->group_source_list, src_node, src))
+ if (src_addr.s_addr == src->source_addr.s_addr)
+ return src;
+
+ return 0;
+}
+
/*
 * Find the source record for 'src_addr' in 'group', creating a fresh
 * record (timer stopped, flags clear) when none exists.  When 'new' is
 * non-NULL it reports whether a record was created.  Creating a record
 * stops any (*,G) forwarding, since the source list is no longer empty.
 */
struct gm_source *igmp_get_source_by_addr(struct gm_group *group,
					  struct in_addr src_addr, bool *new)
{
	struct gm_source *src;

	if (new)
		*new = false;

	src = igmp_find_source_by_addr(group, src_addr);
	if (src)
		return src;

	if (PIM_DEBUG_GM_TRACE) {
		char group_str[INET_ADDRSTRLEN];
		char source_str[INET_ADDRSTRLEN];
		pim_inet4_dump("<group?>", group->group_addr, group_str,
			       sizeof(group_str));
		pim_inet4_dump("<source?>", src_addr, source_str,
			       sizeof(source_str));
		zlog_debug(
			"Creating new IGMP source %s for group %s on interface %s",
			source_str, group_str, group->interface->name);
	}

	src = XCALLOC(MTYPE_PIM_IGMP_GROUP_SOURCE, sizeof(*src));

	if (new)
		*new = true;

	src->t_source_timer = NULL;
	src->source_group = group; /* back pointer */
	src->source_addr = src_addr;
	src->source_creation = pim_time_monotonic_sec();
	src->source_flags = 0;
	src->source_query_retransmit_count = 0;
	src->source_channel_oil = NULL;

	listnode_add(group->group_source_list, src);

	/* Any source (*,G) is forwarded only if mode is EXCLUDE {empty} */
	igmp_anysource_forward_stop(group);
	return src;
}
+
/*
 * Common handler for IS_IN and ALLOW record types: refresh the timers
 * of the listed sources.  An empty source list means "INCLUDE {}",
 * i.e. delete/refresh the group per RFC 3376 3.1.
 */
static void allow(struct gm_sock *igmp, struct in_addr from,
		  struct in_addr group_addr, int num_sources,
		  struct in_addr *sources)
{
	struct gm_source *source;
	struct gm_group *group;
	int i;

	if (num_sources == 0) {
		/*
		  RFC 3376: 3.1. Socket-State
		  If the requested filter mode is INCLUDE *and* the requested
		  source list is empty, then the entry corresponding to the
		  requested interface and multicast address is deleted if
		  present. If no such entry is present, the request is ignored.
		  So, deleting the group present.
		*/
		group = find_group_by_addr(igmp, group_addr);
		if (!group) {
			return;
		}
		if (group->group_filtermode_isexcl) {
			/* EXCLUDE {}: just refresh the (*,G) pseudo-source */
			if (listcount(group->group_source_list) == 1) {
				struct in_addr star = {.s_addr = INADDR_ANY};

				source = igmp_find_source_by_addr(group, star);
				if (source)
					igmp_source_reset_gmi(group, source);
			}
		} else {
			igmp_group_delete(group);
		}

		return;
	}

	/* non-existent group is created as INCLUDE {empty} */
	group = igmp_add_group_by_addr(igmp, group_addr);
	if (!group) {
		return;
	}

	/* scan received sources */
	for (i = 0; i < num_sources; ++i) {
		struct in_addr *src_addr;

		src_addr = sources + i;

		source = igmp_get_source_by_addr(group, *src_addr, NULL);
		if (!source)
			continue;

		/*
		  RFC 3376: 6.4.1. Reception of Current-State Records

		  When receiving IS_IN reports for groups in EXCLUDE mode is
		  sources should be moved from set with (timers = 0) to set with
		  (timers > 0).

		  igmp_source_reset_gmi() below, resetting the source timers to
		  GMI, accomplishes this.
		*/
		igmp_source_reset_gmi(group, source);

	} /* scan received sources */
}
+
/*
 * Handle an IGMPv3 MODE_IS_INCLUDE record: router-side processing is
 * identical to ALLOW_NEW_SOURCES, so delegate to allow().
 */
void igmpv3_report_isin(struct gm_sock *igmp, struct in_addr from,
			struct in_addr group_addr, int num_sources,
			struct in_addr *sources)
{
	on_trace(__func__, igmp->interface, from, group_addr, num_sources,
		 sources);

	allow(igmp, from, group_addr, num_sources, sources);
}
+
/*
 * Merge a MODE_IS_EXCLUDE record into a group already in EXCLUDE mode
 * (RFC 3376 6.4.1, router state EXCLUDE(X,Y), report IS_EX(A)).
 */
static void isex_excl(struct gm_group *group, int num_sources,
		      struct in_addr *sources)
{
	struct gm_source *source;
	int i;

	/* EXCLUDE mode */
	assert(group->group_filtermode_isexcl);

	/* E.1: set deletion flag for known sources (X,Y) */
	source_mark_delete_flag(group);

	/* scan received sources (A) */
	for (i = 0; i < num_sources; ++i) {
		struct in_addr *src_addr;
		bool new;

		src_addr = sources + i;

		/* E.2: lookup reported source from (A) in (X,Y) */
		source = igmp_get_source_by_addr(group, *src_addr, &new);
		if (!source)
			continue;

		if (!new) {
			/* E.3: if found, clear deletion flag: (X*A) or (Y*A) */
			IGMP_SOURCE_DONT_DELETE(source->source_flags);
		} else {
			/* E.4: if not found, create source with timer=GMI:
			 * (A-X-Y) */
			assert(!source->t_source_timer); /* timer == 0 */
			igmp_source_reset_gmi(group, source);
			assert(source->t_source_timer); /* (A-X-Y) timer > 0 */
		}

	} /* scan received sources */

	/*
	 * If we are in isexcl mode and num_sources == 0
	 * than that means we have a *,g entry that
	 * needs to be handled
	 */
	if (group->group_filtermode_isexcl && num_sources == 0) {
		struct in_addr star = {.s_addr = INADDR_ANY};
		source = igmp_find_source_by_addr(group, star);
		if (source) {
			IGMP_SOURCE_DONT_DELETE(source->source_flags);
			igmp_source_reset_gmi(group, source);
		}
	}

	/* E.5: delete all sources marked with deletion flag: (X-A) and (Y-A) */
	source_delete_by_flag(group->group_source_list);
}
+
/*
 * Merge a MODE_IS_EXCLUDE record into a group currently in INCLUDE
 * mode; the group switches to EXCLUDE mode as a result (RFC 3376
 * 6.4.1, router state INCLUDE(A), report IS_EX(B)).
 */
static void isex_incl(struct gm_group *group, int num_sources,
		      struct in_addr *sources)
{
	int i;

	/* INCLUDE mode */
	assert(!group->group_filtermode_isexcl);

	/* I.1: set deletion flag for known sources (A) */
	source_mark_delete_flag(group);

	/* scan received sources (B) */
	for (i = 0; i < num_sources; ++i) {
		struct gm_source *source;
		struct in_addr *src_addr;
		bool new;

		src_addr = sources + i;

		/* I.2: lookup reported source (B) */
		source = igmp_get_source_by_addr(group, *src_addr, &new);
		if (!source)
			continue;

		if (!new) {
			/* I.3: if found, clear deletion flag (A*B) */
			IGMP_SOURCE_DONT_DELETE(source->source_flags);
		} else {
			/* I.4: if not found, create source with timer=0 (B-A)
			 */
			assert(!source->t_source_timer); /* (B-A) timer=0 */
		}

	} /* scan received sources */

	/* I.5: delete all sources marked with deletion flag (A-B) */
	source_delete_by_flag(group->group_source_list);

	group->group_filtermode_isexcl = 1; /* boolean=true */

	assert(group->group_filtermode_isexcl);

	group_exclude_fwd_anysrc_ifempty(group);
}
+
/*
 * Handle an IGMPv3 MODE_IS_EXCLUDE record; also reached for IGMPv2
 * reports (from_igmp_v2_report), which are translated to IS_EX({}).
 * The group always ends up in EXCLUDE mode with its timer reset to GMI.
 */
void igmpv3_report_isex(struct gm_sock *igmp, struct in_addr from,
			struct in_addr group_addr, int num_sources,
			struct in_addr *sources, int from_igmp_v2_report)
{
	struct interface *ifp = igmp->interface;
	struct gm_group *group;

	on_trace(__func__, ifp, from, group_addr, num_sources, sources);

	if (pim_is_group_filtered(ifp->info, &group_addr))
		return;

	/* non-existent group is created as INCLUDE {empty} */
	group = igmp_add_group_by_addr(igmp, group_addr);
	if (!group) {
		return;
	}

	/* So we can display how we learned the group in our show command output
	 */
	if (from_igmp_v2_report)
		group->igmp_version = 2;

	if (group->group_filtermode_isexcl) {
		/* EXCLUDE mode */
		isex_excl(group, num_sources, sources);
	} else {
		/* INCLUDE mode */
		isex_incl(group, num_sources, sources);
		assert(group->group_filtermode_isexcl);
	}

	assert(group->group_filtermode_isexcl);

	igmp_group_reset_gmi(group);
}
+
/*
 * Merge a CHANGE_TO_INCLUDE record into a group in INCLUDE mode
 * (RFC 3376 6.4.2, router state INCLUDE(A), report TO_IN(B)):
 * refresh (B) to GMI and query the sources no longer reported, Q(G,A-B).
 */
static void toin_incl(struct gm_group *group, int num_sources,
		      struct in_addr *sources)
{
	int num_sources_tosend = listcount(group->group_source_list);
	int i;

	/* Set SEND flag for all known sources (A) */
	source_mark_send_flag(group);

	/* Scan received sources (B) */
	for (i = 0; i < num_sources; ++i) {
		struct gm_source *source;
		struct in_addr *src_addr;
		bool new;

		src_addr = sources + i;

		/* Lookup reported source (B) */
		source = igmp_get_source_by_addr(group, *src_addr, &new);
		if (!source)
			continue;

		if (!new) {
			/* If found, clear SEND flag (A*B) */
			IGMP_SOURCE_DONT_SEND(source->source_flags);
			--num_sources_tosend;
		}

		/* (B)=GMI */
		igmp_source_reset_gmi(group, source);
	}

	/* Send sources marked with SEND flag: Q(G,A-B) */
	if (num_sources_tosend > 0) {
		source_query_send_by_flag(group, num_sources_tosend);
	}
}
+
/*
 * Merge a CHANGE_TO_INCLUDE record into a group in EXCLUDE mode
 * (RFC 3376 6.4.2, router state EXCLUDE(X,Y), report TO_IN(A)):
 * refresh (A) to GMI, query Q(G,X-A), and query the group itself Q(G).
 */
static void toin_excl(struct gm_group *group, int num_sources,
		      struct in_addr *sources)
{
	int num_sources_tosend;
	int i;

	/* Set SEND flag for X (sources with timer > 0) */
	num_sources_tosend = source_mark_send_flag_by_timer(group);

	/* Scan received sources (A) */
	for (i = 0; i < num_sources; ++i) {
		struct gm_source *source;
		struct in_addr *src_addr;
		bool new;

		src_addr = sources + i;

		/* Lookup reported source (A) */
		source = igmp_get_source_by_addr(group, *src_addr, &new);
		if (!source)
			continue;

		if (source->t_source_timer) {
			/* If found and timer running, clear SEND flag
			 * (X*A) */
			IGMP_SOURCE_DONT_SEND(source->source_flags);
			--num_sources_tosend;
		}

		/* (A)=GMI */
		igmp_source_reset_gmi(group, source);
	}

	/* Send sources marked with SEND flag: Q(G,X-A) */
	if (num_sources_tosend > 0) {
		source_query_send_by_flag(group, num_sources_tosend);
	}

	/* Send Q(G) */
	group_query_send(group);
}
+
/*
 * Handle an IGMPv3 CHANGE_TO_INCLUDE_MODE record (also reached via
 * IGMPv2 Leave translation, TO_IN({})), dispatching on the group's
 * current filter mode.
 */
void igmpv3_report_toin(struct gm_sock *igmp, struct in_addr from,
			struct in_addr group_addr, int num_sources,
			struct in_addr *sources)
{
	struct interface *ifp = igmp->interface;
	struct gm_group *group;

	on_trace(__func__, ifp, from, group_addr, num_sources, sources);

	/*
	 * If the requested filter mode is INCLUDE *and* the requested source
	 * list is empty, then the entry corresponding to the requested
	 * interface and multicast address is deleted if present. If no such
	 * entry is present, the request is ignored.
	 */
	if (num_sources) {
		/* non-existent group is created as INCLUDE {empty} */
		group = igmp_add_group_by_addr(igmp, group_addr);
		if (!group) {
			return;
		}
	} else {
		group = find_group_by_addr(igmp, group_addr);
		if (!group)
			return;
	}

	if (group->group_filtermode_isexcl) {
		/* EXCLUDE mode */
		toin_excl(group, num_sources, sources);
	} else {
		/* INCLUDE mode */
		toin_incl(group, num_sources, sources);
	}
}
+
+static void toex_incl(struct gm_group *group, int num_sources,
+ struct in_addr *sources)
+{
+ int num_sources_tosend = 0;
+ int i;
+
+ assert(!group->group_filtermode_isexcl);
+
+ /* Set DELETE flag for all known sources (A) */
+ source_mark_delete_flag(group);
+
+ /* Clear off SEND flag from all known sources (A) */
+ source_clear_send_flag(group->group_source_list);
+
+ /* Scan received sources (B) */
+ for (i = 0; i < num_sources; ++i) {
+ struct gm_source *source;
+ struct in_addr *src_addr;
+ bool new;
+
+ src_addr = sources + i;
+
+ /* Lookup reported source (B) */
+ source = igmp_get_source_by_addr(group, *src_addr, &new);
+ if (!new) {
+ /* If found, clear deletion flag: (A*B) */
+ IGMP_SOURCE_DONT_DELETE(source->source_flags);
+ /* and set SEND flag (A*B) */
+ IGMP_SOURCE_DO_SEND(source->source_flags);
+ ++num_sources_tosend;
+ }
+
+ } /* Scan received sources (B) */
+
+ group->group_filtermode_isexcl = 1; /* boolean=true */
+
+ /* Delete all sources marked with DELETE flag (A-B) */
+ source_delete_by_flag(group->group_source_list);
+
+ /* Send sources marked with SEND flag: Q(G,A*B) */
+ if (num_sources_tosend > 0) {
+ source_query_send_by_flag(group, num_sources_tosend);
+ }
+
+ assert(group->group_filtermode_isexcl);
+
+ group_exclude_fwd_anysrc_ifempty(group);
+}
+
/*
 * Merge a CHANGE_TO_EXCLUDE record into a group already in EXCLUDE
 * mode (RFC 3376 6.4.2, router state EXCLUDE(X,Y), report TO_EX(A)):
 * create (A-X-Y) at the group timer, drop (X-A)/(Y-A), query Q(G,A-Y).
 */
static void toex_excl(struct gm_group *group, int num_sources,
		      struct in_addr *sources)
{
	int num_sources_tosend = 0;
	int i;

	/* set DELETE flag for all known sources (X,Y) */
	source_mark_delete_flag(group);

	/* clear off SEND flag from all known sources (X,Y) */
	source_clear_send_flag(group->group_source_list);

	if (num_sources == 0) {
		/* TO_EX({}): preserve the (*,G) pseudo-source record */
		struct gm_source *source;
		struct in_addr any = {.s_addr = INADDR_ANY};

		source = igmp_find_source_by_addr(group, any);
		if (source)
			IGMP_SOURCE_DONT_DELETE(source->source_flags);
	}

	/* scan received sources (A) */
	for (i = 0; i < num_sources; ++i) {
		struct gm_source *source;
		struct in_addr *src_addr;
		bool new;

		src_addr = sources + i;

		/* lookup reported source (A) in known sources (X,Y) */
		source = igmp_get_source_by_addr(group, *src_addr, &new);
		if (!source)
			continue;

		if (!new) {
			/* if found, clear off DELETE flag from reported source
			 * (A) */
			IGMP_SOURCE_DONT_DELETE(source->source_flags);
		} else {
			/* if not found, create source with Group Timer:
			 * (A-X-Y)=Group Timer */
			long group_timer_msec;

			assert(!source->t_source_timer); /* timer == 0 */
			group_timer_msec = igmp_group_timer_remain_msec(group);
			igmp_source_timer_on(group, source, group_timer_msec);
			assert(source->t_source_timer); /* (A-X-Y) timer > 0 */

			/* make sure source is created with DELETE flag unset */
			assert(!IGMP_SOURCE_TEST_DELETE(source->source_flags));
		}

		/* make sure reported source has DELETE flag unset */
		assert(!IGMP_SOURCE_TEST_DELETE(source->source_flags));

		if (source->t_source_timer) {
			/* if source timer>0 mark SEND flag: Q(G,A-Y) */
			IGMP_SOURCE_DO_SEND(source->source_flags);
			++num_sources_tosend;
		}

	} /* scan received sources (A) */

	/*
	  delete all sources marked with DELETE flag:
	  Delete (X-A)
	  Delete (Y-A)
	*/
	source_delete_by_flag(group->group_source_list);

	/* send sources marked with SEND flag: Q(G,A-Y) */
	if (num_sources_tosend > 0) {
		source_query_send_by_flag(group, num_sources_tosend);
	}
}
+
/*
 * Handle an IGMPv3 CHANGE_TO_EXCLUDE_MODE record, dispatching on the
 * group's current filter mode; the group always ends in EXCLUDE mode
 * with its timer reset to GMI.
 */
void igmpv3_report_toex(struct gm_sock *igmp, struct in_addr from,
			struct in_addr group_addr, int num_sources,
			struct in_addr *sources)
{
	struct interface *ifp = igmp->interface;
	struct gm_group *group;

	on_trace(__func__, ifp, from, group_addr, num_sources, sources);

	/* non-existent group is created as INCLUDE {empty} */
	group = igmp_add_group_by_addr(igmp, group_addr);
	if (!group) {
		return;
	}

	if (group->group_filtermode_isexcl) {
		/* EXCLUDE mode */
		toex_excl(group, num_sources, sources);
	} else {
		/* INCLUDE mode */
		toex_incl(group, num_sources, sources);
		assert(group->group_filtermode_isexcl);
	}
	assert(group->group_filtermode_isexcl);

	/* Group Timer=GMI */
	igmp_group_reset_gmi(group);
}
+
/*
 * Handle an IGMPv3 ALLOW_NEW_SOURCES record: router-side processing is
 * identical to MODE_IS_INCLUDE, so delegate to allow().
 */
void igmpv3_report_allow(struct gm_sock *igmp, struct in_addr from,
			 struct in_addr group_addr, int num_sources,
			 struct in_addr *sources)
{
	on_trace(__func__, igmp->interface, from, group_addr, num_sources,
		 sources);

	allow(igmp, from, group_addr, num_sources, sources);
}
+
+static void igmp_send_query_group(struct gm_group *group, char *query_buf,
+ size_t query_buf_size, int num_sources,
+ int s_flag)
+{
+ struct interface *ifp = group->interface;
+ struct pim_interface *pim_ifp = ifp->info;
+ struct gm_sock *igmp;
+ struct listnode *sock_node;
+
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_socket_list, sock_node, igmp)) {
+ igmp_send_query(
+ pim_ifp->igmp_version, group, query_buf, query_buf_size,
+ num_sources, group->group_addr, group->group_addr,
+ pim_ifp->gm_specific_query_max_response_time_dsec,
+ s_flag, igmp);
+ }
+}
+
+/*
+ RFC3376: 6.6.3.1. Building and Sending Group Specific Queries
+
+ When transmitting a group specific query, if the group timer is
+ larger than LMQT, the "Suppress Router-Side Processing" bit is set
+ in the query message.
+*/
/*
 * Retransmit one group-specific query.  The query buffer is sized for
 * the interface's IGMP version (v3 queries carry extra fields); the
 * S-flag is set when the group timer exceeds LMQT (RFC 3376 6.6.3.1).
 */
static void group_retransmit_group(struct gm_group *group)
{
	struct pim_interface *pim_ifp;
	long lmqc;	/* Last Member Query Count */
	long lmqi_msec; /* Last Member Query Interval */
	long lmqt_msec; /* Last Member Query Time */
	int s_flag;
	int query_buf_size;

	pim_ifp = group->interface->info;

	if (pim_ifp->igmp_version == 3) {
		query_buf_size = PIM_IGMP_BUFSIZE_WRITE;
	} else {
		query_buf_size = IGMP_V12_MSG_SIZE;
	}

	/* VLA: buffer size depends on the interface's IGMP version */
	char query_buf[query_buf_size];

	lmqc = pim_ifp->gm_last_member_query_count;
	lmqi_msec = 100 * pim_ifp->gm_specific_query_max_response_time_dsec;
	lmqt_msec = lmqc * lmqi_msec;

	/*
	  RFC3376: 6.6.3.1. Building and Sending Group Specific Queries

	  When transmitting a group specific query, if the group timer is
	  larger than LMQT, the "Suppress Router-Side Processing" bit is set
	  in the query message.
	*/
	s_flag = igmp_group_timer_remain_msec(group) > lmqt_msec;

	if (PIM_DEBUG_GM_TRACE) {
		char group_str[INET_ADDRSTRLEN];
		pim_inet4_dump("<group?>", group->group_addr, group_str,
			       sizeof(group_str));
		zlog_debug(
			"retransmit_group_specific_query: group %s on %s: s_flag=%d count=%d",
			group_str, group->interface->name, s_flag,
			group->group_specific_query_retransmit_count);
	}

	/*
	  RFC3376: 4.1.12. IP Destination Addresses for Queries

	  Group-Specific and Group-and-Source-Specific Queries are sent with
	  an IP destination address equal to the multicast address of
	  interest.
	*/

	igmp_send_query_group(group, query_buf, sizeof(query_buf), 0, s_flag);
}
+
+/*
+ RFC3376: 6.6.3.2. Building and Sending Group and Source Specific Queries
+
+ When building a group and source specific query for a group G, two
+ separate query messages are sent for the group. The first one has
+ the "Suppress Router-Side Processing" bit set and contains all the
+ sources with retransmission state and timers greater than LMQT. The
+ second has the "Suppress Router-Side Processing" bit clear and
+ contains all the sources with retransmission state and timers lower
+ or equal to LMQT. If either of the two calculated messages does not
+ contain any sources, then its transmission is suppressed.
+ */
/*
 * Retransmit group-and-source-specific queries for all sources with
 * retransmission state (RFC 3376 6.6.3.2): one query with the S-flag
 * set for sources whose timer exceeds LMQT, one with it clear for the
 * rest; either query is suppressed when it would carry no sources.
 * Returns the number of sources that still have retransmissions left.
 */
static int group_retransmit_sources(struct gm_group *group,
				    int send_with_sflag_set)
{
	struct pim_interface *pim_ifp;
	long lmqc;	/* Last Member Query Count */
	long lmqi_msec; /* Last Member Query Interval */
	long lmqt_msec; /* Last Member Query Time */
	char query_buf1[PIM_IGMP_BUFSIZE_WRITE]; /* 1 = with s_flag set */
	char query_buf2[PIM_IGMP_BUFSIZE_WRITE]; /* 2 = with s_flag clear */
	int query_buf1_max_sources;
	int query_buf2_max_sources;
	struct in_addr *source_addr1;
	struct in_addr *source_addr2;
	int num_sources_tosend1;
	int num_sources_tosend2;
	struct listnode *src_node;
	struct gm_source *src;
	int num_retransmit_sources_left = 0;

	/* cursors into each buffer's source-address array */
	source_addr1 = (struct in_addr *)(query_buf1 + IGMP_V3_SOURCES_OFFSET);
	source_addr2 = (struct in_addr *)(query_buf2 + IGMP_V3_SOURCES_OFFSET);

	pim_ifp = group->interface->info;

	lmqc = pim_ifp->gm_last_member_query_count;
	lmqi_msec = 100 * pim_ifp->gm_specific_query_max_response_time_dsec;
	lmqt_msec = lmqc * lmqi_msec;

	/* Scan all group sources */
	for (ALL_LIST_ELEMENTS_RO(group->group_source_list, src_node, src)) {

		/* Source has retransmission state? */
		if (src->source_query_retransmit_count < 1)
			continue;

		if (--src->source_query_retransmit_count > 0) {
			++num_retransmit_sources_left;
		}

		/* Copy source address into appropriate query buffer */
		if (igmp_source_timer_remain_msec(src) > lmqt_msec) {
			*source_addr1 = src->source_addr;
			++source_addr1;
		} else {
			*source_addr2 = src->source_addr;
			++source_addr2;
		}
	}

	/* cursor minus base = number of addresses written per buffer */
	num_sources_tosend1 =
		source_addr1
		- (struct in_addr *)(query_buf1 + IGMP_V3_SOURCES_OFFSET);
	num_sources_tosend2 =
		source_addr2
		- (struct in_addr *)(query_buf2 + IGMP_V3_SOURCES_OFFSET);

	if (PIM_DEBUG_GM_TRACE) {
		char group_str[INET_ADDRSTRLEN];
		pim_inet4_dump("<group?>", group->group_addr, group_str,
			       sizeof(group_str));
		zlog_debug(
			"retransmit_grp&src_specific_query: group %s on %s: srcs_with_sflag=%d srcs_wo_sflag=%d will_send_sflag=%d retransmit_src_left=%d",
			group_str, group->interface->name, num_sources_tosend1,
			num_sources_tosend2, send_with_sflag_set,
			num_retransmit_sources_left);
	}

	if (num_sources_tosend1 > 0) {
		/*
		  Send group-and-source-specific query with s_flag set and all
		  sources with timers greater than LMQT.
		*/

		if (send_with_sflag_set) {

			query_buf1_max_sources =
				(sizeof(query_buf1) - IGMP_V3_SOURCES_OFFSET)
				>> 2;
			if (num_sources_tosend1 > query_buf1_max_sources) {
				char group_str[INET_ADDRSTRLEN];
				pim_inet4_dump("<group?>", group->group_addr,
					       group_str, sizeof(group_str));
				zlog_warn(
					"%s: group %s on %s: s_flag=1 unable to fit %d sources into buf_size=%zu (max_sources=%d)",
					__func__, group_str,
					group->interface->name,
					num_sources_tosend1, sizeof(query_buf1),
					query_buf1_max_sources);
			} else {
				/*
				  RFC3376: 4.1.12. IP Destination Addresses for
				  Queries

				  Group-Specific and Group-and-Source-Specific
				  Queries are sent with
				  an IP destination address equal to the
				  multicast address of
				  interest.
				*/

				igmp_send_query_group(
					group, query_buf1, sizeof(query_buf1),
					num_sources_tosend1, 1 /* s_flag */);
			}

		} /* send_with_sflag_set */
	}

	if (num_sources_tosend2 > 0) {
		/*
		  Send group-and-source-specific query with s_flag clear and all
		  sources with timers lower or equal to LMQT.
		*/

		query_buf2_max_sources =
			(sizeof(query_buf2) - IGMP_V3_SOURCES_OFFSET) >> 2;
		if (num_sources_tosend2 > query_buf2_max_sources) {
			char group_str[INET_ADDRSTRLEN];
			pim_inet4_dump("<group?>", group->group_addr, group_str,
				       sizeof(group_str));
			zlog_warn(
				"%s: group %s on %s: s_flag=0 unable to fit %d sources into buf_size=%zu (max_sources=%d)",
				__func__, group_str, group->interface->name,
				num_sources_tosend2, sizeof(query_buf2),
				query_buf2_max_sources);
		} else {
			/*
			  RFC3376: 4.1.12. IP Destination Addresses for Queries

			  Group-Specific and Group-and-Source-Specific Queries
			  are sent with
			  an IP destination address equal to the multicast
			  address of
			  interest.
			*/

			igmp_send_query_group(
				group, query_buf2, sizeof(query_buf2),
				num_sources_tosend2, 0 /* s_flag */);
		}
	}

	return num_retransmit_sources_left;
}
+
+/*
+  Timer callback for the per-group query retransmit timer.
+
+  Resends any pending group-specific queries (RFC3376: 6.6.3.1) and
+  group-and-source-specific queries (RFC3376: 6.6.3.2), then re-arms
+  the timer while any retransmit counter is still pending.
+*/
+static void igmp_group_retransmit(struct event *t)
+{
+ struct gm_group *group;
+ int num_retransmit_sources_left;
+ int send_with_sflag_set; /* boolean */
+
+ group = EVENT_ARG(t);
+
+ if (PIM_DEBUG_GM_TRACE) {
+ char group_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<group?>", group->group_addr, group_str,
+ sizeof(group_str));
+ zlog_debug("group_retransmit_timer: group %s on %s", group_str,
+ group->interface->name);
+ }
+
+ /* Retransmit group-specific queries? (RFC3376: 6.6.3.1) */
+ if (group->group_specific_query_retransmit_count > 0) {
+
+ /* Retransmit group-specific queries (RFC3376: 6.6.3.1) */
+ group_retransmit_group(group);
+ --group->group_specific_query_retransmit_count;
+
+ /*
+ RFC3376: 6.6.3.2
+ If a group specific query is scheduled to be transmitted at
+ the
+ same time as a group and source specific query for the same
+ group,
+ then transmission of the group and source specific message
+ with the
+ "Suppress Router-Side Processing" bit set may be suppressed.
+ */
+ send_with_sflag_set = 0; /* boolean=false */
+ } else {
+ send_with_sflag_set = 1; /* boolean=true */
+ }
+
+ /* Retransmit group-and-source-specific queries (RFC3376: 6.6.3.2) */
+ num_retransmit_sources_left =
+ group_retransmit_sources(group, send_with_sflag_set);
+
+ /*
+ Keep group retransmit timer running if there is any retransmit
+ counter pending
+ */
+ if ((num_retransmit_sources_left > 0)
+ || (group->group_specific_query_retransmit_count > 0)) {
+ group_retransmit_timer_on(group);
+ }
+}
+
+/*
+ group_retransmit_timer_on:
+ if group retransmit timer isn't running, starts it;
+ otherwise, do nothing
+
+ The timer period is one Last Member Query Interval, which is
+ configured in deciseconds and converted to milliseconds here.
+*/
+static void group_retransmit_timer_on(struct gm_group *group)
+{
+ struct pim_interface *pim_ifp;
+ long lmqi_msec; /* Last Member Query Interval */
+
+ /* if group retransmit timer is running, do nothing */
+ if (group->t_group_query_retransmit_timer) {
+ return;
+ }
+
+ pim_ifp = group->interface->info;
+
+ lmqi_msec = 100 * pim_ifp->gm_specific_query_max_response_time_dsec;
+
+ if (PIM_DEBUG_GM_TRACE) {
+ char group_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<group?>", group->group_addr, group_str,
+ sizeof(group_str));
+ zlog_debug(
+ "Scheduling %ld.%03ld sec retransmit timer for group %s on %s",
+ lmqi_msec / 1000, lmqi_msec % 1000, group_str,
+ group->interface->name);
+ }
+
+ event_add_timer_msec(router->master, igmp_group_retransmit, group,
+ lmqi_msec, &group->t_group_query_retransmit_timer);
+}
+
+/* Remaining time, in msec, on the group's group timer. */
+static long igmp_group_timer_remain_msec(struct gm_group *group)
+{
+ return pim_time_timer_remain_msec(group->t_group_timer);
+}
+
+/* Remaining time, in msec, on the source's source timer. */
+static long igmp_source_timer_remain_msec(struct gm_source *source)
+{
+ return pim_time_timer_remain_msec(source->t_source_timer);
+}
+
+/*
+  RFC3376: 6.6.3.1. Building and Sending Group Specific Queries
+
+  Start the group-specific query retransmission process: lower the
+  group timer to LMQT, arm LMQC retransmissions, send the first query
+  right away, and make sure the retransmit timer is running.
+*/
+static void group_query_send(struct gm_group *group)
+{
+ struct pim_interface *ifp_pim = group->interface->info;
+ /* Last Member Query Count: number of queries to (re)transmit */
+ long last_member_query_count = ifp_pim->gm_last_member_query_count;
+
+ /* lower group timer to LMQT */
+ igmp_group_timer_lower_to_lmqt(group);
+
+ /* arm the retransmission counter */
+ group->group_specific_query_retransmit_count = last_member_query_count;
+
+ /* first group-specific query goes out immediately */
+ group_retransmit_group(group);
+
+ /* keep the group retransmit timer ticking */
+ group_retransmit_timer_on(group);
+}
+
+/*
+  RFC3376: 6.6.3.2. Building and Sending Group and Source Specific Queries
+
+  num_sources_tosend: count of sources already marked with the SEND
+  flag by the caller; must be positive.
+*/
+static void source_query_send_by_flag(struct gm_group *group,
+ int num_sources_tosend)
+{
+ struct pim_interface *pim_ifp;
+ struct listnode *src_node;
+ struct gm_source *src;
+ long lmqc; /* Last Member Query Count */
+ long lmqi_msec; /* Last Member Query Interval */
+ long lmqt_msec; /* Last Member Query Time */
+
+ assert(num_sources_tosend > 0);
+
+ pim_ifp = group->interface->info;
+
+ lmqc = pim_ifp->gm_last_member_query_count;
+ lmqi_msec = 100 * pim_ifp->gm_specific_query_max_response_time_dsec;
+ lmqt_msec = lmqc * lmqi_msec;
+
+ /*
+ RFC3376: 6.6.3.2. Building and Sending Group and Source Specific
+ Queries
+
+ (...) for each of the sources in X of group G, with source timer
+ larger
+ than LMQT:
+ o Set number of retransmissions for each source to [Last Member
+ Query Count].
+ o Lower source timer to LMQT.
+ */
+ for (ALL_LIST_ELEMENTS_RO(group->group_source_list, src_node, src)) {
+ if (IGMP_SOURCE_TEST_SEND(src->source_flags)) {
+ /* source "src" in X of group G */
+ if (igmp_source_timer_remain_msec(src) > lmqt_msec) {
+ src->source_query_retransmit_count = lmqc;
+ igmp_source_timer_lower_to_lmqt(src);
+ }
+ }
+ }
+
+ /* send group-and-source specific queries */
+ group_retransmit_sources(group, 1 /* send_with_sflag_set=true */);
+
+ /* make sure group retransmit timer is running */
+ group_retransmit_timer_on(group);
+}
+
+/*
+  Handle a BLOCK(A) record while the group is in EXCLUDE mode
+  (RFC3376: 6.4.2): create unknown sources with the group timer,
+  then query the sources that still have a running timer: Q(G,A-Y).
+*/
+static void block_excl(struct gm_group *group, int num_sources,
+ struct in_addr *sources)
+{
+ int num_sources_tosend = 0;
+ int i;
+
+ /* 1. clear off SEND flag from all known sources (X,Y) */
+ source_clear_send_flag(group->group_source_list);
+
+ /* 2. scan received sources (A) */
+ for (i = 0; i < num_sources; ++i) {
+ struct gm_source *source;
+ struct in_addr *src_addr;
+ bool new;
+
+ src_addr = sources + i;
+
+ /* lookup reported source (A) in known sources (X,Y) */
+ source = igmp_get_source_by_addr(group, *src_addr, &new);
+ if (!source)
+ continue;
+
+ if (new) {
+ /* 3: if not found, create source with Group Timer:
+ * (A-X-Y)=Group Timer */
+ long group_timer_msec;
+
+ assert(!source->t_source_timer); /* timer == 0 */
+ group_timer_msec = igmp_group_timer_remain_msec(group);
+ igmp_source_timer_on(group, source, group_timer_msec);
+ assert(source->t_source_timer); /* (A-X-Y) timer > 0 */
+ }
+
+ if (source->t_source_timer) {
+ /* 4. if source timer>0 mark SEND flag: Q(G,A-Y) */
+ IGMP_SOURCE_DO_SEND(source->source_flags);
+ ++num_sources_tosend;
+ }
+ }
+
+ /* 5. send sources marked with SEND flag: Q(G,A-Y) */
+ if (num_sources_tosend > 0) {
+ source_query_send_by_flag(group, num_sources_tosend);
+ }
+}
+
+/*
+  Handle a BLOCK(A) record while the group is in INCLUDE mode
+  (RFC3376: 6.4.2): query the intersection of the reported sources
+  with the currently joined ones: Q(G,A*B).
+*/
+static void block_incl(struct gm_group *group, int num_sources,
+ struct in_addr *sources)
+{
+ int marked = 0;
+ int idx;
+
+ /* 1. clear off SEND flag from all known sources (B) */
+ source_clear_send_flag(group->group_source_list);
+
+ /* 2. scan received sources (A) for matches in known sources (B) */
+ for (idx = 0; idx < num_sources; ++idx) {
+ struct gm_source *known;
+
+ known = igmp_find_source_by_addr(group, sources[idx]);
+ if (!known)
+ continue;
+
+ /* 3. found (A*B): mark SEND flag for Q(G,A*B) */
+ IGMP_SOURCE_DO_SEND(known->source_flags);
+ ++marked;
+ }
+
+ /* 4. send sources marked with SEND flag: Q(G,A*B) */
+ if (marked > 0)
+ source_query_send_by_flag(group, marked);
+}
+
+/*
+  Entry point for a BLOCK_OLD_SOURCES group record: look up (or
+  create) the group and dispatch on its current filter mode.
+*/
+void igmpv3_report_block(struct gm_sock *igmp, struct in_addr from,
+ struct in_addr group_addr, int num_sources,
+ struct in_addr *sources)
+{
+ struct gm_group *group;
+
+ on_trace(__func__, igmp->interface, from, group_addr, num_sources,
+ sources);
+
+ /* non-existent group is created as INCLUDE {empty} */
+ group = igmp_add_group_by_addr(igmp, group_addr);
+ if (!group)
+ return;
+
+ if (group->group_filtermode_isexcl)
+ block_excl(group, num_sources, sources); /* EXCLUDE mode */
+ else
+ block_incl(group, num_sources, sources); /* INCLUDE mode */
+}
+
+/*
+  Lower the group timer to Last Member Query Time (RFC3376: 6.6.1).
+  No-op when the group is in INCLUDE mode, since the group timer is
+  only meaningful in EXCLUDE mode.
+*/
+void igmp_group_timer_lower_to_lmqt(struct gm_group *group)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ char *ifname;
+ int lmqi_dsec; /* Last Member Query Interval */
+ int lmqc; /* Last Member Query Count */
+ int lmqt_msec; /* Last Member Query Time */
+
+ /*
+ RFC 3376: 6.2.2. Definition of Group Timers
+
+ The group timer is only used when a group is in EXCLUDE mode and
+ it represents the time for the *filter-mode* of the group to
+ expire and switch to INCLUDE mode.
+ */
+ if (!group->group_filtermode_isexcl) {
+ return;
+ }
+
+ ifp = group->interface;
+ pim_ifp = ifp->info;
+ ifname = ifp->name;
+
+ lmqi_dsec = pim_ifp->gm_specific_query_max_response_time_dsec;
+ lmqc = pim_ifp->gm_last_member_query_count;
+ lmqt_msec = PIM_IGMP_LMQT_MSEC(
+ lmqi_dsec, lmqc); /* lmqt_msec = (100 * lmqi_dsec) * lmqc */
+
+ if (PIM_DEBUG_GM_TRACE) {
+ char group_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<group?>", group->group_addr, group_str,
+ sizeof(group_str));
+ zlog_debug(
+ "%s: group %s on %s: LMQC=%d LMQI=%d dsec LMQT=%d msec",
+ __func__, group_str, ifname, lmqc, lmqi_dsec,
+ lmqt_msec);
+ }
+
+ assert(group->group_filtermode_isexcl);
+
+ igmp_group_timer_on(group, lmqt_msec, ifname);
+}
+
+/*
+  Lower the source timer to Last Member Query Time (RFC3376: 6.6.1),
+  derived from the owning group's interface configuration.
+*/
+void igmp_source_timer_lower_to_lmqt(struct gm_source *source)
+{
+ struct gm_group *group;
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ char *ifname;
+ int lmqi_dsec; /* Last Member Query Interval */
+ int lmqc; /* Last Member Query Count */
+ int lmqt_msec; /* Last Member Query Time */
+
+ group = source->source_group;
+ ifp = group->interface;
+ pim_ifp = ifp->info;
+ ifname = ifp->name;
+
+ lmqi_dsec = pim_ifp->gm_specific_query_max_response_time_dsec;
+ lmqc = pim_ifp->gm_last_member_query_count;
+ lmqt_msec = PIM_IGMP_LMQT_MSEC(
+ lmqi_dsec, lmqc); /* lmqt_msec = (100 * lmqi_dsec) * lmqc */
+
+ if (PIM_DEBUG_GM_TRACE) {
+ char group_str[INET_ADDRSTRLEN];
+ char source_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<group?>", group->group_addr, group_str,
+ sizeof(group_str));
+ pim_inet4_dump("<source?>", source->source_addr, source_str,
+ sizeof(source_str));
+ zlog_debug(
+ "%s: group %s source %s on %s: LMQC=%d LMQI=%d dsec LMQT=%d msec",
+ __func__, group_str, source_str, ifname, lmqc,
+ lmqi_dsec, lmqt_msec);
+ }
+
+ igmp_source_timer_on(group, source, lmqt_msec);
+}
+
+/*
+  Fill in the fixed IGMPv3 query header in query_buf, checksum the
+  whole message and transmit it on fd.
+
+  The caller must have already placed num_sources addresses starting
+  at IGMP_V3_SOURCES_OFFSET: this function only writes the header
+  bytes (type, max-resp code, group, S/QRV, QQIC, source count) but
+  checksums and sends the full msg_size, which covers the source list.
+*/
+void igmp_v3_send_query(struct gm_group *group, int fd, const char *ifname,
+ char *query_buf, int query_buf_size, int num_sources,
+ struct in_addr dst_addr, struct in_addr group_addr,
+ int query_max_response_time_dsec, uint8_t s_flag,
+ uint8_t querier_robustness_variable,
+ uint16_t querier_query_interval)
+{
+ ssize_t msg_size;
+ uint8_t max_resp_code;
+ uint8_t qqic;
+ ssize_t sent;
+ struct sockaddr_in to;
+ socklen_t tolen;
+ uint16_t checksum;
+
+ assert(num_sources >= 0);
+
+ msg_size = IGMP_V3_SOURCES_OFFSET + (num_sources << 2);
+ if (msg_size > query_buf_size) {
+ flog_err(
+ EC_LIB_DEVELOPMENT,
+ "%s %s: unable to send: msg_size=%zd larger than query_buf_size=%d",
+ __FILE__, __func__, msg_size, query_buf_size);
+ return;
+ }
+
+ s_flag = PIM_FORCE_BOOLEAN(s_flag);
+ assert((s_flag == 0) || (s_flag == 1));
+
+ max_resp_code = igmp_msg_encode16to8(query_max_response_time_dsec);
+ qqic = igmp_msg_encode16to8(querier_query_interval);
+
+ /*
+ RFC 3376: 4.1.6. QRV (Querier's Robustness Variable)
+
+ If non-zero, the QRV field contains the [Robustness Variable]
+ value used by the querier, i.e., the sender of the Query. If the
+ querier's [Robustness Variable] exceeds 7, the maximum value of
+ the QRV field, the QRV is set to zero.
+ */
+ if (querier_robustness_variable > 7) {
+ querier_robustness_variable = 0;
+ }
+
+ query_buf[0] = PIM_IGMP_MEMBERSHIP_QUERY;
+ query_buf[1] = max_resp_code;
+ *(uint16_t *)(query_buf + IGMP_CHECKSUM_OFFSET) =
+ 0; /* for computing checksum */
+ memcpy(query_buf + 4, &group_addr, sizeof(struct in_addr));
+
+ query_buf[8] = (s_flag << 3) | querier_robustness_variable;
+ query_buf[9] = qqic;
+ *(uint16_t *)(query_buf + IGMP_V3_NUMSOURCES_OFFSET) =
+ htons(num_sources);
+
+ checksum = in_cksum(query_buf, msg_size);
+ *(uint16_t *)(query_buf + IGMP_CHECKSUM_OFFSET) = checksum;
+
+ if (PIM_DEBUG_GM_PACKETS) {
+ char dst_str[INET_ADDRSTRLEN];
+ char group_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<dst?>", dst_addr, dst_str, sizeof(dst_str));
+ pim_inet4_dump("<group?>", group_addr, group_str,
+ sizeof(group_str));
+ zlog_debug(
+ "Send IGMPv3 query to %s on %s for group %s, sources=%d msg_size=%zd s_flag=%x QRV=%u QQI=%u QQIC=%02x",
+ dst_str, ifname, group_str, num_sources, msg_size,
+ s_flag, querier_robustness_variable,
+ querier_query_interval, qqic);
+ }
+
+ memset(&to, 0, sizeof(to));
+ to.sin_family = AF_INET;
+ to.sin_addr = dst_addr;
+ tolen = sizeof(to);
+
+ /* MSG_DONTWAIT: never block the daemon on a full socket buffer */
+ sent = sendto(fd, query_buf, msg_size, MSG_DONTWAIT,
+ (struct sockaddr *)&to, tolen);
+ if (sent != (ssize_t)msg_size) {
+ char dst_str[INET_ADDRSTRLEN];
+ char group_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<dst?>", dst_addr, dst_str, sizeof(dst_str));
+ pim_inet4_dump("<group?>", group_addr, group_str,
+ sizeof(group_str));
+ if (sent < 0) {
+ zlog_warn(
+ "Send IGMPv3 query failed due to %s on %s: group=%s msg_size=%zd: errno=%d: %s",
+ dst_str, ifname, group_str, msg_size, errno,
+ safe_strerror(errno));
+ } else {
+ zlog_warn(
+ "Send IGMPv3 query failed due to %s on %s: group=%s msg_size=%zd: sent=%zd",
+ dst_str, ifname, group_str, msg_size, sent);
+ }
+ return;
+ }
+
+ /*
+ s_flag sanity test: s_flag must be set for general queries
+
+ RFC 3376: 6.6.1. Timer Updates
+
+ When a router sends or receives a query with a clear Suppress
+ Router-Side Processing flag, it must update its timers to reflect
+ the correct timeout values for the group or sources being queried.
+
+ General queries don't trigger timer update.
+ */
+ if (!s_flag) {
+ /* general query? */
+ if (group_addr.s_addr == INADDR_ANY) {
+ char dst_str[INET_ADDRSTRLEN];
+ char group_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<dst?>", dst_addr, dst_str,
+ sizeof(dst_str));
+ pim_inet4_dump("<group?>", group_addr, group_str,
+ sizeof(group_str));
+ zlog_warn(
+ "%s: to %s on %s: group=%s sources=%d: s_flag is clear for general query!",
+ __func__, dst_str, ifname, group_str,
+ num_sources);
+ }
+ }
+}
+
+/*
+  Process a received IGMPv3 query: adopt the sender's QRV and (when
+  another querier is present) its QQI, then apply the RFC3376 6.6.1
+  timer updates for non-general queries with the S-flag clear.
+
+  NOTE(review): igmp_msg is read at fixed offsets without a length
+  check here — presumably the caller has already validated the
+  message size; confirm against the receive path.
+*/
+void igmp_v3_recv_query(struct gm_sock *igmp, const char *from_str,
+ char *igmp_msg)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ struct in_addr group_addr;
+ uint8_t resv_s_qrv = 0;
+ uint8_t s_flag = 0;
+ uint8_t qrv = 0;
+ int i;
+
+ memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr));
+ ifp = igmp->interface;
+ pim_ifp = ifp->info;
+
+ /*
+ * RFC 3376: 4.1.6. QRV (Querier's Robustness Variable)
+ *
+ * Routers adopt the QRV value from the most recently received Query
+ * as their own [Robustness Variable] value, unless that most
+ * recently received QRV was zero, in which case the receivers use
+ * the default [Robustness Variable] value specified in section 8.1
+ * or a statically configured value.
+ */
+ resv_s_qrv = igmp_msg[8];
+ qrv = 7 & resv_s_qrv;
+ igmp->querier_robustness_variable =
+ qrv ? qrv : pim_ifp->gm_default_robustness_variable;
+
+ /*
+ * RFC 3376: 4.1.7. QQIC (Querier's Query Interval Code)
+ *
+ * Multicast routers that are not the current querier adopt the QQI
+ * value from the most recently received Query as their own [Query
+ * Interval] value, unless that most recently received QQI was zero,
+ * in which case the receiving routers use the default.
+ */
+ if (igmp->t_other_querier_timer) {
+ /* other querier present */
+ uint8_t qqic;
+ uint16_t qqi;
+ qqic = igmp_msg[9];
+ qqi = igmp_msg_decode8to16(qqic);
+ igmp->querier_query_interval =
+ qqi ? qqi : pim_ifp->gm_default_query_interval;
+
+ if (PIM_DEBUG_GM_TRACE) {
+ char ifaddr_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<ifaddr?>", igmp->ifaddr, ifaddr_str,
+ sizeof(ifaddr_str));
+ zlog_debug(
+ "Querier %s new query interval is %s QQI=%u sec (recv QQIC=%02x from %s)",
+ ifaddr_str,
+ qqi ? "recv-non-default" : "default",
+ igmp->querier_query_interval, qqic, from_str);
+ }
+ }
+
+ /*
+ * RFC 3376: 6.6.1. Timer Updates
+ *
+ * When a router sends or receives a query with a clear Suppress
+ * Router-Side Processing flag, it must update its timers to reflect
+ * the correct timeout values for the group or sources being queried.
+ *
+ * General queries don't trigger timer update.
+ */
+ s_flag = (1 << 3) & resv_s_qrv;
+
+ if (!s_flag) {
+ /* s_flag is clear */
+
+ if (group_addr.s_addr == INADDR_ANY) {
+ /* this is a general query */
+ /* log that general query should have the s_flag set */
+ zlog_warn(
+ "General IGMP query v3 from %s on %s: Suppress Router-Side Processing flag is clear",
+ from_str, ifp->name);
+ } else {
+ struct gm_group *group;
+
+ /* this is a non-general query: perform timer updates */
+
+ group = find_group_by_addr(igmp, group_addr);
+ if (group) {
+ int recv_num_sources = ntohs(*(
+ uint16_t
+ *)(igmp_msg
+ + IGMP_V3_NUMSOURCES_OFFSET));
+
+ /*
+ * RFC 3376: 6.6.1. Timer Updates
+ * Query Q(G,A): Source Timer for sources in A
+ * are lowered to LMQT
+ * Query Q(G): Group Timer is lowered to LMQT
+ */
+ if (recv_num_sources < 1) {
+ /* Query Q(G): Group Timer is lowered to
+ * LMQT */
+
+ igmp_group_timer_lower_to_lmqt(group);
+ } else {
+ /* Query Q(G,A): Source Timer for
+ * sources in A are lowered to LMQT */
+
+ /* Scan sources in query and lower their
+ * timers to LMQT */
+ struct in_addr *sources =
+ (struct in_addr
+ *)(igmp_msg
+ + IGMP_V3_SOURCES_OFFSET);
+ for (i = 0; i < recv_num_sources; ++i) {
+ struct in_addr src_addr;
+ struct gm_source *src;
+ memcpy(&src_addr, sources + i,
+ sizeof(struct in_addr));
+ src = igmp_find_source_by_addr(
+ group, src_addr);
+ if (src) {
+ igmp_source_timer_lower_to_lmqt(
+ src);
+ }
+ }
+ }
+ } else {
+ char group_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<group?>", group_addr,
+ group_str, sizeof(group_str));
+ zlog_warn(
+ "IGMP query v3 from %s on %s: could not find group %s for timer update",
+ from_str, ifp->name, group_str);
+ }
+ }
+ } /* s_flag is clear: timer updates */
+}
+
+/*
+  Decide whether a received IGMPv3 group record should be processed.
+
+  Returns false (record is dropped, with a debug log) when:
+  - the group is filtered by the interface's boundary prefix-list,
+  - the group falls in the link-local 224.0.0.0/24 range, or
+  - the record asks for EXCLUDE mode on a group in the SSM range
+    (RFC 4604, section 2.2.1).
+*/
+static bool igmp_pkt_grp_addr_ok(struct interface *ifp, const char *from_str,
+ struct in_addr grp, int rec_type)
+{
+ struct pim_interface *pim_ifp;
+ struct in_addr grp_addr;
+
+ pim_ifp = ifp->info;
+
+ /* determine filtering status for group */
+ if (pim_is_group_filtered(pim_ifp, &grp)) {
+ if (PIM_DEBUG_GM_PACKETS) {
+ zlog_debug(
+ "Filtering IGMPv3 group record %pI4 from %s on %s per prefix-list %s",
+ &grp.s_addr, from_str, ifp->name,
+ pim_ifp->boundary_oil_plist);
+ }
+ return false;
+ }
+
+ /*
+ * If we receive a igmp report with the group in 224.0.0.0/24
+ * then we should ignore it
+ */
+
+ /* host byte order for the range check */
+ grp_addr.s_addr = ntohl(grp.s_addr);
+
+ if (pim_is_group_224_0_0_0_24(grp_addr)) {
+ if (PIM_DEBUG_GM_PACKETS) {
+ zlog_debug(
+ "Ignoring IGMPv3 group record %pI4 from %s on %s group range falls in 224.0.0.0/24",
+ &grp.s_addr, from_str, ifp->name);
+ }
+ return false;
+ }
+
+ /*
+ * RFC 4604
+ * section 2.2.1
+ * EXCLUDE mode does not apply to SSM addresses, and an SSM-aware router
+ * will ignore MODE_IS_EXCLUDE and CHANGE_TO_EXCLUDE_MODE requests in
+ * the SSM range.
+ */
+ if (pim_is_grp_ssm(pim_ifp->pim, grp)) {
+ switch (rec_type) {
+ case IGMP_GRP_REC_TYPE_MODE_IS_EXCLUDE:
+ case IGMP_GRP_REC_TYPE_CHANGE_TO_EXCLUDE_MODE:
+ if (PIM_DEBUG_GM_PACKETS) {
+ zlog_debug(
+ "Ignoring IGMPv3 group record %pI4 from %s on %s exclude mode in SSM range",
+ &grp.s_addr, from_str, ifp->name);
+ }
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
+  Parse and process a received IGMPv3 membership report.
+
+  Validates the minimum size, checksum and configured IGMP version,
+  then walks the group records — bounds-checking each record header
+  and each source against the end of the message — and dispatches
+  every admissible record to its per-type handler.
+
+  Returns 0 on success, -1 on any validation/parse error.
+*/
+int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from,
+ const char *from_str, char *igmp_msg, int igmp_msg_len)
+{
+ int num_groups;
+ uint8_t *group_record;
+ uint8_t *report_pastend = (uint8_t *)igmp_msg + igmp_msg_len;
+ struct interface *ifp = igmp->interface;
+ struct pim_interface *pim_ifp = ifp->info;
+ int i;
+
+ /* mtrace-only sockets do not process reports */
+ if (igmp->mtrace_only)
+ return 0;
+
+ if (igmp_msg_len < IGMP_V3_MSG_MIN_SIZE) {
+ zlog_warn(
+ "Recv IGMP report v3 from %s on %s: size=%d shorter than minimum=%d",
+ from_str, ifp->name, igmp_msg_len,
+ IGMP_V3_MSG_MIN_SIZE);
+ return -1;
+ }
+
+ if (igmp_validate_checksum(igmp_msg, igmp_msg_len) == -1) {
+ zlog_warn(
+ "Recv IGMPv3 report from %s on %s with invalid checksum",
+ from_str, ifp->name);
+ return -1;
+ }
+
+ /* Collecting IGMP Rx stats */
+ igmp->igmp_stats.report_v3++;
+
+ if (pim_ifp->igmp_version == 2) {
+ zlog_warn(
+ "Received Version 3 packet but interface: %s is configured for version 2",
+ ifp->name);
+ return -1;
+ }
+
+ num_groups = ntohs(
+ *(uint16_t *)(igmp_msg + IGMP_V3_REPORT_NUMGROUPS_OFFSET));
+ if (num_groups < 1) {
+ zlog_warn(
+ "Recv IGMP report v3 from %s on %s: missing group records",
+ from_str, ifp->name);
+ return -1;
+ }
+
+ if (PIM_DEBUG_GM_PACKETS) {
+ zlog_debug(
+ "Recv IGMP report v3 from %s on %s: size=%d groups=%d",
+ from_str, ifp->name, igmp_msg_len, num_groups);
+ }
+
+ group_record = (uint8_t *)igmp_msg + IGMP_V3_REPORT_GROUPPRECORD_OFFSET;
+
+ /* Scan groups */
+ for (i = 0; i < num_groups; ++i) {
+ struct in_addr rec_group;
+ uint8_t *sources;
+ uint8_t *src;
+ int rec_type;
+ int rec_auxdatalen;
+ int rec_num_sources;
+ int j;
+
+ /* record header must fit inside the message */
+ if ((group_record + IGMP_V3_GROUP_RECORD_MIN_SIZE)
+ > report_pastend) {
+ zlog_warn(
+ "Recv IGMP report v3 from %s on %s: group record beyond report end",
+ from_str, ifp->name);
+ return -1;
+ }
+
+ rec_type = group_record[IGMP_V3_GROUP_RECORD_TYPE_OFFSET];
+ rec_auxdatalen =
+ group_record[IGMP_V3_GROUP_RECORD_AUXDATALEN_OFFSET];
+ rec_num_sources = ntohs(*(
+ uint16_t *)(group_record
+ + IGMP_V3_GROUP_RECORD_NUMSOURCES_OFFSET));
+
+ memcpy(&rec_group,
+ group_record + IGMP_V3_GROUP_RECORD_GROUP_OFFSET,
+ sizeof(struct in_addr));
+
+ if (PIM_DEBUG_GM_PACKETS) {
+ zlog_debug(
+ " Recv IGMP report v3 from %s on %s: record=%d type=%d auxdatalen=%d sources=%d group=%pI4",
+ from_str, ifp->name, i, rec_type,
+ rec_auxdatalen, rec_num_sources,
+ &rec_group);
+ }
+
+ /* Scan sources */
+
+ sources = group_record + IGMP_V3_GROUP_RECORD_SOURCE_OFFSET;
+
+ /* each source is 4 bytes; verify it lies within the report */
+ for (j = 0, src = sources; j < rec_num_sources; ++j, src += 4) {
+
+ if ((src + 4) > report_pastend) {
+ zlog_warn(
+ "Recv IGMP report v3 from %s on %s: group source beyond report end",
+ from_str, ifp->name);
+ return -1;
+ }
+
+ if (PIM_DEBUG_GM_PACKETS) {
+ char src_str[200];
+
+ if (!inet_ntop(AF_INET, src, src_str,
+ sizeof(src_str)))
+ snprintf(src_str, sizeof(src_str),
+ "<source?>");
+
+ zlog_debug(
+ " Recv IGMP report v3 from %s on %s: record=%d group=%pI4 source=%s",
+ from_str, ifp->name, i,
+ &rec_group, src_str);
+ }
+ } /* for (sources) */
+
+
+ /* dispatch the record to its per-type handler */
+ if (igmp_pkt_grp_addr_ok(ifp, from_str, rec_group, rec_type))
+ switch (rec_type) {
+ case IGMP_GRP_REC_TYPE_MODE_IS_INCLUDE:
+ igmpv3_report_isin(igmp, from, rec_group,
+ rec_num_sources,
+ (struct in_addr *)sources);
+ break;
+ case IGMP_GRP_REC_TYPE_MODE_IS_EXCLUDE:
+ igmpv3_report_isex(
+ igmp, from, rec_group, rec_num_sources,
+ (struct in_addr *)sources, 0);
+ break;
+ case IGMP_GRP_REC_TYPE_CHANGE_TO_INCLUDE_MODE:
+ igmpv3_report_toin(igmp, from, rec_group,
+ rec_num_sources,
+ (struct in_addr *)sources);
+ break;
+ case IGMP_GRP_REC_TYPE_CHANGE_TO_EXCLUDE_MODE:
+ igmpv3_report_toex(igmp, from, rec_group,
+ rec_num_sources,
+ (struct in_addr *)sources);
+ break;
+ case IGMP_GRP_REC_TYPE_ALLOW_NEW_SOURCES:
+ igmpv3_report_allow(igmp, from, rec_group,
+ rec_num_sources,
+ (struct in_addr *)sources);
+ break;
+ case IGMP_GRP_REC_TYPE_BLOCK_OLD_SOURCES:
+ igmpv3_report_block(igmp, from, rec_group,
+ rec_num_sources,
+ (struct in_addr *)sources);
+ break;
+ default:
+ zlog_warn(
+ "Recv IGMP report v3 from %s on %s: unknown record type: type=%d",
+ from_str, ifp->name, rec_type);
+ }
+
+ /* advance past fixed header, source list and aux data */
+ group_record +=
+ 8 + (rec_num_sources << 2) + (rec_auxdatalen << 2);
+
+ } /* for (group records) */
+
+ return 0;
+}
diff --git a/pimd/pim_igmpv3.h b/pimd/pim_igmpv3.h
new file mode 100644
index 0000000..43c7df4
--- /dev/null
+++ b/pimd/pim_igmpv3.h
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ *
+ * IGMPv3 (RFC 3376) message offsets, protocol timer macros and the
+ * public interface of pim_igmpv3.c.
+ */
+
+#ifndef PIM_IGMPV3_H
+#define PIM_IGMPV3_H
+
+#include <zebra.h>
+#include "if.h"
+
+#include "pim_igmp.h"
+
+/* Byte offsets into IGMPv3 query/report messages */
+#define IGMP_V3_CHECKSUM_OFFSET (2)
+#define IGMP_V3_REPORT_NUMGROUPS_OFFSET (6)
+/* NOTE(review): "GROUPPRECORD" spelling is historical; kept as-is
+ * because the name is used throughout pim_igmpv3.c */
+#define IGMP_V3_REPORT_GROUPPRECORD_OFFSET (8)
+#define IGMP_V3_NUMSOURCES_OFFSET (10)
+#define IGMP_V3_SOURCES_OFFSET (12)
+
+/* Group record types, RFC 3376 section 4.2.12 */
+#define IGMP_GRP_REC_TYPE_MODE_IS_INCLUDE (1)
+#define IGMP_GRP_REC_TYPE_MODE_IS_EXCLUDE (2)
+#define IGMP_GRP_REC_TYPE_CHANGE_TO_INCLUDE_MODE (3)
+#define IGMP_GRP_REC_TYPE_CHANGE_TO_EXCLUDE_MODE (4)
+#define IGMP_GRP_REC_TYPE_ALLOW_NEW_SOURCES (5)
+#define IGMP_GRP_REC_TYPE_BLOCK_OLD_SOURCES (6)
+
+/* GMI: Group Membership Interval */
+#define PIM_IGMP_GMI_MSEC(qrv,qqi,qri_dsec) ((qrv) * (1000 * (qqi)) + 100 * (qri_dsec))
+
+/* OQPI: Other Querier Present Interval */
+#define PIM_IGMP_OQPI_MSEC(qrv,qqi,qri_dsec) ((qrv) * (1000 * (qqi)) + 100 * ((qri_dsec) >> 1))
+
+/* SQI: Startup Query Interval (one quarter of QI, minimum 1 sec) */
+#define PIM_IGMP_SQI(qi) (((qi) < 4) ? 1 : ((qi) >> 2))
+
+/* LMQT: Last Member Query Time */
+#define PIM_IGMP_LMQT_MSEC(lmqi_dsec, lmqc) ((lmqc) * (100 * (lmqi_dsec)))
+
+/* OHPI: Older Host Present Interval */
+#define PIM_IGMP_OHPI_DSEC(qrv,qqi,qri_dsec) ((qrv) * (10 * (qqi)) + (qri_dsec))
+
+/* Real implementations exist only for IPv4 builds; IPv6 builds get
+ * inline no-op stubs below. */
+#if PIM_IPV == 4
+void igmp_group_reset_gmi(struct gm_group *group);
+void igmp_source_reset_gmi(struct gm_group *group, struct gm_source *source);
+
+void igmp_source_free(struct gm_source *source);
+void igmp_source_delete(struct gm_source *source);
+void igmp_source_delete_expired(struct list *source_list);
+
+void igmpv3_report_isin(struct gm_sock *igmp, struct in_addr from,
+ struct in_addr group_addr, int num_sources,
+ struct in_addr *sources);
+void igmpv3_report_isex(struct gm_sock *igmp, struct in_addr from,
+ struct in_addr group_addr, int num_sources,
+ struct in_addr *sources, int from_igmp_v2_report);
+void igmpv3_report_toin(struct gm_sock *igmp, struct in_addr from,
+ struct in_addr group_addr, int num_sources,
+ struct in_addr *sources);
+void igmpv3_report_toex(struct gm_sock *igmp, struct in_addr from,
+ struct in_addr group_addr, int num_sources,
+ struct in_addr *sources);
+void igmpv3_report_allow(struct gm_sock *igmp, struct in_addr from,
+ struct in_addr group_addr, int num_sources,
+ struct in_addr *sources);
+void igmpv3_report_block(struct gm_sock *igmp, struct in_addr from,
+ struct in_addr group_addr, int num_sources,
+ struct in_addr *sources);
+
+void igmp_group_timer_lower_to_lmqt(struct gm_group *group);
+void igmp_source_timer_lower_to_lmqt(struct gm_source *source);
+
+struct gm_source *igmp_find_source_by_addr(struct gm_group *group,
+ struct in_addr src_addr);
+
+void igmp_v3_send_query(struct gm_group *group, int fd, const char *ifname,
+ char *query_buf, int query_buf_size, int num_sources,
+ struct in_addr dst_addr, struct in_addr group_addr,
+ int query_max_response_time_dsec, uint8_t s_flag,
+ uint8_t querier_robustness_variable,
+ uint16_t querier_query_interval);
+
+void igmp_v3_recv_query(struct gm_sock *igmp, const char *from_str,
+ char *igmp_msg);
+
+int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from,
+ const char *from_str, char *igmp_msg, int igmp_msg_len);
+
+#else /* PIM_IPV != 4 */
+static inline void igmp_group_reset_gmi(struct gm_group *group)
+{
+}
+
+
+static inline void igmp_source_reset_gmi(struct gm_group *group,
+ struct gm_source *source)
+{
+}
+#endif
+
+#endif /* PIM_IGMPV3_H */
diff --git a/pimd/pim_instance.c b/pimd/pim_instance.c
new file mode 100644
index 0000000..b3410d1
--- /dev/null
+++ b/pimd/pim_instance.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for FRR - PIM Instance
+ * Copyright (C) 2017 Cumulus Networks, Inc.
+ * Donald Sharp
+ */
+#include <zebra.h>
+
+#include "hash.h"
+#include "vrf.h"
+#include "lib_errors.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_ssm.h"
+#include "pim_rpf.h"
+#include "pim_rp.h"
+#include "pim_mroute.h"
+#include "pim_oil.h"
+#include "pim_static.h"
+#include "pim_ssmpingd.h"
+#include "pim_vty.h"
+#include "pim_bsm.h"
+#include "pim_mlag.h"
+#include "pim_sock.h"
+
+/*
+  Tear down a pim_instance and free it. The cleanup order roughly
+  mirrors pim_instance_init() in reverse; do not reorder these calls
+  without checking the dependencies between the subsystems.
+*/
+static void pim_instance_terminate(struct pim_instance *pim)
+{
+ pim_vxlan_exit(pim);
+
+ if (pim->ssm_info) {
+ pim_ssm_terminate(pim->ssm_info);
+ pim->ssm_info = NULL;
+ }
+
+ if (pim->static_routes)
+ list_delete(&pim->static_routes);
+
+ pim_instance_mlag_terminate(pim);
+
+ pim_upstream_terminate(pim);
+
+ pim_rp_free(pim);
+
+ pim_bsm_proc_free(pim);
+
+ /* Traverse and cleanup rpf_hash */
+ hash_clean_and_free(&pim->rpf_hash, (void *)pim_rp_list_hash_clean);
+
+ pim_if_terminate(pim);
+
+ pim_oil_terminate(pim);
+
+ pim_msdp_exit(pim);
+
+ close(pim->reg_sock);
+
+ pim_mroute_socket_disable(pim);
+
+ XFREE(MTYPE_PIM_PLIST_NAME, pim->spt.plist);
+ XFREE(MTYPE_PIM_PLIST_NAME, pim->register_plist);
+
+ pim->vrf = NULL;
+ XFREE(MTYPE_PIM_PIM_INSTANCE, pim);
+}
+
+/*
+  Allocate and initialize a pim_instance for the given VRF: defaults,
+  RPF hash, SSM state, static-route list, and the RP/BSM/OIL/upstream
+  and MLAG subsystems. Returns the new instance.
+*/
+static struct pim_instance *pim_instance_init(struct vrf *vrf)
+{
+ struct pim_instance *pim;
+ char hash_name[64];
+
+ pim = XCALLOC(MTYPE_PIM_PIM_INSTANCE, sizeof(struct pim_instance));
+
+ pim_if_init(pim);
+
+ pim->mcast_if_count = 0;
+ pim->keep_alive_time = PIM_KEEPALIVE_PERIOD;
+ pim->rp_keep_alive_time = PIM_RP_KEEPALIVE_PERIOD;
+
+ pim->ecmp_enable = false;
+ pim->ecmp_rebalance_enable = false;
+
+ pim->vrf = vrf;
+
+ pim->spt.switchover = PIM_SPT_IMMEDIATE;
+ pim->spt.plist = NULL;
+
+ pim_msdp_init(pim, router->master);
+ pim_vxlan_init(pim);
+
+ snprintf(hash_name, sizeof(hash_name), "PIM %s RPF Hash", vrf->name);
+ pim->rpf_hash = hash_create_size(256, pim_rpf_hash_key, pim_rpf_equal,
+ hash_name);
+
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: NHT rpf hash init ", __func__);
+
+ pim->ssm_info = pim_ssm_init();
+
+ pim->static_routes = list_new();
+ pim->static_routes->del = (void (*)(void *))pim_static_route_free;
+
+ pim->send_v6_secondary = 1;
+
+ /* -1: GM socket not open yet */
+ pim->gm_socket = -1;
+
+ pim_rp_init(pim);
+
+ pim_bsm_proc_init(pim);
+
+ pim_oil_init(pim);
+
+ pim_upstream_init(pim);
+
+ pim_instance_mlag_init(pim);
+
+ pim->last_route_change_time = -1;
+
+ pim->reg_sock = pim_reg_sock();
+ if (pim->reg_sock < 0)
+ /* NOTE(review): assert(0) is compiled out under NDEBUG,
+ * leaving reg_sock invalid — confirm intended behavior */
+ assert(0);
+
+ /* MSDP global timer defaults. */
+ pim->msdp.hold_time = PIM_MSDP_PEER_HOLD_TIME;
+ pim->msdp.keep_alive = PIM_MSDP_PEER_KA_TIME;
+ pim->msdp.connection_retry = PIM_MSDP_PEER_CONNECT_RETRY_TIME;
+
+ return pim;
+}
+
+/*
+  Look up the pim_instance attached to a VRF id; NULL when the VRF
+  does not exist (or has no instance attached).
+*/
+struct pim_instance *pim_get_pim_instance(vrf_id_t vrf_id)
+{
+ struct vrf *vrf = vrf_lookup_by_id(vrf_id);
+
+ return vrf ? vrf->info : NULL;
+}
+
+/* VRF creation hook: build a pim_instance and attach it to the VRF. */
+static int pim_vrf_new(struct vrf *vrf)
+{
+ struct pim_instance *pim;
+
+ pim = pim_instance_init(vrf);
+ zlog_debug("VRF Created: %s(%u)", vrf->name, vrf->vrf_id);
+ vrf->info = pim;
+
+ pim_ssmpingd_init(pim);
+
+ return 0;
+}
+
+/* VRF deletion hook: detach and destroy the pim_instance, if any. */
+static int pim_vrf_delete(struct vrf *vrf)
+{
+ struct pim_instance *pim = vrf->info;
+
+ /* nothing to do if this VRF never had a pim instance */
+ if (!pim)
+ return 0;
+
+ zlog_debug("VRF Deletion: %s(%u)", vrf->name, vrf->vrf_id);
+
+ /* stop ssmpingd first, then free the whole instance */
+ pim_ssmpingd_destroy(pim);
+ pim_instance_terminate(pim);
+ vrf->info = NULL;
+
+ return 0;
+}
+
+/*
+ * VRF enable hook: activate the instance created in pim_vrf_new() —
+ * open the mroute socket and create the pimreg device once any
+ * PIM-configured interface exists in this VRF.
+ */
+static int pim_vrf_enable(struct vrf *vrf)
+{
+ struct pim_instance *pim = vrf->info;
+ struct interface *ifp;
+
+ zlog_debug("%s: for %s %u", __func__, vrf->name, vrf->vrf_id);
+
+ pim_mroute_socket_enable(pim);
+
+ /* create pimreg as soon as one configured interface is found */
+ FOR_ALL_INTERFACES (vrf, ifp) {
+ if (ifp->info) {
+ pim_if_create_pimreg(pim);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* VRF disable hook: intentionally a no-op. */
+static int pim_vrf_disable(struct vrf *vrf)
+{
+ /* Note: This is a callback, the VRF will be deleted by the caller;
+ * pim_vrf_delete() performs the actual cleanup. */
+ return 0;
+}
+
+/*
+  Emit the running PIM configuration for every VRF; non-default VRFs
+  are wrapped in a "vrf NAME" / "exit-vrf" frame.
+*/
+static int pim_vrf_config_write(struct vty *vty)
+{
+ struct vrf *vrf;
+
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ struct pim_instance *pim = vrf->info;
+ bool framed = vrf->vrf_id != VRF_DEFAULT;
+
+ if (!pim)
+ continue;
+
+ if (framed)
+ vty_frame(vty, "vrf %s\n", vrf->name);
+
+ pim_global_config_write_worker(pim, vty);
+
+ if (framed)
+ vty_endframe(vty, "exit-vrf\n!\n");
+ }
+
+ return 0;
+}
+
+/* Register pimd's VRF lifecycle callbacks and config writer. */
+void pim_vrf_init(void)
+{
+ vrf_init(pim_vrf_new, pim_vrf_enable, pim_vrf_disable, pim_vrf_delete);
+
+ vrf_cmd_init(pim_vrf_config_write);
+}
+
+/*
+  Daemon shutdown: destroy every VRF's pim_instance (same cleanup as
+  pim_vrf_delete performs per-VRF), then terminate the VRF subsystem.
+*/
+void pim_vrf_terminate(void)
+{
+ struct vrf *vrf;
+
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ struct pim_instance *pim;
+
+ pim = vrf->info;
+ if (!pim)
+ continue;
+
+ pim_ssmpingd_destroy(pim);
+ pim_instance_terminate(pim);
+
+ vrf->info = NULL;
+ }
+
+ vrf_terminate();
+}
diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h
new file mode 100644
index 0000000..11577ae
--- /dev/null
+++ b/pimd/pim_instance.h
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for FRR - PIM Instance
+ * Copyright (C) 2017 Cumulus Networks, Inc.
+ * Donald Sharp
+ */
+#ifndef __PIM_INSTANCE_H__
+#define __PIM_INSTANCE_H__
+
+#include <mlag.h>
+
+#include "pim_str.h"
+#include "pim_msdp.h"
+#include "pim_assert.h"
+#include "pim_bsm.h"
+#include "pim_vxlan_instance.h"
+#include "pim_oil.h"
+#include "pim_upstream.h"
+#include "pim_mroute.h"
+
+enum pim_spt_switchover {
+ PIM_SPT_IMMEDIATE,
+ PIM_SPT_INFINITY,
+};
+
+/* stats for updates rxed from the MLAG component during the life of a
+ * session
+ */
+struct pim_mlag_msg_stats {
+ uint32_t mroute_add_rx;
+ uint32_t mroute_add_tx;
+ uint32_t mroute_del_rx;
+ uint32_t mroute_del_tx;
+ uint32_t mlag_status_updates;
+ uint32_t pim_status_updates;
+ uint32_t vxlan_updates;
+ uint32_t peer_zebra_status_updates;
+};
+
+struct pim_mlag_stats {
+ /* message stats are reset when the connection to mlagd flaps */
+ struct pim_mlag_msg_stats msg;
+ uint32_t mlagd_session_downs;
+ uint32_t peer_session_downs;
+ uint32_t peer_zebra_downs;
+};
+
+enum pim_mlag_flags {
+ PIM_MLAGF_NONE = 0,
+ /* connection to the local MLAG daemon is up */
+ PIM_MLAGF_LOCAL_CONN_UP = (1 << 0),
+ /* connection to the MLAG daemon on the peer switch is up. note
+ * that there is no direct connection between FRR and the peer MLAG
+ * daemon. this is just a peer-session status provided by the local
+ * MLAG daemon.
+ */
+ PIM_MLAGF_PEER_CONN_UP = (1 << 1),
+ /* status update rxed from the local daemon */
+ PIM_MLAGF_STATUS_RXED = (1 << 2),
+ /* initial dump of data done post peerlink flap */
+ PIM_MLAGF_PEER_REPLAY_DONE = (1 << 3),
+ /* zebra is up on the peer */
+ PIM_MLAGF_PEER_ZEBRA_UP = (1 << 4)
+};
+
+struct pim_router {
+ struct event_loop *master;
+
+ uint32_t debugs;
+
+ int t_periodic;
+ struct pim_assert_metric infinite_assert_metric;
+ long rpf_cache_refresh_delay_msec;
+ uint32_t register_suppress_time;
+ int packet_process;
+ uint32_t register_probe_time;
+ uint16_t multipath;
+
+ /*
+ * What is the default vrf that we work in
+ */
+ vrf_id_t vrf_id;
+
+ enum mlag_role mlag_role;
+ uint32_t pim_mlag_intf_cnt;
+ /* if true we have registered with MLAG */
+ bool mlag_process_register;
+ /* if true local MLAG process reported that it is connected
+ * with the peer MLAG process
+ */
+ bool connected_to_mlag;
+	/* Holds the client data (unencoded) that needs to be pushed to MCLAGD */
+ struct stream_fifo *mlag_fifo;
+ struct stream *mlag_stream;
+ struct event *zpthread_mlag_write;
+ struct in_addr anycast_vtep_ip;
+ struct in_addr local_vtep_ip;
+ struct pim_mlag_stats mlag_stats;
+ enum pim_mlag_flags mlag_flags;
+ char peerlink_rif[INTERFACE_NAMSIZ];
+ struct interface *peerlink_rif_p;
+};
+
+/* Per VRF PIM DB */
+struct pim_instance {
+ // vrf_id_t vrf_id;
+ struct vrf *vrf;
+
+ struct {
+ enum pim_spt_switchover switchover;
+ char *plist;
+ } spt;
+
+ /* The name of the register-accept prefix-list */
+ char *register_plist;
+
+ struct hash *rpf_hash;
+
+ void *ssm_info; /* per-vrf SSM configuration */
+
+ int send_v6_secondary;
+
+ struct event *thread;
+ int mroute_socket;
+ int reg_sock; /* Socket to send register msg */
+ int64_t mroute_socket_creation;
+ int64_t mroute_add_events;
+ int64_t mroute_add_last;
+ int64_t mroute_del_events;
+ int64_t mroute_del_last;
+
+ struct interface *regiface;
+
+ // List of static routes;
+ struct list *static_routes;
+
+ // Upstream vrf specific information
+ struct rb_pim_upstream_head upstream_head;
+ struct timer_wheel *upstream_sg_wheel;
+
+ /*
+ * RP information
+ */
+ struct list *rp_list;
+ struct route_table *rp_table;
+
+ int iface_vif_index[MAXVIFS];
+ int mcast_if_count;
+
+ struct rb_pim_oil_head channel_oil_head;
+
+ struct pim_msdp msdp;
+ struct pim_vxlan_instance vxlan;
+
+ struct list *ssmpingd_list;
+ pim_addr ssmpingd_group_addr;
+
+ unsigned int gm_socket_if_count;
+ int gm_socket;
+ struct event *t_gm_recv;
+
+ unsigned int gm_group_count;
+ unsigned int gm_watermark_limit;
+ unsigned int keep_alive_time;
+ unsigned int rp_keep_alive_time;
+
+ bool ecmp_enable;
+ bool ecmp_rebalance_enable;
+ /* No. of Dual active I/fs in pim_instance */
+ uint32_t inst_mlag_intf_cnt;
+
+ /* Bsm related */
+ struct bsm_scope global_scope;
+ uint64_t bsm_rcvd;
+ uint64_t bsm_sent;
+ uint64_t bsm_dropped;
+
+ /* If we need to rescan all our upstreams */
+ struct event *rpf_cache_refresher;
+ int64_t rpf_cache_refresh_requests;
+ int64_t rpf_cache_refresh_events;
+ int64_t rpf_cache_refresh_last;
+ int64_t scan_oil_events;
+ int64_t scan_oil_last;
+
+ int64_t nexthop_lookups;
+ int64_t nexthop_lookups_avoided;
+ int64_t last_route_change_time;
+
+ uint64_t gm_rx_drop_sys;
+};
+
+void pim_vrf_init(void);
+void pim_vrf_terminate(void);
+
+extern struct pim_router *router;
+
+struct pim_instance *pim_get_pim_instance(vrf_id_t vrf_id);
+
+#endif
diff --git a/pimd/pim_int.c b/pimd/pim_int.c
new file mode 100644
index 0000000..6c98a80
--- /dev/null
+++ b/pimd/pim_int.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include <string.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include "pim_int.h"
+
+uint32_t pim_read_uint32_host(const uint8_t *buf)
+{
+ uint32_t val;
+ memcpy(&val, buf, sizeof(val));
+ /* val is in netorder */
+ val = ntohl(val);
+ /* val is in hostorder */
+ return val;
+}
+
+void pim_write_uint32(uint8_t *buf, uint32_t val_host)
+{
+ /* val_host is in host order */
+ val_host = htonl(val_host);
+ /* val_host is in netorder */
+ memcpy(buf, &val_host, sizeof(val_host));
+}
diff --git a/pimd/pim_int.h b/pimd/pim_int.h
new file mode 100644
index 0000000..9e38672
--- /dev/null
+++ b/pimd/pim_int.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_INT_H
+#define PIM_INT_H
+
+#include <stdint.h>
+
+uint32_t pim_read_uint32_host(const uint8_t *buf);
+void pim_write_uint32(uint8_t *buf, uint32_t val_host);
+
+#endif /* PIM_INT_H */
diff --git a/pimd/pim_join.c b/pimd/pim_join.c
new file mode 100644
index 0000000..671f7a3
--- /dev/null
+++ b/pimd/pim_join.c
@@ -0,0 +1,604 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "prefix.h"
+#include "if.h"
+#include "vty.h"
+#include "plist.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_str.h"
+#include "pim_tlv.h"
+#include "pim_msg.h"
+#include "pim_pim.h"
+#include "pim_join.h"
+#include "pim_oil.h"
+#include "pim_iface.h"
+#include "pim_hello.h"
+#include "pim_ifchannel.h"
+#include "pim_rpf.h"
+#include "pim_rp.h"
+#include "pim_jp_agg.h"
+#include "pim_util.h"
+#include "pim_ssm.h"
+
+static void on_trace(const char *label, struct interface *ifp, pim_addr src)
+{
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: from %pPA on %s", label, &src, ifp->name);
+}
+
+static void recv_join(struct interface *ifp, struct pim_neighbor *neigh,
+ uint16_t holdtime, pim_addr upstream, pim_sgaddr *sg,
+ uint8_t source_flags)
+{
+ struct pim_interface *pim_ifp = NULL;
+
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug(
+ "%s: join (S,G)=%pSG rpt=%d wc=%d upstream=%pPAs holdtime=%d from %pPA on %s",
+ __func__, sg, !!(source_flags & PIM_RPT_BIT_MASK),
+ !!(source_flags & PIM_WILDCARD_BIT_MASK), &upstream,
+ holdtime, &neigh->source_addr, ifp->name);
+
+ pim_ifp = ifp->info;
+ assert(pim_ifp);
+
+ ++pim_ifp->pim_ifstat_join_recv;
+
+ /*
+	 * If both the RPT and WC bits are set, this is a (*,G)
+	 * join and the encoded source address is the RP
+ */
+ if (CHECK_FLAG(source_flags, PIM_WILDCARD_BIT_MASK)) {
+ /* As per RFC 7761 Section 4.9.1:
+ * The RPT (or Rendezvous Point Tree) bit is a 1-bit value for
+ * use with PIM Join/Prune messages (see Section 4.9.5.1). If
+ * the WC bit is 1, the RPT bit MUST be 1.
+ */
+ if (!CHECK_FLAG(source_flags, PIM_RPT_BIT_MASK)) {
+ if (PIM_DEBUG_PIM_J_P)
+ zlog_debug(
+ "Discarding (*,G)=%pSG join since WC bit is set but RPT bit is unset",
+ sg);
+
+ return;
+ }
+
+ struct pim_rpf *rp = RP(pim_ifp->pim, sg->grp);
+ pim_addr rpf_addr;
+
+ if (!rp) {
+ zlog_warn("%s: Lookup of RP failed for %pSG", __func__,
+ sg);
+ return;
+ }
+ /*
+ * If the RP sent in the message is not
+ * our RP for the group, drop the message
+ */
+ rpf_addr = rp->rpf_addr;
+ if (pim_addr_cmp(sg->src, rpf_addr)) {
+ zlog_warn(
+ "%s: Specified RP(%pPAs) in join is different than our configured RP(%pPAs)",
+ __func__, &sg->src, &rpf_addr);
+ return;
+ }
+
+ if (pim_is_grp_ssm(pim_ifp->pim, sg->grp)) {
+ zlog_warn(
+ "%s: Specified Group(%pPA) in join is now in SSM, not allowed to create PIM state",
+ __func__, &sg->grp);
+ return;
+ }
+
+ sg->src = PIMADDR_ANY;
+ }
+
+ /* Restart join expiry timer */
+ pim_ifchannel_join_add(ifp, neigh->source_addr, upstream, sg,
+ source_flags, holdtime);
+}
+
+static void recv_prune(struct interface *ifp, struct pim_neighbor *neigh,
+ uint16_t holdtime, pim_addr upstream, pim_sgaddr *sg,
+ uint8_t source_flags)
+{
+ struct pim_interface *pim_ifp = NULL;
+
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug(
+ "%s: prune (S,G)=%pSG rpt=%d wc=%d upstream=%pPAs holdtime=%d from %pPA on %s",
+ __func__, sg, source_flags & PIM_RPT_BIT_MASK,
+ source_flags & PIM_WILDCARD_BIT_MASK, &upstream,
+ holdtime, &neigh->source_addr, ifp->name);
+
+ pim_ifp = ifp->info;
+ assert(pim_ifp);
+
+ ++pim_ifp->pim_ifstat_prune_recv;
+
+ if (CHECK_FLAG(source_flags, PIM_WILDCARD_BIT_MASK)) {
+ /* As per RFC 7761 Section 4.9.1:
+ * The RPT (or Rendezvous Point Tree) bit is a 1-bit value for
+ * use with PIM Join/Prune messages (see Section 4.9.5.1). If
+ * the WC bit is 1, the RPT bit MUST be 1.
+ */
+ if (!CHECK_FLAG(source_flags, PIM_RPT_BIT_MASK)) {
+ if (PIM_DEBUG_PIM_J_P)
+ zlog_debug(
+ "Discarding (*,G)=%pSG prune since WC bit is set but RPT bit is unset",
+ sg);
+
+ return;
+ }
+
+ /*
+ * RFC 4601 Section 4.5.2:
+ * Received Prune(*,G) messages are processed even if the
+ * RP in the message does not match RP(G).
+ */
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: Prune received with RP(%pPAs) for %pSG",
+ __func__, &sg->src, sg);
+
+ sg->src = PIMADDR_ANY;
+ }
+
+ pim_ifchannel_prune(ifp, upstream, sg, source_flags, holdtime);
+}
+
+int pim_joinprune_recv(struct interface *ifp, struct pim_neighbor *neigh,
+ pim_addr src_addr, uint8_t *tlv_buf, int tlv_buf_size)
+{
+ pim_addr msg_upstream_addr;
+ bool wrong_af = false;
+ struct pim_interface *pim_ifp;
+ uint8_t msg_num_groups;
+ uint16_t msg_holdtime;
+ int addr_offset;
+ uint8_t *buf;
+ uint8_t *pastend;
+ int remain;
+ int group;
+ struct pim_ifchannel *child = NULL;
+ struct listnode *ch_node, *nch_node;
+
+ buf = tlv_buf;
+ pastend = tlv_buf + tlv_buf_size;
+ pim_ifp = ifp->info;
+
+ if (pim_ifp->pim_passive_enable) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "skip receiving PIM message on passive interface %s",
+ ifp->name);
+ return 0;
+ }
+
+ /*
+ Parse ucast addr
+ */
+ addr_offset = pim_parse_addr_ucast(&msg_upstream_addr, buf,
+ pastend - buf, &wrong_af);
+ if (addr_offset < 1) {
+ zlog_warn("%s: pim_parse_addr_ucast() failure: from %pPA on %s",
+ __func__, &src_addr, ifp->name);
+ return -1;
+ }
+ buf += addr_offset;
+
+ /*
+ Check upstream address family
+ */
+ if (wrong_af) {
+ zlog_warn(
+ "%s: ignoring join/prune directed to unexpected addr family from %pPA on %s",
+ __func__, &src_addr, ifp->name);
+ return -2;
+ }
+
+ remain = pastend - buf;
+ if (remain < 4) {
+ zlog_warn(
+ "%s: short join/prune message buffer for group list: size=%d minimum=%d from %pPA on %s",
+ __func__, remain, 4, &src_addr, ifp->name);
+ return -4;
+ }
+
+ ++buf; /* skip reserved byte */
+ msg_num_groups = *(const uint8_t *)buf;
+ ++buf;
+ msg_holdtime = ntohs(*(const uint16_t *)buf);
+ ++buf;
+ ++buf;
+
+ if (PIM_DEBUG_PIM_J_P)
+ zlog_debug(
+ "%s: join/prune upstream=%pPAs groups=%d holdtime=%d from %pPA on %s",
+ __func__, &msg_upstream_addr, msg_num_groups,
+ msg_holdtime, &src_addr, ifp->name);
+
+ /* Scan groups */
+ for (group = 0; group < msg_num_groups; ++group) {
+ pim_sgaddr sg;
+ uint8_t msg_source_flags;
+ uint16_t msg_num_joined_sources;
+ uint16_t msg_num_pruned_sources;
+ int source;
+ struct pim_ifchannel *starg_ch = NULL, *sg_ch = NULL;
+ bool filtered = false;
+
+ memset(&sg, 0, sizeof(sg));
+ addr_offset = pim_parse_addr_group(&sg, buf, pastend - buf);
+ if (addr_offset < 1) {
+ return -5;
+ }
+ buf += addr_offset;
+
+ remain = pastend - buf;
+ if (remain < 4) {
+ zlog_warn(
+ "%s: short join/prune buffer for source list: size=%d minimum=%d from %pPA on %s",
+ __func__, remain, 4, &src_addr, ifp->name);
+ return -6;
+ }
+
+ msg_num_joined_sources = ntohs(*(const uint16_t *)buf);
+ buf += 2;
+ msg_num_pruned_sources = ntohs(*(const uint16_t *)buf);
+ buf += 2;
+
+ if (PIM_DEBUG_PIM_J_P)
+ zlog_debug(
+ "%s: join/prune upstream=%pPAs group=%pPA/32 join_src=%d prune_src=%d from %pPA on %s",
+ __func__, &msg_upstream_addr, &sg.grp,
+ msg_num_joined_sources, msg_num_pruned_sources,
+ &src_addr, ifp->name);
+
+ /* boundary check */
+ filtered = pim_is_group_filtered(pim_ifp, &sg.grp);
+
+ /* Scan joined sources */
+ for (source = 0; source < msg_num_joined_sources; ++source) {
+ addr_offset = pim_parse_addr_source(
+ &sg, &msg_source_flags, buf, pastend - buf);
+ if (addr_offset < 1) {
+ return -7;
+ }
+
+ buf += addr_offset;
+
+ /* if we are filtering this group, skip the join */
+ if (filtered)
+ continue;
+
+ recv_join(ifp, neigh, msg_holdtime, msg_upstream_addr,
+ &sg, msg_source_flags);
+
+ if (pim_addr_is_any(sg.src)) {
+ starg_ch = pim_ifchannel_find(ifp, &sg);
+ if (starg_ch)
+ pim_ifchannel_set_star_g_join_state(
+ starg_ch, 0, 1);
+ }
+ }
+
+ /* Scan pruned sources */
+ for (source = 0; source < msg_num_pruned_sources; ++source) {
+ addr_offset = pim_parse_addr_source(
+ &sg, &msg_source_flags, buf, pastend - buf);
+ if (addr_offset < 1) {
+ return -8;
+ }
+
+ buf += addr_offset;
+
+ /* if we are filtering this group, skip the prune */
+ if (filtered)
+ continue;
+
+ recv_prune(ifp, neigh, msg_holdtime, msg_upstream_addr,
+ &sg, msg_source_flags);
+ /*
+ * So if we are receiving a S,G,RPT prune
+ * before we have any data for that S,G
+ * We need to retrieve the sg_ch after
+ * we parse the prune.
+ */
+ sg_ch = pim_ifchannel_find(ifp, &sg);
+
+ if (!sg_ch)
+ continue;
+
+ /* (*,G) prune received */
+ for (ALL_LIST_ELEMENTS(sg_ch->sources, ch_node,
+ nch_node, child)) {
+ if (PIM_IF_FLAG_TEST_S_G_RPT(child->flags)) {
+ if (child->ifjoin_state
+ == PIM_IFJOIN_PRUNE_PENDING_TMP)
+ EVENT_OFF(
+ child->t_ifjoin_prune_pending_timer);
+ EVENT_OFF(child->t_ifjoin_expiry_timer);
+ PIM_IF_FLAG_UNSET_S_G_RPT(child->flags);
+ child->ifjoin_state = PIM_IFJOIN_NOINFO;
+ delete_on_noinfo(child);
+ }
+ }
+
+ /* Received SG-RPT Prune delete oif from specific S,G */
+ if (starg_ch && (msg_source_flags & PIM_RPT_BIT_MASK)
+ && !(msg_source_flags & PIM_WILDCARD_BIT_MASK)) {
+ struct pim_upstream *up = sg_ch->upstream;
+ PIM_IF_FLAG_SET_S_G_RPT(sg_ch->flags);
+ if (up) {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug(
+ "%s: SGRpt flag is set, del inherit oif from up %s",
+ __func__, up->sg_str);
+ pim_channel_del_inherited_oif(
+ up->channel_oil,
+ starg_ch->interface,
+ __func__);
+ }
+ }
+ }
+ if (starg_ch && !filtered)
+ pim_ifchannel_set_star_g_join_state(starg_ch, 1, 0);
+ starg_ch = NULL;
+ } /* scan groups */
+
+ return 0;
+}
+
+/*
+ * J/P Message Format
+ *
+ * While the RFC clearly states that this is 32 bits wide, it
+ * is cheating. These fields:
+ * Encoded-Unicast format (6 bytes MIN)
+ * Encoded-Group format (8 bytes MIN)
+ * Encoded-Source format (8 bytes MIN)
+ * are *not* 32 bits wide.
+ *
+ * Nor does the RFC explicitly call out the size for:
+ * Reserved (1 byte)
+ * Num Groups (1 byte)
+ * Holdtime (2 bytes)
+ * Number of Joined Sources (2 bytes)
+ * Number of Pruned Sources (2 bytes)
+ *
+ * This leads to a misleading representation on a casual
+ * reading and invites wrong assumptions. Be careful!
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |PIM Ver| Type | Reserved | Checksum |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Upstream Neighbor Address (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | Num groups | Holdtime |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Multicast Group Address 1 (Encoded-Group format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Number of Joined Sources | Number of Pruned Sources |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Joined Source Address 1 (Encoded-Source format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . |
+ * | . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Joined Source Address n (Encoded-Source format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Pruned Source Address 1 (Encoded-Source format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . |
+ * | . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Pruned Source Address n (Encoded-Source format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Multicast Group Address m (Encoded-Group format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Number of Joined Sources | Number of Pruned Sources |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Joined Source Address 1 (Encoded-Source format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . |
+ * | . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Joined Source Address n (Encoded-Source format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Pruned Source Address 1 (Encoded-Source format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . |
+ * | . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Pruned Source Address n (Encoded-Source format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
+{
+ struct pim_jp_agg_group *group;
+ struct pim_interface *pim_ifp = NULL;
+ struct pim_jp_groups *grp = NULL;
+ struct pim_jp *msg = NULL;
+ struct listnode *node, *nnode;
+ uint8_t pim_msg[10000];
+ uint8_t *curr_ptr = pim_msg;
+ bool new_packet = true;
+ size_t packet_left = 0;
+ size_t packet_size = 0;
+ size_t group_size = 0;
+
+ if (rpf->source_nexthop.interface)
+ pim_ifp = rpf->source_nexthop.interface->info;
+ else {
+ zlog_warn("%s: RPF interface is not present", __func__);
+ return -1;
+ }
+
+
+ on_trace(__func__, rpf->source_nexthop.interface, rpf->rpf_addr);
+
+ if (!pim_ifp) {
+ zlog_warn("%s: multicast not enabled on interface %s", __func__,
+ rpf->source_nexthop.interface->name);
+ return -1;
+ }
+
+ if (pim_addr_is_any(rpf->rpf_addr)) {
+ if (PIM_DEBUG_PIM_J_P)
+ zlog_debug(
+ "%s: upstream=%pPA is myself on interface %s",
+ __func__, &rpf->rpf_addr,
+ rpf->source_nexthop.interface->name);
+ return 0;
+ }
+
+ /*
+ RFC 4601: 4.3.1. Sending Hello Messages
+
+ Thus, if a router needs to send a Join/Prune or Assert message on
+ an interface on which it has not yet sent a Hello message with the
+ currently configured IP address, then it MUST immediately send the
+ relevant Hello message without waiting for the Hello Timer to
+ expire, followed by the Join/Prune or Assert message.
+ */
+ pim_hello_require(rpf->source_nexthop.interface);
+
+ for (ALL_LIST_ELEMENTS(groups, node, nnode, group)) {
+ if (new_packet) {
+ msg = (struct pim_jp *)pim_msg;
+
+ memset(msg, 0, sizeof(*msg));
+
+ pim_msg_addr_encode_ucast((uint8_t *)&msg->addr,
+ rpf->rpf_addr);
+ msg->reserved = 0;
+ msg->holdtime = htons(PIM_JP_HOLDTIME);
+
+ new_packet = false;
+
+ grp = &msg->groups[0];
+ curr_ptr = (uint8_t *)grp;
+ packet_size = sizeof(struct pim_msg_header);
+ packet_size += sizeof(pim_encoded_unicast);
+ packet_size +=
+ 4; // reserved (1) + groups (1) + holdtime (2)
+
+ packet_left = rpf->source_nexthop.interface->mtu - 24;
+ packet_left -= packet_size;
+ }
+ if (PIM_DEBUG_PIM_J_P)
+ zlog_debug(
+ "%s: sending (G)=%pPAs to upstream=%pPA on interface %s",
+ __func__, &group->group, &rpf->rpf_addr,
+ rpf->source_nexthop.interface->name);
+
+ group_size = pim_msg_get_jp_group_size(group->sources);
+ if (group_size > packet_left) {
+ pim_msg_build_header(pim_ifp->primary_address,
+ qpim_all_pim_routers_addr, pim_msg,
+ packet_size,
+ PIM_MSG_TYPE_JOIN_PRUNE, false);
+ if (pim_msg_send(pim_ifp->pim_sock_fd,
+ pim_ifp->primary_address,
+ qpim_all_pim_routers_addr, pim_msg,
+ packet_size,
+ rpf->source_nexthop.interface)) {
+ zlog_warn(
+ "%s: could not send PIM message on interface %s",
+ __func__,
+ rpf->source_nexthop.interface->name);
+ }
+
+ msg = (struct pim_jp *)pim_msg;
+ memset(msg, 0, sizeof(*msg));
+
+ pim_msg_addr_encode_ucast((uint8_t *)&msg->addr,
+ rpf->rpf_addr);
+ msg->reserved = 0;
+ msg->holdtime = htons(PIM_JP_HOLDTIME);
+
+ new_packet = false;
+
+ grp = &msg->groups[0];
+ curr_ptr = (uint8_t *)grp;
+ packet_size = sizeof(struct pim_msg_header);
+ packet_size += sizeof(pim_encoded_unicast);
+ packet_size +=
+ 4; // reserved (1) + groups (1) + holdtime (2)
+
+ packet_left = rpf->source_nexthop.interface->mtu - 24;
+ packet_left -= packet_size;
+ }
+
+ msg->num_groups++;
+ /*
+ Build PIM message
+ */
+
+ curr_ptr += group_size;
+ packet_left -= group_size;
+ packet_size += group_size;
+ pim_msg_build_jp_groups(grp, group, group_size);
+
+ if (!pim_ifp->pim_passive_enable) {
+ pim_ifp->pim_ifstat_join_send += ntohs(grp->joins);
+ pim_ifp->pim_ifstat_prune_send += ntohs(grp->prunes);
+ }
+
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug(
+ "%s: interface %s num_joins %u num_prunes %u",
+ __func__, rpf->source_nexthop.interface->name,
+ ntohs(grp->joins), ntohs(grp->prunes));
+
+ grp = (struct pim_jp_groups *)curr_ptr;
+ if (packet_left < sizeof(struct pim_jp_groups)
+ || msg->num_groups == 255) {
+ pim_msg_build_header(pim_ifp->primary_address,
+ qpim_all_pim_routers_addr, pim_msg,
+ packet_size,
+ PIM_MSG_TYPE_JOIN_PRUNE, false);
+ if (pim_msg_send(pim_ifp->pim_sock_fd,
+ pim_ifp->primary_address,
+ qpim_all_pim_routers_addr, pim_msg,
+ packet_size,
+ rpf->source_nexthop.interface)) {
+ zlog_warn(
+ "%s: could not send PIM message on interface %s",
+ __func__,
+ rpf->source_nexthop.interface->name);
+ }
+
+ new_packet = true;
+ }
+ }
+
+
+ if (!new_packet) {
+ // msg->num_groups = htons (msg->num_groups);
+ pim_msg_build_header(
+ pim_ifp->primary_address, qpim_all_pim_routers_addr,
+ pim_msg, packet_size, PIM_MSG_TYPE_JOIN_PRUNE, false);
+ if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
+ qpim_all_pim_routers_addr, pim_msg,
+ packet_size, rpf->source_nexthop.interface)) {
+ zlog_warn(
+ "%s: could not send PIM message on interface %s",
+ __func__, rpf->source_nexthop.interface->name);
+ }
+ }
+ return 0;
+}
diff --git a/pimd/pim_join.h b/pimd/pim_join.h
new file mode 100644
index 0000000..a28d805
--- /dev/null
+++ b/pimd/pim_join.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_JOIN_H
+#define PIM_JOIN_H
+
+#include <zebra.h>
+
+#include "if.h"
+
+#include "pim_neighbor.h"
+
+int pim_joinprune_recv(struct interface *ifp, struct pim_neighbor *neigh,
+ pim_addr src_addr, uint8_t *tlv_buf, int tlv_buf_size);
+
+int pim_joinprune_send(struct pim_rpf *nexthop, struct list *groups);
+
+#endif /* PIM_JOIN_H */
diff --git a/pimd/pim_jp_agg.c b/pimd/pim_jp_agg.c
new file mode 100644
index 0000000..40332ed
--- /dev/null
+++ b/pimd/pim_jp_agg.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for FRR - J/P Aggregation
+ * Copyright (C) 2017 Cumulus Networks, Inc.
+ * Donald Sharp
+ */
+#include <zebra.h>
+
+#include "linklist.h"
+#include "log.h"
+#include "vrf.h"
+#include "if.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_msg.h"
+#include "pim_jp_agg.h"
+#include "pim_join.h"
+#include "pim_iface.h"
+
+void pim_jp_agg_group_list_free(struct pim_jp_agg_group *jag)
+{
+ list_delete(&jag->sources);
+
+ XFREE(MTYPE_PIM_JP_AGG_GROUP, jag);
+}
+
+static void pim_jp_agg_src_free(struct pim_jp_sources *js)
+{
+ struct pim_upstream *up = js->up;
+
+	/*
+	 * When we are called here, we know that
+	 * the neighbor is going away, so start
+	 * the normal J/P timer so that this state
+	 * can be picked back up when the neighbor
+	 * comes back alive
+	 */
+ if (up)
+ join_timer_start(js->up);
+ XFREE(MTYPE_PIM_JP_AGG_SOURCE, js);
+}
+
+int pim_jp_agg_group_list_cmp(void *arg1, void *arg2)
+{
+ const struct pim_jp_agg_group *jag1 =
+ (const struct pim_jp_agg_group *)arg1;
+ const struct pim_jp_agg_group *jag2 =
+ (const struct pim_jp_agg_group *)arg2;
+
+ return pim_addr_cmp(jag1->group, jag2->group);
+}
+
+static int pim_jp_agg_src_cmp(void *arg1, void *arg2)
+{
+ const struct pim_jp_sources *js1 = (const struct pim_jp_sources *)arg1;
+ const struct pim_jp_sources *js2 = (const struct pim_jp_sources *)arg2;
+
+ if (js1->is_join && !js2->is_join)
+ return -1;
+
+ if (!js1->is_join && js2->is_join)
+ return 1;
+
+ return pim_addr_cmp(js1->up->sg.src, js2->up->sg.src);
+}
+
+/*
+ * This function is used by scan_oil to clear
+ * the created jp_agg_group created when
+ * figuring out where to send prunes
+ * and joins.
+ */
+void pim_jp_agg_clear_group(struct list *group)
+{
+ struct listnode *gnode, *gnnode;
+ struct listnode *snode, *snnode;
+ struct pim_jp_agg_group *jag;
+ struct pim_jp_sources *js;
+
+ for (ALL_LIST_ELEMENTS(group, gnode, gnnode, jag)) {
+ for (ALL_LIST_ELEMENTS(jag->sources, snode, snnode, js)) {
+ listnode_delete(jag->sources, js);
+ js->up = NULL;
+ XFREE(MTYPE_PIM_JP_AGG_SOURCE, js);
+ }
+ list_delete(&jag->sources);
+ listnode_delete(group, jag);
+ XFREE(MTYPE_PIM_JP_AGG_GROUP, jag);
+ }
+}
+
+static struct pim_iface_upstream_switch *
+pim_jp_agg_get_interface_upstream_switch_list(struct pim_rpf *rpf)
+{
+ struct interface *ifp = rpf->source_nexthop.interface;
+ struct pim_interface *pim_ifp;
+ struct pim_iface_upstream_switch *pius;
+ struct listnode *node, *nnode;
+
+ if (!ifp)
+ return NULL;
+
+ pim_ifp = ifp->info;
+
+ /* Old interface is pim disabled */
+ if (!pim_ifp)
+ return NULL;
+
+ for (ALL_LIST_ELEMENTS(pim_ifp->upstream_switch_list, node, nnode,
+ pius)) {
+ if (!pim_addr_cmp(pius->address, rpf->rpf_addr))
+ break;
+ }
+
+ if (!pius) {
+ pius = XCALLOC(MTYPE_PIM_JP_AGG_GROUP,
+ sizeof(struct pim_iface_upstream_switch));
+ pius->address = rpf->rpf_addr;
+ pius->us = list_new();
+ listnode_add_sort(pim_ifp->upstream_switch_list, pius);
+ }
+
+ return pius;
+}
+
+void pim_jp_agg_remove_group(struct list *group, struct pim_upstream *up,
+ struct pim_neighbor *nbr)
+{
+ struct listnode *node, *nnode;
+ struct pim_jp_agg_group *jag = NULL;
+ struct pim_jp_sources *js = NULL;
+
+ for (ALL_LIST_ELEMENTS(group, node, nnode, jag)) {
+ if (!pim_addr_cmp(jag->group, up->sg.grp))
+ break;
+ }
+
+ if (!jag)
+ return;
+
+ for (ALL_LIST_ELEMENTS(jag->sources, node, nnode, js)) {
+ if (js->up == up)
+ break;
+ }
+
+ if (nbr) {
+ if (PIM_DEBUG_TRACE)
+ zlog_debug("up %s remove from nbr %s/%pPAs jp-agg-list",
+ up->sg_str, nbr->interface->name,
+ &nbr->source_addr);
+ }
+
+ if (js) {
+ js->up = NULL;
+ listnode_delete(jag->sources, js);
+ XFREE(MTYPE_PIM_JP_AGG_SOURCE, js);
+ }
+
+ if (jag->sources->count == 0) {
+ list_delete(&jag->sources);
+ listnode_delete(group, jag);
+ XFREE(MTYPE_PIM_JP_AGG_GROUP, jag);
+ }
+}
+
+int pim_jp_agg_is_in_list(struct list *group, struct pim_upstream *up)
+{
+ struct listnode *node, *nnode;
+ struct pim_jp_agg_group *jag = NULL;
+ struct pim_jp_sources *js = NULL;
+
+ for (ALL_LIST_ELEMENTS(group, node, nnode, jag)) {
+ if (!pim_addr_cmp(jag->group, up->sg.grp))
+ break;
+ }
+
+ if (!jag)
+ return 0;
+
+ for (ALL_LIST_ELEMENTS(jag->sources, node, nnode, js)) {
+ if (js->up == up)
+ return 1;
+ }
+
+ return 0;
+}
+
+//#define PIM_JP_AGG_DEBUG 1
+/*
+ * For the given upstream, check all the neighbor
+ * jp_agg lists and ensure that it is not
+ * in another list
+ *
+ * *IF* ignore is true we can skip
+ * up->rpf.source_nexthop.interface particular interface for checking
+ *
+ * This is a debugging function, Probably
+ * can be safely compiled out in real
+ * builds
+ */
+void pim_jp_agg_upstream_verification(struct pim_upstream *up, bool ignore)
+{
+#ifdef PIM_JP_AGG_DEBUG
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ struct pim_instance *pim;
+
+ if (!up->rpf.source_nexthop.interface) {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: up %s RPF is not present", __func__,
+ up->sg_str);
+ return;
+ }
+
+ pim_ifp = up->rpf.source_nexthop.interface->info;
+ pim = pim_ifp->pim;
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ pim_ifp = ifp->info;
+ struct listnode *nnode;
+
+ if (ignore && ifp == up->rpf.source_nexthop.interface)
+ continue;
+
+ if (pim_ifp) {
+ struct pim_neighbor *neigh;
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list,
+ nnode, neigh)) {
+ assert(!pim_jp_agg_is_in_list(
+ neigh->upstream_jp_agg, up));
+ }
+ }
+ }
+#else
+ return;
+#endif
+}
+
+void pim_jp_agg_add_group(struct list *group, struct pim_upstream *up,
+ bool is_join, struct pim_neighbor *nbr)
+{
+ struct listnode *node, *nnode;
+ struct pim_jp_agg_group *jag = NULL;
+ struct pim_jp_sources *js = NULL;
+
+ for (ALL_LIST_ELEMENTS(group, node, nnode, jag)) {
+ if (!pim_addr_cmp(jag->group, up->sg.grp))
+ break;
+ }
+
+ if (!jag) {
+ jag = XCALLOC(MTYPE_PIM_JP_AGG_GROUP,
+ sizeof(struct pim_jp_agg_group));
+ jag->group = up->sg.grp;
+ jag->sources = list_new();
+ jag->sources->cmp = pim_jp_agg_src_cmp;
+ jag->sources->del = (void (*)(void *))pim_jp_agg_src_free;
+ listnode_add_sort(group, jag);
+ }
+
+ for (ALL_LIST_ELEMENTS(jag->sources, node, nnode, js)) {
+ if (js->up == up)
+ break;
+ }
+
+ if (nbr) {
+ if (PIM_DEBUG_TRACE)
+ zlog_debug("up %s add to nbr %s/%pPAs jp-agg-list",
+ up->sg_str,
+ up->rpf.source_nexthop.interface->name,
+ &nbr->source_addr);
+ }
+
+ if (!js) {
+ js = XCALLOC(MTYPE_PIM_JP_AGG_SOURCE,
+ sizeof(struct pim_jp_sources));
+ js->up = up;
+ js->is_join = is_join;
+ listnode_add_sort(jag->sources, js);
+ } else {
+ if (js->is_join != is_join) {
+ listnode_delete(jag->sources, js);
+ js->is_join = is_join;
+ listnode_add_sort(jag->sources, js);
+ }
+ }
+}
+
+void pim_jp_agg_switch_interface(struct pim_rpf *orpf, struct pim_rpf *nrpf,
+ struct pim_upstream *up)
+{
+ struct pim_iface_upstream_switch *opius;
+ struct pim_iface_upstream_switch *npius;
+
+ opius = pim_jp_agg_get_interface_upstream_switch_list(orpf);
+ npius = pim_jp_agg_get_interface_upstream_switch_list(nrpf);
+
+ /*
+ * RFC 4601: 4.5.7. Sending (S,G) Join/Prune Messages
+ *
+ * Transitions from Joined State
+ *
+ * RPF'(S,G) changes not due to an Assert
+ *
+ * The upstream (S,G) state machine remains in Joined
+ * state. Send Join(S,G) to the new upstream neighbor, which is
+ * the new value of RPF'(S,G). Send Prune(S,G) to the old
+ * upstream neighbor, which is the old value of RPF'(S,G). Set
+ * the Join Timer (JT) to expire after t_periodic seconds.
+ */
+
+ /* send Prune(S,G) to the old upstream neighbor */
+ if (opius)
+ pim_jp_agg_add_group(opius->us, up, false, NULL);
+
+ /* send Join(S,G) to the current upstream neighbor */
+ if (npius)
+ pim_jp_agg_add_group(npius->us, up, true, NULL);
+}
+
+
+void pim_jp_agg_single_upstream_send(struct pim_rpf *rpf,
+ struct pim_upstream *up, bool is_join)
+{
+ struct list groups, sources;
+ struct pim_jp_agg_group jag;
+ struct pim_jp_sources js;
+
+ /* skip JP upstream messages if source is directly connected */
+ if (!up || !rpf->source_nexthop.interface ||
+ pim_if_connected_to_source(rpf->source_nexthop.interface,
+ up->sg.src) ||
+ if_is_loopback(rpf->source_nexthop.interface))
+ return;
+
+ memset(&groups, 0, sizeof(groups));
+ memset(&sources, 0, sizeof(sources));
+ jag.sources = &sources;
+
+ listnode_add(&groups, &jag);
+ listnode_add(jag.sources, &js);
+
+ jag.group = up->sg.grp;
+ js.up = up;
+ js.is_join = is_join;
+
+ pim_joinprune_send(rpf, &groups);
+
+ list_delete_all_node(jag.sources);
+ list_delete_all_node(&groups);
+}
diff --git a/pimd/pim_jp_agg.h b/pimd/pim_jp_agg.h
new file mode 100644
index 0000000..926d211
--- /dev/null
+++ b/pimd/pim_jp_agg.h
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for FRR - J/P Aggregation
+ * Copyright (C) 2017 Cumulus Networks, Inc.
+ * Donald Sharp
+ */
+#ifndef __PIM_JP_AGG_H__
+#define __PIM_JP_AGG_H__
+
+#include "pim_rpf.h"
+
+/* One (S,G) source entry queued for an aggregated Join/Prune message. */
+struct pim_jp_sources {
+ struct pim_upstream *up; /* upstream state this entry refers to */
+ int is_join; /* non-zero: Join; zero: Prune */
+};
+
+/* Per-group aggregation bucket: group address plus its source list. */
+struct pim_jp_agg_group {
+ pim_addr group; /* multicast group address */
+ struct list *sources; /* list of struct pim_jp_sources */
+};
+
+void pim_jp_agg_upstream_verification(struct pim_upstream *up, bool ignore);
+int pim_jp_agg_is_in_list(struct list *group, struct pim_upstream *up);
+
+void pim_jp_agg_group_list_free(struct pim_jp_agg_group *jag);
+int pim_jp_agg_group_list_cmp(void *arg1, void *arg2);
+
+void pim_jp_agg_clear_group(struct list *group);
+void pim_jp_agg_remove_group(struct list *group, struct pim_upstream *up,
+ struct pim_neighbor *nbr);
+
+void pim_jp_agg_add_group(struct list *group, struct pim_upstream *up,
+ bool is_join, struct pim_neighbor *nbr);
+
+void pim_jp_agg_switch_interface(struct pim_rpf *orpf, struct pim_rpf *nrpf,
+ struct pim_upstream *up);
+
+void pim_jp_agg_single_upstream_send(struct pim_rpf *rpf,
+ struct pim_upstream *up, bool is_join);
+#endif
diff --git a/pimd/pim_macro.c b/pimd/pim_macro.c
new file mode 100644
index 0000000..2690fca
--- /dev/null
+++ b/pimd/pim_macro.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "prefix.h"
+#include "vty.h"
+#include "plist.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_macro.h"
+#include "pim_iface.h"
+#include "pim_ifchannel.h"
+#include "pim_rp.h"
+
+/*
+ DownstreamJPState(S,G,I) is the per-interface state machine for
+ receiving (S,G) Join/Prune messages.
+
+ DownstreamJPState(S,G,I) is either Join or Prune-Pending
+ DownstreamJPState(*,G,I) is either Join or Prune-Pending
+*/
+/*
+ * Return 1 when the downstream J/P state counts as "joined"
+ * (Join or Prune-Pending), 0 otherwise.
+ */
+static int downstream_jpstate_isjoined(const struct pim_ifchannel *ch)
+{
+ switch (ch->ifjoin_state) {
+ /* not joined */
+ case PIM_IFJOIN_NOINFO:
+ case PIM_IFJOIN_PRUNE:
+ case PIM_IFJOIN_PRUNE_TMP:
+ case PIM_IFJOIN_PRUNE_PENDING_TMP:
+ return 0;
+ /* joined per the macro definition above */
+ case PIM_IFJOIN_JOIN:
+ case PIM_IFJOIN_PRUNE_PENDING:
+ return 1;
+ }
+ /* unreachable if all enum values are covered above */
+ return 0;
+}
+
+/*
+ The clause "local_receiver_include(S,G,I)" is true if the IGMP/MLD
+ module or other local membership mechanism has determined that local
+ members on interface I desire to receive traffic sent specifically
+ by S to G.
+*/
+/* True when IGMP/MLD reported local members wanting (S,G) on this channel. */
+static int local_receiver_include(const struct pim_ifchannel *ch)
+{
+ /* local_receiver_include(S,G,I) ? */
+ return ch->local_ifmembership == PIM_IFMEMBERSHIP_INCLUDE;
+}
+
+/*
+ RFC 4601: 4.1.6. State Summarization Macros
+
+ The set "joins(S,G)" is the set of all interfaces on which the
+ router has received (S,G) Joins:
+
+ joins(S,G) =
+ { all interfaces I such that
+ DownstreamJPState(S,G,I) is either Join or Prune-Pending }
+
+ DownstreamJPState(S,G,I) is either Join or Prune-Pending ?
+*/
+/* Is this channel's interface in joins(S,G)?  (RFC 4601 4.1.6) */
+int pim_macro_chisin_joins(const struct pim_ifchannel *ch)
+{
+ return downstream_jpstate_isjoined(ch);
+}
+
+/*
+ RFC 4601: 4.6.5. Assert State Macros
+
+ The set "lost_assert(S,G)" is the set of all interfaces on which the
+ router has received (S,G) joins but has lost an (S,G) assert.
+
+ lost_assert(S,G) =
+ { all interfaces I such that
+ lost_assert(S,G,I) == true }
+
+ bool lost_assert(S,G,I) {
+ if ( RPF_interface(S) == I ) {
+ return false
+ } else {
+ return ( AssertWinner(S,G,I) != NULL AND
+ AssertWinner(S,G,I) != me AND
+ (AssertWinnerMetric(S,G,I) is better
+ than spt_assert_metric(S,I) )
+ }
+ }
+
+ AssertWinner(S,G,I) is the IP source address of the Assert(S,G)
+ packet that won an Assert.
+*/
+/*
+ * Evaluate lost_assert(S,G,I) for this channel (pseudocode above).
+ * Returns non-zero (true) when another router won the (S,G) assert on
+ * this interface with a metric better than our spt_assert_metric.
+ */
+int pim_macro_ch_lost_assert(const struct pim_ifchannel *ch)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ struct pim_assert_metric spt_assert_metric;
+
+ ifp = ch->interface;
+ if (!ifp) {
+ zlog_warn("%s: (S,G)=%s: null interface", __func__, ch->sg_str);
+ return 0; /* false */
+ }
+
+ /* RPF_interface(S) == I ? lost_assert is false by definition then */
+ if (ch->upstream->rpf.source_nexthop.interface == ifp)
+ return 0; /* false */
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ zlog_warn("%s: (S,G)=%s: multicast not enabled on interface %s",
+ __func__, ch->sg_str, ifp->name);
+ return 0; /* false */
+ }
+
+ /* AssertWinner(S,G,I) == NULL ? */
+ if (pim_addr_is_any(ch->ifassert_winner))
+ return 0; /* false */
+
+ /* AssertWinner(S,G,I) == me ? (pim_addr_cmp() == 0 means equal) */
+ if (!pim_addr_cmp(ch->ifassert_winner, pim_ifp->primary_address))
+ return 0; /* false */
+
+ /* compare the winner's metric against our own spt_assert_metric */
+ spt_assert_metric = pim_macro_spt_assert_metric(
+ &ch->upstream->rpf, pim_ifp->primary_address);
+
+ return pim_assert_metric_better(&ch->ifassert_winner_metric,
+ &spt_assert_metric);
+}
+
+/*
+ RFC 4601: 4.1.6. State Summarization Macros
+
+ pim_include(S,G) =
+ { all interfaces I such that:
+ ( (I_am_DR( I ) AND lost_assert(S,G,I) == false )
+ OR AssertWinner(S,G,I) == me )
+ AND local_receiver_include(S,G,I) }
+
+ AssertWinner(S,G,I) is the IP source address of the Assert(S,G)
+ packet that won an Assert.
+*/
+/*
+ * Evaluate pim_include(S,G) membership for this channel's interface
+ * (macro quoted above), with an MLAG extension: a dual-active
+ * (active-active) interface on an MLAG upstream also counts as DR.
+ */
+int pim_macro_chisin_pim_include(const struct pim_ifchannel *ch)
+{
+ struct pim_interface *pim_ifp = ch->interface->info;
+ bool mlag_active = false;
+
+ if (!pim_ifp) {
+ zlog_warn("%s: (S,G)=%s: multicast not enabled on interface %s",
+ __func__, ch->sg_str, ch->interface->name);
+ return 0; /* false */
+ }
+
+ /* local_receiver_include(S,G,I) ? */
+ if (!local_receiver_include(ch))
+ return 0; /* false */
+
+ /* OR AssertWinner(S,G,I) == me ? (pim_addr_cmp() == 0 means equal) */
+ if (!pim_addr_cmp(ch->ifassert_winner, pim_ifp->primary_address))
+ return 1; /* true */
+
+ /*
+ * When we have an active-active interface we need to signal
+ * that this interface is interesting to the upstream
+ * decision to JOIN *if* we are syncing over the interface
+ */
+ if (pim_ifp->activeactive) {
+ struct pim_upstream *up = ch->upstream;
+
+ if (PIM_UPSTREAM_FLAG_TEST_MLAG_INTERFACE(up->flags))
+ mlag_active = true;
+ }
+
+ return (
+ /* I_am_DR( I ) ? */
+ (PIM_I_am_DR(pim_ifp) || mlag_active) &&
+ /* lost_assert(S,G,I) == false ? */
+ (!pim_macro_ch_lost_assert(ch)));
+}
+
+/* I in joins(S,G) (+) pim_include(S,G) ?  Union of the two macro sets. */
+int pim_macro_chisin_joins_or_include(const struct pim_ifchannel *ch)
+{
+ if (pim_macro_chisin_joins(ch))
+ return 1; /* true */
+
+ return pim_macro_chisin_pim_include(ch);
+}
+
+/*
+ RFC 4601: 4.6.1. (S,G) Assert Message State Machine
+
+ CouldAssert(S,G,I) =
+ SPTbit(S,G)==TRUE
+ AND (RPF_interface(S) != I)
+ AND (I in ( ( joins(*,*,RP(G)) (+) joins(*,G) (-) prunes(S,G,rpt) )
+ (+) ( pim_include(*,G) (-) pim_exclude(S,G) )
+ (-) lost_assert(*,G)
+ (+) joins(S,G) (+) pim_include(S,G) ) )
+
+ CouldAssert(S,G,I) is true for downstream interfaces that would be in
+ the inherited_olist(S,G) if (S,G) assert information was not taken
+ into account.
+
+ CouldAssert(S,G,I) may be affected by changes in the following:
+
+ pim_ifp->primary_address
+ pim_ifp->pim_dr_addr
+ ch->ifassert_winner_metric
+ ch->ifassert_winner
+ ch->local_ifmembership
+ ch->ifjoin_state
+ ch->upstream->rpf.source_nexthop.mrib_metric_preference
+ ch->upstream->rpf.source_nexthop.mrib_route_metric
+ ch->upstream->rpf.source_nexthop.interface
+*/
+/*
+ * Evaluate CouldAssert(S,G,I) for this channel (macro quoted above).
+ * Only the SPTbit, RPF-interface and joins/pim_include terms are
+ * evaluated here; see the comment block above for the full macro.
+ */
+int pim_macro_ch_could_assert_eval(const struct pim_ifchannel *ch)
+{
+ struct interface *ifp;
+
+ ifp = ch->interface;
+ if (!ifp) {
+ zlog_warn("%s: (S,G)=%s: null interface", __func__, ch->sg_str);
+ return 0; /* false */
+ }
+
+ /* SPTbit(S,G) == true */
+ if (ch->upstream->sptbit == PIM_UPSTREAM_SPTBIT_FALSE)
+ return 0; /* false */
+
+ /* RPF_interface(S) != I ? */
+ if (ch->upstream->rpf.source_nexthop.interface == ifp)
+ return 0; /* false */
+
+ /* I in joins(S,G) (+) pim_include(S,G) ? */
+ return pim_macro_chisin_joins_or_include(ch);
+}
+
+/*
+ RFC 4601: 4.6.3. Assert Metrics
+
+ spt_assert_metric(S,I) gives the assert metric we use if we're
+ sending an assert based on active (S,G) forwarding state:
+
+ assert_metric
+ spt_assert_metric(S,I) {
+ return {0,MRIB.pref(S),MRIB.metric(S),my_ip_address(I)}
+ }
+*/
+/*
+ * Build spt_assert_metric(S,I) = {0, MRIB.pref(S), MRIB.metric(S), ifaddr}
+ * from the upstream RPF info; returned by value.
+ */
+struct pim_assert_metric pim_macro_spt_assert_metric(const struct pim_rpf *rpf,
+ pim_addr ifaddr)
+{
+ struct pim_assert_metric metric;
+
+ metric.rpt_bit_flag = 0;
+ metric.metric_preference = rpf->source_nexthop.mrib_metric_preference;
+ metric.route_metric = rpf->source_nexthop.mrib_route_metric;
+ metric.ip_address = ifaddr;
+
+ return metric;
+}
+
+/*
+ RFC 4601: 4.6.3. Assert Metrics
+
+ An assert metric for (S,G) to include in (or compare against) an
+ Assert message sent on interface I should be computed using the
+ following pseudocode:
+
+ assert_metric my_assert_metric(S,G,I) {
+ if( CouldAssert(S,G,I) == true ) {
+ return spt_assert_metric(S,I)
+ } else if( CouldAssert(*,G,I) == true ) {
+ return rpt_assert_metric(G,I)
+ } else {
+ return infinite_assert_metric()
+ }
+ }
+*/
+/*
+ * my_assert_metric(S,G,I): spt_assert_metric when COULD_ASSERT is set
+ * on the channel, otherwise the router's infinite assert metric.
+ * NOTE(review): the CouldAssert(*,G,I)/rpt_assert_metric branch of the
+ * pseudocode above is not implemented in this block.
+ */
+struct pim_assert_metric
+pim_macro_ch_my_assert_metric_eval(const struct pim_ifchannel *ch)
+{
+ struct pim_interface *pim_ifp;
+
+ pim_ifp = ch->interface->info;
+
+ if (pim_ifp) {
+ if (PIM_IF_FLAG_TEST_COULD_ASSERT(ch->flags)) {
+ return pim_macro_spt_assert_metric(
+ &ch->upstream->rpf, pim_ifp->primary_address);
+ }
+ }
+
+ /* fallback: infinite_assert_metric() */
+ return router->infinite_assert_metric;
+}
+
+/*
+ RFC 4601 4.2. Data Packet Forwarding Rules
+
+ Macro:
+ inherited_olist(S,G) =
+ inherited_olist(S,G,rpt) (+)
+ joins(S,G) (+) pim_include(S,G) (-) lost_assert(S,G)
+*/
+/* I in inherited_olist(S,G) = joins(+)pim_include(-)lost_assert ? */
+static int pim_macro_chisin_inherited_olist(const struct pim_ifchannel *ch)
+{
+ /* (-) lost_assert(S,G): losing the assert removes I from the set */
+ if (pim_macro_ch_lost_assert(ch))
+ return 0; /* false */
+
+ return pim_macro_chisin_joins_or_include(ch);
+}
+
+/*
+ RFC 4601 4.2. Data Packet Forwarding Rules
+ RFC 4601 4.8.2. PIM-SSM-Only Routers
+
+ Additionally, the Packet forwarding rules of Section 4.2 can be
+ simplified in a PIM-SSM-only router:
+
+ iif is the incoming interface of the packet.
+ oiflist = NULL
+ if (iif == RPF_interface(S) AND UpstreamJPState(S,G) == Joined) {
+ oiflist = inherited_olist(S,G)
+ } else if (iif is in inherited_olist(S,G)) {
+ send Assert(S,G) on iif
+ }
+ oiflist = oiflist (-) iif
+ forward packet on all interfaces in oiflist
+
+ Macro:
+ inherited_olist(S,G) =
+ joins(S,G) (+) pim_include(S,G) (-) lost_assert(S,G)
+
+ Note:
+ - The following test is performed as response to WRONGVIF kernel
+ upcall:
+ if (iif is in inherited_olist(S,G)) {
+ send Assert(S,G) on iif
+ }
+ See pim_mroute.c mroute_msg().
+*/
+/*
+ * Is this channel's interface in the SSM forwarding oiflist?
+ * Implements the simplified forwarding rules quoted above: empty when
+ * not Joined, minus the incoming (RPF) interface, else inherited_olist.
+ */
+int pim_macro_chisin_oiflist(const struct pim_ifchannel *ch)
+{
+ if (ch->upstream->join_state == PIM_UPSTREAM_NOTJOINED) {
+ /* oiflist is NULL */
+ return 0; /* false */
+ }
+
+ /* oiflist = oiflist (-) iif */
+ if (ch->interface == ch->upstream->rpf.source_nexthop.interface)
+ return 0; /* false */
+
+ return pim_macro_chisin_inherited_olist(ch);
+}
+
+/*
+ RFC 4601: 4.6.1. (S,G) Assert Message State Machine
+
+ AssertTrackingDesired(S,G,I) =
+ (I in ( ( joins(*,*,RP(G)) (+) joins(*,G) (-) prunes(S,G,rpt) )
+ (+) ( pim_include(*,G) (-) pim_exclude(S,G) )
+ (-) lost_assert(*,G)
+ (+) joins(S,G) ) )
+ OR (local_receiver_include(S,G,I) == true
+ AND (I_am_DR(I) OR (AssertWinner(S,G,I) == me)))
+ OR ((RPF_interface(S) == I) AND (JoinDesired(S,G) == true))
+ OR ((RPF_interface(RP(G)) == I) AND (JoinDesired(*,G) == true)
+ AND (SPTbit(S,G) == false))
+
+ AssertTrackingDesired(S,G,I) is true on any interface in which an
+ (S,G) assert might affect our behavior.
+*/
+/*
+ * Evaluate AssertTrackingDesired(S,G,I) (macro quoted above).
+ * NOTE(review): only the joins(S,G), local_receiver_include/DR/winner
+ * and RPF_interface(S)+JoinDesired(S,G) clauses are evaluated here; the
+ * (*,G)/RP(G)-related clauses are not present in this block.
+ */
+int pim_macro_assert_tracking_desired_eval(const struct pim_ifchannel *ch)
+{
+ struct pim_interface *pim_ifp;
+ struct interface *ifp;
+
+ ifp = ch->interface;
+ if (!ifp) {
+ zlog_warn("%s: (S,G)=%s: null interface", __func__, ch->sg_str);
+ return 0; /* false */
+ }
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ zlog_warn("%s: (S,G)=%s: multicast not enabled on interface %s",
+ __func__, ch->sg_str, ch->interface->name);
+ return 0; /* false */
+ }
+
+ /* I in joins(S,G) ? */
+ if (pim_macro_chisin_joins(ch))
+ return 1; /* true */
+
+ /* local_receiver_include(S,G,I) ? */
+ if (local_receiver_include(ch)) {
+ /* I_am_DR(I) ? */
+ if (PIM_I_am_DR(pim_ifp))
+ return 1; /* true */
+
+ /* AssertWinner(S,G,I) == me ? (pim_addr_cmp() == 0 is equal) */
+ if (!pim_addr_cmp(ch->ifassert_winner,
+ pim_ifp->primary_address))
+ return 1; /* true */
+ }
+
+ /* RPF_interface(S) == I ? */
+ if (ch->upstream->rpf.source_nexthop.interface == ifp) {
+ /* JoinDesired(S,G) ? */
+ if (PIM_UPSTREAM_FLAG_TEST_DR_JOIN_DESIRED(ch->upstream->flags))
+ return 1; /* true */
+ }
+
+ return 0; /* false */
+}
diff --git a/pimd/pim_macro.h b/pimd/pim_macro.h
new file mode 100644
index 0000000..39fa535
--- /dev/null
+++ b/pimd/pim_macro.h
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_MACRO_H
+#define PIM_MACRO_H
+
+#include <zebra.h>
+
+#include "if.h"
+
+#include "pim_upstream.h"
+#include "pim_ifchannel.h"
+
+int pim_macro_ch_lost_assert(const struct pim_ifchannel *ch);
+int pim_macro_chisin_joins(const struct pim_ifchannel *ch);
+int pim_macro_chisin_pim_include(const struct pim_ifchannel *ch);
+int pim_macro_chisin_joins_or_include(const struct pim_ifchannel *ch);
+int pim_macro_ch_could_assert_eval(const struct pim_ifchannel *ch);
+struct pim_assert_metric pim_macro_spt_assert_metric(const struct pim_rpf *rpf,
+ pim_addr ifaddr);
+struct pim_assert_metric
+pim_macro_ch_my_assert_metric_eval(const struct pim_ifchannel *ch);
+int pim_macro_chisin_oiflist(const struct pim_ifchannel *ch);
+int pim_macro_assert_tracking_desired_eval(const struct pim_ifchannel *ch);
+
+#endif /* PIM_MACRO_H */
diff --git a/pimd/pim_main.c b/pimd/pim_main.c
new file mode 100644
index 0000000..7db0a76
--- /dev/null
+++ b/pimd/pim_main.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "privs.h"
+#include "lib/version.h"
+#include <getopt.h>
+#include "command.h"
+#include "frrevent.h"
+#include <signal.h>
+
+#include "memory.h"
+#include "vrf.h"
+#include "filter.h"
+#include "vty.h"
+#include "sigevent.h"
+#include "prefix.h"
+#include "plist.h"
+#include "vrf.h"
+#include "libfrr.h"
+#include "routemap.h"
+#include "routing_nb.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_signals.h"
+#include "pim_zebra.h"
+#include "pim_msdp.h"
+#include "pim_iface.h"
+#include "pim_bfd.h"
+#include "pim_mlag.h"
+#include "pim_errors.h"
+#include "pim_nb.h"
+
+extern struct host host;
+
+struct option longopts[] = {{0}};
+
+/* pimd privileges */
+zebra_capabilities_t _caps_p[] = {
+ ZCAP_NET_ADMIN, ZCAP_SYS_ADMIN, ZCAP_NET_RAW, ZCAP_BIND,
+};
+
+/* pimd privileges to run with */
+struct zebra_privs_t pimd_privs = {
+#if defined(FRR_USER) && defined(FRR_GROUP)
+ .user = FRR_USER,
+ .group = FRR_GROUP,
+#endif
+#ifdef VTY_GROUP
+ .vty_group = VTY_GROUP,
+#endif
+ .caps_p = _caps_p,
+ .cap_num_p = array_size(_caps_p),
+ .cap_num_i = 0};
+
+static const struct frr_yang_module_info *const pimd_yang_modules[] = {
+ &frr_filter_info,
+ &frr_interface_info,
+ &frr_route_map_info,
+ &frr_vrf_info,
+ &frr_routing_info,
+ &frr_pim_info,
+ &frr_pim_rp_info,
+ &frr_gmp_info,
+};
+
+/*
+ * Daemon descriptor consumed by libfrr.
+ * NOTE(review): n_signals is hard-coded to 4 (the XXX below) instead of
+ * array_size(pimd_signals) - must be kept in sync with pim_signals.c.
+ */
+FRR_DAEMON_INFO(pimd, PIM, .vty_port = PIMD_VTY_PORT,
+
+ .proghelp = "Implementation of the PIM routing protocol.",
+
+ .signals = pimd_signals,
+ .n_signals = 4 /* XXX array_size(pimd_signals) XXX*/,
+
+ .privs = &pimd_privs, .yang_modules = pimd_yang_modules,
+ .n_yang_modules = array_size(pimd_yang_modules),
+);
+
+
+/*
+ * pimd entry point: parse options via libfrr, initialize router/VRF/CLI
+ * state and the zebra client, fork into daemon mode, then run the event
+ * loop forever.
+ */
+int main(int argc, char **argv, char **envp)
+{
+ frr_preinit(&pimd_di, argc, argv);
+ frr_opt_add("", longopts, "");
+
+ /* this while just reads the options */
+ while (1) {
+ int opt;
+
+ opt = frr_getopt(argc, argv, NULL);
+
+ if (opt == EOF)
+ break;
+
+ switch (opt) {
+ case 0:
+ break;
+ default:
+ frr_help_exit(1);
+ }
+ }
+
+ /* global router state (master thread, timers, defaults) */
+ pim_router_init();
+
+ /*
+ * Initializations
+ */
+ pim_error_init();
+ pim_vrf_init();
+ access_list_init();
+ prefix_list_init();
+ prefix_list_add_hook(pim_prefix_list_update);
+ prefix_list_delete_hook(pim_prefix_list_update);
+
+ pim_route_map_init();
+ pim_init();
+
+ /*
+ * Initialize zclient "update" and "lookup" sockets
+ */
+ pim_iface_init();
+ pim_zebra_init();
+ pim_bfd_init();
+ pim_mlag_init();
+
+ /* northbound: validate protocol names, register VRF dependency */
+ hook_register(routing_conf_event,
+ routing_control_plane_protocols_name_validate);
+
+ routing_control_plane_protocols_register_vrf_dependency();
+
+ /* daemonize and read the startup configuration */
+ frr_config_fork();
+
+#ifdef PIM_DEBUG_BYDEFAULT
+ zlog_notice("PIM_DEBUG_BYDEFAULT: Enabling all debug commands");
+ PIM_DO_DEBUG_PIM_EVENTS;
+ PIM_DO_DEBUG_PIM_PACKETS;
+ PIM_DO_DEBUG_PIM_TRACE;
+ PIM_DO_DEBUG_GM_EVENTS;
+ PIM_DO_DEBUG_GM_PACKETS;
+ PIM_DO_DEBUG_GM_TRACE;
+ PIM_DO_DEBUG_ZEBRA;
+#endif
+
+#ifdef PIM_CHECK_RECV_IFINDEX_SANITY
+ zlog_notice(
+ "PIM_CHECK_RECV_IFINDEX_SANITY: will match sock/recv ifindex");
+#ifdef PIM_REPORT_RECV_IFINDEX_MISMATCH
+ zlog_notice(
+ "PIM_REPORT_RECV_IFINDEX_MISMATCH: will report sock/recv ifindex mismatch");
+#endif
+#endif
+
+#ifdef PIM_UNEXPECTED_KERNEL_UPCALL
+ zlog_notice(
+ "PIM_UNEXPECTED_KERNEL_UPCALL: report unexpected kernel upcall");
+#endif
+
+ frr_run(router->master);
+
+ /* never reached */
+ return 0;
+}
diff --git a/pimd/pim_memory.c b/pimd/pim_memory.c
new file mode 100644
index 0000000..85780f0
--- /dev/null
+++ b/pimd/pim_memory.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* pimd memory type definitions
+ *
+ * Copyright (C) 2015 David Lamparter
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pim_memory.h"
+
+DEFINE_MGROUP(PIMD, "pimd");
+DEFINE_MTYPE(PIMD, PIM_CHANNEL_OIL, "PIM SSM (S,G) channel OIL");
+DEFINE_MTYPE(PIMD, PIM_INTERFACE, "PIM interface");
+DEFINE_MTYPE(PIMD, PIM_IGMP_JOIN, "PIM interface IGMP static join");
+DEFINE_MTYPE(PIMD, PIM_IGMP_SOCKET, "PIM interface IGMP socket");
+DEFINE_MTYPE(PIMD, PIM_IGMP_GROUP, "PIM interface IGMP group");
+DEFINE_MTYPE(PIMD, PIM_IGMP_GROUP_SOURCE, "PIM interface IGMP source");
+DEFINE_MTYPE(PIMD, PIM_NEIGHBOR, "PIM interface neighbor");
+DEFINE_MTYPE(PIMD, PIM_IFCHANNEL, "PIM interface (S,G) state");
+DEFINE_MTYPE(PIMD, PIM_UPSTREAM, "PIM upstream (S,G) state");
+DEFINE_MTYPE(PIMD, PIM_SSMPINGD, "PIM sspimgd socket");
+DEFINE_MTYPE(PIMD, PIM_STATIC_ROUTE, "PIM Static Route");
+DEFINE_MTYPE(PIMD, PIM_RP, "PIM RP info");
+DEFINE_MTYPE(PIMD, PIM_FILTER_NAME, "PIM RP filter info");
+DEFINE_MTYPE(PIMD, PIM_MSDP_PEER, "PIM MSDP peer");
+DEFINE_MTYPE(PIMD, PIM_MSDP_MG_NAME, "PIM MSDP mesh-group name");
+DEFINE_MTYPE(PIMD, PIM_MSDP_SA, "PIM MSDP source-active cache");
+DEFINE_MTYPE(PIMD, PIM_MSDP_MG, "PIM MSDP mesh group");
+DEFINE_MTYPE(PIMD, PIM_MSDP_MG_MBR, "PIM MSDP mesh group mbr");
+DEFINE_MTYPE(PIMD, PIM_SEC_ADDR, "PIM secondary address");
+DEFINE_MTYPE(PIMD, PIM_JP_AGG_GROUP, "PIM JP AGG Group");
+DEFINE_MTYPE(PIMD, PIM_JP_AGG_SOURCE, "PIM JP AGG Source");
+DEFINE_MTYPE(PIMD, PIM_PIM_INSTANCE, "PIM global state");
+DEFINE_MTYPE(PIMD, PIM_NEXTHOP_CACHE, "PIM nexthop cache state");
+DEFINE_MTYPE(PIMD, PIM_SSM_INFO, "PIM SSM configuration");
+DEFINE_MTYPE(PIMD, PIM_PLIST_NAME, "PIM Prefix List Names");
+DEFINE_MTYPE(PIMD, PIM_VXLAN_SG, "PIM VxLAN mroute cache");
diff --git a/pimd/pim_memory.h b/pimd/pim_memory.h
new file mode 100644
index 0000000..41730e7
--- /dev/null
+++ b/pimd/pim_memory.h
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* pimd memory type declarations
+ *
+ * Copyright (C) 2015 David Lamparter
+ */
+
+#ifndef _QUAGGA_PIM_MEMORY_H
+#define _QUAGGA_PIM_MEMORY_H
+
+#include "memory.h"
+
+DECLARE_MGROUP(PIMD);
+DECLARE_MTYPE(PIM_CHANNEL_OIL);
+DECLARE_MTYPE(PIM_INTERFACE);
+DECLARE_MTYPE(PIM_IGMP_JOIN);
+DECLARE_MTYPE(PIM_IGMP_SOCKET);
+DECLARE_MTYPE(PIM_IGMP_GROUP);
+DECLARE_MTYPE(PIM_IGMP_GROUP_SOURCE);
+DECLARE_MTYPE(PIM_NEIGHBOR);
+DECLARE_MTYPE(PIM_IFCHANNEL);
+DECLARE_MTYPE(PIM_UPSTREAM);
+DECLARE_MTYPE(PIM_SSMPINGD);
+DECLARE_MTYPE(PIM_STATIC_ROUTE);
+DECLARE_MTYPE(PIM_RP);
+DECLARE_MTYPE(PIM_FILTER_NAME);
+DECLARE_MTYPE(PIM_MSDP_PEER);
+DECLARE_MTYPE(PIM_MSDP_MG_NAME);
+DECLARE_MTYPE(PIM_MSDP_SA);
+DECLARE_MTYPE(PIM_MSDP_MG);
+DECLARE_MTYPE(PIM_MSDP_MG_MBR);
+DECLARE_MTYPE(PIM_SEC_ADDR);
+DECLARE_MTYPE(PIM_JP_AGG_GROUP);
+DECLARE_MTYPE(PIM_JP_AGG_SOURCE);
+DECLARE_MTYPE(PIM_PIM_INSTANCE);
+DECLARE_MTYPE(PIM_NEXTHOP_CACHE);
+DECLARE_MTYPE(PIM_SSM_INFO);
+DECLARE_MTYPE(PIM_PLIST_NAME);
+DECLARE_MTYPE(PIM_VXLAN_SG);
+
+#endif /* _QUAGGA_PIM_MEMORY_H */
diff --git a/pimd/pim_mlag.c b/pimd/pim_mlag.c
new file mode 100644
index 0000000..5d72eb6
--- /dev/null
+++ b/pimd/pim_mlag.c
@@ -0,0 +1,1089 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This is an implementation of PIM MLAG Functionality
+ *
+ * Module name: PIM MLAG
+ *
+ * Author: sathesh Kumar karra <sathk@cumulusnetworks.com>
+ *
+ * Copyright (C) 2019 Cumulus Networks http://www.cumulusnetworks.com
+ */
+#include <zebra.h>
+
+#include "pimd.h"
+#include "pim_mlag.h"
+#include "pim_upstream.h"
+#include "pim_vxlan.h"
+
+extern struct zclient *zclient;
+
+#define PIM_MLAG_METADATA_LEN 4
+
+/********************* Actual data processing *****************************/
+/* TBD: There can be duplicate updates to FIB***/
+/*
+ * Recompute mute state for a dual-active member interface in an OIL.
+ * NOTE(review): ADD and DEL expand to the same
+ * pim_channel_update_oif_mute() call - the mute/unmute decision is made
+ * inside that helper from current DF state; only the debug text differs.
+ */
+#define PIM_MLAG_ADD_OIF_TO_OIL(ch, ch_oil) \
+ do { \
+ if (PIM_DEBUG_MLAG) \
+ zlog_debug( \
+ "%s: add Dual-active Interface to %s " \
+ "to oil:%s", \
+ __func__, ch->interface->name, ch->sg_str); \
+ pim_channel_update_oif_mute(ch_oil, ch->interface->info); \
+ } while (0)
+
+#define PIM_MLAG_DEL_OIF_TO_OIL(ch, ch_oil) \
+ do { \
+ if (PIM_DEBUG_MLAG) \
+ zlog_debug( \
+ "%s: del Dual-active Interface to %s " \
+ "to oil:%s", \
+ __func__, ch->interface->name, ch->sg_str); \
+ pim_channel_update_oif_mute(ch_oil, ch->interface->info); \
+ } while (0)
+
+
+/*
+ * Walk all ifchannels of @up and, for each dual-active (MLAG) member
+ * interface, recompute its mute state in the upstream's OIL according
+ * to the new DF role @is_df.  No-op when the upstream has no OIL.
+ */
+static void pim_mlag_calculate_df_for_ifchannels(struct pim_upstream *up,
+ bool is_df)
+{
+ struct listnode *chnode;
+ struct listnode *chnextnode;
+ struct pim_ifchannel *ch;
+ struct pim_interface *pim_ifp = NULL;
+ struct channel_oil *ch_oil = NULL;
+
+ ch_oil = (up) ? up->channel_oil : NULL;
+
+ if (!ch_oil)
+ return;
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: Calculating DF for Dual active if-channel%s",
+ __func__, up->sg_str);
+
+ for (ALL_LIST_ELEMENTS(up->ifchannels, chnode, chnextnode, ch)) {
+ /* only dual-active member interfaces are affected */
+ pim_ifp = (ch->interface) ? ch->interface->info : NULL;
+ if (!pim_ifp || !PIM_I_am_DualActive(pim_ifp))
+ continue;
+
+ if (is_df)
+ PIM_MLAG_ADD_OIF_TO_OIL(ch, ch_oil);
+ else
+ PIM_MLAG_DEL_OIF_TO_OIL(ch, ch_oil);
+ }
+}
+
+/*
+ * Propagate a DF role change on @up to the OILs of all its child
+ * (S,G) upstream entries, for every dual-active ifchannel of @up.
+ */
+static void pim_mlag_inherit_mlag_flags(struct pim_upstream *up, bool is_df)
+{
+ struct listnode *listnode;
+ struct pim_upstream *child;
+ struct listnode *chnode;
+ struct listnode *chnextnode;
+ struct pim_ifchannel *ch;
+ struct pim_interface *pim_ifp = NULL;
+ struct channel_oil *ch_oil = NULL;
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: Updating DF for uptream:%s children", __func__,
+ up->sg_str);
+
+
+ for (ALL_LIST_ELEMENTS(up->ifchannels, chnode, chnextnode, ch)) {
+ /* only dual-active member interfaces are affected */
+ pim_ifp = (ch->interface) ? ch->interface->info : NULL;
+ if (!pim_ifp || !PIM_I_am_DualActive(pim_ifp))
+ continue;
+
+ /* apply the same role to every child (S,G) entry's OIL */
+ for (ALL_LIST_ELEMENTS_RO(up->sources, listnode, child)) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: Updating DF for child:%s",
+ __func__, child->sg_str);
+ ch_oil = (child) ? child->channel_oil : NULL;
+
+ if (!ch_oil)
+ continue;
+
+ if (is_df)
+ PIM_MLAG_ADD_OIF_TO_OIL(ch, ch_oil);
+ else
+ PIM_MLAG_DEL_OIF_TO_OIL(ch, ch_oil);
+ }
+ }
+}
+
+/******************************* pim upstream sync **************************/
+/* Update DF role for the upstream entry and return true on role change */
+bool pim_mlag_up_df_role_update(struct pim_instance *pim,
+ struct pim_upstream *up, bool is_df, const char *reason)
+{
+ struct channel_oil *c_oil = up->channel_oil;
+ /* DF role is stored inverted, as the MLAG_NON_DF flag */
+ bool old_is_df = !PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags);
+ struct pim_interface *vxlan_ifp;
+
+ if (is_df == old_is_df) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(
+ "%s: Ignoring Role update for %s, since no change",
+ __func__, up->sg_str);
+ return false;
+ }
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("local MLAG mroute %s role changed to %s based on %s",
+ up->sg_str, is_df ? "df" : "non-df", reason);
+
+ /* record the new role by toggling the NON_DF flag */
+ if (is_df)
+ PIM_UPSTREAM_FLAG_UNSET_MLAG_NON_DF(up->flags);
+ else
+ PIM_UPSTREAM_FLAG_SET_MLAG_NON_DF(up->flags);
+
+
+ /*
+ * This Upstream entry synced to peer Because of Dual-active
+ * Interface configuration
+ */
+ if (PIM_UPSTREAM_FLAG_TEST_MLAG_INTERFACE(up->flags)) {
+ pim_mlag_inherit_mlag_flags(up, is_df);
+ pim_mlag_calculate_df_for_ifchannels(up, is_df);
+ }
+
+ /* If the DF role has changed check if ipmr-lo needs to be
+ * muted/un-muted. Active-Active devices and vxlan termination
+ * devices (ipmr-lo) are suppressed on the non-DF.
+ * This may leave the mroute with the empty OIL in which case the
+ * the forwarding entry's sole purpose is to just blackhole the flow
+ * headed to the switch.
+ */
+ if (c_oil) {
+ vxlan_ifp = pim_vxlan_get_term_ifp(pim);
+ if (vxlan_ifp)
+ pim_channel_update_oif_mute(c_oil, vxlan_ifp);
+ }
+
+ /* If DF role changed on a (*,G) termination mroute update the
+ * associated DF role on the inherited (S,G) entries
+ */
+ if (pim_addr_is_any(up->sg.src) &&
+ PIM_UPSTREAM_FLAG_TEST_MLAG_VXLAN(up->flags))
+ pim_vxlan_inherit_mlag_flags(pim, up, true /* inherit */);
+
+ /* role actually changed */
+ return true;
+}
+
+/* Run per-upstream entry DF election and return true on role change */
+/*
+ * Run the DF election for one local MLAG upstream entry.  The decision
+ * cascades through connectivity states (mlagd down, peer down, peer
+ * zebra down, no peer mroute) before the cost/role tie-break.  Returns
+ * true when the role changed.
+ */
+static bool pim_mlag_up_df_role_elect(struct pim_instance *pim,
+ struct pim_upstream *up)
+{
+ bool is_df;
+ uint32_t peer_cost;
+ uint32_t local_cost;
+ bool rv;
+
+ if (!pim_up_mlag_is_local(up))
+ return false;
+
+ /* We are yet to rx a status update from the local MLAG daemon so
+ * we will assume DF status.
+ */
+ if (!(router->mlag_flags & PIM_MLAGF_STATUS_RXED))
+ return pim_mlag_up_df_role_update(pim, up,
+ true /*is_df*/, "mlagd-down");
+
+ /* If not connected to peer assume DF role on the MLAG primary
+ * switch (and non-DF on the secondary switch.
+ */
+ if (!(router->mlag_flags & PIM_MLAGF_PEER_CONN_UP)) {
+ is_df = (router->mlag_role == MLAG_ROLE_PRIMARY) ? true : false;
+ return pim_mlag_up_df_role_update(pim, up,
+ is_df, "peer-down");
+ }
+
+ /* If MLAG peer session is up but zebra is down on the peer
+ * assume DF role.
+ */
+ if (!(router->mlag_flags & PIM_MLAGF_PEER_ZEBRA_UP))
+ return pim_mlag_up_df_role_update(pim, up,
+ true /*is_df*/, "zebra-down");
+
+ /* If we are connected to peer switch but don't have a mroute
+ * from it we have to assume non-DF role to avoid duplicates.
+ * Note: When the peer connection comes up we wait for initial
+ * replay to complete before moving "strays" i.e. local-mlag-mroutes
+ * without a peer reference to non-df role.
+ */
+ if (!PIM_UPSTREAM_FLAG_TEST_MLAG_PEER(up->flags))
+ return pim_mlag_up_df_role_update(pim, up,
+ false /*is_df*/, "no-peer-mroute");
+
+ /* switch with the lowest RPF cost wins. if both switches have the same
+ * cost MLAG role is used as a tie breaker (MLAG primary wins).
+ */
+ peer_cost = up->mlag.peer_mrib_metric;
+ local_cost = pim_up_mlag_local_cost(up);
+ if (local_cost == peer_cost) {
+ is_df = (router->mlag_role == MLAG_ROLE_PRIMARY) ? true : false;
+ rv = pim_mlag_up_df_role_update(pim, up, is_df, "equal-cost");
+ } else {
+ is_df = (local_cost < peer_cost) ? true : false;
+ rv = pim_mlag_up_df_role_update(pim, up, is_df, "cost");
+ }
+
+ return rv;
+}
+
+/* Handle upstream entry add from the peer MLAG switch -
+ * - if a local entry doesn't exist one is created with reference
+ * _MLAG_PEER
+ * - if a local entry exists and has a MLAG OIF DF election is run.
+ * the non-DF switch stop forwarding traffic to MLAG devices.
+ */
+static void pim_mlag_up_peer_add(struct mlag_mroute_add *msg)
+{
+ struct pim_upstream *up;
+ struct pim_instance *pim;
+ int flags = 0;
+ pim_sgaddr sg;
+ struct vrf *vrf;
+
+ /* NOTE(review): source_ip/group_ip are decoded via s_addr/htonl,
+ * i.e. this path handles IPv4 addresses only.
+ */
+ memset(&sg, 0, sizeof(sg));
+ sg.src.s_addr = htonl(msg->source_ip);
+ sg.grp.s_addr = htonl(msg->group_ip);
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("peer MLAG mroute add %s:%pSG cost %d",
+ msg->vrf_name, &sg, msg->cost_to_rp);
+
+ /* XXX - this is not correct. we MUST cache updates to avoid losing
+ * an entry because of race conditions with the peer switch.
+ */
+ vrf = vrf_lookup_by_name(msg->vrf_name);
+ if (!vrf) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(
+ "peer MLAG mroute add failed %s:%pSG; no vrf",
+ msg->vrf_name, &sg);
+ return;
+ }
+ pim = vrf->info;
+
+ up = pim_upstream_find(pim, &sg);
+ if (up) {
+ /* upstream already exists; create peer reference if it
+ * doesn't already exist.
+ */
+ if (!PIM_UPSTREAM_FLAG_TEST_MLAG_PEER(up->flags))
+ pim_upstream_ref(up, PIM_UPSTREAM_FLAG_MASK_MLAG_PEER,
+ __func__);
+ } else {
+ /* no local entry yet: create one owned by the peer ref */
+ PIM_UPSTREAM_FLAG_SET_MLAG_PEER(flags);
+ up = pim_upstream_add(pim, &sg, NULL /*iif*/, flags, __func__,
+ NULL /*if_ch*/);
+
+ if (!up) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(
+ "peer MLAG mroute add failed %s:%pSG",
+ vrf->name, &sg);
+ return;
+ }
+ }
+ /* record the peer's RPF cost, then re-run DF election */
+ up->mlag.peer_mrib_metric = msg->cost_to_rp;
+ pim_mlag_up_df_role_elect(pim, up);
+}
+
+/* Handle upstream entry del from the peer MLAG switch -
+ * - peer reference is removed. this can result in the upstream
+ * being deleted altogether.
+ * - if a local entry continues to exist and has a MLAG OIF, DF election
+ * is re-run (at the end of which the local entry will be the DF).
+ */
+/*
+ * Drop the MLAG_PEER reference on @up.  pim_upstream_del() may free the
+ * entry and return NULL; when the entry survives, DF election is re-run
+ * (with no peer mroute the local entry will end up DF).
+ */
+static struct pim_upstream *pim_mlag_up_peer_deref(struct pim_instance *pim,
+ struct pim_upstream *up)
+{
+ if (!PIM_UPSTREAM_FLAG_TEST_MLAG_PEER(up->flags))
+ return up;
+
+ PIM_UPSTREAM_FLAG_UNSET_MLAG_PEER(up->flags);
+ up = pim_upstream_del(pim, up, __func__);
+ if (up)
+ pim_mlag_up_df_role_elect(pim, up);
+
+ return up;
+}
+
+/* Handle a single mroute-del message from the peer MLAG switch. */
+static void pim_mlag_up_peer_del(struct mlag_mroute_del *msg)
+{
+ struct pim_upstream *up;
+ struct pim_instance *pim;
+ pim_sgaddr sg;
+ struct vrf *vrf;
+
+ /* IPv4-only decode, matching pim_mlag_up_peer_add() */
+ memset(&sg, 0, sizeof(sg));
+ sg.src.s_addr = htonl(msg->source_ip);
+ sg.grp.s_addr = htonl(msg->group_ip);
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("peer MLAG mroute del %s:%pSG", msg->vrf_name, &sg);
+
+ vrf = vrf_lookup_by_name(msg->vrf_name);
+ if (!vrf) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(
+ "peer MLAG mroute del skipped %s:%pSG; no vrf",
+ msg->vrf_name, &sg);
+ return;
+ }
+ pim = vrf->info;
+
+ up = pim_upstream_find(pim, &sg);
+ if (!up) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(
+ "peer MLAG mroute del skipped %s:%pSG; no up",
+ vrf->name, &sg);
+ return;
+ }
+
+ /* return value (possibly NULL) intentionally discarded */
+ (void)pim_mlag_up_peer_deref(pim, up);
+}
+
+/* When we lose connection to the local MLAG daemon we can drop all peer
+ * references.
+ */
+static void pim_mlag_up_peer_del_all(void)
+{
+ struct list *temp = list_new();
+ struct pim_upstream *up;
+ struct vrf *vrf;
+ struct pim_instance *pim;
+
+ /*
+ * So why these gyrations?
+ * pim->upstream_head has the list of *,G and S,G
+ * that are in the system. The problem of course
+ * is that it is an ordered list:
+ * (*,G1) -> (S1,G1) -> (S2,G2) -> (S3, G2) -> (*,G2) -> (S1,G2)
+ * And the *,G1 has pointers to S1,G1 and S2,G1
+ * if we delete *,G1 then we have a situation where
+ * S1,G1 and S2,G2 can be deleted as well. Then a
+ * simple ALL_LIST_ELEMENTS will have the next listnode
+ * pointer become invalid and we crash.
+ * So let's grab the list of MLAG_PEER upstreams
+ * add a refcount put on another list and delete safely
+ */
+ RB_FOREACH(vrf, vrf_name_head, &vrfs_by_name) {
+ pim = vrf->info;
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ if (!PIM_UPSTREAM_FLAG_TEST_MLAG_PEER(up->flags))
+ continue;
+ listnode_add(temp, up);
+ /*
+ * Add a reference since we are adding to this
+ * list for deletion
+ */
+ up->ref_count++;
+ }
+
+ /* now deref each collected entry outside the RB walk */
+ while (temp->count) {
+ up = listnode_head(temp);
+ listnode_delete(temp, up);
+
+ up = pim_mlag_up_peer_deref(pim, up);
+ /*
+ * This is the deletion of the reference added
+ * above
+ */
+ if (up)
+ pim_upstream_del(pim, up, __func__);
+ }
+ }
+
+ list_delete(&temp);
+}
+
+/* Send upstream entry to the local MLAG daemon (which will subsequently
+ * send it to the peer MLAG switch).
+ */
+static void pim_mlag_up_local_add_send(struct pim_instance *pim,
+ struct pim_upstream *up)
+{
+ struct stream *s = NULL;
+ struct vrf *vrf = pim->vrf;
+
+ /* nothing to do while the local MLAG daemon connection is down */
+ if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP))
+ return;
+
+ s = stream_new(sizeof(struct mlag_mroute_add) + PIM_MLAG_METADATA_LEN);
+ if (!s)
+ return;
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("local MLAG mroute add %s:%s",
+ vrf->name, up->sg_str);
+
+ ++router->mlag_stats.msg.mroute_add_tx;
+
+ /* serialize the mlag_mroute_add message field by field */
+ stream_putl(s, MLAG_MROUTE_ADD);
+ stream_put(s, vrf->name, VRF_NAMSIZ);
+ stream_putl(s, ntohl(up->sg.src.s_addr));
+ stream_putl(s, ntohl(up->sg.grp.s_addr));
+
+ stream_putl(s, pim_up_mlag_local_cost(up));
+ /* XXX - who is adding */
+ stream_putl(s, MLAG_OWNER_VXLAN);
+ /* XXX - am_i_DR field should be removed */
+ stream_putc(s, false);
+ stream_putc(s, !(PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags)));
+ stream_putl(s, vrf->vrf_id);
+ /* XXX - this field is a No-op for VXLAN*/
+ stream_put(s, NULL, INTERFACE_NAMSIZ);
+
+ /* hand the message to the MLAG zpthread for transmission */
+ stream_fifo_push_safe(router->mlag_fifo, s);
+ pim_mlag_signal_zpthread();
+}
+
+/* Tell the local MLAG daemon to delete an upstream entry previously
+ * advertised via pim_mlag_up_local_add_send(). Message is queued on
+ * router->mlag_fifo and the MLAG zthread is signalled.
+ */
+static void pim_mlag_up_local_del_send(struct pim_instance *pim,
+ struct pim_upstream *up)
+{
+ struct stream *s = NULL;
+ struct vrf *vrf = pim->vrf;
+
+ if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP))
+ return;
+
+ s = stream_new(sizeof(struct mlag_mroute_del) + PIM_MLAG_METADATA_LEN);
+ if (!s)
+ return;
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("local MLAG mroute del %s:%s",
+ vrf->name, up->sg_str);
+
+ ++router->mlag_stats.msg.mroute_del_tx;
+
+ stream_putl(s, MLAG_MROUTE_DEL);
+ stream_put(s, vrf->name, VRF_NAMSIZ);
+ stream_putl(s, ntohl(up->sg.src.s_addr));
+ stream_putl(s, ntohl(up->sg.grp.s_addr));
+ /* XXX - who is adding */
+ stream_putl(s, MLAG_OWNER_VXLAN);
+ stream_putl(s, vrf->vrf_id);
+ /* XXX - this field is a No-op for VXLAN */
+ stream_put(s, NULL, INTERFACE_NAMSIZ);
+
+ /* XXX - is this the most optimal way to do things */
+ stream_fifo_push_safe(router->mlag_fifo, s);
+ pim_mlag_signal_zpthread();
+}
+
+
+/* Called when a local upstream entry is created or if its cost changes;
+ * re-runs DF election and advertises the entry to the MLAG daemon.
+ */
+void pim_mlag_up_local_add(struct pim_instance *pim,
+ struct pim_upstream *up)
+{
+ pim_mlag_up_df_role_elect(pim, up);
+ /* XXX - need to add some dup checks here */
+ pim_mlag_up_local_add_send(pim, up);
+}
+
+/* Called when local MLAG reference is removed from an upstream entry;
+ * re-runs DF election and withdraws the entry from the MLAG daemon.
+ */
+void pim_mlag_up_local_del(struct pim_instance *pim,
+ struct pim_upstream *up)
+{
+ pim_mlag_up_df_role_elect(pim, up);
+ pim_mlag_up_local_del_send(pim, up);
+}
+
+/* When connection to local MLAG daemon is established all the local
+ * MLAG upstream entries are replayed to it (across every VRF).
+ */
+static void pim_mlag_up_local_replay(void)
+{
+ struct pim_upstream *up;
+ struct vrf *vrf;
+ struct pim_instance *pim;
+
+ RB_FOREACH(vrf, vrf_name_head, &vrfs_by_name) {
+ pim = vrf->info;
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ if (pim_up_mlag_is_local(up))
+ pim_mlag_up_local_add_send(pim, up);
+ }
+ }
+}
+
+/* on local/peer mlag connection and role changes the DF status needs
+ * to be re-evaluated
+ *
+ * mlagd_send: when true, entries whose DF role changed are re-advertised
+ * to the MLAG daemon.
+ * reason_code: free-form string used only for debug logging.
+ */
+static void pim_mlag_up_local_reeval(bool mlagd_send, const char *reason_code)
+{
+ struct pim_upstream *up;
+ struct vrf *vrf;
+ struct pim_instance *pim;
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s re-run DF election because of %s",
+ __func__, reason_code);
+ RB_FOREACH(vrf, vrf_name_head, &vrfs_by_name) {
+ pim = vrf->info;
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ if (!pim_up_mlag_is_local(up))
+ continue;
+ /* if role changes re-send to peer */
+ if (pim_mlag_up_df_role_elect(pim, up) &&
+ mlagd_send)
+ pim_mlag_up_local_add_send(pim, up);
+ }
+ }
+}
+
+/*****************PIM Actions for MLAG state changes**********************/
+
+/* notify the anycast VTEP component about state changes; "enable" is
+ * derived from whether an MLAG status message has been received yet.
+ */
+static inline void pim_mlag_vxlan_state_update(void)
+{
+ bool enable = !!(router->mlag_flags & PIM_MLAGF_STATUS_RXED);
+ bool peer_state = !!(router->mlag_flags & PIM_MLAGF_PEER_CONN_UP);
+
+ pim_vxlan_mlag_update(enable, peer_state, router->mlag_role,
+ router->peerlink_rif_p, &router->local_vtep_ip);
+
+}
+
+/**************End of PIM Actions for MLAG State changes******************/
+
+
+/********************API to process PIM MLAG Data ************************/
+
+/* Process an MLAG_STATUS_UPDATE from the MLAG daemon: record role,
+ * peerlink interface and peer session state, then propagate the changes
+ * to the VXLAN component and re-run DF election as needed.
+ */
+static void pim_mlag_process_mlagd_state_change(struct mlag_status msg)
+{
+ bool role_chg = false;
+ bool state_chg = false;
+ bool notify_vxlan = false;
+ struct interface *peerlink_rif_p;
+ char buf[MLAG_ROLE_STRSIZE];
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: msg dump: my_role: %s, peer_state: %s",
+ __func__,
+ mlag_role2str(msg.my_role, buf, sizeof(buf)),
+ (msg.peer_state == MLAG_STATE_RUNNING ? "RUNNING"
+ : "DOWN"));
+
+ if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP)) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: msg ignored mlagd process state down",
+ __func__);
+ return;
+ }
+ ++router->mlag_stats.msg.mlag_status_updates;
+
+ /* evaluate the changes first */
+ if (router->mlag_role != msg.my_role) {
+ role_chg = true;
+ notify_vxlan = true;
+ router->mlag_role = msg.my_role;
+ }
+
+ strlcpy(router->peerlink_rif, msg.peerlink_rif,
+ sizeof(router->peerlink_rif));
+
+ /* XXX - handle the case where we may rx the interface name from the
+ * MLAG daemon before we get the interface from zebra.
+ */
+ peerlink_rif_p = if_lookup_by_name(router->peerlink_rif, VRF_DEFAULT);
+ if (router->peerlink_rif_p != peerlink_rif_p) {
+ router->peerlink_rif_p = peerlink_rif_p;
+ notify_vxlan = true;
+ }
+
+ if (msg.peer_state == MLAG_STATE_RUNNING) {
+ if (!(router->mlag_flags & PIM_MLAGF_PEER_CONN_UP)) {
+ state_chg = true;
+ notify_vxlan = true;
+ router->mlag_flags |= PIM_MLAGF_PEER_CONN_UP;
+ }
+ router->connected_to_mlag = true;
+ } else {
+ if (router->mlag_flags & PIM_MLAGF_PEER_CONN_UP) {
+ ++router->mlag_stats.peer_session_downs;
+ state_chg = true;
+ notify_vxlan = true;
+ router->mlag_flags &= ~PIM_MLAGF_PEER_CONN_UP;
+ }
+ router->connected_to_mlag = false;
+ }
+
+ /* apply the changes */
+ /* when connection to mlagd comes up we hold send mroutes till we have
+ * rxed the status and had a chance to re-evaluate DF state
+ */
+ if (!(router->mlag_flags & PIM_MLAGF_STATUS_RXED)) {
+ router->mlag_flags |= PIM_MLAGF_STATUS_RXED;
+ pim_mlag_vxlan_state_update();
+ /* on session up re-eval DF status */
+ pim_mlag_up_local_reeval(false /*mlagd_send*/, "mlagd_up");
+ /* replay all the upstream entries to the local MLAG daemon */
+ pim_mlag_up_local_replay();
+ return;
+ }
+
+ if (notify_vxlan)
+ pim_mlag_vxlan_state_update();
+
+ if (state_chg) {
+ if (!(router->mlag_flags & PIM_MLAGF_PEER_CONN_UP))
+ /* when a connection goes down the primary takes over
+ * DF role for all entries
+ */
+ pim_mlag_up_local_reeval(true /*mlagd_send*/,
+ "peer_down");
+ else
+ /* XXX - when session comes up we need to wait for
+ * PEER_REPLAY_DONE before running re-election on
+ * local-mlag entries that are missing peer reference
+ */
+ pim_mlag_up_local_reeval(true /*mlagd_send*/,
+ "peer_up");
+ } else if (role_chg) {
+ /* MLAG role changed without a state change */
+ pim_mlag_up_local_reeval(true /*mlagd_send*/, "role_chg");
+ }
+}
+
+/* Process an MLAG_PEER_FRR_STATUS message: track whether the peer
+ * switch's zebra is up and re-run DF election on transitions.
+ */
+static void pim_mlag_process_peer_frr_state_change(struct mlag_frr_status msg)
+{
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(
+ "%s: msg dump: peer_frr_state: %s", __func__,
+ (msg.frr_state == MLAG_FRR_STATE_UP ? "UP" : "DOWN"));
+
+ if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP)) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: msg ignored mlagd process state down",
+ __func__);
+ return;
+ }
+ ++router->mlag_stats.msg.peer_zebra_status_updates;
+
+ /* evaluate the changes first */
+ if (msg.frr_state == MLAG_FRR_STATE_UP) {
+ if (!(router->mlag_flags & PIM_MLAGF_PEER_ZEBRA_UP)) {
+ router->mlag_flags |= PIM_MLAGF_PEER_ZEBRA_UP;
+ /* XXX - when peer zebra comes up we need to wait
+ * for some time to let the peer setup MDTs before
+ * relinquishing DF status
+ */
+ pim_mlag_up_local_reeval(true /*mlagd_send*/,
+ "zebra_up");
+ }
+ } else {
+ if (router->mlag_flags & PIM_MLAGF_PEER_ZEBRA_UP) {
+ ++router->mlag_stats.peer_zebra_downs;
+ router->mlag_flags &= ~PIM_MLAGF_PEER_ZEBRA_UP;
+ /* when a peer zebra goes down we assume DF role */
+ pim_mlag_up_local_reeval(true /*mlagd_send*/,
+ "zebra_down");
+ }
+ }
+}
+
+/* Process an MLAG_VXLAN_UPDATE message: cache the anycast and local
+ * VTEP IPs and notify the VXLAN component if the local VTEP changed.
+ * NOTE(review): msg->anycast_ip/local_ip appear to arrive in host byte
+ * order and are converted with htonl() here — confirm against the MLAG
+ * daemon's encoder.
+ */
+static void pim_mlag_process_vxlan_update(struct mlag_vxlan *msg)
+{
+ char addr_buf1[INET_ADDRSTRLEN];
+ char addr_buf2[INET_ADDRSTRLEN];
+ uint32_t local_ip;
+
+ if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP)) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: msg ignored mlagd process state down",
+ __func__);
+ return;
+ }
+
+ ++router->mlag_stats.msg.vxlan_updates;
+ router->anycast_vtep_ip.s_addr = htonl(msg->anycast_ip);
+ local_ip = htonl(msg->local_ip);
+ if (router->local_vtep_ip.s_addr != local_ip) {
+ router->local_vtep_ip.s_addr = local_ip;
+ pim_mlag_vxlan_state_update();
+ }
+
+ if (PIM_DEBUG_MLAG) {
+ inet_ntop(AF_INET, &router->local_vtep_ip,
+ addr_buf1, INET_ADDRSTRLEN);
+ inet_ntop(AF_INET, &router->anycast_vtep_ip,
+ addr_buf2, INET_ADDRSTRLEN);
+
+ zlog_debug("%s: msg dump: local-ip:%s, anycast-ip:%s",
+ __func__, addr_buf1, addr_buf2);
+ }
+}
+
+/* Process an MLAG_MROUTE_ADD received from the peer (via the local MLAG
+ * daemon): update stats and hand off to pim_mlag_up_peer_add().
+ */
+static void pim_mlag_process_mroute_add(struct mlag_mroute_add msg)
+{
+ if (PIM_DEBUG_MLAG) {
+ pim_sgaddr sg;
+
+ sg.grp.s_addr = ntohl(msg.group_ip);
+ sg.src.s_addr = ntohl(msg.source_ip);
+
+ zlog_debug(
+ "%s: msg dump: vrf_name: %s, s.ip: 0x%x, g.ip: 0x%x (%pSG) cost: %u",
+ __func__, msg.vrf_name, msg.source_ip, msg.group_ip,
+ &sg, msg.cost_to_rp);
+ zlog_debug(
+ "(%pSG)owner_id: %d, DR: %d, Dual active: %d, vrf_id: 0x%x intf_name: %s",
+ &sg, msg.owner_id, msg.am_i_dr, msg.am_i_dual_active,
+ msg.vrf_id, msg.intf_name);
+ }
+
+ if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP)) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: msg ignored mlagd process state down",
+ __func__);
+ return;
+ }
+
+ ++router->mlag_stats.msg.mroute_add_rx;
+
+ pim_mlag_up_peer_add(&msg);
+}
+
+/* Process an MLAG_MROUTE_DEL received from the peer (via the local MLAG
+ * daemon): update stats and hand off to pim_mlag_up_peer_del().
+ */
+static void pim_mlag_process_mroute_del(struct mlag_mroute_del msg)
+{
+ if (PIM_DEBUG_MLAG) {
+ pim_sgaddr sg;
+
+ sg.grp.s_addr = ntohl(msg.group_ip);
+ sg.src.s_addr = ntohl(msg.source_ip);
+ zlog_debug(
+ "%s: msg dump: vrf_name: %s, s.ip: 0x%x, g.ip: 0x%x(%pSG)",
+ __func__, msg.vrf_name, msg.source_ip, msg.group_ip,
+ &sg);
+ zlog_debug("(%pSG)owner_id: %d, vrf_id: 0x%x intf_name: %s",
+ &sg, msg.owner_id, msg.vrf_id, msg.intf_name);
+ }
+
+ if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP)) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: msg ignored mlagd process state down",
+ __func__);
+ return;
+ }
+
+ ++router->mlag_stats.msg.mroute_del_rx;
+
+ pim_mlag_up_peer_del(&msg);
+}
+
+/* Demultiplex an MLAG message received over the zebra channel: decode
+ * the common header, then decode and dispatch each message type
+ * (bulk variants loop msg_cnt times over the same stream).
+ * Returns 0 on success or the decoder's non-zero error code.
+ */
+int pim_zebra_mlag_handle_msg(int cmd, struct zclient *zclient,
+ uint16_t zapi_length, vrf_id_t vrf_id)
+{
+ struct stream *s = zclient->ibuf;
+ struct mlag_msg mlag_msg;
+ char buf[80];
+ int rc = 0;
+ size_t length;
+
+ rc = mlag_lib_decode_mlag_hdr(s, &mlag_msg, &length);
+ if (rc)
+ return (rc);
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: Received msg type: %s length: %d, bulk_cnt: %d",
+ __func__,
+ mlag_lib_msgid_to_str(mlag_msg.msg_type, buf,
+ sizeof(buf)),
+ mlag_msg.data_len, mlag_msg.msg_cnt);
+
+ switch (mlag_msg.msg_type) {
+ case MLAG_STATUS_UPDATE: {
+ struct mlag_status msg;
+
+ rc = mlag_lib_decode_mlag_status(s, &msg);
+ if (rc)
+ return (rc);
+ pim_mlag_process_mlagd_state_change(msg);
+ } break;
+ case MLAG_PEER_FRR_STATUS: {
+ struct mlag_frr_status msg;
+
+ rc = mlag_lib_decode_frr_status(s, &msg);
+ if (rc)
+ return (rc);
+ pim_mlag_process_peer_frr_state_change(msg);
+ } break;
+ case MLAG_VXLAN_UPDATE: {
+ struct mlag_vxlan msg;
+
+ rc = mlag_lib_decode_vxlan_update(s, &msg);
+ if (rc)
+ return rc;
+ pim_mlag_process_vxlan_update(&msg);
+ } break;
+ case MLAG_MROUTE_ADD: {
+ struct mlag_mroute_add msg;
+
+ rc = mlag_lib_decode_mroute_add(s, &msg, &length);
+ if (rc)
+ return (rc);
+ pim_mlag_process_mroute_add(msg);
+ } break;
+ case MLAG_MROUTE_DEL: {
+ struct mlag_mroute_del msg;
+
+ rc = mlag_lib_decode_mroute_del(s, &msg, &length);
+ if (rc)
+ return (rc);
+ pim_mlag_process_mroute_del(msg);
+ } break;
+ case MLAG_MROUTE_ADD_BULK: {
+ struct mlag_mroute_add msg;
+ int i;
+
+ for (i = 0; i < mlag_msg.msg_cnt; i++) {
+ rc = mlag_lib_decode_mroute_add(s, &msg, &length);
+ if (rc)
+ return (rc);
+ pim_mlag_process_mroute_add(msg);
+ }
+ } break;
+ case MLAG_MROUTE_DEL_BULK: {
+ struct mlag_mroute_del msg;
+ int i;
+
+ for (i = 0; i < mlag_msg.msg_cnt; i++) {
+ rc = mlag_lib_decode_mroute_del(s, &msg, &length);
+ if (rc)
+ return (rc);
+ pim_mlag_process_mroute_del(msg);
+ }
+ } break;
+ /* message types PIM neither expects nor handles */
+ case MLAG_MSG_NONE:
+ case MLAG_REGISTER:
+ case MLAG_DEREGISTER:
+ case MLAG_DUMP:
+ case MLAG_PIM_CFG_DUMP:
+ break;
+ }
+ return 0;
+}
+
+/****************End of PIM Message processing handler********************/
+
+/* zapi callback: the local MLAG daemon process came up; mark the local
+ * connection flag so subsequent MLAG messages are accepted.
+ */
+int pim_zebra_mlag_process_up(ZAPI_CALLBACK_ARGS)
+{
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: Received Process-Up from Mlag", __func__);
+
+ /*
+ * Incase of local MLAG restart, PIM needs to replay all the data
+ * since MLAG is empty.
+ */
+ router->connected_to_mlag = true;
+ router->mlag_flags |= PIM_MLAGF_LOCAL_CONN_UP;
+ return 0;
+}
+
+/* Reset all cached MLAG parameters and message statistics back to their
+ * "never connected" defaults.
+ */
+static void pim_mlag_param_reset(void)
+{
+ /* reset the cached params and stats */
+ router->mlag_flags &= ~(PIM_MLAGF_STATUS_RXED |
+ PIM_MLAGF_LOCAL_CONN_UP |
+ PIM_MLAGF_PEER_CONN_UP |
+ PIM_MLAGF_PEER_ZEBRA_UP);
+ router->local_vtep_ip.s_addr = INADDR_ANY;
+ router->anycast_vtep_ip.s_addr = INADDR_ANY;
+ router->mlag_role = MLAG_ROLE_NONE;
+ memset(&router->mlag_stats.msg, 0, sizeof(router->mlag_stats.msg));
+ router->peerlink_rif[0] = '\0';
+}
+
+/* zapi callback: the local MLAG daemon process went down; reset cached
+ * state, drop all peer references and re-run DF election.
+ */
+int pim_zebra_mlag_process_down(ZAPI_CALLBACK_ARGS)
+{
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: Received Process-Down from Mlag", __func__);
+
+ /* Local CLAG is down, reset peer data and forward the traffic if
+ * we are DR
+ */
+ if (router->mlag_flags & PIM_MLAGF_PEER_CONN_UP)
+ ++router->mlag_stats.peer_session_downs;
+ if (router->mlag_flags & PIM_MLAGF_PEER_ZEBRA_UP)
+ ++router->mlag_stats.peer_zebra_downs;
+ router->connected_to_mlag = false;
+ pim_mlag_param_reset();
+ /* on mlagd session down re-eval DF status */
+ pim_mlag_up_local_reeval(false /*mlagd_send*/, "mlagd_down");
+ /* flush all peer references */
+ pim_mlag_up_peer_del_all();
+ /* notify the vxlan component */
+ pim_mlag_vxlan_state_update();
+ return 0;
+}
+
+/* Event handler (scheduled by pim_mlag_register): send a client-register
+ * to zebra with a bitmask of the MLAG message types PIM wants to receive.
+ * The thread argument is unused.
+ */
+static void pim_mlag_register_handler(struct event *thread)
+{
+ uint32_t bit_mask = 0;
+
+ if (!zclient)
+ return;
+
+ SET_FLAG(bit_mask, (1 << MLAG_STATUS_UPDATE));
+ SET_FLAG(bit_mask, (1 << MLAG_MROUTE_ADD));
+ SET_FLAG(bit_mask, (1 << MLAG_MROUTE_DEL));
+ SET_FLAG(bit_mask, (1 << MLAG_DUMP));
+ SET_FLAG(bit_mask, (1 << MLAG_MROUTE_ADD_BULK));
+ SET_FLAG(bit_mask, (1 << MLAG_MROUTE_DEL_BULK));
+ SET_FLAG(bit_mask, (1 << MLAG_PIM_CFG_DUMP));
+ SET_FLAG(bit_mask, (1 << MLAG_VXLAN_UPDATE));
+ SET_FLAG(bit_mask, (1 << MLAG_PEER_FRR_STATUS));
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: Posting Client Register to MLAG mask: 0x%x",
+ __func__, bit_mask);
+
+ zclient_send_mlag_register(zclient, bit_mask);
+}
+
+/* Schedule registration with zebra for MLAG updates; idempotent — a
+ * second call while already registered is a no-op.
+ */
+void pim_mlag_register(void)
+{
+ if (router->mlag_process_register)
+ return;
+
+ router->mlag_process_register = true;
+
+ event_add_event(router->master, pim_mlag_register_handler, NULL, 0,
+ NULL);
+}
+
+/* Event handler (scheduled by pim_mlag_deregister): send a client
+ * de-register to zebra. The thread argument is unused.
+ */
+static void pim_mlag_deregister_handler(struct event *thread)
+{
+ if (!zclient)
+ return;
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: Posting Client De-Register to MLAG from PIM",
+ __func__);
+ router->connected_to_mlag = false;
+ zclient_send_mlag_deregister(zclient);
+}
+
+/* Schedule de-registration from MLAG updates, but only when no MLAG
+ * interfaces and no VXLAN user still need the channel.
+ */
+void pim_mlag_deregister(void)
+{
+ /* if somebody still interested in the MLAG channel skip de-reg */
+ if (router->pim_mlag_intf_cnt || pim_vxlan_do_mlag_reg())
+ return;
+
+ /* not registered; nothing do */
+ if (!router->mlag_process_register)
+ return;
+
+ router->mlag_process_register = false;
+
+ event_add_event(router->master, pim_mlag_deregister_handler, NULL, 0,
+ NULL);
+}
+
+/* Enable MLAG active-active mode on an interface; registers with zebra
+ * for MLAG updates when this is the first such interface router-wide.
+ */
+void pim_if_configure_mlag_dualactive(struct pim_interface *pim_ifp)
+{
+ if (!pim_ifp || !pim_ifp->pim || pim_ifp->activeactive == true)
+ return;
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: Configuring active-active on Interface: %s",
+ __func__, "NULL")
+
+ pim_ifp->activeactive = true;
+ /* NOTE(review): pim_ifp->pim was already verified non-NULL by the
+ * guard above, so this re-check is redundant
+ */
+ if (pim_ifp->pim)
+ pim_ifp->pim->inst_mlag_intf_cnt++;
+
+ router->pim_mlag_intf_cnt++;
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(
+ "%s: Total MLAG configured Interfaces on router: %d, Inst: %d",
+ __func__, router->pim_mlag_intf_cnt,
+ pim_ifp->pim->inst_mlag_intf_cnt);
+
+ if (router->pim_mlag_intf_cnt == 1) {
+ /*
+ * at least one Interface is configured for MLAG, send register
+ * to Zebra for receiving MLAG Updates
+ */
+ pim_mlag_register();
+ }
+}
+
+/* Disable MLAG active-active mode on an interface; de-registers from
+ * zebra and resets cached MLAG state when the last such interface goes.
+ */
+void pim_if_unconfigure_mlag_dualactive(struct pim_interface *pim_ifp)
+{
+ if (!pim_ifp || !pim_ifp->pim || pim_ifp->activeactive == false)
+ return;
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug("%s: UnConfiguring active-active on Interface: %s",
+ __func__, "NULL");
+
+ pim_ifp->activeactive = false;
+ pim_ifp->pim->inst_mlag_intf_cnt--;
+
+ router->pim_mlag_intf_cnt--;
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(
+ "%s: Total MLAG configured Interfaces on router: %d, Inst: %d",
+ __func__, router->pim_mlag_intf_cnt,
+ pim_ifp->pim->inst_mlag_intf_cnt);
+
+ if (router->pim_mlag_intf_cnt == 0) {
+ /*
+ * all the Interfaces are MLAG un-configured, post MLAG
+ * De-register to Zebra
+ */
+ pim_mlag_deregister();
+ pim_mlag_param_reset();
+ }
+}
+
+
+/* Per-VRF instance MLAG init: just zero the per-instance interface
+ * counter.
+ */
+void pim_instance_mlag_init(struct pim_instance *pim)
+{
+ if (!pim)
+ return;
+
+ pim->inst_mlag_intf_cnt = 0;
+}
+
+
+/* Per-VRF instance MLAG teardown: unconfigure active-active on every
+ * interface of the instance, then zero the counter.
+ */
+void pim_instance_mlag_terminate(struct pim_instance *pim)
+{
+ struct interface *ifp;
+
+ if (!pim)
+ return;
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp || pim_ifp->activeactive == false)
+ continue;
+
+ pim_if_unconfigure_mlag_dualactive(pim_ifp);
+ }
+ pim->inst_mlag_intf_cnt = 0;
+}
+
+/* Global MLAG teardown: release the TX stream and FIFO allocated in
+ * pim_mlag_init().
+ */
+void pim_mlag_terminate(void)
+{
+ stream_free(router->mlag_stream);
+ router->mlag_stream = NULL;
+ stream_fifo_free(router->mlag_fifo);
+ router->mlag_fifo = NULL;
+}
+
+/* Global MLAG init: reset cached parameters and allocate the message
+ * FIFO/stream used to talk to the MLAG zthread.
+ */
+void pim_mlag_init(void)
+{
+ pim_mlag_param_reset();
+ router->pim_mlag_intf_cnt = 0;
+ router->connected_to_mlag = false;
+ router->mlag_fifo = stream_fifo_new();
+ router->zpthread_mlag_write = NULL;
+ router->mlag_stream = stream_new(MLAG_BUF_LIMIT);
+}
diff --git a/pimd/pim_mlag.h b/pimd/pim_mlag.h
new file mode 100644
index 0000000..9cabd32
--- /dev/null
+++ b/pimd/pim_mlag.h
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This is an implementation of PIM MLAG Functionality
+ *
+ * Module name: PIM MLAG
+ *
+ * Author: sathesh Kumar karra <sathk@cumulusnetworks.com>
+ *
+ * Copyright (C) 2019 Cumulus Networks http://www.cumulusnetworks.com
+ */
+#ifndef __PIM_MLAG_H__
+#define __PIM_MLAG_H__
+
+#include "zclient.h"
+#include "mlag.h"
+#include "pim_iface.h"
+
+/* MLAG is only implemented for IPv4 PIM; the IPv6 build gets no-op
+ * inline stubs below.
+ */
+#if PIM_IPV == 4
+extern void pim_mlag_init(void);
+extern void pim_mlag_terminate(void);
+extern void pim_instance_mlag_init(struct pim_instance *pim);
+extern void pim_instance_mlag_terminate(struct pim_instance *pim);
+extern void pim_if_configure_mlag_dualactive(struct pim_interface *pim_ifp);
+extern void pim_if_unconfigure_mlag_dualactive(struct pim_interface *pim_ifp);
+extern int pim_zebra_mlag_process_up(ZAPI_CALLBACK_ARGS);
+extern int pim_zebra_mlag_process_down(ZAPI_CALLBACK_ARGS);
+extern int pim_zebra_mlag_handle_msg(ZAPI_CALLBACK_ARGS);
+
+/* pm_zpthread.c */
+extern int pim_mlag_signal_zpthread(void);
+extern void pim_zpthread_init(void);
+extern void pim_zpthread_terminate(void);
+
+extern void pim_mlag_register(void);
+extern void pim_mlag_deregister(void);
+extern void pim_mlag_up_local_add(struct pim_instance *pim,
+ struct pim_upstream *upstream);
+extern void pim_mlag_up_local_del(struct pim_instance *pim,
+ struct pim_upstream *upstream);
+extern bool pim_mlag_up_df_role_update(struct pim_instance *pim,
+ struct pim_upstream *up, bool is_df,
+ const char *reason);
+#else /* PIM_IPV == 4 */
+/* NOTE(review): the stub set below is not symmetric with the IPv4
+ * declarations (e.g. no pim_mlag_init/pim_mlag_deregister stubs) —
+ * presumably those are never called from IPv6-only code; confirm.
+ */
+static inline void pim_mlag_terminate(void)
+{
+}
+
+static inline void pim_instance_mlag_init(struct pim_instance *pim)
+{
+}
+
+static inline void pim_instance_mlag_terminate(struct pim_instance *pim)
+{
+}
+
+static inline void pim_if_configure_mlag_dualactive(
+ struct pim_interface *pim_ifp)
+{
+}
+
+static inline void pim_if_unconfigure_mlag_dualactive(
+ struct pim_interface *pim_ifp)
+{
+}
+
+static inline void pim_mlag_register(void)
+{
+}
+
+static inline void pim_mlag_up_local_add(struct pim_instance *pim,
+ struct pim_upstream *upstream)
+{
+}
+
+static inline void pim_mlag_up_local_del(struct pim_instance *pim,
+ struct pim_upstream *upstream)
+{
+}
+
+static inline bool pim_mlag_up_df_role_update(struct pim_instance *pim,
+ struct pim_upstream *up,
+ bool is_df, const char *reason)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c
new file mode 100644
index 0000000..7ea6ed9
--- /dev/null
+++ b/pimd/pim_mroute.c
@@ -0,0 +1,1366 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+#include "log.h"
+#include "privs.h"
+#include "if.h"
+#include "prefix.h"
+#include "vty.h"
+#include "plist.h"
+#include "sockopt.h"
+#include "lib_errors.h"
+#include "lib/network.h"
+
+#include "pimd.h"
+#include "pim_rpf.h"
+#include "pim_mroute.h"
+#include "pim_oil.h"
+#include "pim_str.h"
+#include "pim_time.h"
+#include "pim_iface.h"
+#include "pim_macro.h"
+#include "pim_rp.h"
+#include "pim_oil.h"
+#include "pim_register.h"
+#include "pim_ifchannel.h"
+#include "pim_zlookup.h"
+#include "pim_ssm.h"
+#include "pim_sock.h"
+#include "pim_vxlan.h"
+#include "pim_msg.h"
+
+static void mroute_read_on(struct pim_instance *pim);
+static int pim_upstream_mroute_update(struct channel_oil *c_oil,
+ const char *name);
+
+/* Enable or disable kernel multicast routing on pim->mroute_socket:
+ * selects the VRF table (non-default VRFs), issues MRT_INIT/MRT_DONE,
+ * turns on packet-info delivery, grows the receive buffer, sets the
+ * socket non-blocking and (Linux) registers for PIM kernel upcalls.
+ * Returns 0 on success, -1 on any fatal setsockopt failure.
+ */
+int pim_mroute_set(struct pim_instance *pim, int enable)
+{
+ int err;
+ int opt, data;
+ socklen_t data_len = sizeof(data);
+
+ /*
+ * We need to create the VRF table for the pim mroute_socket
+ */
+ if (enable && pim->vrf->vrf_id != VRF_DEFAULT) {
+ frr_with_privs (&pimd_privs) {
+
+ data = pim->vrf->data.l.table_id;
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO,
+ MRT_TABLE, &data, data_len);
+ if (err) {
+ zlog_warn(
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO, MRT_TABLE=%d): errno=%d: %s",
+ __FILE__, __func__, pim->mroute_socket,
+ data, errno, safe_strerror(errno));
+ return -1;
+ }
+ }
+ }
+
+ frr_with_privs (&pimd_privs) {
+ opt = enable ? MRT_INIT : MRT_DONE;
+ /*
+ * *BSD *cares* about what value we pass down
+ * here
+ */
+ data = 1;
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, opt, &data,
+ data_len);
+ if (err) {
+ zlog_warn(
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO,%s=%d): errno=%d: %s",
+ __FILE__, __func__, pim->mroute_socket,
+ enable ? "MRT_INIT" : "MRT_DONE", data, errno,
+ safe_strerror(errno));
+ return -1;
+ }
+ }
+
+#if defined(HAVE_IP_PKTINFO)
+ if (enable) {
+ /* Linux and Solaris IP_PKTINFO */
+ data = 1;
+ if (setsockopt(pim->mroute_socket, PIM_IPPROTO, IP_PKTINFO,
+ &data, data_len)) {
+ zlog_warn(
+ "Could not set IP_PKTINFO on socket fd=%d: errno=%d: %s",
+ pim->mroute_socket, errno,
+ safe_strerror(errno));
+ }
+ }
+#endif
+
+#if PIM_IPV == 6
+ if (enable) {
+ /* Linux and Solaris IPV6_PKTINFO */
+ data = 1;
+ if (setsockopt(pim->mroute_socket, PIM_IPPROTO,
+ IPV6_RECVPKTINFO, &data, data_len)) {
+ zlog_warn(
+ "Could not set IPV6_RECVPKTINFO on socket fd=%d: errno=%d: %s",
+ pim->mroute_socket, errno,
+ safe_strerror(errno));
+ }
+ }
+#endif
+ /* large receive buffer: NOCACHE upcalls can burst */
+ setsockopt_so_recvbuf(pim->mroute_socket, 1024 * 1024 * 8);
+
+ if (set_nonblocking(pim->mroute_socket) < 0) {
+ zlog_warn(
+ "Could not set non blocking on socket fd=%d: errno=%d: %s",
+ pim->mroute_socket, errno, safe_strerror(errno));
+ return -1;
+ }
+
+ if (enable) {
+#if defined linux
+ int upcalls = GMMSG_WRVIFWHOLE;
+ opt = MRT_PIM;
+
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, opt, &upcalls,
+ sizeof(upcalls));
+ if (err) {
+ zlog_warn(
+ "Failure to register for VIFWHOLE and WRONGVIF upcalls %d %s",
+ errno, safe_strerror(errno));
+ return -1;
+ }
+#else
+ zlog_warn(
+ "PIM-SM will not work properly on this platform, until the ability to receive the WRVIFWHOLE upcall");
+#endif
+ }
+
+ return 0;
+}
+
+/* kernel upcall message-type -> printable name, indexed by GMMSG_* */
+static const char *const gmmsgtype2str[GMMSG_WRVIFWHOLE + 1] = {
+ "<unknown_upcall?>", "NOCACHE", "WRONGVIF", "WHOLEPKT", "WRVIFWHOLE"};
+
+
+/* Handle a kernel NOCACHE upcall (data arrived with no matching MFC
+ * entry): validate the interface/RP/source, and if we are DR create the
+ * upstream state and install an MFC entry. Non-DR routers install a
+ * blackhole entry to quiet the kernel. Always returns 0.
+ */
+int pim_mroute_msg_nocache(int fd, struct interface *ifp, const kernmsg *msg)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+ struct pim_upstream *up;
+ pim_sgaddr sg;
+ bool desync = false;
+
+ memset(&sg, 0, sizeof(sg));
+ sg.src = msg->msg_im_src;
+ sg.grp = msg->msg_im_dst;
+
+
+ if (!pim_ifp || !pim_ifp->pim_enable) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug(
+ "%s: %s on interface, dropping packet to %pSG",
+ ifp->name,
+ !pim_ifp ? "Multicast not enabled"
+ : "PIM not enabled",
+ &sg);
+ return 0;
+ }
+
+ if (!pim_is_grp_ssm(pim_ifp->pim, sg.grp)) {
+ /* for ASM, check that we have enough information (i.e. path
+ * to RP) to make a decision on what to do with this packet.
+ *
+ * for SSM, this is meaningless, everything is join-driven,
+ * and for NOCACHE we need to install an empty OIL MFC entry
+ * so the kernel doesn't keep nagging us.
+ */
+ struct pim_rpf *rpg;
+
+ rpg = RP(pim_ifp->pim, msg->msg_im_dst);
+ if (!rpg) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug("%s: no RPF for packet to %pSG",
+ ifp->name, &sg);
+ return 0;
+ }
+ if (pim_rpf_addr_is_inaddr_any(rpg)) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug("%s: null RPF for packet to %pSG",
+ ifp->name, &sg);
+ return 0;
+ }
+ }
+
+ /*
+ * If we've received a multicast packet that isn't connected to
+ * us
+ */
+ if (!pim_if_connected_to_source(ifp, msg->msg_im_src)) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug(
+ "%s: incoming packet to %pSG from non-connected source",
+ ifp->name, &sg);
+ return 0;
+ }
+
+ if (!(PIM_I_am_DR(pim_ifp))) {
+ /* unlike the other debug messages, this one is further in the
+ * "normal operation" category and thus under _DETAIL
+ */
+ if (PIM_DEBUG_MROUTE_DETAIL)
+ zlog_debug(
+ "%s: not DR on interface, not forwarding traffic for %pSG",
+ ifp->name, &sg);
+
+ /*
+ * We are not the DR, but we are still receiving packets
+ * Let's blackhole those packets for the moment
+ * As that they will be coming up to the cpu
+ * and causing us to consider them.
+ *
+ * This *will* create a dangling channel_oil
+ * that I see no way to get rid of. Just noting
+ * this for future reference.
+ */
+ up = pim_upstream_find_or_add(
+ &sg, ifp, PIM_UPSTREAM_FLAG_MASK_SRC_NOCACHE, __func__);
+ pim_upstream_mroute_add(up->channel_oil, __func__);
+
+ return 0;
+ }
+
+ up = pim_upstream_find_or_add(&sg, ifp, PIM_UPSTREAM_FLAG_MASK_FHR,
+ __func__);
+ /* NOCACHE while we believe the MFC entry is installed means the
+ * kernel and pimd are out of sync; remember to reinstall below
+ */
+ if (up->channel_oil->installed) {
+ zlog_warn(
+ "%s: NOCACHE for %pSG, MFC entry disappeared - reinstalling",
+ ifp->name, &sg);
+ desync = true;
+ }
+
+ /*
+ * I moved this debug till after the actual add because
+ * I want to take advantage of the up->sg_str being filled in.
+ */
+ if (PIM_DEBUG_MROUTE) {
+ zlog_debug("%s: Adding a Route %s for WHOLEPKT consumption",
+ __func__, up->sg_str);
+ }
+
+ PIM_UPSTREAM_FLAG_SET_SRC_STREAM(up->flags);
+ pim_upstream_keep_alive_timer_start(up, pim_ifp->pim->keep_alive_time);
+
+ up->channel_oil->cc.pktcnt++;
+ // resolve mfcc_parent prior to mroute_add in channel_add_oif
+ if (up->rpf.source_nexthop.interface &&
+ *oil_incoming_vif(up->channel_oil) >= MAXVIFS) {
+ pim_upstream_mroute_iif_update(up->channel_oil, __func__);
+ }
+ pim_register_join(up);
+ /* if we have receiver, inherit from parent */
+ pim_upstream_inherited_olist_decide(pim_ifp->pim, up);
+
+ /* we just got NOCACHE from the kernel, so... MFC is not in the
+ * kernel for some reason or another. Try installing again.
+ */
+ if (desync)
+ pim_upstream_mroute_update(up->channel_oil, __func__);
+ return 0;
+}
+
+/* Handle a kernel WHOLEPKT upcall (full data packet delivered to the
+ * pimreg interface): locate/create upstream state and, as FHR/DR,
+ * register-encapsulate the packet toward the RP. Always returns 0.
+ * NOTE(review): pim_ifp (ifp->info) is dereferenced without a NULL
+ * check — presumably WHOLEPKT only arrives on PIM-enabled interfaces;
+ * confirm.
+ */
+int pim_mroute_msg_wholepkt(int fd, struct interface *ifp, const char *buf,
+ size_t len)
+{
+ struct pim_interface *pim_ifp;
+ pim_sgaddr sg;
+ struct pim_rpf *rpg;
+ const ipv_hdr *ip_hdr;
+ struct pim_upstream *up;
+
+ pim_ifp = ifp->info;
+
+ ip_hdr = (const ipv_hdr *)buf;
+
+ memset(&sg, 0, sizeof(sg));
+ sg.src = IPV_SRC(ip_hdr);
+ sg.grp = IPV_DST(ip_hdr);
+
+ up = pim_upstream_find(pim_ifp->pim, &sg);
+ if (!up) {
+ pim_sgaddr star = sg;
+ star.src = PIMADDR_ANY;
+
+ /* fall back to the (*,G) entry to decide if we may act
+ * as last-hop router for this source
+ */
+ up = pim_upstream_find(pim_ifp->pim, &star);
+
+ if (up && PIM_UPSTREAM_FLAG_TEST_CAN_BE_LHR(up->flags)) {
+ up = pim_upstream_add(pim_ifp->pim, &sg, ifp,
+ PIM_UPSTREAM_FLAG_MASK_SRC_LHR,
+ __func__, NULL);
+ if (!up) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug(
+ "%s: Unable to create upstream information for %pSG",
+ __func__, &sg);
+ return 0;
+ }
+ pim_upstream_keep_alive_timer_start(
+ up, pim_ifp->pim->keep_alive_time);
+ pim_upstream_inherited_olist(pim_ifp->pim, up);
+ pim_upstream_update_join_desired(pim_ifp->pim, up);
+
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug("%s: Creating %s upstream on LHR",
+ __func__, up->sg_str);
+ return 0;
+ }
+ if (PIM_DEBUG_MROUTE_DETAIL) {
+ zlog_debug(
+ "%s: Unable to find upstream channel WHOLEPKT%pSG",
+ __func__, &sg);
+ }
+ return 0;
+ }
+
+ if (!up->rpf.source_nexthop.interface) {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: up %s RPF is not present", __func__,
+ up->sg_str);
+ return 0;
+ }
+
+ pim_ifp = up->rpf.source_nexthop.interface->info;
+
+ rpg = pim_ifp ? RP(pim_ifp->pim, sg.grp) : NULL;
+
+ if ((pim_rpf_addr_is_inaddr_any(rpg)) || (!pim_ifp) ||
+ (!(PIM_I_am_DR(pim_ifp)))) {
+ if (PIM_DEBUG_MROUTE) {
+ zlog_debug("%s: Failed Check send packet", __func__);
+ }
+ return 0;
+ }
+
+ /*
+ * Forward as a register only while the register-suppress timer
+ * is NOT running (t_rs_timer is set when the RP sent us a
+ * Register-Stop)
+ */
+ if (!up->t_rs_timer) {
+ if (pim_is_grp_ssm(pim_ifp->pim, sg.grp)) {
+ if (PIM_DEBUG_PIM_REG)
+ zlog_debug(
+ "%pSG register forward skipped as group is SSM",
+ &sg);
+ return 0;
+ }
+
+ if (!PIM_UPSTREAM_FLAG_TEST_FHR(up->flags)) {
+ if (PIM_DEBUG_PIM_REG)
+ zlog_debug(
+ "%s register forward skipped, not FHR",
+ up->sg_str);
+ return 0;
+ }
+
+ pim_register_send((uint8_t *)buf + sizeof(ipv_hdr),
+ len - sizeof(ipv_hdr),
+ pim_ifp->primary_address, rpg, 0, up);
+ }
+ return 0;
+}
+
+/* Handle a kernel WRONGVIF upcall (data arrived on an interface that is
+ * not the MFC entry's iif): per RFC 4601 this triggers an Assert(S,G)
+ * on the receiving interface. Returns 0 on success, a distinct
+ * negative code for each validation failure.
+ */
+int pim_mroute_msg_wrongvif(int fd, struct interface *ifp, const kernmsg *msg)
+{
+ struct pim_ifchannel *ch;
+ struct pim_interface *pim_ifp;
+ pim_sgaddr sg;
+
+ memset(&sg, 0, sizeof(sg));
+ sg.src = msg->msg_im_src;
+ sg.grp = msg->msg_im_dst;
+
+ /*
+ Send Assert(S,G) on iif as response to WRONGVIF kernel upcall.
+
+ RFC 4601 4.8.2. PIM-SSM-Only Routers
+
+ iif is the incoming interface of the packet.
+ if (iif is in inherited_olist(S,G)) {
+ send Assert(S,G) on iif
+ }
+ */
+
+ if (!ifp) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug(
+ "%s: WRONGVIF (S,G)=%pSG could not find input interface for input_vif_index=%d",
+ __func__, &sg, msg->msg_im_vif);
+ return -1;
+ }
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug(
+ "%s: WRONGVIF (S,G)=%pSG multicast not enabled on interface %s",
+ __func__, &sg, ifp->name);
+ return -2;
+ }
+
+ /* look for an (S,G) channel first, then fall back to (*,G) */
+ ch = pim_ifchannel_find(ifp, &sg);
+ if (!ch) {
+ pim_sgaddr star_g = sg;
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug(
+ "%s: WRONGVIF (S,G)=%pSG could not find channel on interface %s",
+ __func__, &sg, ifp->name);
+
+ star_g.src = PIMADDR_ANY;
+ ch = pim_ifchannel_find(ifp, &star_g);
+ if (!ch) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug(
+ "%s: WRONGVIF (*,G)=%pSG could not find channel on interface %s",
+ __func__, &star_g, ifp->name);
+ return -3;
+ }
+ }
+
+ /*
+ RFC 4601: 4.6.1. (S,G) Assert Message State Machine
+
+ Transitions from NoInfo State
+
+ An (S,G) data packet arrives on interface I, AND
+ CouldAssert(S,G,I)==TRUE An (S,G) data packet arrived on an
+ downstream interface that is in our (S,G) outgoing interface
+ list. We optimistically assume that we will be the assert
+ winner for this (S,G), and so we transition to the "I am Assert
+ Winner" state and perform Actions A1 (below), which will
+ initiate the assert negotiation for (S,G).
+ */
+
+ if (ch->ifassert_state != PIM_IFASSERT_NOINFO) {
+ if (PIM_DEBUG_MROUTE) {
+ zlog_debug(
+ "%s: WRONGVIF (S,G)=%s channel is not on Assert NoInfo state for interface %s",
+ __func__, ch->sg_str, ifp->name);
+ }
+ return -4;
+ }
+
+ if (!PIM_IF_FLAG_TEST_COULD_ASSERT(ch->flags)) {
+ if (PIM_DEBUG_MROUTE) {
+ zlog_debug(
+ "%s: WRONGVIF (S,G)=%s interface %s is not downstream for channel",
+ __func__, ch->sg_str, ifp->name);
+ }
+ return -5;
+ }
+
+ if (assert_action_a1(ch)) {
+ if (PIM_DEBUG_MROUTE) {
+ zlog_debug(
+ "%s: WRONGVIF (S,G)=%s assert_action_a1 failure on interface %s",
+ __func__, ch->sg_str, ifp->name);
+ }
+ return -6;
+ }
+
+ return 0;
+}
+
+/*
+ * Kernel WRVIFWHOLE upcall handler: the kernel hands us the whole IP
+ * packet for an (S,G) that arrived on an unexpected vif.  Depending on
+ * this router's role the handler either ignores the packet (pimreg,
+ * *,G tree), sends a Register-Stop / sets the SPT bit, or creates an
+ * upstream entry (FHR for a directly-connected source, or NOCACHE-style
+ * placeholder otherwise).
+ *
+ * buf points at the raw IP header (ipv_hdr) of length len; fd is the
+ * mroute socket, forwarded to pim_mroute_msg_wholepkt() where needed.
+ * Returns 0 on success/ignore, negative on early error.
+ */
+int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf,
+ size_t len)
+{
+ const ipv_hdr *ip_hdr = (const ipv_hdr *)buf;
+ struct pim_interface *pim_ifp;
+ struct pim_instance *pim;
+ struct pim_ifchannel *ch;
+ struct pim_upstream *up;
+ pim_sgaddr star_g;
+ pim_sgaddr sg;
+
+ pim_ifp = ifp->info;
+
+ memset(&sg, 0, sizeof(sg));
+ sg.src = IPV_SRC(ip_hdr);
+ sg.grp = IPV_DST(ip_hdr);
+
+ /* an existing ifchannel means PIM state is already being built
+ * for this (S,G) on the interface; nothing for us to do
+ */
+ ch = pim_ifchannel_find(ifp, &sg);
+ if (ch) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug(
+ "WRVIFWHOLE (S,G)=%s found ifchannel on interface %s",
+ ch->sg_str, ifp->name);
+ return -1;
+ }
+
+ star_g = sg;
+ star_g.src = PIMADDR_ANY;
+
+ pim = pim_ifp->pim;
+ /*
+ * If the incoming interface is the pimreg, then
+ * we know the callback is associated with a pim register
+ * packet and there is nothing to do here as that
+ * normal pim processing will see the packet and allow
+ * us to do the right thing.
+ */
+ if (ifp == pim->regiface) {
+ return 0;
+ }
+
+ up = pim_upstream_find(pim_ifp->pim, &sg);
+ if (up) {
+ struct pim_upstream *parent;
+ struct pim_nexthop source;
+ struct pim_rpf *rpf = RP(pim_ifp->pim, sg.grp);
+
+ /* No RPF or No RPF interface or No mcast on RPF interface */
+ if (!rpf || !rpf->source_nexthop.interface ||
+ !rpf->source_nexthop.interface->info)
+ return 0;
+
+ /*
+ * If we have received a WRVIFWHOLE and are at this
+ * point, we could be receiving the packet on the *,G
+ * tree, let's check and if so we can safely drop
+ * it.
+ */
+ parent = pim_upstream_find(pim_ifp->pim, &star_g);
+ if (parent && parent->rpf.source_nexthop.interface == ifp)
+ return 0;
+
+ /* from here on pim_ifp refers to the RPF interface
+ * towards the RP, not the receiving interface
+ */
+ pim_ifp = rpf->source_nexthop.interface->info;
+
+ memset(&source, 0, sizeof(source));
+ /*
+ * If we are the fhr that means we are getting a callback during
+ * the pimreg period, so I believe we can ignore this packet
+ */
+ if (!PIM_UPSTREAM_FLAG_TEST_FHR(up->flags)) {
+ /*
+ * No if channel, but upstream we are at the RP.
+ *
+ * This could be a anycast RP too and we may
+ * not have received a register packet from
+ * the source here at all. So gracefully
+ * bow out of doing a nexthop lookup and
+ * setting the SPTBIT to true
+ */
+ if (!(pim_addr_is_any(up->upstream_register)) &&
+ pim_nexthop_lookup(pim_ifp->pim, &source,
+ up->upstream_register, 0)) {
+ pim_register_stop_send(source.interface, &sg,
+ pim_ifp->primary_address,
+ up->upstream_register);
+ up->sptbit = PIM_UPSTREAM_SPTBIT_TRUE;
+ }
+
+ pim_upstream_inherited_olist(pim_ifp->pim, up);
+ if (!up->channel_oil->installed)
+ pim_upstream_mroute_add(up->channel_oil,
+ __func__);
+ } else {
+ if (I_am_RP(pim_ifp->pim, up->sg.grp)) {
+ if (pim_nexthop_lookup(pim_ifp->pim, &source,
+ up->upstream_register,
+ 0))
+ pim_register_stop_send(
+ source.interface, &sg,
+ pim_ifp->primary_address,
+ up->upstream_register);
+ up->sptbit = PIM_UPSTREAM_SPTBIT_TRUE;
+ } else {
+ /*
+ * At this point pimd is connected to
+ * the source, it has a parent, we are not
+ * the RP and the SPTBIT should be set
+ * since we know *the* S,G is on the SPT.
+ * The first time this happens, let's cause
+ * an immediate join to go out so that
+ * the RP can trim this guy immediately
+ * if necessary, instead of waiting
+ * one join/prune send cycle
+ */
+ if (up->sptbit != PIM_UPSTREAM_SPTBIT_TRUE &&
+ up->parent &&
+ up->rpf.source_nexthop.interface !=
+ up->parent->rpf.source_nexthop
+ .interface) {
+ up->sptbit = PIM_UPSTREAM_SPTBIT_TRUE;
+ pim_jp_agg_single_upstream_send(
+ &up->parent->rpf, up->parent,
+ true);
+ }
+ }
+ pim_upstream_keep_alive_timer_start(
+ up, pim_ifp->pim->keep_alive_time);
+ pim_upstream_inherited_olist(pim_ifp->pim, up);
+ pim_mroute_msg_wholepkt(fd, ifp, buf, len);
+ }
+ return 0;
+ }
+
+ /* no upstream entry exists yet: create one */
+ pim_ifp = ifp->info;
+ if (pim_if_connected_to_source(ifp, sg.src)) {
+ /* we are the First Hop Router for this source */
+ up = pim_upstream_add(pim_ifp->pim, &sg, ifp,
+ PIM_UPSTREAM_FLAG_MASK_FHR, __func__,
+ NULL);
+ if (!up) {
+ if (PIM_DEBUG_MROUTE)
+ /* NOTE(review): "WRONGVIF%s" renders as
+ * "WRONGVIF<ifname>" - a separator between
+ * the two seems intended; confirm upstream
+ */
+ zlog_debug(
+ "%pSG: WRONGVIF%s unable to create upstream on interface",
+ &sg, ifp->name);
+ return -2;
+ }
+ PIM_UPSTREAM_FLAG_SET_SRC_STREAM(up->flags);
+ pim_upstream_keep_alive_timer_start(
+ up, pim_ifp->pim->keep_alive_time);
+ up->channel_oil->cc.pktcnt++;
+ pim_register_join(up);
+ pim_upstream_inherited_olist(pim_ifp->pim, up);
+ if (!up->channel_oil->installed)
+ pim_upstream_mroute_add(up->channel_oil, __func__);
+
+ // Send the packet to the RP
+ pim_mroute_msg_wholepkt(fd, ifp, buf, len);
+ } else {
+ up = pim_upstream_add(pim_ifp->pim, &sg, ifp,
+ PIM_UPSTREAM_FLAG_MASK_SRC_NOCACHE,
+ __func__, NULL);
+ if (!up->channel_oil->installed)
+ pim_upstream_mroute_add(up->channel_oil, __func__);
+ }
+
+ return 0;
+}
+
+#if PIM_IPV == 4
+/*
+ * Deliver an IGMP packet received on the mroute raw socket to the
+ * matching gm_sock of the receiving interface.  The interface is
+ * resolved from ifindex, the source is validated as on-link (unless
+ * it is the unspecified address), and the packet is dropped silently
+ * (return 0) on any mismatch.  Always returns 0.
+ */
+static int process_igmp_packet(struct pim_instance *pim, const char *buf,
+ size_t buf_size, ifindex_t ifindex)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ struct in_addr ifaddr;
+ struct gm_sock *igmp;
+ const struct prefix *connected_src;
+ const struct ip *ip_hdr = (const struct ip *)buf;
+
+ /* We have the IP packet but we do not know which interface this
+ * packet was
+ * received on. Find the interface that is on the same subnet as
+ * the source
+ * of the IP packet.
+ */
+ ifp = if_lookup_by_index(ifindex, pim->vrf->vrf_id);
+
+ if (!ifp || !ifp->info)
+ return 0;
+
+ connected_src = pim_if_connected_to_source(ifp, ip_hdr->ip_src);
+
+ /* 0.0.0.0 sources are allowed through (e.g. hosts that have not
+ * yet acquired an address); anything else must be on-link
+ */
+ if (!connected_src && !pim_addr_is_any(ip_hdr->ip_src)) {
+ if (PIM_DEBUG_GM_PACKETS) {
+ zlog_debug(
+ "Recv IGMP packet on interface: %s from a non-connected source: %pI4",
+ ifp->name, &ip_hdr->ip_src);
+ }
+ return 0;
+ }
+
+ pim_ifp = ifp->info;
+ /* for an any-source packet fall back to the interface primary
+ * address to locate the IGMP socket
+ */
+ ifaddr = connected_src ? connected_src->u.prefix4
+ : pim_ifp->primary_address;
+ igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list, ifaddr);
+
+ if (PIM_DEBUG_GM_PACKETS) {
+ zlog_debug(
+ "%s(%s): igmp kernel upcall on %s(%p) for %pI4 -> %pI4",
+ __func__, pim->vrf->name, ifp->name, igmp,
+ &ip_hdr->ip_src, &ip_hdr->ip_dst);
+ }
+ if (igmp)
+ pim_igmp_packet(igmp, (char *)buf, buf_size);
+ else if (PIM_DEBUG_GM_PACKETS)
+ zlog_debug(
+ "No IGMP socket on interface: %s with connected source: %pI4",
+ ifp->name, &ifaddr);
+
+ return 0;
+}
+#endif
+
+/*
+ * Demultiplex one datagram read from the mroute socket.
+ *
+ * IPv4: a real IP protocol number means an actual packet (IGMP is
+ * processed, anything else is only logged); protocol 0 marks a kernel
+ * upcall (struct igmpmsg overlays the header).  IPv6: a zero IP
+ * version nibble marks the upcall (struct mrt6msg).  Note the
+ * preprocessor layout below: the IPv4 "} else {" pairs with the IPv6
+ * "if" so the upcall body is shared between both families.
+ *
+ * Upcalls are dispatched to the per-type handlers; unknown types and
+ * non-upcall packets return 0.
+ */
+int pim_mroute_msg(struct pim_instance *pim, const char *buf, size_t buf_size,
+ ifindex_t ifindex)
+{
+ struct interface *ifp;
+ const ipv_hdr *ip_hdr;
+ const kernmsg *msg;
+
+ /* runt datagram: too short to even carry an IP header */
+ if (buf_size < (int)sizeof(ipv_hdr))
+ return 0;
+
+ ip_hdr = (const ipv_hdr *)buf;
+
+#if PIM_IPV == 4
+ if (ip_hdr->ip_p == IPPROTO_IGMP) {
+ process_igmp_packet(pim, buf, buf_size, ifindex);
+ } else if (ip_hdr->ip_p) {
+ if (PIM_DEBUG_MROUTE_DETAIL) {
+ zlog_debug(
+ "%s: no kernel upcall proto=%d src: %pI4 dst: %pI4 msg_size=%ld",
+ __func__, ip_hdr->ip_p, &ip_hdr->ip_src,
+ &ip_hdr->ip_dst, (long int)buf_size);
+ }
+
+ } else {
+#else
+
+ if ((ip_hdr->ip6_vfc & 0xf) == 0) {
+#endif
+ msg = (const kernmsg *)buf;
+
+ ifp = pim_if_find_by_vif_index(pim, msg->msg_im_vif);
+
+ if (!ifp)
+ return 0;
+ if (PIM_DEBUG_MROUTE) {
+#if PIM_IPV == 4
+ zlog_debug(
+ "%s: pim kernel upcall %s type=%d ip_p=%d from fd=%d for (S,G)=(%pI4,%pI4) on %s vifi=%d size=%ld",
+ __func__, gmmsgtype2str[msg->msg_im_msgtype],
+ msg->msg_im_msgtype, ip_hdr->ip_p,
+ pim->mroute_socket, &msg->msg_im_src,
+ &msg->msg_im_dst, ifp->name, msg->msg_im_vif,
+ (long int)buf_size);
+#else
+ zlog_debug(
+ "%s: pim kernel upcall %s type=%d ip_p=%d from fd=%d for (S,G)=(%pI6,%pI6) on %s vifi=%d size=%ld",
+ __func__, gmmsgtype2str[msg->msg_im_msgtype],
+ msg->msg_im_msgtype, ip_hdr->ip6_nxt,
+ pim->mroute_socket, &msg->msg_im_src,
+ &msg->msg_im_dst, ifp->name, msg->msg_im_vif,
+ (long int)buf_size);
+#endif
+ }
+
+ switch (msg->msg_im_msgtype) {
+ case GMMSG_WRONGVIF:
+ return pim_mroute_msg_wrongvif(pim->mroute_socket, ifp,
+ msg);
+ case GMMSG_NOCACHE:
+ return pim_mroute_msg_nocache(pim->mroute_socket, ifp,
+ msg);
+ case GMMSG_WHOLEPKT:
+ return pim_mroute_msg_wholepkt(pim->mroute_socket, ifp,
+ (const char *)msg,
+ buf_size);
+ case GMMSG_WRVIFWHOLE:
+ return pim_mroute_msg_wrvifwhole(pim->mroute_socket,
+ ifp, (const char *)msg,
+ buf_size);
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Read event callback for the mroute socket: drain pending datagrams
+ * and feed each to pim_mroute_msg().  Every router->packet_process
+ * messages the loop yields back to the event loop (via the shared
+ * static counter) so one busy socket cannot starve other events.
+ * The read event is always re-armed before returning.
+ */
+static void mroute_read(struct event *t)
+{
+ struct pim_instance *pim;
+ static long long count;
+ char buf[10000];
+ int cont = 1;
+ int rd;
+ ifindex_t ifindex;
+ pim = EVENT_ARG(t);
+
+ while (cont) {
+ rd = pim_socket_recvfromto(pim->mroute_socket, (uint8_t *)buf,
+ sizeof(buf), NULL, NULL, NULL, NULL,
+ &ifindex);
+ if (rd <= 0) {
+ /* EINTR: retry; EWOULDBLOCK/EAGAIN: socket
+ * drained, re-arm; anything else is logged
+ * (NOTE(review): rd == 0 also lands here and is
+ * interpreted via errno)
+ */
+ if (errno == EINTR)
+ continue;
+ if (errno == EWOULDBLOCK || errno == EAGAIN)
+ break;
+
+ zlog_warn(
+ "%s: failure reading rd=%d: fd=%d: errno=%d: %s",
+ __func__, rd, pim->mroute_socket, errno,
+ safe_strerror(errno));
+ goto done;
+ }
+
+ pim_mroute_msg(pim, buf, rd, ifindex);
+
+ count++;
+ if (count % router->packet_process == 0)
+ cont = 0;
+ }
+/* Keep reading */
+done:
+ mroute_read_on(pim);
+
+ return;
+}
+
+/* (Re)arm the read event on the mroute socket for this pim instance. */
+static void mroute_read_on(struct pim_instance *pim)
+{
+ event_add_read(router->master, mroute_read, pim, pim->mroute_socket,
+ &pim->thread);
+}
+
+/* Cancel the pending mroute-socket read event, if any. */
+static void mroute_read_off(struct pim_instance *pim)
+{
+ EVENT_OFF(pim->thread);
+}
+
+/*
+ * Create and configure the kernel mroute control socket for a pim
+ * instance: raw IGMP (v4) / ICMPv6 (v6) socket opened with elevated
+ * privileges, bound to the VRF device where supported, MRT_INIT'ed
+ * via pim_mroute_set(), and hooked into the event loop.
+ *
+ * Returns 0 on success, -2 on socket creation failure, -3 on
+ * VRF-bind or MRT_INIT failure (socket closed again in those cases).
+ */
+int pim_mroute_socket_enable(struct pim_instance *pim)
+{
+ int fd;
+
+ frr_with_privs(&pimd_privs) {
+
+#if PIM_IPV == 4
+ fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
+#else
+ fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
+#endif
+ if (fd < 0) {
+ zlog_warn("Could not create mroute socket: errno=%d: %s",
+ errno,
+ safe_strerror(errno));
+ return -2;
+ }
+
+#if PIM_IPV == 6
+ struct icmp6_filter filter[1];
+ int ret;
+
+ /* Unlike IPv4, this socket is not used for MLD, so just drop
+ * everything with an empty ICMP6 filter. Otherwise we get
+ * all kinds of garbage here, possibly even non-multicast
+ * related ICMPv6 traffic (e.g. ping)
+ *
+ * (mroute kernel upcall "packets" are injected directly on the
+ * socket, this sockopt -or any other- has no effect on them)
+ */
+ ICMP6_FILTER_SETBLOCKALL(filter);
+ ret = setsockopt(fd, SOL_ICMPV6, ICMP6_FILTER, filter,
+ sizeof(filter));
+ if (ret)
+ zlog_err(
+ "(VRF %s) failed to set mroute control filter: %m",
+ pim->vrf->name);
+#endif
+
+#ifdef SO_BINDTODEVICE
+ /* non-default VRFs: scope the socket to the VRF device */
+ if (pim->vrf->vrf_id != VRF_DEFAULT
+ && setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
+ pim->vrf->name, strlen(pim->vrf->name))) {
+ zlog_warn("Could not setsockopt SO_BINDTODEVICE: %s",
+ safe_strerror(errno));
+ close(fd);
+ return -3;
+ }
+#endif
+
+ }
+
+ pim->mroute_socket = fd;
+ if (pim_mroute_set(pim, 1)) {
+ zlog_warn(
+ "Could not enable mroute on socket fd=%d: errno=%d: %s",
+ fd, errno, safe_strerror(errno));
+ close(fd);
+ pim->mroute_socket = -1;
+ return -3;
+ }
+
+ pim->mroute_socket_creation = pim_time_monotonic_sec();
+
+ mroute_read_on(pim);
+
+ return 0;
+}
+
+/*
+ * Tear down the mroute control socket: MRT_DONE via pim_mroute_set(),
+ * close the fd, cancel the pending read event, and mark the instance
+ * socket invalid (-1).  Returns 0 on success, -2 if the kernel refused
+ * the disable, -3 if close() failed.
+ */
+int pim_mroute_socket_disable(struct pim_instance *pim)
+{
+ if (pim_mroute_set(pim, 0)) {
+ zlog_warn(
+ "Could not disable mroute on socket fd=%d: errno=%d: %s",
+ pim->mroute_socket, errno, safe_strerror(errno));
+ return -2;
+ }
+
+ if (close(pim->mroute_socket)) {
+ zlog_warn("Failure closing mroute socket: fd=%d errno=%d: %s",
+ pim->mroute_socket, errno, safe_strerror(errno));
+ return -3;
+ }
+
+ mroute_read_off(pim);
+ pim->mroute_socket = -1;
+
+ return 0;
+}
+
+/*
+ For each network interface (e.g., physical or a virtual tunnel) that
+ would be used for multicast forwarding, a corresponding multicast
+ interface must be added to the kernel.
+ */
+int pim_mroute_add_vif(struct interface *ifp, pim_addr ifaddr,
+ unsigned char flags)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+ pim_vifctl vc;
+ int err;
+
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug("%s: Add Vif %d (%s[%s])", __func__,
+ pim_ifp->mroute_vif_index, ifp->name,
+ pim_ifp->pim->vrf->name);
+
+ memset(&vc, 0, sizeof(vc));
+ vc.vc_vifi = pim_ifp->mroute_vif_index;
+#if PIM_IPV == 4
+#ifdef VIFF_USE_IFINDEX
+ /* identify the vif by ifindex when the kernel supports it ... */
+ vc.vc_lcl_ifindex = ifp->ifindex;
+#else
+ /* ... otherwise fall back to the local address, which requires
+ * the interface to be numbered
+ */
+ if (ifaddr.s_addr == INADDR_ANY) {
+ zlog_warn(
+ "%s: unnumbered interfaces are not supported on this platform",
+ __func__);
+ return -1;
+ }
+ memcpy(&vc.vc_lcl_addr, &ifaddr, sizeof(vc.vc_lcl_addr));
+#endif
+#else
+ vc.vc_pifi = ifp->ifindex;
+#endif
+ vc.vc_flags = flags;
+ vc.vc_threshold = PIM_MROUTE_MIN_TTL;
+ vc.vc_rate_limit = 0;
+
+#if PIM_IPV == 4
+#ifdef PIM_DVMRP_TUNNEL
+ if (vc.vc_flags & VIFF_TUNNEL) {
+ memcpy(&vc.vc_rmt_addr, &vif_remote_addr,
+ sizeof(vc.vc_rmt_addr));
+ }
+#endif
+#endif
+
+ /* MRT_ADD_VIF maps to MRT6_ADD_MIF for IPv6 (see pim_mroute.h) */
+ err = setsockopt(pim_ifp->pim->mroute_socket, PIM_IPPROTO, MRT_ADD_VIF,
+ (void *)&vc, sizeof(vc));
+ if (err) {
+ zlog_warn(
+ "%s: failure: setsockopt(fd=%d,PIM_IPPROTO,MRT_ADD_VIF,vif_index=%d,ifaddr=%pPAs,flag=%d): errno=%d: %s",
+ __func__, pim_ifp->pim->mroute_socket, ifp->ifindex,
+ &ifaddr, flags, errno, safe_strerror(errno));
+ return -2;
+ }
+
+ return 0;
+}
+
+/*
+ * Remove the interface's vif from the kernel multicast routing table
+ * (MRT_DEL_VIF / MRT6_DEL_MIF).  Returns 0 on success, -2 on
+ * setsockopt failure.
+ */
+int pim_mroute_del_vif(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+ pim_vifctl vc;
+ int err;
+
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug("%s: Del Vif %d (%s[%s])", __func__,
+ pim_ifp->mroute_vif_index, ifp->name,
+ pim_ifp->pim->vrf->name);
+
+ memset(&vc, 0, sizeof(vc));
+ vc.vc_vifi = pim_ifp->mroute_vif_index;
+
+ err = setsockopt(pim_ifp->pim->mroute_socket, PIM_IPPROTO, MRT_DEL_VIF,
+ (void *)&vc, sizeof(vc));
+ if (err) {
+ zlog_warn(
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO,MRT_DEL_VIF,vif_index=%d): errno=%d: %s",
+ __FILE__, __func__, pim_ifp->pim->mroute_socket,
+ pim_ifp->mroute_vif_index, errno, safe_strerror(errno));
+ return -2;
+ }
+
+ return 0;
+}
+
+/*
+ * Prevent creating MFC entry with OIF=IIF.
+ *
+ * This is a protection against implementation mistakes.
+ *
+ * The PIM protocol implicitly ensures a loop-free multicast topology.
+ *
+ * IGMP must be protected against adding looped MFC entries created
+ * by both source and receiver attached to the same interface. See
+ * TODO T22.
+ * We shall allow igmp to create upstream when it is DR for the intf.
+ * Assume RP reachable via non DR.
+ */
+/*
+ * Decide whether oif_index may appear in the OIL even when it equals
+ * the IIF (see the loop-free rationale in the comment above).  With
+ * PIM_ENFORCE_LOOPFREE_MFC this is only allowed when the upstream
+ * explicitly carries the ALLOW_IIF_IN_OIL flag, or when the oif was
+ * added by IGMP/MLD and we are DR on it; otherwise always allowed.
+ */
+bool pim_mroute_allow_iif_in_oil(struct channel_oil *c_oil,
+ int oif_index)
+{
+#ifdef PIM_ENFORCE_LOOPFREE_MFC
+ struct interface *ifp_out;
+ struct pim_interface *pim_ifp;
+
+ if (c_oil->up &&
+ PIM_UPSTREAM_FLAG_TEST_ALLOW_IIF_IN_OIL(c_oil->up->flags))
+ return true;
+
+ ifp_out = pim_if_find_by_vif_index(c_oil->pim, oif_index);
+ if (!ifp_out)
+ return false;
+ pim_ifp = ifp_out->info;
+ if (!pim_ifp)
+ return false;
+ if ((c_oil->oif_flags[oif_index] & PIM_OIF_FLAG_PROTO_GM) &&
+ PIM_I_am_DR(pim_ifp))
+ return true;
+
+ return false;
+#else
+ return true;
+#endif
+}
+
+/*
+ * Copy channel oil "in" into "out" while sanitizing the OIL for the
+ * kernel: the IIF is dropped from the OIL unless explicitly allowed
+ * (pim_mroute_allow_iif_in_oil), and MUTEd oifs are cleared.
+ * "out" must be fully overwritten by this call; only (S,G), IIF and
+ * the per-vif bits are copied.
+ */
+static inline void pim_mroute_copy(struct channel_oil *out,
+ struct channel_oil *in)
+{
+ int i;
+
+ *oil_origin(out) = *oil_origin(in);
+ *oil_mcastgrp(out) = *oil_mcastgrp(in);
+ *oil_incoming_vif(out) = *oil_incoming_vif(in);
+
+ for (i = 0; i < MAXVIFS; ++i) {
+ if (*oil_incoming_vif(out) == i &&
+ !pim_mroute_allow_iif_in_oil(in, i)) {
+ oil_if_set(out, i, 0);
+ continue;
+ }
+
+ if (in->oif_flags[i] & PIM_OIF_FLAG_MUTE)
+ oil_if_set(out, i, 0);
+ else
+ oil_if_set(out, i, oil_if_has(in, i));
+ }
+}
+
+/* This function must not be called directly;
+ * use pim_upstream_mroute_add or pim_static_mroute_add instead
+ */
+/*
+ * Push c_oil into the kernel MFC (MRT_ADD_MFC) via a sanitized
+ * temporary copy.  Two kernel quirks are handled inline: (*,G)
+ * entries must list the IIF in the OIL, and a not-yet-installed
+ * (S,G) is first added with the pimreg IIF (vif 0) and then re-added
+ * with the real IIF so buffered packets get forwarded.
+ * Returns 0 on success, -2 on setsockopt failure.
+ */
+static int pim_mroute_add(struct channel_oil *c_oil, const char *name)
+{
+ struct pim_instance *pim = c_oil->pim;
+ struct channel_oil tmp_oil[1] = { };
+ int err;
+
+ pim->mroute_add_last = pim_time_monotonic_sec();
+ ++pim->mroute_add_events;
+
+ /* Copy the oil to a temporary structure to fixup (without need to
+ * later restore) before sending the mroute add to the dataplane
+ */
+ pim_mroute_copy(tmp_oil, c_oil);
+
+ /* The linux kernel *expects* the incoming
+ * vif to be part of the outgoing list
+ * in the case of a (*,G).
+ */
+ if (pim_addr_is_any(*oil_origin(c_oil))) {
+ oil_if_set(tmp_oil, *oil_incoming_vif(c_oil), 1);
+ }
+
+ /*
+ * If we have an unresolved cache entry for the S,G
+ * it is owned by the pimreg for the incoming IIF
+ * So set pimreg as the IIF temporarily to cause
+ * the packets to be forwarded. Then set it
+ * to the correct IIF afterwords.
+ */
+ if (!c_oil->installed && !pim_addr_is_any(*oil_origin(c_oil)) &&
+ *oil_incoming_vif(c_oil) != 0) {
+ *oil_incoming_vif(tmp_oil) = 0;
+ }
+ /* For IPv6 MRT_ADD_MFC is defined to MRT6_ADD_MFC */
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, MRT_ADD_MFC,
+ &tmp_oil->oil, sizeof(tmp_oil->oil));
+
+ /* second add with the real IIF (see comment above) */
+ if (!err && !c_oil->installed && !pim_addr_is_any(*oil_origin(c_oil)) &&
+ *oil_incoming_vif(c_oil) != 0) {
+ *oil_incoming_vif(tmp_oil) = *oil_incoming_vif(c_oil);
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, MRT_ADD_MFC,
+ &tmp_oil->oil, sizeof(tmp_oil->oil));
+ }
+
+ if (err) {
+ zlog_warn(
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO,MRT_ADD_MFC): errno=%d: %s",
+ __FILE__, __func__, pim->mroute_socket, errno,
+ safe_strerror(errno));
+ return -2;
+ }
+
+ if (PIM_DEBUG_MROUTE) {
+ char buf[1000];
+ zlog_debug("%s(%s), vrf %s Added Route: %s", __func__, name,
+ pim->vrf->name,
+ pim_channel_oil_dump(c_oil, buf, sizeof(buf)));
+ }
+
+ if (!c_oil->installed) {
+ c_oil->installed = 1;
+ c_oil->mroute_creation = pim_time_monotonic_sec();
+ }
+
+ return 0;
+}
+
+/*
+ * Derive the mroute incoming vif for c_oil from its upstream entry:
+ * the RPT parent's RPF interface when forwarding on the shared tree
+ * (USE_RPT), otherwise the entry's own RPF interface.  Returns the
+ * vif index, or MAXVIFS when no usable interface is known (no
+ * upstream, no RPF interface, or PIM disabled on it).
+ * (name is currently unused.)
+ */
+static int pim_upstream_get_mroute_iif(struct channel_oil *c_oil,
+ const char *name)
+{
+ vifi_t iif = MAXVIFS;
+ struct interface *ifp = NULL;
+ struct pim_interface *pim_ifp;
+ struct pim_upstream *up = c_oil->up;
+
+ if (up) {
+ if (PIM_UPSTREAM_FLAG_TEST_USE_RPT(up->flags)) {
+ if (up->parent)
+ ifp = up->parent->rpf.source_nexthop.interface;
+ } else {
+ ifp = up->rpf.source_nexthop.interface;
+ }
+ if (ifp) {
+ pim_ifp = (struct pim_interface *)ifp->info;
+ if (pim_ifp)
+ iif = pim_ifp->mroute_vif_index;
+ }
+ }
+ return iif;
+}
+
+/*
+ * Install c_oil in the kernel if its IIF is resolved; an unresolved
+ * IIF (>= MAXVIFS) uninstalls any stale kernel entry instead and is
+ * treated as success ("skipped").  name is the caller tag for logs.
+ */
+static int pim_upstream_mroute_update(struct channel_oil *c_oil,
+ const char *name)
+{
+ char buf[1000];
+
+ if (*oil_incoming_vif(c_oil) >= MAXVIFS) {
+ /* the c_oil cannot be installed as a mroute yet */
+ if (PIM_DEBUG_MROUTE)
+ zlog_debug(
+ "%s(%s) %s mroute not ready to be installed; %s",
+ __func__, name,
+ pim_channel_oil_dump(c_oil, buf,
+ sizeof(buf)),
+ c_oil->installed ?
+ "uninstall" : "skip");
+ /* if already installed flush it out as we are going to stop
+ * updates to it leaving it in a stale state
+ */
+ if (c_oil->installed)
+ pim_mroute_del(c_oil, name);
+ /* return success (skipped) */
+ return 0;
+ }
+
+ return pim_mroute_add(c_oil, name);
+}
+
+/* IIF associated with SGrpt entries are re-evaluated when the parent
+ * (*,G) entries IIF changes
+ */
+/* Re-evaluate the mroute IIF of every RPT-forwarded (SGrpt) child of
+ * this (*,G) upstream (see block comment above).
+ */
+static void pim_upstream_all_sources_iif_update(struct pim_upstream *up)
+{
+ struct listnode *listnode;
+ struct pim_upstream *child;
+
+ for (ALL_LIST_ELEMENTS_RO(up->sources, listnode,
+ child)) {
+ if (PIM_UPSTREAM_FLAG_TEST_USE_RPT(child->flags))
+ pim_upstream_mroute_iif_update(child->channel_oil,
+ __func__);
+ }
+}
+
+/* In the case of "PIM state machine" added mroutes an upstream entry
+ * must be present to decide on the SPT-forwarding vs. RPT-forwarding.
+ */
+/*
+ * Install/refresh the kernel MFC entry for a "PIM state machine"
+ * mroute (see block comment above).  The IIF is (re)derived from the
+ * upstream entry before the dataplane update; when a (*,G) IIF
+ * changes, dependent SGrpt entries are re-evaluated as well.
+ *
+ * Returns pim_upstream_mroute_update()'s result (0 on success/skip,
+ * negative on kernel failure).
+ */
+int pim_upstream_mroute_add(struct channel_oil *c_oil, const char *name)
+{
+ vifi_t iif;
+
+ iif = pim_upstream_get_mroute_iif(c_oil, name);
+
+ if (*oil_incoming_vif(c_oil) != iif) {
+ *oil_incoming_vif(c_oil) = iif;
+ /* a (*,G) IIF change invalidates the IIF inherited by
+ * its SGrpt children; propagate it
+ */
+ if (pim_addr_is_any(*oil_origin(c_oil)) &&
+ c_oil->up)
+ pim_upstream_all_sources_iif_update(c_oil->up);
+ }
+ /* (an earlier version re-assigned the identical iif in an else
+ * branch; that was a no-op and has been removed)
+ */
+
+ return pim_upstream_mroute_update(c_oil, name);
+}
+
+/* Look for IIF changes and update the dateplane entry only if the IIF
+ * has changed.
+ */
+int pim_upstream_mroute_iif_update(struct channel_oil *c_oil, const char *name)
+{
+ vifi_t iif;
+ char buf[1000];
+
+ iif = pim_upstream_get_mroute_iif(c_oil, name);
+ if (*oil_incoming_vif(c_oil) == iif) {
+ /* no change */
+ return 0;
+ }
+ *oil_incoming_vif(c_oil) = iif;
+
+ /* a (*,G) IIF change must cascade to dependent SGrpt entries */
+ if (pim_addr_is_any(*oil_origin(c_oil)) &&
+ c_oil->up)
+ pim_upstream_all_sources_iif_update(c_oil->up);
+
+ if (PIM_DEBUG_MROUTE_DETAIL)
+ zlog_debug("%s(%s) %s mroute iif update %d",
+ __func__, name,
+ pim_channel_oil_dump(c_oil, buf,
+ sizeof(buf)), iif);
+ /* XXX: is this hack needed? */
+ c_oil->oil_inherited_rescan = 1;
+ return pim_upstream_mroute_update(c_oil, name);
+}
+
+/* Static mroutes carry their IIF explicitly, so install directly
+ * without the upstream-based IIF derivation.
+ */
+int pim_static_mroute_add(struct channel_oil *c_oil, const char *name)
+{
+ return pim_mroute_add(c_oil, name);
+}
+
+/*
+ * Change a static mroute's IIF: no-op if unchanged; an unresolved
+ * IIF (MAXVIFS) removes the kernel entry, otherwise it is re-added
+ * with the new IIF.
+ */
+void pim_static_mroute_iif_update(struct channel_oil *c_oil,
+ int input_vif_index,
+ const char *name)
+{
+ if (*oil_incoming_vif(c_oil) == input_vif_index)
+ return;
+
+ *oil_incoming_vif(c_oil) = input_vif_index;
+ if (input_vif_index == MAXVIFS)
+ pim_mroute_del(c_oil, name);
+ else
+ pim_static_mroute_add(c_oil, name);
+}
+
+/*
+ * Remove c_oil's MFC entry from the kernel (MRT_DEL_MFC) and clear
+ * its installed flag.  A not-installed entry is a debug-logged no-op
+ * returning -2; setsockopt failure also returns -2; success returns 0.
+ */
+int pim_mroute_del(struct channel_oil *c_oil, const char *name)
+{
+ struct pim_instance *pim = c_oil->pim;
+ int err;
+
+ pim->mroute_del_last = pim_time_monotonic_sec();
+ ++pim->mroute_del_events;
+
+ if (!c_oil->installed) {
+ if (PIM_DEBUG_MROUTE) {
+ char buf[1000];
+ struct interface *iifp =
+ pim_if_find_by_vif_index(pim, *oil_incoming_vif(
+ c_oil));
+
+ zlog_debug("%s %s: incoming interface %s for route is %s not installed, do not need to send del req. ",
+ __FILE__, __func__,
+ iifp ? iifp->name : "Unknown",
+ pim_channel_oil_dump(c_oil, buf,
+ sizeof(buf)));
+ }
+ return -2;
+ }
+
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, MRT_DEL_MFC,
+ &c_oil->oil, sizeof(c_oil->oil));
+ if (err) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_warn(
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO,MRT_DEL_MFC): errno=%d: %s",
+ __FILE__, __func__, pim->mroute_socket, errno,
+ safe_strerror(errno));
+ return -2;
+ }
+
+ if (PIM_DEBUG_MROUTE) {
+ char buf[1000];
+ zlog_debug("%s(%s), vrf %s Deleted Route: %s", __func__, name,
+ pim->vrf->name,
+ pim_channel_oil_dump(c_oil, buf, sizeof(buf)));
+ }
+
+ // Reset kernel installed flag
+ c_oil->installed = 0;
+
+ return 0;
+}
+
+/*
+ * Refresh c_oil's forwarding counters: snapshot the current values
+ * into the old* fields, then query the kernel (PIM_SIOCGETSGCNT
+ * ioctl) for fresh packet/byte/wrong-if counts.  For an uninstalled
+ * entry only lastused is forced high (so the entry looks idle) and
+ * no kernel query is made.
+ */
+void pim_mroute_update_counters(struct channel_oil *c_oil)
+{
+ struct pim_instance *pim = c_oil->pim;
+ pim_sioc_sg_req sgreq;
+
+ c_oil->cc.oldpktcnt = c_oil->cc.pktcnt;
+ c_oil->cc.oldbytecnt = c_oil->cc.bytecnt;
+ c_oil->cc.oldwrong_if = c_oil->cc.wrong_if;
+
+ if (!c_oil->installed) {
+ /* fake a large lastused so callers treat it as idle */
+ c_oil->cc.lastused = 100 * pim->keep_alive_time;
+ if (PIM_DEBUG_MROUTE) {
+ pim_sgaddr sg;
+
+ sg.src = *oil_origin(c_oil);
+ sg.grp = *oil_mcastgrp(c_oil);
+ zlog_debug("Channel%pSG is not installed no need to collect data from kernel",
+ &sg);
+ }
+ return;
+ }
+
+
+ memset(&sgreq, 0, sizeof(sgreq));
+
+ pim_zlookup_sg_statistics(c_oil);
+
+#if PIM_IPV == 4
+ sgreq.src = *oil_origin(c_oil);
+ sgreq.grp = *oil_mcastgrp(c_oil);
+#else
+ /* IPv6 request fields are sockaddr-style, filled from the raw
+ * mf6cc members rather than the oil_* accessors
+ */
+ sgreq.src = c_oil->oil.mf6cc_origin;
+ sgreq.grp = c_oil->oil.mf6cc_mcastgrp;
+#endif
+ if (ioctl(pim->mroute_socket, PIM_SIOCGETSGCNT, &sgreq)) {
+ pim_sgaddr sg;
+
+ sg.src = *oil_origin(c_oil);
+ sg.grp = *oil_mcastgrp(c_oil);
+
+ zlog_warn(
+ "ioctl(PIM_SIOCGETSGCNT=%lu) failure for (S,G)=%pSG: errno=%d: %s",
+ (unsigned long)PIM_SIOCGETSGCNT, &sg, errno,
+ safe_strerror(errno));
+ return;
+ }
+
+ c_oil->cc.pktcnt = sgreq.pktcnt;
+ c_oil->cc.bytecnt = sgreq.bytecnt;
+ c_oil->cc.wrong_if = sgreq.wrong_if;
+ return;
+}
diff --git a/pimd/pim_mroute.h b/pimd/pim_mroute.h
new file mode 100644
index 0000000..8706f42
--- /dev/null
+++ b/pimd/pim_mroute.h
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_MROUTE_H
+#define PIM_MROUTE_H
+
+/*
+ For msghdr.msg_control in Solaris 10
+*/
+#ifndef _XPG4_2
+#define _XPG4_2
+#endif
+#ifndef __EXTENSIONS__
+#define __EXTENSIONS__
+#endif
+
+
+#define PIM_MROUTE_MIN_TTL (1)
+
+#if PIM_IPV == 4
+
+#include <netinet/in.h>
+#if defined(HAVE_LINUX_MROUTE_H)
+#include <linux/mroute.h>
+#else
+#include "linux/mroute.h"
+#endif
+
+typedef struct vifctl pim_vifctl;
+typedef struct igmpmsg kernmsg;
+typedef struct sioc_sg_req pim_sioc_sg_req;
+
+#define vc_vifi vifc_vifi
+#define vc_flags vifc_flags
+#define vc_threshold vifc_threshold
+#define vc_rate_limit vifc_rate_limit
+#define vc_lcl_addr vifc_lcl_addr
+#define vc_lcl_ifindex vifc_lcl_ifindex
+#define vc_rmt_addr vifc_rmt_addr
+
+#define msg_im_msgtype im_msgtype
+#define msg_im_vif im_vif
+#define msg_im_src im_src
+#define msg_im_dst im_dst
+
+#ifndef IGMPMSG_WRVIFWHOLE
+#define IGMPMSG_WRVIFWHOLE 4 /* For PIM processing */
+#endif
+
+#ifndef GMMSG_NOCACHE
+#define GMMSG_NOCACHE IGMPMSG_NOCACHE /* For PIM processing */
+#define GMMSG_WHOLEPKT IGMPMSG_WHOLEPKT /* For PIM processing */
+#define GMMSG_WRONGVIF IGMPMSG_WRONGVIF /* For PIM processing */
+#define GMMSG_WRVIFWHOLE IGMPMSG_WRVIFWHOLE /* For PIM processing */
+#endif
+
+#ifndef PIM_IPPROTO
+#define PIM_IPPROTO IPPROTO_IP
+#endif
+#ifndef PIM_SIOCGETSGCNT
+#define PIM_SIOCGETSGCNT SIOCGETSGCNT
+#endif
+
+#else /* PIM_IPV != 4 */
+
+#include <netinet/ip6.h>
+
+#if defined(HAVE_LINUX_MROUTE6_H)
+#include <linux/mroute6.h>
+#else
+#include "linux/mroute6.h"
+#endif
+
+#ifndef MRT_INIT
+#define MRT_BASE MRT6_BASE
+#define MRT_INIT MRT6_INIT
+#define MRT_DONE MRT6_DONE
+#define MRT_ADD_VIF MRT6_ADD_MIF
+#define MRT_DEL_VIF MRT6_DEL_MIF
+#define MRT_ADD_MFC MRT6_ADD_MFC
+#define MRT_DEL_MFC MRT6_DEL_MFC
+#define MRT_VERSION MRT6_VERSION
+#define MRT_ASSERT MRT6_ASSERT
+#define MRT_PIM MRT6_PIM
+#define MRT_TABLE MRT6_TABLE
+#endif
+
+#ifndef PIM_IPPROTO
+#define PIM_IPPROTO IPPROTO_IPV6
+#endif
+
+#ifndef PIM_SIOCGETSGCNT
+#define PIM_SIOCGETSGCNT SIOCGETSGCNT_IN6
+#endif
+
+#ifndef MRT6MSG_WRMIFWHOLE
+#define MRT6MSG_WRMIFWHOLE 4 /* For PIM processing */
+#endif
+
+#ifndef GMMSG_NOCACHE
+#define GMMSG_NOCACHE MRT6MSG_NOCACHE /* For PIM processing */
+#define GMMSG_WHOLEPKT MRT6MSG_WHOLEPKT /* For PIM processing */
+#define GMMSG_WRONGVIF MRT6MSG_WRONGMIF /* For PIM processing */
+#define GMMSG_WRVIFWHOLE MRT6MSG_WRMIFWHOLE /* For PIM processing */
+#endif
+
+typedef struct mif6ctl pim_vifctl;
+typedef struct mrt6msg kernmsg;
+typedef mifi_t vifi_t;
+typedef struct sioc_sg_req6 pim_sioc_sg_req;
+
+#define vc_vifi mif6c_mifi
+#define vc_flags mif6c_flags
+#define vc_threshold vifc_threshold
+#define vc_pifi mif6c_pifi
+#define vc_rate_limit vifc_rate_limit
+
+#define msg_im_msgtype im6_msgtype
+#define msg_im_vif im6_mif
+#define msg_im_src im6_src
+#define msg_im_dst im6_dst
+
+#ifndef MAXVIFS
+#define MAXVIFS IF_SETSIZE
+#endif
+
+#define VIFF_REGISTER MIFF_REGISTER
+#endif
+
+
+/*
+ Above: from <linux/mroute.h>
+*/
+
+struct channel_oil;
+struct pim_instance;
+
+int pim_mroute_socket_enable(struct pim_instance *pim);
+int pim_mroute_socket_disable(struct pim_instance *pim);
+
+int pim_mroute_add_vif(struct interface *ifp, pim_addr ifaddr,
+ unsigned char flags);
+int pim_mroute_del_vif(struct interface *ifp);
+
+int pim_upstream_mroute_add(struct channel_oil *c_oil, const char *name);
+int pim_upstream_mroute_iif_update(struct channel_oil *c_oil, const char *name);
+int pim_static_mroute_add(struct channel_oil *c_oil, const char *name);
+void pim_static_mroute_iif_update(struct channel_oil *c_oil,
+ int input_vif_index,
+ const char *name);
+int pim_mroute_del(struct channel_oil *c_oil, const char *name);
+
+void pim_mroute_update_counters(struct channel_oil *c_oil);
+bool pim_mroute_allow_iif_in_oil(struct channel_oil *c_oil,
+ int oif_index);
+int pim_mroute_msg(struct pim_instance *pim, const char *buf, size_t buf_size,
+ ifindex_t ifindex);
+int pim_mroute_msg_nocache(int fd, struct interface *ifp, const kernmsg *msg);
+int pim_mroute_msg_wholepkt(int fd, struct interface *ifp, const char *buf,
+ size_t len);
+int pim_mroute_msg_wrongvif(int fd, struct interface *ifp, const kernmsg *msg);
+int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf,
+ size_t len);
+int pim_mroute_set(struct pim_instance *pim, int enable);
+#endif /* PIM_MROUTE_H */
diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c
new file mode 100644
index 0000000..623c14b
--- /dev/null
+++ b/pimd/pim_msdp.c
@@ -0,0 +1,1448 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * IP MSDP for Quagga
+ * Copyright (C) 2016 Cumulus Networks, Inc.
+ */
+
+#include <zebra.h>
+
+#include <lib/hash.h>
+#include <lib/jhash.h>
+#include <lib/log.h>
+#include <lib/prefix.h>
+#include <lib/sockunion.h>
+#include <lib/stream.h>
+#include <frrevent.h>
+#include <lib/vty.h>
+#include <lib/plist.h>
+#include <lib/lib_errors.h>
+
+#include "pimd.h"
+#include "pim_memory.h"
+#include "pim_instance.h"
+#include "pim_iface.h"
+#include "pim_rp.h"
+#include "pim_str.h"
+#include "pim_time.h"
+#include "pim_upstream.h"
+#include "pim_oil.h"
+
+#include "pim_msdp.h"
+#include "pim_msdp_packet.h"
+#include "pim_msdp_socket.h"
+
+// struct pim_msdp pim_msdp, *msdp = &pim_msdp;
+
+static void pim_msdp_peer_listen(struct pim_msdp_peer *mp);
+static void pim_msdp_peer_cr_timer_setup(struct pim_msdp_peer *mp, bool start);
+static void pim_msdp_peer_ka_timer_setup(struct pim_msdp_peer *mp, bool start);
+static void pim_msdp_peer_hold_timer_setup(struct pim_msdp_peer *mp,
+ bool start);
+static void pim_msdp_peer_free(struct pim_msdp_peer *mp);
+static void pim_msdp_enable(struct pim_instance *pim);
+static void pim_msdp_sa_adv_timer_setup(struct pim_instance *pim, bool start);
+static void pim_msdp_sa_deref(struct pim_msdp_sa *sa,
+ enum pim_msdp_sa_flags flags);
+static int pim_msdp_mg_mbr_comp(const void *p1, const void *p2);
+static void pim_msdp_mg_mbr_free(struct pim_msdp_mg_mbr *mbr);
+
+/************************ SA cache management ******************************/
+static void pim_msdp_sa_timer_expiry_log(struct pim_msdp_sa *sa,
+ const char *timer_str)
+{
+ zlog_debug("MSDP SA %s %s timer expired", sa->sg_str, timer_str);
+}
+
+/* RFC-3618:Sec-5.1 - global active source advertisement timer */
+static void pim_msdp_sa_adv_timer_cb(struct event *t)
+{
+ struct pim_instance *pim = EVENT_ARG(t);
+
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ zlog_debug("MSDP SA advertisement timer expired");
+ }
+
+ pim_msdp_sa_adv_timer_setup(pim, true /* start */);
+ pim_msdp_pkt_sa_tx(pim);
+}
+
+static void pim_msdp_sa_adv_timer_setup(struct pim_instance *pim, bool start)
+{
+ EVENT_OFF(pim->msdp.sa_adv_timer);
+ if (start) {
+ event_add_timer(pim->msdp.master, pim_msdp_sa_adv_timer_cb, pim,
+ PIM_MSDP_SA_ADVERTISMENT_TIME,
+ &pim->msdp.sa_adv_timer);
+ }
+}
+
+/* RFC-3618:Sec-5.3 - SA cache state timer */
+static void pim_msdp_sa_state_timer_cb(struct event *t)
+{
+ struct pim_msdp_sa *sa;
+
+ sa = EVENT_ARG(t);
+
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ pim_msdp_sa_timer_expiry_log(sa, "state");
+ }
+
+ pim_msdp_sa_deref(sa, PIM_MSDP_SAF_PEER);
+}
+
+static void pim_msdp_sa_state_timer_setup(struct pim_msdp_sa *sa, bool start)
+{
+ EVENT_OFF(sa->sa_state_timer);
+ if (start) {
+ event_add_timer(sa->pim->msdp.master,
+ pim_msdp_sa_state_timer_cb, sa,
+ PIM_MSDP_SA_HOLD_TIME, &sa->sa_state_timer);
+ }
+}
+
/* Drop this SA entry's reference on its (S,G) upstream: clear the SRC_MSDP
 * flag and delete the upstream entry if MSDP held the last reference. */
static void pim_msdp_sa_upstream_del(struct pim_msdp_sa *sa)
{
	struct pim_upstream *up = sa->up;
	if (!up) {
		return;
	}

	sa->up = NULL;
	if (PIM_UPSTREAM_FLAG_TEST_SRC_MSDP(up->flags)) {
		PIM_UPSTREAM_FLAG_UNSET_SRC_MSDP(up->flags);
		/* guard: lets pim_msdp_up_del() detect that MSDP itself is
		 * tearing down this upstream and avoid recursing into us */
		sa->flags |= PIM_MSDP_SAF_UP_DEL_IN_PROG;
		up = pim_upstream_del(sa->pim, up, __func__);
		/* re-eval joinDesired; clearing peer-msdp-sa flag can
		 * cause JD to change
		 */
		if (up)
			pim_upstream_update_join_desired(sa->pim, up);
		sa->flags &= ~PIM_MSDP_SAF_UP_DEL_IN_PROG;
	}

	if (PIM_DEBUG_MSDP_EVENTS) {
		zlog_debug("MSDP SA %s de-referenced SPT", sa->sg_str);
	}
}
+
+static bool pim_msdp_sa_upstream_add_ok(struct pim_msdp_sa *sa,
+ struct pim_upstream *xg_up)
+{
+ if (!(sa->flags & PIM_MSDP_SAF_PEER)) {
+ /* SA should have been rxed from a peer */
+ return false;
+ }
+ /* check if we are RP */
+ if (!I_am_RP(sa->pim, sa->sg.grp)) {
+ return false;
+ }
+
+ /* check if we have a (*, G) with a non-empty immediate OIL */
+ if (!xg_up) {
+ pim_sgaddr sg;
+
+ memset(&sg, 0, sizeof(sg));
+ sg.grp = sa->sg.grp;
+
+ xg_up = pim_upstream_find(sa->pim, &sg);
+ }
+ if (!xg_up || (xg_up->join_state != PIM_UPSTREAM_JOINED)) {
+ /* join desired will be true for such (*, G) entries so we will
+ * just look at join_state and let the PIM state machine do the
+ * rest of
+ * the magic */
+ return false;
+ }
+
+ return true;
+}
+
/* Upstream add evaluation needs to happen everytime -
 * 1. Peer reference is added or removed.
 * 2. The RP for a group changes.
 * 3. joinDesired for the associated (*, G) changes
 * 4. associated (*, G) is removed - this seems a bit redundant
 *    (considering #3); but just in case an entry gets nuked without an
 *    upstream state transition
 * ctx is used only for identifying the caller in debug output.
 */
static void pim_msdp_sa_upstream_update(struct pim_msdp_sa *sa,
					struct pim_upstream *xg_up,
					const char *ctx)
{
	struct pim_upstream *up;

	/* entry no longer qualifies for SPT setup: drop any existing
	 * upstream reference and bail */
	if (!pim_msdp_sa_upstream_add_ok(sa, xg_up)) {
		pim_msdp_sa_upstream_del(sa);
		return;
	}

	if (sa->up) {
		/* nothing to do */
		return;
	}

	up = pim_upstream_find(sa->pim, &sa->sg);
	if (up && (PIM_UPSTREAM_FLAG_TEST_SRC_MSDP(up->flags))) {
		/* somehow we lost track of the upstream ptr? best log it */
		sa->up = up;
		if (PIM_DEBUG_MSDP_EVENTS) {
			zlog_debug("MSDP SA %s SPT reference missing",
				   sa->sg_str);
		}
		return;
	}

	/* RFC3618: "RP triggers a (S, G) join event towards the data source
	 * as if a JP message was rxed addressed to the RP itself." */
	up = pim_upstream_add(sa->pim, &sa->sg, NULL /* iif */,
			      PIM_UPSTREAM_FLAG_MASK_SRC_MSDP, __func__, NULL);

	sa->up = up;
	if (up) {
		/* update inherited oil */
		pim_upstream_inherited_olist(sa->pim, up);
		/* should we also start the kat in parallel? we will need it
		 * when the SA ages out */
		if (PIM_DEBUG_MSDP_EVENTS) {
			zlog_debug("MSDP SA %s referenced SPT", sa->sg_str);
		}
	} else {
		if (PIM_DEBUG_MSDP_EVENTS) {
			zlog_debug("MSDP SA %s SPT reference failed",
				   sa->sg_str);
		}
	}
}
+
/* release all mem associated with a sa */
static void pim_msdp_sa_free(struct pim_msdp_sa *sa)
{
	/* make sure the expiry timer cannot fire on freed memory */
	pim_msdp_sa_state_timer_setup(sa, false);

	XFREE(MTYPE_PIM_MSDP_SA, sa);
}
+
+static struct pim_msdp_sa *pim_msdp_sa_new(struct pim_instance *pim,
+ pim_sgaddr *sg, struct in_addr rp)
+{
+ struct pim_msdp_sa *sa;
+
+ sa = XCALLOC(MTYPE_PIM_MSDP_SA, sizeof(*sa));
+
+ sa->pim = pim;
+ sa->sg = *sg;
+ snprintfrr(sa->sg_str, sizeof(sa->sg_str), "%pSG", sg);
+ sa->rp = rp;
+ sa->uptime = pim_time_monotonic_sec();
+
+ /* insert into misc tables for easy access */
+ sa = hash_get(pim->msdp.sa_hash, sa, hash_alloc_intern);
+ listnode_add_sort(pim->msdp.sa_list, sa);
+
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ zlog_debug("MSDP SA %s created", sa->sg_str);
+ }
+
+ return sa;
+}
+
+static struct pim_msdp_sa *pim_msdp_sa_find(struct pim_instance *pim,
+ pim_sgaddr *sg)
+{
+ struct pim_msdp_sa lookup;
+
+ lookup.sg = *sg;
+ return hash_lookup(pim->msdp.sa_hash, &lookup);
+}
+
+static struct pim_msdp_sa *pim_msdp_sa_add(struct pim_instance *pim,
+ pim_sgaddr *sg, struct in_addr rp)
+{
+ struct pim_msdp_sa *sa;
+
+ sa = pim_msdp_sa_find(pim, sg);
+ if (sa) {
+ return sa;
+ }
+
+ return pim_msdp_sa_new(pim, sg, rp);
+}
+
+static void pim_msdp_sa_del(struct pim_msdp_sa *sa)
+{
+ /* this is somewhat redundant - still want to be careful not to leave
+ * stale upstream references */
+ pim_msdp_sa_upstream_del(sa);
+
+ /* stop timers */
+ pim_msdp_sa_state_timer_setup(sa, false /* start */);
+
+ /* remove the entry from various tables */
+ listnode_delete(sa->pim->msdp.sa_list, sa);
+ hash_release(sa->pim->msdp.sa_hash, sa);
+
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ zlog_debug("MSDP SA %s deleted", sa->sg_str);
+ }
+
+ /* free up any associated memory */
+ pim_msdp_sa_free(sa);
+}
+
/* Record which peer an SA entry was last learnt from (maintaining the
 * per-peer SA counters) and update the originating RP address. Passing
 * mp == NULL detaches the entry from any peer. */
static void pim_msdp_sa_peer_ip_set(struct pim_msdp_sa *sa,
				    struct pim_msdp_peer *mp, struct in_addr rp)
{
	struct pim_msdp_peer *old_mp;

	/* optimize the "no change" case as it will happen
	 * frequently/periodically */
	if (mp && (sa->peer.s_addr == mp->peer.s_addr)) {
		return;
	}

	/* any time the peer ip changes also update the rp address */
	if (sa->peer.s_addr != INADDR_ANY) {
		/* de-account the SA from the peer it was previously
		 * attributed to */
		old_mp = pim_msdp_peer_find(sa->pim, sa->peer);
		if (old_mp && old_mp->sa_cnt) {
			--old_mp->sa_cnt;
		}
	}

	if (mp) {
		++mp->sa_cnt;
		sa->peer = mp->peer;
	} else {
		sa->peer.s_addr = PIM_NET_INADDR_ANY;
	}
	sa->rp = rp;
}
+
/* When a local active-source is removed there is no way to withdraw the
 * source from peers. We will simply remove it from the SA cache so it will
 * not be sent in subsequent SA updates. Peers will consequently timeout the
 * SA.
 * Similarly a "peer-added" SA is never explicitly deleted. It is simply
 * aged out over time if not seen in the SA updates from the peers.
 * XXX: should we provide a knob to drop entries learnt from a peer when the
 * peer goes down? */
static void pim_msdp_sa_deref(struct pim_msdp_sa *sa,
			      enum pim_msdp_sa_flags flags)
{
	bool update_up = false;

	if ((sa->flags & PIM_MSDP_SAF_LOCAL)) {
		if (flags & PIM_MSDP_SAF_LOCAL) {
			if (PIM_DEBUG_MSDP_EVENTS) {
				zlog_debug("MSDP SA %s local reference removed",
					   sa->sg_str);
			}
			if (sa->pim->msdp.local_cnt)
				--sa->pim->msdp.local_cnt;
		}
	}

	if ((sa->flags & PIM_MSDP_SAF_PEER)) {
		if (flags & PIM_MSDP_SAF_PEER) {
			struct in_addr rp;

			if (PIM_DEBUG_MSDP_EVENTS) {
				zlog_debug("MSDP SA %s peer reference removed",
					   sa->sg_str);
			}
			/* no peer reference left: stop the expiry timer and
			 * detach the entry from its advertising peer */
			pim_msdp_sa_state_timer_setup(sa, false /* start */);
			rp.s_addr = INADDR_ANY;
			pim_msdp_sa_peer_ip_set(sa, NULL /* mp */, rp);
			/* if peer ref was removed we need to remove the msdp
			 * reference on the msdp entry */
			update_up = true;
		}
	}

	sa->flags &= ~flags;
	if (update_up) {
		pim_msdp_sa_upstream_update(sa, NULL /* xg_up */, "sa-deref");
	}

	/* last reference gone: destroy the entry (do not touch sa after
	 * this point) */
	if (!(sa->flags & PIM_MSDP_SAF_REF)) {
		pim_msdp_sa_del(sa);
	}
}
+
+void pim_msdp_sa_ref(struct pim_instance *pim, struct pim_msdp_peer *mp,
+ pim_sgaddr *sg, struct in_addr rp)
+{
+ struct pim_msdp_sa *sa;
+ struct rp_info *rp_info;
+ struct prefix grp;
+
+ sa = pim_msdp_sa_add(pim, sg, rp);
+ if (!sa) {
+ return;
+ }
+
+ /* reference it */
+ if (mp) {
+ if (!(sa->flags & PIM_MSDP_SAF_PEER)) {
+ sa->flags |= PIM_MSDP_SAF_PEER;
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ zlog_debug("MSDP SA %s added by peer",
+ sa->sg_str);
+ }
+ }
+ pim_msdp_sa_peer_ip_set(sa, mp, rp);
+ /* start/re-start the state timer to prevent cache expiry */
+ pim_msdp_sa_state_timer_setup(sa, true /* start */);
+ /* We re-evaluate SA "SPT-trigger" everytime we hear abt it from
+ * a
+ * peer. XXX: If this becomes too much of a periodic overhead we
+ * can make it event based */
+ pim_msdp_sa_upstream_update(sa, NULL /* xg_up */, "peer-ref");
+ } else {
+ if (!(sa->flags & PIM_MSDP_SAF_LOCAL)) {
+ sa->flags |= PIM_MSDP_SAF_LOCAL;
+ ++sa->pim->msdp.local_cnt;
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ zlog_debug("MSDP SA %s added locally",
+ sa->sg_str);
+ }
+ /* send an immediate SA update to peers */
+ pim_addr_to_prefix(&grp, sa->sg.grp);
+ rp_info = pim_rp_find_match_group(pim, &grp);
+ if (rp_info) {
+ sa->rp = rp_info->rp.rpf_addr;
+ } else
+ {
+ sa->rp = pim->msdp.originator_id;
+ }
+ sa->rp = pim->msdp.originator_id;
+ pim_msdp_pkt_sa_tx_one(sa);
+ }
+ sa->flags &= ~PIM_MSDP_SAF_STALE;
+ }
+}
+
+/* The following criteria must be met to originate an SA from the MSDP
+ * speaker -
+ * 1. KAT must be running i.e. source is active.
+ * 2. We must be RP for the group.
+ * 3. Source must be registrable to the RP (this is where the RFC is vague
+ * and especially ambiguous in CLOS networks; with anycast RP all sources
+ * are potentially registrable to all RPs in the domain). We assume #3 is
+ * satisfied if -
+ * a. We are also the FHR-DR for the source (OR)
+ * b. We rxed a pim register (null or data encapsulated) within the last
+ * (3 * (1.5 * register_suppression_timer))).
+ */
+static bool pim_msdp_sa_local_add_ok(struct pim_upstream *up)
+{
+ struct pim_instance *pim = up->channel_oil->pim;
+
+ if (!(pim->msdp.flags & PIM_MSDPF_ENABLE)) {
+ return false;
+ }
+
+ if (!pim_upstream_is_kat_running(up))
+ /* stream is not active */
+ return false;
+
+ if (!I_am_RP(pim, up->sg.grp)) {
+ /* we are not RP for the group */
+ return false;
+ }
+
+ /* we are the FHR-DR for this stream or we are RP and have seen
+ * registers
+ * from a FHR for this source */
+ if (PIM_UPSTREAM_FLAG_TEST_FHR(up->flags) || up->t_msdp_reg_timer) {
+ return true;
+ }
+
+ return false;
+}
+
+static void pim_msdp_sa_local_add(struct pim_instance *pim, pim_sgaddr *sg)
+{
+ struct in_addr rp;
+ rp.s_addr = INADDR_ANY;
+ pim_msdp_sa_ref(pim, NULL /* mp */, sg, rp);
+}
+
+void pim_msdp_sa_local_del(struct pim_instance *pim, pim_sgaddr *sg)
+{
+ struct pim_msdp_sa *sa;
+
+ sa = pim_msdp_sa_find(pim, sg);
+ if (sa) {
+ pim_msdp_sa_deref(sa, PIM_MSDP_SAF_LOCAL);
+ }
+}
+
/* we need to be very cautious with this API as SA del too can trigger an
 * upstream del and we will get stuck in a simple loop */
static void pim_msdp_sa_local_del_on_up_del(struct pim_instance *pim,
					    pim_sgaddr *sg)
{
	struct pim_msdp_sa *sa;

	sa = pim_msdp_sa_find(pim, sg);
	if (sa) {
		if (PIM_DEBUG_MSDP_INTERNAL) {
			zlog_debug("MSDP local sa %s del on up del",
				   sa->sg_str);
		}

		/* if there is no local reference escape */
		if (!(sa->flags & PIM_MSDP_SAF_LOCAL)) {
			if (PIM_DEBUG_MSDP_INTERNAL) {
				zlog_debug("MSDP local sa %s del; no local ref",
					   sa->sg_str);
			}
			return;
		}

		if (sa->flags & PIM_MSDP_SAF_UP_DEL_IN_PROG) {
			/* MSDP is the one that triggered the upstream del. If
			 * this happens we most certainly have a bug in the PIM
			 * upstream state machine. We will not have a local
			 * reference unless the KAT is running. And if the KAT
			 * is running there MUST be an additional source-stream
			 * reference to the flow. Accounting for such cases
			 * requires lots of changes; perhaps address this in
			 * the next release? - XXX */
			flog_err(
				EC_LIB_DEVELOPMENT,
				"MSDP sa %s SPT teardown is causing the local entry to be removed",
				sa->sg_str);
			return;
		}

		/* we are dropping the sa on upstream del we should not have an
		 * upstream reference */
		if (sa->up) {
			if (PIM_DEBUG_MSDP_INTERNAL) {
				zlog_debug("MSDP local sa %s del; up non-NULL",
					   sa->sg_str);
			}
			sa->up = NULL;
		}
		pim_msdp_sa_deref(sa, PIM_MSDP_SAF_LOCAL);
	}
}
+
+/* Local SA qualification needs to be re-evaluated when -
+ * 1. KAT is started or stopped
+ * 2. on RP changes
+ * 3. Whenever FHR status changes for a (S,G) - XXX - currently there
+ * is no clear path to transition an entry out of "MASK_FHR" need
+ * to discuss this with Donald. May result in some strangeness if the
+ * FHR is also the RP.
+ * 4. When msdp_reg timer is started or stopped
+ */
+void pim_msdp_sa_local_update(struct pim_upstream *up)
+{
+ struct pim_instance *pim = up->channel_oil->pim;
+
+ if (pim_msdp_sa_local_add_ok(up)) {
+ pim_msdp_sa_local_add(pim, &up->sg);
+ } else {
+ pim_msdp_sa_local_del(pim, &up->sg);
+ }
+}
+
/* (Re)build the local SA cache by evaluating every upstream entry. */
static void pim_msdp_sa_local_setup(struct pim_instance *pim)
{
	struct pim_upstream *up;

	frr_each (rb_pim_upstream, &pim->upstream_head, up)
		pim_msdp_sa_local_update(up);
}
+
/* whenever the RP changes we need to re-evaluate the "local" SA-cache */
/* XXX: needs to be tested */
void pim_msdp_i_am_rp_changed(struct pim_instance *pim)
{
	struct listnode *sanode;
	struct listnode *nextnode;
	struct pim_msdp_sa *sa;

	if (!(pim->msdp.flags & PIM_MSDPF_ENABLE)) {
		/* if the feature is not enabled do nothing */
		return;
	}

	if (PIM_DEBUG_MSDP_INTERNAL) {
		zlog_debug("MSDP i_am_rp changed");
	}

	/* mark all local entries as stale */
	for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, sanode, sa)) {
		if (sa->flags & PIM_MSDP_SAF_LOCAL) {
			sa->flags |= PIM_MSDP_SAF_STALE;
		}
	}

	/* re-setup local SA entries; entries that still qualify have their
	 * STALE flag cleared by pim_msdp_sa_ref() */
	pim_msdp_sa_local_setup(pim);

	/* safe-iteration variant is required: sa_deref may free entries */
	for (ALL_LIST_ELEMENTS(pim->msdp.sa_list, sanode, nextnode, sa)) {
		/* purge stale SA entries */
		if (sa->flags & PIM_MSDP_SAF_STALE) {
			/* clear the stale flag; the entry may be kept even
			 * after "local-deref" */
			sa->flags &= ~PIM_MSDP_SAF_STALE;
			/* sa_deref can end up freeing the sa; so don't access
			 * contents after */
			pim_msdp_sa_deref(sa, PIM_MSDP_SAF_LOCAL);
		} else {
			/* if the source is still active check if we can
			 * influence SPT */
			pim_msdp_sa_upstream_update(sa, NULL /* xg_up */,
						    "rp-change");
		}
	}
}
+
+/* We track the join state of (*, G) entries. If G has sources in the SA-cache
+ * we need to setup or teardown SPT when the JoinDesired status changes for
+ * (*, G) */
+void pim_msdp_up_join_state_changed(struct pim_instance *pim,
+ struct pim_upstream *xg_up)
+{
+ struct listnode *sanode;
+ struct pim_msdp_sa *sa;
+
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug("MSDP join state changed for %s", xg_up->sg_str);
+ }
+
+ /* If this is not really an XG entry just move on */
+ if (!pim_addr_is_any(xg_up->sg.src) || pim_addr_is_any(xg_up->sg.grp)) {
+ return;
+ }
+
+ /* XXX: Need to maintain SAs per-group to avoid all this unnecessary
+ * walking */
+ for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, sanode, sa)) {
+ if (pim_addr_cmp(sa->sg.grp, xg_up->sg.grp)) {
+ continue;
+ }
+ pim_msdp_sa_upstream_update(sa, xg_up, "up-jp-change");
+ }
+}
+
+static void pim_msdp_up_xg_del(struct pim_instance *pim, pim_sgaddr *sg)
+{
+ struct listnode *sanode;
+ struct pim_msdp_sa *sa;
+
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug("MSDP %pSG del", sg);
+ }
+
+ /* If this is not really an XG entry just move on */
+ if (!pim_addr_is_any(sg->src) || pim_addr_is_any(sg->grp)) {
+ return;
+ }
+
+ /* XXX: Need to maintain SAs per-group to avoid all this unnecessary
+ * walking */
+ for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, sanode, sa)) {
+ if (pim_addr_cmp(sa->sg.grp, sg->grp)) {
+ continue;
+ }
+ pim_msdp_sa_upstream_update(sa, NULL /* xg */, "up-jp-change");
+ }
+}
+
+void pim_msdp_up_del(struct pim_instance *pim, pim_sgaddr *sg)
+{
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug("MSDP up %pSG del", sg);
+ }
+ if (pim_addr_is_any(sg->src)) {
+ pim_msdp_up_xg_del(pim, sg);
+ } else {
+ pim_msdp_sa_local_del_on_up_del(pim, sg);
+ }
+}
+
/* sa hash and peer list helpers */
/* hash key: derived from the (S,G) of the SA entry */
static unsigned int pim_msdp_sa_hash_key_make(const void *p)
{
	const struct pim_msdp_sa *sa = p;

	return pim_sgaddr_hash(sa->sg, 0);
}
+
/* hash equality: two SA entries are the same iff their (S,G) matches */
static bool pim_msdp_sa_hash_eq(const void *p1, const void *p2)
{
	const struct pim_msdp_sa *sa1 = p1;
	const struct pim_msdp_sa *sa2 = p2;

	return !pim_sgaddr_cmp(sa1->sg, sa2->sg);
}
+
/* sorted-list comparator: orders SA entries by (S,G) */
static int pim_msdp_sa_comp(const void *p1, const void *p2)
{
	const struct pim_msdp_sa *sa1 = p1;
	const struct pim_msdp_sa *sa2 = p2;

	return pim_sgaddr_cmp(sa1->sg, sa2->sg);
}
+
/* RFC-3618:Sec-10.1.3 - Peer-RPF forwarding */
/* XXX: this can use a bit of refining and extensions */
/* Accept an SA from this peer only if the peer is the originating RP itself
 * or is our unicast nexthop towards that RP. */
bool pim_msdp_peer_rpf_check(struct pim_msdp_peer *mp, struct in_addr rp)
{
	struct pim_nexthop nexthop = {0};

	if (mp->peer.s_addr == rp.s_addr) {
		return true;
	}

	/* check if the MSDP peer is the nexthop for the RP */
	if (pim_nexthop_lookup(mp->pim, &nexthop, rp, 0) &&
	    nexthop.mrib_nexthop_addr.s_addr == mp->peer.s_addr) {
		return true;
	}

	return false;
}
+
+/************************ Peer session management **************************/
+char *pim_msdp_state_dump(enum pim_msdp_peer_state state, char *buf,
+ int buf_size)
+{
+ switch (state) {
+ case PIM_MSDP_DISABLED:
+ snprintf(buf, buf_size, "%s", "disabled");
+ break;
+ case PIM_MSDP_INACTIVE:
+ snprintf(buf, buf_size, "%s", "inactive");
+ break;
+ case PIM_MSDP_LISTEN:
+ snprintf(buf, buf_size, "%s", "listen");
+ break;
+ case PIM_MSDP_CONNECTING:
+ snprintf(buf, buf_size, "%s", "connecting");
+ break;
+ case PIM_MSDP_ESTABLISHED:
+ snprintf(buf, buf_size, "%s", "established");
+ break;
+ default:
+ snprintf(buf, buf_size, "unk-%d", state);
+ }
+ return buf;
+}
+
/* Log the peer's current FSM state (called right after a transition). */
static void pim_msdp_peer_state_chg_log(struct pim_msdp_peer *mp)
{
	char state_str[PIM_MSDP_STATE_STRLEN];

	pim_msdp_state_dump(mp->state, state_str, sizeof(state_str));
	zlog_debug("MSDP peer %s state chg to %s", mp->key_str, state_str);
}
+
+/* MSDP Connection State Machine actions (defined in RFC-3618:Sec-11.2) */
+/* 11.2.A2: active peer - start connect retry timer; when the timer fires
+ * a tcp connection will be made */
+static void pim_msdp_peer_connect(struct pim_msdp_peer *mp)
+{
+ mp->state = PIM_MSDP_CONNECTING;
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ pim_msdp_peer_state_chg_log(mp);
+ }
+
+ pim_msdp_peer_cr_timer_setup(mp, true /* start */);
+}
+
+/* 11.2.A3: passive peer - just listen for connections */
+static void pim_msdp_peer_listen(struct pim_msdp_peer *mp)
+{
+ mp->state = PIM_MSDP_LISTEN;
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ pim_msdp_peer_state_chg_log(mp);
+ }
+
+ /* this is interntionally asymmetric i.e. we set up listen-socket when
+ * the
+ * first listening peer is configured; but don't bother tearing it down
+ * when
+ * all the peers go down */
+ pim_msdp_sock_listen(mp->pim);
+}
+
/* 11.2.A4 and 11.2.A5: transition active or passive peer to
 * established state */
void pim_msdp_peer_established(struct pim_msdp_peer *mp)
{
	/* count only real transitions into ESTABLISHED as flaps */
	if (mp->state != PIM_MSDP_ESTABLISHED) {
		++mp->est_flaps;
	}

	mp->state = PIM_MSDP_ESTABLISHED;
	mp->uptime = pim_time_monotonic_sec();

	if (PIM_DEBUG_MSDP_EVENTS) {
		pim_msdp_peer_state_chg_log(mp);
	}

	/* stop retry timer on active peers */
	pim_msdp_peer_cr_timer_setup(mp, false /* start */);

	/* send KA; start KA and hold timers */
	pim_msdp_pkt_ka_tx(mp);
	pim_msdp_peer_ka_timer_setup(mp, true /* start */);
	pim_msdp_peer_hold_timer_setup(mp, true /* start */);

	/* push our full SA cache to the newly established peer */
	pim_msdp_pkt_sa_tx_to_one_peer(mp);

	/* start watching the socket for IO */
	PIM_MSDP_PEER_WRITE_ON(mp);
	PIM_MSDP_PEER_READ_ON(mp);
}
+
/* 11.2.A6, 11.2.A7 and 11.2.A8: shutdown the peer tcp connection.
 * chg_state also moves the FSM to INACTIVE (false is used when the peer
 * struct itself is about to be freed). */
void pim_msdp_peer_stop_tcp_conn(struct pim_msdp_peer *mp, bool chg_state)
{
	if (chg_state) {
		if (mp->state == PIM_MSDP_ESTABLISHED) {
			++mp->est_flaps;
		}
		mp->state = PIM_MSDP_INACTIVE;
		if (PIM_DEBUG_MSDP_EVENTS) {
			pim_msdp_peer_state_chg_log(mp);
		}
	}

	if (PIM_DEBUG_MSDP_INTERNAL) {
		zlog_debug("MSDP peer %s pim_msdp_peer_stop_tcp_conn",
			   mp->key_str);
	}
	/* stop read and write threads */
	PIM_MSDP_PEER_READ_OFF(mp);
	PIM_MSDP_PEER_WRITE_OFF(mp);

	/* reset buffers */
	mp->packet_size = 0;
	if (mp->ibuf)
		stream_reset(mp->ibuf);
	if (mp->obuf)
		stream_fifo_clean(mp->obuf);

	/* stop all peer timers */
	pim_msdp_peer_ka_timer_setup(mp, false /* start */);
	pim_msdp_peer_cr_timer_setup(mp, false /* start */);
	pim_msdp_peer_hold_timer_setup(mp, false /* start */);

	/* close connection */
	if (mp->fd >= 0) {
		close(mp->fd);
		mp->fd = -1;
	}
}
+
+/* RFC-3618:Sec-5.6 - stop the peer tcp connection and startover */
+void pim_msdp_peer_reset_tcp_conn(struct pim_msdp_peer *mp, const char *rc_str)
+{
+ if (PIM_DEBUG_EVENTS) {
+ zlog_debug("MSDP peer %s tcp reset %s", mp->key_str, rc_str);
+ snprintf(mp->last_reset, sizeof(mp->last_reset), "%s", rc_str);
+ }
+
+ /* close the connection and transition to listening or connecting */
+ pim_msdp_peer_stop_tcp_conn(mp, true /* chg_state */);
+ if (PIM_MSDP_PEER_IS_LISTENER(mp)) {
+ pim_msdp_peer_listen(mp);
+ } else {
+ pim_msdp_peer_connect(mp);
+ }
+}
+
+static void pim_msdp_peer_timer_expiry_log(struct pim_msdp_peer *mp,
+ const char *timer_str)
+{
+ zlog_debug("MSDP peer %s %s timer expired", mp->key_str, timer_str);
+}
+
+/* RFC-3618:Sec-5.4 - peer hold timer */
+static void pim_msdp_peer_hold_timer_cb(struct event *t)
+{
+ struct pim_msdp_peer *mp;
+
+ mp = EVENT_ARG(t);
+
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ pim_msdp_peer_timer_expiry_log(mp, "hold");
+ }
+
+ if (mp->state != PIM_MSDP_ESTABLISHED) {
+ return;
+ }
+
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ pim_msdp_peer_state_chg_log(mp);
+ }
+ pim_msdp_peer_reset_tcp_conn(mp, "ht-expired");
+}
+
+static void pim_msdp_peer_hold_timer_setup(struct pim_msdp_peer *mp, bool start)
+{
+ struct pim_instance *pim = mp->pim;
+ EVENT_OFF(mp->hold_timer);
+ if (start) {
+ event_add_timer(pim->msdp.master, pim_msdp_peer_hold_timer_cb,
+ mp, pim->msdp.hold_time, &mp->hold_timer);
+ }
+}
+
+
+/* RFC-3618:Sec-5.5 - peer keepalive timer */
+static void pim_msdp_peer_ka_timer_cb(struct event *t)
+{
+ struct pim_msdp_peer *mp;
+
+ mp = EVENT_ARG(t);
+
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ pim_msdp_peer_timer_expiry_log(mp, "ka");
+ }
+
+ pim_msdp_pkt_ka_tx(mp);
+ pim_msdp_peer_ka_timer_setup(mp, true /* start */);
+}
+
+static void pim_msdp_peer_ka_timer_setup(struct pim_msdp_peer *mp, bool start)
+{
+ EVENT_OFF(mp->ka_timer);
+ if (start) {
+ event_add_timer(mp->pim->msdp.master, pim_msdp_peer_ka_timer_cb,
+ mp, mp->pim->msdp.keep_alive, &mp->ka_timer);
+ }
+}
+
/* Attempt an outgoing (active-side) TCP connection to the peer and act on
 * the immediate result of the non-blocking connect. */
static void pim_msdp_peer_active_connect(struct pim_msdp_peer *mp)
{
	int rc;
	++mp->conn_attempts;
	rc = pim_msdp_sock_connect(mp);

	if (PIM_DEBUG_MSDP_INTERNAL) {
		zlog_debug("MSDP peer %s pim_msdp_peer_active_connect: %d",
			   mp->key_str, rc);
	}

	switch (rc) {
	case connect_error:
	case -1:
		/* connect failed restart the connect-retry timer */
		pim_msdp_peer_cr_timer_setup(mp, true /* start */);
		break;

	case connect_success:
		/* connect was successful move to established */
		pim_msdp_peer_established(mp);
		break;

	case connect_in_progress:
		/* for NB connect we need to wait till sock is readable or
		 * writeable */
		PIM_MSDP_PEER_WRITE_ON(mp);
		PIM_MSDP_PEER_READ_ON(mp);
		/* also restart connect-retry timer to reset the socket if
		 * connect is not successful */
		pim_msdp_peer_cr_timer_setup(mp, true /* start */);
		break;
	}
}
+
+/* RFC-3618:Sec-5.6 - connection retry on active peer */
+static void pim_msdp_peer_cr_timer_cb(struct event *t)
+{
+ struct pim_msdp_peer *mp;
+
+ mp = EVENT_ARG(t);
+
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ pim_msdp_peer_timer_expiry_log(mp, "connect-retry");
+ }
+
+ if (mp->state != PIM_MSDP_CONNECTING || PIM_MSDP_PEER_IS_LISTENER(mp)) {
+ return;
+ }
+
+ pim_msdp_peer_active_connect(mp);
+}
+
+static void pim_msdp_peer_cr_timer_setup(struct pim_msdp_peer *mp, bool start)
+{
+ EVENT_OFF(mp->cr_timer);
+ if (start) {
+ event_add_timer(mp->pim->msdp.master, pim_msdp_peer_cr_timer_cb,
+ mp, mp->pim->msdp.connection_retry,
+ &mp->cr_timer);
+ }
+}
+
+/* if a valid packet is rxed from the peer we can restart hold timer */
+void pim_msdp_peer_pkt_rxed(struct pim_msdp_peer *mp)
+{
+ if (mp->state == PIM_MSDP_ESTABLISHED) {
+ pim_msdp_peer_hold_timer_setup(mp, true /* start */);
+ }
+}
+
+/* if a valid packet is txed to the peer we can restart ka timer and avoid
+ * unnecessary ka noise in the network */
+void pim_msdp_peer_pkt_txed(struct pim_msdp_peer *mp)
+{
+ if (mp->state == PIM_MSDP_ESTABLISHED) {
+ pim_msdp_peer_ka_timer_setup(mp, true /* start */);
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug("MSDP ka timer restart on pkt tx to %s",
+ mp->key_str);
+ }
+ }
+}
+
/* Fill an AF_INET sockunion from an IPv4 address. */
static void pim_msdp_addr2su(union sockunion *su, struct in_addr addr)
{
	sockunion_init(su);
	su->sin.sin_addr = addr;
	su->sin.sin_family = AF_INET;
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
	/* BSD-style sockaddrs carry an explicit length field */
	su->sin.sin_len = sizeof(struct sockaddr_in);
#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
}
+
/* 11.2.A1: create a new peer and transition state to listen or connecting.
 * The peer with the numerically higher IP address takes the passive
 * (listener) role; the other side actively connects. */
struct pim_msdp_peer *pim_msdp_peer_add(struct pim_instance *pim,
					const struct in_addr *peer,
					const struct in_addr *local,
					const char *mesh_group_name)
{
	struct pim_msdp_peer *mp;

	/* first peer configured implicitly enables the MSDP feature */
	pim_msdp_enable(pim);

	mp = XCALLOC(MTYPE_PIM_MSDP_PEER, sizeof(*mp));

	mp->pim = pim;
	mp->peer = *peer;
	pim_inet4_dump("<peer?>", mp->peer, mp->key_str, sizeof(mp->key_str));
	pim_msdp_addr2su(&mp->su_peer, mp->peer);
	mp->local = *local;
	/* XXX: originator_id setting needs to move to the mesh group */
	pim->msdp.originator_id = *local;
	pim_msdp_addr2su(&mp->su_local, mp->local);
	if (mesh_group_name)
		mp->mesh_group_name =
			XSTRDUP(MTYPE_PIM_MSDP_MG_NAME, mesh_group_name);

	mp->state = PIM_MSDP_INACTIVE;
	mp->fd = -1;
	strlcpy(mp->last_reset, "-", sizeof(mp->last_reset));
	/* higher IP address is listener */
	if (ntohl(mp->local.s_addr) > ntohl(mp->peer.s_addr)) {
		mp->flags |= PIM_MSDP_PEERF_LISTENER;
	}

	/* setup packet buffers */
	mp->ibuf = stream_new(PIM_MSDP_MAX_PACKET_SIZE);
	mp->obuf = stream_fifo_new();

	/* insert into misc tables for easy access */
	mp = hash_get(pim->msdp.peer_hash, mp, hash_alloc_intern);
	listnode_add_sort(pim->msdp.peer_list, mp);

	if (PIM_DEBUG_MSDP_EVENTS) {
		zlog_debug("MSDP peer %s created", mp->key_str);

		pim_msdp_peer_state_chg_log(mp);
	}

	/* fireup the connect state machine */
	if (PIM_MSDP_PEER_IS_LISTENER(mp)) {
		pim_msdp_peer_listen(mp);
	} else {
		pim_msdp_peer_connect(mp);
	}
	return mp;
}
+
+struct pim_msdp_peer *pim_msdp_peer_find(struct pim_instance *pim,
+ struct in_addr peer_addr)
+{
+ struct pim_msdp_peer lookup;
+
+ lookup.peer = peer_addr;
+ return hash_lookup(pim->msdp.peer_hash, &lookup);
+}
+
+/* release all mem associated with a peer */
+static void pim_msdp_peer_free(struct pim_msdp_peer *mp)
+{
+ /*
+ * Let's make sure we are not running when we delete
+ * the underlying data structure
+ */
+ pim_msdp_peer_stop_tcp_conn(mp, false);
+
+ if (mp->ibuf) {
+ stream_free(mp->ibuf);
+ }
+
+ if (mp->obuf) {
+ stream_fifo_free(mp->obuf);
+ }
+
+ XFREE(MTYPE_PIM_MSDP_MG_NAME, mp->mesh_group_name);
+
+ mp->pim = NULL;
+ XFREE(MTYPE_PIM_MSDP_PEER, mp);
+}
+
/* delete the peer config; *mp is NULLed to defend against use-after-free */
void pim_msdp_peer_del(struct pim_msdp_peer **mp)
{
	if (*mp == NULL)
		return;

	/* stop the tcp connection and shutdown all timers */
	pim_msdp_peer_stop_tcp_conn(*mp, true /* chg_state */);

	/* remove the session from various tables */
	listnode_delete((*mp)->pim->msdp.peer_list, *mp);
	hash_release((*mp)->pim->msdp.peer_hash, *mp);

	if (PIM_DEBUG_MSDP_EVENTS) {
		zlog_debug("MSDP peer %s deleted", (*mp)->key_str);
	}

	/* free up any associated memory */
	pim_msdp_peer_free(*mp);
	*mp = NULL;
}
+
/* Change the local (source) address of a peer session and restart it in
 * whichever role (listener/connector) the new address comparison implies. */
void pim_msdp_peer_change_source(struct pim_msdp_peer *mp,
				 const struct in_addr *addr)
{
	/* drop the current session before switching addresses */
	pim_msdp_peer_stop_tcp_conn(mp, true);

	mp->local = *addr;

	if (PIM_MSDP_PEER_IS_LISTENER(mp))
		pim_msdp_peer_listen(mp);
	else
		pim_msdp_peer_connect(mp);
}
+
/* peer hash and peer list helpers */
/* hash key: derived from the peer's IPv4 address */
static unsigned int pim_msdp_peer_hash_key_make(const void *p)
{
	const struct pim_msdp_peer *mp = p;
	return (jhash_1word(mp->peer.s_addr, 0));
}
+
/* hash equality: two peers are the same iff their peer addresses match */
static bool pim_msdp_peer_hash_eq(const void *p1, const void *p2)
{
	const struct pim_msdp_peer *mp1 = p1;
	const struct pim_msdp_peer *mp2 = p2;

	return (mp1->peer.s_addr == mp2->peer.s_addr);
}
+
+static int pim_msdp_peer_comp(const void *p1, const void *p2)
+{
+ const struct pim_msdp_peer *mp1 = p1;
+ const struct pim_msdp_peer *mp2 = p2;
+
+ if (ntohl(mp1->peer.s_addr) < ntohl(mp2->peer.s_addr))
+ return -1;
+
+ if (ntohl(mp1->peer.s_addr) > ntohl(mp2->peer.s_addr))
+ return 1;
+
+ return 0;
+}
+
/************************** Mesh group management **************************/
/* Destroy a mesh group: tear down all member peer sessions, release the
 * member list and unlink the group from the instance. *mgp is NULLed. */
void pim_msdp_mg_free(struct pim_instance *pim, struct pim_msdp_mg **mgp)
{
	struct pim_msdp_mg_mbr *mbr;
	struct listnode *n, *nn;

	if (*mgp == NULL)
		return;

	/* the mesh group is going away - tear down all member peer
	 * sessions (safe iteration: mbr_del removes list nodes) */
	for (ALL_LIST_ELEMENTS((*mgp)->mbr_list, n, nn, mbr))
		pim_msdp_mg_mbr_del((*mgp), mbr);

	if (PIM_DEBUG_MSDP_EVENTS) {
		zlog_debug("MSDP mesh-group %s deleted",
			   (*mgp)->mesh_group_name);
	}

	XFREE(MTYPE_PIM_MSDP_MG_NAME, (*mgp)->mesh_group_name);

	if ((*mgp)->mbr_list)
		list_delete(&(*mgp)->mbr_list);

	SLIST_REMOVE(&pim->msdp.mglist, (*mgp), pim_msdp_mg, mg_entry);
	XFREE(MTYPE_PIM_MSDP_MG, (*mgp));
}
+
+/* Allocate a new mesh group, initialize its (sorted) member list and
+ * insert it at the head of the per-instance mesh-group list.
+ * Caller owns the returned group; free with pim_msdp_mg_free(). */
+struct pim_msdp_mg *pim_msdp_mg_new(struct pim_instance *pim,
+ const char *mesh_group_name)
+{
+ struct pim_msdp_mg *mg;
+
+ mg = XCALLOC(MTYPE_PIM_MSDP_MG, sizeof(*mg));
+
+ mg->mesh_group_name = XSTRDUP(MTYPE_PIM_MSDP_MG_NAME, mesh_group_name);
+ mg->mbr_list = list_new();
+ mg->mbr_list->del = (void (*)(void *))pim_msdp_mg_mbr_free;
+ mg->mbr_list->cmp = (int (*)(void *, void *))pim_msdp_mg_mbr_comp;
+
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ zlog_debug("MSDP mesh-group %s created", mg->mesh_group_name);
+ }
+
+ SLIST_INSERT_HEAD(&pim->msdp.mglist, mg, mg_entry);
+
+ return mg;
+}
+
+/* Member-list comparator: order members by ascending member IP
+ * (host byte order for a meaningful numeric compare). */
+static int pim_msdp_mg_mbr_comp(const void *p1, const void *p2)
+{
+ const struct pim_msdp_mg_mbr *mbr1 = p1;
+ const struct pim_msdp_mg_mbr *mbr2 = p2;
+
+ if (ntohl(mbr1->mbr_ip.s_addr) < ntohl(mbr2->mbr_ip.s_addr))
+ return -1;
+
+ if (ntohl(mbr1->mbr_ip.s_addr) > ntohl(mbr2->mbr_ip.s_addr))
+ return 1;
+
+ return 0;
+}
+
+/* Free a member structure only; any attached peer session must already
+ * have been deleted by the caller (see pim_msdp_mg_mbr_del). */
+static void pim_msdp_mg_mbr_free(struct pim_msdp_mg_mbr *mbr)
+{
+ XFREE(MTYPE_PIM_MSDP_MG_MBR, mbr);
+}
+
+/* Remove a member from a mesh group: close its peer session (if one is
+ * active), unlink and free the member, and decrement the member count. */
+void pim_msdp_mg_mbr_del(struct pim_msdp_mg *mg, struct pim_msdp_mg_mbr *mbr)
+{
+ /* Delete active peer session if any */
+ if (mbr->mp) {
+ pim_msdp_peer_del(&mbr->mp);
+ }
+
+ listnode_delete(mg->mbr_list, mbr);
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ char ip_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<mbr?>", mbr->mbr_ip, ip_str, sizeof(ip_str));
+ zlog_debug("MSDP mesh-group %s mbr %s deleted",
+ mg->mesh_group_name, ip_str);
+ }
+ pim_msdp_mg_mbr_free(mbr);
+ if (mg->mbr_cnt) {
+ --mg->mbr_cnt;
+ }
+}
+
+/* Source address of the mesh group is being removed: close every member's
+ * peer session but keep the member entries themselves (they are re-armed
+ * by pim_msdp_mg_src_add when a new source is configured). */
+static void pim_msdp_src_del(struct pim_msdp_mg *mg)
+{
+ struct pim_msdp_mg_mbr *mbr;
+ struct listnode *mbr_node;
+
+ /* SIP is being removed - tear down all active peer sessions */
+ for (ALL_LIST_ELEMENTS_RO(mg->mbr_list, mbr_node, mbr)) {
+ if (mbr->mp)
+ pim_msdp_peer_del(&mbr->mp);
+ }
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ zlog_debug("MSDP mesh-group %s src cleared",
+ mg->mesh_group_name);
+ }
+}
+
+/*********************** MSDP feature APIs *********************************/
+/* Emit "ip msdp mesh-group ..." running-config lines (source + members)
+ * for every mesh group, prefixed by 'spaces' for indentation.
+ * Returns the number of lines written. */
+int pim_msdp_config_write(struct pim_instance *pim, struct vty *vty,
+ const char *spaces)
+{
+ struct pim_msdp_mg *mg;
+ struct listnode *mbrnode;
+ struct pim_msdp_mg_mbr *mbr;
+ char src_str[INET_ADDRSTRLEN];
+ int count = 0;
+
+ if (SLIST_EMPTY(&pim->msdp.mglist))
+ return count;
+
+ SLIST_FOREACH (mg, &pim->msdp.mglist, mg_entry) {
+ /* only emit the source line when one is configured */
+ if (mg->src_ip.s_addr != INADDR_ANY) {
+ pim_inet4_dump("<src?>", mg->src_ip, src_str,
+ sizeof(src_str));
+ vty_out(vty, "%sip msdp mesh-group %s source %s\n",
+ spaces, mg->mesh_group_name, src_str);
+ ++count;
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(mg->mbr_list, mbrnode, mbr)) {
+ vty_out(vty, "%sip msdp mesh-group %s member %pI4\n",
+ spaces, mg->mesh_group_name, &mbr->mbr_ip);
+ ++count;
+ }
+ }
+
+ return count;
+}
+
+/* Emit "ip msdp peer ..." running-config lines for standalone peers
+ * (mesh-group members are written by pim_msdp_config_write instead).
+ * Returns true if at least one line was written. */
+bool pim_msdp_peer_config_write(struct vty *vty, struct pim_instance *pim,
+ const char *spaces)
+{
+ struct pim_msdp_peer *mp;
+ struct listnode *node;
+ bool written = false;
+
+ for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, node, mp)) {
+ /* Skip meshed group peers. */
+ if (mp->flags & PIM_MSDP_PEERF_IN_GROUP)
+ continue;
+
+ vty_out(vty, "%sip msdp peer %pI4 source %pI4\n", spaces,
+ &mp->peer, &mp->local);
+ written = true;
+ }
+
+ return written;
+}
+
+/* Enable feature including active/periodic timers etc. on the first peer
+ * config. Till then MSDP should just stay quiet. */
+static void pim_msdp_enable(struct pim_instance *pim)
+{
+ if (pim->msdp.flags & PIM_MSDPF_ENABLE) {
+ /* feature is already enabled */
+ return;
+ }
+ pim->msdp.flags |= PIM_MSDPF_ENABLE;
+ /* scratch buffer used when building outgoing SA TLVs */
+ pim->msdp.work_obuf = stream_new(PIM_MSDP_MAX_PACKET_SIZE);
+ pim_msdp_sa_adv_timer_setup(pim, true /* start */);
+ /* setup sa cache based on local sources */
+ pim_msdp_sa_local_setup(pim);
+}
+
+/* MSDP init */
+/* MSDP init: create the per-VRF peer and SA-cache containers (hash for
+ * lookup plus sorted list for ordered iteration/display). */
+void pim_msdp_init(struct pim_instance *pim, struct event_loop *master)
+{
+ pim->msdp.master = master;
+ char hash_name[64];
+
+ snprintf(hash_name, sizeof(hash_name), "PIM %s MSDP Peer Hash",
+ pim->vrf->name);
+ pim->msdp.peer_hash = hash_create(pim_msdp_peer_hash_key_make,
+ pim_msdp_peer_hash_eq, hash_name);
+ pim->msdp.peer_list = list_new();
+ pim->msdp.peer_list->del = (void (*)(void *))pim_msdp_peer_free;
+ pim->msdp.peer_list->cmp = (int (*)(void *, void *))pim_msdp_peer_comp;
+
+ snprintf(hash_name, sizeof(hash_name), "PIM %s MSDP SA Hash",
+ pim->vrf->name);
+ pim->msdp.sa_hash = hash_create(pim_msdp_sa_hash_key_make,
+ pim_msdp_sa_hash_eq, hash_name);
+ pim->msdp.sa_list = list_new();
+ pim->msdp.sa_list->del = (void (*)(void *))pim_msdp_sa_free;
+ pim->msdp.sa_list->cmp = (int (*)(void *, void *))pim_msdp_sa_comp;
+}
+
+/* counterpart to MSDP init; XXX: unused currently */
+/* counterpart to MSDP init; XXX: unused currently */
+/* Tear down per-VRF MSDP state: stop the SA advertisement timer, free all
+ * mesh groups (and their peer sessions), then the peer/SA containers and
+ * the SA scratch buffer. */
+void pim_msdp_exit(struct pim_instance *pim)
+{
+ struct pim_msdp_mg *mg;
+
+ pim_msdp_sa_adv_timer_setup(pim, false);
+
+ /* Stop listener and delete all peer sessions */
+ while ((mg = SLIST_FIRST(&pim->msdp.mglist)) != NULL)
+ pim_msdp_mg_free(pim, &mg);
+
+ hash_clean_and_free(&pim->msdp.peer_hash, NULL);
+
+ if (pim->msdp.peer_list) {
+ list_delete(&pim->msdp.peer_list);
+ }
+
+ hash_clean_and_free(&pim->msdp.sa_hash, NULL);
+
+ if (pim->msdp.sa_list) {
+ list_delete(&pim->msdp.sa_list);
+ }
+
+ if (pim->msdp.work_obuf)
+ stream_free(pim->msdp.work_obuf);
+ pim->msdp.work_obuf = NULL;
+}
+
+/* Set (or clear, when *ai is INADDR_ANY) the mesh group's source address.
+ * Existing peer sessions are always torn down first; with a valid new
+ * source, fresh peer sessions are created for every member. */
+void pim_msdp_mg_src_add(struct pim_instance *pim, struct pim_msdp_mg *mg,
+ struct in_addr *ai)
+{
+ struct pim_msdp_mg_mbr *mbr;
+ struct listnode *mbr_node;
+
+ /* Stop all connections and remove data structures. */
+ pim_msdp_src_del(mg);
+
+ /* Set new address. */
+ mg->src_ip = *ai;
+
+ /* No new address, disable everyone. */
+ if (ai->s_addr == INADDR_ANY) {
+ if (PIM_DEBUG_MSDP_EVENTS)
+ zlog_debug("MSDP mesh-group %s src unset",
+ mg->mesh_group_name);
+ return;
+ }
+
+ /* Create data structures and start TCP connection. */
+ for (ALL_LIST_ELEMENTS_RO(mg->mbr_list, mbr_node, mbr))
+ mbr->mp = pim_msdp_peer_add(pim, &mbr->mbr_ip, &mg->src_ip,
+ mg->mesh_group_name);
+
+ if (PIM_DEBUG_MSDP_EVENTS)
+ zlog_debug("MSDP mesh-group %s src %pI4 set",
+ mg->mesh_group_name, &mg->src_ip);
+}
+
+/* Add a new member to a mesh group (kept sorted by address).  A peer
+ * session is started immediately only if the group already has a valid
+ * source address configured.  Returns the new member entry. */
+struct pim_msdp_mg_mbr *pim_msdp_mg_mbr_add(struct pim_instance *pim,
+ struct pim_msdp_mg *mg,
+ struct in_addr *ia)
+{
+ struct pim_msdp_mg_mbr *mbr;
+
+ mbr = XCALLOC(MTYPE_PIM_MSDP_MG_MBR, sizeof(*mbr));
+ mbr->mbr_ip = *ia;
+ listnode_add_sort(mg->mbr_list, mbr);
+
+ /* if valid SIP has been configured add peer session */
+ if (mg->src_ip.s_addr != INADDR_ANY)
+ mbr->mp = pim_msdp_peer_add(pim, &mbr->mbr_ip, &mg->src_ip,
+ mg->mesh_group_name);
+
+ if (PIM_DEBUG_MSDP_EVENTS)
+ zlog_debug("MSDP mesh-group %s mbr %pI4 created",
+ mg->mesh_group_name, &mbr->mbr_ip);
+
+ ++mg->mbr_cnt;
+
+ return mbr;
+}
diff --git a/pimd/pim_msdp.h b/pimd/pim_msdp.h
new file mode 100644
index 0000000..ddc015f
--- /dev/null
+++ b/pimd/pim_msdp.h
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * IP MSDP for Quagga
+ * Copyright (C) 2016 Cumulus Networks, Inc.
+ */
+#ifndef PIM_MSDP_H
+#define PIM_MSDP_H
+
+#include "lib/openbsd-queue.h"
+
+/* Peer session states; presumably modeled after the RFC 3618 peer state
+ * machine - confirm against the FSM implementation. */
+enum pim_msdp_peer_state {
+ PIM_MSDP_DISABLED,
+ PIM_MSDP_INACTIVE,
+ PIM_MSDP_LISTEN,
+ PIM_MSDP_CONNECTING,
+ PIM_MSDP_ESTABLISHED
+};
+
+/* SA and KA TLVs are processed; rest ignored */
+enum pim_msdp_tlv {
+ PIM_MSDP_V4_SOURCE_ACTIVE = 1,
+ PIM_MSDP_V4_SOURCE_ACTIVE_REQUEST,
+ PIM_MSDP_V4_SOURCE_ACTIVE_RESPONSE,
+ PIM_MSDP_KEEPALIVE,
+ PIM_MSDP_RESERVED,
+ PIM_MSDP_TRACEROUTE_PROGRESS,
+ PIM_MSDP_TRACEROUTE_REPLY,
+};
+
+/* MSDP error codes */
+enum pim_msdp_err {
+ PIM_MSDP_ERR_NONE = 0,
+ PIM_MSDP_ERR_OOM = -1,
+ PIM_MSDP_ERR_PEER_EXISTS = -2,
+ PIM_MSDP_ERR_MAX_MESH_GROUPS = -3,
+ PIM_MSDP_ERR_NO_PEER = -4,
+ PIM_MSDP_ERR_MG_MBR_EXISTS = -5,
+ PIM_MSDP_ERR_NO_MG = -6,
+ PIM_MSDP_ERR_NO_MG_MBR = -7,
+ PIM_MSDP_ERR_SIP_EQ_DIP = -8,
+};
+
+#define PIM_MSDP_STATE_STRLEN 16
+#define PIM_MSDP_UPTIME_STRLEN 80
+#define PIM_MSDP_TIMER_STRLEN 12
+#define PIM_MSDP_TCP_PORT 639
+#define PIM_MSDP_SOCKET_SNDBUF_SIZE 65536
+
+enum pim_msdp_sa_flags {
+ PIM_MSDP_SAF_NONE = 0,
+ /* There are two cases where we can pickup an active source locally -
+ * 1. We are RP and got a source-register from the FHR
+ * 2. We are RP and FHR and learnt a new directly connected source on a
+ * DR interface */
+ PIM_MSDP_SAF_LOCAL = (1 << 0),
+ /* We got this in the MSDP SA TLV from a peer (and this passed peer-RPF
+ * checks) */
+ PIM_MSDP_SAF_PEER = (1 << 1),
+ PIM_MSDP_SAF_REF = (PIM_MSDP_SAF_LOCAL | PIM_MSDP_SAF_PEER),
+ PIM_MSDP_SAF_STALE = (1 << 2), /* local entries can get kicked out on
+ * misc pim events such as RP change */
+ PIM_MSDP_SAF_UP_DEL_IN_PROG = (1 << 3)
+};
+
+struct pim_msdp_sa {
+ struct pim_instance *pim;
+
+ pim_sgaddr sg;
+ char sg_str[PIM_SG_LEN];
+ struct in_addr rp; /* Last RP address associated with this SA */
+ struct in_addr peer; /* last peer from who we heard this SA */
+ enum pim_msdp_sa_flags flags;
+
+/* rfc-3618 is missing default value for SA-hold-down-Period. pulled
+ * this number from industry-standards */
+#define PIM_MSDP_SA_HOLD_TIME ((3*60)+30)
+ struct event *sa_state_timer; // 5.6
+ int64_t uptime;
+
+ struct pim_upstream *up;
+};
+
+enum pim_msdp_peer_flags {
+ PIM_MSDP_PEERF_NONE = 0,
+ PIM_MSDP_PEERF_LISTENER = (1 << 0),
+#define PIM_MSDP_PEER_IS_LISTENER(mp) (mp->flags & PIM_MSDP_PEERF_LISTENER)
+ PIM_MSDP_PEERF_SA_JUST_SENT = (1 << 1),
+ /** Flag to signalize that peer belongs to a group. */
+ PIM_MSDP_PEERF_IN_GROUP = (1 << 2),
+};
+
+struct pim_msdp_peer {
+ struct pim_instance *pim;
+
+ /* configuration */
+ struct in_addr local;
+ struct in_addr peer;
+ char *mesh_group_name;
+ char key_str[INET_ADDRSTRLEN];
+
+ /* state */
+ enum pim_msdp_peer_state state;
+ enum pim_msdp_peer_flags flags;
+
+ /* TCP socket info */
+ union sockunion su_local;
+ union sockunion su_peer;
+ int fd;
+
+/* protocol timers */
+#define PIM_MSDP_PEER_HOLD_TIME 75
+ struct event *hold_timer; // 5.4
+#define PIM_MSDP_PEER_KA_TIME 60
+ struct event *ka_timer; // 5.5
+#define PIM_MSDP_PEER_CONNECT_RETRY_TIME 30
+ struct event *cr_timer; // 5.6
+
+ /* packet thread and buffers */
+ uint32_t packet_size;
+ struct stream *ibuf;
+ struct stream_fifo *obuf;
+ struct event *t_read;
+ struct event *t_write;
+
+ /* stats */
+ uint32_t conn_attempts;
+ uint32_t est_flaps;
+ uint32_t sa_cnt; /* number of SAs attributed to this peer */
+#define PIM_MSDP_PEER_LAST_RESET_STR 20
+ char last_reset[PIM_MSDP_PEER_LAST_RESET_STR];
+
+ /* packet stats */
+ uint32_t ka_tx_cnt;
+ uint32_t sa_tx_cnt;
+ uint32_t ka_rx_cnt;
+ uint32_t sa_rx_cnt;
+ uint32_t unk_rx_cnt;
+
+ /* timestamps */
+ int64_t uptime;
+};
+
+/* One configured member (peer address) of a mesh group.  mp points at the
+ * active peer session, or is NULL while no group source is configured. */
+struct pim_msdp_mg_mbr {
+ struct in_addr mbr_ip;
+ struct pim_msdp_peer *mp;
+};
+
+/* PIM MSDP mesh-group */
+struct pim_msdp_mg {
+ char *mesh_group_name;
+ struct in_addr src_ip;
+ uint32_t mbr_cnt;
+ struct list *mbr_list;
+
+ /** Belongs to PIM instance list. */
+ SLIST_ENTRY(pim_msdp_mg) mg_entry;
+};
+
+SLIST_HEAD(pim_mesh_group_list, pim_msdp_mg);
+
+enum pim_msdp_flags {
+ PIM_MSDPF_NONE = 0,
+ PIM_MSDPF_ENABLE = (1 << 0),
+ PIM_MSDPF_LISTENER = (1 << 1)
+};
+
+struct pim_msdp_listener {
+ int fd;
+ union sockunion su;
+ struct event *thread;
+};
+
+struct pim_msdp {
+ enum pim_msdp_flags flags;
+ struct event_loop *master;
+ struct pim_msdp_listener listener;
+ uint32_t rejected_accepts;
+
+ /* MSDP peer info */
+ struct hash *peer_hash;
+ struct list *peer_list;
+
+/* MSDP active-source info */
+#define PIM_MSDP_SA_ADVERTISMENT_TIME 60
+ struct event *sa_adv_timer; // 5.6
+ struct hash *sa_hash;
+ struct list *sa_list;
+ uint32_t local_cnt;
+
+ /* keep a scratch pad for building SA TLVs */
+ struct stream *work_obuf;
+
+ struct in_addr originator_id;
+
+ /** List of mesh groups. */
+ struct pim_mesh_group_list mglist;
+
+ /** MSDP global hold time period. */
+ uint32_t hold_time;
+ /** MSDP global keep alive period. */
+ uint32_t keep_alive;
+ /** MSDP global connection retry period. */
+ uint32_t connection_retry;
+};
+
+#define PIM_MSDP_PEER_READ_ON(mp) \
+ event_add_read(mp->pim->msdp.master, pim_msdp_read, mp, mp->fd, \
+ &mp->t_read)
+
+#define PIM_MSDP_PEER_WRITE_ON(mp) \
+ event_add_write(mp->pim->msdp.master, pim_msdp_write, mp, mp->fd, \
+ &mp->t_write)
+
+#define PIM_MSDP_PEER_READ_OFF(mp) event_cancel(&mp->t_read)
+#define PIM_MSDP_PEER_WRITE_OFF(mp) event_cancel(&mp->t_write)
+
+#if PIM_IPV != 6
+// struct pim_msdp *msdp;
+struct pim_instance;
+void pim_msdp_init(struct pim_instance *pim, struct event_loop *master);
+void pim_msdp_exit(struct pim_instance *pim);
+char *pim_msdp_state_dump(enum pim_msdp_peer_state state, char *buf,
+ int buf_size);
+struct pim_msdp_peer *pim_msdp_peer_find(struct pim_instance *pim,
+ struct in_addr peer_addr);
+void pim_msdp_peer_established(struct pim_msdp_peer *mp);
+void pim_msdp_peer_pkt_rxed(struct pim_msdp_peer *mp);
+void pim_msdp_peer_stop_tcp_conn(struct pim_msdp_peer *mp, bool chg_state);
+void pim_msdp_peer_reset_tcp_conn(struct pim_msdp_peer *mp, const char *rc_str);
+void pim_msdp_write(struct event *thread);
+int pim_msdp_config_write(struct pim_instance *pim, struct vty *vty,
+ const char *spaces);
+bool pim_msdp_peer_config_write(struct vty *vty, struct pim_instance *pim,
+ const char *spaces);
+void pim_msdp_peer_pkt_txed(struct pim_msdp_peer *mp);
+void pim_msdp_sa_ref(struct pim_instance *pim, struct pim_msdp_peer *mp,
+ pim_sgaddr *sg, struct in_addr rp);
+void pim_msdp_sa_local_update(struct pim_upstream *up);
+void pim_msdp_sa_local_del(struct pim_instance *pim, pim_sgaddr *sg);
+void pim_msdp_i_am_rp_changed(struct pim_instance *pim);
+bool pim_msdp_peer_rpf_check(struct pim_msdp_peer *mp, struct in_addr rp);
+void pim_msdp_up_join_state_changed(struct pim_instance *pim,
+ struct pim_upstream *xg_up);
+void pim_msdp_up_del(struct pim_instance *pim, pim_sgaddr *sg);
+enum pim_msdp_err pim_msdp_mg_del(struct pim_instance *pim,
+ const char *mesh_group_name);
+
+/**
+ * Allocates a new mesh group data structure under PIM instance.
+ */
+struct pim_msdp_mg *pim_msdp_mg_new(struct pim_instance *pim,
+ const char *mesh_group_name);
+/**
+ * Deallocates mesh group data structure under PIM instance.
+ */
+void pim_msdp_mg_free(struct pim_instance *pim, struct pim_msdp_mg **mgp);
+
+/**
+ * Change the source address of a mesh group peers. It will do the following:
+ * - Close all peers TCP connections
+ * - Recreate peers data structure
+ * - Start TCP connections with new local address.
+ */
+void pim_msdp_mg_src_add(struct pim_instance *pim, struct pim_msdp_mg *mg,
+ struct in_addr *ai);
+
+/**
+ * Add new peer to mesh group and starts the connection if source address is
+ * configured.
+ */
+struct pim_msdp_mg_mbr *pim_msdp_mg_mbr_add(struct pim_instance *pim,
+ struct pim_msdp_mg *mg,
+ struct in_addr *ia);
+
+/**
+ * Stops the connection and removes the peer data structures.
+ */
+void pim_msdp_mg_mbr_del(struct pim_msdp_mg *mg, struct pim_msdp_mg_mbr *mbr);
+
+/**
+ * Allocates MSDP peer data structure, adds peer to group name
+ * `mesh_group_name` and starts state machine. If no group name is provided then
+ * the peer will work standalone.
+ *
+ * \param pim PIM instance
+ * \param peer_addr peer address
+ * \param local_addr local listening address
+ * \param mesh_group_name mesh group name (or `NULL` for peers without group).
+ */
+struct pim_msdp_peer *pim_msdp_peer_add(struct pim_instance *pim,
+ const struct in_addr *peer_addr,
+ const struct in_addr *local_addr,
+ const char *mesh_group_name);
+
+/**
+ * Stops peer state machine and free memory.
+ */
+void pim_msdp_peer_del(struct pim_msdp_peer **mp);
+
+/**
+ * Changes peer source address.
+ *
+ * NOTE:
+ * This will cause the connection to drop and start again.
+ */
+void pim_msdp_peer_change_source(struct pim_msdp_peer *mp,
+ const struct in_addr *addr);
+
+#else /* PIM_IPV == 6 */
+static inline void pim_msdp_init(struct pim_instance *pim,
+ struct event_loop *master)
+{
+}
+
+static inline void pim_msdp_exit(struct pim_instance *pim)
+{
+}
+
+static inline void pim_msdp_i_am_rp_changed(struct pim_instance *pim)
+{
+}
+
+static inline void pim_msdp_up_join_state_changed(struct pim_instance *pim,
+ struct pim_upstream *xg_up)
+{
+}
+
+static inline void pim_msdp_up_del(struct pim_instance *pim, pim_sgaddr *sg)
+{
+}
+
+static inline void pim_msdp_sa_local_update(struct pim_upstream *up)
+{
+}
+
+static inline void pim_msdp_sa_local_del(struct pim_instance *pim,
+ pim_sgaddr *sg)
+{
+}
+
+static inline int pim_msdp_config_write(struct pim_instance *pim,
+ struct vty *vty, const char *spaces)
+{
+ return 0;
+}
+
+static inline bool pim_msdp_peer_config_write(struct vty *vty,
+ struct pim_instance *pim,
+ const char *spaces)
+{
+ return false;
+}
+#endif /* PIM_IPV == 6 */
+
+#endif
diff --git a/pimd/pim_msdp_packet.c b/pimd/pim_msdp_packet.c
new file mode 100644
index 0000000..4324a96
--- /dev/null
+++ b/pimd/pim_msdp_packet.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * IP MSDP packet helper
+ * Copyright (C) 2016 Cumulus Networks, Inc.
+ */
+#include <zebra.h>
+
+#include <lib/log.h>
+#include <lib/network.h>
+#include <lib/stream.h>
+#include "frrevent.h"
+#include <lib/vty.h>
+#include <lib/lib_errors.h>
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_rp.h"
+#include "pim_str.h"
+#include "pim_util.h"
+#include "pim_errors.h"
+
+#include "pim_msdp.h"
+#include "pim_msdp_packet.h"
+#include "pim_msdp_socket.h"
+
+/* Render an MSDP TLV type as a short mnemonic string into buf
+ * (unknown values become "UNK-<n>").  Returns buf for convenience. */
+static char *pim_msdp_pkt_type_dump(enum pim_msdp_tlv type, char *buf,
+ int buf_size)
+{
+ switch (type) {
+ case PIM_MSDP_V4_SOURCE_ACTIVE:
+ snprintf(buf, buf_size, "%s", "SA");
+ break;
+ case PIM_MSDP_V4_SOURCE_ACTIVE_REQUEST:
+ snprintf(buf, buf_size, "%s", "SA_REQ");
+ break;
+ case PIM_MSDP_V4_SOURCE_ACTIVE_RESPONSE:
+ snprintf(buf, buf_size, "%s", "SA_RESP");
+ break;
+ case PIM_MSDP_KEEPALIVE:
+ snprintf(buf, buf_size, "%s", "KA");
+ break;
+ case PIM_MSDP_RESERVED:
+ snprintf(buf, buf_size, "%s", "RSVD");
+ break;
+ case PIM_MSDP_TRACEROUTE_PROGRESS:
+ snprintf(buf, buf_size, "%s", "TRACE_PROG");
+ break;
+ case PIM_MSDP_TRACEROUTE_REPLY:
+ snprintf(buf, buf_size, "%s", "TRACE_REPLY");
+ break;
+ default:
+ snprintf(buf, buf_size, "UNK-%d", type);
+ }
+ return buf;
+}
+
+/* Debug-dump one SA entry (reserved[3] + prefix-len + group + source)
+ * from the stream, advancing the stream's get pointer past the entry. */
+static void pim_msdp_pkt_sa_dump_one(struct stream *s)
+{
+ pim_sgaddr sg;
+
+ /* just throw away the three reserved bytes */
+ stream_get3(s);
+ /* throw away the prefix length also */
+ stream_getc(s);
+
+ memset(&sg, 0, sizeof(sg));
+ sg.grp.s_addr = stream_get_ipv4(s);
+ sg.src.s_addr = stream_get_ipv4(s);
+
+ zlog_debug(" sg %pSG", &sg);
+}
+
+/* Debug-dump a whole SA TLV body (entry count + RP + entries), with
+ * defensive length checks so a short buffer cannot be over-read. */
+static void pim_msdp_pkt_sa_dump(struct stream *s)
+{
+ /* SA fixed part past the 3-byte TLV header: 1-byte entry count +
+ * 4-byte RP address */
+ const size_t header_length = PIM_MSDP_SA_X_SIZE - PIM_MSDP_HEADER_SIZE;
+ size_t payload_length;
+ int entry_cnt;
+ int i;
+ struct in_addr rp; /* Last RP address associated with this SA */
+
+ if (header_length > STREAM_READABLE(s)) {
+ zlog_err("BUG MSDP SA bad header (readable %zu expected %zu)",
+ STREAM_READABLE(s), header_length);
+ return;
+ }
+
+ entry_cnt = stream_getc(s);
+ rp.s_addr = stream_get_ipv4(s);
+
+ if (PIM_DEBUG_MSDP_PACKETS) {
+ char rp_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<rp?>", rp, rp_str, sizeof(rp_str));
+ zlog_debug(" entry_cnt %d rp %s", entry_cnt, rp_str);
+ }
+
+ payload_length = (size_t)entry_cnt * PIM_MSDP_SA_ONE_ENTRY_SIZE;
+ if (payload_length > STREAM_READABLE(s)) {
+ zlog_err("BUG MSDP SA bad length (readable %zu expected %zu)",
+ STREAM_READABLE(s), payload_length);
+ return;
+ }
+
+ /* dump SAs */
+ for (i = 0; i < entry_cnt; ++i) {
+ pim_msdp_pkt_sa_dump_one(s);
+ }
+}
+
+/* Debug-dump a packet summary (peer, direction, type, length); when a
+ * stream is supplied and the packet is an SA TLV, dump its contents too. */
+static void pim_msdp_pkt_dump(struct pim_msdp_peer *mp, int type, int len,
+ bool rx, struct stream *s)
+{
+ char type_str[PIM_MSDP_PKT_TYPE_STRLEN];
+
+ pim_msdp_pkt_type_dump(type, type_str, sizeof(type_str));
+
+ zlog_debug("MSDP peer %s pkt %s type %s len %d", mp->key_str,
+ rx ? "rx" : "tx", type_str, len);
+
+ if (!s) {
+ return;
+ }
+
+ if (len < PIM_MSDP_HEADER_SIZE) {
+ zlog_err("invalid MSDP header length");
+ return;
+ }
+
+ switch (type) {
+ case PIM_MSDP_V4_SOURCE_ACTIVE:
+ pim_msdp_pkt_sa_dump(s);
+ break;
+ default:;
+ }
+}
+
+/* Check file descriptor whether connect is established. */
+static void pim_msdp_connect_check(struct pim_msdp_peer *mp)
+{
+ int status;
+ socklen_t slen;
+ int ret;
+
+ if (mp->state != PIM_MSDP_CONNECTING) {
+ /* if we are here it means we are not in a connecting or
+ * established state
+ * for now treat this as a fatal error */
+ pim_msdp_peer_reset_tcp_conn(mp, "invalid-state");
+ return;
+ }
+
+ PIM_MSDP_PEER_READ_OFF(mp);
+ PIM_MSDP_PEER_WRITE_OFF(mp);
+
+ /* Check file descriptor. */
+ slen = sizeof(status);
+ ret = getsockopt(mp->fd, SOL_SOCKET, SO_ERROR, (void *)&status, &slen);
+
+ /* If getsockopt is fail, this is fatal error. */
+ if (ret < 0) {
+ flog_err_sys(EC_LIB_SOCKET,
+ "can't get sockopt for nonblocking connect");
+ pim_msdp_peer_reset_tcp_conn(mp, "connect-failed");
+ return;
+ }
+
+ /* When status is 0 then TCP connection is established. */
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug("MSDP peer %s pim_connect_check %s", mp->key_str,
+ status ? "fail" : "success");
+ }
+ if (status == 0) {
+ pim_msdp_peer_established(mp);
+ } else {
+ pim_msdp_peer_reset_tcp_conn(mp, "connect-failed");
+ }
+}
+
+/* Pop the packet at the head of the peer's output fifo and free it. */
+static void pim_msdp_pkt_delete(struct pim_msdp_peer *mp)
+{
+ stream_free(stream_fifo_pop(mp->obuf));
+}
+
+/* Append a packet to the peer's output fifo; ownership of the stream
+ * transfers to the fifo (freed by pim_msdp_pkt_delete after tx). */
+static void pim_msdp_pkt_add(struct pim_msdp_peer *mp, struct stream *s)
+{
+ stream_fifo_push(mp->obuf, s);
+}
+
+/* Re-arm the write event if there is still output queued for the peer. */
+static void pim_msdp_write_proceed_actions(struct pim_msdp_peer *mp)
+{
+ if (stream_fifo_head(mp->obuf)) {
+ PIM_MSDP_PEER_WRITE_ON(mp);
+ }
+}
+
+/* Write-ready event handler: drain the peer's output fifo with
+ * non-blocking write()s.  Handles pending non-blocking connects, partial
+ * writes (resume later), transient errors (retry) and fatal errors
+ * (session reset).  Bounded to work_max_cnt packets per invocation so one
+ * peer cannot monopolize the event loop. */
+void pim_msdp_write(struct event *thread)
+{
+ struct pim_msdp_peer *mp;
+ struct stream *s;
+ int num;
+ enum pim_msdp_tlv type;
+ int len;
+ int work_cnt = 0;
+ int work_max_cnt = 100;
+
+ mp = EVENT_ARG(thread);
+ mp->t_write = NULL;
+
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug("MSDP peer %s pim_msdp_write", mp->key_str);
+ }
+ if (mp->fd < 0) {
+ return;
+ }
+
+ /* check if TCP connection is established */
+ if (mp->state != PIM_MSDP_ESTABLISHED) {
+ pim_msdp_connect_check(mp);
+ return;
+ }
+
+ s = stream_fifo_head(mp->obuf);
+ if (!s) {
+ pim_msdp_write_proceed_actions(mp);
+ return;
+ }
+
+ /* Nonblocking write until TCP output buffer is full */
+ do {
+ int writenum;
+
+ /* Number of bytes to be sent */
+ writenum = stream_get_endp(s) - stream_get_getp(s);
+
+ /* Call write() system call */
+ num = write(mp->fd, stream_pnt(s), writenum);
+ if (num < 0) {
+ /* write failed either retry needed or error */
+ if (ERRNO_IO_RETRY(errno)) {
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug(
+ "MSDP peer %s pim_msdp_write io retry",
+ mp->key_str);
+ }
+ break;
+ }
+
+ pim_msdp_peer_reset_tcp_conn(mp, "pkt-tx-failed");
+ return;
+ }
+
+ if (num != writenum) {
+ /* Partial write */
+ stream_forward_getp(s, num);
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug(
+ "MSDP peer %s pim_msdp_partial_write",
+ mp->key_str);
+ }
+ break;
+ }
+
+ /* Retrieve msdp packet type. */
+ /* whole packet written - rewind to the header to account for
+ * the TLV type in the tx stats */
+ stream_set_getp(s, 0);
+ type = stream_getc(s);
+ len = stream_getw(s);
+ switch (type) {
+ case PIM_MSDP_KEEPALIVE:
+ mp->ka_tx_cnt++;
+ break;
+ case PIM_MSDP_V4_SOURCE_ACTIVE:
+ mp->sa_tx_cnt++;
+ break;
+ case PIM_MSDP_V4_SOURCE_ACTIVE_REQUEST:
+ case PIM_MSDP_V4_SOURCE_ACTIVE_RESPONSE:
+ case PIM_MSDP_RESERVED:
+ case PIM_MSDP_TRACEROUTE_PROGRESS:
+ case PIM_MSDP_TRACEROUTE_REPLY:
+ break;
+ }
+ if (PIM_DEBUG_MSDP_PACKETS) {
+ pim_msdp_pkt_dump(mp, type, len, false /*rx*/, s);
+ }
+
+ /* packet sent delete it. */
+ pim_msdp_pkt_delete(mp);
+
+ ++work_cnt;
+ /* may need to pause if we have done too much work in this
+ * loop */
+ if (work_cnt >= work_max_cnt) {
+ break;
+ }
+ } while ((s = stream_fifo_head(mp->obuf)) != NULL);
+ pim_msdp_write_proceed_actions(mp);
+
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug("MSDP peer %s pim_msdp_write wrote %d packets",
+ mp->key_str, work_cnt);
+ }
+}
+
+/* Queue a packet for transmission and arm the write event. */
+static void pim_msdp_pkt_send(struct pim_msdp_peer *mp, struct stream *s)
+{
+ /* Add packet to the end of list. */
+ pim_msdp_pkt_add(mp, s);
+
+ PIM_MSDP_PEER_WRITE_ON(mp);
+}
+
+/* Build and queue a keepalive TLV (type + 2-byte length, no body). */
+void pim_msdp_pkt_ka_tx(struct pim_msdp_peer *mp)
+{
+ struct stream *s;
+
+ if (mp->state != PIM_MSDP_ESTABLISHED) {
+ /* don't tx anything unless a session is established */
+ return;
+ }
+ s = stream_new(PIM_MSDP_KA_TLV_MAX_SIZE);
+ stream_putc(s, PIM_MSDP_KEEPALIVE);
+ stream_putw(s, PIM_MSDP_KA_TLV_MAX_SIZE);
+
+ pim_msdp_pkt_send(mp, s);
+}
+
+/* Queue a copy of the SA TLV staged in the instance scratch buffer
+ * (msdp.work_obuf) to one established peer; marks the peer so the KA
+ * timer can be refreshed in pim_msdp_pkt_sa_tx_done(). */
+static void pim_msdp_pkt_sa_push_to_one_peer(struct pim_instance *pim,
+ struct pim_msdp_peer *mp)
+{
+ struct stream *s;
+
+ if (mp->state != PIM_MSDP_ESTABLISHED) {
+ /* don't tx anything unless a session is established */
+ return;
+ }
+ s = stream_dup(pim->msdp.work_obuf);
+ if (s) {
+ pim_msdp_pkt_send(mp, s);
+ mp->flags |= PIM_MSDP_PEERF_SA_JUST_SENT;
+ }
+}
+
+/* push the stream into the obuf fifo of all the peers */
+/* If mp is non-NULL push only to that peer; otherwise fan out the staged
+ * SA TLV to every peer in the instance. */
+static void pim_msdp_pkt_sa_push(struct pim_instance *pim,
+ struct pim_msdp_peer *mp)
+{
+ struct listnode *mpnode;
+
+ if (mp) {
+ pim_msdp_pkt_sa_push_to_one_peer(pim, mp);
+ } else {
+ for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, mpnode, mp)) {
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug("MSDP peer %s pim_msdp_pkt_sa_push",
+ mp->key_str);
+ }
+ pim_msdp_pkt_sa_push_to_one_peer(pim, mp);
+ }
+ }
+}
+
+/* Reset the scratch buffer and write the SA TLV header (type, length,
+ * entry count capped at PIM_MSDP_SA_MAX_ENTRY_CNT, RP address).
+ * Returns the number of entries that did NOT fit in this TLV, so the
+ * caller can loop and emit further TLVs for the remainder. */
+static int pim_msdp_pkt_sa_fill_hdr(struct pim_instance *pim, int local_cnt,
+ struct in_addr rp)
+{
+ int curr_tlv_ecnt;
+
+ stream_reset(pim->msdp.work_obuf);
+ curr_tlv_ecnt = local_cnt > PIM_MSDP_SA_MAX_ENTRY_CNT
+ ? PIM_MSDP_SA_MAX_ENTRY_CNT
+ : local_cnt;
+ local_cnt -= curr_tlv_ecnt;
+ stream_putc(pim->msdp.work_obuf, PIM_MSDP_V4_SOURCE_ACTIVE);
+ stream_putw(pim->msdp.work_obuf,
+ PIM_MSDP_SA_ENTRY_CNT2SIZE(curr_tlv_ecnt));
+ stream_putc(pim->msdp.work_obuf, curr_tlv_ecnt);
+ stream_put_ipv4(pim->msdp.work_obuf, rp.s_addr);
+
+ return local_cnt;
+}
+
+/* Append one SA entry (reserved[3], /32 prefix-len, group, source) to the
+ * scratch buffer. */
+static void pim_msdp_pkt_sa_fill_one(struct pim_msdp_sa *sa)
+{
+ stream_put3(sa->pim->msdp.work_obuf, 0 /* reserved */);
+ stream_putc(sa->pim->msdp.work_obuf, 32 /* sprefix len */);
+ stream_put_ipv4(sa->pim->msdp.work_obuf, sa->sg.grp.s_addr);
+ stream_put_ipv4(sa->pim->msdp.work_obuf, sa->sg.src.s_addr);
+}
+
+/* Build SA TLVs for all locally-originated cache entries and push them to
+ * one peer (mp) or to all peers (mp == NULL).  The RP address advertised
+ * is the configured RP for the all-groups range when one exists,
+ * otherwise the originator id.  Entries are batched into TLVs of at most
+ * PIM_MSDP_SA_MAX_ENTRY_CNT entries each. */
+static void pim_msdp_pkt_sa_gen(struct pim_instance *pim,
+ struct pim_msdp_peer *mp)
+{
+ struct listnode *sanode;
+ struct pim_msdp_sa *sa;
+ struct rp_info *rp_info;
+ struct prefix group_all;
+ struct in_addr rp;
+ int sa_count;
+ int local_cnt = pim->msdp.local_cnt;
+
+ sa_count = 0;
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug(" sa gen %d", local_cnt);
+ }
+
+ rp = pim->msdp.originator_id;
+ if (pim_get_all_mcast_group(&group_all)) {
+ rp_info = pim_rp_find_match_group(pim, &group_all);
+ if (rp_info) {
+ rp = rp_info->rp.rpf_addr;
+ }
+ }
+
+ local_cnt = pim_msdp_pkt_sa_fill_hdr(pim, local_cnt, rp);
+
+ for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, sanode, sa)) {
+ if (!(sa->flags & PIM_MSDP_SAF_LOCAL)) {
+ /* current implementation of MSDP is for anycast i.e.
+ * full mesh. so
+ * no re-forwarding of SAs that we learnt from other
+ * peers */
+ continue;
+ }
+ /* add sa into scratch pad */
+ pim_msdp_pkt_sa_fill_one(sa);
+ ++sa_count;
+ if (sa_count >= PIM_MSDP_SA_MAX_ENTRY_CNT) {
+ pim_msdp_pkt_sa_push(pim, mp);
+ /* reset headers */
+ sa_count = 0;
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug(" sa gen for remainder %d",
+ local_cnt);
+ }
+ local_cnt = pim_msdp_pkt_sa_fill_hdr(
+ pim, local_cnt, rp);
+ }
+ }
+
+ /* flush the final, partially-filled TLV if any */
+ if (sa_count) {
+ pim_msdp_pkt_sa_push(pim, mp);
+ }
+ return;
+}
+
+/* Post-SA-tx bookkeeping: for each peer that just got an SA, clear the
+ * marker flag and refresh its keepalive timer. */
+static void pim_msdp_pkt_sa_tx_done(struct pim_instance *pim)
+{
+ struct listnode *mpnode;
+ struct pim_msdp_peer *mp;
+
+ /* if SA were sent to the peers we restart ka timer and avoid
+ * unnecessary ka noise */
+ for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, mpnode, mp)) {
+ if (mp->flags & PIM_MSDP_PEERF_SA_JUST_SENT) {
+ mp->flags &= ~PIM_MSDP_PEERF_SA_JUST_SENT;
+ pim_msdp_peer_pkt_txed(mp);
+ }
+ }
+}
+
+/* Advertise all local SA cache entries to every peer (periodic tx). */
+void pim_msdp_pkt_sa_tx(struct pim_instance *pim)
+{
+ pim_msdp_pkt_sa_gen(pim, NULL /* mp */);
+ pim_msdp_pkt_sa_tx_done(pim);
+}
+
+/* Advertise a single SA entry to every peer (e.g. on new local source). */
+void pim_msdp_pkt_sa_tx_one(struct pim_msdp_sa *sa)
+{
+ pim_msdp_pkt_sa_fill_hdr(sa->pim, 1 /* cnt */, sa->rp);
+ pim_msdp_pkt_sa_fill_one(sa);
+ pim_msdp_pkt_sa_push(sa->pim, NULL);
+ pim_msdp_pkt_sa_tx_done(sa->pim);
+}
+
+/* when a connection is first established we push all SAs immediately */
+void pim_msdp_pkt_sa_tx_to_one_peer(struct pim_msdp_peer *mp)
+{
+ pim_msdp_pkt_sa_gen(mp->pim, mp);
+ pim_msdp_pkt_sa_tx_done(mp->pim);
+}
+
+/* Forward a single (S,G)/RP announcement to one specific peer; used by
+ * the SA-forwarding path in pim_msdp_pkt_sa_rx_one().  A temporary
+ * stack-local SA is used only to reuse the fill helper. */
+void pim_msdp_pkt_sa_tx_one_to_one_peer(struct pim_msdp_peer *mp,
+ struct in_addr rp, pim_sgaddr sg)
+{
+ struct pim_msdp_sa sa;
+
+ /* Fills the SA header. */
+ pim_msdp_pkt_sa_fill_hdr(mp->pim, 1, rp);
+
+ /* Fills the message contents. */
+ sa.pim = mp->pim;
+ sa.sg = sg;
+ pim_msdp_pkt_sa_fill_one(&sa);
+
+ /* Pushes the message. */
+ pim_msdp_pkt_sa_push(sa.pim, mp);
+ pim_msdp_pkt_sa_tx_done(sa.pim);
+}
+
+/* A malformed packet was received: reset the TCP session. */
+static void pim_msdp_pkt_rxed_with_fatal_error(struct pim_msdp_peer *mp)
+{
+ pim_msdp_peer_reset_tcp_conn(mp, "invalid-pkt-rx");
+}
+
+/* Handle a received keepalive TLV: count it, validate the length (a KA
+ * is exactly the 3-byte header) and refresh the peer hold timer. */
+static void pim_msdp_pkt_ka_rx(struct pim_msdp_peer *mp, int len)
+{
+ mp->ka_rx_cnt++;
+ if (len != PIM_MSDP_KA_TLV_MAX_SIZE) {
+ pim_msdp_pkt_rxed_with_fatal_error(mp);
+ return;
+ }
+ pim_msdp_peer_pkt_rxed(mp);
+}
+
+/* Process one SA entry from the peer's input buffer: parse it, add/refresh
+ * it in the SA cache, then forward it to peers that fail the peer-RPF
+ * check and are outside the sender's mesh group. */
+static void pim_msdp_pkt_sa_rx_one(struct pim_msdp_peer *mp, struct in_addr rp)
+{
+ int prefix_len;
+ pim_sgaddr sg;
+ struct listnode *peer_node;
+ struct pim_msdp_peer *peer;
+
+ /* just throw away the three reserved bytes */
+ stream_get3(mp->ibuf);
+ prefix_len = stream_getc(mp->ibuf);
+
+ memset(&sg, 0, sizeof(sg));
+ sg.grp.s_addr = stream_get_ipv4(mp->ibuf);
+ sg.src.s_addr = stream_get_ipv4(mp->ibuf);
+
+ if (prefix_len != IPV4_MAX_BITLEN) {
+ /* ignore SA update if the prefix length is not 32 */
+ flog_err(EC_PIM_MSDP_PACKET,
+ "rxed sa update with invalid prefix length %d",
+ prefix_len);
+ return;
+ }
+ if (PIM_DEBUG_MSDP_PACKETS) {
+ zlog_debug(" sg %pSG", &sg);
+ }
+ pim_msdp_sa_ref(mp->pim, mp, &sg, rp);
+
+ /* Forwards the SA to the peers that are not in the RPF to the RP nor in
+ * the same mesh group as the peer from which we received the message.
+ * If the message group is not set, i.e. "default", then we assume that
+ * the message must be forwarded.*/
+ for (ALL_LIST_ELEMENTS_RO(mp->pim->msdp.peer_list, peer_node, peer)) {
+ /* Not a RPF peer, so skip it. */
+ if (pim_msdp_peer_rpf_check(peer, rp))
+ continue;
+ /* Don't forward inside the meshed group. */
+ if ((mp->flags & PIM_MSDP_PEERF_IN_GROUP)
+ && strcmp(mp->mesh_group_name, peer->mesh_group_name) == 0)
+ continue;
+
+ pim_msdp_pkt_sa_tx_one_to_one_peer(peer, rp, sg);
+ }
+}
+
+/* Handle a received SA TLV: validate lengths, refresh the hold timer,
+ * apply the peer-RPF check and, if it passes, update the SA cache with
+ * every entry.  Note this is where sa_rx_cnt is incremented. */
+static void pim_msdp_pkt_sa_rx(struct pim_msdp_peer *mp, int len)
+{
+ int entry_cnt;
+ int i;
+ struct in_addr rp; /* Last RP address associated with this SA */
+
+ mp->sa_rx_cnt++;
+
+ if (len < PIM_MSDP_SA_TLV_MIN_SIZE) {
+ pim_msdp_pkt_rxed_with_fatal_error(mp);
+ return;
+ }
+
+ entry_cnt = stream_getc(mp->ibuf);
+ /* some vendors include the actual multicast data in the tlv (at the
+ * end). we will ignore such data. in the future we may consider pushing
+ * it down the RPT
+ */
+ if (len < PIM_MSDP_SA_ENTRY_CNT2SIZE(entry_cnt)) {
+ pim_msdp_pkt_rxed_with_fatal_error(mp);
+ return;
+ }
+ rp.s_addr = stream_get_ipv4(mp->ibuf);
+
+ if (PIM_DEBUG_MSDP_PACKETS) {
+ char rp_str[INET_ADDRSTRLEN];
+ pim_inet4_dump("<rp?>", rp, rp_str, sizeof(rp_str));
+ zlog_debug(" entry_cnt %d rp %s", entry_cnt, rp_str);
+ }
+
+ pim_msdp_peer_pkt_rxed(mp);
+
+ if (!pim_msdp_peer_rpf_check(mp, rp)) {
+ /* if peer-RPF check fails don't process the packet any further
+ */
+ if (PIM_DEBUG_MSDP_PACKETS) {
+ zlog_debug(" peer RPF check failed");
+ }
+ return;
+ }
+
+ /* update SA cache */
+ for (i = 0; i < entry_cnt; ++i) {
+ pim_msdp_pkt_sa_rx_one(mp, rp);
+ }
+}
+
+/* Dispatch a fully-read TLV from the peer's input buffer: re-read the
+ * type/length from the header, sanity-check the length and hand the TLV
+ * to the matching handler.  Oversized TLVs are silently ignored; unknown
+ * types only bump the unknown-rx counter. */
+static void pim_msdp_pkt_rx(struct pim_msdp_peer *mp)
+{
+ enum pim_msdp_tlv type;
+ int len;
+
+ /* re-read type and len */
+ type = stream_getc_from(mp->ibuf, 0);
+ len = stream_getw_from(mp->ibuf, 1);
+ if (len < PIM_MSDP_HEADER_SIZE) {
+ pim_msdp_pkt_rxed_with_fatal_error(mp);
+ return;
+ }
+
+ if (len > PIM_MSDP_SA_TLV_MAX_SIZE) {
+ /* if tlv size if greater than max just ignore the tlv */
+ return;
+ }
+
+ if (PIM_DEBUG_MSDP_PACKETS) {
+ pim_msdp_pkt_dump(mp, type, len, true /*rx*/, NULL /*s*/);
+ }
+
+ switch (type) {
+ case PIM_MSDP_KEEPALIVE:
+ pim_msdp_pkt_ka_rx(mp, len);
+ break;
+ case PIM_MSDP_V4_SOURCE_ACTIVE:
+ /* sa_rx_cnt is incremented inside pim_msdp_pkt_sa_rx();
+ * bumping it here as well would double-count SA TLVs
+ * (the KA case likewise leaves ka_rx_cnt to its handler) */
+ pim_msdp_pkt_sa_rx(mp, len);
+ break;
+ case PIM_MSDP_V4_SOURCE_ACTIVE_REQUEST:
+ case PIM_MSDP_V4_SOURCE_ACTIVE_RESPONSE:
+ case PIM_MSDP_RESERVED:
+ case PIM_MSDP_TRACEROUTE_PROGRESS:
+ case PIM_MSDP_TRACEROUTE_REPLY:
+ mp->unk_rx_cnt++;
+ break;
+ }
+}
+
+/* pim msdp read utility function. */
+static int pim_msdp_read_packet(struct pim_msdp_peer *mp)
+{
+ int nbytes;
+ int readsize;
+ int old_endp;
+ int new_endp;
+
+ old_endp = stream_get_endp(mp->ibuf);
+ readsize = mp->packet_size - old_endp;
+ if (!readsize) {
+ return 0;
+ }
+
+ /* Read packet from fd */
+ nbytes = stream_read_try(mp->ibuf, mp->fd, readsize);
+ new_endp = stream_get_endp(mp->ibuf);
+ if (nbytes < 0) {
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug("MSDP peer %s read failed %d", mp->key_str,
+ nbytes);
+ }
+ if (nbytes == -2) {
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug(
+ "MSDP peer %s pim_msdp_read io retry old_end: %d new_end: %d",
+ mp->key_str, old_endp, new_endp);
+ }
+ /* transient error retry */
+ return -1;
+ }
+ pim_msdp_pkt_rxed_with_fatal_error(mp);
+ return -1;
+ }
+
+ if (!nbytes) {
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug("MSDP peer %s read failed %d", mp->key_str,
+ nbytes);
+ }
+ pim_msdp_peer_reset_tcp_conn(mp, "peer-down");
+ return -1;
+ }
+
+ /* We read partial packet. */
+ if (stream_get_endp(mp->ibuf) != mp->packet_size) {
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug(
+ "MSDP peer %s read partial len %d old_endp %d new_endp %d",
+ mp->key_str, mp->packet_size, old_endp,
+ new_endp);
+ }
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Read-ready event handler: first read the 3-byte TLV header to learn the
+ * TLV length, then read the full TLV (resizing the input buffer for
+ * oversized-but-tolerated TLVs per RFC 3618 section 12) and dispatch it.
+ * State is kept in mp->packet_size/mp->ibuf across partial reads. */
+void pim_msdp_read(struct event *thread)
+{
+ struct pim_msdp_peer *mp;
+ int rc;
+ uint32_t len;
+
+ mp = EVENT_ARG(thread);
+ mp->t_read = NULL;
+
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug("MSDP peer %s pim_msdp_read", mp->key_str);
+ }
+
+ if (mp->fd < 0) {
+ return;
+ }
+
+ /* check if TCP connection is established */
+ if (mp->state != PIM_MSDP_ESTABLISHED) {
+ pim_msdp_connect_check(mp);
+ return;
+ }
+
+ /* re-arm immediately; this handler must be scheduled again for
+ * subsequent data */
+ PIM_MSDP_PEER_READ_ON(mp);
+
+ if (!mp->packet_size) {
+ mp->packet_size = PIM_MSDP_HEADER_SIZE;
+ }
+
+ if (stream_get_endp(mp->ibuf) < PIM_MSDP_HEADER_SIZE) {
+ /* start by reading the TLV header */
+ rc = pim_msdp_read_packet(mp);
+ if (rc < 0)
+ return;
+
+ /* Find TLV type and len */
+ stream_getc(mp->ibuf);
+ len = stream_getw(mp->ibuf);
+ if (len < PIM_MSDP_HEADER_SIZE) {
+ pim_msdp_pkt_rxed_with_fatal_error(mp);
+ return;
+ }
+
+ /*
+ * Handle messages with longer than expected TLV size: resize
+ * the stream to handle reading the whole message.
+ *
+ * RFC 3618 Section 12. 'Packet Formats':
+ * > ... If an implementation receives a TLV whose length
+ * > exceeds the maximum TLV length specified below, the TLV
+ * > SHOULD be accepted. Any additional data, including possible
+ * > next TLV's in the same message, SHOULD be ignored, and the
+ * > MSDP session should not be reset. ...
+ */
+ if (len > PIM_MSDP_SA_TLV_MAX_SIZE) {
+ /* Check if the current buffer is big enough. */
+ if (mp->ibuf->size < len) {
+ if (PIM_DEBUG_MSDP_PACKETS)
+ zlog_debug(
+ "MSDP peer %s sent TLV with unexpected large length (%d bytes)",
+ mp->key_str, len);
+
+ stream_resize_inplace(&mp->ibuf, len);
+ }
+ }
+
+ /* read complete TLV */
+ mp->packet_size = len;
+ }
+
+ rc = pim_msdp_read_packet(mp);
+ if (rc < 0)
+ return;
+
+ pim_msdp_pkt_rx(mp);
+
+ /* reset input buffers and get ready for the next packet */
+ mp->packet_size = 0;
+ stream_reset(mp->ibuf);
+}
diff --git a/pimd/pim_msdp_packet.h b/pimd/pim_msdp_packet.h
new file mode 100644
index 0000000..1584a24
--- /dev/null
+++ b/pimd/pim_msdp_packet.h
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * IP MSDP packet helpers
+ * Copyright (C) 2016 Cumulus Networks, Inc.
+ */
+#ifndef PIM_MSDP_PACKET_H
+#define PIM_MSDP_PACKET_H
+
+/* type and length of a single tlv can be considered the packet header */
+#define PIM_MSDP_HEADER_SIZE 3
+
+/* Keepalive TLV
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 4 | 3 |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+#define PIM_MSDP_KA_TLV_MAX_SIZE PIM_MSDP_HEADER_SIZE
+
+/* Source-Active TLV (x=8, y=12xEntryCount)
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 1 | x + y | Entry Count |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| RP Address |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| Reserved | Sprefix Len | \
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ \
+| Group Address | ) z
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ /
+| Source Address | /
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+#define PIM_MSDP_SA_TLV_MAX_SIZE 9192
+#define PIM_MSDP_SA_X_SIZE 8
+#define PIM_MSDP_SA_ONE_ENTRY_SIZE 12
+#define PIM_MSDP_SA_Y_SIZE(entry_cnt) (PIM_MSDP_SA_ONE_ENTRY_SIZE * entry_cnt)
+#define PIM_MSDP_SA_ENTRY_CNT2SIZE(entry_cnt) \
+ (PIM_MSDP_SA_X_SIZE + PIM_MSDP_SA_Y_SIZE(entry_cnt))
+/* SA TLV has to have at least one entry in it, so x=8 + y=12 */
+#define PIM_MSDP_SA_TLV_MIN_SIZE PIM_MSDP_SA_ENTRY_CNT2SIZE(1)
+/* XXX: theoretically we can fit a max of 255 but that may result in packet
+ * fragmentation */
+#define PIM_MSDP_SA_MAX_ENTRY_CNT 120
+
+#define PIM_MSDP_MAX_PACKET_SIZE \
+ MAX(PIM_MSDP_SA_TLV_MAX_SIZE, PIM_MSDP_KA_TLV_MAX_SIZE)
+
+#define PIM_MSDP_PKT_TYPE_STRLEN 16
+
+void pim_msdp_pkt_ka_tx(struct pim_msdp_peer *mp);
+void pim_msdp_read(struct event *thread);
+void pim_msdp_pkt_sa_tx(struct pim_instance *pim);
+void pim_msdp_pkt_sa_tx_one(struct pim_msdp_sa *sa);
+void pim_msdp_pkt_sa_tx_to_one_peer(struct pim_msdp_peer *mp);
+void pim_msdp_pkt_sa_tx_one_to_one_peer(struct pim_msdp_peer *mp,
+ struct in_addr rp, pim_sgaddr sg);
+
+#endif
diff --git a/pimd/pim_msdp_socket.c b/pimd/pim_msdp_socket.c
new file mode 100644
index 0000000..fe8d5e9
--- /dev/null
+++ b/pimd/pim_msdp_socket.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * IP MSDP socket management
+ * Copyright (C) 2016 Cumulus Networks, Inc.
+ */
+
+#include <zebra.h>
+
+#include <lib/log.h>
+#include <lib/network.h>
+#include <lib/sockunion.h>
+#include "frrevent.h"
+#include <lib/vty.h>
+#include <lib/if.h>
+#include <lib/vrf.h>
+#include <lib/lib_errors.h>
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_sock.h"
+#include "pim_errors.h"
+
+#include "pim_msdp.h"
+#include "pim_msdp_socket.h"
+
+#include "sockopt.h"
+
+/* increase socket send buffer size */
+static void pim_msdp_update_sock_send_buffer_size(int fd)
+{
+ int size = PIM_MSDP_SOCKET_SNDBUF_SIZE;
+ int optval;
+ socklen_t optlen = sizeof(optval);
+
+ if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &optval, &optlen) < 0) {
+ flog_err_sys(EC_LIB_SOCKET,
+ "getsockopt of SO_SNDBUF failed %s",
+ safe_strerror(errno));
+ return;
+ }
+
+ if (optval < size) {
+ if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &size, sizeof(size))
+ < 0) {
+ flog_err_sys(EC_LIB_SOCKET,
+ "Couldn't increase send buffer: %s",
+ safe_strerror(errno));
+ }
+ }
+}
+
+/* passive peer socket accept */
+static void pim_msdp_sock_accept(struct event *thread)
+{
+ union sockunion su;
+ struct pim_instance *pim = EVENT_ARG(thread);
+ int accept_sock;
+ int msdp_sock;
+ struct pim_msdp_peer *mp;
+
+ sockunion_init(&su);
+
+ /* re-register accept thread */
+ accept_sock = EVENT_FD(thread);
+ if (accept_sock < 0) {
+ flog_err(EC_LIB_DEVELOPMENT, "accept_sock is negative value %d",
+ accept_sock);
+ return;
+ }
+ pim->msdp.listener.thread = NULL;
+ event_add_read(router->master, pim_msdp_sock_accept, pim, accept_sock,
+ &pim->msdp.listener.thread);
+
+ /* accept client connection. */
+ msdp_sock = sockunion_accept(accept_sock, &su);
+ if (msdp_sock < 0) {
+ flog_err_sys(EC_LIB_SOCKET, "pim_msdp_sock_accept failed (%s)",
+ safe_strerror(errno));
+ return;
+ }
+
+ /* see if have peer config for this */
+ mp = pim_msdp_peer_find(pim, su.sin.sin_addr);
+ if (!mp || !PIM_MSDP_PEER_IS_LISTENER(mp)) {
+ ++pim->msdp.rejected_accepts;
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ flog_err(EC_PIM_MSDP_PACKET,
+ "msdp peer connection refused from %pSU", &su);
+ }
+ close(msdp_sock);
+ return;
+ }
+
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug("MSDP peer %s accept success%s", mp->key_str,
+ mp->fd >= 0 ? "(dup)" : "");
+ }
+
+ /* if we have an existing connection we need to kill that one
+ * with this one */
+ if (mp->fd >= 0) {
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ zlog_notice(
+ "msdp peer new connection from %pSU stop old connection",
+ &su);
+ }
+ pim_msdp_peer_stop_tcp_conn(mp, true /* chg_state */);
+ }
+ mp->fd = msdp_sock;
+ set_nonblocking(mp->fd);
+ pim_msdp_update_sock_send_buffer_size(mp->fd);
+ pim_msdp_peer_established(mp);
+}
+
+/* global listener for the MSDP well-known TCP port */
+int pim_msdp_sock_listen(struct pim_instance *pim)
+{
+ int sock;
+ int socklen;
+ struct sockaddr_in sin;
+ int rc;
+ struct pim_msdp_listener *listener = &pim->msdp.listener;
+
+ if (pim->msdp.flags & PIM_MSDPF_LISTENER) {
+ /* listener already setup */
+ return 0;
+ }
+
+ sock = socket(AF_INET, SOCK_STREAM, 0);
+ if (sock < 0) {
+ flog_err_sys(EC_LIB_SOCKET, "socket: %s", safe_strerror(errno));
+ return sock;
+ }
+
+ memset(&sin, 0, sizeof(struct sockaddr_in));
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(PIM_MSDP_TCP_PORT);
+ socklen = sizeof(struct sockaddr_in);
+#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
+ sin.sin_len = socklen;
+#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
+
+ sockopt_reuseaddr(sock);
+ sockopt_reuseport(sock);
+
+ if (pim->vrf->vrf_id != VRF_DEFAULT) {
+ struct interface *ifp =
+ if_lookup_by_name(pim->vrf->name, pim->vrf->vrf_id);
+ if (!ifp) {
+ flog_err(EC_LIB_INTERFACE,
+ "%s: Unable to lookup vrf interface: %s",
+ __func__, pim->vrf->name);
+ close(sock);
+ return -1;
+ }
+ if (pim_socket_bind(sock, ifp)) {
+ flog_err_sys(EC_LIB_SOCKET,
+ "%s: Unable to bind to socket: %s",
+ __func__, safe_strerror(errno));
+ close(sock);
+ return -1;
+ }
+ }
+
+ frr_with_privs(&pimd_privs) {
+ /* bind to well known TCP port */
+ rc = bind(sock, (struct sockaddr *)&sin, socklen);
+ }
+
+ if (rc < 0) {
+ flog_err_sys(EC_LIB_SOCKET,
+ "pim_msdp_socket bind to port %d: %s",
+ ntohs(sin.sin_port), safe_strerror(errno));
+ close(sock);
+ return rc;
+ }
+
+ rc = listen(sock, 3 /* backlog */);
+ if (rc < 0) {
+ flog_err_sys(EC_LIB_SOCKET, "pim_msdp_socket listen: %s",
+ safe_strerror(errno));
+ close(sock);
+ return rc;
+ }
+
+ /* Set socket DSCP byte */
+ if (setsockopt_ipv4_tos(sock, IPTOS_PREC_INTERNETCONTROL)) {
+ zlog_warn("can't set sockopt IP_TOS to MSDP socket %d: %s",
+ sock, safe_strerror(errno));
+ }
+
+ /* add accept thread */
+ listener->fd = sock;
+ memcpy(&listener->su, &sin, socklen);
+ event_add_read(pim->msdp.master, pim_msdp_sock_accept, pim, sock,
+ &listener->thread);
+
+ pim->msdp.flags |= PIM_MSDPF_LISTENER;
+ return 0;
+}
+
+/* active peer socket setup */
+int pim_msdp_sock_connect(struct pim_msdp_peer *mp)
+{
+ int rc;
+
+ if (PIM_DEBUG_MSDP_INTERNAL) {
+ zlog_debug("MSDP peer %s attempt connect%s", mp->key_str,
+ mp->fd < 0 ? "" : "(dup)");
+ }
+
+ /* if we have an existing connection we need to kill that one
+ * with this one */
+ if (mp->fd >= 0) {
+ if (PIM_DEBUG_MSDP_EVENTS) {
+ zlog_notice(
+ "msdp duplicate connect to %s nuke old connection",
+ mp->key_str);
+ }
+ pim_msdp_peer_stop_tcp_conn(mp, false /* chg_state */);
+ }
+
+ /* Make socket for the peer. */
+ mp->fd = sockunion_socket(&mp->su_peer);
+ if (mp->fd < 0) {
+ flog_err_sys(EC_LIB_SOCKET,
+ "pim_msdp_socket socket failure: %s",
+ safe_strerror(errno));
+ return -1;
+ }
+
+ if (mp->pim->vrf->vrf_id != VRF_DEFAULT) {
+ struct interface *ifp = if_lookup_by_name(mp->pim->vrf->name,
+ mp->pim->vrf->vrf_id);
+ if (!ifp) {
+ flog_err(EC_LIB_INTERFACE,
+ "%s: Unable to lookup vrf interface: %s",
+ __func__, mp->pim->vrf->name);
+ return -1;
+ }
+ if (pim_socket_bind(mp->fd, ifp)) {
+ flog_err_sys(EC_LIB_SOCKET,
+ "%s: Unable to bind to socket: %s",
+ __func__, safe_strerror(errno));
+ close(mp->fd);
+ mp->fd = -1;
+ return -1;
+ }
+ }
+
+ set_nonblocking(mp->fd);
+
+ /* Set socket send buffer size */
+ pim_msdp_update_sock_send_buffer_size(mp->fd);
+ sockopt_reuseaddr(mp->fd);
+ sockopt_reuseport(mp->fd);
+
+ /* source bind */
+ rc = sockunion_bind(mp->fd, &mp->su_local, 0, &mp->su_local);
+ if (rc < 0) {
+ flog_err_sys(EC_LIB_SOCKET,
+ "pim_msdp_socket connect bind failure: %s",
+ safe_strerror(errno));
+ close(mp->fd);
+ mp->fd = -1;
+ return rc;
+ }
+
+ /* Set socket DSCP byte */
+ if (setsockopt_ipv4_tos(mp->fd, IPTOS_PREC_INTERNETCONTROL)) {
+ zlog_warn("can't set sockopt IP_TOS to MSDP socket %d: %s",
+ mp->fd, safe_strerror(errno));
+ }
+
+ /* Connect to the remote mp. */
+ return (sockunion_connect(mp->fd, &mp->su_peer,
+ htons(PIM_MSDP_TCP_PORT), 0));
+}
diff --git a/pimd/pim_msdp_socket.h b/pimd/pim_msdp_socket.h
new file mode 100644
index 0000000..ae31664
--- /dev/null
+++ b/pimd/pim_msdp_socket.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * IP MSDP socket management for Quagga
+ * Copyright (C) 2016 Cumulus Networks, Inc.
+ */
+#ifndef PIM_MSDP_SOCKET_H
+#define PIM_MSDP_SOCKET_H
+
+int pim_msdp_sock_listen(struct pim_instance *pim);
+int pim_msdp_sock_connect(struct pim_msdp_peer *mp);
+#endif
diff --git a/pimd/pim_msg.c b/pimd/pim_msg.c
new file mode 100644
index 0000000..6814798
--- /dev/null
+++ b/pimd/pim_msg.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "if.h"
+#include "log.h"
+#include "prefix.h"
+#include "vty.h"
+#include "plist.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_vty.h"
+#include "pim_pim.h"
+#include "pim_msg.h"
+#include "pim_util.h"
+#include "pim_str.h"
+#include "pim_iface.h"
+#include "pim_rp.h"
+#include "pim_rpf.h"
+#include "pim_register.h"
+#include "pim_jp_agg.h"
+#include "pim_oil.h"
+
+void pim_msg_build_header(pim_addr src, pim_addr dst, uint8_t *pim_msg,
+ size_t pim_msg_size, uint8_t pim_msg_type,
+ bool no_fwd)
+{
+ struct pim_msg_header *header = (struct pim_msg_header *)pim_msg;
+ struct iovec iov[2], *iovp = iov;
+
+ /*
+ * The checksum for Registers is done only on the first 8 bytes of the
+ * packet, including the PIM header and the next 4 bytes, excluding the
+ * data packet portion
+ *
+ * for IPv6, the pseudoheader upper-level protocol length is also
+ * truncated, so let's just set it here before everything else.
+ */
+ if (pim_msg_type == PIM_MSG_TYPE_REGISTER)
+ pim_msg_size = PIM_MSG_REGISTER_LEN;
+
+#if PIM_IPV == 6
+ struct ipv6_ph phdr = {
+ .src = src,
+ .dst = dst,
+ .ulpl = htonl(pim_msg_size),
+ .next_hdr = IPPROTO_PIM,
+ };
+
+ iovp->iov_base = &phdr;
+ iovp->iov_len = sizeof(phdr);
+ iovp++;
+#endif
+
+ /*
+ * Write header
+ */
+ header->ver = PIM_PROTO_VERSION;
+ header->type = pim_msg_type;
+ header->Nbit = no_fwd;
+ header->reserved = 0;
+
+ header->checksum = 0;
+ iovp->iov_base = header;
+ iovp->iov_len = pim_msg_size;
+ iovp++;
+
+ header->checksum = in_cksumv(iov, iovp - iov);
+}
+
+uint8_t *pim_msg_addr_encode_ipv4_ucast(uint8_t *buf, struct in_addr addr)
+{
+ buf[0] = PIM_MSG_ADDRESS_FAMILY_IPV4; /* addr family */
+ buf[1] = '\0'; /* native encoding */
+ memcpy(buf + 2, &addr, sizeof(struct in_addr));
+
+ return buf + PIM_ENCODED_IPV4_UCAST_SIZE;
+}
+
+uint8_t *pim_msg_addr_encode_ipv4_group(uint8_t *buf, struct in_addr addr)
+{
+ buf[0] = PIM_MSG_ADDRESS_FAMILY_IPV4; /* addr family */
+ buf[1] = '\0'; /* native encoding */
+ buf[2] = '\0'; /* reserved */
+ buf[3] = 32; /* mask len */
+ memcpy(buf + 4, &addr, sizeof(struct in_addr));
+
+ return buf + PIM_ENCODED_IPV4_GROUP_SIZE;
+}
+
+uint8_t *pim_msg_addr_encode_ipv4_source(uint8_t *buf, struct in_addr addr,
+ uint8_t bits)
+{
+ buf[0] = PIM_MSG_ADDRESS_FAMILY_IPV4; /* addr family */
+ buf[1] = '\0'; /* native encoding */
+ buf[2] = bits;
+ buf[3] = 32; /* mask len */
+ memcpy(buf + 4, &addr, sizeof(struct in_addr));
+
+ return buf + PIM_ENCODED_IPV4_SOURCE_SIZE;
+}
+
+uint8_t *pim_msg_addr_encode_ipv6_source(uint8_t *buf, struct in6_addr addr,
+ uint8_t bits)
+{
+ buf[0] = PIM_MSG_ADDRESS_FAMILY_IPV6; /* addr family */
+ buf[1] = '\0'; /* native encoding */
+ buf[2] = bits;
+ buf[3] = 128; /* mask len */
+ buf += 4;
+
+ memcpy(buf, &addr, sizeof(addr));
+ buf += sizeof(addr);
+
+ return buf;
+}
+
+uint8_t *pim_msg_addr_encode_ipv6_ucast(uint8_t *buf, struct in6_addr addr)
+{
+ buf[0] = PIM_MSG_ADDRESS_FAMILY_IPV6; /* addr family */
+ buf[1] = '\0'; /* native encoding */
+ buf += 2;
+
+ memcpy(buf, &addr, sizeof(addr));
+ buf += sizeof(addr);
+
+ return buf;
+}
+
+uint8_t *pim_msg_addr_encode_ipv6_group(uint8_t *buf, struct in6_addr addr)
+{
+ buf[0] = PIM_MSG_ADDRESS_FAMILY_IPV6; /* addr family */
+ buf[1] = '\0'; /* native encoding */
+ buf[2] = '\0'; /* reserved */
+ buf[3] = 128; /* mask len */
+ buf += 4;
+
+ memcpy(buf, &addr, sizeof(addr));
+ buf += sizeof(addr);
+
+ return buf;
+}
+
+#if PIM_IPV == 4
+#define pim_msg_addr_encode(what) pim_msg_addr_encode_ipv4_##what
+#else
+#define pim_msg_addr_encode(what) pim_msg_addr_encode_ipv6_##what
+#endif
+
+uint8_t *pim_msg_addr_encode_ucast(uint8_t *buf, pim_addr addr)
+{
+ return pim_msg_addr_encode(ucast)(buf, addr);
+}
+
+uint8_t *pim_msg_addr_encode_group(uint8_t *buf, pim_addr addr)
+{
+ return pim_msg_addr_encode(group)(buf, addr);
+}
+
+uint8_t *pim_msg_addr_encode_source(uint8_t *buf, pim_addr addr, uint8_t bits)
+{
+ return pim_msg_addr_encode(source)(buf, addr, bits);
+}
+
+/*
+ * For the given 'struct pim_jp_sources' list
+ * determine the size in bytes it would take up on the wire.
+ */
+size_t pim_msg_get_jp_group_size(struct list *sources)
+{
+ struct pim_jp_sources *js;
+ size_t size = 0;
+
+ if (!sources)
+ return 0;
+
+ size += sizeof(pim_encoded_group);
+ size += 4; // Joined sources (2) + Pruned Sources (2)
+
+ size += sizeof(pim_encoded_source) * sources->count;
+
+ js = listgetdata(listhead(sources));
+ if (js && pim_addr_is_any(js->up->sg.src) && js->is_join) {
+ struct pim_upstream *child, *up;
+ struct listnode *up_node;
+
+ up = js->up;
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "%s: Considering (%s) children for (S,G,rpt) prune",
+ __func__, up->sg_str);
+
+ for (ALL_LIST_ELEMENTS_RO(up->sources, up_node, child)) {
+ /*
+ * PIM VXLAN is weird
+ * It auto creates the S,G and populates a bunch
+ * of flags that make it look like a SPT prune should
+ * be sent. But this regularly scheduled join
+ * for the *,G in the VXLAN setup can happen at
+ * scheduled times *before* the null register
+ * is received by the RP to cause it to initiate
+ * the S,G joins toward the source. Let's just
+ * assume that if this is a SRC VXLAN ORIG route
+ * and no actual ifchannels( joins ) have been
+ * created then do not send the embedded prune
+ * Why you may ask? Well if the prune is S,G
+ * RPT Prune is received *before* the join
+ * from the RP( if it flows to this routers
+ * upstream interface ) then we'll just wisely
+ * create a mroute with an empty oil on
+ * the upstream intermediate router preventing
+ * packets from flowing to the RP
+ */
+ if (PIM_UPSTREAM_FLAG_TEST_SRC_VXLAN_ORIG(child->flags) &&
+ listcount(child->ifchannels) == 0) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug("%s: %s Vxlan originated S,G route with no ifchannels, not adding prune to compound message",
+ __func__, child->sg_str);
+ } else if (!PIM_UPSTREAM_FLAG_TEST_USE_RPT(child->flags)) {
+ /* If we are using SPT and the SPT and RPT IIFs
+ * are different we can prune the source off
+ * of the RPT.
+ * If RPF_interface(S) is not resolved hold
+ * decision to prune as SPT may end up on the
+ * same IIF as RPF_interface(RP).
+ */
+ if (child->rpf.source_nexthop.interface &&
+ !pim_rpf_is_same(&up->rpf,
+ &child->rpf)) {
+ size += sizeof(pim_encoded_source);
+ PIM_UPSTREAM_FLAG_SET_SEND_SG_RPT_PRUNE(
+ child->flags);
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "%s: SPT Bit and RPF'(%s) != RPF'(S,G): Add Prune (%s,rpt) to compound message",
+ __func__, up->sg_str,
+ child->sg_str);
+ } else if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "%s: SPT Bit and RPF'(%s) == RPF'(S,G): Not adding Prune for (%s,rpt)",
+ __func__, up->sg_str,
+ child->sg_str);
+ } else if (pim_upstream_empty_inherited_olist(child)) {
+ /* S is supposed to be forwarded along the RPT
+ * but it's inherited OIL is empty. So just
+ * prune it off.
+ */
+ size += sizeof(pim_encoded_source);
+ PIM_UPSTREAM_FLAG_SET_SEND_SG_RPT_PRUNE(
+ child->flags);
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "%s: inherited_olist(%s,rpt) is NULL, Add Prune to compound message",
+ __func__, child->sg_str);
+ } else if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "%s: Do not add Prune %s to compound message %s",
+ __func__, child->sg_str, up->sg_str);
+ }
+ }
+ return size;
+}
+
+size_t pim_msg_build_jp_groups(struct pim_jp_groups *grp,
+ struct pim_jp_agg_group *sgs, size_t size)
+{
+ struct listnode *node, *nnode;
+ struct pim_jp_sources *source;
+ struct pim_upstream *up = NULL;
+ pim_addr stosend;
+ uint8_t bits;
+ uint8_t tgroups = 0;
+
+ memset(grp, 0, size);
+ pim_msg_addr_encode_group((uint8_t *)&grp->g, sgs->group);
+
+ for (ALL_LIST_ELEMENTS(sgs->sources, node, nnode, source)) {
+ /* number of joined/pruned sources */
+ if (source->is_join)
+ grp->joins++;
+ else
+ grp->prunes++;
+
+ if (pim_addr_is_any(source->up->sg.src)) {
+ struct pim_instance *pim = source->up->channel_oil->pim;
+ struct pim_rpf *rpf = pim_rp_g(pim, source->up->sg.grp);
+ bits = PIM_ENCODE_SPARSE_BIT | PIM_ENCODE_WC_BIT
+ | PIM_ENCODE_RPT_BIT;
+ stosend = rpf->rpf_addr;
+ /* Only Send SGRpt in case of *,G Join */
+ if (source->is_join)
+ up = source->up;
+ } else {
+ bits = PIM_ENCODE_SPARSE_BIT;
+ stosend = source->up->sg.src;
+ }
+
+ pim_msg_addr_encode_source((uint8_t *)&grp->s[tgroups], stosend,
+ bits);
+ tgroups++;
+ }
+
+ if (up) {
+ struct pim_upstream *child;
+
+ for (ALL_LIST_ELEMENTS(up->sources, node, nnode, child)) {
+ if (PIM_UPSTREAM_FLAG_TEST_SEND_SG_RPT_PRUNE(
+ child->flags)) {
+ pim_msg_addr_encode_source(
+ (uint8_t *)&grp->s[tgroups],
+ child->sg.src,
+ PIM_ENCODE_SPARSE_BIT |
+ PIM_ENCODE_RPT_BIT);
+ tgroups++;
+ PIM_UPSTREAM_FLAG_UNSET_SEND_SG_RPT_PRUNE(
+ child->flags);
+ grp->prunes++;
+ }
+ }
+ }
+
+ grp->joins = htons(grp->joins);
+ grp->prunes = htons(grp->prunes);
+
+ return size;
+}
diff --git a/pimd/pim_msg.h b/pimd/pim_msg.h
new file mode 100644
index 0000000..56923b7
--- /dev/null
+++ b/pimd/pim_msg.h
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_MSG_H
+#define PIM_MSG_H
+
+#include <netinet/in.h>
+#if PIM_IPV == 6
+#include <netinet/ip6.h>
+#endif
+
+#include "pim_jp_agg.h"
+
+#define PIM_HDR_LEN sizeof(struct pim_msg_header)
+/*
+ Number Description
+ ---------- ------------------
+ 0 Reserved
+ 1 IP (IP version 4)
+ 2 IP6 (IP version 6)
+
+ From:
+ http://www.iana.org/assignments/address-family-numbers
+*/
+enum pim_msg_address_family {
+ PIM_MSG_ADDRESS_FAMILY_RESERVED,
+ PIM_MSG_ADDRESS_FAMILY_IPV4,
+ PIM_MSG_ADDRESS_FAMILY_IPV6,
+};
+
+/*
+ * pim_msg_hdr
+ * =========================
+ * PIM Header definition as per RFC 5059. N bit introduced to indicate
+ * do-not-forward option in PIM Boot strap Message.
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |PIM Ver| Type |N| Reserved | Checksum |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct pim_msg_header {
+#if (BYTE_ORDER == LITTLE_ENDIAN)
+ uint8_t type : 4;
+ uint8_t ver : 4;
+ uint8_t reserved : 7;
+ uint8_t Nbit : 1; /* No Fwd Bit */
+#elif (BYTE_ORDER == BIG_ENDIAN)
+ uint8_t ver : 4;
+ uint8_t type : 4;
+ uint8_t Nbit : 1; /* No Fwd Bit */
+ uint8_t reserved : 7;
+#else
+#error"Please set byte order"
+#endif
+ uint16_t checksum;
+} __attribute__((packed));
+
+struct pim_encoded_ipv4_unicast {
+ uint8_t family;
+ uint8_t reserved;
+ struct in_addr addr;
+} __attribute__((packed));
+
+struct pim_encoded_ipv6_unicast {
+ uint8_t family;
+ uint8_t reserved;
+ struct in6_addr addr;
+} __attribute__((packed));
+
+/*
+ * Encoded Group format. RFC 4601 Sec 4.9.1
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Addr Family | Encoding Type |B| Reserved |Z| Mask Len |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Group multicast Address
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+...
+ */
+struct pim_encoded_group_ipv4 {
+ uint8_t family;
+ uint8_t ne;
+#if (BYTE_ORDER == LITTLE_ENDIAN)
+ uint8_t sz : 1; /* scope zone bit */
+ uint8_t reserved : 6; /* Reserved */
+ uint8_t bidir : 1; /* Bidir bit */
+#elif (BYTE_ORDER == BIG_ENDIAN)
+ uint8_t bidir : 1; /* Bidir bit */
+ uint8_t reserved : 6; /* Reserved */
+ uint8_t sz : 1; /* scope zone bit */
+#else
+#error"Please set byte order"
+#endif
+ uint8_t mask;
+ struct in_addr addr;
+} __attribute__((packed));
+
+struct pim_encoded_group_ipv6 {
+ uint8_t family;
+ uint8_t ne;
+#if (BYTE_ORDER == LITTLE_ENDIAN)
+ uint8_t sz : 1; /* scope zone bit */
+ uint8_t reserved : 6; /* Reserved */
+ uint8_t bidir : 1; /* Bidir bit */
+#elif (BYTE_ORDER == BIG_ENDIAN)
+ uint8_t bidir : 1; /* Bidir bit */
+ uint8_t reserved : 6; /* Reserved */
+ uint8_t sz : 1; /* scope zone bit */
+#else
+#error "Please set byte order"
+#endif
+ uint8_t mask;
+ struct in6_addr addr;
+} __attribute__((packed));
+
+/*
+ * Encoded Source format. RFC 4601 Sec 4.9.1
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Source Address
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-...
+ */
+struct pim_encoded_source_ipv4 {
+ uint8_t family;
+ uint8_t ne;
+ uint8_t bits;
+ uint8_t mask;
+ struct in_addr addr;
+} __attribute__((packed));
+
+struct pim_encoded_source_ipv6 {
+ uint8_t family;
+ uint8_t ne;
+ uint8_t bits;
+ uint8_t mask;
+ struct in6_addr addr;
+} __attribute__((packed));
+
+/* clang-format off */
+#if PIM_IPV == 4
+typedef struct pim_encoded_ipv4_unicast pim_encoded_unicast;
+typedef struct pim_encoded_group_ipv4 pim_encoded_group;
+typedef struct pim_encoded_source_ipv4 pim_encoded_source;
+typedef struct ip ipv_hdr;
+#define IPV_SRC(ip_hdr) ((ip_hdr))->ip_src
+#define IPV_DST(ip_hdr) ((ip_hdr))->ip_dst
+#define IPV_LEN(ip_hdr) ((ip_hdr))->ip_len
+#else
+typedef struct pim_encoded_ipv6_unicast pim_encoded_unicast;
+typedef struct pim_encoded_group_ipv6 pim_encoded_group;
+typedef struct pim_encoded_source_ipv6 pim_encoded_source;
+typedef struct ip6_hdr ipv_hdr;
+#define IPV_SRC(ip_hdr) ((ip_hdr))->ip6_src
+#define IPV_DST(ip_hdr) ((ip_hdr))->ip6_dst
+#define IPV_LEN(ip_hdr) ((ip_hdr))->ip6_plen
+#endif
+/* clang-format on */
+
+struct pim_jp_groups {
+ pim_encoded_group g;
+ uint16_t joins;
+ uint16_t prunes;
+ pim_encoded_source s[1];
+} __attribute__((packed));
+
+struct pim_jp {
+ struct pim_msg_header header;
+ pim_encoded_unicast addr;
+ uint8_t reserved;
+ uint8_t num_groups;
+ uint16_t holdtime;
+ struct pim_jp_groups groups[1];
+} __attribute__((packed));
+
+#if PIM_IPV == 4
+static inline pim_sgaddr pim_sgaddr_from_iphdr(const void *iphdr)
+{
+ const struct ip *ipv4_hdr = iphdr;
+ pim_sgaddr sg;
+
+ sg.src = ipv4_hdr->ip_src;
+ sg.grp = ipv4_hdr->ip_dst;
+
+ return sg;
+}
+#else
+static inline pim_sgaddr pim_sgaddr_from_iphdr(const void *iphdr)
+{
+ const struct ip6_hdr *ipv6_hdr = iphdr;
+ pim_sgaddr sg;
+
+ sg.src = ipv6_hdr->ip6_src;
+ sg.grp = ipv6_hdr->ip6_dst;
+
+ return sg;
+}
+#endif
+
+void pim_msg_build_header(pim_addr src, pim_addr dst, uint8_t *pim_msg,
+ size_t pim_msg_size, uint8_t pim_msg_type,
+ bool no_fwd);
+uint8_t *pim_msg_addr_encode_ipv4_ucast(uint8_t *buf, struct in_addr addr);
+uint8_t *pim_msg_addr_encode_ipv4_group(uint8_t *buf, struct in_addr addr);
+
+#define PIM_ENCODE_SPARSE_BIT 0x04
+#define PIM_ENCODE_WC_BIT 0x02
+#define PIM_ENCODE_RPT_BIT 0x01
+uint8_t *pim_msg_addr_encode_ipv4_source(uint8_t *buf, struct in_addr addr,
+ uint8_t bits);
+
+uint8_t *pim_msg_addr_encode_ipv6_ucast(uint8_t *buf, struct in6_addr addr);
+uint8_t *pim_msg_addr_encode_ipv6_group(uint8_t *buf, struct in6_addr addr);
+uint8_t *pim_msg_addr_encode_ipv6_source(uint8_t *buf, struct in6_addr addr,
+ uint8_t bits);
+
+uint8_t *pim_msg_addr_encode_ucast(uint8_t *buf, pim_addr addr);
+uint8_t *pim_msg_addr_encode_group(uint8_t *buf, pim_addr addr);
+uint8_t *pim_msg_addr_encode_source(uint8_t *buf, pim_addr addr, uint8_t bits);
+
+size_t pim_msg_get_jp_group_size(struct list *sources);
+size_t pim_msg_build_jp_groups(struct pim_jp_groups *grp,
+ struct pim_jp_agg_group *sgs, size_t size);
+#endif /* PIM_MSG_H */
diff --git a/pimd/pim_nb.c b/pimd/pim_nb.c
new file mode 100644
index 0000000..339935f
--- /dev/null
+++ b/pimd/pim_nb.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020 VMware
+ * Sarita Patra
+ */
+
+#include <zebra.h>
+
+#include "northbound.h"
+#include "libfrr.h"
+#include "vrf.h"
+#include "pimd/pim_nb.h"
+
+/* clang-format off */
+const struct frr_yang_module_info frr_pim_info = { /* northbound config callbacks for the frr-pim YANG model */
+ .name = "frr-pim",
+ .nodes = {
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/ecmp",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_ecmp_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/ecmp-rebalance",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_ecmp_rebalance_modify,
+ }
+ },
+ { /* router-scoped node (no VRF path prefix) */
+ .xpath = "/frr-pim:pim/address-family/join-prune-interval",
+ .cbs = {
+ .modify = pim_address_family_join_prune_interval_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/keep-alive-timer",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_keep_alive_timer_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/rp-keep-alive-timer",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_keep_alive_timer_modify,
+ }
+ },
+ {
+ .xpath = "/frr-pim:pim/address-family",
+ .cbs = {
+ .create = pim_address_family_create,
+ .destroy = pim_address_family_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-pim:pim/address-family/packets",
+ .cbs = {
+ .modify = pim_address_family_packets_modify,
+ }
+ },
+ {
+ .xpath = "/frr-pim:pim/address-family/register-suppress-time",
+ .cbs = {
+ .modify = pim_address_family_register_suppress_time_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/send-v6-secondary",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_send_v6_secondary_modify,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_send_v6_secondary_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/spt-switchover",
+ .cbs = {
+ .apply_finish = routing_control_plane_protocols_control_plane_protocol_pim_address_family_spt_switchover_apply_finish,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/spt-switchover/spt-action",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_spt_switchover_spt_action_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/spt-switchover/spt-infinity-prefix-list",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_spt_switchover_spt_infinity_prefix_list_modify,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_spt_switchover_spt_infinity_prefix_list_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/ssm-prefix-list",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_prefix_list_modify,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_prefix_list_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/ssm-pingd-source-ip",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_pingd_source_ip_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_pingd_source_ip_destroy,
+ }
+ },
+ { /* MSDP subtree starts here */
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/hold-time",
+ .cbs = {
+ .modify = pim_msdp_hold_time_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/keep-alive",
+ .cbs = {
+ .modify = pim_msdp_keep_alive_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/connection-retry",
+ .cbs = {
+ .modify = pim_msdp_connection_retry_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups",
+ .cbs = {
+ .create = pim_msdp_mesh_group_create,
+ .destroy = pim_msdp_mesh_group_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups/source",
+ .cbs = {
+ .modify = pim_msdp_mesh_group_source_modify,
+ .destroy = pim_msdp_mesh_group_source_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups/members",
+ .cbs = {
+ .create = pim_msdp_mesh_group_members_create,
+ .destroy = pim_msdp_mesh_group_members_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-peer",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-peer/source-ip",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_modify,
+ }
+ },
+ { /* MLAG subtree starts here */
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_destroy,
+ .apply_finish = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_apply_finish,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag/peerlink-rif",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_peerlink_rif_modify,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_peerlink_rif_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag/reg-address",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_reg_address_modify,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_reg_address_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag/my-role",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_my_role_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag/peer-state",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_peer_state_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/register-accept-list",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_register_accept_list_modify,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_register_accept_list_destroy,
+ }
+ },
+ { /* per-interface PIM settings (frr-interface augment) start here */
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family",
+ .cbs = {
+ .create = lib_interface_pim_address_family_create,
+ .destroy = lib_interface_pim_address_family_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/pim-enable",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_pim_enable_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/pim-passive-enable",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_pim_passive_enable_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/dr-priority",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_dr_priority_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/hello-interval",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_hello_interval_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/hello-holdtime",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_hello_holdtime_modify,
+ .destroy = lib_interface_pim_address_family_hello_holdtime_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/bfd",
+ .cbs = {
+ .create = lib_interface_pim_address_family_bfd_create,
+ .destroy = lib_interface_pim_address_family_bfd_destroy,
+ .apply_finish = lib_interface_pim_address_family_bfd_apply_finish,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/bfd/min-rx-interval",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_bfd_min_rx_interval_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/bfd/min-tx-interval",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_bfd_min_tx_interval_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/bfd/detect_mult",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_bfd_detect_mult_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/bfd/profile",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_bfd_profile_modify,
+ .destroy = lib_interface_pim_address_family_bfd_profile_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/bsm",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_bsm_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/unicast-bsm",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_unicast_bsm_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/active-active",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_active_active_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/use-source",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_use_source_modify,
+ .destroy = lib_interface_pim_address_family_use_source_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/multicast-boundary-oil",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_multicast_boundary_oil_modify,
+ .destroy = lib_interface_pim_address_family_multicast_boundary_oil_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/mroute",
+ .cbs = {
+ .create = lib_interface_pim_address_family_mroute_create,
+ .destroy = lib_interface_pim_address_family_mroute_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/mroute/oif",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_mroute_oif_modify,
+ .destroy = lib_interface_pim_address_family_mroute_oif_destroy,
+ }
+ },
+ {
+ .xpath = NULL, /* sentinel: terminates the node array */
+ },
+ }
+};
+
+/* clang-format off */
+const struct frr_yang_module_info frr_pim_rp_info = { /* northbound callbacks for the frr-pim-rp model (static RP config) */
+ .name = "frr-pim-rp",
+ .nodes = {
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/static-rp/rp-list",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/static-rp/rp-list/group-list",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_group_list_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_group_list_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/static-rp/rp-list/prefix-list",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_prefix_list_modify,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_prefix_list_destroy,
+ }
+ },
+ {
+ .xpath = NULL, /* sentinel: terminates the node array */
+ },
+ }
+};
+
+/* clang-format off */
+const struct frr_yang_module_info frr_gmp_info = { /* northbound callbacks for frr-gmp (IGMP/MLD interface config) */
+ .name = "frr-gmp",
+ .nodes = {
+ {
+ .xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family",
+ .cbs = {
+ .create = lib_interface_gmp_address_family_create,
+ .destroy = lib_interface_gmp_address_family_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/enable",
+ .cbs = {
+ .modify = lib_interface_gmp_address_family_enable_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/igmp-version",
+ .cbs = {
+ .modify = lib_interface_gmp_address_family_igmp_version_modify,
+ .destroy = lib_interface_gmp_address_family_igmp_version_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/mld-version",
+ .cbs = {
+ .modify = lib_interface_gmp_address_family_mld_version_modify,
+ .destroy = lib_interface_gmp_address_family_mld_version_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/query-interval",
+ .cbs = {
+ .modify = lib_interface_gmp_address_family_query_interval_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/query-max-response-time",
+ .cbs = {
+ .modify = lib_interface_gmp_address_family_query_max_response_time_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/last-member-query-interval",
+ .cbs = {
+ .modify = lib_interface_gmp_address_family_last_member_query_interval_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/robustness-variable",
+ .cbs = {
+ .modify = lib_interface_gmp_address_family_robustness_variable_modify,
+ }
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/static-group",
+ .cbs = {
+ .create = lib_interface_gmp_address_family_static_group_create,
+ .destroy = lib_interface_gmp_address_family_static_group_destroy,
+ }
+ },
+ {
+ .xpath = NULL, /* sentinel: terminates the node array */
+ },
+ }
+};
+
diff --git a/pimd/pim_nb.h b/pimd/pim_nb.h
new file mode 100644
index 0000000..0321d07
--- /dev/null
+++ b/pimd/pim_nb.h
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020 VMware
+ * Sarita Patra
+ */
+
+#ifndef _FRR_PIM_NB_H_
+#define _FRR_PIM_NB_H_
+
+extern const struct frr_yang_module_info frr_pim_info;
+extern const struct frr_yang_module_info frr_pim_rp_info;
+extern const struct frr_yang_module_info frr_gmp_info;
+
+/* frr-pim prototypes*/
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ecmp_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ecmp_rebalance_modify(
+ struct nb_cb_modify_args *args);
+int pim_address_family_join_prune_interval_modify(struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_keep_alive_timer_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_keep_alive_timer_modify(
+ struct nb_cb_modify_args *args);
+int pim_address_family_create(struct nb_cb_create_args *args);
+int pim_address_family_destroy(struct nb_cb_destroy_args *args);
+int pim_address_family_packets_modify(struct nb_cb_modify_args *args);
+int pim_address_family_register_suppress_time_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_create(
+ struct nb_cb_create_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_destroy(
+ struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_send_v6_secondary_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_send_v6_secondary_destroy(
+ struct nb_cb_destroy_args *args);
+void routing_control_plane_protocols_control_plane_protocol_pim_address_family_spt_switchover_apply_finish(
+ struct nb_cb_apply_finish_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_spt_switchover_spt_action_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_spt_switchover_spt_infinity_prefix_list_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_spt_switchover_spt_infinity_prefix_list_destroy(
+ struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_prefix_list_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_prefix_list_destroy(
+ struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_pingd_source_ip_create(
+ struct nb_cb_create_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_pingd_source_ip_destroy(
+ struct nb_cb_destroy_args *args);
+int pim_msdp_hold_time_modify(struct nb_cb_modify_args *args);
+int pim_msdp_keep_alive_modify(struct nb_cb_modify_args *args);
+int pim_msdp_connection_retry_modify(struct nb_cb_modify_args *args);
+int pim_msdp_mesh_group_create(struct nb_cb_create_args *args);
+int pim_msdp_mesh_group_destroy(struct nb_cb_destroy_args *args);
+int pim_msdp_mesh_group_members_create(struct nb_cb_create_args *args);
+int pim_msdp_mesh_group_members_destroy(struct nb_cb_destroy_args *args);
+int pim_msdp_mesh_group_source_modify(struct nb_cb_modify_args *args);
+int pim_msdp_mesh_group_source_destroy(struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_create(
+ struct nb_cb_create_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_destroy(
+ struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_create(
+ struct nb_cb_create_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_destroy(
+ struct nb_cb_destroy_args *args);
+void routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_apply_finish(
+ struct nb_cb_apply_finish_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_peerlink_rif_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_peerlink_rif_destroy(
+ struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_reg_address_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_reg_address_destroy(
+ struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_my_role_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_peer_state_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_register_accept_list_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_register_accept_list_destroy(
+ struct nb_cb_destroy_args *args);
+int lib_interface_pim_address_family_dr_priority_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_create(struct nb_cb_create_args *args);
+int lib_interface_pim_address_family_destroy(struct nb_cb_destroy_args *args);
+int lib_interface_pim_address_family_pim_enable_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_pim_passive_enable_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_hello_interval_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_hello_holdtime_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_hello_holdtime_destroy(
+ struct nb_cb_destroy_args *args);
+int lib_interface_pim_address_family_bfd_create(struct nb_cb_create_args *args);
+int lib_interface_pim_address_family_bfd_destroy(
+ struct nb_cb_destroy_args *args);
+void lib_interface_pim_address_family_bfd_apply_finish(
+ struct nb_cb_apply_finish_args *args);
+int lib_interface_pim_address_family_bfd_min_rx_interval_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_bfd_min_tx_interval_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_bfd_detect_mult_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_bfd_profile_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_bfd_profile_destroy(
+ struct nb_cb_destroy_args *args);
+int lib_interface_pim_address_family_bsm_modify(struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_unicast_bsm_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_active_active_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_use_source_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_use_source_destroy(
+ struct nb_cb_destroy_args *args);
+int lib_interface_pim_address_family_multicast_boundary_oil_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_multicast_boundary_oil_destroy(
+ struct nb_cb_destroy_args *args);
+int lib_interface_pim_address_family_mroute_create(
+ struct nb_cb_create_args *args);
+int lib_interface_pim_address_family_mroute_destroy(
+ struct nb_cb_destroy_args *args);
+int lib_interface_pim_address_family_mroute_oif_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_mroute_oif_destroy(
+ struct nb_cb_destroy_args *args);
+
+/* frr-pim-rp prototypes*/
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_create(
+ struct nb_cb_create_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_destroy(
+ struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_group_list_create(
+ struct nb_cb_create_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_group_list_destroy(
+ struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_prefix_list_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_prefix_list_destroy(
+ struct nb_cb_destroy_args *args);
+
+/* frr-gmp prototypes*/
+int lib_interface_gmp_address_family_create(
+ struct nb_cb_create_args *args);
+int lib_interface_gmp_address_family_destroy(
+ struct nb_cb_destroy_args *args);
+int lib_interface_gmp_address_family_enable_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_gmp_address_family_igmp_version_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_gmp_address_family_igmp_version_destroy(
+ struct nb_cb_destroy_args *args);
+int lib_interface_gmp_address_family_mld_version_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_gmp_address_family_mld_version_destroy(
+ struct nb_cb_destroy_args *args);
+int lib_interface_gmp_address_family_query_interval_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_gmp_address_family_query_max_response_time_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_gmp_address_family_last_member_query_interval_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_gmp_address_family_robustness_variable_modify(
+ struct nb_cb_modify_args *args);
+int lib_interface_gmp_address_family_static_group_create(
+ struct nb_cb_create_args *args);
+int lib_interface_gmp_address_family_static_group_destroy(
+ struct nb_cb_destroy_args *args);
+
+/*
+ * Callback registered with routing_nb lib to validate only
+ * one instance of pimd is allowed
+ */
+int routing_control_plane_protocols_name_validate(
+ struct nb_cb_create_args *args);
+
+#if PIM_IPV == 4
+#define FRR_PIM_AF_XPATH_VAL "frr-routing:ipv4"
+#else
+#define FRR_PIM_AF_XPATH_VAL "frr-routing:ipv6"
+#endif
+
+#define FRR_PIM_VRF_XPATH \
+ "/frr-routing:routing/control-plane-protocols/" \
+ "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \
+ "frr-pim:pim/address-family[address-family='%s']"
+#define FRR_PIM_INTERFACE_XPATH \
+ "./frr-pim:pim/address-family[address-family='%s']"
+#define FRR_PIM_ENABLE_XPATH \
+ "%s/frr-pim:pim/address-family[address-family='%s']/pim-enable"
+#define FRR_PIM_ROUTER_XPATH \
+ "/frr-pim:pim/address-family[address-family='%s']"
+#define FRR_PIM_MROUTE_XPATH \
+ "./frr-pim:pim/address-family[address-family='%s']/" \
+ "mroute[source-addr='%s'][group-addr='%s']"
+#define FRR_PIM_STATIC_RP_XPATH \
+ "/frr-routing:routing/control-plane-protocols/" \
+ "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \
+ "frr-pim:pim/address-family[address-family='%s']/" \
+ "frr-pim-rp:rp/static-rp/rp-list[rp-address='%s']"
+#define FRR_GMP_INTERFACE_XPATH \
+ "./frr-gmp:gmp/address-family[address-family='%s']"
+#define FRR_GMP_ENABLE_XPATH \
+ "%s/frr-gmp:gmp/address-family[address-family='%s']/enable"
+#define FRR_GMP_JOIN_XPATH \
+ "./frr-gmp:gmp/address-family[address-family='%s']/" \
+ "static-group[group-addr='%s'][source-addr='%s']"
+#define FRR_PIM_MSDP_XPATH FRR_PIM_VRF_XPATH "/msdp"
+
+#endif /* _FRR_PIM_NB_H_ */
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
new file mode 100644
index 0000000..be05b69
--- /dev/null
+++ b/pimd/pim_nb_config.c
@@ -0,0 +1,2912 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020 VmWare
+ * Sarita Patra
+ */
+
+#include <zebra.h>
+
+#include "pimd.h"
+#include "pim_nb.h"
+#include "lib/northbound_cli.h"
+#include "pim_igmpv3.h"
+#include "pim_neighbor.h"
+#include "pim_nht.h"
+#include "pim_pim.h"
+#include "pim_mlag.h"
+#include "pim_bfd.h"
+#include "pim_static.h"
+#include "pim_ssm.h"
+#include "pim_ssmpingd.h"
+#include "pim_vxlan.h"
+#include "pim_util.h"
+#include "log.h"
+#include "lib_errors.h"
+#include "pim_util.h"
+#include "pim6_mld.h"
+
+#if PIM_IPV == 6
+#define pim6_msdp_err(funcname, argtype) \
+int funcname(struct argtype *args) \
+{ \
+ snprintf(args->errmsg, args->errmsg_len, \
+ "Trying to configure MSDP in pim6d. " \
+ "MSDP does not exist for IPv6."); \
+ return NB_ERR_VALIDATION; \
+} \
+MACRO_REQUIRE_SEMICOLON()
+
+#define yang_dnode_get_pimaddr yang_dnode_get_ipv6
+
+#else /* PIM_IPV != 6 */
+#define pim6_msdp_err(funcname, argtype) \
+MACRO_REQUIRE_SEMICOLON()
+
+#define yang_dnode_get_pimaddr yang_dnode_get_ipv4
+#endif /* PIM_IPV != 6 */
+
+/*
+ * When PIM is disabled on interface, IGMPv3 local membership
+ * information is not injected into PIM interface state.
+ *
+ * The function pim_if_membership_refresh() fetches all IGMPv3 local
+ * membership information into PIM. It is intended to be called
+ * whenever PIM is enabled on the interface in order to collect missed
+ * local membership information.
+ */
+static void pim_if_membership_refresh(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+#if PIM_IPV == 4
+	struct listnode *grpnode;
+	struct gm_group *grp;
+#else
+	struct gm_if *gm_ifp;
+	struct gm_sg *sg, *sg_start;
+#endif
+
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+
+	/* Nothing to refresh unless both PIM and IGMP/MLD are enabled. */
+	if (!pim_ifp->pim_enable)
+		return;
+	if (!pim_ifp->gm_enable)
+		return;
+
+#if PIM_IPV == 6
+	/* MLD interface state may not have been instantiated yet. */
+	gm_ifp = pim_ifp->mld;
+	if (!gm_ifp)
+		return;
+#endif
+	/*
+	 * First clear off membership from all PIM (S,G) entries on the
+	 * interface
+	 */
+
+	pim_ifchannel_membership_clear(ifp);
+
+#if PIM_IPV == 4
+	/*
+	 * Then restore PIM (S,G) membership from all IGMPv3 (S,G) entries on
+	 * the interface
+	 */
+
+	/* scan igmp groups */
+	for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode, grp)) {
+		struct listnode *srcnode;
+		struct gm_source *src;
+
+		/* scan group sources */
+		for (ALL_LIST_ELEMENTS_RO(grp->group_source_list, srcnode,
+					  src)) {
+
+			/* only sources currently in forwarding state count
+			 * as local membership
+			 */
+			if (IGMP_SOURCE_TEST_FORWARDING(src->source_flags)) {
+				pim_sgaddr sg;
+
+				memset(&sg, 0, sizeof(sg));
+				sg.src = src->source_addr;
+				sg.grp = grp->group_addr;
+				pim_ifchannel_local_membership_add(
+					ifp, &sg, false /*is_vxlan*/);
+			}
+
+		} /* scan group sources */
+	} /* scan igmp groups */
+#else
+	/* v6: walk every MLD (S,G); in6_multicast_nofwd() filters groups
+	 * that are never forwarded
+	 */
+	sg_start = gm_sgs_first(gm_ifp->sgs);
+
+	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
+		if (!in6_multicast_nofwd(&sg->sgaddr.grp)) {
+			pim_ifchannel_local_membership_add(
+				ifp, &sg->sgaddr, false /*is_vxlan*/);
+		}
+	}
+#endif
+
+	/*
+	 * Finally delete every PIM (S,G) entry lacking all state info
+	 */
+
+	pim_ifchannel_delete_on_noinfo(ifp);
+}
+
+/*
+ * Enable PIM on an interface: create the pim_interface if needed,
+ * (re)learn addresses, pull in GM-learned local membership, and make
+ * sure the pimreg device exists.  Always returns 1 (success).
+ */
+static int pim_cmd_interface_add(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+
+	if (!pim_ifp)
+		pim_ifp = pim_if_new(ifp, false, true, false, false);
+	else
+		pim_ifp->pim_enable = true;
+
+	pim_if_addr_add_all(ifp);
+	pim_upstream_nh_if_update(pim_ifp->pim, ifp);
+	pim_if_membership_refresh(ifp);
+
+	pim_if_create_pimreg(pim_ifp->pim);
+	return 1;
+}
+
+/*
+ * Apply a configured PIM source address on an interface and translate
+ * the pim_update_source_set() result into a northbound return code,
+ * filling errmsg on failure.
+ */
+static int interface_pim_use_src_cmd_worker(struct interface *ifp,
+		pim_addr source_addr, char *errmsg, size_t errmsg_len)
+{
+	switch (pim_update_source_set(ifp, source_addr)) {
+	case PIM_SUCCESS:
+		return NB_OK;
+	case PIM_IFACE_NOT_FOUND:
+		snprintf(errmsg, errmsg_len,
+			 "Pim not enabled on this interface %s", ifp->name);
+		return NB_ERR;
+	case PIM_UPDATE_SOURCE_DUP:
+		snprintf(errmsg, errmsg_len, "Source already set");
+		return NB_ERR;
+	default:
+		snprintf(errmsg, errmsg_len, "Source set failed");
+		return NB_ERR;
+	}
+}
+
+/*
+ * Set the SPT switchover policy for a PIM instance.  For IMMEDIATE the
+ * optional prefix-list is dropped; for INFINITY the old list name is
+ * freed and replaced by 'plist' (may be NULL).  Always returns NB_OK.
+ */
+static int pim_cmd_spt_switchover(struct pim_instance *pim,
+				  enum pim_spt_switchover spt,
+				  const char *plist)
+{
+	pim->spt.switchover = spt;
+
+	switch (pim->spt.switchover) {
+	case PIM_SPT_IMMEDIATE:
+		XFREE(MTYPE_PIM_PLIST_NAME, pim->spt.plist);
+
+		pim_upstream_add_lhr_star_pimreg(pim);
+		break;
+	case PIM_SPT_INFINITY:
+		/* remove pimreg membership using the OLD list before
+		 * replacing it
+		 */
+		pim_upstream_remove_lhr_star_pimreg(pim, plist);
+
+		XFREE(MTYPE_PIM_PLIST_NAME, pim->spt.plist);
+
+		if (plist)
+			pim->spt.plist = XSTRDUP(MTYPE_PIM_PLIST_NAME, plist);
+		break;
+	}
+
+	return NB_OK;
+}
+
+/*
+ * Apply an SSM group-range prefix-list (NULL resets to the default
+ * range); maps pim_ssm_range_set() errors onto errmsg + NB_ERR.
+ */
+static int pim_ssm_cmd_worker(struct pim_instance *pim, const char *plist,
+		char *errmsg, size_t errmsg_len)
+{
+	int result = pim_ssm_range_set(pim, pim->vrf->vrf_id, plist);
+
+	if (result == PIM_SSM_ERR_NONE)
+		return NB_OK;
+
+	if (result == PIM_SSM_ERR_NO_VRF)
+		snprintf(errmsg, errmsg_len, "VRF doesn't exist");
+	else if (result == PIM_SSM_ERR_DUP)
+		snprintf(errmsg, errmsg_len, "duplicate config");
+	else
+		snprintf(errmsg, errmsg_len, "ssm range config failed");
+
+	return NB_ERR;
+}
+
+/*
+ * Configure a static RP.  Note that PIM_RP_NO_PATH is reported as a
+ * message only and the config is still accepted (NB_OK), matching the
+ * original CLI behavior.
+ */
+static int pim_rp_cmd_worker(struct pim_instance *pim, pim_addr rp_addr,
+			     struct prefix group, const char *plist,
+			     char *errmsg, size_t errmsg_len)
+{
+	int result = pim_rp_new(pim, rp_addr, group, plist, RP_SRC_STATIC);
+
+	switch (result) {
+	case PIM_RP_NO_PATH:
+		snprintfrr(errmsg, errmsg_len,
+			   "No Path to RP address specified: %pPA", &rp_addr);
+		return NB_OK;
+	case PIM_GROUP_OVERLAP:
+		snprintf(errmsg, errmsg_len,
+			 "Group range specified cannot exact match another");
+		return NB_ERR_INCONSISTENCY;
+	case PIM_GROUP_PFXLIST_OVERLAP:
+		snprintf(errmsg, errmsg_len,
+			 "This group is already covered by a RP prefix-list");
+		return NB_ERR_INCONSISTENCY;
+	case PIM_RP_PFXLIST_IN_USE:
+		snprintf(errmsg, errmsg_len,
+			 "The same prefix-list cannot be applied to multiple RPs");
+		return NB_ERR_INCONSISTENCY;
+	default:
+		return NB_OK;
+	}
+}
+
+/*
+ * Remove a static RP; maps pim_rp_del() error codes onto errmsg +
+ * NB_ERR_INCONSISTENCY, NB_OK on success.
+ */
+static int pim_no_rp_cmd_worker(struct pim_instance *pim, pim_addr rp_addr,
+				struct prefix group, const char *plist,
+				char *errmsg, size_t errmsg_len)
+{
+	char group_str[PREFIX2STR_BUFFER];
+	int result;
+
+	prefix2str(&group, group_str, sizeof(group_str));
+
+	result = pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
+
+	switch (result) {
+	case PIM_GROUP_BAD_ADDRESS:
+		snprintf(errmsg, errmsg_len,
+			 "Bad group address specified: %s", group_str);
+		return NB_ERR_INCONSISTENCY;
+	case PIM_RP_BAD_ADDRESS:
+		snprintfrr(errmsg, errmsg_len, "Bad RP address specified: %pPA",
+			   &rp_addr);
+		return NB_ERR_INCONSISTENCY;
+	case PIM_RP_NOT_FOUND:
+		snprintf(errmsg, errmsg_len,
+			 "Unable to find specified RP");
+		return NB_ERR_INCONSISTENCY;
+	default:
+		return NB_OK;
+	}
+}
+
+/*
+ * An interface counts as a "PIM interface" when either PIM or the
+ * group-management protocol (IGMP/MLD) is enabled on it for this
+ * address family.
+ */
+static bool is_pim_interface(const struct lyd_node *dnode)
+{
+	char if_xpath[XPATH_MAXLEN];
+	const struct lyd_node *pim_enable_dnode;
+	const struct lyd_node *gm_enable_dnode;
+
+	yang_dnode_get_path(dnode, if_xpath, sizeof(if_xpath));
+
+	pim_enable_dnode = yang_dnode_getf(
+		dnode,
+		"%s/frr-pim:pim/address-family[address-family='%s']/pim-enable",
+		if_xpath, FRR_PIM_AF_XPATH_VAL);
+	gm_enable_dnode = yang_dnode_getf(
+		dnode,
+		"%s/frr-gmp:gmp/address-family[address-family='%s']/enable",
+		if_xpath, FRR_PIM_AF_XPATH_VAL);
+
+	if (pim_enable_dnode && yang_dnode_get_bool(pim_enable_dnode, "."))
+		return true;
+	if (gm_enable_dnode && yang_dnode_get_bool(gm_enable_dnode, "."))
+		return true;
+
+	return false;
+}
+
+/*
+ * Enable IGMP/MLD on an interface, creating the pim_interface if it
+ * does not exist yet.  Startup work (address add + membership refresh)
+ * only runs on the first enable.
+ */
+static int pim_cmd_gm_start(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+	uint8_t need_startup = 0;
+
+	pim_ifp = ifp->info;
+
+	if (!pim_ifp) {
+		pim_ifp = pim_if_new(ifp, true, false, false, false);
+		need_startup = 1;
+	} else {
+		if (!pim_ifp->gm_enable) {
+			pim_ifp->gm_enable = true;
+			need_startup = 1;
+		}
+	}
+	pim_if_create_pimreg(pim_ifp->pim);
+
+	/* 'ip igmp' executed multiple times, with need_startup
+	 * avoid multiple if add all and membership refresh
+	 */
+	if (need_startup) {
+		pim_if_addr_add_all(ifp);
+		pim_if_membership_refresh(ifp);
+	}
+
+	return NB_OK;
+}
+
+/*
+ * CLI reconfiguration affects the interface level (struct pim_interface).
+ * This function propagates the reconfiguration to every active socket
+ * for that interface.
+ */
+#if PIM_IPV == 4
+/* Push a changed default query interval down to one IGMP socket. */
+static void igmp_sock_query_interval_reconfig(struct gm_sock *igmp)
+{
+	struct interface *ifp;
+	struct pim_interface *pim_ifp;
+
+	assert(igmp);
+	assert(igmp->interface);
+	assert(igmp->interface->info);
+
+	ifp = igmp->interface;
+	pim_ifp = ifp->info;
+
+	if (PIM_DEBUG_GM_TRACE)
+		zlog_debug("%s: Querier %pPAs on %s reconfig query_interval=%d",
+			   __func__, &igmp->ifaddr, ifp->name,
+			   pim_ifp->gm_default_query_interval);
+
+	/*
+	 * igmp_startup_mode_on() will reset QQI:
+	 *
+	 * igmp->querier_query_interval = pim_ifp->gm_default_query_interval;
+	 */
+	igmp_startup_mode_on(igmp);
+}
+
+/*
+ * Restart whichever query-related timer is running so that a changed
+ * query interval / max response time takes effect immediately.
+ */
+static void igmp_sock_query_reschedule(struct gm_sock *igmp)
+{
+	if (igmp->mtrace_only)
+		return;
+
+	if (igmp->t_igmp_query_timer) {
+		/* this socket is the querier (general query timer running):
+		 * restart the general query timer
+		 */
+		assert(igmp->t_igmp_query_timer);
+		assert(!igmp->t_other_querier_timer);
+
+		pim_igmp_general_query_off(igmp);
+		pim_igmp_general_query_on(igmp);
+
+		assert(igmp->t_igmp_query_timer);
+		assert(!igmp->t_other_querier_timer);
+	} else {
+		/* another querier is present: restart the other-querier
+		 * expiry timer instead
+		 */
+
+		assert(!igmp->t_igmp_query_timer);
+		assert(igmp->t_other_querier_timer);
+
+		pim_igmp_other_querier_timer_off(igmp);
+		pim_igmp_other_querier_timer_on(igmp);
+
+		assert(!igmp->t_igmp_query_timer);
+		assert(igmp->t_other_querier_timer);
+	}
+}
+#endif /* PIM_IPV == 4 */
+
+#if PIM_IPV == 4
+/*
+ * Change the default IGMP general query interval on an interface and
+ * propagate it to every active IGMP socket.
+ */
+static void change_query_interval(struct pim_interface *pim_ifp,
+				  int query_interval)
+{
+	struct listnode *sock_node;
+	struct gm_sock *igmp;
+
+	pim_ifp->gm_default_query_interval = query_interval;
+
+	for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_socket_list, sock_node, igmp)) {
+		igmp_sock_query_interval_reconfig(igmp);
+		igmp_sock_query_reschedule(igmp);
+	}
+}
+#endif
+
+/*
+ * Change the IGMP/MLD query max response time (in deciseconds) on an
+ * interface; no-op when the value is unchanged.  For v6 the MLD code
+ * handles the update; for v4 running timers are reset explicitly.
+ */
+static void change_query_max_response_time(struct interface *ifp,
+		int query_max_response_time_dsec)
+{
+#if PIM_IPV == 4
+	struct listnode *sock_node;
+	struct gm_sock *igmp;
+	struct listnode *grp_node;
+	struct gm_group *grp;
+#endif
+
+	struct pim_interface *pim_ifp = ifp->info;
+
+	if (pim_ifp->gm_query_max_response_time_dsec ==
+	    query_max_response_time_dsec)
+		return;
+
+	pim_ifp->gm_query_max_response_time_dsec = query_max_response_time_dsec;
+
+#if PIM_IPV == 6
+	gm_ifp_update(ifp);
+#else
+	/*
+	 * Below we modify socket/group/source timers in order to quickly
+	 * reflect the change. Otherwise, those timers would eventually
+	 * catch up.
+	 */
+
+	/* scan all sockets */
+	for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_socket_list, sock_node, igmp)) {
+		/* reschedule socket general query */
+		igmp_sock_query_reschedule(igmp);
+	}
+
+	/* scan socket groups */
+	for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grp_node, grp)) {
+		struct listnode *src_node;
+		struct gm_source *src;
+
+		/* reset group timers for groups in EXCLUDE mode */
+		if (grp->group_filtermode_isexcl)
+			igmp_group_reset_gmi(grp);
+
+		/* scan group sources */
+		for (ALL_LIST_ELEMENTS_RO(grp->group_source_list, src_node,
+					  src)) {
+
+			/* reset source timers for sources with running
+			 * timers
+			 */
+			if (src->t_source_timer)
+				igmp_source_reset_gmi(grp, src);
+		}
+	}
+#endif /* PIM_IPV == 4 */
+}
+
+/*
+ * Validate that the control-plane-protocol instance is named "pim":
+ * pimd supports exactly one instance.  (The message previously said
+ * "pimd" while the check enforces "pim" — kept consistent here.)
+ */
+int routing_control_plane_protocols_name_validate(
+	struct nb_cb_create_args *args)
+{
+	const char *name;
+
+	name = yang_dnode_get_string(args->dnode, "./name");
+	if (!strmatch(name, "pim")) {
+		snprintf(args->errmsg, args->errmsg_len,
+			 "pim supports only one instance with name pim");
+		return NB_ERR_VALIDATION;
+	}
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-pim:pim/address-family
+ */
+int pim_address_family_create(struct nb_cb_create_args *args)
+{
+	/* The address-family container carries no state of its own. */
+	(void)args;
+
+	return NB_OK;
+}
+
+int pim_address_family_destroy(struct nb_cb_destroy_args *args)
+{
+	/* Nothing was allocated for this container; nothing to free. */
+	(void)args;
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-pim:pim/address-family/packets
+ */
+int pim_address_family_packets_modify(struct nb_cb_modify_args *args)
+{
+	/* Store the configured packets-per-pass value on apply. */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	router->packet_process = yang_dnode_get_uint8(args->dnode, NULL);
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-pim:pim/address-family/join-prune-interval
+ */
+int pim_address_family_join_prune_interval_modify(
+	struct nb_cb_modify_args *args)
+{
+	/* Store the periodic Join/Prune interval on apply. */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	router->t_periodic = yang_dnode_get_uint16(args->dnode, NULL);
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-pim:pim/address-family/register-suppress-time
+ */
+/*
+ * Validate (RFC 7761 4.11: suppress > 2 * probe) and apply the
+ * register suppress time.
+ */
+int pim_address_family_register_suppress_time_modify(
+	struct nb_cb_modify_args *args)
+{
+	uint16_t value;
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+		value = yang_dnode_get_uint16(args->dnode, NULL);
+		/*
+		 * As soon as this is non-constant it needs to be replaced with
+		 * a yang_dnode_get to lookup the candidate value, *not* the
+		 * operational value. Since the code has a field assigned and
+		 * used for this value it should have YANG/CLI to set it too,
+		 * otherwise just use the #define!
+		 */
+		/* RFC7761: 4.11. Timer Values */
+		if (value <= router->register_probe_time * 2) {
+			snprintf(
+				args->errmsg, args->errmsg_len,
+				"Register suppress time (%u) must be more than "
+				"twice the register probe time (%u).",
+				value, router->register_probe_time);
+			return NB_ERR_VALIDATION;
+		}
+		break;
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		pim_update_suppress_timers(
+			yang_dnode_get_uint16(args->dnode, NULL));
+		break;
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/ecmp
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ecmp_modify(
+	struct nb_cb_modify_args *args)
+{
+	struct vrf *vrf;
+	struct pim_instance *pim;
+
+	/* Toggle ECMP nexthop selection for this PIM instance on apply. */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	vrf = nb_running_get_entry(args->dnode, NULL, true);
+	pim = vrf->info;
+	pim->ecmp_enable = yang_dnode_get_bool(args->dnode, NULL);
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/ecmp-rebalance
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ecmp_rebalance_modify(
+	struct nb_cb_modify_args *args)
+{
+	struct vrf *vrf;
+	struct pim_instance *pim;
+
+	/* Toggle ECMP rebalance behavior on apply. */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	vrf = nb_running_get_entry(args->dnode, NULL, true);
+	pim = vrf->info;
+	pim->ecmp_rebalance_enable = yang_dnode_get_bool(args->dnode, NULL);
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/keep-alive-timer
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_keep_alive_timer_modify(
+	struct nb_cb_modify_args *args)
+{
+	struct vrf *vrf;
+	struct pim_instance *pim;
+
+	/* Store the (S,G) keepalive time on apply. */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	vrf = nb_running_get_entry(args->dnode, NULL, true);
+	pim = vrf->info;
+	pim->keep_alive_time = yang_dnode_get_uint16(args->dnode, NULL);
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/rp-keep-alive-timer
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_keep_alive_timer_modify(
+	struct nb_cb_modify_args *args)
+{
+	struct vrf *vrf;
+	struct pim_instance *pim;
+
+	/* Store the RP keepalive time on apply. */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	vrf = nb_running_get_entry(args->dnode, NULL, true);
+	pim = vrf->info;
+	pim->rp_keep_alive_time = yang_dnode_get_uint16(args->dnode, NULL);
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_create(
+	struct nb_cb_create_args *args)
+{
+	/* Per-VRF address-family container; no state to create. */
+	(void)args;
+
+	return NB_OK;
+}
+
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	/* Per-VRF address-family container; nothing to tear down. */
+	(void)args;
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/send-v6-secondary
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_send_v6_secondary_modify(
+	struct nb_cb_modify_args *args)
+{
+	struct vrf *vrf;
+	struct pim_instance *pim;
+
+	/* Toggle advertising v6 secondary addresses on apply. */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	vrf = nb_running_get_entry(args->dnode, NULL, true);
+	pim = vrf->info;
+	pim->send_v6_secondary = yang_dnode_get_bool(args->dnode, NULL);
+
+	return NB_OK;
+}
+
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_send_v6_secondary_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	/* Leaf has a default; destroy needs no explicit handling. */
+	(void)args;
+
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/spt-switchover
+ */
+/*
+ * apply-finish for the spt-switchover container: read spt-action and
+ * the optional prefix-list together and hand them to
+ * pim_cmd_spt_switchover().  The prefix-list is only meaningful for
+ * PIM_SPT_INFINITY.
+ */
+void routing_control_plane_protocols_control_plane_protocol_pim_address_family_spt_switchover_apply_finish(
+	struct nb_cb_apply_finish_args *args)
+{
+	struct vrf *vrf;
+	struct pim_instance *pim;
+	int spt_switch_action;
+	const char *prefix_list = NULL;
+
+	vrf = nb_running_get_entry(args->dnode, NULL, true);
+	pim = vrf->info;
+	spt_switch_action = yang_dnode_get_enum(args->dnode, "./spt-action");
+
+	switch (spt_switch_action) {
+	case PIM_SPT_INFINITY:
+		if (yang_dnode_exists(args->dnode,
+				      "./spt-infinity-prefix-list"))
+			prefix_list = yang_dnode_get_string(
+				args->dnode, "./spt-infinity-prefix-list");
+
+		pim_cmd_spt_switchover(pim, PIM_SPT_INFINITY,
+				       prefix_list);
+		break;
+	case PIM_SPT_IMMEDIATE:
+		/* last case: falls through to end of switch */
+		pim_cmd_spt_switchover(pim, PIM_SPT_IMMEDIATE, NULL);
+	}
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/spt-switchover/spt-action
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_spt_switchover_spt_action_modify(
+	struct nb_cb_modify_args *args)
+{
+	/* Handled collectively by the spt-switchover apply-finish. */
+	(void)args;
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/spt-switchover/spt-infinity-prefix-list
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_spt_switchover_spt_infinity_prefix_list_modify(
+	struct nb_cb_modify_args *args)
+{
+	/* Handled collectively by the spt-switchover apply-finish. */
+	(void)args;
+
+	return NB_OK;
+}
+
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_spt_switchover_spt_infinity_prefix_list_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	/* Handled collectively by the spt-switchover apply-finish. */
+	(void)args;
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/ssm-prefix-list
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_prefix_list_modify(
+	struct nb_cb_modify_args *args)
+{
+	struct vrf *vrf;
+	struct pim_instance *pim;
+	const char *plist_name;
+
+	/* Install the configured SSM range prefix-list on apply. */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	vrf = nb_running_get_entry(args->dnode, NULL, true);
+	pim = vrf->info;
+	plist_name = yang_dnode_get_string(args->dnode, NULL);
+
+	if (pim_ssm_cmd_worker(pim, plist_name, args->errmsg,
+			       args->errmsg_len))
+		return NB_ERR_INCONSISTENCY;
+
+	return NB_OK;
+}
+
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_prefix_list_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	struct vrf *vrf;
+	struct pim_instance *pim;
+
+	/* Reset the SSM range to its default (NULL prefix-list). */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	vrf = nb_running_get_entry(args->dnode, NULL, true);
+	pim = vrf->info;
+
+	if (pim_ssm_cmd_worker(pim, NULL, args->errmsg, args->errmsg_len))
+		return NB_ERR_INCONSISTENCY;
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/ssm-pingd-source-ip
+ */
+/* Start an ssmpingd responder bound to the given source address. */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_pingd_source_ip_create(
+	struct nb_cb_create_args *args)
+{
+	struct vrf *vrf;
+	struct pim_instance *pim;
+	int result;
+	pim_addr source_addr;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		vrf = nb_running_get_entry(args->dnode, NULL, true);
+		pim = vrf->info;
+		yang_dnode_get_pimaddr(&source_addr, args->dnode,
+				       "./source-addr");
+		result = pim_ssmpingd_start(pim, source_addr);
+		if (result) {
+			snprintf(
+				args->errmsg, args->errmsg_len,
+				"%% Failure starting ssmpingd for source %pPA: %d",
+				&source_addr, result);
+			return NB_ERR_INCONSISTENCY;
+		}
+		/* last case: falls through to end of switch */
+	}
+
+	return NB_OK;
+}
+
+/* Stop the ssmpingd responder bound to the given source address. */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_pingd_source_ip_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	struct vrf *vrf;
+	struct pim_instance *pim;
+	int result;
+	pim_addr source_addr;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		vrf = nb_running_get_entry(args->dnode, NULL, true);
+		pim = vrf->info;
+		yang_dnode_get_pimaddr(&source_addr, args->dnode,
+				       "./source-addr");
+		result = pim_ssmpingd_stop(pim, source_addr);
+		if (result) {
+			snprintf(
+				args->errmsg, args->errmsg_len,
+				"%% Failure stopping ssmpingd for source %pPA: %d",
+				&source_addr, result);
+			return NB_ERR_INCONSISTENCY;
+		}
+
+		break;
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/hold-time
+ */
+int pim_msdp_hold_time_modify(struct nb_cb_modify_args *args)
+{
+	struct vrf *vrf;
+	struct pim_instance *pim;
+
+	/* Store the MSDP peer hold time on apply. */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	vrf = nb_running_get_entry(args->dnode, NULL, true);
+	pim = vrf->info;
+	pim->msdp.hold_time = yang_dnode_get_uint16(args->dnode, NULL);
+
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/keep-alive
+ */
+int pim_msdp_keep_alive_modify(struct nb_cb_modify_args *args)
+{
+	struct vrf *vrf;
+	struct pim_instance *pim;
+
+	/* Store the MSDP keepalive interval on apply. */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	vrf = nb_running_get_entry(args->dnode, NULL, true);
+	pim = vrf->info;
+	pim->msdp.keep_alive = yang_dnode_get_uint16(args->dnode, NULL);
+
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/connection-retry
+ */
+int pim_msdp_connection_retry_modify(struct nb_cb_modify_args *args)
+{
+	struct vrf *vrf;
+	struct pim_instance *pim;
+
+	/* Store the MSDP connection retry interval on apply. */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	vrf = nb_running_get_entry(args->dnode, NULL, true);
+	pim = vrf->info;
+	pim->msdp.connection_retry = yang_dnode_get_uint16(args->dnode, NULL);
+
+	return NB_OK;
+}
+
+/*
+ * MSDP exists only for IPv4.  In the pim6d build these macros expand
+ * to stub callbacks that reject the configuration with
+ * NB_ERR_VALIDATION; in the pimd build they expand to nothing and the
+ * real implementations below (under PIM_IPV != 6) are used.
+ */
+pim6_msdp_err(pim_msdp_mesh_group_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_mesh_group_create, nb_cb_create_args);
+pim6_msdp_err(pim_msdp_mesh_group_source_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_mesh_group_source_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_mesh_group_members_create, nb_cb_create_args);
+pim6_msdp_err(pim_msdp_mesh_group_members_destroy, nb_cb_destroy_args);
+pim6_msdp_err(routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_modify,
+	      nb_cb_modify_args);
+pim6_msdp_err(routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_destroy,
+	      nb_cb_destroy_args);
+pim6_msdp_err(routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_create,
+	      nb_cb_create_args);
+
+#if PIM_IPV != 6
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups
+ */
+/* Create an MSDP mesh group and anchor it on the config node. */
+int pim_msdp_mesh_group_create(struct nb_cb_create_args *args)
+{
+	struct pim_msdp_mg *mg;
+	struct vrf *vrf;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		vrf = nb_running_get_entry(args->dnode, NULL, true);
+		mg = pim_msdp_mg_new(vrf->info, yang_dnode_get_string(
+					    args->dnode, "./name"));
+		nb_running_set_entry(args->dnode, mg);
+		break;
+	}
+
+	return NB_OK;
+}
+
+/* Detach the mesh group from the config node and free it. */
+int pim_msdp_mesh_group_destroy(struct nb_cb_destroy_args *args)
+{
+	struct pim_msdp_mg *mg;
+	struct vrf *vrf;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		mg = nb_running_unset_entry(args->dnode);
+		vrf = nb_running_get_entry(args->dnode, NULL, true);
+		pim_msdp_mg_free(vrf->info, &mg);
+		break;
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups/source
+ */
+/* Set the source address used for this mesh group's MSDP sessions. */
+int pim_msdp_mesh_group_source_modify(struct nb_cb_modify_args *args)
+{
+	const struct lyd_node *vrf_dnode;
+	struct pim_msdp_mg *mg;
+	struct vrf *vrf;
+	struct ipaddr ip;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		mg = nb_running_get_entry(args->dnode, NULL, true);
+		/* the VRF entry is anchored two levels above the
+		 * address-family node
+		 */
+		vrf_dnode =
+			yang_dnode_get_parent(args->dnode, "address-family");
+		vrf = nb_running_get_entry(vrf_dnode, "../../", true);
+		yang_dnode_get_ip(&ip, args->dnode, NULL);
+
+		pim_msdp_mg_src_add(vrf->info, mg, &ip.ip._v4_addr);
+		break;
+	}
+	return NB_OK;
+}
+
+/*
+ * Unset the mesh group source.
+ *
+ * NOTE(review): the destroy path "removes" the source by re-adding
+ * INADDR_ANY via pim_msdp_mg_src_add() — presumably ANY encodes
+ * "revert to default source"; confirm against pim_msdp_mg_src_add().
+ */
+int pim_msdp_mesh_group_source_destroy(struct nb_cb_destroy_args *args)
+{
+	const struct lyd_node *vrf_dnode;
+	struct pim_msdp_mg *mg;
+	struct vrf *vrf;
+	struct in_addr addr;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		mg = nb_running_get_entry(args->dnode, NULL, true);
+		vrf_dnode =
+			yang_dnode_get_parent(args->dnode, "address-family");
+		vrf = nb_running_get_entry(vrf_dnode, "../../", true);
+
+		addr.s_addr = INADDR_ANY;
+		pim_msdp_mg_src_add(vrf->info, mg, &addr);
+		break;
+	}
+	return NB_OK;
+}
+
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups/members
+ */
+/* Add a member peer to the mesh group and anchor it on the node. */
+int pim_msdp_mesh_group_members_create(struct nb_cb_create_args *args)
+{
+	const struct lyd_node *vrf_dnode;
+	struct pim_msdp_mg_mbr *mbr;
+	struct pim_msdp_mg *mg;
+	struct vrf *vrf;
+	struct ipaddr ip;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		mg = nb_running_get_entry(args->dnode, NULL, true);
+		vrf_dnode =
+			yang_dnode_get_parent(args->dnode, "address-family");
+		vrf = nb_running_get_entry(vrf_dnode, "../../", true);
+		yang_dnode_get_ip(&ip, args->dnode, "address");
+
+		mbr = pim_msdp_mg_mbr_add(vrf->info, mg, &ip.ip._v4_addr);
+		nb_running_set_entry(args->dnode, mbr);
+		break;
+	}
+
+	return NB_OK;
+}
+
+/* Remove a member peer from its mesh group. */
+int pim_msdp_mesh_group_members_destroy(struct nb_cb_destroy_args *args)
+{
+	struct pim_msdp_mg_mbr *mbr;
+	struct pim_msdp_mg *mg;
+	const struct lyd_node *mg_dnode;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		mbr = nb_running_get_entry(args->dnode, NULL, true);
+		/* owning mesh group is anchored on the parent list node */
+		mg_dnode =
+			yang_dnode_get_parent(args->dnode, "msdp-mesh-groups");
+		mg = nb_running_get_entry(mg_dnode, NULL, true);
+		pim_msdp_mg_mbr_del(mg, mbr);
+		nb_running_unset_entry(args->dnode);
+		break;
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-peer
+ */
+/* Create a standalone MSDP peer and anchor it on the config node. */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_create(
+	struct nb_cb_create_args *args)
+{
+	struct pim_msdp_peer *mp;
+	struct pim_instance *pim;
+	struct vrf *vrf;
+	struct ipaddr peer_ip;
+	struct ipaddr source_ip;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		vrf = nb_running_get_entry(args->dnode, NULL, true);
+		pim = vrf->info;
+		yang_dnode_get_ip(&peer_ip, args->dnode, "./peer-ip");
+		yang_dnode_get_ip(&source_ip, args->dnode, "./source-ip");
+		mp = pim_msdp_peer_add(pim, &peer_ip.ipaddr_v4,
+				       &source_ip.ipaddr_v4, NULL);
+		nb_running_set_entry(args->dnode, mp);
+		break;
+	}
+
+	return NB_OK;
+}
+
+/* Detach and delete a standalone MSDP peer. */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	struct pim_msdp_peer *mp;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		mp = nb_running_unset_entry(args->dnode);
+		pim_msdp_peer_del(&mp);
+		break;
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-peer/source-ip
+ */
+/* Change the local source address of an existing MSDP peer session. */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_modify(
+	struct nb_cb_modify_args *args)
+{
+	struct pim_msdp_peer *mp;
+	struct ipaddr source_ip;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		mp = nb_running_get_entry(args->dnode, NULL, true);
+		yang_dnode_get_ip(&source_ip, args->dnode, NULL);
+		pim_msdp_peer_change_source(mp, &source_ip.ipaddr_v4);
+		break;
+	}
+
+	return NB_OK;
+}
+#endif /* PIM_IPV != 6 */
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_create(
+	struct nb_cb_create_args *args)
+{
+	/* Actual MLAG setup happens in the container's apply-finish. */
+	(void)args;
+
+	return NB_OK;
+}
+
+/*
+ * Reset VXLAN MLAG state when the mlag container is removed.
+ *
+ * NOTE(review): mlag_enable is passed as true here even though this is
+ * the destroy path; role MLAG_ROLE_NONE plus NULL peerlink and a zero
+ * address appear to encode the reset — confirm against
+ * pim_vxlan_mlag_update().
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	struct in_addr addr;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		addr.s_addr = 0;
+		pim_vxlan_mlag_update(true/*mlag_enable*/,
+				false/*peer_state*/, MLAG_ROLE_NONE,
+				NULL/*peerlink*/, &addr);
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag
+ */
+void routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_apply_finish(
+ struct nb_cb_apply_finish_args *args)
+{
+ const char *ifname;
+ uint32_t role;
+ bool peer_state;
+ struct interface *ifp;
+ struct ipaddr reg_addr;
+
+ ifname = yang_dnode_get_string(args->dnode, "./peerlink-rif");
+ ifp = if_lookup_by_name(ifname, VRF_DEFAULT);
+ if (!ifp) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "No such interface name %s", ifname);
+ return;
+ }
+ role = yang_dnode_get_enum(args->dnode, "./my-role");
+ peer_state = yang_dnode_get_bool(args->dnode, "./peer-state");
+ yang_dnode_get_ip(&reg_addr, args->dnode, "./reg-address");
+
+ pim_vxlan_mlag_update(true, peer_state, role, ifp,
+ &reg_addr.ip._v4_addr);
+}
+
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag/peerlink-rif
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_peerlink_rif_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_peerlink_rif_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag/reg-address
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_reg_address_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_reg_address_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag/my-role
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_my_role_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag/peer-state
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_peer_state_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/register-accept-list
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_register_accept_list_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ const char *plist;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ plist = yang_dnode_get_string(args->dnode, NULL);
+
+ XFREE(MTYPE_PIM_PLIST_NAME, pim->register_plist);
+ pim->register_plist = XSTRDUP(MTYPE_PIM_PLIST_NAME, plist);
+
+ break;
+ }
+
+ return NB_OK;
+}
+
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_register_accept_list_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+
+ XFREE(MTYPE_PIM_PLIST_NAME, pim->register_plist);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family
+ */
+int lib_interface_pim_address_family_create(struct nb_cb_create_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+int lib_interface_pim_address_family_destroy(struct nb_cb_destroy_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ if (!pim_ifp)
+ return NB_OK;
+
+ pim_pim_interface_delete(ifp);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/pim-enable
+ */
+int lib_interface_pim_address_family_pim_enable_modify(struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ int mcast_if_count;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ mcast_if_count =
+ yang_get_list_elements_count(if_dnode);
+
+ /* Limiting mcast interfaces to number of VIFs */
+ if (mcast_if_count == MAXVIFS) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "Max multicast interfaces(%d) reached.",
+ MAXVIFS);
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+
+ if (yang_dnode_get_bool(args->dnode, NULL)) {
+ if (!pim_cmd_interface_add(ifp)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "Could not enable PIM SM on interface %s",
+ ifp->name);
+ return NB_ERR_INCONSISTENCY;
+ }
+ } else {
+ pim_ifp = ifp->info;
+ if (!pim_ifp)
+ return NB_ERR_INCONSISTENCY;
+
+ pim_pim_interface_delete(ifp);
+ }
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-interface:lib/interface/frr-pim:pim/address-family/pim-passive-enable
+ */
+int lib_interface_pim_address_family_pim_passive_enable_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->pim_passive_enable =
+ yang_dnode_get_bool(args->dnode, NULL);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/hello-interval
+ */
+int lib_interface_pim_address_family_hello_interval_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->pim_hello_period =
+ yang_dnode_get_uint16(args->dnode, NULL);
+ pim_ifp->pim_default_holdtime = -1;
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/hello-holdtime
+ */
+int lib_interface_pim_address_family_hello_holdtime_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->pim_default_holdtime =
+ yang_dnode_get_uint16(args->dnode, NULL);
+ break;
+ }
+
+ return NB_OK;
+
+}
+
+int lib_interface_pim_address_family_hello_holdtime_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->pim_default_holdtime = -1;
+ break;
+ }
+
+ return NB_OK;
+}
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/bfd
+ */
+int lib_interface_pim_address_family_bfd_create(struct nb_cb_create_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->bfd_config.enabled = true;
+ break;
+ }
+
+ return NB_OK;
+}
+
+int lib_interface_pim_address_family_bfd_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ if (!is_pim_interface(if_dnode)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "Pim not enabled on this interface");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->bfd_config.enabled = false;
+ pim_bfd_reg_dereg_all_nbr(ifp);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/bfd
+ */
+void lib_interface_pim_address_family_bfd_apply_finish(
+ struct nb_cb_apply_finish_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp) {
+ zlog_debug("Pim not enabled on this interface");
+ return;
+ }
+
+ pim_ifp->bfd_config.detection_multiplier =
+ yang_dnode_get_uint8(args->dnode, "./detect_mult");
+ pim_ifp->bfd_config.min_rx =
+ yang_dnode_get_uint16(args->dnode, "./min-rx-interval");
+ pim_ifp->bfd_config.min_tx =
+ yang_dnode_get_uint16(args->dnode, "./min-tx-interval");
+
+ pim_bfd_reg_dereg_all_nbr(ifp);
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/bfd/min-rx-interval
+ */
+int lib_interface_pim_address_family_bfd_min_rx_interval_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/bfd/min-tx-interval
+ */
+int lib_interface_pim_address_family_bfd_min_tx_interval_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/bfd/detect_mult
+ */
+int lib_interface_pim_address_family_bfd_detect_mult_modify(
+ struct nb_cb_modify_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/bfd/profile
+ */
+int lib_interface_pim_address_family_bfd_profile_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ XFREE(MTYPE_TMP, pim_ifp->bfd_config.profile);
+ pim_ifp->bfd_config.profile = XSTRDUP(
+ MTYPE_TMP, yang_dnode_get_string(args->dnode, NULL));
+ break;
+ }
+
+ return NB_OK;
+}
+
+int lib_interface_pim_address_family_bfd_profile_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ XFREE(MTYPE_TMP, pim_ifp->bfd_config.profile);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/bsm
+ */
+int lib_interface_pim_address_family_bsm_modify(struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->bsm_enable = yang_dnode_get_bool(args->dnode, NULL);
+
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/unicast-bsm
+ */
+int lib_interface_pim_address_family_unicast_bsm_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->ucast_bsm_accept =
+ yang_dnode_get_bool(args->dnode, NULL);
+
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/active-active
+ */
+int lib_interface_pim_address_family_active_active_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ if (yang_dnode_get_bool(args->dnode, NULL)) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(
+ "Configuring PIM active-active on Interface: %s",
+ ifp->name);
+ pim_if_configure_mlag_dualactive(pim_ifp);
+ } else {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(
+ "UnConfiguring PIM active-active on Interface: %s",
+ ifp->name);
+ pim_if_unconfigure_mlag_dualactive(pim_ifp);
+ }
+
+ break;
+ }
+
+ return NB_OK;
+
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/dr-priority
+ */
+int lib_interface_pim_address_family_dr_priority_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ uint32_t old_dr_prio;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ if (!is_pim_interface(if_dnode)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "Pim not enabled on this interface");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ old_dr_prio = pim_ifp->pim_dr_priority;
+ pim_ifp->pim_dr_priority = yang_dnode_get_uint32(args->dnode,
+ NULL);
+
+ if (old_dr_prio != pim_ifp->pim_dr_priority) {
+ pim_if_dr_election(ifp);
+ pim_hello_restart_now(ifp);
+ }
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/use-source
+ */
+int lib_interface_pim_address_family_use_source_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ pim_addr source_addr;
+ int result;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ if (!is_pim_interface(if_dnode)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "Pim not enabled on this interface");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+#if PIM_IPV == 4
+ yang_dnode_get_ipv4(&source_addr, args->dnode, NULL);
+#else
+ yang_dnode_get_ipv6(&source_addr, args->dnode, NULL);
+#endif
+
+ result = interface_pim_use_src_cmd_worker(
+ ifp, source_addr,
+ args->errmsg, args->errmsg_len);
+
+ if (result != PIM_SUCCESS)
+ return NB_ERR_INCONSISTENCY;
+
+ break;
+ }
+
+ return NB_OK;
+}
+
+int lib_interface_pim_address_family_use_source_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct interface *ifp;
+ int result;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ if (!is_pim_interface(if_dnode)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "Pim not enabled on this interface");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+
+ result = interface_pim_use_src_cmd_worker(ifp, PIMADDR_ANY,
+ args->errmsg,
+ args->errmsg_len);
+
+ if (result != PIM_SUCCESS)
+ return NB_ERR_INCONSISTENCY;
+
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/multicast-boundary-oil
+ */
+int lib_interface_pim_address_family_multicast_boundary_oil_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ const char *plist;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ if (!is_pim_interface(if_dnode)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "Pim not enabled on this interface");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ plist = yang_dnode_get_string(args->dnode, NULL);
+
+ if (pim_ifp->boundary_oil_plist)
+ XFREE(MTYPE_PIM_INTERFACE, pim_ifp->boundary_oil_plist);
+
+ pim_ifp->boundary_oil_plist =
+ XSTRDUP(MTYPE_PIM_INTERFACE, plist);
+
+ break;
+ }
+
+ return NB_OK;
+}
+
+int lib_interface_pim_address_family_multicast_boundary_oil_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ if (!is_pim_interface(if_dnode)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "%% Enable PIM and/or IGMP on this interface first");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ if (pim_ifp->boundary_oil_plist)
+ XFREE(MTYPE_PIM_INTERFACE, pim_ifp->boundary_oil_plist);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/mroute
+ */
+int lib_interface_pim_address_family_mroute_create(
+ struct nb_cb_create_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+int lib_interface_pim_address_family_mroute_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct pim_instance *pim;
+ struct pim_interface *pim_iifp;
+ struct interface *iif;
+ struct interface *oif;
+ const char *oifname;
+ pim_addr source_addr;
+ pim_addr group_addr;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ if (!is_pim_interface(if_dnode)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "%% Enable PIM and/or IGMP on this interface first");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ iif = nb_running_get_entry(args->dnode, NULL, true);
+ pim_iifp = iif->info;
+ pim = pim_iifp->pim;
+
+ oifname = yang_dnode_get_string(args->dnode, "./oif");
+ oif = if_lookup_by_name(oifname, pim->vrf->vrf_id);
+
+ if (!oif) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "No such interface name %s",
+ oifname);
+ return NB_ERR_INCONSISTENCY;
+ }
+
+ yang_dnode_get_pimaddr(&source_addr, args->dnode, "./source-addr");
+ yang_dnode_get_pimaddr(&group_addr, args->dnode, "./group-addr");
+
+ if (pim_static_del(pim, iif, oif, group_addr, source_addr)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "Failed to remove static mroute");
+ return NB_ERR_INCONSISTENCY;
+ }
+
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/mroute/oif
+ */
/* Set the output interface of a static mroute entry and install the
 * route.  When PIM_ENFORCE_LOOPFREE_MFC is defined, validation also
 * rejects an OIF equal to the IIF to avoid a multicast forwarding loop.
 */
int lib_interface_pim_address_family_mroute_oif_modify(
	struct nb_cb_modify_args *args)
{
	struct pim_instance *pim;
	struct pim_interface *pim_iifp;
	struct interface *iif;
	struct interface *oif;
	const char *oifname;
	pim_addr source_addr;
	pim_addr group_addr;
	const struct lyd_node *if_dnode;

	switch (args->event) {
	case NB_EV_VALIDATE:
		/* The parent interface must already run PIM/IGMP. */
		if_dnode = yang_dnode_get_parent(args->dnode, "interface");
		if (!is_pim_interface(if_dnode)) {
			snprintf(args->errmsg, args->errmsg_len,
				 "%% Enable PIM and/or IGMP on this interface first");
			return NB_ERR_VALIDATION;
		}

#ifdef PIM_ENFORCE_LOOPFREE_MFC
		/* abort=false: during validation the interface may not be
		 * in the running config yet; skip the loop check then. */
		iif = nb_running_get_entry(args->dnode, NULL, false);
		if (!iif) {
			return NB_OK;
		}

		pim_iifp = iif->info;
		pim = pim_iifp->pim;

		oifname = yang_dnode_get_string(args->dnode, NULL);
		oif = if_lookup_by_name(oifname, pim->vrf->vrf_id);

		/* Reject IIF == OIF: that would forward a packet back out
		 * the interface it arrived on. */
		if (oif && (iif->ifindex == oif->ifindex)) {
			strlcpy(args->errmsg,
				"% IIF same as OIF and loopfree enforcement is enabled; rejecting",
				args->errmsg_len);
			return NB_ERR_VALIDATION;
		}
#endif
		break;
	case NB_EV_PREPARE:
	case NB_EV_ABORT:
		break;
	case NB_EV_APPLY:
		iif = nb_running_get_entry(args->dnode, NULL, true);
		pim_iifp = iif->info;
		pim = pim_iifp->pim;

		oifname = yang_dnode_get_string(args->dnode, NULL);
		oif = if_lookup_by_name(oifname, pim->vrf->vrf_id);
		if (!oif) {
			snprintf(args->errmsg, args->errmsg_len,
				 "No such interface name %s",
				 oifname);
			return NB_ERR_INCONSISTENCY;
		}

		/* Source/group live on the parent mroute list entry. */
		yang_dnode_get_pimaddr(&source_addr, args->dnode, "../source-addr");
		yang_dnode_get_pimaddr(&group_addr, args->dnode, "../group-addr");

		if (pim_static_add(pim, iif, oif, group_addr, source_addr)) {
			snprintf(args->errmsg, args->errmsg_len,
				 "Failed to add static mroute");
			return NB_ERR_INCONSISTENCY;
		}

		break;
	}

	return NB_OK;
}
+
+int lib_interface_pim_address_family_mroute_oif_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/static-rp/rp-list
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_create(
+ struct nb_cb_create_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct prefix group;
+ pim_addr rp_addr;
+ const char *plist;
+ int result = 0;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ yang_dnode_get_pimaddr(&rp_addr, args->dnode, "./rp-address");
+
+ if (yang_dnode_get(args->dnode, "./group-list")) {
+ yang_dnode_get_prefix(&group, args->dnode,
+ "./group-list");
+ apply_mask(&group);
+ result = pim_no_rp_cmd_worker(pim, rp_addr, group, NULL,
+ args->errmsg,
+ args->errmsg_len);
+ }
+
+ else if (yang_dnode_get(args->dnode, "./prefix-list")) {
+ plist = yang_dnode_get_string(args->dnode,
+ "./prefix-list");
+ if (!pim_get_all_mcast_group(&group)) {
+ flog_err(
+ EC_LIB_DEVELOPMENT,
+ "Unable to convert 224.0.0.0/4 to prefix");
+ return NB_ERR_INCONSISTENCY;
+ }
+
+ result = pim_no_rp_cmd_worker(pim, rp_addr, group,
+ plist, args->errmsg,
+ args->errmsg_len);
+ }
+
+ if (result)
+ return NB_ERR_INCONSISTENCY;
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/static-rp/rp-list/group-list
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_group_list_create(
+ struct nb_cb_create_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct prefix group;
+ pim_addr rp_addr;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address");
+ yang_dnode_get_prefix(&group, args->dnode, NULL);
+ apply_mask(&group);
+ return pim_rp_cmd_worker(pim, rp_addr, group, NULL,
+ args->errmsg, args->errmsg_len);
+ }
+
+ return NB_OK;
+}
+
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_group_list_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct prefix group;
+ pim_addr rp_addr;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address");
+ yang_dnode_get_prefix(&group, args->dnode, NULL);
+ apply_mask(&group);
+
+ return pim_no_rp_cmd_worker(pim, rp_addr, group, NULL,
+ args->errmsg, args->errmsg_len);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/static-rp/rp-list/prefix-list
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_prefix_list_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct prefix group;
+ pim_addr rp_addr;
+ const char *plist;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ plist = yang_dnode_get_string(args->dnode, NULL);
+ yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address");
+ if (!pim_get_all_mcast_group(&group)) {
+ flog_err(EC_LIB_DEVELOPMENT,
+ "Unable to convert 224.0.0.0/4 to prefix");
+ return NB_ERR_INCONSISTENCY;
+ }
+ return pim_rp_cmd_worker(pim, rp_addr, group, plist,
+ args->errmsg, args->errmsg_len);
+ }
+
+ return NB_OK;
+}
+
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_prefix_list_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct prefix group;
+ pim_addr rp_addr;
+ const char *plist;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address");
+ plist = yang_dnode_get_string(args->dnode, NULL);
+ if (!pim_get_all_mcast_group(&group)) {
+ flog_err(EC_LIB_DEVELOPMENT,
+ "Unable to convert 224.0.0.0/4 to prefix");
+ return NB_ERR_INCONSISTENCY;
+ }
+ return pim_no_rp_cmd_worker(pim, rp_addr, group, plist,
+ args->errmsg, args->errmsg_len);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family
+ */
+int lib_interface_gmp_address_family_create(struct nb_cb_create_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ break;
+ }
+
+ return NB_OK;
+}
+
+int lib_interface_gmp_address_family_destroy(struct nb_cb_destroy_args *args)
+{
+ struct interface *ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_gm_interface_delete(ifp);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/enable
+ */
+int lib_interface_gmp_address_family_enable_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ bool gm_enable;
+ int mcast_if_count;
+ const char *ifp_name;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ mcast_if_count =
+ yang_get_list_elements_count(if_dnode);
+ /* Limiting mcast interfaces to number of VIFs */
+ if (mcast_if_count == MAXVIFS) {
+ ifp_name = yang_dnode_get_string(if_dnode, "name");
+ snprintf(
+ args->errmsg, args->errmsg_len,
+ "Max multicast interfaces(%d) Reached. Could not enable %s on interface %s",
+ MAXVIFS, GM, ifp_name);
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ gm_enable = yang_dnode_get_bool(args->dnode, NULL);
+
+ if (gm_enable)
+ return pim_cmd_gm_start(ifp);
+
+ else
+ pim_gm_interface_delete(ifp);
+ }
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/igmp-version
+ */
+int lib_interface_gmp_address_family_igmp_version_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ int igmp_version, old_version = 0;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ return NB_ERR_INCONSISTENCY;
+
+ igmp_version = yang_dnode_get_uint8(args->dnode, NULL);
+ old_version = pim_ifp->igmp_version;
+ pim_ifp->igmp_version = igmp_version;
+
+ /* Current and new version is different refresh existing
+ * membership. Going from 3 -> 2 or 2 -> 3.
+ */
+ if (old_version != igmp_version)
+ pim_if_membership_refresh(ifp);
+
+ break;
+ }
+
+ return NB_OK;
+}
+
+int lib_interface_gmp_address_family_igmp_version_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->igmp_version = IGMP_DEFAULT_VERSION;
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/mld-version
+ */
+int lib_interface_gmp_address_family_mld_version_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ if (!pim_ifp)
+ return NB_ERR_INCONSISTENCY;
+
+ pim_ifp->mld_version = yang_dnode_get_uint8(args->dnode, NULL);
+ gm_ifp_update(ifp);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/* Northbound destroy handler for the per-interface MLD version leaf:
+ * reverts to the default and re-syncs MLD state.
+ */
+int lib_interface_gmp_address_family_mld_version_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	struct interface *ifp;
+	struct pim_interface *pim_ifp;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		ifp = nb_running_get_entry(args->dnode, NULL, true);
+		pim_ifp = ifp->info;
+		if (!pim_ifp)
+			return NB_ERR_INCONSISTENCY;
+
+		/* MLDv2 is the default version. */
+		pim_ifp->mld_version = 2;
+		gm_ifp_update(ifp);
+		break;
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/query-interval
+ */
+/* Northbound modify handler for the GMP query-interval leaf.
+ * IPv4 and IPv6 take different apply paths, hence the compile-time split.
+ */
+int lib_interface_gmp_address_family_query_interval_modify(
+	struct nb_cb_modify_args *args)
+{
+	struct interface *ifp;
+	int query_interval;
+
+#if PIM_IPV == 4
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		ifp = nb_running_get_entry(args->dnode, NULL, true);
+		query_interval = yang_dnode_get_uint16(args->dnode, NULL);
+		/* IGMP path: helper updates timers/queriers itself. */
+		change_query_interval(ifp->info, query_interval);
+	}
+#else
+	struct pim_interface *pim_ifp;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		ifp = nb_running_get_entry(args->dnode, NULL, true);
+		pim_ifp = ifp->info;
+		if (!pim_ifp)
+			return NB_ERR_INCONSISTENCY;
+
+		query_interval = yang_dnode_get_uint16(args->dnode, NULL);
+		pim_ifp->gm_default_query_interval = query_interval;
+		/* MLD path: store then re-sync interface state. */
+		gm_ifp_update(ifp);
+	}
+#endif
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/query-max-response-time
+ */
+/* Northbound modify handler for the query-max-response-time leaf.
+ * The value is carried in deciseconds, as encoded in the YANG model.
+ */
+int lib_interface_gmp_address_family_query_max_response_time_modify(
+	struct nb_cb_modify_args *args)
+{
+	struct interface *ifp;
+	int query_max_response_time_dsec;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		ifp = nb_running_get_entry(args->dnode, NULL, true);
+		query_max_response_time_dsec =
+			yang_dnode_get_uint16(args->dnode, NULL);
+		change_query_max_response_time(ifp,
+					       query_max_response_time_dsec);
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/last-member-query-interval
+ */
+/* Northbound modify handler for the last-member-query-interval leaf.
+ * Stores the value (deciseconds) used for group/source-specific queries.
+ */
+int lib_interface_gmp_address_family_last_member_query_interval_modify(
+	struct nb_cb_modify_args *args)
+{
+	struct interface *ifp;
+	struct pim_interface *pim_ifp;
+	int last_member_query_interval;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		ifp = nb_running_get_entry(args->dnode, NULL, true);
+		pim_ifp = ifp->info;
+		/* Other handlers guard against PIM being disabled on the
+		 * interface; do the same here to avoid a NULL dereference.
+		 */
+		if (!pim_ifp)
+			return NB_ERR_INCONSISTENCY;
+
+		last_member_query_interval =
+			yang_dnode_get_uint16(args->dnode, NULL);
+		pim_ifp->gm_specific_query_max_response_time_dsec =
+			last_member_query_interval;
+#if PIM_IPV == 6
+		/* MLD reads this at interface-update time. */
+		gm_ifp_update(ifp);
+#endif
+
+		break;
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/robustness-variable
+ */
+/* Northbound modify handler for the robustness-variable leaf.
+ * The robustness variable doubles as the last-member query count.
+ */
+int lib_interface_gmp_address_family_robustness_variable_modify(
+	struct nb_cb_modify_args *args)
+{
+	struct interface *ifp;
+	struct pim_interface *pim_ifp;
+	int last_member_query_count;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		ifp = nb_running_get_entry(args->dnode, NULL, true);
+		pim_ifp = ifp->info;
+		/* Other handlers guard against PIM being disabled on the
+		 * interface; do the same here to avoid a NULL dereference.
+		 */
+		if (!pim_ifp)
+			return NB_ERR_INCONSISTENCY;
+
+		last_member_query_count =
+			yang_dnode_get_uint8(args->dnode, NULL);
+		pim_ifp->gm_last_member_query_count = last_member_query_count;
+#if PIM_IPV == 6
+		/* MLD reads this at interface-update time. */
+		gm_ifp_update(ifp);
+#endif
+		break;
+	}
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/static-group
+ */
+/* Northbound create handler for a static (S,G)/(*,G) group join.
+ * VALIDATE rejects joins on non-PIM interfaces and reserved group ranges;
+ * APPLY performs the actual IGMP/MLD join.
+ */
+int lib_interface_gmp_address_family_static_group_create(
+	struct nb_cb_create_args *args)
+{
+	struct interface *ifp;
+	pim_addr source_addr;
+	pim_addr group_addr;
+	int result;
+	const char *ifp_name;
+	const struct lyd_node *if_dnode;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+		/* Reject early if multicast is not enabled on the parent
+		 * interface node.
+		 */
+		if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+		if (!is_pim_interface(if_dnode)) {
+			ifp_name = yang_dnode_get_string(if_dnode, "name");
+			snprintf(args->errmsg, args->errmsg_len,
+				 "multicast not enabled on interface %s",
+				 ifp_name);
+			return NB_ERR_VALIDATION;
+		}
+
+		yang_dnode_get_pimaddr(&group_addr, args->dnode,
+				       "./group-addr");
+#if PIM_IPV == 4
+		/* Link-local multicast range is never joinable. */
+		if (pim_is_group_224_0_0_0_24(group_addr)) {
+			snprintf(
+				args->errmsg, args->errmsg_len,
+				"Groups within 224.0.0.0/24 are reserved and cannot be joined");
+			return NB_ERR_VALIDATION;
+		}
+#else
+		if (ipv6_mcast_reserved(&group_addr)) {
+			snprintf(
+				args->errmsg, args->errmsg_len,
+				"Groups within ffx2::/16 are reserved and cannot be joined");
+			return NB_ERR_VALIDATION;
+		}
+#endif
+		break;
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		ifp = nb_running_get_entry(args->dnode, NULL, true);
+		yang_dnode_get_pimaddr(&source_addr, args->dnode,
+				       "./source-addr");
+		yang_dnode_get_pimaddr(&group_addr, args->dnode,
+				       "./group-addr");
+		result = pim_if_gm_join_add(ifp, group_addr, source_addr);
+		if (result) {
+			snprintf(args->errmsg, args->errmsg_len,
+				 "Failure joining " GM " group");
+			return NB_ERR_INCONSISTENCY;
+		}
+	}
+	return NB_OK;
+}
+
+/* Northbound destroy handler for a static group join: leaves the
+ * previously joined (S,G)/(*,G) on the interface.
+ */
+int lib_interface_gmp_address_family_static_group_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	struct interface *ifp;
+	pim_addr source_addr;
+	pim_addr group_addr;
+	int result;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		ifp = nb_running_get_entry(args->dnode, NULL, true);
+		yang_dnode_get_pimaddr(&source_addr, args->dnode,
+				       "./source-addr");
+		yang_dnode_get_pimaddr(&group_addr, args->dnode,
+				       "./group-addr");
+		result = pim_if_gm_join_del(ifp, group_addr, source_addr);
+
+		if (result) {
+			snprintf(args->errmsg, args->errmsg_len,
+				 "%% Failure leaving " GM
+				 " group %pPAs %pPAs on interface %s: %d",
+				 &source_addr, &group_addr, ifp->name, result);
+
+			return NB_ERR_INCONSISTENCY;
+		}
+
+		break;
+	}
+
+	return NB_OK;
+}
diff --git a/pimd/pim_neighbor.c b/pimd/pim_neighbor.c
new file mode 100644
index 0000000..1cd7cce
--- /dev/null
+++ b/pimd/pim_neighbor.c
@@ -0,0 +1,788 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "prefix.h"
+#include "memory.h"
+#include "if.h"
+#include "vty.h"
+#include "plist.h"
+#include "lib_errors.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_neighbor.h"
+#include "pim_time.h"
+#include "pim_str.h"
+#include "pim_iface.h"
+#include "pim_pim.h"
+#include "pim_upstream.h"
+#include "pim_ifchannel.h"
+#include "pim_rp.h"
+#include "pim_zebra.h"
+#include "pim_join.h"
+#include "pim_jp_agg.h"
+#include "pim_bfd.h"
+#include "pim_register.h"
+#include "pim_oil.h"
+
+/* DR election fallback used when at least one neighbor did not advertise
+ * a DR priority: the highest address on the link wins (RFC 4601 4.3.2).
+ */
+static void dr_election_by_addr(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+	struct pim_neighbor *nbr;
+	struct listnode *ln;
+
+	assert(pim_ifp);
+
+	/* Start from our own primary address, then let any neighbor with a
+	 * numerically larger source address take over as DR.
+	 */
+	pim_ifp->pim_dr_addr = pim_ifp->primary_address;
+
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug("%s: on interface %s", __func__, ifp->name);
+
+	for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, ln, nbr))
+		if (pim_addr_cmp(nbr->source_addr, pim_ifp->pim_dr_addr) > 0)
+			pim_ifp->pim_dr_addr = nbr->source_addr;
+}
+
+/* DR election by priority: highest DR priority wins, with the highest
+ * address as tie-breaker (RFC 4601 4.3.2).
+ */
+static void dr_election_by_pri(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+	struct listnode *node;
+	struct pim_neighbor *neigh;
+	uint32_t dr_pri;
+
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+
+	/* Seed the election with our own address and priority. */
+	pim_ifp->pim_dr_addr = pim_ifp->primary_address;
+	dr_pri = pim_ifp->pim_dr_priority;
+
+	if (PIM_DEBUG_PIM_TRACE) {
+		zlog_debug("%s: dr pri %u on interface %s", __func__, dr_pri,
+			   ifp->name);
+	}
+
+	for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, node, neigh)) {
+		if (PIM_DEBUG_PIM_TRACE) {
+			zlog_info("%s: neigh pri %u addr %pPA if dr addr %pPA",
+				  __func__, neigh->dr_priority,
+				  &neigh->source_addr, &pim_ifp->pim_dr_addr);
+		}
+		/* Higher priority wins; equal priority falls back to the
+		 * higher source address.
+		 */
+		if ((neigh->dr_priority > dr_pri) ||
+		    ((neigh->dr_priority == dr_pri) &&
+		     (pim_addr_cmp(neigh->source_addr, pim_ifp->pim_dr_addr) >
+		      0))) {
+			pim_ifp->pim_dr_addr = neigh->source_addr;
+			dr_pri = neigh->dr_priority;
+		}
+	}
+}
+
+/*
+ RFC 4601: 4.3.2. DR Election
+
+ A router's idea of the current DR on an interface can change when a
+ PIM Hello message is received, when a neighbor times out, or when a
+ router's own DR Priority changes.
+ */
+/* Run DR election on the interface.
+ * Returns 1 if the DR changed, 0 otherwise. On a change, join-desired,
+ * could-assert and assert-tracking state are recomputed, and register
+ * state is torn down if we just lost the DR role.
+ */
+int pim_if_dr_election(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+	pim_addr old_dr_addr;
+
+	++pim_ifp->pim_dr_election_count;
+
+	old_dr_addr = pim_ifp->pim_dr_addr;
+
+	/* If any neighbor did not advertise a DR priority, RFC 4601
+	 * requires falling back to address-based election.
+	 */
+	if (pim_ifp->pim_dr_num_nondrpri_neighbors) {
+		dr_election_by_addr(ifp);
+	} else {
+		dr_election_by_pri(ifp);
+	}
+
+	/* DR changed ? */
+	if (pim_addr_cmp(old_dr_addr, pim_ifp->pim_dr_addr)) {
+
+		if (PIM_DEBUG_PIM_EVENTS)
+			zlog_debug(
+				"%s: DR was %pPA now is %pPA on interface %s",
+				__func__, &old_dr_addr, &pim_ifp->pim_dr_addr,
+				ifp->name);
+
+		pim_ifp->pim_dr_election_last =
+			pim_time_monotonic_sec(); /* timestamp */
+		++pim_ifp->pim_dr_election_changes;
+		pim_if_update_join_desired(pim_ifp);
+		pim_if_update_could_assert(ifp);
+		pim_if_update_assert_tracking_desired(ifp);
+
+		if (PIM_I_am_DR(pim_ifp)) {
+			pim_ifp->am_i_dr = true;
+			pim_clear_nocache_state(pim_ifp);
+		} else {
+			/* Lost the DR role: stop registering. */
+			if (pim_ifp->am_i_dr == true) {
+				pim_reg_del_on_couldreg_fail(ifp);
+				pim_ifp->am_i_dr = false;
+			}
+		}
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Update a neighbor's DR priority from a received hello, maintaining the
+ * interface count of neighbors lacking the DR-priority option, and re-run
+ * DR election when anything relevant changed.
+ */
+static void update_dr_priority(struct pim_neighbor *neigh,
+			       pim_hello_options hello_options,
+			       uint32_t dr_priority)
+{
+	pim_hello_options will_set_pri; /* boolean */
+	pim_hello_options bit_flip;     /* boolean */
+	pim_hello_options pri_change;   /* boolean */
+
+	will_set_pri =
+		PIM_OPTION_IS_SET(hello_options, PIM_OPTION_MASK_DR_PRIORITY);
+
+	/* Did the neighbor start or stop advertising a DR priority?
+	 * Note: neigh->hello_options still holds the OLD options here.
+	 */
+	bit_flip = (will_set_pri
+		    != PIM_OPTION_IS_SET(neigh->hello_options,
+					 PIM_OPTION_MASK_DR_PRIORITY));
+
+	if (bit_flip) {
+		struct pim_interface *pim_ifp = neigh->interface->info;
+
+		/* update num. of neighbors without dr_pri */
+
+		if (will_set_pri) {
+			--pim_ifp->pim_dr_num_nondrpri_neighbors;
+		} else {
+			++pim_ifp->pim_dr_num_nondrpri_neighbors;
+		}
+	}
+
+	pri_change = (bit_flip || (neigh->dr_priority != dr_priority));
+
+	if (will_set_pri) {
+		neigh->dr_priority = dr_priority;
+	} else {
+		neigh->dr_priority = 0; /* cosmetic unset */
+	}
+
+	if (pri_change) {
+		/*
+		  RFC 4601: 4.3.2.  DR Election
+
+		  A router's idea of the current DR on an interface can change
+		  when a
+		  PIM Hello message is received, when a neighbor times out, or
+		  when a
+		  router's own DR Priority changes.
+		*/
+		pim_if_dr_election(
+			neigh->interface); // router's own DR Priority changes
+	}
+}
+
+/* Holdtime expiry callback: tear down the neighbor and re-run DR
+ * election on its interface.
+ */
+static void on_neighbor_timer(struct event *t)
+{
+	struct pim_neighbor *neigh;
+	struct interface *ifp;
+	char msg[100];
+
+	neigh = EVENT_ARG(t);
+
+	/* Cache ifp: neigh is freed by pim_neighbor_delete() below. */
+	ifp = neigh->interface;
+
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug(
+			"Expired %d sec holdtime for neighbor %pPA on interface %s",
+			neigh->holdtime, &neigh->source_addr, ifp->name);
+
+	snprintf(msg, sizeof(msg), "%d-sec holdtime expired", neigh->holdtime);
+	pim_neighbor_delete(ifp, neigh, msg);
+
+	/*
+	  RFC 4601: 4.3.2.  DR Election
+
+	  A router's idea of the current DR on an interface can change when a
+	  PIM Hello message is received, when a neighbor times out, or when a
+	  router's own DR Priority changes.
+	*/
+	pim_if_dr_election(ifp); // neighbor times out
+}
+
+/* (Re)arm the neighbor expiry timer with a new holdtime.
+ * A holdtime of 0xFFFF means "never expire" (RFC 7761 hello Holdtime).
+ */
+void pim_neighbor_timer_reset(struct pim_neighbor *neigh, uint16_t holdtime)
+{
+	neigh->holdtime = holdtime;
+
+	EVENT_OFF(neigh->t_expire_timer);
+
+	/*
+	  0xFFFF is request for no holdtime
+	 */
+	if (neigh->holdtime == 0xFFFF) {
+		return;
+	}
+
+	if (PIM_DEBUG_PIM_TRACE_DETAIL)
+		zlog_debug("%s: starting %u sec timer for neighbor %pPA on %s",
+			   __func__, neigh->holdtime, &neigh->source_addr,
+			   neigh->interface->name);
+
+	event_add_timer(router->master, on_neighbor_timer, neigh,
+			neigh->holdtime, &neigh->t_expire_timer);
+}
+
+/* Periodic join/prune timer callback: flush the aggregated J/P list to
+ * this neighbor and re-arm the timer.
+ */
+static void on_neighbor_jp_timer(struct event *t)
+{
+	struct pim_neighbor *neigh = EVENT_ARG(t);
+	struct pim_rpf rpf;
+
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug("%s:Sending JP Agg to %pPA on %s with %d groups",
+			   __func__, &neigh->source_addr,
+			   neigh->interface->name,
+			   neigh->upstream_jp_agg->count);
+
+	/* Build a transient RPF pointing at this neighbor. */
+	rpf.source_nexthop.interface = neigh->interface;
+	rpf.rpf_addr = neigh->source_addr;
+	pim_joinprune_send(&rpf, neigh->upstream_jp_agg);
+
+	event_add_timer(router->master, on_neighbor_jp_timer, neigh,
+			router->t_periodic, &neigh->jp_timer);
+}
+
+/* (Re)start the periodic join/prune aggregation timer for a neighbor. */
+static void pim_neighbor_start_jp_timer(struct pim_neighbor *neigh)
+{
+	EVENT_OFF(neigh->jp_timer);
+	event_add_timer(router->master, on_neighbor_jp_timer, neigh,
+			router->t_periodic, &neigh->jp_timer);
+}
+
+/* Allocate and initialize a new PIM neighbor from a received hello.
+ * Takes ownership of addr_list (stored as neigh->prefix_list), starts the
+ * expiry and J/P timers, updates per-interface LAN-delay/DR bookkeeping,
+ * and registers the neighbor with BFD. Never returns NULL (XCALLOC
+ * aborts on allocation failure).
+ */
+static struct pim_neighbor *
+pim_neighbor_new(struct interface *ifp, pim_addr source_addr,
+		 pim_hello_options hello_options, uint16_t holdtime,
+		 uint16_t propagation_delay, uint16_t override_interval,
+		 uint32_t dr_priority, uint32_t generation_id,
+		 struct list *addr_list)
+{
+	struct pim_interface *pim_ifp;
+	struct pim_neighbor *neigh;
+
+	assert(ifp);
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+
+	neigh = XCALLOC(MTYPE_PIM_NEIGHBOR, sizeof(*neigh));
+
+	neigh->creation = pim_time_monotonic_sec();
+	neigh->source_addr = source_addr;
+	neigh->hello_options = hello_options;
+	neigh->propagation_delay_msec = propagation_delay;
+	neigh->override_interval_msec = override_interval;
+	neigh->dr_priority = dr_priority;
+	neigh->generation_id = generation_id;
+	neigh->prefix_list = addr_list;
+	neigh->t_expire_timer = NULL;
+	neigh->interface = ifp;
+
+	/* Per-neighbor aggregated join/prune list, sorted and owning its
+	 * elements.
+	 */
+	neigh->upstream_jp_agg = list_new();
+	neigh->upstream_jp_agg->cmp = pim_jp_agg_group_list_cmp;
+	neigh->upstream_jp_agg->del =
+		(void (*)(void *))pim_jp_agg_group_list_free;
+	pim_neighbor_start_jp_timer(neigh);
+
+	pim_neighbor_timer_reset(neigh, holdtime);
+	/*
+	 * The pim_ifstat_hello_sent variable is used to decide if
+	 * we should expedite a hello out the interface.  If we
+	 * establish a new neighbor, we unfortunately need to
+	 * reset the value so that we can know to hurry up and
+	 * hello
+	 */
+	PIM_IF_FLAG_UNSET_HELLO_SENT(pim_ifp->flags);
+
+	if (PIM_DEBUG_PIM_EVENTS)
+		zlog_debug("%s: creating PIM neighbor %pPA on interface %s",
+			   __func__, &source_addr, ifp->name);
+
+	zlog_notice("PIM NEIGHBOR UP: neighbor %pPA on interface %s",
+		    &source_addr, ifp->name);
+
+	/* Track the per-interface maxima of LAN-delay parameters
+	 * (RFC 4601 4.3.3, LAN Prune Delay option).
+	 */
+	if (neigh->propagation_delay_msec
+	    > pim_ifp->pim_neighbors_highest_propagation_delay_msec) {
+		pim_ifp->pim_neighbors_highest_propagation_delay_msec =
+			neigh->propagation_delay_msec;
+	}
+	if (neigh->override_interval_msec
+	    > pim_ifp->pim_neighbors_highest_override_interval_msec) {
+		pim_ifp->pim_neighbors_highest_override_interval_msec =
+			neigh->override_interval_msec;
+	}
+
+	if (!PIM_OPTION_IS_SET(neigh->hello_options,
+			       PIM_OPTION_MASK_LAN_PRUNE_DELAY)) {
+		/* update num. of neighbors without hello option lan_delay */
+		++pim_ifp->pim_number_of_nonlandelay_neighbors;
+	}
+
+	if (!PIM_OPTION_IS_SET(neigh->hello_options,
+			       PIM_OPTION_MASK_DR_PRIORITY)) {
+		/* update num. of neighbors without hello option dr_pri */
+		++pim_ifp->pim_dr_num_nondrpri_neighbors;
+	}
+
+	// Register PIM Neighbor with BFD
+	pim_bfd_info_nbr_create(pim_ifp, neigh);
+
+	return neigh;
+}
+
+/* Free the neighbor's secondary-address prefix list, if any.
+ * list_delete() also NULLs neigh->prefix_list.
+ */
+static void delete_prefix_list(struct pim_neighbor *neigh)
+{
+	if (neigh->prefix_list) {
+
+#ifdef DUMP_PREFIX_LIST
+		struct listnode *p_node;
+		struct prefix *p;
+		int list_size = neigh->prefix_list
+					? (int)listcount(neigh->prefix_list)
+					: -1;
+		int i = 0;
+		for (ALL_LIST_ELEMENTS_RO(neigh->prefix_list, p_node, p)) {
+			zlog_debug(
+				"%s: DUMP_PREFIX_LIST neigh=%x prefix_list=%x prefix=%x addr=%pFXh [%d/%d]",
+				__func__, (unsigned)neigh,
+				(unsigned)neigh->prefix_list, (unsigned)p, p, i,
+				list_size);
+			++i;
+		}
+#endif
+
+		list_delete(&neigh->prefix_list);
+	}
+}
+
+/* Release all resources owned by a neighbor and free it.
+ * Caller must have stopped the expiry timer already (asserted).
+ */
+void pim_neighbor_free(struct pim_neighbor *neigh)
+{
+	assert(!neigh->t_expire_timer);
+
+	delete_prefix_list(neigh);
+
+	list_delete(&neigh->upstream_jp_agg);
+	EVENT_OFF(neigh->jp_timer);
+
+	/* Deregister from BFD before freeing. */
+	bfd_sess_free(&neigh->bfd_session);
+
+	XFREE(MTYPE_PIM_NEIGHBOR, neigh);
+}
+
+/* Look up a neighbor on ifp by one of its advertised secondary
+ * addresses. Returns NULL when not found or when PIM is not enabled on
+ * the interface.
+ */
+struct pim_neighbor *pim_neighbor_find_by_secondary(struct interface *ifp,
+						    struct prefix *src)
+{
+	struct pim_interface *pim_ifp;
+	struct pim_neighbor *nbr;
+	struct listnode *nbr_node;
+	struct listnode *pfx_node;
+	struct prefix *pfx;
+
+	if (!ifp)
+		return NULL;
+
+	pim_ifp = ifp->info;
+	if (!pim_ifp)
+		return NULL;
+
+	for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, nbr_node, nbr))
+		for (ALL_LIST_ELEMENTS_RO(nbr->prefix_list, pfx_node, pfx))
+			if (prefix_same(pfx, src))
+				return nbr;
+
+	return NULL;
+}
+
+/* Find a neighbor on ifp by primary source address; when 'secondary' is
+ * set, also match against advertised secondary addresses.
+ */
+struct pim_neighbor *pim_neighbor_find(struct interface *ifp,
+				       pim_addr source_addr, bool secondary)
+{
+	struct pim_interface *pim_ifp;
+	struct listnode *node;
+	struct pim_neighbor *neigh;
+
+	if (!ifp)
+		return NULL;
+
+	pim_ifp = ifp->info;
+	if (!pim_ifp)
+		return NULL;
+
+	for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, node, neigh)) {
+		/* pim_addr_cmp() == 0 means equal addresses. */
+		if (!pim_addr_cmp(source_addr, neigh->source_addr)) {
+			return neigh;
+		}
+	}
+
+	if (secondary) {
+		struct prefix p;
+
+		pim_addr_to_prefix(&p, source_addr);
+		return pim_neighbor_find_by_secondary(ifp, &p);
+	}
+
+	return NULL;
+}
+
+/*
+ * Find the *one* interface out
+ * this interface. If more than
+ * one return NULL
+ */
+/* Return the single neighbor on this interface, or NULL if there are
+ * zero or multiple neighbors (see header comment above).
+ */
+struct pim_neighbor *pim_neighbor_find_if(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+
+	if (!pim_ifp || pim_ifp->pim_neighbor_list->count != 1)
+		return NULL;
+
+	return listnode_head(pim_ifp->pim_neighbor_list);
+}
+
+/* Create a neighbor from a received hello and hook it into the
+ * interface: run DR election, trigger/expedite a hello, and refresh
+ * RPF/RP state now that a new upstream candidate exists.
+ * Takes ownership of addr_list. Returns the new neighbor, or NULL on
+ * failure.
+ */
+struct pim_neighbor *
+pim_neighbor_add(struct interface *ifp, pim_addr source_addr,
+		 pim_hello_options hello_options, uint16_t holdtime,
+		 uint16_t propagation_delay, uint16_t override_interval,
+		 uint32_t dr_priority, uint32_t generation_id,
+		 struct list *addr_list, int send_hello_now)
+{
+	struct pim_interface *pim_ifp;
+	struct pim_neighbor *neigh;
+
+	neigh = pim_neighbor_new(ifp, source_addr, hello_options, holdtime,
+				 propagation_delay, override_interval,
+				 dr_priority, generation_id, addr_list);
+	if (!neigh) {
+		/* Return NULL, not 0: this is a pointer-returning function. */
+		return NULL;
+	}
+
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+
+	listnode_add(pim_ifp->pim_neighbor_list, neigh);
+
+	if (PIM_DEBUG_PIM_TRACE_DETAIL)
+		zlog_debug("%s: neighbor %pPA added ", __func__, &source_addr);
+	/*
+	  RFC 4601: 4.3.2.  DR Election
+
+	  A router's idea of the current DR on an interface can change when a
+	  PIM Hello message is received, when a neighbor times out, or when a
+	  router's own DR Priority changes.
+	*/
+	pim_if_dr_election(neigh->interface); // new neighbor -- should not
+					      // trigger dr election...
+
+	/*
+	  RFC 4601: 4.3.1.  Sending Hello Messages
+
+	  To allow new or rebooting routers to learn of PIM neighbors quickly,
+	  when a Hello message is received from a new neighbor, or a Hello
+	  message with a new GenID is received from an existing neighbor, a
+	  new Hello message should be sent on this interface after a
+	  randomized delay between 0 and Triggered_Hello_Delay.
+
+	  This is a bit silly to do it that way.  If I get a new
+	  genid we need to send the hello *now* because we've
+	  lined up a bunch of join/prune messages to go out the
+	  interface.
+	*/
+	if (send_hello_now)
+		pim_hello_restart_now(ifp);
+	else
+		pim_hello_restart_triggered(neigh->interface);
+
+	pim_upstream_find_new_rpf(pim_ifp->pim);
+
+	/* RNH can send nexthop update prior to PIM neibhor UP
+	   in that case nexthop cache would not consider this neighbor
+	   as RPF.
+	   Upon PIM neighbor UP, iterate all RPs and update
+	   nexthop cache with this neighbor.
+	 */
+	pim_resolve_rp_nh(pim_ifp->pim, neigh);
+
+	pim_rp_setup(pim_ifp->pim);
+
+	sched_rpf_cache_refresh(pim_ifp->pim);
+	return neigh;
+}
+
+/* Recompute the interface's highest neighbor propagation delay as if
+ * highest_neigh were already removed; floor is our own configured value.
+ */
+static uint16_t find_neighbors_next_highest_propagation_delay_msec(
+	struct interface *ifp, struct pim_neighbor *highest_neigh)
+{
+	struct pim_interface *pim_ifp;
+	struct listnode *neigh_node;
+	struct pim_neighbor *neigh;
+	uint16_t next_highest_delay_msec;
+
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+
+	next_highest_delay_msec = pim_ifp->pim_propagation_delay_msec;
+
+	for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, neigh_node,
+				  neigh)) {
+		/* Skip the neighbor that is about to be deleted. */
+		if (neigh == highest_neigh)
+			continue;
+		if (neigh->propagation_delay_msec > next_highest_delay_msec)
+			next_highest_delay_msec = neigh->propagation_delay_msec;
+	}
+
+	return next_highest_delay_msec;
+}
+
+/* Recompute the interface's highest neighbor override interval as if
+ * highest_neigh were already removed; floor is our own configured value.
+ */
+static uint16_t find_neighbors_next_highest_override_interval_msec(
+	struct interface *ifp, struct pim_neighbor *highest_neigh)
+{
+	struct pim_interface *pim_ifp;
+	struct listnode *neigh_node;
+	struct pim_neighbor *neigh;
+	uint16_t next_highest_interval_msec;
+
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+
+	next_highest_interval_msec = pim_ifp->pim_override_interval_msec;
+
+	for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, neigh_node,
+				  neigh)) {
+		/* Skip the neighbor that is about to be deleted. */
+		if (neigh == highest_neigh)
+			continue;
+		if (neigh->override_interval_msec > next_highest_interval_msec)
+			next_highest_interval_msec =
+				neigh->override_interval_msec;
+	}
+
+	return next_highest_interval_msec;
+}
+
+/* Tear down a neighbor: update assert machinery, per-interface option
+ * counters and LAN-delay maxima, unlink it from the interface list and
+ * free it. delete_message is logged for the operator.
+ */
+void pim_neighbor_delete(struct interface *ifp, struct pim_neighbor *neigh,
+			 const char *delete_message)
+{
+	struct pim_interface *pim_ifp;
+
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+
+	zlog_notice("PIM NEIGHBOR DOWN: neighbor %pPA on interface %s: %s",
+		    &neigh->source_addr, ifp->name, delete_message);
+
+	EVENT_OFF(neigh->t_expire_timer);
+
+	pim_if_assert_on_neighbor_down(ifp, neigh->source_addr);
+
+	if (!PIM_OPTION_IS_SET(neigh->hello_options,
+			       PIM_OPTION_MASK_LAN_PRUNE_DELAY)) {
+		/* update num. of neighbors without hello option lan_delay */
+
+		--pim_ifp->pim_number_of_nonlandelay_neighbors;
+	}
+
+	if (!PIM_OPTION_IS_SET(neigh->hello_options,
+			       PIM_OPTION_MASK_DR_PRIORITY)) {
+		/* update num. of neighbors without dr_pri */
+
+		--pim_ifp->pim_dr_num_nondrpri_neighbors;
+	}
+
+	/* Sanity: cached maxima must bound every neighbor's values. */
+	assert(neigh->propagation_delay_msec
+	       <= pim_ifp->pim_neighbors_highest_propagation_delay_msec);
+	assert(neigh->override_interval_msec
+	       <= pim_ifp->pim_neighbors_highest_override_interval_msec);
+
+	if (pim_if_lan_delay_enabled(ifp)) {
+
+		/* will delete a neighbor with highest propagation delay? */
+		if (neigh->propagation_delay_msec
+		    == pim_ifp->pim_neighbors_highest_propagation_delay_msec) {
+			/* then find the next highest propagation delay */
+			pim_ifp->pim_neighbors_highest_propagation_delay_msec =
+				find_neighbors_next_highest_propagation_delay_msec(
+					ifp, neigh);
+		}
+
+		/* will delete a neighbor with highest override interval? */
+		if (neigh->override_interval_msec
+		    == pim_ifp->pim_neighbors_highest_override_interval_msec) {
+			/* then find the next highest propagation delay */
+			pim_ifp->pim_neighbors_highest_override_interval_msec =
+				find_neighbors_next_highest_override_interval_msec(
+					ifp, neigh);
+		}
+	}
+
+	if (PIM_DEBUG_PIM_TRACE) {
+		zlog_debug("%s: deleting PIM neighbor %pPA on interface %s",
+			   __func__, &neigh->source_addr, ifp->name);
+	}
+
+	listnode_delete(pim_ifp->pim_neighbor_list, neigh);
+
+	pim_neighbor_free(neigh);
+
+	sched_rpf_cache_refresh(pim_ifp->pim);
+}
+
+/* Delete every neighbor on the interface (e.g. interface going down).
+ * Uses the deletion-safe list walk since each iteration frees a node.
+ */
+void pim_neighbor_delete_all(struct interface *ifp, const char *delete_message)
+{
+	struct pim_interface *pim_ifp;
+	struct listnode *neigh_node;
+	struct listnode *neigh_nextnode;
+	struct pim_neighbor *neigh;
+
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+
+	for (ALL_LIST_ELEMENTS(pim_ifp->pim_neighbor_list, neigh_node,
+			       neigh_nextnode, neigh)) {
+		pim_neighbor_delete(ifp, neigh, delete_message);
+	}
+}
+
+/* Return the entry in the neighbor's secondary-address list matching
+ * 'addr', or NULL if absent (or if no list was advertised).
+ */
+struct prefix *pim_neighbor_find_secondary(struct pim_neighbor *neigh,
+					   struct prefix *addr)
+{
+	struct listnode *node;
+	struct prefix *p;
+
+	/* Return NULL, not 0: pointer-returning function. */
+	if (!neigh->prefix_list)
+		return NULL;
+
+	for (ALL_LIST_ELEMENTS_RO(neigh->prefix_list, node, p)) {
+		if (prefix_same(p, addr))
+			return p;
+	}
+
+	return NULL;
+}
+
+/*
+ RFC 4601: 4.3.4. Maintaining Secondary Address Lists
+
+ All the advertised secondary addresses in received Hello messages
+ must be checked against those previously advertised by all other
+ PIM neighbors on that interface. If there is a conflict and the
+ same secondary address was previously advertised by another
+ neighbor, then only the most recently received mapping MUST be
+ maintained, and an error message SHOULD be logged to the
+ administrator in a rate-limited manner.
+*/
+/* Enforce RFC 4601 4.3.4: each secondary address in addr_list (just
+ * received from neigh_addr) may be claimed by at most one neighbor, so
+ * remove any stale mapping held by other neighbors on the interface.
+ */
+static void delete_from_neigh_addr(struct interface *ifp,
+				   struct list *addr_list, pim_addr neigh_addr)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+	struct listnode *addr_node;
+	struct prefix *addr;
+
+	assert(pim_ifp);
+	assert(addr_list);
+
+	/* For every received secondary address... */
+	for (ALL_LIST_ELEMENTS_RO(addr_list, addr_node, addr)) {
+		struct listnode *neigh_node;
+		struct pim_neighbor *neigh;
+
+		if (addr->family != PIM_AF)
+			continue;
+
+		/* ...drop any conflicting claim by another neighbor. */
+		for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list,
+					  neigh_node, neigh)) {
+			struct prefix *stale =
+				pim_neighbor_find_secondary(neigh, addr);
+
+			if (!stale)
+				continue;
+
+			zlog_info(
+				"secondary addr %pFXh recvd from neigh %pPA deleted from neigh %pPA on %s",
+				addr, &neigh_addr, &neigh->source_addr,
+				ifp->name);
+
+			listnode_delete(neigh->prefix_list, stale);
+			prefix_free(&stale);
+		}
+	}
+}
+
+/* Refresh an existing neighbor from a newly received hello: reset the
+ * holdtime timer, replace the secondary-address list (taking ownership
+ * of addr_list), update DR priority and the LAN-delay option counter.
+ * NOTE: the old neigh->hello_options must be consulted (by
+ * update_dr_priority() and the 'old' computation) BEFORE the final
+ * assignment overwrites them.
+ */
+void pim_neighbor_update(struct pim_neighbor *neigh,
+			 pim_hello_options hello_options, uint16_t holdtime,
+			 uint32_t dr_priority, struct list *addr_list)
+{
+	struct pim_interface *pim_ifp = neigh->interface->info;
+	uint32_t old, new;
+
+	/* Received holdtime ? */
+	if (PIM_OPTION_IS_SET(hello_options, PIM_OPTION_MASK_HOLDTIME)) {
+		pim_neighbor_timer_reset(neigh, holdtime);
+	} else {
+		pim_neighbor_timer_reset(neigh,
+					 PIM_IF_DEFAULT_HOLDTIME(pim_ifp));
+	}
+
+#ifdef DUMP_PREFIX_LIST
+	zlog_debug(
+		"%s: DUMP_PREFIX_LIST old_prefix_list=%x old_size=%d new_prefix_list=%x new_size=%d",
+		__func__, (unsigned)neigh->prefix_list,
+		neigh->prefix_list ? (int)listcount(neigh->prefix_list) : -1,
+		(unsigned)addr_list,
+		addr_list ? (int)listcount(addr_list) : -1);
+#endif
+
+	if (neigh->prefix_list == addr_list) {
+		if (addr_list) {
+			/* Caller handed us the list we already own; this
+			 * would double-free below, so flag and skip.
+			 */
+			flog_err(
+				EC_LIB_DEVELOPMENT,
+				"%s: internal error: trying to replace same prefix list=%p",
+				__func__, (void *)addr_list);
+		}
+	} else {
+		/* Delete existing secondary address list */
+		delete_prefix_list(neigh);
+	}
+
+	if (addr_list) {
+		/* RFC 4601 4.3.4: resolve secondary-address conflicts in
+		 * favor of the most recent advertiser.
+		 */
+		delete_from_neigh_addr(neigh->interface, addr_list,
+				       neigh->source_addr);
+	}
+
+	/* Replace secondary address list */
+	neigh->prefix_list = addr_list;
+
+	update_dr_priority(neigh, hello_options, dr_priority);
+	new = PIM_OPTION_IS_SET(hello_options, PIM_OPTION_MASK_LAN_PRUNE_DELAY);
+	old = PIM_OPTION_IS_SET(neigh->hello_options,
+				PIM_OPTION_MASK_LAN_PRUNE_DELAY);
+
+	if (old != new) {
+		if (old)
+			++pim_ifp->pim_number_of_nonlandelay_neighbors;
+		else
+			--pim_ifp->pim_number_of_nonlandelay_neighbors;
+	}
+	/*
+	  Copy flags
+	 */
+	neigh->hello_options = hello_options;
+}
diff --git a/pimd/pim_neighbor.h b/pimd/pim_neighbor.h
new file mode 100644
index 0000000..69e9976
--- /dev/null
+++ b/pimd/pim_neighbor.h
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_NEIGHBOR_H
+#define PIM_NEIGHBOR_H
+
+#include <zebra.h>
+
+#include "if.h"
+#include "linklist.h"
+#include "prefix.h"
+
+#include "pim_tlv.h"
+#include "pim_iface.h"
+#include "pim_str.h"
+
+/* State kept for each PIM neighbor discovered via hello messages. */
+struct pim_neighbor {
+	int64_t creation; /* timestamp of creation */
+	pim_addr source_addr;	    /* primary address the hellos come from */
+	pim_hello_options hello_options; /* option bits seen in last hello */
+	uint16_t holdtime;		 /* seconds; 0xFFFF = never expire */
+	uint16_t propagation_delay_msec; /* LAN Prune Delay option */
+	uint16_t override_interval_msec; /* LAN Prune Delay option */
+	uint32_t dr_priority;		 /* 0 when option not advertised */
+	uint32_t generation_id;
+	struct list *prefix_list; /* list of struct prefix */
+	struct event *t_expire_timer; /* holdtime expiry timer */
+	struct interface *interface;  /* interface the neighbor lives on */
+
+	struct event *jp_timer;	      /* periodic join/prune send timer */
+	struct list *upstream_jp_agg; /* aggregated J/P state for this nbr */
+	struct bfd_session_params *bfd_session;
+};
+
+void pim_neighbor_timer_reset(struct pim_neighbor *neigh, uint16_t holdtime);
+void pim_neighbor_free(struct pim_neighbor *neigh);
+struct pim_neighbor *pim_neighbor_find(struct interface *ifp,
+ pim_addr source_addr, bool secondary);
+struct pim_neighbor *pim_neighbor_find_by_secondary(struct interface *ifp,
+ struct prefix *src);
+struct pim_neighbor *pim_neighbor_find_if(struct interface *ifp);
+
+
+#define PIM_NEIGHBOR_SEND_DELAY 0
+#define PIM_NEIGHBOR_SEND_NOW 1
+struct pim_neighbor *
+pim_neighbor_add(struct interface *ifp, pim_addr source_addr,
+ pim_hello_options hello_options, uint16_t holdtime,
+ uint16_t propagation_delay, uint16_t override_interval,
+ uint32_t dr_priority, uint32_t generation_id,
+ struct list *addr_list, int send_hello_now);
+void pim_neighbor_delete(struct interface *ifp, struct pim_neighbor *neigh,
+ const char *delete_message);
+void pim_neighbor_delete_all(struct interface *ifp, const char *delete_message);
+void pim_neighbor_update(struct pim_neighbor *neigh,
+ pim_hello_options hello_options, uint16_t holdtime,
+ uint32_t dr_priority, struct list *addr_list);
+struct prefix *pim_neighbor_find_secondary(struct pim_neighbor *neigh,
+ struct prefix *addr);
+int pim_if_dr_election(struct interface *ifp);
+
+#endif /* PIM_NEIGHBOR_H */
diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c
new file mode 100644
index 0000000..4e8e5f0
--- /dev/null
+++ b/pimd/pim_nht.c
@@ -0,0 +1,1116 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2017 Cumulus Networks, Inc.
+ * Chirag Shah
+ */
+#include <zebra.h>
+#include "network.h"
+#include "zclient.h"
+#include "stream.h"
+#include "nexthop.h"
+#include "if.h"
+#include "hash.h"
+#include "jhash.h"
+
+#include "lib/printfrr.h"
+
+#include "pimd.h"
+#include "pimd/pim_nht.h"
+#include "pim_instance.h"
+#include "log.h"
+#include "pim_time.h"
+#include "pim_oil.h"
+#include "pim_ifchannel.h"
+#include "pim_mroute.h"
+#include "pim_zebra.h"
+#include "pim_upstream.h"
+#include "pim_join.h"
+#include "pim_jp_agg.h"
+#include "pim_zebra.h"
+#include "pim_zlookup.h"
+#include "pim_rp.h"
+#include "pim_addr.h"
+#include "pim_register.h"
+#include "pim_vxlan.h"
+
+/**
+ * pim_sendmsg_zebra_rnh -- Format and send a nexthop register/Unregister
+ * command to Zebra.
+ */
+void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
+ struct pim_nexthop_cache *pnc, int command)
+{
+ struct prefix p;
+ int ret;
+
+ pim_addr_to_prefix(&p, pnc->rpf.rpf_addr);
+ ret = zclient_send_rnh(zclient, command, &p, SAFI_UNICAST, false, false,
+ pim->vrf->vrf_id);
+ if (ret == ZCLIENT_SEND_FAILURE)
+ zlog_warn("sendmsg_nexthop: zclient_send_message() failed");
+
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug(
+ "%s: NHT %sregistered addr %pFX(%s) with Zebra ret:%d ",
+ __func__,
+ (command == ZEBRA_NEXTHOP_REGISTER) ? " " : "de", &p,
+ pim->vrf->name, ret);
+
+ return;
+}
+
+struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim,
+ struct pim_rpf *rpf)
+{
+ struct pim_nexthop_cache *pnc = NULL;
+ struct pim_nexthop_cache lookup;
+
+ lookup.rpf.rpf_addr = rpf->rpf_addr;
+ pnc = hash_lookup(pim->rpf_hash, &lookup);
+
+ return pnc;
+}
+
+static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim,
+ struct pim_rpf *rpf_addr)
+{
+ struct pim_nexthop_cache *pnc;
+ char hash_name[64];
+
+ pnc = XCALLOC(MTYPE_PIM_NEXTHOP_CACHE,
+ sizeof(struct pim_nexthop_cache));
+ pnc->rpf.rpf_addr = rpf_addr->rpf_addr;
+
+ pnc = hash_get(pim->rpf_hash, pnc, hash_alloc_intern);
+
+ pnc->rp_list = list_new();
+ pnc->rp_list->cmp = pim_rp_list_cmp;
+
+ snprintfrr(hash_name, sizeof(hash_name), "PNC %pPA(%s) Upstream Hash",
+ &pnc->rpf.rpf_addr, pim->vrf->name);
+ pnc->upstream_hash = hash_create_size(8192, pim_upstream_hash_key,
+ pim_upstream_equal, hash_name);
+
+ return pnc;
+}
+
+static struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim,
+ pim_addr addr)
+{
+ struct pim_nexthop_cache *pnc = NULL;
+ struct pim_rpf rpf;
+ struct zclient *zclient = NULL;
+
+ zclient = pim_zebra_zclient_get();
+ memset(&rpf, 0, sizeof(rpf));
+ rpf.rpf_addr = addr;
+
+ pnc = pim_nexthop_cache_find(pim, &rpf);
+ if (!pnc) {
+ pnc = pim_nexthop_cache_add(pim, &rpf);
+ pim_sendmsg_zebra_rnh(pim, zclient, pnc,
+ ZEBRA_NEXTHOP_REGISTER);
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug(
+ "%s: NHT cache and zebra notification added for %pPA(%s)",
+ __func__, &addr, pim->vrf->name);
+ }
+
+ return pnc;
+}
+
+/* TBD: this does several distinct things and should probably be split up.
+ * (checking state vs. returning pnc vs. adding upstream vs. adding rp)
+ */
+int pim_find_or_track_nexthop(struct pim_instance *pim, pim_addr addr,
+ struct pim_upstream *up, struct rp_info *rp,
+ struct pim_nexthop_cache *out_pnc)
+{
+ struct pim_nexthop_cache *pnc;
+ struct listnode *ch_node = NULL;
+
+ pnc = pim_nht_get(pim, addr);
+
+ assertf(up || rp, "addr=%pPA", &addr);
+
+ if (rp != NULL) {
+ ch_node = listnode_lookup(pnc->rp_list, rp);
+ if (ch_node == NULL)
+ listnode_add_sort(pnc->rp_list, rp);
+ }
+
+ if (up != NULL)
+ (void)hash_get(pnc->upstream_hash, up, hash_alloc_intern);
+
+ if (CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID)) {
+ if (out_pnc)
+ memcpy(out_pnc, pnc, sizeof(struct pim_nexthop_cache));
+ return 1;
+ }
+
+ return 0;
+}
+
+void pim_nht_bsr_add(struct pim_instance *pim, pim_addr addr)
+{
+ struct pim_nexthop_cache *pnc;
+
+ pnc = pim_nht_get(pim, addr);
+
+ pnc->bsr_count++;
+}
+
+static void pim_nht_drop_maybe(struct pim_instance *pim,
+ struct pim_nexthop_cache *pnc)
+{
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug(
+ "%s: NHT %pPA(%s) rp_list count:%d upstream count:%ld BSR count:%u",
+ __func__, &pnc->rpf.rpf_addr, pim->vrf->name,
+ pnc->rp_list->count, pnc->upstream_hash->count,
+ pnc->bsr_count);
+
+ if (pnc->rp_list->count == 0 && pnc->upstream_hash->count == 0
+ && pnc->bsr_count == 0) {
+ struct zclient *zclient = pim_zebra_zclient_get();
+
+ pim_sendmsg_zebra_rnh(pim, zclient, pnc,
+ ZEBRA_NEXTHOP_UNREGISTER);
+
+ list_delete(&pnc->rp_list);
+ hash_free(pnc->upstream_hash);
+
+ hash_release(pim->rpf_hash, pnc);
+ if (pnc->nexthop)
+ nexthops_free(pnc->nexthop);
+ XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
+ }
+}
+
/* Detach `up` and/or `rp` from the NHT entry for `addr`; when the entry
 * ends up unreferenced, pim_nht_drop_maybe() frees it and deregisters the
 * address from zebra.  Deleting an RP also releases every (*,G) upstream
 * whose group maps to that RP from the entry's upstream hash.
 */
void pim_delete_tracked_nexthop(struct pim_instance *pim, pim_addr addr,
				struct pim_upstream *up, struct rp_info *rp)
{
	struct pim_nexthop_cache *pnc = NULL;
	struct pim_nexthop_cache lookup;
	struct pim_upstream *upstream = NULL;

	/* Remove from RPF hash if it is the last entry */
	lookup.rpf.rpf_addr = addr;
	pnc = hash_lookup(pim->rpf_hash, &lookup);
	if (!pnc) {
		zlog_warn("attempting to delete nonexistent NHT entry %pPA",
			  &addr);
		return;
	}

	if (rp) {
		/* Release the (*, G)upstream from pnc->upstream_hash,
		 * whose Group belongs to the RP getting deleted
		 */
		frr_each (rb_pim_upstream, &pim->upstream_head, upstream) {
			struct prefix grp;
			struct rp_info *trp_info;

			/* only (*,G) entries — source must be ANY */
			if (!pim_addr_is_any(upstream->sg.src))
				continue;

			pim_addr_to_prefix(&grp, upstream->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);
			if (trp_info == rp)
				hash_release(pnc->upstream_hash, upstream);
		}
		listnode_delete(pnc->rp_list, rp);
	}

	if (up)
		hash_release(pnc->upstream_hash, up);

	/* free the entry if this removed its last reference */
	pim_nht_drop_maybe(pim, pnc);
}
+
+void pim_nht_bsr_del(struct pim_instance *pim, pim_addr addr)
+{
+ struct pim_nexthop_cache *pnc = NULL;
+ struct pim_nexthop_cache lookup;
+
+ /*
+ * Nothing to do here if the address to unregister
+ * is 0.0.0.0 as that the BSR has not been registered
+ * for tracking yet.
+ */
+ if (pim_addr_is_any(addr))
+ return;
+
+ lookup.rpf.rpf_addr = addr;
+
+ pnc = hash_lookup(pim->rpf_hash, &lookup);
+
+ if (!pnc) {
+ zlog_warn("attempting to delete nonexistent NHT BSR entry %pPA",
+ &addr);
+ return;
+ }
+
+ assertf(pnc->bsr_count > 0, "addr=%pPA", &addr);
+ pnc->bsr_count--;
+
+ pim_nht_drop_maybe(pim, pnc);
+}
+
/* RPF-validate a received Bootstrap message: returns true when src_ifp is
 * the interface we would use to reach bsr_addr (or both are loopbacks and a
 * PIM neighbor exists on the path).  When no NHT answer has arrived yet, a
 * synchronous zebra lookup is done so the first BSM is not dropped.
 */
bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
			   struct interface *src_ifp, pim_addr src_ip)
{
	struct pim_nexthop_cache *pnc = NULL;
	struct pim_nexthop_cache lookup;
	struct pim_neighbor *nbr = NULL;
	struct nexthop *nh;
	struct interface *ifp;

	lookup.rpf.rpf_addr = bsr_addr;

	pnc = hash_lookup(pim->rpf_hash, &lookup);
	if (!pnc || !CHECK_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED)) {
		/* BSM from a new freshly registered BSR - do a synchronous
		 * zebra query since otherwise we'd drop the first packet,
		 * leading to additional delay in picking up BSM data
		 */

		/* FIXME: this should really be moved into a generic NHT
		 * function that does "add and get immediate result" or maybe
		 * "check cache or get immediate result." But until that can
		 * be worked in, here's a copy of the code below :(
		 */
		struct pim_zlookup_nexthop nexthop_tab[router->multipath];
		ifindex_t i;
		/* NOTE(review): this shadows the outer `ifp`; harmless here
		 * since the outer one is unused before this branch returns,
		 * but worth renaming to silence -Wshadow.
		 */
		struct interface *ifp = NULL;
		int num_ifindex;

		memset(nexthop_tab, 0, sizeof(nexthop_tab));
		num_ifindex = zclient_lookup_nexthop(
			pim, nexthop_tab, router->multipath, bsr_addr,
			PIM_NEXTHOP_LOOKUP_MAX);

		if (num_ifindex <= 0)
			return false;

		for (i = 0; i < num_ifindex; i++) {
			struct pim_zlookup_nexthop *znh = &nexthop_tab[i];

			/* pim_zlookup_nexthop has no ->type */

			/* 1:1 match code below with znh instead of nh */
			ifp = if_lookup_by_index(znh->ifindex,
						 pim->vrf->vrf_id);

			if (!ifp || !ifp->info)
				continue;

			if (if_is_loopback(ifp) && if_is_loopback(src_ifp))
				return true;

			nbr = pim_neighbor_find(ifp, znh->nexthop_addr, true);
			if (!nbr)
				continue;

			/* first usable nexthop decides the verdict */
			return znh->ifindex == src_ifp->ifindex;
		}
		return false;
	}

	if (!CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID))
		return false;

	/* if we accept BSMs from more than one ECMP nexthop, this will cause
	 * BSM message "multiplication" for each ECMP hop. i.e. if you have
	 * 4-way ECMP and 4 hops you end up with 256 copies of each BSM
	 * message.
	 *
	 * so... only accept the first (IPv4) valid nexthop as source.
	 */

	for (nh = pnc->nexthop; nh; nh = nh->next) {
		pim_addr nhaddr;

		switch (nh->type) {
#if PIM_IPV == 4
		case NEXTHOP_TYPE_IPV4:
			if (nh->ifindex == IFINDEX_INTERNAL)
				continue;

			/* fallthru */
		case NEXTHOP_TYPE_IPV4_IFINDEX:
			nhaddr = nh->gate.ipv4;
			break;
		case NEXTHOP_TYPE_IPV6:
		case NEXTHOP_TYPE_IPV6_IFINDEX:
			continue;
#else
		case NEXTHOP_TYPE_IPV6:
			if (nh->ifindex == IFINDEX_INTERNAL)
				continue;

			/* fallthru */
		case NEXTHOP_TYPE_IPV6_IFINDEX:
			nhaddr = nh->gate.ipv6;
			break;
		case NEXTHOP_TYPE_IPV4:
		case NEXTHOP_TYPE_IPV4_IFINDEX:
			continue;
#endif
		case NEXTHOP_TYPE_IFINDEX:
			/* connected route: the BSR itself is the gateway */
			nhaddr = bsr_addr;
			break;

		case NEXTHOP_TYPE_BLACKHOLE:
			continue;
		}

		ifp = if_lookup_by_index(nh->ifindex, pim->vrf->vrf_id);
		if (!ifp || !ifp->info)
			continue;

		if (if_is_loopback(ifp) && if_is_loopback(src_ifp))
			return true;

		/* MRIB (IGP) may be pointing at a router where PIM is down */

		nbr = pim_neighbor_find(ifp, nhaddr, true);

		if (!nbr)
			continue;

		return nh->ifindex == src_ifp->ifindex;
	}
	return false;
}
+
+void pim_rp_nexthop_del(struct rp_info *rp_info)
+{
+ rp_info->rp.source_nexthop.interface = NULL;
+ rp_info->rp.source_nexthop.mrib_nexthop_addr = PIMADDR_ANY;
+ rp_info->rp.source_nexthop.mrib_metric_preference =
+ router->infinite_assert_metric.metric_preference;
+ rp_info->rp.source_nexthop.mrib_route_metric =
+ router->infinite_assert_metric.route_metric;
+}
+
+/* Update RP nexthop info based on Nexthop update received from Zebra.*/
+static void pim_update_rp_nh(struct pim_instance *pim,
+ struct pim_nexthop_cache *pnc)
+{
+ struct listnode *node = NULL;
+ struct rp_info *rp_info = NULL;
+ struct interface *ifp;
+
+ /*Traverse RP list and update each RP Nexthop info */
+ for (ALL_LIST_ELEMENTS_RO(pnc->rp_list, node, rp_info)) {
+ if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
+ continue;
+
+ ifp = rp_info->rp.source_nexthop.interface;
+ // Compute PIM RPF using cached nexthop
+ if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
+ rp_info->rp.rpf_addr,
+ &rp_info->group, 1))
+ pim_rp_nexthop_del(rp_info);
+
+ /*
+ * If we transition from no path to a path
+ * we need to search through all the vxlan's
+ * that use this rp and send NULL registers
+ * for all the vxlan S,G streams
+ */
+ if (!ifp && rp_info->rp.source_nexthop.interface)
+ pim_vxlan_rp_info_is_alive(pim, &rp_info->rp);
+ }
+}
+
/* Update Upstream nexthop info based on Nexthop update received from Zebra.
 * hash_walk callback over a cache entry's upstream hash; arg is the
 * pim_instance.  Always returns HASHWALK_CONTINUE.
 */
static int pim_update_upstream_nh_helper(struct hash_bucket *bucket, void *arg)
{
	struct pim_instance *pim = (struct pim_instance *)arg;
	struct pim_upstream *up = (struct pim_upstream *)bucket->data;

	enum pim_rpf_result rpf_result;
	struct pim_rpf old;

	/* capture the pre-update RPF interface for transition detection */
	old.source_nexthop.interface = up->rpf.source_nexthop.interface;
	rpf_result = pim_rpf_update(pim, up, &old, __func__);

	/* update kernel multicast forwarding cache (MFC); if the
	 * RPF nbr is now unreachable the MFC has already been updated
	 * by pim_rpf_clear
	 */
	if (rpf_result == PIM_RPF_CHANGED)
		pim_upstream_mroute_iif_update(up->channel_oil, __func__);

	if (rpf_result == PIM_RPF_CHANGED ||
	    (rpf_result == PIM_RPF_FAILURE && old.source_nexthop.interface))
		pim_zebra_upstream_rpf_changed(pim, up, &old);

	/*
	 * If we are a VXLAN source and we are transitioning from not
	 * having an outgoing interface to having an outgoing interface
	 * let's immediately send the null pim register
	 */
	if (!old.source_nexthop.interface && up->rpf.source_nexthop.interface &&
	    PIM_UPSTREAM_FLAG_TEST_SRC_VXLAN_ORIG(up->flags) &&
	    (up->reg_state == PIM_REG_NOINFO || up->reg_state == PIM_REG_JOIN)) {
		pim_null_register_send(up);
	}

	if (PIM_DEBUG_PIM_NHT) {
		zlog_debug("%s: NHT upstream %s(%s) old ifp %s new ifp %s rpf_result: %d",
			   __func__, up->sg_str, pim->vrf->name,
			   old.source_nexthop.interface ? old.source_nexthop
							  .interface->name
						       : "Unknown",
			   up->rpf.source_nexthop.interface ? up->rpf.source_nexthop
							      .interface->name
							   : "Unknown",
			   rpf_result);
	}

	return HASHWALK_CONTINUE;
}
+
+static int pim_update_upstream_nh(struct pim_instance *pim,
+ struct pim_nexthop_cache *pnc)
+{
+ hash_walk(pnc->upstream_hash, pim_update_upstream_nh_helper, pim);
+
+ pim_zebra_update_all_interfaces(pim);
+
+ return 0;
+}
+
+static int pim_upstream_nh_if_update_helper(struct hash_bucket *bucket,
+ void *arg)
+{
+ struct pim_nexthop_cache *pnc = bucket->data;
+ struct pnc_hash_walk_data *pwd = arg;
+ struct pim_instance *pim = pwd->pim;
+ struct interface *ifp = pwd->ifp;
+ struct nexthop *nh_node = NULL;
+ ifindex_t first_ifindex;
+
+ for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
+ first_ifindex = nh_node->ifindex;
+ if (ifp != if_lookup_by_index(first_ifindex, pim->vrf->vrf_id))
+ continue;
+
+ if (pnc->upstream_hash->count) {
+ pim_update_upstream_nh(pim, pnc);
+ break;
+ }
+ }
+
+ return HASHWALK_CONTINUE;
+}
+
+void pim_upstream_nh_if_update(struct pim_instance *pim, struct interface *ifp)
+{
+ struct pnc_hash_walk_data pwd;
+
+ pwd.pim = pim;
+ pwd.ifp = ifp;
+
+ hash_walk(pim->rpf_hash, pim_upstream_nh_if_update_helper, &pwd);
+}
+
+uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp)
+{
+ uint32_t hash_val;
+
+ if (!src)
+ return 0;
+
+ hash_val = prefix_hash_key(src);
+ if (grp)
+ hash_val ^= prefix_hash_key(grp);
+ return hash_val;
+}
+
/* Pick an RPF nexthop toward `src` from the cached zebra answer in `pnc`,
 * honouring ECMP hashing (when pim->ecmp_enable) and, unless rebalancing
 * is enabled, sticking with the currently selected path while it remains
 * valid.  Fills in *nexthop and returns 1 on success, 0 otherwise.
 */
static int pim_ecmp_nexthop_search(struct pim_instance *pim,
				   struct pim_nexthop_cache *pnc,
				   struct pim_nexthop *nexthop, pim_addr src,
				   struct prefix *grp, int neighbor_needed)
{
	struct pim_neighbor *nbrs[router->multipath], *nbr = NULL;
	struct interface *ifps[router->multipath];
	struct nexthop *nh_node = NULL;
	ifindex_t first_ifindex;
	struct interface *ifp = NULL;
	uint32_t hash_val = 0, mod_val = 0;
	uint8_t nh_iter = 0, found = 0;
	uint32_t i, num_nbrs = 0;
	struct pim_interface *pim_ifp;

	if (!pnc || !pnc->nexthop_num || !nexthop)
		return 0;

	pim_addr nh_addr = nexthop->mrib_nexthop_addr;
	pim_addr grp_addr = pim_addr_from_prefix(grp);

	memset(&nbrs, 0, sizeof(nbrs));
	memset(&ifps, 0, sizeof(ifps));


	// Current Nexthop is VALID, check to stay on the current path.
	if (nexthop->interface && nexthop->interface->info &&
	    (!pim_addr_is_any(nh_addr))) {
		/* User configured knob to explicitly switch
		   to new path is disabled or current path
		   metric is less than nexthop update.
		   */

		if (pim->ecmp_rebalance_enable == 0) {
			uint8_t curr_route_valid = 0;
			// Check if current nexthop is present in new updated
			// Nexthop list.
			// If the current nexthop is not valid, candidate to
			// choose new Nexthop.
			for (nh_node = pnc->nexthop; nh_node;
			     nh_node = nh_node->next) {
				curr_route_valid = (nexthop->interface->ifindex
						    == nh_node->ifindex);
				if (curr_route_valid)
					break;
			}

			if (curr_route_valid &&
			    !pim_if_connected_to_source(nexthop->interface,
							src)) {
				nbr = pim_neighbor_find(
					nexthop->interface,
					nexthop->mrib_nexthop_addr, true);
				if (!nbr
				    && !if_is_loopback(nexthop->interface)) {
					if (PIM_DEBUG_PIM_NHT)
						zlog_debug(
							"%s: current nexthop does not have nbr ",
							__func__);
				} else {
					/* update metric even if the upstream
					 * neighbor stays unchanged
					 */
					nexthop->mrib_metric_preference =
						pnc->distance;
					nexthop->mrib_route_metric =
						pnc->metric;
					if (PIM_DEBUG_PIM_NHT)
						zlog_debug(
							"%s: (%pPA,%pPA)(%s) current nexthop %s is valid, skipping new path selection",
							__func__, &src,
							&grp_addr,
							pim->vrf->name,
							nexthop->interface->name);
					return 1;
				}
			}
		}
	}

	/*
	 * Look up all interfaces and neighbors,
	 * store for later usage
	 */
	for (nh_node = pnc->nexthop, i = 0; nh_node;
	     nh_node = nh_node->next, i++) {
		ifps[i] =
			if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
		if (ifps[i]) {
#if PIM_IPV == 4
			pim_addr nhaddr = nh_node->gate.ipv4;
#else
			pim_addr nhaddr = nh_node->gate.ipv6;
#endif
			nbrs[i] = pim_neighbor_find(ifps[i], nhaddr, true);
			if (nbrs[i] || pim_if_connected_to_source(ifps[i], src))
				num_nbrs++;
		}
	}
	if (pim->ecmp_enable) {
		struct prefix src_pfx;
		uint32_t consider = pnc->nexthop_num;

		if (neighbor_needed && num_nbrs < consider)
			consider = num_nbrs;

		if (consider == 0)
			return 0;

		// PIM ECMP flag is enabled, so choose the ECMP path by
		// hashing (src, grp) over the usable nexthops.
		pim_addr_to_prefix(&src_pfx, src);
		hash_val = pim_compute_ecmp_hash(&src_pfx, grp);
		mod_val = hash_val % consider;
	}

	/* walk the nexthop list; the candidate at position mod_val wins.
	 * Unusable candidates bump mod_val so the selection slides to the
	 * next usable path.
	 */
	for (nh_node = pnc->nexthop; nh_node && (found == 0);
	     nh_node = nh_node->next) {
		first_ifindex = nh_node->ifindex;
		ifp = ifps[nh_iter];
		if (!ifp) {
			if (PIM_DEBUG_PIM_NHT)
				zlog_debug(
					"%s %s: could not find interface for ifindex %d (address %pPA(%s))",
					__FILE__, __func__, first_ifindex, &src,
					pim->vrf->name);
			if (nh_iter == mod_val)
				mod_val++; // select the next path
			nh_iter++;
			continue;
		}

		pim_ifp = ifp->info;

		if (!pim_ifp || !pim_ifp->pim_enable) {
			if (PIM_DEBUG_PIM_NHT)
				zlog_debug(
					"%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
					__func__, ifp->name, pim->vrf->name,
					first_ifindex, &src);
			if (nh_iter == mod_val)
				mod_val++; // select the next path
			nh_iter++;
			continue;
		}

		if (neighbor_needed && !pim_if_connected_to_source(ifp, src)) {
			nbr = nbrs[nh_iter];
			if (!nbr && !if_is_loopback(ifp)) {
				if (PIM_DEBUG_PIM_NHT)
					zlog_debug(
						"%s: pim nbr not found on input interface %s(%s)",
						__func__, ifp->name,
						pim->vrf->name);
				if (nh_iter == mod_val)
					mod_val++; // select the next path
				nh_iter++;
				continue;
			}
		}

		if (nh_iter == mod_val) {
			nexthop->interface = ifp;
#if PIM_IPV == 4
			nexthop->mrib_nexthop_addr = nh_node->gate.ipv4;
#else
			nexthop->mrib_nexthop_addr = nh_node->gate.ipv6;
#endif
			nexthop->mrib_metric_preference = pnc->distance;
			nexthop->mrib_route_metric = pnc->metric;
			nexthop->last_lookup = src;
			nexthop->last_lookup_time = pim_time_monotonic_usec();
			nexthop->nbr = nbr;
			found = 1;
			if (PIM_DEBUG_PIM_NHT)
				zlog_debug(
					"%s: (%pPA,%pPA)(%s) selected nhop interface %s addr %pPAs mod_val %u iter %d ecmp %d",
					__func__, &src, &grp_addr,
					pim->vrf->name, ifp->name, &nh_addr,
					mod_val, nh_iter, pim->ecmp_enable);
		}
		nh_iter++;
	}

	if (found)
		return 1;
	else
		return 0;
}
+
/* This API is used to parse a registered address nexthop update coming from
 * Zebra.  It rebuilds the cache entry's nexthop list from the zapi message
 * (normalizing connected and RFC5549 nexthops), updates the validity flags,
 * and then re-runs RP and upstream nexthop selection off the new data.
 */
int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)
{
	struct nexthop *nexthop;
	struct nexthop *nhlist_head = NULL;
	struct nexthop *nhlist_tail = NULL;
	int i;
	struct pim_rpf rpf;
	struct pim_nexthop_cache *pnc = NULL;
	struct interface *ifp = NULL;
	struct vrf *vrf = vrf_lookup_by_id(vrf_id);
	struct pim_instance *pim;
	struct zapi_route nhr;
	struct prefix match;

	if (!vrf)
		return 0;
	pim = vrf->info;

	if (!zapi_nexthop_update_decode(zclient->ibuf, &match, &nhr)) {
		zlog_err("%s: Decode of nexthop update from zebra failed",
			 __func__);
		return 0;
	}

	/* only process updates for addresses we actually track */
	rpf.rpf_addr = pim_addr_from_prefix(&match);
	pnc = pim_nexthop_cache_find(pim, &rpf);
	if (!pnc) {
		if (PIM_DEBUG_PIM_NHT)
			zlog_debug(
				"%s: Skipping NHT update, addr %pPA is not in local cached DB.",
				__func__, &rpf.rpf_addr);
		return 0;
	}

	pnc->last_update = pim_time_monotonic_usec();

	if (nhr.nexthop_num) {
		pnc->nexthop_num = 0;

		for (i = 0; i < nhr.nexthop_num; i++) {
			nexthop = nexthop_from_zapi_nexthop(&nhr.nexthops[i]);
			switch (nexthop->type) {
			case NEXTHOP_TYPE_IFINDEX:
				/*
				 * Connected route (i.e. no nexthop), use
				 * RPF address from nexthop cache (i.e.
				 * destination) as PIM nexthop.
				 */
#if PIM_IPV == 4
				nexthop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
				nexthop->gate.ipv4 = pnc->rpf.rpf_addr;
#else
				nexthop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
				nexthop->gate.ipv6 = pnc->rpf.rpf_addr;
#endif
				break;
#if PIM_IPV == 4
			/* RFC5549 IPv4-over-IPv6 nexthop handling:
			 * if we get an IPv6 nexthop in IPv4 PIM, hunt down a
			 * PIM neighbor and use that instead.
			 */
			case NEXTHOP_TYPE_IPV6_IFINDEX: {
				struct interface *ifp1 = NULL;
				struct pim_neighbor *nbr = NULL;

				ifp1 = if_lookup_by_index(nexthop->ifindex,
							  pim->vrf->vrf_id);

				if (!ifp1)
					nbr = NULL;
				else
					/* FIXME: should really use nbr's
					 * secondary address list here
					 */
					nbr = pim_neighbor_find_if(ifp1);

				/* Overwrite with Nbr address as NH addr */
				if (nbr)
					nexthop->gate.ipv4 = nbr->source_addr;
				else
					// Mark nexthop address to 0 until PIM
					// Nbr is resolved.
					nexthop->gate.ipv4 = PIMADDR_ANY;

				break;
			}
#else
			case NEXTHOP_TYPE_IPV6_IFINDEX:
#endif
			case NEXTHOP_TYPE_IPV6:
			case NEXTHOP_TYPE_IPV4:
			case NEXTHOP_TYPE_IPV4_IFINDEX:
			case NEXTHOP_TYPE_BLACKHOLE:
				/* nothing to do for the other nexthop types */
				break;
			}

			ifp = if_lookup_by_index(nexthop->ifindex,
						 pim->vrf->vrf_id);
			if (!ifp) {
				if (PIM_DEBUG_PIM_NHT) {
					char buf[NEXTHOP_STRLEN];
					zlog_debug(
						"%s: could not find interface for ifindex %d(%s) (addr %s)",
						__func__, nexthop->ifindex,
						pim->vrf->name,
						nexthop2str(nexthop, buf,
							    sizeof(buf)));
				}
				nexthop_free(nexthop);
				continue;
			}

			if (PIM_DEBUG_PIM_NHT) {
#if PIM_IPV == 4
				pim_addr nhaddr = nexthop->gate.ipv4;
#else
				pim_addr nhaddr = nexthop->gate.ipv6;
#endif
				zlog_debug(
					"%s: NHT addr %pFX(%s) %d-nhop via %pPA(%s) type %d distance:%u metric:%u ",
					__func__, &match, pim->vrf->name, i + 1,
					&nhaddr, ifp->name, nexthop->type,
					nhr.distance, nhr.metric);
			}

			if (!ifp->info) {
				/*
				 * Though multicast is not enabled on this
				 * interface, store it in the database;
				 * otherwise we may miss this update.  This
				 * causes no issue, because path selection
				 * already omits interfaces that are not
				 * multicast enabled.
				 */
				if (PIM_DEBUG_PIM_NHT) {
					char buf[NEXTHOP_STRLEN];

					zlog_debug(
						"%s: multicast not enabled on input interface %s(%s) (ifindex=%d, addr %s)",
						__func__, ifp->name,
						pim->vrf->name,
						nexthop->ifindex,
						nexthop2str(nexthop, buf,
							    sizeof(buf)));
				}
			}

			/* append to the tail of the new nexthop list */
			if (nhlist_tail) {
				nhlist_tail->next = nexthop;
				nhlist_tail = nexthop;
			} else {
				nhlist_tail = nexthop;
				nhlist_head = nexthop;
			}

			// Keep track of all nexthops, even PIM-disabled ones.
			pnc->nexthop_num++;
		}
		/* Reset existing pnc->nexthop before assigning new list */
		nexthops_free(pnc->nexthop);
		pnc->nexthop = nhlist_head;
		if (pnc->nexthop_num) {
			pnc->flags |= PIM_NEXTHOP_VALID;
			pnc->distance = nhr.distance;
			pnc->metric = nhr.metric;
		}
	} else {
		/* destination became unreachable: invalidate the entry */
		pnc->flags &= ~PIM_NEXTHOP_VALID;
		pnc->nexthop_num = nhr.nexthop_num;
		nexthops_free(pnc->nexthop);
		pnc->nexthop = NULL;
	}
	SET_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED);

	if (PIM_DEBUG_PIM_NHT)
		zlog_debug(
			"%s: NHT Update for %pFX(%s) num_nh %d num_pim_nh %d vrf:%u up %ld rp %d",
			__func__, &match, pim->vrf->name, nhr.nexthop_num,
			pnc->nexthop_num, vrf_id, pnc->upstream_hash->count,
			listcount(pnc->rp_list));

	pim_rpf_set_refresh_time(pim);

	/* propagate the new nexthop data to RPs and upstreams */
	if (listcount(pnc->rp_list))
		pim_update_rp_nh(pim, pnc);
	if (pnc->upstream_hash->count)
		pim_update_upstream_nh(pim, pnc);

	return 0;
}
+
/* Resolve the RPF nexthop toward `src`, preferring the NHT cache when an
 * answer from zebra has already arrived; otherwise fall back to a
 * synchronous zebra lookup and apply the same ECMP/neighbor filtering as
 * pim_ecmp_nexthop_search().  Fills *nexthop and returns 1 on success,
 * 0 on failure.
 */
int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
			    struct pim_nexthop *nexthop, pim_addr src,
			    struct prefix *grp, int neighbor_needed)
{
	struct pim_nexthop_cache *pnc;
	struct pim_zlookup_nexthop nexthop_tab[router->multipath];
	struct pim_neighbor *nbrs[router->multipath], *nbr = NULL;
	struct pim_rpf rpf;
	int num_ifindex;
	struct interface *ifps[router->multipath], *ifp;
	int first_ifindex;
	int found = 0;
	uint8_t i = 0;
	uint32_t hash_val = 0, mod_val = 0;
	uint32_t num_nbrs = 0;
	struct pim_interface *pim_ifp;

	if (PIM_DEBUG_PIM_NHT_DETAIL)
		zlog_debug("%s: Looking up: %pPA(%s), last lookup time: %lld",
			   __func__, &src, pim->vrf->name,
			   nexthop->last_lookup_time);

	rpf.rpf_addr = src;

	/* fast path: answer already cached from zebra NHT */
	pnc = pim_nexthop_cache_find(pim, &rpf);
	if (pnc) {
		if (CHECK_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED))
			return pim_ecmp_nexthop_search(pim, pnc, nexthop, src, grp,
						       neighbor_needed);
	}

	/* slow path: synchronous zebra route lookup */
	memset(nexthop_tab, 0,
	       sizeof(struct pim_zlookup_nexthop) * router->multipath);
	num_ifindex =
		zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, src,
				       PIM_NEXTHOP_LOOKUP_MAX);
	if (num_ifindex < 1) {
		if (PIM_DEBUG_PIM_NHT)
			zlog_warn(
				"%s: could not find nexthop ifindex for address %pPA(%s)",
				__func__, &src, pim->vrf->name);
		return 0;
	}

	memset(&nbrs, 0, sizeof(nbrs));
	memset(&ifps, 0, sizeof(ifps));

	/*
	 * Look up all interfaces and neighbors,
	 * store for later usage
	 */
	for (i = 0; i < num_ifindex; i++) {
		ifps[i] = if_lookup_by_index(nexthop_tab[i].ifindex,
					     pim->vrf->vrf_id);
		if (ifps[i]) {
			nbrs[i] = pim_neighbor_find(
				ifps[i], nexthop_tab[i].nexthop_addr, true);

			if (nbrs[i] || pim_if_connected_to_source(ifps[i], src))
				num_nbrs++;
		}
	}

	// If PIM ECMP enable then choose ECMP path.
	if (pim->ecmp_enable) {
		struct prefix src_pfx;
		uint32_t consider = num_ifindex;

		if (neighbor_needed && num_nbrs < consider)
			consider = num_nbrs;

		if (consider == 0)
			return 0;

		pim_addr_to_prefix(&src_pfx, src);
		hash_val = pim_compute_ecmp_hash(&src_pfx, grp);
		mod_val = hash_val % consider;
		if (PIM_DEBUG_PIM_NHT_DETAIL)
			zlog_debug("%s: hash_val %u mod_val %u", __func__,
				   hash_val, mod_val);
	}

	/* pick the candidate at position mod_val; unusable candidates bump
	 * mod_val so the selection slides to the next usable path
	 */
	i = 0;
	while (!found && (i < num_ifindex)) {
		first_ifindex = nexthop_tab[i].ifindex;

		ifp = ifps[i];
		if (!ifp) {
			if (PIM_DEBUG_PIM_NHT)
				zlog_debug(
					"%s %s: could not find interface for ifindex %d (address %pPA(%s))",
					__FILE__, __func__, first_ifindex, &src,
					pim->vrf->name);
			if (i == mod_val)
				mod_val++;
			i++;
			continue;
		}

		pim_ifp = ifp->info;

		if (!pim_ifp || !pim_ifp->pim_enable) {
			if (PIM_DEBUG_PIM_NHT)
				zlog_debug(
					"%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
					__func__, ifp->name, pim->vrf->name,
					first_ifindex, &src);
			if (i == mod_val)
				mod_val++;
			i++;
			continue;
		}
		if (neighbor_needed && !pim_if_connected_to_source(ifp, src)) {
			nbr = nbrs[i];
			if (PIM_DEBUG_PIM_NHT_DETAIL)
				zlog_debug("ifp name: %s(%s), pim nbr: %p",
					   ifp->name, pim->vrf->name, nbr);
			if (!nbr && !if_is_loopback(ifp)) {
				if (i == mod_val)
					mod_val++;
				if (PIM_DEBUG_PIM_NHT)
					zlog_debug(
						"%s: NBR (%pPA) not found on input interface %s(%s) (RPF for source %pPA)",
						__func__,
						&nexthop_tab[i].nexthop_addr,
						ifp->name, pim->vrf->name,
						&src);
				i++;
				continue;
			}
		}

		if (i == mod_val) {
			if (PIM_DEBUG_PIM_NHT)
				zlog_debug(
					"%s: found nhop %pPA for addr %pPA interface %s(%s) metric %d dist %d",
					__func__, &nexthop_tab[i].nexthop_addr,
					&src, ifp->name, pim->vrf->name,
					nexthop_tab[i].route_metric,
					nexthop_tab[i].protocol_distance);
			/* update nexthop data */
			nexthop->interface = ifp;
			nexthop->mrib_nexthop_addr =
				nexthop_tab[i].nexthop_addr;
			nexthop->mrib_metric_preference =
				nexthop_tab[i].protocol_distance;
			nexthop->mrib_route_metric =
				nexthop_tab[i].route_metric;
			nexthop->last_lookup = src;
			nexthop->last_lookup_time = pim_time_monotonic_usec();
			nexthop->nbr = nbr;
			found = 1;
		}
		i++;
	}

	if (found)
		return 1;
	else
		return 0;
}
+
+int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim, pim_addr src,
+ struct prefix *grp)
+{
+ struct pim_nexthop nhop;
+ int vif_index;
+ ifindex_t ifindex;
+
+ memset(&nhop, 0, sizeof(nhop));
+ if (!pim_ecmp_nexthop_lookup(pim, &nhop, src, grp, 1)) {
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug(
+ "%s: could not find nexthop ifindex for address %pPA(%s)",
+ __func__, &src, pim->vrf->name);
+ return -1;
+ }
+
+ ifindex = nhop.interface->ifindex;
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug(
+ "%s: found nexthop ifindex=%d (interface %s(%s)) for address %pPA",
+ __func__, ifindex,
+ ifindex2ifname(ifindex, pim->vrf->vrf_id),
+ pim->vrf->name, &src);
+
+ vif_index = pim_if_find_vifindex_by_ifindex(pim, ifindex);
+
+ if (vif_index < 0) {
+ if (PIM_DEBUG_PIM_NHT) {
+ zlog_debug(
+ "%s: low vif_index=%d(%s) < 1 nexthop for address %pPA",
+ __func__, vif_index, pim->vrf->name, &src);
+ }
+ return -2;
+ }
+
+ return vif_index;
+}
diff --git a/pimd/pim_nht.h b/pimd/pim_nht.h
new file mode 100644
index 0000000..5a54e1c
--- /dev/null
+++ b/pimd/pim_nht.h
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2017 Cumulus Networks, Inc.
+ * Chirag Shah
+ */
+#ifndef PIM_NHT_H
+#define PIM_NHT_H
+
+#include "prefix.h"
+#include <zebra.h>
+#include "zclient.h"
+#include "vrf.h"
+
+#include "pimd.h"
+#include "pim_rp.h"
+#include "pim_rpf.h"
+
/* PIM nexthop cache value structure: one entry per address registered with
 * zebra for nexthop tracking, keyed by rpf.rpf_addr in pim->rpf_hash.
 */
struct pim_nexthop_cache {
	struct pim_rpf rpf;	/* hash key: the tracked address */
	/* IGP route's metric. */
	uint32_t metric;
	uint32_t distance;	/* IGP route's administrative distance */
	/* Nexthop number and nexthop linked list. */
	uint8_t nexthop_num;
	struct nexthop *nexthop;
	int64_t last_update;	/* monotonic usec of last zebra update */
	uint16_t flags;
#define PIM_NEXTHOP_VALID (1 << 0)		/* usable nexthop(s) present */
#define PIM_NEXTHOP_ANSWER_RECEIVED (1 << 1)	/* zebra has replied at least once */

	struct list *rp_list;		/* RPs resolved via this address */
	struct hash *upstream_hash;	/* upstreams resolved via this address */

	/* bsr_count won't currently go above 1 as we only have global_scope,
	 * but if anyone adds scope support multiple scopes may NHT-track the
	 * same BSR
	 */
	uint32_t bsr_count;
};
+
/* Argument bundle for hash_walk callbacks over the rpf_hash. */
struct pnc_hash_walk_data {
	struct pim_instance *pim;	/* instance being walked */
	struct interface *ifp;		/* interface of interest */
};
+
+int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS);
+int pim_find_or_track_nexthop(struct pim_instance *pim, pim_addr addr,
+ struct pim_upstream *up, struct rp_info *rp,
+ struct pim_nexthop_cache *out_pnc);
+void pim_delete_tracked_nexthop(struct pim_instance *pim, pim_addr addr,
+ struct pim_upstream *up, struct rp_info *rp);
+struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim,
+ struct pim_rpf *rpf);
+uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp);
+int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
+ struct pim_nexthop *nexthop, pim_addr src,
+ struct prefix *grp, int neighbor_needed);
+void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
+ struct pim_nexthop_cache *pnc, int command);
+int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim, pim_addr src,
+ struct prefix *grp);
+void pim_rp_nexthop_del(struct rp_info *rp_info);
+
+/* for RPF check on BSM message receipt */
+void pim_nht_bsr_add(struct pim_instance *pim, pim_addr bsr_addr);
+void pim_nht_bsr_del(struct pim_instance *pim, pim_addr bsr_addr);
+/* RPF(bsr_addr) == src_ip%src_ifp? */
+bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
+ struct interface *src_ifp, pim_addr src_ip);
+void pim_upstream_nh_if_update(struct pim_instance *pim, struct interface *ifp);
+#endif
diff --git a/pimd/pim_oil.c b/pimd/pim_oil.c
new file mode 100644
index 0000000..d18406d
--- /dev/null
+++ b/pimd/pim_oil.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "memory.h"
+#include "linklist.h"
+#include "if.h"
+#include "hash.h"
+#include "jhash.h"
+
+#include "pimd.h"
+#include "pim_oil.h"
+#include "pim_str.h"
+#include "pim_iface.h"
+#include "pim_time.h"
+#include "pim_vxlan.h"
+
+static void pim_channel_update_mute(struct channel_oil *c_oil);
+
+/* Render c_oil into buf as "<S,G> IIF: <name>, OIFS: <name> <name> ...".
+ * Unknown VIF indexes print as "(?)".  Returns buf for printf-style use.
+ */
+char *pim_channel_oil_dump(struct channel_oil *c_oil, char *buf, size_t size)
+{
+	char *out;
+	struct interface *ifp;
+	pim_sgaddr sg;
+	int i;
+
+	sg.src = *oil_origin(c_oil);
+	sg.grp = *oil_mcastgrp(c_oil);
+	ifp = pim_if_find_by_vif_index(c_oil->pim, *oil_incoming_vif(c_oil));
+	snprintfrr(buf, size, "%pSG IIF: %s, OIFS: ", &sg,
+		   ifp ? ifp->name : "(?)");
+
+	/* append one interface name per VIF present in the OIL;
+	 * "buf + size - out" is the space remaining in buf
+	 */
+	out = buf + strlen(buf);
+	for (i = 0; i < MAXVIFS; i++) {
+		if (oil_if_has(c_oil, i) != 0) {
+			ifp = pim_if_find_by_vif_index(c_oil->pim, i);
+			snprintf(out, buf + size - out, "%s ",
+				 ifp ? ifp->name : "(?)");
+			out += strlen(out);
+		}
+	}
+
+	return buf;
+}
+
+/* RB-tree comparator for the per-instance channel-oil tree:
+ * orders by group address first, then by source address.
+ * (Casts drop const because the oil_*() accessors take non-const.)
+ */
+int pim_channel_oil_compare(const struct channel_oil *cc1,
+			    const struct channel_oil *cc2)
+{
+	struct channel_oil *c1 = (struct channel_oil *)cc1;
+	struct channel_oil *c2 = (struct channel_oil *)cc2;
+	int rv;
+
+	rv = pim_addr_cmp(*oil_mcastgrp(c1), *oil_mcastgrp(c2));
+	if (rv)
+		return rv;
+	rv = pim_addr_cmp(*oil_origin(c1), *oil_origin(c2));
+	if (rv)
+		return rv;
+	return 0;
+}
+
+/* Initialize the per-instance channel-oil RB tree. */
+void pim_oil_init(struct pim_instance *pim)
+{
+	rb_pim_oil_init(&pim->channel_oil_head);
+}
+
+/* Free every remaining channel oil and tear down the RB tree. */
+void pim_oil_terminate(struct pim_instance *pim)
+{
+	struct channel_oil *c_oil;
+
+	while ((c_oil = rb_pim_oil_pop(&pim->channel_oil_head)))
+		pim_channel_oil_free(c_oil);
+
+	rb_pim_oil_fini(&pim->channel_oil_head);
+}
+
+/* Release a channel oil's memory; caller must have unlinked it first. */
+void pim_channel_oil_free(struct channel_oil *c_oil)
+{
+	XFREE(MTYPE_PIM_CHANNEL_OIL, c_oil);
+}
+
+/* Look up the channel oil for (S,G); returns NULL if none exists.
+ * Uses a stack-local key object for the RB-tree search.
+ */
+struct channel_oil *pim_find_channel_oil(struct pim_instance *pim,
+					 pim_sgaddr *sg)
+{
+	struct channel_oil *c_oil = NULL;
+	struct channel_oil lookup;
+
+	*oil_mcastgrp(&lookup) = sg->grp;
+	*oil_origin(&lookup) = sg->src;
+
+	c_oil = rb_pim_oil_find(&pim->channel_oil_head, &lookup);
+
+	return c_oil;
+}
+
+/* Find-or-create the channel oil for (S,G), taking a reference.
+ * On reuse: re-anchors the upstream pointer if it was missing (and
+ * re-evaluates OIF mute state) and re-checks the IIF.
+ * On create: IIF starts as MAXVIFS, i.e. not yet installable in the
+ * kernel.  "name" is only used for debug logging (caller tag).
+ */
+struct channel_oil *pim_channel_oil_add(struct pim_instance *pim,
+					pim_sgaddr *sg, const char *name)
+{
+	struct channel_oil *c_oil;
+
+	c_oil = pim_find_channel_oil(pim, sg);
+	if (c_oil) {
+		++c_oil->oil_ref_count;
+
+		if (!c_oil->up) {
+			/* channel might be present prior to upstream */
+			c_oil->up = pim_upstream_find(
+				pim, sg);
+			/* if the upstream entry is being anchored to an
+			 * already existing channel OIL we need to re-evaluate
+			 * the "Mute" state on AA OIFs
+			 */
+			pim_channel_update_mute(c_oil);
+		}
+
+		/* check if the IIF has changed
+		 * XXX - is this really needed
+		 */
+		pim_upstream_mroute_iif_update(c_oil, __func__);
+
+		if (PIM_DEBUG_MROUTE)
+			zlog_debug(
+				"%s(%s): Existing oil for %pSG Ref Count: %d (Post Increment)",
+				__func__, name, sg, c_oil->oil_ref_count);
+		return c_oil;
+	}
+
+	c_oil = XCALLOC(MTYPE_PIM_CHANNEL_OIL, sizeof(*c_oil));
+
+	*oil_mcastgrp(c_oil) = sg->grp;
+	*oil_origin(c_oil) = sg->src;
+
+	/* MAXVIFS as IIF marks the entry invalid for kernel install */
+	*oil_incoming_vif(c_oil) = MAXVIFS;
+	c_oil->oil_ref_count = 1;
+	c_oil->installed = 0;
+	c_oil->up = pim_upstream_find(pim, sg);
+	c_oil->pim = pim;
+
+	rb_pim_oil_add(&pim->channel_oil_head, c_oil);
+
+	if (PIM_DEBUG_MROUTE)
+		zlog_debug("%s(%s): c_oil %pSG add", __func__, name, sg);
+
+	return c_oil;
+}
+
+
+/*
+ * Clean up mroute and channel oil created for dropping pkts from directly
+ * connected source when the interface was non DR.
+ */
+/*
+ * Clean up mroute and channel oil created for dropping pkts from directly
+ * connected source when the interface was non DR.
+ *
+ * Walks all channel oils of the interface's PIM instance (safe variant,
+ * since pim_upstream_del() may drop entries) and tears down any upstream
+ * flagged SRC_NOCACHE whose IIF is this interface's VIF.
+ */
+void pim_clear_nocache_state(struct pim_interface *pim_ifp)
+{
+	struct channel_oil *c_oil;
+
+	frr_each_safe (rb_pim_oil, &pim_ifp->pim->channel_oil_head, c_oil) {
+
+		if ((!c_oil->up) ||
+		    !(PIM_UPSTREAM_FLAG_TEST_SRC_NOCACHE(c_oil->up->flags)))
+			continue;
+
+		if (*oil_incoming_vif(c_oil) != pim_ifp->mroute_vif_index)
+			continue;
+
+		/* stop keepalive, clear the nocache/stream state, drop ref */
+		EVENT_OFF(c_oil->up->t_ka_timer);
+		PIM_UPSTREAM_FLAG_UNSET_SRC_NOCACHE(c_oil->up->flags);
+		PIM_UPSTREAM_FLAG_UNSET_SRC_STREAM(c_oil->up->flags);
+		pim_upstream_del(pim_ifp->pim, c_oil->up, __func__);
+	}
+}
+
+/* Drop one reference on c_oil.  When the count reaches zero the entry
+ * is unlinked from the instance's RB tree and freed, and NULL is
+ * returned; otherwise the (still-referenced) oil is returned.
+ */
+struct channel_oil *pim_channel_oil_del(struct channel_oil *c_oil,
+					const char *name)
+{
+	if (PIM_DEBUG_MROUTE) {
+		pim_sgaddr sg = {.src = *oil_origin(c_oil),
+				 .grp = *oil_mcastgrp(c_oil)};
+
+		zlog_debug(
+			"%s(%s): Del oil for %pSG, Ref Count: %d (Predecrement)",
+			__func__, name, &sg, c_oil->oil_ref_count);
+	}
+	--c_oil->oil_ref_count;
+
+	if (c_oil->oil_ref_count < 1) {
+		/*
+		 * Clear the upstream pointer before unlinking so no stale
+		 * reference survives, then remove from the RB tree and
+		 * free the entry.
+		 */
+		c_oil->up = NULL;
+		rb_pim_oil_del(&c_oil->pim->channel_oil_head, c_oil);
+
+		pim_channel_oil_free(c_oil);
+		return NULL;
+	}
+
+	return c_oil;
+}
+
+/* Drop the upstream entry's reference on its channel oil. */
+void pim_channel_oil_upstream_deref(struct channel_oil *c_oil)
+{
+	/* The upstream entry associated with a channel_oil is about to be
+	 * deleted. If the channel_oil is kept around because of other
+	 * references we need to remove upstream based states out of it.
+	 */
+	c_oil = pim_channel_oil_del(c_oil, __func__);
+	if (c_oil) {
+		/* note: here we assume that c_oil->up has already been
+		 * cleared
+		 */
+		pim_channel_update_mute(c_oil);
+	}
+}
+
+/* Remove proto_mask's claim on oif in channel_oil's OIL.  The OIF is
+ * only removed from the kernel mroute when no other protocol
+ * (PIM/GM/STAR/VXLAN) still claims it.
+ * Returns 0 on success or no-op, -1 if the kernel update failed.
+ */
+int pim_channel_del_oif(struct channel_oil *channel_oil, struct interface *oif,
+			uint32_t proto_mask, const char *caller)
+{
+	struct pim_interface *pim_ifp;
+
+	assert(channel_oil);
+	assert(oif);
+
+	pim_ifp = oif->info;
+
+	assertf(pim_ifp->mroute_vif_index >= 0,
+		"trying to del OIF %s with VIF (%d)", oif->name,
+		pim_ifp->mroute_vif_index);
+
+	/*
+	 * Don't do anything if we've been asked to remove a source
+	 * that is not actually on it.
+	 */
+	if (!(channel_oil->oif_flags[pim_ifp->mroute_vif_index] & proto_mask)) {
+		if (PIM_DEBUG_MROUTE) {
+			zlog_debug(
+				"%s %s: no existing protocol mask %u(%u) for requested OIF %s (vif_index=%d, min_ttl=%d) for channel (S,G)=(%pPAs,%pPAs)",
+				__FILE__, __func__, proto_mask,
+				channel_oil
+					->oif_flags[pim_ifp->mroute_vif_index],
+				oif->name, pim_ifp->mroute_vif_index,
+				oil_if_has(channel_oil, pim_ifp->mroute_vif_index),
+				oil_origin(channel_oil),
+				oil_mcastgrp(channel_oil));
+		}
+		return 0;
+	}
+
+	channel_oil->oif_flags[pim_ifp->mroute_vif_index] &= ~proto_mask;
+
+	/* another protocol still owns this OIF: keep it in the OIL */
+	if (channel_oil->oif_flags[pim_ifp->mroute_vif_index] &
+	    PIM_OIF_FLAG_PROTO_ANY) {
+		if (PIM_DEBUG_MROUTE) {
+			zlog_debug(
+				"%s %s: other protocol masks remain for requested OIF %s (vif_index=%d, min_ttl=%d) for channel (S,G)=(%pPAs,%pPAs)",
+				__FILE__, __func__, oif->name,
+				pim_ifp->mroute_vif_index,
+				oil_if_has(channel_oil, pim_ifp->mroute_vif_index),
+				oil_origin(channel_oil),
+				oil_mcastgrp(channel_oil));
+		}
+		return 0;
+	}
+
+	oil_if_set(channel_oil, pim_ifp->mroute_vif_index, 0);
+	/* clear mute; will be re-evaluated when the OIF becomes valid again */
+	channel_oil->oif_flags[pim_ifp->mroute_vif_index] &= ~PIM_OIF_FLAG_MUTE;
+
+	/* push the updated OIL down to the kernel mroute */
+	if (pim_upstream_mroute_add(channel_oil, __func__)) {
+		if (PIM_DEBUG_MROUTE) {
+			zlog_debug(
+				"%s %s: could not remove output interface %s (vif_index=%d) for channel (S,G)=(%pPAs,%pPAs)",
+				__FILE__, __func__, oif->name,
+				pim_ifp->mroute_vif_index,
+				oil_origin(channel_oil),
+				oil_mcastgrp(channel_oil));
+		}
+		return -1;
+	}
+
+	--channel_oil->oil_size;
+
+	if (PIM_DEBUG_MROUTE) {
+		struct interface *iifp =
+			pim_if_find_by_vif_index(pim_ifp->pim,
+						 *oil_incoming_vif(channel_oil));
+
+		zlog_debug("%s(%s): (S,G)=(%pPAs,%pPAs): proto_mask=%u IIF:%s OIF=%s vif_index=%d",
+			   __func__, caller, oil_origin(channel_oil),
+			   oil_mcastgrp(channel_oil), proto_mask,
+			   iifp ? iifp->name : "Unknown", oif->name,
+			   pim_ifp->mroute_vif_index);
+	}
+
+	return 0;
+}
+
+/* Remove a (*,G)-inherited OIF from an (S,G) channel oil and re-evaluate
+ * join-desired on the upstream if its inherited OIL became empty.
+ */
+void pim_channel_del_inherited_oif(struct channel_oil *c_oil,
+				   struct interface *oif, const char *caller)
+{
+	struct pim_upstream *up = c_oil->up;
+
+	pim_channel_del_oif(c_oil, oif, PIM_OIF_FLAG_PROTO_STAR,
+			    caller);
+
+	/* if an inherited OIF is being removed join-desired can change
+	 * if the inherited OIL is now empty and KAT is running
+	 */
+	if (up && !pim_addr_is_any(up->sg.src) &&
+	    pim_upstream_empty_inherited_olist(up))
+		pim_upstream_update_join_desired(up->pim, up);
+}
+
+/* Decide whether pim_ifp should be held muted (present in the OIL but
+ * not forwarding) for c_oil.  Only three interface classes can mute:
+ * the pimreg device, the VXLAN termination device, and MLAG
+ * dual-active interfaces.  Without an upstream entry nothing is muted.
+ */
+static bool pim_channel_eval_oif_mute(struct channel_oil *c_oil,
+				      struct pim_interface *pim_ifp)
+{
+	struct pim_interface *pim_reg_ifp;
+	struct pim_interface *vxlan_ifp;
+	bool do_mute = false;
+	struct pim_instance *pim = c_oil->pim;
+
+	if (!c_oil->up)
+		return do_mute;
+
+	pim_reg_ifp = pim->regiface->info;
+	if (pim_ifp == pim_reg_ifp) {
+		/* suppress pimreg in the OIL if the mroute is not supposed to
+		 * trigger register encapsulated data
+		 */
+		if (PIM_UPSTREAM_FLAG_TEST_NO_PIMREG_DATA(c_oil->up->flags))
+			do_mute = true;
+
+		return do_mute;
+	}
+
+	vxlan_ifp = pim_vxlan_get_term_ifp(pim);
+	if (pim_ifp == vxlan_ifp) {
+		/* 1. vxlan termination device must never be added to the
+		 * origination mroute (and that can actually happen because
+		 * of XG inheritance from the termination mroute) otherwise
+		 * traffic will end up looping.
+		 * PS: This check has also been extended to non-orig mroutes
+		 * that have a local SIP as such mroutes can move back and
+		 * forth between orig<=>non-orig type.
+		 * 2. vxlan termination device should be removed from the non-DF
+		 * to prevent duplicates to the overlay rxer
+		 */
+		if (PIM_UPSTREAM_FLAG_TEST_SRC_VXLAN_ORIG(c_oil->up->flags) ||
+		    PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(c_oil->up->flags) ||
+		    pim_vxlan_is_local_sip(c_oil->up))
+			do_mute = true;
+
+		return do_mute;
+	}
+
+	/* MLAG: mute the non-DF peer so only one switch forwards */
+	if (PIM_I_am_DualActive(pim_ifp)) {
+		struct pim_upstream *starup = c_oil->up->parent;
+		if (PIM_UPSTREAM_FLAG_TEST_MLAG_INTERFACE(c_oil->up->flags)
+		    && (PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(c_oil->up->flags)))
+			do_mute = true;
+
+		/* In case entry is (S,G), Negotiation happens at (*.G) */
+		if (starup
+
+		    && PIM_UPSTREAM_FLAG_TEST_MLAG_INTERFACE(starup->flags)
+		    && (PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(starup->flags)))
+			do_mute = true;
+		return do_mute;
+	}
+	return do_mute;
+}
+
+/* Re-evaluate the mute flag for pim_ifp in c_oil and, if it changed,
+ * push the updated OIL to the kernel.  No-op when the interface is not
+ * in the OIL or the mute state is unchanged.
+ */
+void pim_channel_update_oif_mute(struct channel_oil *c_oil,
+				 struct pim_interface *pim_ifp)
+{
+	bool old_mute;
+	bool new_mute;
+
+	/* If pim_ifp is not a part of the OIL there is nothing to do */
+	if (!oil_if_has(c_oil, pim_ifp->mroute_vif_index))
+		return;
+
+	old_mute = !!(c_oil->oif_flags[pim_ifp->mroute_vif_index] &
+		      PIM_OIF_FLAG_MUTE);
+	new_mute = pim_channel_eval_oif_mute(c_oil, pim_ifp);
+	if (old_mute == new_mute)
+		return;
+
+	if (new_mute)
+		c_oil->oif_flags[pim_ifp->mroute_vif_index] |=
+			PIM_OIF_FLAG_MUTE;
+	else
+		c_oil->oif_flags[pim_ifp->mroute_vif_index] &=
+			~PIM_OIF_FLAG_MUTE;
+
+	pim_upstream_mroute_add(c_oil, __func__);
+}
+
+/* pim_upstream has been set or cleared on the c_oil. re-eval mute state
+ * on all existing OIFs
+ */
+/* pim_upstream has been set or cleared on the c_oil. re-eval mute state
+ * on all existing OIFs
+ *
+ * Only the pimreg and VXLAN termination devices can be muted (see
+ * pim_channel_eval_oif_mute), so only those two are re-checked here.
+ */
+static void pim_channel_update_mute(struct channel_oil *c_oil)
+{
+	struct pim_interface *pim_reg_ifp;
+	struct pim_interface *vxlan_ifp;
+
+	if (c_oil->pim->regiface) {
+		pim_reg_ifp = c_oil->pim->regiface->info;
+		if (pim_reg_ifp)
+			pim_channel_update_oif_mute(c_oil, pim_reg_ifp);
+	}
+	vxlan_ifp = pim_vxlan_get_term_ifp(c_oil->pim);
+	if (vxlan_ifp)
+		pim_channel_update_oif_mute(c_oil, vxlan_ifp);
+}
+
+/* Add oif to channel_oil's OIL under proto_mask.
+ * Return values: 0 success (or another protocol already had the OIF);
+ * -1 NULL channel_oil; -3 same protocol already added this OIF;
+ * -4 OIF unexpectedly has a TTL but no protocol flag; -5 kernel
+ * mroute update failed (change is rolled back).
+ */
+int pim_channel_add_oif(struct channel_oil *channel_oil, struct interface *oif,
+			uint32_t proto_mask, const char *caller)
+{
+	struct pim_interface *pim_ifp;
+	int old_ttl;
+
+	/*
+	 * If we've gotten here we've gone bad, but let's
+	 * not take down pim
+	 */
+	if (!channel_oil) {
+		zlog_warn("Attempt to Add OIF for non-existent channel oil");
+		return -1;
+	}
+
+	pim_ifp = oif->info;
+
+	assertf(pim_ifp->mroute_vif_index >= 0,
+		"trying to add OIF %s with VIF (%d)", oif->name,
+		pim_ifp->mroute_vif_index);
+
+	/* Prevent single protocol from subscribing same interface to
+	   channel (S,G) multiple times */
+	if (channel_oil->oif_flags[pim_ifp->mroute_vif_index] & proto_mask) {
+		channel_oil->oif_flags[pim_ifp->mroute_vif_index] |= proto_mask;
+
+		if (PIM_DEBUG_MROUTE) {
+			zlog_debug(
+				"%s %s: existing protocol mask %u requested OIF %s (vif_index=%d, min_ttl=%d) for channel (S,G)=(%pPAs,%pPAs)",
+				__FILE__, __func__, proto_mask, oif->name,
+				pim_ifp->mroute_vif_index,
+				oil_if_has(channel_oil, pim_ifp->mroute_vif_index),
+				oil_origin(channel_oil),
+				oil_mcastgrp(channel_oil));
+		}
+		return -3;
+	}
+
+	/* Allow other protocol to request subscription of same interface to
+	 * channel (S,G), we need to note this information
+	 */
+	if (channel_oil->oif_flags[pim_ifp->mroute_vif_index]
+	    & PIM_OIF_FLAG_PROTO_ANY) {
+
+		/* Updating time here is not required as this time has to
+		 * indicate when the interface is added
+		 */
+
+		channel_oil->oif_flags[pim_ifp->mroute_vif_index] |= proto_mask;
+		/* Check the OIF really exists before returning, and only log
+		   warning otherwise */
+		if (oil_if_has(channel_oil, pim_ifp->mroute_vif_index) < 1) {
+			zlog_warn(
+				"%s %s: new protocol mask %u requested nonexistent OIF %s (vif_index=%d, min_ttl=%d) for channel (S,G)=(%pPAs,%pPAs)",
+				__FILE__, __func__, proto_mask, oif->name,
+				pim_ifp->mroute_vif_index,
+				oil_if_has(channel_oil, pim_ifp->mroute_vif_index),
+				oil_origin(channel_oil),
+				oil_mcastgrp(channel_oil));
+		}
+
+		if (PIM_DEBUG_MROUTE) {
+			zlog_debug(
+				"%s(%s): (S,G)=(%pPAs,%pPAs): proto_mask=%u OIF=%s vif_index=%d added to 0x%x",
+				__func__, caller, oil_origin(channel_oil),
+				oil_mcastgrp(channel_oil),
+				proto_mask, oif->name,
+				pim_ifp->mroute_vif_index,
+				channel_oil
+					->oif_flags[pim_ifp->mroute_vif_index]);
+		}
+		return 0;
+	}
+
+	old_ttl = oil_if_has(channel_oil, pim_ifp->mroute_vif_index);
+
+	/* no protocol flag was set, so a non-zero TTL is inconsistent */
+	if (old_ttl > 0) {
+		if (PIM_DEBUG_MROUTE) {
+			zlog_debug(
+				"%s %s: interface %s (vif_index=%d) is existing output for channel (S,G)=(%pPAs,%pPAs)",
+				__FILE__, __func__, oif->name,
+				pim_ifp->mroute_vif_index,
+				oil_origin(channel_oil),
+				oil_mcastgrp(channel_oil));
+		}
+		return -4;
+	}
+
+	oil_if_set(channel_oil, pim_ifp->mroute_vif_index, PIM_MROUTE_MIN_TTL);
+
+	/* Some OIFs are held in a muted state i.e. the PIM state machine
+	 * decided to include the OIF but additional status check such as
+	 * MLAG DF role prevent it from being activated for traffic
+	 * forwarding.
+	 */
+	if (pim_channel_eval_oif_mute(channel_oil, pim_ifp))
+		channel_oil->oif_flags[pim_ifp->mroute_vif_index] |=
+			PIM_OIF_FLAG_MUTE;
+	else
+		channel_oil->oif_flags[pim_ifp->mroute_vif_index] &=
+			~PIM_OIF_FLAG_MUTE;
+
+	/* channel_oil->oil.mfcc_parent != MAXVIFS indicate this entry is not
+	 * valid to get installed in kernel.
+	 */
+	if (*oil_incoming_vif(channel_oil) != MAXVIFS) {
+		if (pim_upstream_mroute_add(channel_oil, __func__)) {
+			if (PIM_DEBUG_MROUTE) {
+				zlog_debug(
+					"%s %s: could not add output interface %s (vif_index=%d) for channel (S,G)=(%pPAs,%pPAs)",
+					__FILE__, __func__, oif->name,
+					pim_ifp->mroute_vif_index,
+					oil_origin(channel_oil),
+					oil_mcastgrp(channel_oil));
+			}
+
+			/* roll back the TTL change on kernel failure */
+			oil_if_set(channel_oil, pim_ifp->mroute_vif_index,
+				   old_ttl);
+			return -5;
+		}
+	}
+
+	channel_oil->oif_creation[pim_ifp->mroute_vif_index] =
+		pim_time_monotonic_sec();
+	++channel_oil->oil_size;
+	channel_oil->oif_flags[pim_ifp->mroute_vif_index] |= proto_mask;
+
+	if (PIM_DEBUG_MROUTE) {
+		zlog_debug(
+			"%s(%s): (S,G)=(%pPAs,%pPAs): proto_mask=%u OIF=%s vif_index=%d: DONE",
+			__func__, caller, oil_origin(channel_oil),
+			oil_mcastgrp(channel_oil),
+			proto_mask,
+			oif->name, pim_ifp->mroute_vif_index);
+	}
+
+	return 0;
+}
+
+/* Return non-zero when c_oil's OIL is empty apart from the pimreg VIF.
+ * Implemented by comparing against a static scratch oil that mirrors
+ * only c_oil's pimreg slot (vifi 0), so pimreg never makes the OIL
+ * count as non-empty.
+ * NOTE(review): the static null_oil is mutated on every call — fine
+ * for single-threaded pimd, not reentrant.
+ */
+int pim_channel_oil_empty(struct channel_oil *c_oil)
+{
+	static struct channel_oil null_oil;
+
+	if (!c_oil)
+		return 1;
+
+	/* exclude pimreg from the OIL when checking if the inherited_oil is
+	 * non-NULL.
+	 * pimreg device (in all vrfs) uses a vifi of
+	 * 0 (PIM_OIF_PIM_REGISTER_VIF) so we simply mfcc_ttls[0] */
+	if (oil_if_has(c_oil, 0))
+		oil_if_set(&null_oil, 0, 1);
+	else
+		oil_if_set(&null_oil, 0, 0);
+
+	return !oil_if_cmp(&c_oil->oil, &null_oil.oil);
+}
diff --git a/pimd/pim_oil.h b/pimd/pim_oil.h
new file mode 100644
index 0000000..6a52227
--- /dev/null
+++ b/pimd/pim_oil.h
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_OIL_H
+#define PIM_OIL_H
+
+struct pim_interface;
+
+#include "pim_mroute.h"
+
+/*
+ * Where did we get this (S,G) from?
+ *
+ * GM - Learned from IGMP/MLD
+ * PIM - Learned from PIM
+ * SOURCE - Learned from Source multicast packet received
+ * STAR - Inherited
+ */
+#define PIM_OIF_FLAG_PROTO_GM (1 << 0)
+#define PIM_OIF_FLAG_PROTO_PIM (1 << 1)
+#define PIM_OIF_FLAG_PROTO_STAR (1 << 2)
+#define PIM_OIF_FLAG_PROTO_VXLAN (1 << 3)
+#define PIM_OIF_FLAG_PROTO_ANY \
+ (PIM_OIF_FLAG_PROTO_GM | PIM_OIF_FLAG_PROTO_PIM | \
+ PIM_OIF_FLAG_PROTO_STAR | PIM_OIF_FLAG_PROTO_VXLAN)
+
+/* OIF is present in the OIL but must not be used for forwarding traffic */
+#define PIM_OIF_FLAG_MUTE (1 << 4)
+/*
+ * We need a pimreg vif id from the kernel.
+ * Since ifindex == vif id for most cases and the number
+ * of expected interfaces is at most 100, using MAXVIFS -1
+ * is probably ok.
+ * Don't come running to me if this assumption is bad,
+ * fix it.
+ */
+#define PIM_OIF_PIM_REGISTER_VIF 0
+#define PIM_MAX_USABLE_VIFS (MAXVIFS - 1)
+
+/* Traffic counters for one kernel mroute entry.
+ * NOTE(review): judging by the polling code elsewhere, "orig*" appear
+ * to be baselines captured at creation and "old*" the previous poll's
+ * values — confirm against the stats update path.
+ */
+struct channel_counts {
+	unsigned long long lastused;
+	unsigned long origpktcnt;
+	unsigned long pktcnt;
+	unsigned long oldpktcnt;
+	unsigned long origbytecnt;
+	unsigned long bytecnt;
+	unsigned long oldbytecnt;
+	unsigned long origwrong_if;
+	unsigned long wrong_if;
+	unsigned long oldwrong_if;
+};
+
+/*
+ qpim_channel_oil_list holds a list of struct channel_oil.
+
+ Each channel_oil.oil is used to control an (S,G) entry in the Kernel
+ Multicast Forwarding Cache.
+
+ There is a case when we create a channel_oil but don't install in the kernel
+
+ Case where (S, G) entry not installed in the kernel:
+ FRR receives IGMP/PIM (*, G) join and RP is not configured or
+ not-reachable, then create a channel_oil for the group G with the incoming
+ interface(channel_oil.oil.mfcc_parent) as invalid i.e "MAXVIF" and populate
+ the outgoing interface where join is received. Keep this entry in the stack,
+ but don't install in the kernel(channel_oil.installed = 0).
+
+ Case where (S, G) entry installed in the kernel:
+ When RP is configured and is reachable for the group G, and receiving a
+ join if channel_oil is already present then populate the incoming interface
+ and install the entry in the kernel, if channel_oil not present, then create
+ a new_channel oil(channel_oil.installed = 1).
+
+ is_valid: indicate if this entry is valid to get installed in kernel.
+ installed: indicate if this entry is installed in the kernel.
+
+*/
+PREDECL_RBTREE_UNIQ(rb_pim_oil);
+
+/* One (S,G) forwarding entry; wraps the kernel MFC control struct. */
+struct channel_oil {
+	struct pim_instance *pim;
+
+	/* node in pim->channel_oil_head, keyed on (G,S) */
+	struct rb_pim_oil_item oil_rb;
+
+	/* kernel representation: v4 mfcctl / v6 mf6cctl */
+#if PIM_IPV == 4
+	struct mfcctl oil;
+#else
+	struct mf6cctl oil;
+#endif
+	int installed;            /* pushed into the kernel MFC? */
+	int oil_inherited_rescan;
+	int oil_size;             /* number of OIFs currently set */
+	int oil_ref_count;        /* see pim_channel_oil_add/_del */
+	time_t oif_creation[MAXVIFS];
+	uint32_t oif_flags[MAXVIFS]; /* PIM_OIF_FLAG_* per VIF */
+	struct channel_counts cc;
+	struct pim_upstream *up;  /* may be NULL (see pim_channel_oil_add) */
+	time_t mroute_creation;
+};
+
+/* Address-family-neutral accessors for the kernel MFC struct, so the
+ * shared code does not care whether "oil" is mfcctl or mf6cctl.
+ */
+#if PIM_IPV == 4
+/* Source address of the (S,G). */
+static inline pim_addr *oil_origin(struct channel_oil *c_oil)
+{
+	return &c_oil->oil.mfcc_origin;
+}
+
+/* Group address of the (S,G). */
+static inline pim_addr *oil_mcastgrp(struct channel_oil *c_oil)
+{
+	return &c_oil->oil.mfcc_mcastgrp;
+}
+
+/* Incoming VIF (IIF); MAXVIFS means "not installable". */
+static inline vifi_t *oil_incoming_vif(struct channel_oil *c_oil)
+{
+	return &c_oil->oil.mfcc_parent;
+}
+
+/* Is VIF ifi present in the OIL (non-zero TTL)? */
+static inline bool oil_if_has(struct channel_oil *c_oil, vifi_t ifi)
+{
+	return !!c_oil->oil.mfcc_ttls[ifi];
+}
+
+/* Set VIF ifi's TTL; 0 removes it from the OIL. */
+static inline void oil_if_set(struct channel_oil *c_oil, vifi_t ifi, uint8_t set)
+{
+	c_oil->oil.mfcc_ttls[ifi] = set;
+}
+
+/* memcmp of the two OIF sets; 0 means identical. */
+static inline int oil_if_cmp(struct mfcctl *oil1, struct mfcctl *oil2)
+{
+	return memcmp(&oil1->mfcc_ttls[0], &oil2->mfcc_ttls[0],
+		      sizeof(oil1->mfcc_ttls));
+}
+#else
+/* IPv6 variants: MIF bitset instead of per-VIF TTL array. */
+static inline pim_addr *oil_origin(struct channel_oil *c_oil)
+{
+	return &c_oil->oil.mf6cc_origin.sin6_addr;
+}
+
+static inline pim_addr *oil_mcastgrp(struct channel_oil *c_oil)
+{
+	return &c_oil->oil.mf6cc_mcastgrp.sin6_addr;
+}
+
+static inline mifi_t *oil_incoming_vif(struct channel_oil *c_oil)
+{
+	return &c_oil->oil.mf6cc_parent;
+}
+
+static inline bool oil_if_has(struct channel_oil *c_oil, mifi_t ifi)
+{
+	return !!IF_ISSET(ifi, &c_oil->oil.mf6cc_ifset);
+}
+
+/* "set" is boolean here: any non-zero value sets the MIF bit. */
+static inline void oil_if_set(struct channel_oil *c_oil, mifi_t ifi,
+			      uint8_t set)
+{
+	if (set)
+		IF_SET(ifi, &c_oil->oil.mf6cc_ifset);
+	else
+		IF_CLR(ifi, &c_oil->oil.mf6cc_ifset);
+}
+
+static inline int oil_if_cmp(struct mf6cctl *oil1, struct mf6cctl *oil2)
+{
+	return memcmp(&oil1->mf6cc_ifset, &oil2->mf6cc_ifset,
+		      sizeof(oil1->mf6cc_ifset));
+}
+#endif
+
+extern int pim_channel_oil_compare(const struct channel_oil *c1,
+				   const struct channel_oil *c2);
+DECLARE_RBTREE_UNIQ(rb_pim_oil, struct channel_oil, oil_rb,
+		    pim_channel_oil_compare);
+
+/* per-instance setup/teardown of the channel-oil tree */
+void pim_oil_init(struct pim_instance *pim);
+void pim_oil_terminate(struct pim_instance *pim);
+
+/* lifecycle: add takes a reference, del drops one (frees at zero) */
+void pim_channel_oil_free(struct channel_oil *c_oil);
+struct channel_oil *pim_find_channel_oil(struct pim_instance *pim,
+					 pim_sgaddr *sg);
+struct channel_oil *pim_channel_oil_add(struct pim_instance *pim,
+					pim_sgaddr *sg, const char *name);
+void pim_clear_nocache_state(struct pim_interface *pim_ifp);
+struct channel_oil *pim_channel_oil_del(struct channel_oil *c_oil,
+					const char *name);
+
+/* OIF membership under a PIM_OIF_FLAG_PROTO_* mask */
+int pim_channel_add_oif(struct channel_oil *c_oil, struct interface *oif,
+			uint32_t proto_mask, const char *caller);
+int pim_channel_del_oif(struct channel_oil *c_oil, struct interface *oif,
+			uint32_t proto_mask, const char *caller);
+
+int pim_channel_oil_empty(struct channel_oil *c_oil);
+
+char *pim_channel_oil_dump(struct channel_oil *c_oil, char *buf, size_t size);
+
+void pim_channel_update_oif_mute(struct channel_oil *c_oil,
+				 struct pim_interface *pim_ifp);
+
+void pim_channel_oil_upstream_deref(struct channel_oil *c_oil);
+void pim_channel_del_inherited_oif(struct channel_oil *c_oil,
+				   struct interface *oif, const char *caller);
+
+#endif /* PIM_OIL_H */
diff --git a/pimd/pim_pim.c b/pimd/pim_pim.c
new file mode 100644
index 0000000..a4c9178
--- /dev/null
+++ b/pimd/pim_pim.c
@@ -0,0 +1,997 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "frrevent.h"
+#include "memory.h"
+#include "if.h"
+#include "network.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_pim.h"
+#include "pim_time.h"
+#include "pim_iface.h"
+#include "pim_sock.h"
+#include "pim_str.h"
+#include "pim_util.h"
+#include "pim_tlv.h"
+#include "pim_neighbor.h"
+#include "pim_hello.h"
+#include "pim_join.h"
+#include "pim_assert.h"
+#include "pim_msg.h"
+#include "pim_register.h"
+#include "pim_errors.h"
+#include "pim_bsm.h"
+#include <lib/lib_errors.h>
+
+static void on_pim_hello_send(struct event *t);
+
+/* Map a PIM message type to a short name for logging; unknown values
+ * (including any out-of-enum wire value) yield "UNKNOWN".
+ */
+static const char *pim_pim_msgtype2str(enum pim_msg_type type)
+{
+	switch (type) {
+	case PIM_MSG_TYPE_HELLO:
+		return "HELLO";
+	case PIM_MSG_TYPE_REGISTER:
+		return "REGISTER";
+	case PIM_MSG_TYPE_REG_STOP:
+		return "REGSTOP";
+	case PIM_MSG_TYPE_JOIN_PRUNE:
+		return "JOINPRUNE";
+	case PIM_MSG_TYPE_BOOTSTRAP:
+		return "BOOT";
+	case PIM_MSG_TYPE_ASSERT:
+		return "ASSERT";
+	case PIM_MSG_TYPE_GRAFT:
+		return "GRAFT";
+	case PIM_MSG_TYPE_GRAFT_ACK:
+		return "GACK";
+	case PIM_MSG_TYPE_CANDIDATE:
+		return "CANDIDATE";
+	}
+
+	return "UNKNOWN";
+}
+
+/* Tear down the interface's PIM socket: cancel the pending read event
+ * and hello timer, close the fd, and mark the socket as gone
+ * (fd = -1, creation time = 0).
+ */
+static void sock_close(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+
+	if (PIM_DEBUG_PIM_TRACE) {
+		if (pim_ifp->t_pim_sock_read) {
+			zlog_debug(
+				"Cancelling READ event for PIM socket fd=%d on interface %s",
+				pim_ifp->pim_sock_fd, ifp->name);
+		}
+	}
+	EVENT_OFF(pim_ifp->t_pim_sock_read);
+
+	if (PIM_DEBUG_PIM_TRACE) {
+		if (pim_ifp->t_pim_hello_timer) {
+			zlog_debug(
+				"Cancelling PIM hello timer for interface %s",
+				ifp->name);
+		}
+	}
+	EVENT_OFF(pim_ifp->t_pim_hello_timer);
+
+	if (PIM_DEBUG_PIM_TRACE) {
+		zlog_debug("Deleting PIM socket fd=%d on interface %s",
+			   pim_ifp->pim_sock_fd, ifp->name);
+	}
+
+	/*
+	 * If the fd is already deleted no need to do anything here
+	 */
+	if (pim_ifp->pim_sock_fd > 0 && close(pim_ifp->pim_sock_fd)) {
+		zlog_warn(
+			"Failure closing PIM socket fd=%d on interface %s: errno=%d: %s",
+			pim_ifp->pim_sock_fd, ifp->name, errno,
+			safe_strerror(errno));
+	}
+
+	pim_ifp->pim_sock_fd = -1;
+	pim_ifp->pim_sock_creation = 0;
+}
+
+/* Bring PIM down on an interface: send a goodbye hello (holdtime 0),
+ * drop all neighbors, and close the socket.  delete_message is a
+ * human-readable reason used only for logging.
+ */
+void pim_sock_delete(struct interface *ifp, const char *delete_message)
+{
+	zlog_info("PIM INTERFACE DOWN: on interface %s: %s", ifp->name,
+		  delete_message);
+
+	if (!ifp->info) {
+		flog_err(EC_PIM_CONFIG,
+			 "%s: %s: but PIM not enabled on interface %s (!)",
+			 __func__, delete_message, ifp->name);
+		return;
+	}
+
+	/*
+	  RFC 4601: 4.3.1.  Sending Hello Messages
+
+	  Before an interface goes down or changes primary IP address, a Hello
+	  message with a zero HoldTime should be sent immediately (with the
+	  old IP address if the IP address changed).
+	*/
+	pim_hello_send(ifp, 0 /* zero-sec holdtime */);
+
+	pim_neighbor_delete_all(ifp, delete_message);
+
+	sock_close(ifp);
+}
+
+/* For now check dst address for hello, assrt and join/prune is all pim rtr */
+/* Returns false only when a Hello/Assert/Join-Prune was NOT sent to the
+ * ALL-PIM-ROUTERS group; all other message types are accepted as-is.
+ */
+static bool pim_pkt_dst_addr_ok(enum pim_msg_type type, pim_addr addr)
+{
+	if ((type == PIM_MSG_TYPE_HELLO) || (type == PIM_MSG_TYPE_ASSERT)
+	    || (type == PIM_MSG_TYPE_JOIN_PRUNE)) {
+		if (pim_addr_cmp(addr, qpim_all_pim_routers_addr))
+			return false;
+	}
+
+	return true;
+}
+
+/* Parse and dispatch one received PIM packet.
+ * IPv4: buf includes the IP header (sg is recomputed from it);
+ * IPv6: buf is the PIM message only and the checksum is computed over
+ * a pseudo-header built from sg.
+ * Validates length, version and checksum, then hands the payload to
+ * the per-type handler.  Returns the handler's result, or -1 on any
+ * validation failure or unknown type.
+ */
+int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len,
+		   pim_sgaddr sg)
+{
+	struct iovec iov[2], *iovp = iov;
+#if PIM_IPV == 4
+	struct ip *ip_hdr = (struct ip *)buf;
+	size_t ip_hlen; /* ip header length in bytes */
+#endif
+	uint8_t *pim_msg;
+	uint32_t pim_msg_len = 0;
+	uint16_t pim_checksum; /* received checksum */
+	uint16_t checksum;     /* computed checksum */
+	struct pim_neighbor *neigh;
+	struct pim_msg_header *header;
+	bool no_fwd;
+
+#if PIM_IPV == 4
+	if (len <= sizeof(*ip_hdr)) {
+		if (PIM_DEBUG_PIM_PACKETS)
+			zlog_debug(
+				"PIM packet size=%zu shorter than minimum=%zu",
+				len, sizeof(*ip_hdr));
+		return -1;
+	}
+
+	ip_hlen = ip_hdr->ip_hl << 2; /* ip_hl gives length in 4-byte words */
+	sg = pim_sgaddr_from_iphdr(ip_hdr);
+
+	pim_msg = buf + ip_hlen;
+	pim_msg_len = len - ip_hlen;
+#else
+	struct ipv6_ph phdr = {
+		.src = sg.src,
+		.dst = sg.grp,
+		.ulpl = htonl(len),
+		.next_hdr = IPPROTO_PIM,
+	};
+
+	iovp->iov_base = &phdr;
+	iovp->iov_len = sizeof(phdr);
+	iovp++;
+
+	/* NB: header is not included in IPv6 RX */
+	pim_msg = buf;
+	pim_msg_len = len;
+#endif
+
+	iovp->iov_base = pim_msg;
+	iovp->iov_len = pim_msg_len;
+	iovp++;
+
+	if (pim_msg_len < PIM_PIM_MIN_LEN) {
+		if (PIM_DEBUG_PIM_PACKETS)
+			zlog_debug(
+				"PIM message size=%d shorter than minimum=%d",
+				pim_msg_len, PIM_PIM_MIN_LEN);
+		return -1;
+	}
+	header = (struct pim_msg_header *)pim_msg;
+
+	if (header->ver != PIM_PROTO_VERSION) {
+		if (PIM_DEBUG_PIM_PACKETS)
+			zlog_debug(
+				"Ignoring PIM pkt from %s with unsupported version: %d",
+				ifp->name, header->ver);
+		return -1;
+	}
+
+	/* save received checksum */
+	pim_checksum = header->checksum;
+
+	/* for computing checksum */
+	header->checksum = 0;
+	no_fwd = header->Nbit;
+
+	if (header->type == PIM_MSG_TYPE_REGISTER) {
+		if (pim_msg_len < PIM_MSG_REGISTER_LEN) {
+			if (PIM_DEBUG_PIM_PACKETS)
+				zlog_debug("PIM Register Message size=%d shorther than min length %d",
+					   pim_msg_len, PIM_MSG_REGISTER_LEN);
+			return -1;
+		}
+
+#if PIM_IPV == 6
+		phdr.ulpl = htonl(PIM_MSG_REGISTER_LEN);
+#endif
+		/* First 8 byte header checksum */
+		iovp[-1].iov_len = PIM_MSG_REGISTER_LEN;
+		checksum = in_cksumv(iov, iovp - iov);
+
+		/* Register checksum may cover only the header or the whole
+		 * message; accept either form before rejecting.
+		 */
+		if (checksum != pim_checksum) {
+#if PIM_IPV == 6
+			phdr.ulpl = htonl(pim_msg_len);
+#endif
+			iovp[-1].iov_len = pim_msg_len;
+
+			checksum = in_cksumv(iov, iovp - iov);
+			if (checksum != pim_checksum) {
+				if (PIM_DEBUG_PIM_PACKETS)
+					zlog_debug(
+						"Ignoring PIM pkt from %s with invalid checksum: received=%x calculated=%x",
+						ifp->name, pim_checksum,
+						checksum);
+
+				return -1;
+			}
+		}
+	} else {
+		checksum = in_cksumv(iov, iovp - iov);
+		if (checksum != pim_checksum) {
+			if (PIM_DEBUG_PIM_PACKETS)
+				zlog_debug(
+					"Ignoring PIM pkt from %s with invalid checksum: received=%x calculated=%x",
+					ifp->name, pim_checksum, checksum);
+
+			return -1;
+		}
+	}
+
+	if (PIM_DEBUG_PIM_PACKETS) {
+		zlog_debug(
+			"Recv PIM %s packet from %pPA to %pPA on %s: pim_version=%d pim_msg_size=%d checksum=%x",
+			pim_pim_msgtype2str(header->type), &sg.src, &sg.grp,
+			ifp->name, header->ver, pim_msg_len, checksum);
+		if (PIM_DEBUG_PIM_PACKETDUMP_RECV)
+			pim_pkt_dump(__func__, pim_msg, pim_msg_len);
+	}
+
+	if (!pim_pkt_dst_addr_ok(header->type, sg.grp)) {
+		zlog_warn(
+			"%s: Ignoring Pkt. Unexpected IP destination %pPA for %s (Expected: all_pim_routers_addr) from %pPA",
+			__func__, &sg.grp, pim_pim_msgtype2str(header->type),
+			&sg.src);
+		return -1;
+	}
+
+	/* dispatch; Join-Prune and Assert require a known neighbor and
+	 * refresh its holdtime timer first
+	 */
+	switch (header->type) {
+	case PIM_MSG_TYPE_HELLO:
+		return pim_hello_recv(ifp, sg.src, pim_msg + PIM_MSG_HEADER_LEN,
+				      pim_msg_len - PIM_MSG_HEADER_LEN);
+		break;
+	case PIM_MSG_TYPE_REGISTER:
+		return pim_register_recv(ifp, sg.grp, sg.src,
+					 pim_msg + PIM_MSG_HEADER_LEN,
+					 pim_msg_len - PIM_MSG_HEADER_LEN);
+		break;
+	case PIM_MSG_TYPE_REG_STOP:
+		return pim_register_stop_recv(ifp, pim_msg + PIM_MSG_HEADER_LEN,
+					      pim_msg_len - PIM_MSG_HEADER_LEN);
+		break;
+	case PIM_MSG_TYPE_JOIN_PRUNE:
+		neigh = pim_neighbor_find(ifp, sg.src, false);
+		if (!neigh) {
+			if (PIM_DEBUG_PIM_PACKETS)
+				zlog_debug(
+					"%s %s: non-hello PIM message type=%d from non-neighbor %pPA on %s",
+					__FILE__, __func__, header->type,
+					&sg.src, ifp->name);
+			return -1;
+		}
+		pim_neighbor_timer_reset(neigh, neigh->holdtime);
+		return pim_joinprune_recv(ifp, neigh, sg.src,
+					  pim_msg + PIM_MSG_HEADER_LEN,
+					  pim_msg_len - PIM_MSG_HEADER_LEN);
+		break;
+	case PIM_MSG_TYPE_ASSERT:
+		neigh = pim_neighbor_find(ifp, sg.src, false);
+		if (!neigh) {
+			if (PIM_DEBUG_PIM_PACKETS)
+				zlog_debug(
+					"%s %s: non-hello PIM message type=%d from non-neighbor %pPA on %s",
+					__FILE__, __func__, header->type,
+					&sg.src, ifp->name);
+			return -1;
+		}
+		pim_neighbor_timer_reset(neigh, neigh->holdtime);
+		return pim_assert_recv(ifp, neigh, sg.src,
+				       pim_msg + PIM_MSG_HEADER_LEN,
+				       pim_msg_len - PIM_MSG_HEADER_LEN);
+		break;
+	case PIM_MSG_TYPE_BOOTSTRAP:
+		return pim_bsm_process(ifp, &sg, pim_msg, pim_msg_len, no_fwd);
+		break;
+
+	default:
+		if (PIM_DEBUG_PIM_PACKETS) {
+			zlog_debug(
+				"Recv PIM packet type %d which is not currently understood",
+				header->type);
+		}
+		return -1;
+	}
+}
+
+static void pim_sock_read_on(struct interface *ifp);
+
+/* Socket read handler: drain packets from the PIM socket and feed each
+ * to pim_pim_packet().  Batches up to router->packet_process packets
+ * per invocation, then re-arms the read event on the original
+ * interface.
+ * NOTE(review): any failure path bumps pim_ifstat_hello_recvfail on
+ * the original interface, even for non-hello packets — looks
+ * intentional-but-coarse; confirm before relying on that counter.
+ */
+static void pim_sock_read(struct event *t)
+{
+	struct interface *ifp, *orig_ifp;
+	struct pim_interface *pim_ifp;
+	int fd;
+	struct sockaddr_storage from;
+	struct sockaddr_storage to;
+	socklen_t fromlen = sizeof(from);
+	socklen_t tolen = sizeof(to);
+	uint8_t buf[PIM_PIM_BUFSIZE_READ];
+	int len;
+	ifindex_t ifindex = -1;
+	int result = -1; /* defaults to bad */
+	static long long count = 0;
+	int cont = 1;
+
+	orig_ifp = ifp = EVENT_ARG(t);
+	fd = EVENT_FD(t);
+
+	pim_ifp = ifp->info;
+
+	while (cont) {
+		pim_sgaddr sg;
+
+		len = pim_socket_recvfromto(fd, buf, sizeof(buf), &from,
+					    &fromlen, &to, &tolen, &ifindex);
+		if (len < 0) {
+			/* EINTR: retry; EAGAIN/EWOULDBLOCK: drained, done */
+			if (errno == EINTR)
+				continue;
+			if (errno == EWOULDBLOCK || errno == EAGAIN)
+				break;
+
+			if (PIM_DEBUG_PIM_PACKETS)
+				zlog_debug("Received errno: %d %s", errno,
+					   safe_strerror(errno));
+			goto done;
+		}
+
+		/*
+		 * What? So with vrf's the incoming packet is received
+		 * on the vrf interface but recvfromto above returns
+		 * the right ifindex, so just use it.  We know
+		 * it's the right interface because we bind to it
+		 */
+		ifp = if_lookup_by_index(ifindex, pim_ifp->pim->vrf->vrf_id);
+		if (!ifp || !ifp->info) {
+			if (PIM_DEBUG_PIM_PACKETS)
+				zlog_debug(
+					"%s: Received incoming pim packet on interface(%s:%d) not yet configured for pim",
+					__func__, ifp ? ifp->name : "Unknown",
+					ifindex);
+			goto done;
+		}
+#if PIM_IPV == 4
+		sg.src = ((struct sockaddr_in *)&from)->sin_addr;
+		sg.grp = ((struct sockaddr_in *)&to)->sin_addr;
+#else
+		sg.src = ((struct sockaddr_in6 *)&from)->sin6_addr;
+		sg.grp = ((struct sockaddr_in6 *)&to)->sin6_addr;
+#endif
+
+		int fail = pim_pim_packet(ifp, buf, len, sg);
+		if (fail) {
+			if (PIM_DEBUG_PIM_PACKETS)
+				zlog_debug("%s: pim_pim_packet() return=%d",
+					   __func__, fail);
+			goto done;
+		}
+
+		/* yield after a batch so other events can run */
+		count++;
+		if (count % router->packet_process == 0)
+			cont = 0;
+	}
+
+	result = 0; /* good */
+
+done:
+	pim_sock_read_on(orig_ifp);
+
+	if (result) {
+		++pim_ifp->pim_ifstat_hello_recvfail;
+	}
+}
+
+/* Schedule (re-arm) the READ event on ifp's PIM socket fd. */
+static void pim_sock_read_on(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+
+	assert(ifp);
+	assert(ifp->info);
+
+	pim_ifp = ifp->info;
+
+	if (PIM_DEBUG_PIM_TRACE_DETAIL) {
+		zlog_debug("Scheduling READ event on PIM socket fd=%d",
+			   pim_ifp->pim_sock_fd);
+	}
+	event_add_read(router->master, pim_sock_read, ifp, pim_ifp->pim_sock_fd,
+		       &pim_ifp->t_pim_sock_read);
+}
+
+/*
+ * Open the PIM protocol (IPPROTO_PIM) multicast socket for ifp and join
+ * the ALL-PIM-ROUTERS group on it.
+ *
+ * Returns the socket fd on success, -1 if the socket could not be
+ * opened, -2 if the group join failed (the fd is closed in that case).
+ */
+static int pim_sock_open(struct interface *ifp)
+{
+	int fd;
+	struct pim_interface *pim_ifp = ifp->info;
+
+	fd = pim_socket_mcast(IPPROTO_PIM, pim_ifp->primary_address, ifp,
+			      0 /* loop=false */);
+	if (fd < 0)
+		return -1;
+
+	if (pim_socket_join(fd, qpim_all_pim_routers_addr,
+			    pim_ifp->primary_address, ifp->ifindex, pim_ifp)) {
+		close(fd);
+		return -2;
+	}
+
+	return fd;
+}
+
+/*
+ * Zero all per-interface PIM/IGMP statistics counters and restart the
+ * statistics epoch timestamp.  Silently a no-op when PIM is not
+ * configured on the interface.
+ */
+void pim_ifstat_reset(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+
+	assert(ifp);
+
+	pim_ifp = ifp->info;
+	if (!pim_ifp) {
+		return;
+	}
+
+	pim_ifp->pim_ifstat_start = pim_time_monotonic_sec();
+	pim_ifp->pim_ifstat_hello_sent = 0;
+	pim_ifp->pim_ifstat_hello_sendfail = 0;
+	pim_ifp->pim_ifstat_hello_recv = 0;
+	pim_ifp->pim_ifstat_hello_recvfail = 0;
+	pim_ifp->pim_ifstat_bsm_rx = 0;
+	pim_ifp->pim_ifstat_bsm_tx = 0;
+	pim_ifp->pim_ifstat_join_recv = 0;
+	pim_ifp->pim_ifstat_join_send = 0;
+	pim_ifp->pim_ifstat_prune_recv = 0;
+	pim_ifp->pim_ifstat_prune_send = 0;
+	pim_ifp->pim_ifstat_reg_recv = 0;
+	pim_ifp->pim_ifstat_reg_send = 0;
+	pim_ifp->pim_ifstat_reg_stop_recv = 0;
+	pim_ifp->pim_ifstat_reg_stop_send = 0;
+	pim_ifp->pim_ifstat_assert_recv = 0;
+	pim_ifp->pim_ifstat_assert_send = 0;
+	pim_ifp->pim_ifstat_bsm_cfg_miss = 0;
+	pim_ifp->pim_ifstat_ucast_bsm_cfg_miss = 0;
+	pim_ifp->pim_ifstat_bsm_invalid_sz = 0;
+	pim_ifp->igmp_ifstat_joins_sent = 0;
+	pim_ifp->igmp_ifstat_joins_failed = 0;
+	pim_ifp->igmp_peak_group_count = 0;
+}
+
+/*
+ * Reset the interface's PIM protocol state to defaults: socket
+ * bookkeeping, hello timer/options, LAN-delay tracking, DR election
+ * state (the router starts out assuming it is the DR), and all
+ * statistics.  Also re-resolves the interface's primary address.
+ */
+void pim_sock_reset(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+
+	assert(ifp);
+	assert(ifp->info);
+
+	pim_ifp = ifp->info;
+
+	pim_ifp->primary_address = pim_find_primary_addr(ifp);
+
+	pim_ifp->pim_sock_fd = -1;
+	pim_ifp->pim_sock_creation = 0;
+	pim_ifp->t_pim_sock_read = NULL;
+
+	pim_ifp->t_pim_hello_timer = NULL;
+	pim_ifp->pim_hello_period = PIM_DEFAULT_HELLO_PERIOD;
+	pim_ifp->pim_default_holdtime =
+		-1; /* unset: means 3.5 * pim_hello_period */
+	pim_ifp->pim_triggered_hello_delay = PIM_DEFAULT_TRIGGERED_HELLO_DELAY;
+	pim_ifp->pim_dr_priority = PIM_DEFAULT_DR_PRIORITY;
+	pim_ifp->pim_propagation_delay_msec =
+		PIM_DEFAULT_PROPAGATION_DELAY_MSEC;
+	pim_ifp->pim_override_interval_msec =
+		PIM_DEFAULT_OVERRIDE_INTERVAL_MSEC;
+	pim_ifp->pim_can_disable_join_suppression =
+		PIM_DEFAULT_CAN_DISABLE_JOIN_SUPPRESSION;
+
+	/* neighbors without lan_delay */
+	pim_ifp->pim_number_of_nonlandelay_neighbors = 0;
+	pim_ifp->pim_neighbors_highest_propagation_delay_msec = 0;
+	pim_ifp->pim_neighbors_highest_override_interval_msec = 0;
+
+	/* DR Election */
+	pim_ifp->pim_dr_election_last = 0; /* timestamp */
+	pim_ifp->pim_dr_election_count = 0;
+	pim_ifp->pim_dr_election_changes = 0;
+	pim_ifp->pim_dr_num_nondrpri_neighbors =
+		0; /* neighbors without dr_pri */
+	pim_ifp->pim_dr_addr = pim_ifp->primary_address;
+	pim_ifp->am_i_dr = true;
+
+	pim_ifstat_reset(ifp);
+}
+
+#if PIM_IPV == 4
+/* IPv4 Identification field value, incremented once per datagram built
+ * in pim_msg_send(). */
+static uint16_t ip_id = 0;
+#endif
+
+#if PIM_IPV == 4
+/*
+ * IPv4: transmit a raw frame (IP header included by the caller).  If
+ * the kernel rejects it with EMSGSIZE, split the payload roughly in
+ * half on an 8-byte fragment boundary, set IP_MF / fragment offsets,
+ * and send both halves recursively.  Returns 0 on success, -1 on any
+ * other sendto() failure.
+ */
+static int pim_msg_send_frame(int fd, char *buf, size_t len,
+			      struct sockaddr *dst, size_t salen,
+			      const char *ifname)
+{
+	if (sendto(fd, buf, len, MSG_DONTWAIT, dst, salen) >= 0)
+		return 0;
+
+	if (errno == EMSGSIZE) {
+		struct ip *ip = (struct ip *)buf;
+		size_t hdrsize = sizeof(struct ip);
+		/* half the payload, rounded down to an 8-byte multiple as
+		 * required for IP fragment offsets */
+		size_t newlen1 = ((len - hdrsize) / 2) & 0xFFF8;
+		size_t sendlen = newlen1 + hdrsize;
+		size_t offset = ntohs(ip->ip_off);
+		int ret;
+
+		ip->ip_len = htons(sendlen);
+		ip->ip_off = htons(offset | IP_MF);
+
+		ret = pim_msg_send_frame(fd, buf, sendlen, dst, salen, ifname);
+		if (ret)
+			return ret;
+
+		/* second fragment: reuse the original header, placed right
+		 * before the remaining payload */
+		struct ip *ip2 = (struct ip *)(buf + newlen1);
+		size_t newlen2 = len - sendlen;
+
+		sendlen = newlen2 + hdrsize;
+
+		memcpy(ip2, ip, hdrsize);
+		ip2->ip_len = htons(sendlen);
+		ip2->ip_off = htons(offset + (newlen1 >> 3));
+		return pim_msg_send_frame(fd, (char *)ip2, sendlen, dst, salen,
+					  ifname);
+	}
+
+	zlog_warn(
+		"%s: sendto() failure to %pSU: iface=%s fd=%d msg_size=%zd: %m",
+		__func__, dst, ifname, fd, len);
+	return -1;
+}
+
+#else
+/*
+ * IPv6: transmit via sendmsg(), using an IPV6_PKTINFO ancillary message
+ * to pin the source address and outgoing interface.  Returns the
+ * sendmsg() result (negative on failure, which is also logged).
+ */
+static int pim_msg_send_frame(pim_addr src, pim_addr dst, ifindex_t ifindex,
+			      struct iovec *message, int fd)
+{
+	int retval;
+	struct msghdr smsghdr = {};
+	struct cmsghdr *scmsgp;
+	union cmsgbuf {
+		struct cmsghdr hdr;
+		uint8_t buf[CMSG_SPACE(sizeof(struct in6_pktinfo))];
+	};
+	struct in6_pktinfo *pktinfo;
+	struct sockaddr_in6 dst_sin6 = {};
+
+	union cmsgbuf cmsg_buf = {};
+
+	/* destination address */
+	dst_sin6.sin6_family = AF_INET6;
+#ifdef SIN6_LEN
+	dst_sin6.sin6_len = sizeof(struct sockaddr_in6);
+#endif /*SIN6_LEN*/
+	dst_sin6.sin6_addr = dst;
+	dst_sin6.sin6_scope_id = ifindex;
+
+	/* send msg hdr */
+	smsghdr.msg_iov = message;
+	smsghdr.msg_iovlen = 1;
+	smsghdr.msg_name = (caddr_t)&dst_sin6;
+	smsghdr.msg_namelen = sizeof(dst_sin6);
+	smsghdr.msg_control = (caddr_t)&cmsg_buf.buf;
+	smsghdr.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo));
+	smsghdr.msg_flags = 0;
+
+	scmsgp = CMSG_FIRSTHDR(&smsghdr);
+	scmsgp->cmsg_level = IPPROTO_IPV6;
+	scmsgp->cmsg_type = IPV6_PKTINFO;
+	scmsgp->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
+
+	pktinfo = (struct in6_pktinfo *)(CMSG_DATA(scmsgp));
+	pktinfo->ipi6_ifindex = ifindex;
+	pktinfo->ipi6_addr = src;
+
+	retval = sendmsg(fd, &smsghdr, 0);
+	if (retval < 0)
+		flog_err(
+			EC_LIB_SOCKET,
+			"sendmsg failed: source: %pI6 Dest: %pI6 ifindex: %d: %s (%d)",
+			&src, &dst, ifindex, safe_strerror(errno), errno);
+
+	return retval;
+}
+#endif
+
+/*
+ * Transmit a fully built PIM message (header + payload) out fd.
+ *
+ * IPv4: an IP header is prepended here by hand (the socket is put in
+ * raw-header mode via pim_socket_ip_hdr() in pim_sock_add()), with TTL
+ * chosen by message type -- 1 for link-local messages destined to
+ * ALL-PIM-ROUTERS, IPDEFTTL for unicast register traffic.
+ * IPv6: the kernel builds the header; we only pass an iovec.
+ *
+ * Always returns 0; transmit failures are logged inside
+ * pim_msg_send_frame().  Nothing is sent on passive interfaces.
+ */
+int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
+		 int pim_msg_size, struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+
+
+	pim_ifp = ifp->info;
+
+	if (pim_ifp->pim_passive_enable) {
+		if (PIM_DEBUG_PIM_PACKETS)
+			zlog_debug(
+				"skip sending PIM message on passive interface %s",
+				ifp->name);
+		return 0;
+	}
+
+#if PIM_IPV == 4
+	uint8_t ttl;
+	struct pim_msg_header *header;
+	unsigned char buffer[10000];
+
+	memset(buffer, 0, 10000);
+
+	header = (struct pim_msg_header *)pim_msg;
+
+/*
+ * Omnios apparently doesn't have a #define for IP default
+ * ttl that is the same as all other platforms.
+ */
+#ifndef IPDEFTTL
+#define IPDEFTTL 64
+#endif
+	/* TTL for packets destine to ALL-PIM-ROUTERS is 1 */
+	switch (header->type) {
+	case PIM_MSG_TYPE_HELLO:
+	case PIM_MSG_TYPE_JOIN_PRUNE:
+	case PIM_MSG_TYPE_BOOTSTRAP:
+	case PIM_MSG_TYPE_ASSERT:
+		ttl = 1;
+		break;
+	case PIM_MSG_TYPE_REGISTER:
+	case PIM_MSG_TYPE_REG_STOP:
+	case PIM_MSG_TYPE_GRAFT:
+	case PIM_MSG_TYPE_GRAFT_ACK:
+	case PIM_MSG_TYPE_CANDIDATE:
+		ttl = IPDEFTTL;
+		break;
+	default:
+		ttl = MAXTTL;
+		break;
+	}
+
+	struct ip *ip = (struct ip *)buffer;
+	struct sockaddr_in to = {};
+	int sendlen = sizeof(*ip) + pim_msg_size;
+	socklen_t tolen;
+	unsigned char *msg_start;
+
+	ip->ip_id = htons(++ip_id);
+	ip->ip_hl = 5;
+	ip->ip_v = 4;
+	ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
+	ip->ip_p = PIM_IP_PROTO_PIM;
+	ip->ip_src = src;
+	ip->ip_dst = dst;
+	ip->ip_ttl = ttl;
+	ip->ip_len = htons(sendlen);
+
+	to.sin_family = AF_INET;
+	to.sin_addr = dst;
+	tolen = sizeof(to);
+
+	/* PIM payload goes right after the 20-byte IP header */
+	msg_start = buffer + sizeof(*ip);
+	memcpy(msg_start, pim_msg, pim_msg_size);
+
+	if (PIM_DEBUG_PIM_PACKETS)
+		zlog_debug("%s: to %pPA on %s: msg_size=%d checksum=%x",
+			   __func__, &dst, ifp->name, pim_msg_size,
+			   header->checksum);
+
+	if (PIM_DEBUG_PIM_PACKETDUMP_SEND) {
+		pim_pkt_dump(__func__, pim_msg, pim_msg_size);
+	}
+
+	pim_msg_send_frame(fd, (char *)buffer, sendlen, (struct sockaddr *)&to,
+			   tolen, ifp->name);
+	return 0;
+
+#else
+	struct iovec iovector[2];
+
+	iovector[0].iov_base = pim_msg;
+	iovector[0].iov_len = pim_msg_size;
+
+	pim_msg_send_frame(src, dst, ifp->ifindex, &iovector[0], fd);
+
+	return 0;
+#endif
+}
+
+/*
+ * Build one PIM hello (option TLVs followed by the common header) and
+ * transmit it to ALL-PIM-ROUTERS on ifp.
+ *
+ * Returns 0 on success, -1 if the TLVs could not be built, -2 if the
+ * send failed.
+ */
+static int hello_send(struct interface *ifp, uint16_t holdtime)
+{
+	uint8_t pim_msg[PIM_PIM_BUFSIZE_WRITE];
+	struct pim_interface *pim_ifp;
+	int pim_tlv_size;
+	int pim_msg_size;
+
+	pim_ifp = ifp->info;
+
+	if (PIM_DEBUG_PIM_HELLO)
+		zlog_debug(
+			"%s: to %pPA on %s: holdt=%u prop_d=%u overr_i=%u dis_join_supp=%d dr_prio=%u gen_id=%08x addrs=%d",
+			__func__, &qpim_all_pim_routers_addr, ifp->name,
+			holdtime, pim_ifp->pim_propagation_delay_msec,
+			pim_ifp->pim_override_interval_msec,
+			pim_ifp->pim_can_disable_join_suppression,
+			pim_ifp->pim_dr_priority, pim_ifp->pim_generation_id,
+			listcount(ifp->connected));
+
+	/* TLVs are built past the header; the header is filled in last */
+	pim_tlv_size = pim_hello_build_tlv(
+		ifp, pim_msg + PIM_PIM_MIN_LEN,
+		sizeof(pim_msg) - PIM_PIM_MIN_LEN, holdtime,
+		pim_ifp->pim_dr_priority, pim_ifp->pim_generation_id,
+		pim_ifp->pim_propagation_delay_msec,
+		pim_ifp->pim_override_interval_msec,
+		pim_ifp->pim_can_disable_join_suppression);
+	if (pim_tlv_size < 0) {
+		return -1;
+	}
+
+	pim_msg_size = pim_tlv_size + PIM_PIM_MIN_LEN;
+
+	assert(pim_msg_size >= PIM_PIM_MIN_LEN);
+	assert(pim_msg_size <= PIM_PIM_BUFSIZE_WRITE);
+
+	pim_msg_build_header(pim_ifp->primary_address,
+			     qpim_all_pim_routers_addr, pim_msg, pim_msg_size,
+			     PIM_MSG_TYPE_HELLO, false);
+
+	if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
+			 qpim_all_pim_routers_addr, pim_msg, pim_msg_size,
+			 ifp)) {
+		if (PIM_DEBUG_PIM_HELLO) {
+			zlog_debug(
+				"%s: could not send PIM message on interface %s",
+				__func__, ifp->name);
+		}
+		return -2;
+	}
+
+	return 0;
+}
+
+/*
+ * Public hello-transmit entry point: send a hello on ifp and maintain
+ * the sent/sendfail statistics.  Loopback interfaces are skipped.
+ * Returns 0 on success (or skip), -1 on send failure.
+ */
+int pim_hello_send(struct interface *ifp, uint16_t holdtime)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+
+	if (if_is_loopback(ifp))
+		return 0;
+
+	if (hello_send(ifp, holdtime)) {
+		++pim_ifp->pim_ifstat_hello_sendfail;
+
+		if (PIM_DEBUG_PIM_HELLO) {
+			zlog_warn("Could not send PIM hello on interface %s",
+				  ifp->name);
+		}
+		return -1;
+	}
+
+	/* passive interfaces return success from pim_msg_send() without
+	 * actually transmitting, so don't count a hello as sent */
+	if (!pim_ifp->pim_passive_enable) {
+		++pim_ifp->pim_ifstat_hello_sent;
+		PIM_IF_FLAG_SET_HELLO_SENT(pim_ifp->flags);
+	}
+
+	return 0;
+}
+
+/* Cancel any pending hello timer and restart it for one full
+ * pim_hello_period. */
+static void hello_resched(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+
+	pim_ifp = ifp->info;
+
+	if (PIM_DEBUG_PIM_HELLO) {
+		zlog_debug("Rescheduling %d sec hello on interface %s",
+			   pim_ifp->pim_hello_period, ifp->name);
+	}
+	EVENT_OFF(pim_ifp->t_pim_hello_timer);
+	event_add_timer(router->master, on_pim_hello_send, ifp,
+			pim_ifp->pim_hello_period, &pim_ifp->t_pim_hello_timer);
+}
+
+/*
+  Periodic hello timer: reschedules itself first, then transmits a
+  hello with the interface's configured (or default) holdtime.
+ */
+static void on_pim_hello_send(struct event *t)
+{
+	struct pim_interface *pim_ifp;
+	struct interface *ifp;
+
+	ifp = EVENT_ARG(t);
+	pim_ifp = ifp->info;
+
+	/*
+	 * Schedule next hello
+	 */
+	hello_resched(ifp);
+
+	/*
+	 * Send hello
+	 */
+	pim_hello_send(ifp, PIM_IF_DEFAULT_HOLDTIME(pim_ifp));
+}
+
+/*
+  RFC 4601: 4.3.1.  Sending Hello Messages
+
+  Thus, if a router needs to send a Join/Prune or Assert message on an
+  interface on which it has not yet sent a Hello message with the
+  currently configured IP address, then it MUST immediately send the
+  relevant Hello message without waiting for the Hello Timer to
+  expire, followed by the Join/Prune or Assert message.
+ */
+/* Send a hello right away and restart the periodic hello timer. */
+void pim_hello_restart_now(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+
+	pim_ifp = ifp->info;
+
+	/*
+	 * Reset next hello timer
+	 */
+	hello_resched(ifp);
+
+	/*
+	 * Immediately send hello
+	 */
+	pim_hello_send(ifp, PIM_IF_DEFAULT_HOLDTIME(pim_ifp));
+}
+
+/*
+  RFC 4601: 4.3.1.  Sending Hello Messages
+
+  To allow new or rebooting routers to learn of PIM neighbors quickly,
+  when a Hello message is received from a new neighbor, or a Hello
+  message with a new GenID is received from an existing neighbor, a
+  new Hello message should be sent on this interface after a
+  randomized delay between 0 and Triggered_Hello_Delay.
+
+  NOTE: the randomized 0..Triggered_Hello_Delay is currently disabled
+  (see the commented-out lines below); the triggered hello is sent
+  after a fixed ~1 msec delay instead.
+ */
+void pim_hello_restart_triggered(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+	int triggered_hello_delay_msec;
+	int random_msec;
+
+	pim_ifp = ifp->info;
+
+	/*
+	 * No need to ever start loopback or vrf device hello's
+	 */
+	if (if_is_loopback(ifp))
+		return;
+
+	/*
+	 * There exists situations where we have the a RPF out this
+	 * interface, but we haven't formed a neighbor yet.  This
+	 * happens especially during interface flaps.  While
+	 * we would like to handle this more gracefully in other
+	 * parts of the code.  In order to get us up and running
+	 * let's just send the hello immediate'ish
+	 * This should be revisited when we get nexthop tracking
+	 * in and when we have a better handle on safely
+	 * handling the rpf information for upstreams that
+	 * we cannot legally reach yet.
+	 */
+	triggered_hello_delay_msec = 1;
+	// triggered_hello_delay_msec = 1000 *
+	// pim_ifp->pim_triggered_hello_delay;
+
+	if (pim_ifp->t_pim_hello_timer) {
+		long remain_msec =
+			pim_time_timer_remain_msec(pim_ifp->t_pim_hello_timer);
+		if (remain_msec <= triggered_hello_delay_msec) {
+			/* Rescheduling hello would increase the delay, then
+			   it's faster
+			   to just wait for the scheduled periodic hello. */
+			return;
+		}
+
+		EVENT_OFF(pim_ifp->t_pim_hello_timer);
+	}
+
+	random_msec = triggered_hello_delay_msec;
+	// random_msec = random() % (triggered_hello_delay_msec + 1);
+
+	if (PIM_DEBUG_PIM_HELLO) {
+		zlog_debug("Scheduling %d msec triggered hello on interface %s",
+			   random_msec, ifp->name);
+	}
+
+	event_add_timer_msec(router->master, on_pim_hello_send, ifp,
+			     random_msec, &pim_ifp->t_pim_hello_timer);
+}
+
+/*
+ * Bring PIM up on an interface: open the protocol socket, switch it to
+ * raw-IP-header mode, pick a fresh (guaranteed different) generation
+ * ID, start the read event and schedule an immediate triggered hello.
+ *
+ * Returns 0 on success, -1 if a socket already exists, -2 if the
+ * socket could not be opened.
+ */
+int pim_sock_add(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+	uint32_t old_genid;
+
+	pim_ifp = ifp->info;
+	assert(pim_ifp);
+
+	if (pim_ifp->pim_sock_fd >= 0) {
+		if (PIM_DEBUG_PIM_PACKETS)
+			zlog_debug(
+				"Can't recreate existing PIM socket fd=%d for interface %s",
+				pim_ifp->pim_sock_fd, ifp->name);
+		return -1;
+	}
+
+	pim_ifp->pim_sock_fd = pim_sock_open(ifp);
+	if (pim_ifp->pim_sock_fd < 0) {
+		if (PIM_DEBUG_PIM_PACKETS)
+			zlog_debug("Could not open PIM socket on interface %s",
+				   ifp->name);
+		return -2;
+	}
+
+	pim_socket_ip_hdr(pim_ifp->pim_sock_fd);
+
+	pim_ifp->t_pim_sock_read = NULL;
+	pim_ifp->pim_sock_creation = pim_time_monotonic_sec();
+
+	/*
+	 * Just ensure that the new generation id
+	 * actually chooses something different.
+	 * Actually ran across a case where this
+	 * happened, pre-switch to random().
+	 * While this is unlikely to happen now
+	 * let's make sure it doesn't.
+	 */
+	old_genid = pim_ifp->pim_generation_id;
+
+	while (old_genid == pim_ifp->pim_generation_id)
+		pim_ifp->pim_generation_id = frr_weak_random();
+
+	zlog_info("PIM INTERFACE UP: on interface %s ifindex=%d", ifp->name,
+		  ifp->ifindex);
+
+	/*
+	 * Start receiving PIM messages
+	 */
+	pim_sock_read_on(ifp);
+
+	/*
+	 * Start sending PIM hello's
+	 */
+	pim_hello_restart_triggered(ifp);
+
+	return 0;
+}
diff --git a/pimd/pim_pim.h b/pimd/pim_pim.h
new file mode 100644
index 0000000..35e6930
--- /dev/null
+++ b/pimd/pim_pim.h
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_PIM_H
+#define PIM_PIM_H
+
+#include <zebra.h>
+
+#include "if.h"
+
+/* receive/transmit buffer sizes for the PIM protocol socket */
+#define PIM_PIM_BUFSIZE_READ (20000)
+#define PIM_PIM_BUFSIZE_WRITE (20000)
+
+#define PIM_DEFAULT_HELLO_PERIOD (30) /* seconds, RFC 4601: 4.11 */
+#define PIM_DEFAULT_TRIGGERED_HELLO_DELAY (5) /* seconds, RFC 4601: 4.11 */
+#define PIM_DEFAULT_DR_PRIORITY (1) /* RFC 4601: 4.3.1 */
+#define PIM_DEFAULT_PROPAGATION_DELAY_MSEC (500) /* RFC 4601: 4.11. Timer Values */
+#define PIM_DEFAULT_OVERRIDE_INTERVAL_MSEC (2500) /* RFC 4601: 4.11. Timer Values */
+#define PIM_DEFAULT_CAN_DISABLE_JOIN_SUPPRESSION (0) /* boolean */
+#define PIM_DEFAULT_T_PERIODIC (60) /* RFC 4601: 4.11. Timer Values */
+
+/* PIM message type codes as carried in the common PIM header
+ * (HELLO is 0; values follow declaration order). */
+enum pim_msg_type {
+	PIM_MSG_TYPE_HELLO = 0,
+	PIM_MSG_TYPE_REGISTER,
+	PIM_MSG_TYPE_REG_STOP,
+	PIM_MSG_TYPE_JOIN_PRUNE,
+	PIM_MSG_TYPE_BOOTSTRAP,
+	PIM_MSG_TYPE_ASSERT,
+	PIM_MSG_TYPE_GRAFT,
+	PIM_MSG_TYPE_GRAFT_ACK,
+	PIM_MSG_TYPE_CANDIDATE
+};
+
+/* per-interface PIM socket lifecycle and hello/message entry points */
+void pim_ifstat_reset(struct interface *ifp);
+void pim_sock_reset(struct interface *ifp);
+int pim_sock_add(struct interface *ifp);
+void pim_sock_delete(struct interface *ifp, const char *delete_message);
+void pim_hello_restart_now(struct interface *ifp);
+void pim_hello_restart_triggered(struct interface *ifp);
+
+int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len,
+		   pim_sgaddr sg);
+
+int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
+		 int pim_msg_size, struct interface *ifp);
+
+int pim_hello_send(struct interface *ifp, uint16_t holdtime);
+#endif /* PIM_PIM_H */
diff --git a/pimd/pim_register.c b/pimd/pim_register.c
new file mode 100644
index 0000000..01da699
--- /dev/null
+++ b/pimd/pim_register.c
@@ -0,0 +1,756 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2015 Cumulus Networks, Inc.
+ * Donald Sharp
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "if.h"
+#include "frrevent.h"
+#include "prefix.h"
+#include "vty.h"
+#include "plist.h"
+
+#include "pimd.h"
+#include "pim_mroute.h"
+#include "pim_iface.h"
+#include "pim_msg.h"
+#include "pim_pim.h"
+#include "pim_str.h"
+#include "pim_rp.h"
+#include "pim_register.h"
+#include "pim_upstream.h"
+#include "pim_rpf.h"
+#include "pim_oil.h"
+#include "pim_zebra.h"
+#include "pim_join.h"
+#include "pim_util.h"
+#include "pim_ssm.h"
+#include "pim_vxlan.h"
+#include "pim_addr.h"
+
+struct event *send_test_packet_timer = NULL;
+
+/*
+ * Enter the Register-Join state for an upstream (S,G): add the register
+ * pseudo-interface to its outgoing interface list so data can be
+ * register-encapsulated toward the RP.  Skipped for SSM groups, which
+ * do not use the register machinery.
+ */
+void pim_register_join(struct pim_upstream *up)
+{
+	struct pim_instance *pim = up->channel_oil->pim;
+
+	if (pim_is_grp_ssm(pim, up->sg.grp)) {
+		if (PIM_DEBUG_PIM_EVENTS)
+			zlog_debug("%s register setup skipped as group is SSM",
+				   up->sg_str);
+		return;
+	}
+
+	pim_channel_add_oif(up->channel_oil, pim->regiface,
+			    PIM_OIF_FLAG_PROTO_PIM, __func__);
+	up->reg_state = PIM_REG_JOIN;
+	pim_vxlan_update_sg_reg_state(pim, up, true);
+}
+
+/*
+ * Build and unicast a Register-Stop message for (S,G) from src to the
+ * register originator, out the RP's register socket.  The group and
+ * source encoded-address fields are appended after the fixed
+ * Register-Stop header area, then the PIM header is filled in.
+ */
+void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg, pim_addr src,
+			    pim_addr originator)
+{
+	struct pim_interface *pinfo;
+	unsigned char buffer[10000];
+	unsigned int b1length = 0;
+	unsigned int length;
+	uint8_t *b1;
+
+	if (PIM_DEBUG_PIM_REG) {
+		zlog_debug("Sending Register stop for %pSG to %pPA on %s", sg,
+			   &originator, ifp->name);
+	}
+
+	memset(buffer, 0, 10000);
+	b1 = (uint8_t *)buffer + PIM_MSG_REGISTER_STOP_LEN;
+
+	/* NOTE(review): AFI_IP is passed unconditionally here, even in the
+	 * IPv6 build -- confirm pim_encode_addr_group derives the real AFI
+	 * itself. */
+	length = pim_encode_addr_group(b1, AFI_IP, 0, 0, sg->grp);
+	b1length += length;
+	b1 += length;
+
+	length = pim_encode_addr_ucast(b1, sg->src);
+	b1length += length;
+
+	pim_msg_build_header(src, originator, buffer,
+			     b1length + PIM_MSG_REGISTER_STOP_LEN,
+			     PIM_MSG_TYPE_REG_STOP, false);
+
+	pinfo = (struct pim_interface *)ifp->info;
+	if (!pinfo) {
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug("%s: No pinfo!", __func__);
+		return;
+	}
+	if (pim_msg_send(pinfo->pim->reg_sock, src, originator, buffer,
+			 b1length + PIM_MSG_REGISTER_STOP_LEN, ifp)) {
+		if (PIM_DEBUG_PIM_TRACE) {
+			zlog_debug(
+				"%s: could not send PIM register stop message on interface %s",
+				__func__, ifp->name);
+		}
+	}
+
+	if (!pinfo->pim_passive_enable)
+		++pinfo->pim_ifstat_reg_stop_send;
+}
+
+/*
+ * Apply a received Register-Stop to one upstream's register state
+ * machine: JOIN -> PRUNE (remove register oif, start the register-stop
+ * timer), JOIN_PENDING -> PRUNE (timer only).  NOINFO/PRUNE are left
+ * unchanged.
+ */
+static void pim_reg_stop_upstream(struct pim_instance *pim,
+				  struct pim_upstream *up)
+{
+	switch (up->reg_state) {
+	case PIM_REG_NOINFO:
+	case PIM_REG_PRUNE:
+		return;
+	case PIM_REG_JOIN:
+		up->reg_state = PIM_REG_PRUNE;
+		pim_channel_del_oif(up->channel_oil, pim->regiface,
+				    PIM_OIF_FLAG_PROTO_PIM, __func__);
+		pim_upstream_start_register_stop_timer(up, 0);
+		pim_vxlan_update_sg_reg_state(pim, up, false);
+		break;
+	case PIM_REG_JOIN_PENDING:
+		up->reg_state = PIM_REG_PRUNE;
+		pim_upstream_start_register_stop_timer(up, 0);
+		return;
+	}
+}
+
+int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+ struct pim_instance *pim = pim_ifp->pim;
+ struct pim_upstream *up = NULL;
+ struct pim_rpf *rp;
+ pim_sgaddr sg;
+ struct listnode *up_node;
+ struct pim_upstream *child;
+ bool wrong_af = false;
+ bool handling_star = false;
+ int l;
+
+ if (pim_ifp->pim_passive_enable) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "skip receiving PIM message on passive interface %s",
+ ifp->name);
+ return 0;
+ }
+
+ ++pim_ifp->pim_ifstat_reg_stop_recv;
+
+ memset(&sg, 0, sizeof(sg));
+ l = pim_parse_addr_group(&sg, buf, buf_size);
+ buf += l;
+ buf_size -= l;
+ pim_parse_addr_ucast(&sg.src, buf, buf_size, &wrong_af);
+
+ if (wrong_af) {
+ zlog_err("invalid AF in Register-Stop on %s", ifp->name);
+ return -1;
+ }
+
+
+ if (PIM_DEBUG_PIM_REG)
+ zlog_debug("Received Register stop for %pSG", &sg);
+
+ rp = RP(pim_ifp->pim, sg.grp);
+ if (rp) {
+ /* As per RFC 7761, Section 4.9.4:
+ * A special wildcard value consisting of an address field of
+ * all zeros can be used to indicate any source.
+ */
+ if ((pim_addr_cmp(sg.src, rp->rpf_addr) == 0) ||
+ pim_addr_is_any(sg.src)) {
+ handling_star = true;
+ sg.src = PIMADDR_ANY;
+ }
+ }
+
+ /*
+ * RFC 7761 Sec 4.4.1
+ * Handling Register-Stop(*,G) Messages at the DR:
+ * A Register-Stop(*,G) should be treated as a
+ * Register-Stop(S,G) for all (S,G) Register state
+ * machines that are not in the NoInfo state.
+ */
+ up = pim_upstream_find(pim, &sg);
+ if (up) {
+ /*
+ * If the upstream find actually found a particular
+ * S,G then we *know* that the following for loop
+ * is not going to execute and this is ok
+ */
+ for (ALL_LIST_ELEMENTS_RO(up->sources, up_node, child)) {
+ if (PIM_DEBUG_PIM_REG)
+ zlog_debug("Executing Reg stop for %s",
+ child->sg_str);
+
+ pim_reg_stop_upstream(pim, child);
+ }
+
+ if (PIM_DEBUG_PIM_REG)
+ zlog_debug("Executing Reg stop for %s", up->sg_str);
+ pim_reg_stop_upstream(pim, up);
+ } else {
+ if (!handling_star)
+ return 0;
+ /*
+ * Unfortunately pim was unable to find a *,G
+ * but pim may still actually have individual
+ * S,G's that need to be processed. In that
+ * case pim must do the expensive walk to find
+ * and stop
+ */
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ if (pim_addr_cmp(up->sg.grp, sg.grp) == 0) {
+ if (PIM_DEBUG_PIM_REG)
+ zlog_debug("Executing Reg stop for %s",
+ up->sg_str);
+ pim_reg_stop_upstream(pim, up);
+ }
+ }
+ }
+
+ return 0;
+}
+
+#if PIM_IPV == 6
+/*
+ * Pick a global-unicast IPv6 source address for Register messages
+ * (link-locals cannot be used toward the RP).  Prefers a global address
+ * on the RPF interface, then falls back to any PIM interface in the
+ * instance; returns PIMADDR_ANY (with a warning) if none exists.
+ */
+struct in6_addr pim_register_get_unicast_v6_addr(struct pim_interface *p_ifp)
+{
+	struct listnode *node;
+	struct listnode *nextnode;
+	struct pim_secondary_addr *sec_addr;
+	struct pim_interface *pim_ifp;
+	struct interface *ifp;
+	struct pim_instance *pim = p_ifp->pim;
+
+	/* Trying to get the unicast address from the RPF interface first */
+	for (ALL_LIST_ELEMENTS(p_ifp->sec_addr_list, node, nextnode,
+			       sec_addr)) {
+		if (!is_ipv6_global_unicast(&sec_addr->addr.u.prefix6))
+			continue;
+
+		return sec_addr->addr.u.prefix6;
+	}
+
+	/* Loop through all the pim interface and try to return a global
+	 * unicast ipv6 address
+	 */
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		pim_ifp = ifp->info;
+
+		if (!pim_ifp)
+			continue;
+
+		for (ALL_LIST_ELEMENTS(pim_ifp->sec_addr_list, node, nextnode,
+				       sec_addr)) {
+			if (!is_ipv6_global_unicast(&sec_addr->addr.u.prefix6))
+				continue;
+
+			return sec_addr->addr.u.prefix6;
+		}
+	}
+
+	zlog_warn("No global address found for use to send register message");
+	return PIMADDR_ANY;
+}
+#endif
+
+/*
+ * Encapsulate a data packet (or dummy header for a NULL-Register) in a
+ * PIM Register message and unicast it toward the RP via the RPF
+ * interface's register socket.  buf/buf_size is the inner packet that
+ * follows the Register's flag word.
+ */
+void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
+		       struct pim_rpf *rpg, int null_register,
+		       struct pim_upstream *up)
+{
+	unsigned char buffer[10000];
+	unsigned char *b1;
+	struct pim_interface *pinfo;
+	struct interface *ifp;
+
+	if (PIM_DEBUG_PIM_REG) {
+		zlog_debug("Sending %s %sRegister Packet to %pPA", up->sg_str,
+			   null_register ? "NULL " : "", &rpg->rpf_addr);
+	}
+
+	ifp = rpg->source_nexthop.interface;
+	if (!ifp) {
+		if (PIM_DEBUG_PIM_REG)
+			zlog_debug("%s: No interface to transmit register on",
+				   __func__);
+		return;
+	}
+	pinfo = (struct pim_interface *)ifp->info;
+	if (!pinfo) {
+		if (PIM_DEBUG_PIM_REG)
+			zlog_debug(
+				"%s: Interface: %s not configured for pim to transmit on!",
+				__func__, ifp->name);
+		return;
+	}
+
+	if (PIM_DEBUG_PIM_REG) {
+		zlog_debug("%s: Sending %s %sRegister Packet to %pPA on %s",
+			   __func__, up->sg_str, null_register ? "NULL " : "",
+			   &rpg->rpf_addr, ifp->name);
+	}
+
+	memset(buffer, 0, 10000);
+	/* set the N (null-register) bit in the flag word that follows the
+	 * PIM header, then copy the inner packet after it */
+	b1 = buffer + PIM_MSG_HEADER_LEN;
+	*b1 |= null_register << 6;
+	b1 = buffer + PIM_MSG_REGISTER_LEN;
+
+	memcpy(b1, (const unsigned char *)buf, buf_size);
+
+#if PIM_IPV == 6
+	/* While sending Register message to RP, we cannot use link-local
+	 * address therefore using unicast ipv6 address here, choosing it
+	 * from the RPF Interface
+	 */
+	src = pim_register_get_unicast_v6_addr(pinfo);
+#endif
+	pim_msg_build_header(src, rpg->rpf_addr, buffer,
+			     buf_size + PIM_MSG_REGISTER_LEN,
+			     PIM_MSG_TYPE_REGISTER, false);
+
+	if (!pinfo->pim_passive_enable)
+		++pinfo->pim_ifstat_reg_send;
+
+	if (pim_msg_send(pinfo->pim->reg_sock, src, rpg->rpf_addr, buffer,
+			 buf_size + PIM_MSG_REGISTER_LEN, ifp)) {
+		if (PIM_DEBUG_PIM_TRACE) {
+			zlog_debug(
+				"%s: could not send PIM register message on interface %s",
+				__func__, ifp->name);
+		}
+		return;
+	}
+}
+
+#if PIM_IPV == 4
+/*
+ * Send a NULL-Register for an upstream (S,G): a Register whose payload
+ * is just a dummy IPv4 header carrying the inner (S,G), used to probe
+ * the RP while in register-prune state.  For VxLAN-originated sources
+ * the register source may be overridden by the VxLAN PIP.
+ */
+void pim_null_register_send(struct pim_upstream *up)
+{
+	struct ip ip_hdr;
+	struct pim_interface *pim_ifp;
+	struct pim_rpf *rpg;
+	pim_addr src;
+
+	pim_ifp = up->rpf.source_nexthop.interface->info;
+	if (!pim_ifp) {
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug(
+				"%s: Cannot send null-register for %s no valid iif",
+				__func__, up->sg_str);
+		return;
+	}
+
+	rpg = RP(pim_ifp->pim, up->sg.grp);
+	if (!rpg) {
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug(
+				"%s: Cannot send null-register for %s no RPF to the RP",
+				__func__, up->sg_str);
+		return;
+	}
+
+	memset(&ip_hdr, 0, sizeof(ip_hdr));
+	ip_hdr.ip_p = PIM_IP_PROTO_PIM;
+	ip_hdr.ip_hl = 5;
+	ip_hdr.ip_v = 4;
+	ip_hdr.ip_src = up->sg.src;
+	ip_hdr.ip_dst = up->sg.grp;
+	ip_hdr.ip_len = htons(20);
+
+	/* checksum is broken */
+	src = pim_ifp->primary_address;
+	if (PIM_UPSTREAM_FLAG_TEST_SRC_VXLAN_ORIG(up->flags)) {
+		if (!pim_vxlan_get_register_src(pim_ifp->pim, up, &src)) {
+			if (PIM_DEBUG_PIM_TRACE)
+				zlog_debug(
+					"%s: Cannot send null-register for %s vxlan-aa PIP unavailable",
+					__func__, up->sg_str);
+			return;
+		}
+	}
+	pim_register_send((uint8_t *)&ip_hdr, sizeof(struct ip), src, rpg, 1,
+			  up);
+}
+#else
+/*
+ * IPv6 NULL-Register: per RFC (see pim_register_recv), the payload is a
+ * dummy IPv6 header followed by a dummy PIM header whose checksum is
+ * computed over an IPv6 pseudo-header for the inner (S,G).
+ */
+void pim_null_register_send(struct pim_upstream *up)
+{
+	struct ip6_hdr ip6_hdr;
+	struct pim_msg_header pim_msg_header;
+	struct pim_interface *pim_ifp;
+	struct pim_rpf *rpg;
+	pim_addr src;
+	unsigned char buffer[sizeof(ip6_hdr) + sizeof(pim_msg_header)];
+	struct ipv6_ph ph;
+
+	pim_ifp = up->rpf.source_nexthop.interface->info;
+	if (!pim_ifp) {
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug(
+				"Cannot send null-register for %s no valid iif",
+				up->sg_str);
+		return;
+	}
+
+	rpg = RP(pim_ifp->pim, up->sg.grp);
+	if (!rpg) {
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug(
+				"Cannot send null-register for %s no RPF to the RP",
+				up->sg_str);
+		return;
+	}
+
+	memset(&ip6_hdr, 0, sizeof(ip6_hdr));
+	ip6_hdr.ip6_nxt = PIM_IP_PROTO_PIM;
+	ip6_hdr.ip6_plen = PIM_MSG_HEADER_LEN;
+	ip6_hdr.ip6_vfc = 6 << 4;
+	ip6_hdr.ip6_hlim = MAXTTL;
+	ip6_hdr.ip6_src = up->sg.src;
+	ip6_hdr.ip6_dst = up->sg.grp;
+
+	memset(buffer, 0, (sizeof(ip6_hdr) + sizeof(pim_msg_header)));
+	memcpy(buffer, &ip6_hdr, sizeof(ip6_hdr));
+
+	memset(&pim_msg_header, 0, sizeof(pim_msg_header));
+	memset(&ph, 0, sizeof(ph));
+
+	ph.src = up->sg.src;
+	ph.dst = up->sg.grp;
+	ph.ulpl = htonl(PIM_MSG_HEADER_LEN);
+	ph.next_hdr = IPPROTO_PIM;
+	pim_msg_header.checksum =
+		in_cksum_with_ph6(&ph, &pim_msg_header, PIM_MSG_HEADER_LEN);
+
+	memcpy(buffer + sizeof(ip6_hdr), &pim_msg_header, PIM_MSG_HEADER_LEN);
+
+
+	src = pim_ifp->primary_address;
+	pim_register_send((uint8_t *)buffer,
+			  sizeof(ip6_hdr) + PIM_MSG_HEADER_LEN, src, rpg, 1,
+			  up);
+}
+#endif
+
+/*
+ * 4.4.2 Receiving Register Messages at the RP
+ *
+ * When an RP receives a Register message, the course of action is
+ * decided according to the following pseudocode:
+ *
+ * packet_arrives_on_rp_tunnel( pkt ) {
+ * if( outer.dst is not one of my addresses ) {
+ * drop the packet silently.
+ * # Note: this may be a spoofing attempt
+ * }
+ * if( I_am_RP(G) AND outer.dst == RP(G) ) {
+ * sentRegisterStop = false;
+ * if ( register.borderbit == true ) {
+ * if ( PMBR(S,G) == unknown ) {
+ * PMBR(S,G) = outer.src
+ * } else if ( outer.src != PMBR(S,G) ) {
+ * send Register-Stop(S,G) to outer.src
+ * drop the packet silently.
+ * }
+ * }
+ * if ( SPTbit(S,G) OR
+ * ( SwitchToSptDesired(S,G) AND
+ * ( inherited_olist(S,G) == NULL ))) {
+ * send Register-Stop(S,G) to outer.src
+ * sentRegisterStop = true;
+ * }
+ * if ( SPTbit(S,G) OR SwitchToSptDesired(S,G) ) {
+ * if ( sentRegisterStop == true ) {
+ * set KeepaliveTimer(S,G) to RP_Keepalive_Period;
+ * } else {
+ * set KeepaliveTimer(S,G) to Keepalive_Period;
+ * }
+ * }
+ * if( !SPTbit(S,G) AND ! pkt.NullRegisterBit ) {
+ * decapsulate and forward the inner packet to
+ * inherited_olist(S,G,rpt) # Note (+)
+ * }
+ * } else {
+ * send Register-Stop(S,G) to outer.src
+ * # Note (*)
+ * }
+ * }
+ */
+int pim_register_recv(struct interface *ifp, pim_addr dest_addr,
+ pim_addr src_addr, uint8_t *tlv_buf, int tlv_buf_size)
+{
+ int sentRegisterStop = 0;
+ const void *ip_hdr;
+ pim_sgaddr sg;
+ uint32_t *bits;
+ int i_am_rp = 0;
+ struct pim_interface *pim_ifp = ifp->info;
+ struct pim_instance *pim = pim_ifp->pim;
+ pim_addr rp_addr;
+ struct pim_rpf *rpg;
+
+ if (pim_ifp->pim_passive_enable) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "skip receiving PIM message on passive interface %s",
+ ifp->name);
+ return 0;
+ }
+
+#define PIM_MSG_REGISTER_BIT_RESERVED_LEN 4
+ ip_hdr = (tlv_buf + PIM_MSG_REGISTER_BIT_RESERVED_LEN);
+
+ if (!if_address_is_local(&dest_addr, PIM_AF, pim->vrf->vrf_id)) {
+ if (PIM_DEBUG_PIM_REG)
+ zlog_debug(
+ "%s: Received Register message for destination address: %pPA that I do not own",
+ __func__, &dest_addr);
+ return 0;
+ }
+
+ ++pim_ifp->pim_ifstat_reg_recv;
+
+ /*
+ * Please note this is not drawn to get the correct bit/data size
+ *
+ * The entirety of the REGISTER packet looks like this:
+ * -------------------------------------------------------------
+ * | Ver | Type | Reserved | Checksum |
+ * |-----------------------------------------------------------|
+ * |B|N| Reserved 2 |
+ * |-----------------------------------------------------------|
+ * | Encap | IP HDR |
+ * | Mcast | |
+ * | Packet |--------------------------------------------------|
+ * | | Mcast Data |
+ * | | |
+ * ...
+ *
+ * tlv_buf when received from the caller points at the B bit
+ * We need to know the inner source and dest
+ */
+ bits = (uint32_t *)tlv_buf;
+
+ /*
+ * tlv_buf points to the start of the |B|N|... Reserved
+ * Line above. So we need to add 4 bytes to get to the
+ * start of the actual Encapsulated data.
+ */
+ memset(&sg, 0, sizeof(sg));
+ sg = pim_sgaddr_from_iphdr(ip_hdr);
+
+#if PIM_IPV == 6
+ /*
+ * According to RFC section 4.9.3, If Dummy PIM Header is included
+ * in NULL Register as a payload there would be two PIM headers.
+ * The inner PIM Header's checksum field should also be validated
+ * in addition to the outer PIM Header's checksum. Validation of
+ * inner PIM header checksum is done here.
+ */
+ if ((*bits & PIM_REGISTER_NR_BIT) &&
+ ((tlv_buf_size - PIM_MSG_REGISTER_BIT_RESERVED_LEN) >
+ (int)sizeof(struct ip6_hdr))) {
+ uint16_t computed_checksum;
+ uint16_t received_checksum;
+ struct ipv6_ph ph;
+ struct pim_msg_header *header;
+
+ header = (struct pim_msg_header
+ *)(tlv_buf +
+ PIM_MSG_REGISTER_BIT_RESERVED_LEN +
+ sizeof(struct ip6_hdr));
+ ph.src = sg.src;
+ ph.dst = sg.grp;
+ ph.ulpl = htonl(PIM_MSG_HEADER_LEN);
+ ph.next_hdr = IPPROTO_PIM;
+
+ received_checksum = header->checksum;
+
+ header->checksum = 0;
+ computed_checksum = in_cksum_with_ph6(
+ &ph, header, htonl(PIM_MSG_HEADER_LEN));
+
+ if (computed_checksum != received_checksum) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "Ignoring Null Register message%pSG from %pPA due to bad checksum in Encapsulated dummy PIM header",
+ &sg, &src_addr);
+ return 0;
+ }
+ }
+#endif
+ i_am_rp = I_am_RP(pim, sg.grp);
+
+ if (PIM_DEBUG_PIM_REG)
+ zlog_debug(
+ "Received Register message%pSG from %pPA on %s, rp: %d",
+ &sg, &src_addr, ifp->name, i_am_rp);
+
+ if (pim_is_grp_ssm(pim_ifp->pim, sg.grp)) {
+ if (pim_addr_is_any(sg.src)) {
+ zlog_warn(
+ "%s: Received Register message for Group(%pPA) is now in SSM, dropping the packet",
+ __func__, &sg.grp);
+ /* Drop Packet Silently */
+ return 0;
+ }
+ }
+
+ rpg = RP(pim, sg.grp);
+ if (!rpg) {
+ zlog_warn("%s: Received Register Message %pSG from %pPA on %s where the RP could not be looked up",
+ __func__, &sg, &src_addr, ifp->name);
+ return 0;
+ }
+
+ rp_addr = rpg->rpf_addr;
+ if (i_am_rp && (!pim_addr_cmp(dest_addr, rp_addr))) {
+ sentRegisterStop = 0;
+
+ if (pim->register_plist) {
+ struct prefix_list *plist;
+ struct prefix src;
+
+ plist = prefix_list_lookup(PIM_AFI,
+ pim->register_plist);
+
+ pim_addr_to_prefix(&src, sg.src);
+
+ if (prefix_list_apply_ext(plist, NULL, &src, true) ==
+ PREFIX_DENY) {
+ pim_register_stop_send(ifp, &sg, dest_addr,
+ src_addr);
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "%s: Sending register-stop to %pPA for %pSG due to prefix-list denial, dropping packet",
+ __func__, &src_addr, &sg);
+
+ return 0;
+ }
+ }
+
+ if (*bits & PIM_REGISTER_BORDER_BIT) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "%s: Received Register message with Border bit set, ignoring",
+ __func__);
+
+ /* Drop Packet Silently */
+ return 0;
+ }
+
+ struct pim_upstream *upstream = pim_upstream_find(pim, &sg);
+ /*
+ * If we don't have a place to send ignore the packet
+ */
+ if (!upstream) {
+ upstream = pim_upstream_add(
+ pim, &sg, ifp,
+ PIM_UPSTREAM_FLAG_MASK_SRC_STREAM, __func__,
+ NULL);
+ if (!upstream) {
+ zlog_warn("Failure to create upstream state");
+ return 1;
+ }
+
+ upstream->upstream_register = src_addr;
+ } else {
+ /*
+ * If the FHR has set a very very fast register timer
+ * there exists a possibility that the incoming NULL
+ * register
+ * is happening before we set the spt bit. If so
+ * Do a quick check to update the counters and
+ * then set the spt bit as appropriate
+ */
+ if (upstream->sptbit != PIM_UPSTREAM_SPTBIT_TRUE) {
+ pim_mroute_update_counters(
+ upstream->channel_oil);
+ /*
+ * Have we seen packets?
+ */
+ if (upstream->channel_oil->cc.oldpktcnt
+ < upstream->channel_oil->cc.pktcnt)
+ pim_upstream_set_sptbit(
+ upstream,
+ upstream->rpf.source_nexthop
+ .interface);
+ }
+ }
+
+ if ((upstream->sptbit == PIM_UPSTREAM_SPTBIT_TRUE)
+ || ((SwitchToSptDesiredOnRp(pim, &sg))
+ && pim_upstream_inherited_olist(pim, upstream) == 0)) {
+ pim_register_stop_send(ifp, &sg, dest_addr, src_addr);
+ sentRegisterStop = 1;
+ } else {
+ if (PIM_DEBUG_PIM_REG)
+ zlog_debug("(%s) sptbit: %d", upstream->sg_str,
+ upstream->sptbit);
+ }
+ if ((upstream->sptbit == PIM_UPSTREAM_SPTBIT_TRUE)
+ || (SwitchToSptDesiredOnRp(pim, &sg))) {
+ if (sentRegisterStop) {
+ pim_upstream_keep_alive_timer_start(
+ upstream, pim->rp_keep_alive_time);
+ } else {
+ pim_upstream_keep_alive_timer_start(
+ upstream, pim->keep_alive_time);
+ }
+ }
+
+ if (!(upstream->sptbit == PIM_UPSTREAM_SPTBIT_TRUE)
+ && !(*bits & PIM_REGISTER_NR_BIT)) {
+			// decapsulate and forward the inner packet to
+			// inherited_olist(S,G,rpt)
+			// This is taken care of by the kernel for us
+ }
+ pim_upstream_msdp_reg_timer_start(upstream);
+ } else {
+ if (PIM_DEBUG_PIM_REG) {
+ if (!i_am_rp)
+ zlog_debug("Received Register packet for %pSG, Rejecting packet because I am not the RP configured for group",
+ &sg);
+ else
+ zlog_debug("Received Register packet for %pSG, Rejecting packet because the dst ip address is not the actual RP",
+ &sg);
+ }
+ pim_register_stop_send(ifp, &sg, dest_addr, src_addr);
+ }
+
+ return 0;
+}
+
+/*
+ * This routine scan all upstream and update register state and remove pimreg
+ * when couldreg becomes false.
+ */
+void pim_reg_del_on_couldreg_fail(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+	struct pim_instance *pim;
+	struct pim_upstream *up;
+
+	/* Interface is not (or no longer) PIM-enabled: nothing to do. */
+	if (!pim_ifp)
+		return;
+
+	pim = pim_ifp->pim;
+
+	/* Walk all upstream state; only entries whose RPF interface is
+	 * this interface are affected.
+	 */
+	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+		if (ifp != up->rpf.source_nexthop.interface)
+			continue;
+
+		/* Registration is no longer possible but the register
+		 * state machine still holds state: remove pimreg from
+		 * the OIL, cancel the register-stop timer, reset the
+		 * state machine and clear the First-Hop-Router flag.
+		 */
+		if (!pim_upstream_could_register(up)
+		    && (up->reg_state != PIM_REG_NOINFO)) {
+			pim_channel_del_oif(up->channel_oil, pim->regiface,
+					    PIM_OIF_FLAG_PROTO_PIM, __func__);
+			EVENT_OFF(up->t_rs_timer);
+			up->reg_state = PIM_REG_NOINFO;
+			PIM_UPSTREAM_FLAG_UNSET_FHR(up->flags);
+		}
+	}
+}
diff --git a/pimd/pim_register.h b/pimd/pim_register.h
new file mode 100644
index 0000000..d1240d7
--- /dev/null
+++ b/pimd/pim_register.h
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2015 Cumulus Networks, Inc.
+ * Donald Sharp
+ */
+#ifndef PIM_REGISTER_H
+#define PIM_REGISTER_H
+
+#include <zebra.h>
+
+#include "if.h"
+
+/* Flag bits carried in the first 32-bit word of a REGISTER payload
+ * (the |B|N|Reserved| word): Border bit and Null-Register bit.
+ */
+#define PIM_REGISTER_BORDER_BIT 0x80000000
+#define PIM_REGISTER_NR_BIT 0x40000000
+
+/* Fixed sizes (octets) of the REGISTER / REGISTER-STOP message bodies. */
+#define PIM_MSG_REGISTER_LEN (8)
+#define PIM_MSG_REGISTER_STOP_LEN (4)
+
+/* Handle a received REGISTER-STOP message on the given interface. */
+int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size);
+
+/* Handle a received REGISTER message; tlv_buf points at the |B|N| word. */
+int pim_register_recv(struct interface *ifp, pim_addr dest_addr,
+		      pim_addr src_addr, uint8_t *tlv_buf, int tlv_buf_size);
+#if PIM_IPV == 6
+/* Pick a global unicast v6 source address for register messages. */
+struct in6_addr pim_register_get_unicast_v6_addr(struct pim_interface *p_ifp);
+#endif
+/* Encapsulate and send a (null-)REGISTER towards the RP. */
+void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
+		       struct pim_rpf *rpg, int null_register,
+		       struct pim_upstream *up);
+/* Send a REGISTER-STOP for sg back to the originating DR. */
+void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg, pim_addr src,
+			    pim_addr originator);
+void pim_register_join(struct pim_upstream *up);
+void pim_null_register_send(struct pim_upstream *up);
+/* Tear down register state on interfaces where CouldRegister became false. */
+void pim_reg_del_on_couldreg_fail(struct interface *ifp);
+
+#endif
diff --git a/pimd/pim_routemap.c b/pimd/pim_routemap.c
new file mode 100644
index 0000000..bd1a24e
--- /dev/null
+++ b/pimd/pim_routemap.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* PIM Route-map Code
+ * Copyright (C) 2016 Cumulus Networks <sharpd@cumulusnetworks.com>
+ * Copyright (C) 1999 Kunihiro Ishiguro <kunihiro@zebra.org>
+ *
+ * This file is part of Quagga
+ */
+#include <zebra.h>
+
+#include "if.h"
+#include "vty.h"
+#include "routemap.h"
+
+#include "pimd.h"
+
+/* route-map creation hook: re-evaluate anything depending on rmap_name. */
+static void pim_route_map_add(const char *rmap_name)
+{
+	route_map_notify_dependencies(rmap_name, RMAP_EVENT_MATCH_ADDED);
+}
+
+/* route-map deletion hook: notify dependents that the map went away. */
+static void pim_route_map_delete(const char *rmap_name)
+{
+	route_map_notify_dependencies(rmap_name, RMAP_EVENT_MATCH_DELETED);
+}
+
+/* route-map change hook: treat any edit as a match-added re-evaluation. */
+static void pim_route_map_event(const char *rmap_name)
+{
+	route_map_notify_dependencies(rmap_name, RMAP_EVENT_MATCH_ADDED);
+}
+
+/* Initialize the route-map library and register pimd's add/delete/change
+ * notification hooks. Called once at daemon startup.
+ */
+void pim_route_map_init(void)
+{
+	route_map_init();
+
+	route_map_add_hook(pim_route_map_add);
+	route_map_delete_hook(pim_route_map_delete);
+	route_map_event_hook(pim_route_map_event);
+}
+
+/* Release all route-map library state at daemon shutdown. */
+void pim_route_map_terminate(void)
+{
+	route_map_finish();
+}
diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c
new file mode 100644
index 0000000..c751624
--- /dev/null
+++ b/pimd/pim_rp.c
@@ -0,0 +1,1328 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2015 Cumulus Networks, Inc.
+ * Donald Sharp
+ */
+#include <zebra.h>
+
+#include "lib/json.h"
+#include "log.h"
+#include "network.h"
+#include "if.h"
+#include "linklist.h"
+#include "prefix.h"
+#include "memory.h"
+#include "vty.h"
+#include "vrf.h"
+#include "plist.h"
+#include "nexthop.h"
+#include "table.h"
+#include "lib_errors.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_vty.h"
+#include "pim_str.h"
+#include "pim_iface.h"
+#include "pim_rp.h"
+#include "pim_rpf.h"
+#include "pim_sock.h"
+#include "pim_memory.h"
+#include "pim_neighbor.h"
+#include "pim_msdp.h"
+#include "pim_nht.h"
+#include "pim_mroute.h"
+#include "pim_oil.h"
+#include "pim_zebra.h"
+#include "pim_bsm.h"
+#include "pim_util.h"
+#include "pim_ssm.h"
+#include "termtable.h"
+
+/* Cleanup pim->rpf_hash each node data */
+void pim_rp_list_hash_clean(void *data)
+{
+ struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;
+
+ list_delete(&pnc->rp_list);
+
+ hash_clean_and_free(&pnc->upstream_hash, NULL);
+ if (pnc->nexthop)
+ nexthops_free(pnc->nexthop);
+
+ XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
+}
+
+/* Destructor for an rp_info entry: frees the optional prefix-list name
+ * (XFREE is a no-op on NULL) and the entry itself. Used as rp_list->del.
+ */
+static void pim_rp_info_free(struct rp_info *rp_info)
+{
+	XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
+
+	XFREE(MTYPE_PIM_RP, rp_info);
+}
+
+/* Comparator for pim->rp_list (sorted linked list): orders entries first
+ * by RP address, then by group prefix. Returns <0/0/>0 like strcmp.
+ */
+int pim_rp_list_cmp(void *v1, void *v2)
+{
+	struct rp_info *rp1 = (struct rp_info *)v1;
+	struct rp_info *rp2 = (struct rp_info *)v2;
+	int ret;
+
+	/*
+	 * Sort by RP IP address
+	 */
+	ret = pim_addr_cmp(rp1->rp.rpf_addr, rp2->rp.rpf_addr);
+	if (ret)
+		return ret;
+
+	/*
+	 * Sort by group IP address
+	 */
+	ret = prefix_cmp(&rp1->group, &rp2->group);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/* Initialize per-instance RP state: the sorted rp_list, the group-prefix
+ * route table, and a catch-all entry for the all-multicast group range
+ * (224.0.0.0/4 resp. ff00::/8) with RP address "any" (i.e. unconfigured).
+ */
+void pim_rp_init(struct pim_instance *pim)
+{
+	struct rp_info *rp_info;
+	struct route_node *rn;
+
+	pim->rp_list = list_new();
+	pim->rp_list->del = (void (*)(void *))pim_rp_info_free;
+	pim->rp_list->cmp = pim_rp_list_cmp;
+
+	pim->rp_table = route_table_init();
+
+	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
+
+	/* Should never fail; if it does, undo everything allocated above. */
+	if (!pim_get_all_mcast_group(&rp_info->group)) {
+		flog_err(EC_LIB_DEVELOPMENT,
+			 "Unable to convert all-multicast prefix");
+		list_delete(&pim->rp_list);
+		route_table_finish(pim->rp_table);
+		XFREE(MTYPE_PIM_RP, rp_info);
+		return;
+	}
+	rp_info->rp.rpf_addr = PIMADDR_ANY;
+
+	listnode_add(pim->rp_list, rp_info);
+
+	/* Anchor the catch-all entry in the route table; the node keeps
+	 * its creation lock for as long as rn->info points at rp_info.
+	 */
+	rn = route_node_get(pim->rp_table, &rp_info->group);
+	rn->info = rp_info;
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
+			   rp_info, &rp_info->group,
+			   route_node_get_lock_count(rn));
+}
+
+/* Tear down per-instance RP state. list_delete() invokes rp_list->del
+ * (pim_rp_info_free) on every remaining entry.
+ */
+void pim_rp_free(struct pim_instance *pim)
+{
+	if (pim->rp_table)
+		route_table_finish(pim->rp_table);
+	pim->rp_table = NULL;
+
+	if (pim->rp_list)
+		list_delete(&pim->rp_list);
+}
+
+/*
+ * Given an RP's prefix-list, return the RP's rp_info for that prefix-list
+ */
+static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
+					       pim_addr rp, const char *plist)
+{
+	struct listnode *node;
+	struct rp_info *rp_info;
+
+	/* Linear scan; entry matches when both RP address and prefix-list
+	 * name are equal. Returns NULL when not configured.
+	 */
+	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
+		if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
+		    rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
+			return rp_info;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Return true if plist is used by any rp_info
+ */
+/* Return 1 if any configured rp_info references prefix-list 'plist',
+ * else 0. Used to reject configuring the same plist for two RPs.
+ */
+static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist)
+{
+	struct listnode *node;
+	struct rp_info *rp_info;
+
+	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
+		if (rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Given an RP's address, return the RP's rp_info that is an exact match for
+ * 'group'
+ */
+static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
+					 const struct prefix *group)
+{
+	struct listnode *node;
+	struct rp_info *rp_info;
+
+	/* Exact match on both RP address and group prefix (prefix_same,
+	 * not longest-prefix); NULL when no such entry exists.
+	 */
+	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
+		if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
+		    prefix_same(&rp_info->group, group))
+			return rp_info;
+	}
+
+	return NULL;
+}
+
+/*
+ * XXX: long-term issue: we don't actually have a good "ip address-list"
+ * implementation. ("access-list XYZ" is the closest but honestly it's
+ * kinda garbage.)
+ *
+ * So it's using a prefix-list to match an address here, which causes very
+ * unexpected results for the user since prefix-lists by default only match
+ * when the prefix length is an exact match too. i.e. you'd have to add the
+ * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32"
+ *
+ * To avoid this pitfall, this code uses "address_mode = true" for the prefix
+ * list match (this is the only user for that.)
+ *
+ * In the long run, we need to add a "ip address-list", but that's a wholly
+ * separate bag of worms, and existing configs using ip prefix-list would
+ * drop into the UX pitfall.
+ */
+
+#include "lib/plist_int.h"
+
+/*
+ * Given a group, return the rp_info for that group
+ */
+struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
+					const struct prefix *group)
+{
+	struct listnode *node;
+	struct rp_info *best = NULL;
+	struct rp_info *rp_info;
+	struct prefix_list *plist;
+	const struct prefix *bp;
+	const struct prefix_list_entry *entry;
+	struct route_node *rn;
+
+	/* Pass 1: among prefix-list based RPs, pick the permitting entry
+	 * with the longest matched prefix (address-mode match, see note
+	 * above). 'bp' tracks the best matched prefix so far.
+	 */
+	bp = NULL;
+	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
+		if (rp_info->plist) {
+			plist = prefix_list_lookup(PIM_AFI, rp_info->plist);
+
+			if (prefix_list_apply_ext(plist, &entry, group, true)
+			    == PREFIX_DENY || !entry)
+				continue;
+
+			if (!best) {
+				best = rp_info;
+				bp = &entry->prefix;
+				continue;
+			}
+
+			if (bp && bp->prefixlen < entry->prefix.prefixlen) {
+				best = rp_info;
+				bp = &entry->prefix;
+			}
+		}
+	}
+
+	/* Pass 2: longest-prefix match in the group-range route table.
+	 * The catch-all entry installed by pim_rp_init() guarantees a hit.
+	 */
+	rn = route_node_match(pim->rp_table, group);
+	if (!rn) {
+		flog_err(
+			EC_LIB_DEVELOPMENT,
+			"%s: BUG We should have found default group information",
+			__func__);
+		return best;
+	}
+
+	rp_info = rn->info;
+	if (PIM_DEBUG_PIM_TRACE) {
+		if (best)
+			zlog_debug(
+				"Lookedup(%pFX): prefix_list match %s, rn %p found: %pFX",
+				group, best->plist, rn, &rp_info->group);
+		else
+			zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group,
+				   rn, &rp_info->group);
+	}
+
+	/* Drop the reference route_node_match() took. */
+	route_unlock_node(rn);
+
+	/*
+	 * rp's with prefix lists have the group as 224.0.0.0/4 which will
+	 * match anything. So if we have a rp_info that should match a prefix
+	 * list then if we do match then best should be the answer( even
+	 * if it is NULL )
+	 */
+	if (!rp_info || (rp_info && rp_info->plist))
+		return best;
+
+	/*
+	 * So we have a non plist rp_info found in the lookup and no plists
+	 * at all to be choosen, return it!
+	 */
+	if (!best)
+		return rp_info;
+
+	/*
+	 * If we have a matching non prefix list and a matching prefix
+	 * list we should return the actual rp_info that has the LPM
+	 * If they are equal, use the prefix-list( but let's hope
+	 * the end-operator doesn't do this )
+	 */
+	if (rp_info->group.prefixlen > bp->prefixlen)
+		best = rp_info;
+
+	return best;
+}
+
+/*
+ * When the user makes "ip pim rp" configuration changes or if they change the
+ * prefix-list(s) used by these statements we must tickle the upstream state
+ * for each group to make them re-lookup who their RP should be.
+ *
+ * This is a placeholder function for now.
+ */
+void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
+{
+	/* Propagate RP-set changes: MSDP needs to know whether we are an
+	 * RP, and (*,G)/(S,G,rpt) upstreams must re-evaluate RPT usage.
+	 */
+	pim_msdp_i_am_rp_changed(pim);
+	pim_upstream_reeval_use_rpt(pim);
+}
+
+/* Called when prefix-list 'plist' changes: if any configured RP uses it,
+ * refresh the whole group-to-RP mapping (one refresh is enough, hence the
+ * break after the first hit).
+ */
+void pim_rp_prefix_list_update(struct pim_instance *pim,
+			       struct prefix_list *plist)
+{
+	struct listnode *node;
+	struct rp_info *rp_info;
+	int refresh_needed = 0;
+
+	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
+		if (rp_info->plist
+		    && strcmp(rp_info->plist, prefix_list_name(plist)) == 0) {
+			refresh_needed = 1;
+			break;
+		}
+	}
+
+	if (refresh_needed)
+		pim_rp_refresh_group_to_rp_mapping(pim);
+}
+
+/* Return 1 if the RP address of rp_info is one of this interface's
+ * addresses (primary or any secondary), i.e. this router owns the RP
+ * address; 0 otherwise.
+ */
+static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
+					struct pim_interface *pim_ifp)
+{
+	struct listnode *node;
+	struct pim_secondary_addr *sec_addr;
+	pim_addr sec_paddr;
+
+	if (!pim_addr_cmp(pim_ifp->primary_address, rp_info->rp.rpf_addr))
+		return 1;
+
+	if (!pim_ifp->sec_addr_list) {
+		return 0;
+	}
+
+	for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
+		sec_paddr = pim_addr_from_prefix(&sec_addr->addr);
+		/* If an RP-address is self, It should be enough to say
+		 * I am RP the prefix-length should not matter here */
+		if (!pim_addr_cmp(sec_paddr, rp_info->rp.rpf_addr))
+			return 1;
+	}
+
+	return 0;
+}
+
+/* Recompute rp_info->i_am_rp from scratch by checking every PIM-enabled
+ * interface in the VRF for ownership of the RP address.
+ */
+static void pim_rp_check_interfaces(struct pim_instance *pim,
+				    struct rp_info *rp_info)
+{
+	struct interface *ifp;
+
+	rp_info->i_am_rp = 0;
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		struct pim_interface *pim_ifp = ifp->info;
+
+		if (!pim_ifp)
+			continue;
+
+		if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
+			rp_info->i_am_rp = 1;
+		}
+	}
+}
+
+/* Re-resolve the upstream (RP-derived) address of 'up' after an RP
+ * configuration change, move NHT tracking from the old address to the
+ * new one, redo the RPF lookup and push any RPF change to zebra / MFC.
+ */
+void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
+{
+	struct pim_rpf old_rpf;
+	enum pim_rpf_result rpf_result;
+	pim_addr old_upstream_addr;
+	pim_addr new_upstream_addr;
+
+	old_upstream_addr = up->upstream_addr;
+	pim_rp_set_upstream_addr(pim, &new_upstream_addr, up->sg.src,
+				 up->sg.grp);
+
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug("%s: pim upstream update for old upstream %pPA",
+			   __func__, &old_upstream_addr);
+
+	/* Nothing to do when the resolved upstream address is unchanged. */
+	if (!pim_addr_cmp(old_upstream_addr, new_upstream_addr))
+		return;
+
+	/* Lets consider a case, where a PIM upstream has a better RP as a
+	 * result of a new RP configuration with more precise group range.
+	 * This upstream has to be added to the upstream hash of new RP's
+	 * NHT(pnc) and has to be removed from old RP's NHT upstream hash
+	 */
+	if (!pim_addr_is_any(old_upstream_addr)) {
+		/* Deregister addr with Zebra NHT */
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug(
+				"%s: Deregister upstream %s addr %pPA with Zebra NHT",
+				__func__, up->sg_str, &old_upstream_addr);
+		pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL);
+	}
+
+	/* Update the upstream address */
+	up->upstream_addr = new_upstream_addr;
+
+	/* Remember the old RPF interface so we can detect an RPF change. */
+	old_rpf.source_nexthop.interface = up->rpf.source_nexthop.interface;
+
+	rpf_result = pim_rpf_update(pim, up, &old_rpf, __func__);
+	if (rpf_result == PIM_RPF_FAILURE)
+		pim_mroute_del(up->channel_oil, __func__);
+
+	/* update kernel multicast forwarding cache (MFC) */
+	if (up->rpf.source_nexthop.interface && up->channel_oil)
+		pim_upstream_mroute_iif_update(up->channel_oil, __func__);
+
+	if (rpf_result == PIM_RPF_CHANGED ||
+	    (rpf_result == PIM_RPF_FAILURE &&
+	     old_rpf.source_nexthop.interface))
+		pim_zebra_upstream_rpf_changed(pim, up, &old_rpf);
+
+}
+
+/* Configure a new RP mapping (rp_addr -> group range, or rp_addr ->
+ * prefix-list 'plist'). rp_src_flag records whether the mapping is
+ * static config or BSR-learned. Returns PIM_SUCCESS or a PIM_* error.
+ * Registers the RP address with zebra NHT and tickles upstream state.
+ */
+int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
+	       const char *plist, enum rp_source rp_src_flag)
+{
+	int result = 0;
+	struct rp_info *rp_info;
+	struct rp_info *rp_all;
+	struct prefix group_all;
+	struct listnode *node, *nnode;
+	struct rp_info *tmp_rp_info;
+	char buffer[BUFSIZ];
+	pim_addr nht_p;
+	struct route_node *rn = NULL;
+	struct pim_upstream *up;
+	bool upstream_updated = false;
+
+	if (pim_addr_is_any(rp_addr))
+		return PIM_RP_BAD_ADDRESS;
+
+	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
+
+	rp_info->rp.rpf_addr = rp_addr;
+	prefix_copy(&rp_info->group, &group);
+	rp_info->rp_src = rp_src_flag;
+
+	if (plist) {
+		/*
+		 * Return if the prefix-list is already configured for this RP
+		 */
+		if (pim_rp_find_prefix_list(pim, rp_addr, plist)) {
+			XFREE(MTYPE_PIM_RP, rp_info);
+			return PIM_SUCCESS;
+		}
+
+		/*
+		 * Barf if the prefix-list is already configured for an RP
+		 */
+		if (pim_rp_prefix_list_used(pim, plist)) {
+			XFREE(MTYPE_PIM_RP, rp_info);
+			return PIM_RP_PFXLIST_IN_USE;
+		}
+
+		/*
+		 * Free any existing rp_info entries for this RP
+		 */
+		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
+				       tmp_rp_info)) {
+			if (!pim_addr_cmp(rp_info->rp.rpf_addr,
+					  tmp_rp_info->rp.rpf_addr)) {
+				if (tmp_rp_info->plist)
+					pim_rp_del_config(pim, rp_addr, NULL,
+							  tmp_rp_info->plist);
+				else
+					pim_rp_del_config(
+						pim, rp_addr,
+						prefix2str(&tmp_rp_info->group,
+							   buffer, BUFSIZ),
+						NULL);
+			}
+		}
+
+		rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
+	} else {
+
+		if (!pim_get_all_mcast_group(&group_all)) {
+			XFREE(MTYPE_PIM_RP, rp_info);
+			return PIM_GROUP_BAD_ADDRESS;
+		}
+		rp_all = pim_rp_find_match_group(pim, &group_all);
+
+		/*
+		 * Barf if group is a non-multicast subnet
+		 */
+		if (!prefix_match(&rp_all->group, &rp_info->group)) {
+			XFREE(MTYPE_PIM_RP, rp_info);
+			return PIM_GROUP_BAD_ADDRESS;
+		}
+
+		/*
+		 * Remove any prefix-list rp_info entries for this RP
+		 */
+		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
+				       tmp_rp_info)) {
+			if (tmp_rp_info->plist &&
+			    (!pim_addr_cmp(rp_info->rp.rpf_addr,
+					   tmp_rp_info->rp.rpf_addr))) {
+				pim_rp_del_config(pim, rp_addr, NULL,
+						  tmp_rp_info->plist);
+			}
+		}
+
+		/*
+		 * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY
+		 */
+		if (prefix_same(&rp_all->group, &rp_info->group) &&
+		    pim_rpf_addr_is_inaddr_any(&rp_all->rp)) {
+			/* Reuse the existing catch-all entry rather than
+			 * inserting a duplicate; the freshly allocated
+			 * rp_info is not needed.
+			 */
+			rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
+			rp_all->rp_src = rp_src_flag;
+			XFREE(MTYPE_PIM_RP, rp_info);
+
+			/* Register addr with Zebra NHT */
+			nht_p = rp_all->rp.rpf_addr;
+			if (PIM_DEBUG_PIM_NHT_RP)
+				zlog_debug(
+					"%s: NHT Register rp_all addr %pPA grp %pFX ",
+					__func__, &nht_p, &rp_all->group);
+
+			frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+				/* Find (*, G) upstream whose RP is not
+				 * configured yet
+				 */
+				if (pim_addr_is_any(up->upstream_addr) &&
+				    pim_addr_is_any(up->sg.src)) {
+					struct prefix grp;
+					struct rp_info *trp_info;
+
+					pim_addr_to_prefix(&grp, up->sg.grp);
+					trp_info = pim_rp_find_match_group(
+						pim, &grp);
+					if (trp_info == rp_all) {
+						pim_upstream_update(pim, up);
+						upstream_updated = true;
+					}
+				}
+			}
+			if (upstream_updated)
+				pim_zebra_update_all_interfaces(pim);
+
+			pim_rp_check_interfaces(pim, rp_all);
+			pim_rp_refresh_group_to_rp_mapping(pim);
+			pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all,
+						  NULL);
+
+			if (!pim_ecmp_nexthop_lookup(pim,
+						     &rp_all->rp.source_nexthop,
+						     nht_p, &rp_all->group, 1))
+				return PIM_RP_NO_PATH;
+			return PIM_SUCCESS;
+		}
+
+		/*
+		 * Return if the group is already configured for this RP
+		 */
+		tmp_rp_info = pim_rp_find_exact(pim, rp_addr, &rp_info->group);
+		if (tmp_rp_info) {
+			/* Static config may upgrade a BSR-learned entry. */
+			if ((tmp_rp_info->rp_src != rp_src_flag)
+			    && (rp_src_flag == RP_SRC_STATIC))
+				tmp_rp_info->rp_src = rp_src_flag;
+			XFREE(MTYPE_PIM_RP, rp_info);
+			return result;
+		}
+
+		/*
+		 * Barf if this group is already covered by some other RP
+		 */
+		tmp_rp_info = pim_rp_find_match_group(pim, &rp_info->group);
+
+		if (tmp_rp_info) {
+			if (tmp_rp_info->plist) {
+				XFREE(MTYPE_PIM_RP, rp_info);
+				return PIM_GROUP_PFXLIST_OVERLAP;
+			} else {
+				/*
+				 * If the only RP that covers this group is an
+				 * RP configured for
+				 * 224.0.0.0/4 that is fine, ignore that one.
+				 * For all others
+				 * though we must return PIM_GROUP_OVERLAP
+				 */
+				if (prefix_same(&rp_info->group,
+						&tmp_rp_info->group)) {
+					if ((rp_src_flag == RP_SRC_STATIC)
+					    && (tmp_rp_info->rp_src
+						== RP_SRC_STATIC)) {
+						XFREE(MTYPE_PIM_RP, rp_info);
+						return PIM_GROUP_OVERLAP;
+					}
+
+					/* Same group, different source
+					 * (static vs BSR): change the RP
+					 * in place instead of adding.
+					 */
+					result = pim_rp_change(
+						pim, rp_addr,
+						tmp_rp_info->group,
+						rp_src_flag);
+					XFREE(MTYPE_PIM_RP, rp_info);
+					return result;
+				}
+			}
+		}
+	}
+
+	listnode_add_sort(pim->rp_list, rp_info);
+
+	/* Group-range entries also live in the route table for LPM lookup;
+	 * prefix-list entries are only on rp_list.
+	 */
+	if (!rp_info->plist) {
+		rn = route_node_get(pim->rp_table, &rp_info->group);
+		rn->info = rp_info;
+	}
+
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
+			   rp_info, &rp_info->group,
+			   rn ? route_node_get_lock_count(rn) : 0);
+
+	/* Re-home any (*,G) upstream whose group now maps to this RP. */
+	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+		if (pim_addr_is_any(up->sg.src)) {
+			struct prefix grp;
+			struct rp_info *trp_info;
+
+			pim_addr_to_prefix(&grp, up->sg.grp);
+			trp_info = pim_rp_find_match_group(pim, &grp);
+
+			if (trp_info == rp_info) {
+				pim_upstream_update(pim, up);
+				upstream_updated = true;
+			}
+		}
+	}
+
+	if (upstream_updated)
+		pim_zebra_update_all_interfaces(pim);
+
+	pim_rp_check_interfaces(pim, rp_info);
+	pim_rp_refresh_group_to_rp_mapping(pim);
+
+	/* Register addr with Zebra NHT */
+	nht_p = rp_info->rp.rpf_addr;
+	if (PIM_DEBUG_PIM_NHT_RP)
+		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
+			   __func__, &nht_p, &rp_info->group);
+	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
+	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
+				     &rp_info->group, 1))
+		return PIM_RP_NO_PATH;
+
+	return PIM_SUCCESS;
+}
+
+/* CLI-facing wrapper around pim_rp_del(): parses the optional group-range
+ * string (NULL means the all-multicast range) and deletes the static RP
+ * mapping. Parse failures are logged (under debug) and ignored.
+ */
+void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
+		       const char *group_range, const char *plist)
+{
+	struct prefix group;
+	int result;
+
+	if (group_range == NULL)
+		result = pim_get_all_mcast_group(&group);
+	else
+		result = str2prefix(group_range, &group);
+
+	if (!result) {
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug(
+				"%s: String to prefix failed for %pPAs group",
+				__func__, &rp_addr);
+		return;
+	}
+
+	pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
+}
+
+/* Remove an RP mapping (by group range, or by prefix-list when 'plist'
+ * is non-NULL). Falls back to a BSR-learned RP for the same group when
+ * a static RP is removed, deregisters NHT tracking, and re-homes all
+ * affected (*,G) upstream entries. Returns PIM_SUCCESS or a PIM_* error.
+ */
+int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
+	       const char *plist, enum rp_source rp_src_flag)
+{
+	struct prefix g_all;
+	struct rp_info *rp_info;
+	struct rp_info *rp_all;
+	pim_addr nht_p;
+	struct route_node *rn;
+	bool was_plist = false;
+	struct rp_info *trp_info;
+	struct pim_upstream *up;
+	struct bsgrp_node *bsgrp = NULL;
+	struct bsm_rpinfo *bsrp = NULL;
+	bool upstream_updated = false;
+
+	if (plist)
+		rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
+	else
+		rp_info = pim_rp_find_exact(pim, rp_addr, &group);
+
+	if (!rp_info)
+		return PIM_RP_NOT_FOUND;
+
+	/* Free the prefix-list name now; remember it was a plist entry so
+	 * the route-table cleanup below is skipped.
+	 */
+	if (rp_info->plist) {
+		XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
+		was_plist = true;
+	}
+
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__,
+			   &rp_addr, &group);
+
+	/* While static RP is getting deleted, we need to check if dynamic RP
+	 * present for the same group in BSM RP table, then install the dynamic
+	 * RP for the group node into the main rp table
+	 */
+	if (rp_src_flag == RP_SRC_STATIC) {
+		bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group);
+
+		if (bsgrp) {
+			bsrp = bsm_rpinfos_first(bsgrp->bsrp_list);
+			if (bsrp) {
+				if (PIM_DEBUG_PIM_TRACE)
+					zlog_debug(
+						"%s: BSM RP %pPA found for the group %pFX",
+						__func__, &bsrp->rp_address,
+						&group);
+				return pim_rp_change(pim, bsrp->rp_address,
+						     group, RP_SRC_BSR);
+			}
+		} else {
+			if (PIM_DEBUG_PIM_TRACE)
+				zlog_debug(
+					"%s: BSM RP not found for the group %pFX",
+					__func__, &group);
+		}
+	}
+
+	/* Deregister addr with Zebra NHT */
+	nht_p = rp_info->rp.rpf_addr;
+	if (PIM_DEBUG_PIM_NHT_RP)
+		zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__,
+			   &nht_p);
+	pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
+
+	if (!pim_get_all_mcast_group(&g_all))
+		return PIM_RP_BAD_ADDRESS;
+
+	rp_all = pim_rp_find_match_group(pim, &g_all);
+
+	/* The catch-all entry is never freed; deleting it just resets its
+	 * RP address to "any" and clears affected upstream state.
+	 */
+	if (rp_all == rp_info) {
+		frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+			/* Find the upstream (*, G) whose upstream address is
+			 * same as the deleted RP
+			 */
+			pim_addr rpf_addr;
+
+			rpf_addr = rp_info->rp.rpf_addr;
+			if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
+			    pim_addr_is_any(up->sg.src)) {
+				struct prefix grp;
+
+				pim_addr_to_prefix(&grp, up->sg.grp);
+				trp_info = pim_rp_find_match_group(pim, &grp);
+				if (trp_info == rp_all) {
+					pim_upstream_rpf_clear(pim, up);
+					up->upstream_addr = PIMADDR_ANY;
+				}
+			}
+		}
+		rp_all->rp.rpf_addr = PIMADDR_ANY;
+		rp_all->i_am_rp = 0;
+		return PIM_SUCCESS;
+	}
+
+	listnode_delete(pim->rp_list, rp_info);
+
+	/* Group-range entries also hold a route-table node: detach it and
+	 * drop both references (route_node_get above + creation lock).
+	 */
+	if (!was_plist) {
+		rn = route_node_get(pim->rp_table, &rp_info->group);
+		if (rn) {
+			if (rn->info != rp_info)
+				flog_err(
+					EC_LIB_DEVELOPMENT,
+					"Expected rn->info to be equal to rp_info");
+
+			if (PIM_DEBUG_PIM_TRACE)
+				zlog_debug(
+					"%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d",
+					__func__, rn, rp_info, &rp_info->group,
+					route_node_get_lock_count(rn));
+
+			rn->info = NULL;
+			route_unlock_node(rn);
+			route_unlock_node(rn);
+		}
+	}
+
+	pim_rp_refresh_group_to_rp_mapping(pim);
+
+	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+		/* Find the upstream (*, G) whose upstream address is same as
+		 * the deleted RP
+		 */
+		pim_addr rpf_addr;
+
+		rpf_addr = rp_info->rp.rpf_addr;
+		if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
+		    pim_addr_is_any(up->sg.src)) {
+			struct prefix grp;
+
+			pim_addr_to_prefix(&grp, up->sg.grp);
+			trp_info = pim_rp_find_match_group(pim, &grp);
+
+			/* RP not found for the group grp */
+			if (pim_rpf_addr_is_inaddr_any(&trp_info->rp)) {
+				pim_upstream_rpf_clear(pim, up);
+				pim_rp_set_upstream_addr(
+					pim, &up->upstream_addr, up->sg.src,
+					up->sg.grp);
+			}
+
+			/* RP found for the group grp */
+			else {
+				pim_upstream_update(pim, up);
+				upstream_updated = true;
+			}
+		}
+	}
+
+	if (upstream_updated)
+		pim_zebra_update_all_interfaces(pim);
+
+	XFREE(MTYPE_PIM_RP, rp_info);
+	return PIM_SUCCESS;
+}
+
+/* Replace the RP address for an existing group-range mapping (creating
+ * the mapping via pim_rp_new() when none exists). Moves zebra NHT
+ * tracking from the old RP address to the new one, re-sorts rp_list,
+ * and re-homes affected (*,G) upstreams. Returns PIM_SUCCESS/PIM_* error.
+ */
+int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
+		  struct prefix group, enum rp_source rp_src_flag)
+{
+	pim_addr nht_p;
+	struct route_node *rn;
+	int result = 0;
+	struct rp_info *rp_info = NULL;
+	struct pim_upstream *up;
+	bool upstream_updated = false;
+	pim_addr old_rp_addr;
+
+	/* No existing node for this exact group range: this is an add. */
+	rn = route_node_lookup(pim->rp_table, &group);
+	if (!rn) {
+		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
+		return result;
+	}
+
+	rp_info = rn->info;
+
+	if (!rp_info) {
+		route_unlock_node(rn);
+		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
+		return result;
+	}
+
+	/* Same RP address with a different source: just record the new
+	 * source and we are done.
+	 */
+	old_rp_addr = rp_info->rp.rpf_addr;
+	if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) {
+		if (rp_info->rp_src != rp_src_flag) {
+			rp_info->rp_src = rp_src_flag;
+			route_unlock_node(rn);
+			return PIM_SUCCESS;
+		}
+	}
+
+	/* Deregister old RP addr with Zebra NHT */
+
+	if (!pim_addr_is_any(old_rp_addr)) {
+		nht_p = rp_info->rp.rpf_addr;
+		if (PIM_DEBUG_PIM_NHT_RP)
+			zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
+				   __func__, &nht_p);
+		pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
+	}
+
+	/* rp_list is sorted by RP address, so the entry must be removed
+	 * and re-inserted after the address changes.
+	 */
+	pim_rp_nexthop_del(rp_info);
+	listnode_delete(pim->rp_list, rp_info);
+	/* Update the new RP address*/
+
+	rp_info->rp.rpf_addr = new_rp_addr;
+	rp_info->rp_src = rp_src_flag;
+	rp_info->i_am_rp = 0;
+
+	listnode_add_sort(pim->rp_list, rp_info);
+
+	/* Re-home any (*,G) upstream whose group maps to this entry. */
+	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+		if (pim_addr_is_any(up->sg.src)) {
+			struct prefix grp;
+			struct rp_info *trp_info;
+
+			pim_addr_to_prefix(&grp, up->sg.grp);
+			trp_info = pim_rp_find_match_group(pim, &grp);
+
+			if (trp_info == rp_info) {
+				pim_upstream_update(pim, up);
+				upstream_updated = true;
+			}
+		}
+	}
+
+	if (upstream_updated)
+		pim_zebra_update_all_interfaces(pim);
+
+	/* Register new RP addr with Zebra NHT */
+	nht_p = rp_info->rp.rpf_addr;
+	if (PIM_DEBUG_PIM_NHT_RP)
+		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
+			   __func__, &nht_p, &rp_info->group);
+
+	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
+	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
+				     &rp_info->group, 1)) {
+		route_unlock_node(rn);
+		return PIM_RP_NO_PATH;
+	}
+
+	pim_rp_check_interfaces(pim, rp_info);
+
+	/* Drop the reference taken by route_node_lookup() above. */
+	route_unlock_node(rn);
+
+	pim_rp_refresh_group_to_rp_mapping(pim);
+
+	return result;
+}
+
+/* (Re)register every configured RP with zebra NHT and resolve its
+ * nexthop; called e.g. when the instance (re)connects to zebra.
+ * Entries whose RP is still unset ("any") are skipped.
+ */
+void pim_rp_setup(struct pim_instance *pim)
+{
+	struct listnode *node;
+	struct rp_info *rp_info;
+	pim_addr nht_p;
+
+	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
+		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
+			continue;
+
+		nht_p = rp_info->rp.rpf_addr;
+
+		pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
+		if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
+					     nht_p, &rp_info->group, 1)) {
+			if (PIM_DEBUG_PIM_NHT_RP)
+				zlog_debug(
+					"Unable to lookup nexthop for rp specified");
+			pim_rp_nexthop_del(rp_info);
+		}
+	}
+}
+
+/*
+ * Checks to see if we should elect ourself the actual RP when new if
+ * addresses are added against an interface.
+ */
+void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
+{
+	struct listnode *node;
+	struct rp_info *rp_info;
+	bool i_am_rp_changed = false;
+	struct pim_instance *pim = pim_ifp->pim;
+
+	if (pim->rp_list == NULL)
+		return;
+
+	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
+		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
+			continue;
+
+		/* if i_am_rp is already set nothing to be done (adding new
+		 * addresses
+		 * is not going to make a difference). */
+		if (rp_info->i_am_rp) {
+			continue;
+		}
+
+		/* A newly added address may match an RP address: this
+		 * router becomes the RP for that entry.
+		 */
+		if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
+			i_am_rp_changed = true;
+			rp_info->i_am_rp = 1;
+			if (PIM_DEBUG_PIM_NHT_RP)
+				zlog_debug("%s: %pPA: i am rp", __func__,
+					   &rp_info->rp.rpf_addr);
+		}
+	}
+
+	/* Only notify MSDP / re-evaluate RPT usage if something changed. */
+	if (i_am_rp_changed) {
+		pim_msdp_i_am_rp_changed(pim);
+		pim_upstream_reeval_use_rpt(pim);
+	}
+}
+
+/* un-optimized re-evaluation of "i_am_rp". this is used when ifaddresses
+ * are removed. Removing addresses is an uncommon event in an active network
+ * so I have made no attempt to optimize it. */
+void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
+{
+	struct listnode *node;
+	struct rp_info *rp_info;
+	bool i_am_rp_changed = false;
+	int old_i_am_rp;
+
+	if (pim->rp_list == NULL)
+		return;
+
+	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
+		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
+			continue;
+
+		/* pim_rp_check_interfaces() recomputes rp_info->i_am_rp;
+		 * compare against the saved value to detect a transition */
+		old_i_am_rp = rp_info->i_am_rp;
+		pim_rp_check_interfaces(pim, rp_info);
+
+		if (old_i_am_rp != rp_info->i_am_rp) {
+			i_am_rp_changed = true;
+			if (PIM_DEBUG_PIM_NHT_RP) {
+				if (rp_info->i_am_rp)
+					zlog_debug("%s: %pPA: i am rp",
+						   __func__,
+						   &rp_info->rp.rpf_addr);
+				else
+					zlog_debug(
+						"%s: %pPA: i am no longer rp",
+						__func__,
+						&rp_info->rp.rpf_addr);
+			}
+		}
+	}
+
+	if (i_am_rp_changed) {
+		pim_msdp_i_am_rp_changed(pim);
+		pim_upstream_reeval_use_rpt(pim);
+	}
+}
+
+/*
+ * I_am_RP(G) is true if the group-to-RP mapping indicates that
+ * this router is the RP for the group.
+ *
+ * Since we only have static RP, all groups are part of this RP
+ *
+ * Returns the matched rp_info's i_am_rp flag, or 0 if no RP
+ * mapping covers the group.
+ */
+int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group)
+{
+	struct prefix g;
+	struct rp_info *rp_info;
+
+	memset(&g, 0, sizeof(g));
+	pim_addr_to_prefix(&g, group);
+	rp_info = pim_rp_find_match_group(pim, &g);
+
+	if (rp_info)
+		return rp_info->i_am_rp;
+	return 0;
+}
+
+/*
+ * RP(G)
+ *
+ * Return the RP that the Group belongs to.
+ *
+ * Side effect: registers the RP address with zebra NHT and refreshes
+ * the cached nexthop before returning. Returns NULL when no RP
+ * mapping covers the group.
+ */
+struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
+{
+	struct prefix g;
+	struct rp_info *rp_info;
+
+	memset(&g, 0, sizeof(g));
+	pim_addr_to_prefix(&g, group);
+
+	rp_info = pim_rp_find_match_group(pim, &g);
+
+	if (rp_info) {
+		pim_addr nht_p;
+
+		/* an "any" RP address means no usable RP is configured for
+		 * this range; skip NHT registration and hand back as-is */
+		if (pim_addr_is_any(rp_info->rp.rpf_addr)) {
+			if (PIM_DEBUG_PIM_NHT_RP)
+				zlog_debug(
+					"%s: Skipping NHT Register since RP is not configured for the group %pPA",
+					__func__, &group);
+			return &rp_info->rp;
+		}
+
+		/* Register addr with Zebra NHT */
+		nht_p = rp_info->rp.rpf_addr;
+		if (PIM_DEBUG_PIM_NHT_RP)
+			zlog_debug(
+				"%s: NHT Register RP addr %pPA grp %pFX with Zebra",
+				__func__, &nht_p, &rp_info->group);
+		pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
+		pim_rpf_set_refresh_time(pim);
+		(void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
+					      nht_p, &rp_info->group, 1);
+		return (&rp_info->rp);
+	}
+
+	// About to Go Down
+	return NULL;
+}
+
+/*
+ * Set the upstream IP address we want to talk to based upon
+ * the rp configured and the source address
+ *
+ * If we don't have an RP configured and the source address is *
+ * then set the upstream addr as INADDR_ANY and return failure.
+ *
+ * Returns 1 on success, 0 when a (*,G) arrives with no RP configured.
+ */
+int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
+			     pim_addr source, pim_addr group)
+{
+	struct rp_info *rp_info;
+	struct prefix g;
+
+	memset(&g, 0, sizeof(g));
+
+	pim_addr_to_prefix(&g, group);
+
+	rp_info = pim_rp_find_match_group(pim, &g);
+
+	/* fail only for (*,G) with no usable RP; an (S,G) always has the
+	 * source itself as upstream */
+	if (!rp_info || ((pim_rpf_addr_is_inaddr_any(&rp_info->rp)) &&
+			 (pim_addr_is_any(source)))) {
+		if (PIM_DEBUG_PIM_NHT_RP)
+			zlog_debug("%s: Received a (*,G) with no RP configured",
+				   __func__);
+		*up = PIMADDR_ANY;
+		return 0;
+	}
+
+	if (pim_addr_is_any(source))
+		*up = rp_info->rp.rpf_addr;
+	else
+		*up = source;
+
+	return 1;
+}
+
+/* Emit "ip/ipv6 pim rp ..." config lines for every statically configured
+ * RP (BSR-learned RPs are runtime state, not config). Returns the number
+ * of lines written. */
+int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
+			const char *spaces)
+{
+	struct listnode *node;
+	struct rp_info *rp_info;
+	int count = 0;
+	pim_addr rp_addr;
+
+	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
+		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
+			continue;
+
+		/* only static RPs belong in the saved configuration */
+		if (rp_info->rp_src == RP_SRC_BSR)
+			continue;
+
+		rp_addr = rp_info->rp.rpf_addr;
+		if (rp_info->plist)
+			vty_out(vty,
+				"%s" PIM_AF_NAME
+				" pim rp %pPA prefix-list %s\n",
+				spaces, &rp_addr, rp_info->plist);
+		else
+			vty_out(vty, "%s" PIM_AF_NAME " pim rp %pPA %pFX\n",
+				spaces, &rp_addr, &rp_info->group);
+		count++;
+	}
+
+	return count;
+}
+
+/* "show ... pim rp-info" handler: render the RP table either as an ASCII
+ * table (vty) or as JSON keyed by RP address (json != NULL). The optional
+ * range prefix filters which group mappings are shown. */
+void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
+			     struct vty *vty, json_object *json)
+{
+	struct rp_info *rp_info;
+	struct rp_info *prev_rp_info = NULL;
+	struct listnode *node;
+	struct ttable *tt = NULL;
+	char *table = NULL;
+	char source[7];
+	char grp[INET6_ADDRSTRLEN];
+
+	json_object *json_rp_rows = NULL;
+	json_object *json_row = NULL;
+
+	if (!json) {
+		/* Prepare table. */
+		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+		ttable_add_row(
+			tt,
+			"RP address|group/prefix-list|OIF|I am RP|Source|Group-Type");
+		tt->style.cell.rpad = 2;
+		tt->style.corner = '+';
+		ttable_restyle(tt);
+	}
+
+	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
+		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
+			continue;
+
+#if PIM_IPV == 4
+		pim_addr group = rp_info->group.u.prefix4;
+#else
+		pim_addr group = rp_info->group.u.prefix6;
+#endif
+		const char *group_type =
+			pim_is_grp_ssm(pim, group) ? "SSM" : "ASM";
+
+		if (range && !prefix_match(&rp_info->group, range))
+			continue;
+
+		if (rp_info->rp_src == RP_SRC_STATIC)
+			strlcpy(source, "Static", sizeof(source));
+		else if (rp_info->rp_src == RP_SRC_BSR)
+			strlcpy(source, "BSR", sizeof(source));
+		else
+			strlcpy(source, "None", sizeof(source));
+		if (json) {
+			/*
+			 * If we have moved on to a new RP then add the
+			 * entry for the previous RP
+			 */
+			if (prev_rp_info &&
+			    (pim_addr_cmp(prev_rp_info->rp.rpf_addr,
+					  rp_info->rp.rpf_addr))) {
+				json_object_object_addf(
+					json, json_rp_rows, "%pPA",
+					&prev_rp_info->rp.rpf_addr);
+				json_rp_rows = NULL;
+			}
+
+			if (!json_rp_rows)
+				json_rp_rows = json_object_new_array();
+
+			json_row = json_object_new_object();
+			json_object_string_addf(json_row, "rpAddress", "%pPA",
+						&rp_info->rp.rpf_addr);
+			if (rp_info->rp.source_nexthop.interface)
+				json_object_string_add(
+					json_row, "outboundInterface",
+					rp_info->rp.source_nexthop
+						.interface->name);
+			else
+				json_object_string_add(json_row,
+						       "outboundInterface",
+						       "Unknown");
+			if (rp_info->i_am_rp)
+				json_object_boolean_true_add(json_row, "iAmRP");
+			else
+				json_object_boolean_false_add(json_row,
+							      "iAmRP");
+
+			if (rp_info->plist)
+				json_object_string_add(json_row, "prefixList",
+						       rp_info->plist);
+			else
+				json_object_string_addf(json_row, "group",
+							"%pFX",
+							&rp_info->group);
+			json_object_string_add(json_row, "source", source);
+			json_object_string_add(json_row, "groupType",
+					       group_type);
+
+			json_object_array_add(json_rp_rows, json_row);
+		} else {
+			prefix2str(&rp_info->group, grp, sizeof(grp));
+			ttable_add_row(tt, "%pPA|%s|%s|%s|%s|%s",
+				       &rp_info->rp.rpf_addr,
+				       rp_info->plist
+				       ? rp_info->plist
+				       : grp,
+				       rp_info->rp.source_nexthop.interface
+				       ? rp_info->rp.source_nexthop
+						 .interface->name
+				       : "Unknown",
+				       rp_info->i_am_rp
+				       ? "yes"
+				       : "no",
+				       source, group_type);
+		}
+		prev_rp_info = rp_info;
+	}
+
+	/* Dump the generated table. */
+	if (!json) {
+		table = ttable_dump(tt, "\n");
+		vty_out(vty, "%s\n", table);
+		XFREE(MTYPE_TMP, table);
+		ttable_del(tt);
+	} else {
+		/* flush the rows collected for the final RP in the list */
+		if (prev_rp_info && json_rp_rows)
+			json_object_object_addf(json, json_rp_rows, "%pPA",
+						&prev_rp_info->rp.rpf_addr);
+	}
+}
+
+/* When a new PIM neighbor comes up, patch any RP nexthop-cache entries
+ * whose gateway is still unresolved (any-address) and whose egress
+ * interface matches the neighbor's, pointing them at the neighbor. */
+void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
+{
+	struct listnode *node = NULL;
+	struct rp_info *rp_info = NULL;
+	struct nexthop *nh_node = NULL;
+	pim_addr nht_p;
+	struct pim_nexthop_cache pnc;
+
+	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
+		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
+			continue;
+
+		nht_p = rp_info->rp.rpf_addr;
+		memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
+		if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, &pnc))
+			continue;
+
+		for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
+			/* only fix up nexthops with no gateway yet */
+#if PIM_IPV == 4
+			if (!pim_addr_is_any(nh_node->gate.ipv4))
+				continue;
+#else
+			if (!pim_addr_is_any(nh_node->gate.ipv6))
+				continue;
+#endif
+
+			struct interface *ifp1 = if_lookup_by_index(
+				nh_node->ifindex, pim->vrf->vrf_id);
+
+			if (nbr->interface != ifp1)
+				continue;
+
+#if PIM_IPV == 4
+			nh_node->gate.ipv4 = nbr->source_addr;
+#else
+			nh_node->gate.ipv6 = nbr->source_addr;
+#endif
+			if (PIM_DEBUG_PIM_NHT_RP)
+				zlog_debug(
+					"%s: addr %pPA new nexthop addr %pPAs interface %s",
+					__func__, &nht_p, &nbr->source_addr,
+					ifp1->name);
+		}
+	}
+}
diff --git a/pimd/pim_rp.h b/pimd/pim_rp.h
new file mode 100644
index 0000000..9416a9a
--- /dev/null
+++ b/pimd/pim_rp.h
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2015 Cumulus Networks, Inc.
+ * Donald Sharp
+ *
+ * Rendezvous Point (RP) handling: static/BSR RP configuration,
+ * group-to-RP mapping, and I_am_RP state.
+ */
+#ifndef PIM_RP_H
+#define PIM_RP_H
+
+#include <zebra.h>
+#include "prefix.h"
+#include "vty.h"
+#include "plist.h"
+#include "pim_rpf.h"
+#include "lib/json.h"
+
+struct pim_interface;
+
+/* how an RP mapping was learned */
+enum rp_source {
+	RP_SRC_NONE = 0,
+	RP_SRC_STATIC,
+	RP_SRC_BSR
+};
+
+/* one group-range (or prefix-list) to RP mapping */
+struct rp_info {
+	struct prefix group;
+	struct pim_rpf rp;
+	enum rp_source rp_src;
+	/* nonzero when this router is itself the RP for the range */
+	int i_am_rp;
+	char *plist;
+};
+
+void pim_rp_init(struct pim_instance *pim);
+void pim_rp_free(struct pim_instance *pim);
+
+void pim_rp_list_hash_clean(void *data);
+
+int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
+	       const char *plist, enum rp_source rp_src_flag);
+void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
+		       const char *group, const char *plist);
+int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
+	       const char *plist, enum rp_source rp_src_flag);
+int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
+		  struct prefix group, enum rp_source rp_src_flag);
+void pim_rp_prefix_list_update(struct pim_instance *pim,
+			       struct prefix_list *plist);
+
+int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
+			const char *spaces);
+
+void pim_rp_setup(struct pim_instance *pim);
+
+int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group);
+void pim_rp_check_on_if_add(struct pim_interface *pim_ifp);
+void pim_i_am_rp_re_evaluate(struct pim_instance *pim);
+
+bool pim_rp_check_is_my_ip_address(struct pim_instance *pim,
+				   struct in_addr dest_addr);
+
+int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
+			     pim_addr source, pim_addr group);
+
+struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group);
+
+/* RFC 7761 macro spellings for the lookups above */
+#define I_am_RP(P, G) pim_rp_i_am_rp ((P), (G))
+#define RP(P, G) pim_rp_g ((P), (G))
+
+void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
+			     struct vty *vty, json_object *json);
+void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr);
+int pim_rp_list_cmp(void *v1, void *v2);
+struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
+					const struct prefix *group);
+void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up);
+void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim);
+#endif
diff --git a/pimd/pim_rpf.c b/pimd/pim_rpf.c
new file mode 100644
index 0000000..b17ae31
--- /dev/null
+++ b/pimd/pim_rpf.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "if.h"
+
+#include "log.h"
+#include "prefix.h"
+#include "memory.h"
+#include "jhash.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_rpf.h"
+#include "pim_pim.h"
+#include "pim_str.h"
+#include "pim_iface.h"
+#include "pim_neighbor.h"
+#include "pim_zlookup.h"
+#include "pim_ifchannel.h"
+#include "pim_time.h"
+#include "pim_nht.h"
+#include "pim_oil.h"
+#include "pim_mlag.h"
+
+static pim_addr pim_rpf_find_rpf_addr(struct pim_upstream *up);
+
+/* Record the time of the last unicast route change; used by
+ * pim_nexthop_lookup() to decide whether a cached lookup is still valid. */
+void pim_rpf_set_refresh_time(struct pim_instance *pim)
+{
+	pim->last_route_change_time = pim_time_monotonic_usec();
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug("%s: vrf(%s) New last route change time: %" PRId64,
+			   __func__, pim->vrf->name,
+			   pim->last_route_change_time);
+}
+
+/* Resolve the RPF nexthop for addr via a synchronous zebra lookup,
+ * preferring a cached result if no route change happened since the last
+ * lookup. When neighbor_needed is set, candidate interfaces without a PIM
+ * neighbor (and not directly connected to the source) are skipped.
+ * Returns true and fills *nexthop on success. */
+bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,
+			pim_addr addr, int neighbor_needed)
+{
+	struct pim_zlookup_nexthop nexthop_tab[router->multipath];
+	struct pim_neighbor *nbr = NULL;
+	int num_ifindex;
+	struct interface *ifp = NULL;
+	ifindex_t first_ifindex = 0;
+	int found = 0;
+	int i = 0;
+	struct pim_interface *pim_ifp;
+
+#if PIM_IPV == 4
+	/*
+	 * We should not attempt to lookup a
+	 * 255.255.255.255 address, since
+	 * it will never work
+	 */
+	if (pim_addr_is_any(addr))
+		return false;
+#endif
+
+	/* cache hit: same address looked up after the last route change */
+	if ((!pim_addr_cmp(nexthop->last_lookup, addr)) &&
+	    (nexthop->last_lookup_time > pim->last_route_change_time)) {
+		if (PIM_DEBUG_PIM_NHT)
+			zlog_debug(
+				"%s: Using last lookup for %pPAs at %lld, %" PRId64
+				" addr %pPAs",
+				__func__, &addr, nexthop->last_lookup_time,
+				pim->last_route_change_time,
+				&nexthop->mrib_nexthop_addr);
+		pim->nexthop_lookups_avoided++;
+		return true;
+	} else {
+		if (PIM_DEBUG_PIM_NHT)
+			zlog_debug(
+				"%s: Looking up: %pPAs, last lookup time: %lld, %" PRId64,
+				__func__, &addr, nexthop->last_lookup_time,
+				pim->last_route_change_time);
+	}
+
+	memset(nexthop_tab, 0,
+	       sizeof(struct pim_zlookup_nexthop) * router->multipath);
+	num_ifindex =
+		zclient_lookup_nexthop(pim, nexthop_tab, router->multipath,
+				       addr, PIM_NEXTHOP_LOOKUP_MAX);
+	if (num_ifindex < 1) {
+		if (PIM_DEBUG_PIM_NHT)
+			zlog_debug(
+				"%s %s: could not find nexthop ifindex for address %pPAs",
+				__FILE__, __func__, &addr);
+		return false;
+	}
+
+	/* walk the ECMP candidates until one passes the interface checks */
+	while (!found && (i < num_ifindex)) {
+		first_ifindex = nexthop_tab[i].ifindex;
+
+		ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
+		if (!ifp) {
+			if (PIM_DEBUG_ZEBRA)
+				zlog_debug(
+					"%s %s: could not find interface for ifindex %d (address %pPAs)",
+					__FILE__, __func__, first_ifindex,
+					&addr);
+			i++;
+			continue;
+		}
+
+		pim_ifp = ifp->info;
+		if (!pim_ifp || !pim_ifp->pim_enable) {
+			if (PIM_DEBUG_ZEBRA)
+				zlog_debug(
+					"%s: pim not enabled on input interface %s (ifindex=%d, RPF for source %pPAs)",
+					__func__, ifp->name, first_ifindex,
+					&addr);
+			i++;
+		} else if (neighbor_needed &&
+			   !pim_if_connected_to_source(ifp, addr)) {
+			nbr = pim_neighbor_find(
+				ifp, nexthop_tab[i].nexthop_addr, true);
+			if (PIM_DEBUG_PIM_TRACE_DETAIL)
+				zlog_debug("ifp name: %s, pim nbr: %p",
+					   ifp->name, nbr);
+			if (!nbr && !if_is_loopback(ifp))
+				i++;
+			else
+				found = 1;
+		} else
+			found = 1;
+	}
+
+	if (found) {
+		if (PIM_DEBUG_ZEBRA)
+			zlog_debug(
+				"%s %s: found nexthop %pPAs for address %pPAs: interface %s ifindex=%d metric=%d pref=%d",
+				__FILE__, __func__,
+				&nexthop_tab[i].nexthop_addr, &addr, ifp->name,
+				first_ifindex, nexthop_tab[i].route_metric,
+				nexthop_tab[i].protocol_distance);
+
+		/* update nexthop data */
+		nexthop->interface = ifp;
+		nexthop->mrib_nexthop_addr = nexthop_tab[i].nexthop_addr;
+		nexthop->mrib_metric_preference =
+			nexthop_tab[i].protocol_distance;
+		nexthop->mrib_route_metric = nexthop_tab[i].route_metric;
+		nexthop->last_lookup = addr;
+		nexthop->last_lookup_time = pim_time_monotonic_usec();
+		nexthop->nbr = nbr;
+		return true;
+	} else
+		return false;
+}
+
+/* Return nonzero when any RPF-relevant field (interface, gateway address,
+ * metric preference, route metric) differs between the two nexthops. */
+static int nexthop_mismatch(const struct pim_nexthop *nh1,
+			    const struct pim_nexthop *nh2)
+{
+	return (nh1->interface != nh2->interface) ||
+	       (pim_addr_cmp(nh1->mrib_nexthop_addr, nh2->mrib_nexthop_addr)) ||
+	       (nh1->mrib_metric_preference != nh2->mrib_metric_preference) ||
+	       (nh1->mrib_route_metric != nh2->mrib_route_metric);
+}
+
+/* Compare the upstream's current MLAG local cost against old_cost and,
+ * if it changed, re-advertise the upstream to the MLAG peer (the cost
+ * feeds the MLAG DF election). */
+static void pim_rpf_cost_change(struct pim_instance *pim,
+				struct pim_upstream *up, uint32_t old_cost)
+{
+	struct pim_rpf *rpf = &up->rpf;
+	uint32_t new_cost;
+
+	new_cost = pim_up_mlag_local_cost(up);
+	if (PIM_DEBUG_MLAG)
+		zlog_debug(
+			"%s: Cost_to_rp of upstream-%s changed to:%u, from:%u",
+			__func__, up->sg_str, new_cost, old_cost);
+
+	if (old_cost == new_cost)
+		return;
+
+	/* Cost changed, it might Impact MLAG DF election, update */
+	if (PIM_DEBUG_MLAG)
+		zlog_debug(
+			"%s: Cost_to_rp of upstream-%s changed to:%u",
+			__func__, up->sg_str,
+			rpf->source_nexthop.mrib_route_metric);
+
+	if (pim_up_mlag_is_local(up))
+		pim_mlag_up_local_add(pim, up);
+}
+
+/* Recompute the RPF information (nexthop and RPF'(S,G) neighbor) for an
+ * upstream entry. Optionally returns the previous RPF state via *old.
+ * Returns PIM_RPF_OK (unchanged), PIM_RPF_CHANGED (RPF neighbor or
+ * interface changed) or PIM_RPF_FAILURE (no route). */
+enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
+				   struct pim_upstream *up, struct pim_rpf *old,
+				   const char *caller)
+{
+	struct pim_rpf *rpf = &up->rpf;
+	struct pim_rpf saved;
+	pim_addr src;
+	struct prefix grp;
+	bool neigh_needed = true;
+	uint32_t saved_mrib_route_metric;
+
+	/* statically configured IIFs are never re-resolved */
+	if (PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags))
+		return PIM_RPF_OK;
+
+	if (pim_addr_is_any(up->upstream_addr)) {
+		zlog_debug("%s(%s): RP is not configured yet for %s",
+			__func__, caller, up->sg_str);
+		return PIM_RPF_OK;
+	}
+
+	/* snapshot current state so changes can be detected afterwards */
+	saved.source_nexthop = rpf->source_nexthop;
+	saved.rpf_addr = rpf->rpf_addr;
+	saved_mrib_route_metric = pim_up_mlag_local_cost(up);
+	if (old) {
+		old->source_nexthop = saved.source_nexthop;
+		old->rpf_addr = saved.rpf_addr;
+	}
+
+	src = up->upstream_addr; // RP or Src address
+	pim_addr_to_prefix(&grp, up->sg.grp);
+
+	if ((pim_addr_is_any(up->sg.src) && I_am_RP(pim, up->sg.grp)) ||
+	    PIM_UPSTREAM_FLAG_TEST_FHR(up->flags))
+		neigh_needed = false;
+	pim_find_or_track_nexthop(pim, up->upstream_addr, up, NULL, NULL);
+	if (!pim_ecmp_nexthop_lookup(pim, &rpf->source_nexthop, src, &grp,
+				     neigh_needed)) {
+		/* Route is Deleted in Zebra, reset the stored NH data */
+		pim_upstream_rpf_clear(pim, up);
+		pim_rpf_cost_change(pim, up, saved_mrib_route_metric);
+		return PIM_RPF_FAILURE;
+	}
+
+	rpf->rpf_addr = pim_rpf_find_rpf_addr(up);
+
+	if (pim_rpf_addr_is_inaddr_any(rpf) && PIM_DEBUG_ZEBRA) {
+		/* RPF'(S,G) not found */
+		zlog_debug("%s(%s): RPF'%s not found: won't send join upstream",
+			   __func__, caller, up->sg_str);
+		/* warning only */
+	}
+
+	/* detect change in pim_nexthop */
+	if (nexthop_mismatch(&rpf->source_nexthop, &saved.source_nexthop)) {
+
+		if (PIM_DEBUG_ZEBRA)
+			zlog_debug("%s(%s): (S,G)=%s source nexthop now is: interface=%s address=%pPAs pref=%d metric=%d",
+		 __func__, caller,
+		 up->sg_str,
+		 rpf->source_nexthop.interface ? rpf->source_nexthop.interface->name : "<ifname?>",
+		 &rpf->source_nexthop.mrib_nexthop_addr,
+		 rpf->source_nexthop.mrib_metric_preference,
+		 rpf->source_nexthop.mrib_route_metric);
+
+		pim_upstream_update_join_desired(pim, up);
+		pim_upstream_update_could_assert(up);
+		pim_upstream_update_my_assert_metric(up);
+	}
+
+	/* detect change in RPF_interface(S) */
+	if (saved.source_nexthop.interface != rpf->source_nexthop.interface) {
+
+		if (PIM_DEBUG_ZEBRA) {
+			zlog_debug("%s(%s): (S,G)=%s RPF_interface(S) changed from %s to %s",
+				   __func__, caller,
+				   up->sg_str,
+				   saved.source_nexthop.interface ? saved.source_nexthop.interface->name : "<oldif?>",
+				   rpf->source_nexthop.interface ? rpf->source_nexthop.interface->name : "<newif?>");
+			/* warning only */
+		}
+
+		pim_upstream_rpf_interface_changed(
+			up, saved.source_nexthop.interface);
+	}
+
+	/* detect change in RPF'(S,G) */
+	if (pim_addr_cmp(saved.rpf_addr, rpf->rpf_addr) ||
+	    saved.source_nexthop.interface != rpf->source_nexthop.interface) {
+		pim_rpf_cost_change(pim, up, saved_mrib_route_metric);
+		return PIM_RPF_CHANGED;
+	}
+
+	if (PIM_DEBUG_MLAG)
+		zlog_debug(
+			"%s(%s): Cost_to_rp of upstream-%s changed to:%u",
+			__func__, caller, up->sg_str,
+			rpf->source_nexthop.mrib_route_metric);
+
+	pim_rpf_cost_change(pim, up, saved_mrib_route_metric);
+
+	return PIM_RPF_OK;
+}
+
+/*
+ * In the case of RP deletion and RP unreachability,
+ * uninstall the mroute in the kernel and clear the
+ * rpf information in the pim upstream and pim channel
+ * oil data structure.
+ */
+void pim_upstream_rpf_clear(struct pim_instance *pim,
+			    struct pim_upstream *up)
+{
+	if (up->rpf.source_nexthop.interface) {
+		pim_upstream_switch(pim, up, PIM_UPSTREAM_NOTJOINED);
+		/* reset nexthop to "unknown": no interface, any-address
+		 * gateway, infinite assert metrics */
+		up->rpf.source_nexthop.interface = NULL;
+		up->rpf.source_nexthop.mrib_nexthop_addr = PIMADDR_ANY;
+		up->rpf.source_nexthop.mrib_metric_preference =
+			router->infinite_assert_metric.metric_preference;
+		up->rpf.source_nexthop.mrib_route_metric =
+			router->infinite_assert_metric.route_metric;
+		up->rpf.rpf_addr = PIMADDR_ANY;
+		pim_upstream_mroute_iif_update(up->channel_oil, __func__);
+	}
+}
+
+/*
+  RFC 4601: 4.1.6.  State Summarization Macros
+
+     neighbor RPF'(S,G) {
+	 if ( I_Am_Assert_Loser(S, G, RPF_interface(S) )) {
+	      return AssertWinner(S, G, RPF_interface(S) )
+	 } else {
+	      return NBR( RPF_interface(S), MRIB.next_hop( S ) )
+	 }
+     }
+
+  RPF'(*,G) and RPF'(S,G) indicate the neighbor from which data
+  packets should be coming and to which joins should be sent on the RP
+  tree and SPT, respectively.
+*/
+static pim_addr pim_rpf_find_rpf_addr(struct pim_upstream *up)
+{
+	struct pim_ifchannel *rpf_ch;
+	struct pim_neighbor *neigh;
+	pim_addr rpf_addr;
+
+	if (!up->rpf.source_nexthop.interface) {
+		zlog_warn("%s: missing RPF interface for upstream (S,G)=%s",
+			  __func__, up->sg_str);
+
+		return PIMADDR_ANY;
+	}
+
+	/* assert loser: the assert winner is RPF'(S,G) */
+	rpf_ch = pim_ifchannel_find(up->rpf.source_nexthop.interface, &up->sg);
+	if (rpf_ch) {
+		if (rpf_ch->ifassert_state == PIM_IFASSERT_I_AM_LOSER) {
+			return rpf_ch->ifassert_winner;
+		}
+	}
+
+	/* return NBR( RPF_interface(S), MRIB.next_hop( S ) ) */
+
+	neigh = pim_if_find_neighbor(up->rpf.source_nexthop.interface,
+				     up->rpf.source_nexthop.mrib_nexthop_addr);
+	if (neigh)
+		rpf_addr = neigh->source_addr;
+	else
+		rpf_addr = PIMADDR_ANY;
+
+	return rpf_addr;
+}
+
+/* True when the RPF address is the unspecified ("any") address. */
+int pim_rpf_addr_is_inaddr_any(struct pim_rpf *rpf)
+{
+	return pim_addr_is_any(rpf->rpf_addr);
+}
+
+/* Two RPFs are considered the same when their nexthop interfaces match;
+ * gateway addresses and metrics are deliberately not compared here. */
+int pim_rpf_is_same(struct pim_rpf *rpf1, struct pim_rpf *rpf2)
+{
+	if (rpf1->source_nexthop.interface == rpf2->source_nexthop.interface)
+		return 1;
+
+	return 0;
+}
+
+/* Hash function for the nexthop-cache table, keyed on the RPF address. */
+unsigned int pim_rpf_hash_key(const void *arg)
+{
+	const struct pim_nexthop_cache *r = arg;
+
+#if PIM_IPV == 4
+	return jhash_1word(r->rpf.rpf_addr.s_addr, 0);
+#else
+	return jhash2(r->rpf.rpf_addr.s6_addr32,
+		      array_size(r->rpf.rpf_addr.s6_addr32), 0);
+#endif
+}
+
+/* Equality function paired with pim_rpf_hash_key(). */
+bool pim_rpf_equal(const void *arg1, const void *arg2)
+{
+	const struct pim_nexthop_cache *r1 =
+		(const struct pim_nexthop_cache *)arg1;
+	const struct pim_nexthop_cache *r2 =
+		(const struct pim_nexthop_cache *)arg2;
+
+	return (!pim_addr_cmp(r1->rpf.rpf_addr, r2->rpf.rpf_addr));
+}
diff --git a/pimd/pim_rpf.h b/pimd/pim_rpf.h
new file mode 100644
index 0000000..7dae53f
--- /dev/null
+++ b/pimd/pim_rpf.h
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ *
+ * Reverse Path Forwarding (RPF) lookup state and helpers.
+ */
+
+#ifndef PIM_RPF_H
+#define PIM_RPF_H
+
+#include <zebra.h>
+#include "pim_str.h"
+
+struct pim_instance;
+
+/*
+  RFC 4601:
+
+  Metric Preference
+    Preference value assigned to the unicast routing protocol that
+    provided the route to the multicast source or Rendezvous-Point.
+
+  Metric
+    The unicast routing table metric associated with the route used to
+    reach the multicast source or Rendezvous-Point.  The metric is in
+    units applicable to the unicast routing protocol used.
+*/
+struct pim_nexthop {
+	/* cache of the most recent lookup, see pim_nexthop_lookup() */
+	pim_addr last_lookup;
+	long long last_lookup_time;
+	struct interface *interface;      /* RPF_interface(S) */
+	pim_addr mrib_nexthop_addr;       /* MRIB.next_hop(S) */
+	uint32_t mrib_metric_preference;  /* MRIB.pref(S) */
+	uint32_t mrib_route_metric;       /* MRIB.metric(S) */
+	struct pim_neighbor *nbr;
+};
+
+struct pim_rpf {
+	struct pim_nexthop source_nexthop;
+	pim_addr rpf_addr; /* RPF'(S,G) */
+};
+
+enum pim_rpf_result { PIM_RPF_OK = 0, PIM_RPF_CHANGED, PIM_RPF_FAILURE };
+
+struct pim_upstream;
+
+unsigned int pim_rpf_hash_key(const void *arg);
+bool pim_rpf_equal(const void *arg1, const void *arg2);
+
+bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,
+			pim_addr addr, int neighbor_needed);
+enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
+				   struct pim_upstream *up,
+				   struct pim_rpf *old, const char *caller);
+void pim_upstream_rpf_clear(struct pim_instance *pim,
+			    struct pim_upstream *up);
+int pim_rpf_addr_is_inaddr_any(struct pim_rpf *rpf);
+
+int pim_rpf_is_same(struct pim_rpf *rpf1, struct pim_rpf *rpf2);
+void pim_rpf_set_refresh_time(struct pim_instance *pim);
+#endif /* PIM_RPF_H */
diff --git a/pimd/pim_signals.c b/pimd/pim_signals.c
new file mode 100644
index 0000000..146a4e9
--- /dev/null
+++ b/pimd/pim_signals.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include <signal.h>
+
+#include "sigevent.h"
+#include "memory.h"
+#include "log.h"
+#include "if.h"
+
+#include "pim_signals.h"
+#include "pimd.h"
+
+/*
+ * Signal handlers
+ */
+
+/* SIGHUP: no config reload support here; intentionally ignored. */
+static void pim_sighup(void)
+{
+	zlog_info("SIGHUP received, ignoring");
+}
+
+/* SIGINT: clean shutdown of the daemon. */
+static void pim_sigint(void)
+{
+	zlog_notice("Terminating on signal SIGINT");
+	pim_terminate();
+	exit(1);
+}
+
+/* SIGTERM: same clean shutdown path as SIGINT. */
+static void pim_sigterm(void)
+{
+	zlog_notice("Terminating on signal SIGTERM");
+	pim_terminate();
+	exit(1);
+}
+
+/* SIGUSR1: rotate the log file. */
+static void pim_sigusr1(void)
+{
+	zlog_rotate();
+}
+
+/* signal dispatch table handed to the FRR signal framework */
+struct frr_signal_t pimd_signals[] = {
+	{
+		.signal = SIGHUP,
+		.handler = &pim_sighup,
+	},
+	{
+		.signal = SIGUSR1,
+		.handler = &pim_sigusr1,
+	},
+	{
+		.signal = SIGINT,
+		.handler = &pim_sigint,
+	},
+	{
+		.signal = SIGTERM,
+		.handler = &pim_sigterm,
+	},
+};
diff --git a/pimd/pim_signals.h b/pimd/pim_signals.h
new file mode 100644
index 0000000..a3ee604
--- /dev/null
+++ b/pimd/pim_signals.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ *
+ * Declaration of pimd's signal dispatch table (defined in pim_signals.c).
+ */
+
+#ifndef PIM_SIGNALS_H
+#define PIM_SIGNALS_H
+
+#include "sigevent.h"
+extern struct frr_signal_t pimd_signals[];
+
+#endif /* PIM_SIGNALS_H */
diff --git a/pimd/pim_sock.c b/pimd/pim_sock.c
new file mode 100644
index 0000000..6c65c5d
--- /dev/null
+++ b/pimd/pim_sock.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/igmp.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#include <netdb.h>
+#include <errno.h>
+
+#include "log.h"
+#include "privs.h"
+#include "if.h"
+#include "vrf.h"
+#include "sockopt.h"
+#include "lib_errors.h"
+#include "network.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_mroute.h"
+#include "pim_iface.h"
+#include "pim_sock.h"
+#include "pim_str.h"
+
+#if PIM_IPV == 4
+#define setsockopt_iptos setsockopt_ipv4_tos
+#define setsockopt_multicast_loop setsockopt_ipv4_multicast_loop
+#else
+#define setsockopt_iptos setsockopt_ipv6_tclass
+#define setsockopt_multicast_loop setsockopt_ipv6_multicast_loop
+#endif
+
+/* Open a raw socket for the given protocol in the instance's address
+ * family (needs elevated privileges). Returns the fd, or
+ * PIM_SOCK_ERR_SOCKET on failure. */
+int pim_socket_raw(int protocol)
+{
+	int fd;
+
+	frr_with_privs(&pimd_privs) {
+		fd = socket(PIM_AF, SOCK_RAW, protocol);
+	}
+
+	if (fd < 0) {
+		zlog_warn("Could not create raw socket: errno=%d: %s", errno,
+			  safe_strerror(errno));
+		return PIM_SOCK_ERR_SOCKET;
+	}
+
+	return fd;
+}
+
+/* Enable IP_HDRINCL so we supply the full IPv4 header ourselves;
+ * no-op for IPv6 builds. */
+void pim_socket_ip_hdr(int fd)
+{
+	frr_with_privs(&pimd_privs) {
+#if PIM_IPV == 4
+		const int on = 1;
+
+		if (setsockopt(fd, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on)))
+			zlog_err("%s: Could not turn on IP_HDRINCL option: %m",
+				 __func__);
+#endif
+	}
+}
+
+/*
+ * Given a socket and a interface,
+ * Bind that socket to that interface
+ *
+ * Uses SO_BINDTODEVICE where available; on platforms without it this is
+ * a silent no-op returning 0.
+ */
+int pim_socket_bind(int fd, struct interface *ifp)
+{
+	int ret = 0;
+
+#ifdef SO_BINDTODEVICE
+	frr_with_privs(&pimd_privs) {
+		ret = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifp->name,
+				 strlen(ifp->name));
+	}
+#endif
+	return ret;
+}
+
+#if PIM_IPV == 4
+/* Per-family multicast socket setup: pktinfo/dest-addr reporting, router
+ * alert for IGMP, TTL=1 and outgoing-interface selection. On error the fd
+ * is closed and a PIM_SOCK_ERR_* code returned. */
+static inline int pim_setsockopt(int protocol, int fd, struct interface *ifp)
+{
+	int one = 1;
+	int ttl = 1;
+
+#if defined(HAVE_IP_PKTINFO)
+	/* Linux and Solaris IP_PKTINFO */
+	if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one)))
+		zlog_warn("Could not set PKTINFO on socket fd=%d: %m", fd);
+#elif defined(HAVE_IP_RECVDSTADDR)
+	/* BSD IP_RECVDSTADDR */
+	if (setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &one, sizeof(one)))
+		zlog_warn("Could not set IP_RECVDSTADDR on socket fd=%d: %m",
+			  fd);
+#else
+	flog_err(
+		EC_LIB_DEVELOPMENT,
+		"Missing IP_PKTINFO and IP_RECVDSTADDR: unable to get dst addr from recvmsg()");
+	close(fd);
+	return PIM_SOCK_ERR_DSTADDR;
+#endif
+
+	/* Set router alert (RFC 2113) for all IGMP messages (RFC
+	 * 3376 4. Message Formats)*/
+	if (protocol == IPPROTO_IGMP) {
+		/* RFC 2113 router alert IP option: type 148, length 4 */
+		uint8_t ra[4];
+
+		ra[0] = 148;
+		ra[1] = 4;
+		ra[2] = 0;
+		ra[3] = 0;
+		if (setsockopt(fd, IPPROTO_IP, IP_OPTIONS, ra, 4)) {
+			zlog_warn(
+				"Could not set Router Alert Option on socket fd=%d: %m",
+				fd);
+			close(fd);
+			return PIM_SOCK_ERR_RA;
+		}
+	}
+
+	if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl))) {
+		zlog_warn("Could not set multicast TTL=%d on socket fd=%d: %m",
+			  ttl, fd);
+		close(fd);
+		return PIM_SOCK_ERR_TTL;
+	}
+
+	if (setsockopt_ipv4_multicast_if(fd, PIMADDR_ANY, ifp->ifindex)) {
+		zlog_warn(
+			"Could not set Outgoing Interface Option on socket fd=%d: %m",
+			fd);
+		close(fd);
+		return PIM_SOCK_ERR_IFACE;
+	}
+
+	return 0;
+}
+#else /* PIM_IPV != 4 */
+/* IPv6 variant: pktinfo, hop limit 1 and outgoing interface. */
+static inline int pim_setsockopt(int protocol, int fd, struct interface *ifp)
+{
+	int ttl = 1;
+	struct ipv6_mreq mreq = {};
+
+	setsockopt_ipv6_pktinfo(fd, 1);
+	setsockopt_ipv6_multicast_hops(fd, ttl);
+
+	mreq.ipv6mr_interface = ifp->ifindex;
+	if (setsockopt(fd, IPPROTO_IPV6, IPV6_MULTICAST_IF, &mreq,
+		       sizeof(mreq))) {
+		zlog_warn(
+			"Could not set Outgoing Interface Option on socket fd=%d: %m",
+			fd);
+		close(fd);
+		return PIM_SOCK_ERR_IFACE;
+	}
+
+	return 0;
+}
+#endif
+
+/* Create the non-blocking raw socket used for PIM register handling
+ * (protocol PIM_PROTO_REG). Returns the fd or a PIM_SOCK_ERR_* code. */
+int pim_reg_sock(void)
+{
+	int fd;
+	long flags;
+
+	frr_with_privs (&pimd_privs) {
+		fd = socket(PIM_AF, SOCK_RAW, PIM_PROTO_REG);
+	}
+
+	if (fd < 0) {
+		zlog_warn("Could not create raw socket: errno=%d: %s", errno,
+			  safe_strerror(errno));
+		return PIM_SOCK_ERR_SOCKET;
+	}
+
+	if (sockopt_reuseaddr(fd)) {
+		close(fd);
+		return PIM_SOCK_ERR_REUSE;
+	}
+
+	/* switch the fd to non-blocking mode, preserving existing flags */
+	flags = fcntl(fd, F_GETFL, 0);
+	if (flags < 0) {
+		zlog_warn(
+			"Could not get fcntl(F_GETFL,O_NONBLOCK) on socket fd=%d: errno=%d: %s",
+			fd, errno, safe_strerror(errno));
+		close(fd);
+		return PIM_SOCK_ERR_NONBLOCK_GETFL;
+	}
+
+	if (fcntl(fd, F_SETFL, flags | O_NONBLOCK)) {
+		zlog_warn(
+			"Could not set fcntl(F_SETFL,O_NONBLOCK) on socket fd=%d: errno=%d: %s",
+			fd, errno, safe_strerror(errno));
+		close(fd);
+		return PIM_SOCK_ERR_NONBLOCK_SETFL;
+	}
+
+	return fd;
+}
+
+/* Create and fully configure a per-interface multicast socket: raw
+ * socket, bound to ifp, non-blocking, large receive buffer, per-family
+ * options, loopback control and DSCP marking. Returns the fd or a
+ * PIM_SOCK_ERR_* code. */
+int pim_socket_mcast(int protocol, pim_addr ifaddr, struct interface *ifp,
+		     uint8_t loop)
+{
+	int fd;
+	int ret;
+
+	fd = pim_socket_raw(protocol);
+	if (fd < 0) {
+		zlog_warn("Could not create multicast socket: errno=%d: %s",
+			  errno, safe_strerror(errno));
+		return PIM_SOCK_ERR_SOCKET;
+	}
+
+	/* XXX: if SO_BINDTODEVICE isn't available, use IP_PKTINFO / IP_RECVIF
+	 * to emulate behaviour?  Or change to only use 1 socket for all
+	 * interfaces? */
+	ret = pim_socket_bind(fd, ifp);
+	if (ret) {
+		close(fd);
+		zlog_warn("Could not set fd: %d for interface: %s to device",
+			  fd, ifp->name);
+		return PIM_SOCK_ERR_BIND;
+	}
+
+	set_nonblocking(fd);
+	sockopt_reuseaddr(fd);
+	setsockopt_so_recvbuf(fd, 8 * 1024 * 1024);
+
+	/* pim_setsockopt() closes fd itself on failure */
+	ret = pim_setsockopt(protocol, fd, ifp);
+	if (ret) {
+		zlog_warn("pim_setsockopt failed for interface: %s to device ",
+			  ifp->name);
+		return ret;
+	}
+
+	/* leftover common sockopts */
+	if (setsockopt_multicast_loop(fd, loop)) {
+		zlog_warn(
+			"Could not %s Multicast Loopback Option on socket fd=%d: %m",
+			loop ? "enable" : "disable", fd);
+		close(fd);
+		return PIM_SOCK_ERR_LOOP;
+	}
+
+	/* Set Tx socket DSCP byte */
+	if (setsockopt_iptos(fd, IPTOS_PREC_INTERNETCONTROL))
+		zlog_warn("can't set sockopt IP[V6]_TOS to socket %d: %m", fd);
+
+	return fd;
+}
+
+/* Join a multicast group on a socket (IP_ADD_MEMBERSHIP / IPV6_JOIN_GROUP)
+ * and update the interface's join statistics. Returns the setsockopt
+ * result (0 on success). */
+int pim_socket_join(int fd, pim_addr group, pim_addr ifaddr, ifindex_t ifindex,
+		    struct pim_interface *pim_ifp)
+{
+	int ret;
+
+#if PIM_IPV == 4
+	ret = setsockopt_ipv4_multicast(fd, IP_ADD_MEMBERSHIP, ifaddr,
+					group.s_addr, ifindex);
+#else
+	struct ipv6_mreq opt;
+
+	memcpy(&opt.ipv6mr_multiaddr, &group, 16);
+	opt.ipv6mr_interface = ifindex;
+	ret = setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP, &opt, sizeof(opt));
+#endif
+
+	/* attempt counter is bumped regardless of the outcome */
+	pim_ifp->igmp_ifstat_joins_sent++;
+
+	if (ret) {
+		flog_err(
+			EC_LIB_SOCKET,
+			"Failure socket joining fd=%d group %pPAs on interface address %pPAs: %m",
+			fd, &group, &ifaddr);
+		pim_ifp->igmp_ifstat_joins_failed++;
+		return ret;
+	}
+
+	if (PIM_DEBUG_TRACE)
+		zlog_debug(
+			"Socket fd=%d joined group %pPAs on interface address %pPAs",
+			fd, &group, &ifaddr);
+	return ret;
+}
+
+#if PIM_IPV == 4
+/* Extract the packet's destination address and/or receive ifindex from
+ * the recvmsg() ancillary data (IP_PKTINFO, IP_RECVDSTADDR or IP_RECVIF,
+ * depending on platform). Either output pointer may be NULL. */
+static void cmsg_getdstaddr(struct msghdr *mh, struct sockaddr_storage *dst,
+			    ifindex_t *ifindex)
+{
+	struct cmsghdr *cmsg;
+	struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
+
+	for (cmsg = CMSG_FIRSTHDR(mh); cmsg != NULL;
+	     cmsg = CMSG_NXTHDR(mh, cmsg)) {
+#ifdef HAVE_IP_PKTINFO
+		if ((cmsg->cmsg_level == IPPROTO_IP) &&
+		    (cmsg->cmsg_type == IP_PKTINFO)) {
+			struct in_pktinfo *i;
+
+			i = (struct in_pktinfo *)CMSG_DATA(cmsg);
+			if (dst4)
+				dst4->sin_addr = i->ipi_addr;
+			if (ifindex)
+				*ifindex = i->ipi_ifindex;
+
+			break;
+		}
+#endif
+
+#ifdef HAVE_IP_RECVDSTADDR
+		if ((cmsg->cmsg_level == IPPROTO_IP) &&
+		    (cmsg->cmsg_type == IP_RECVDSTADDR)) {
+			struct in_addr *i = (struct in_addr *)CMSG_DATA(cmsg);
+
+			if (dst4)
+				dst4->sin_addr = *i;
+
+			break;
+		}
+#endif
+
+#if defined(HAVE_IP_RECVIF) && defined(CMSG_IFINDEX)
+		if (cmsg->cmsg_type == IP_RECVIF)
+			if (ifindex)
+				*ifindex = CMSG_IFINDEX(cmsg);
+#endif
+	}
+}
+#else /* PIM_IPV != 4 */
+/* IPv6 variant: both destination address and ifindex come from the
+ * single IPV6_PKTINFO ancillary message. */
+static void cmsg_getdstaddr(struct msghdr *mh, struct sockaddr_storage *dst,
+			    ifindex_t *ifindex)
+{
+	struct cmsghdr *cmsg;
+	struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
+
+	for (cmsg = CMSG_FIRSTHDR(mh); cmsg != NULL;
+	     cmsg = CMSG_NXTHDR(mh, cmsg)) {
+		if ((cmsg->cmsg_level == IPPROTO_IPV6) &&
+		    (cmsg->cmsg_type == IPV6_PKTINFO)) {
+			struct in6_pktinfo *i;
+
+			i = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+
+			if (dst6)
+				dst6->sin6_addr = i->ipi6_addr;
+			if (ifindex)
+				*ifindex = i->ipi6_ifindex;
+			break;
+		}
+	}
+}
+#endif /* PIM_IPV != 4 */
+
+/* recvmsg() wrapper that also reports the sender (from), the local
+ * destination address (to) and the receiving ifindex via ancillary data.
+ * Any of from/to/ifindex may be NULL. Returns the received length, or a
+ * negative value on error. */
+int pim_socket_recvfromto(int fd, uint8_t *buf, size_t len,
+			  struct sockaddr_storage *from, socklen_t *fromlen,
+			  struct sockaddr_storage *to, socklen_t *tolen,
+			  ifindex_t *ifindex)
+{
+	struct msghdr msgh;
+	struct iovec iov;
+	char cbuf[1000];
+	int err;
+
+	/*
+	 * IP_PKTINFO / IP_RECVDSTADDR don't yield sin_port.
+	 * Use getsockname() to get sin_port.
+	 */
+	if (to) {
+		socklen_t to_len = sizeof(*to);
+
+		pim_socket_getsockname(fd, (struct sockaddr *)to, &to_len);
+
+		if (tolen)
+			*tolen = sizeof(*to);
+	}
+
+	memset(&msgh, 0, sizeof(msgh));
+	iov.iov_base = buf;
+	iov.iov_len = len;
+	msgh.msg_control = cbuf;
+	msgh.msg_controllen = sizeof(cbuf);
+	msgh.msg_name = from;
+	msgh.msg_namelen = fromlen ? *fromlen : 0;
+	msgh.msg_iov = &iov;
+	msgh.msg_iovlen = 1;
+	msgh.msg_flags = 0;
+
+	err = recvmsg(fd, &msgh, 0);
+	if (err < 0)
+		return err;
+
+	if (fromlen)
+		*fromlen = msgh.msg_namelen;
+
+	/* overwrite dest addr / ifindex with the per-packet cmsg values */
+	cmsg_getdstaddr(&msgh, to, ifindex);
+
+	return err; /* len */
+}
+
+/* getsockname() wrapper with logging; preserves errno for the caller.
+ * Returns PIM_SOCK_ERR_NONE on success, PIM_SOCK_ERR_NAME on failure. */
+int pim_socket_getsockname(int fd, struct sockaddr *name, socklen_t *namelen)
+{
+	if (getsockname(fd, name, namelen)) {
+		int e = errno;
+		zlog_warn(
+			"Could not get Socket Name for socket fd=%d: errno=%d: %s",
+			fd, errno, safe_strerror(errno));
+		errno = e;
+		return PIM_SOCK_ERR_NAME;
+	}
+
+	return PIM_SOCK_ERR_NONE;
+}
diff --git a/pimd/pim_sock.h b/pimd/pim_sock.h
new file mode 100644
index 0000000..04ab864
--- /dev/null
+++ b/pimd/pim_sock.h
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_SOCK_H
+#define PIM_SOCK_H
+
+#include <netinet/in.h>
+
+/* Error codes returned by the pim_socket_* helpers below. */
+#define PIM_SOCK_ERR_NONE (0) /* No error */
+#define PIM_SOCK_ERR_SOCKET (-1) /* socket() */
+#define PIM_SOCK_ERR_RA (-2) /* Router Alert option */
+#define PIM_SOCK_ERR_REUSE (-3) /* Reuse option */
+#define PIM_SOCK_ERR_TTL (-4) /* TTL option */
+#define PIM_SOCK_ERR_LOOP (-5) /* Loopback option */
+#define PIM_SOCK_ERR_IFACE (-6) /* Outgoing interface option */
+#define PIM_SOCK_ERR_DSTADDR (-7) /* Destination address option */
+#define PIM_SOCK_ERR_NONBLOCK_GETFL (-8) /* Get O_NONBLOCK */
+#define PIM_SOCK_ERR_NONBLOCK_SETFL (-9) /* Set O_NONBLOCK */
+#define PIM_SOCK_ERR_NAME (-10) /* Socket name (getsockname) */
+#define PIM_SOCK_ERR_BIND (-11) /* Can't bind to interface */
+
+struct pim_instance;
+
+int pim_socket_bind(int fd, struct interface *ifp);
+void pim_socket_ip_hdr(int fd);
+int pim_socket_raw(int protocol);
+int pim_socket_mcast(int protocol, pim_addr ifaddr, struct interface *ifp,
+ uint8_t loop);
+int pim_socket_join(int fd, pim_addr group, pim_addr ifaddr, ifindex_t ifindex,
+ struct pim_interface *pim_ifp);
+/* recvmsg() wrapper that also reports dst address + incoming ifindex */
+int pim_socket_recvfromto(int fd, uint8_t *buf, size_t len,
+ struct sockaddr_storage *from, socklen_t *fromlen,
+ struct sockaddr_storage *to, socklen_t *tolen,
+ ifindex_t *ifindex);
+
+int pim_socket_getsockname(int fd, struct sockaddr *name, socklen_t *namelen);
+
+int pim_reg_sock(void);
+
+#endif /* PIM_SOCK_H */
diff --git a/pimd/pim_ssm.c b/pimd/pim_ssm.c
new file mode 100644
index 0000000..7b7503a
--- /dev/null
+++ b/pimd/pim_ssm.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * IP SSM ranges for FRR
+ * Copyright (C) 2017 Cumulus Networks, Inc.
+ */
+
+#include <zebra.h>
+
+#include <lib/linklist.h>
+#include <lib/prefix.h>
+#include <lib/vty.h>
+#include <lib/vrf.h>
+#include <lib/plist.h>
+#include <lib/lib_errors.h>
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_ssm.h"
+#include "pim_igmp.h"
+
+/*
+ * Re-evaluate all group state after the configured SSM range changed.
+ * IPv4 (IGMP) only; for IPv6 this is currently a no-op.
+ */
+static void pim_ssm_range_reevaluate(struct pim_instance *pim)
+{
+#if PIM_IPV == 4
+ /* 1. Setup register state for (S,G) entries if G has changed from SSM
+ * to
+ * ASM.
+ * 2. check existing (*,G) IGMP registrations to see if they are
+ * still ASM. if they are now SSM delete them.
+ * 3. Allow channel setup for IGMP (*,G) members if G is now ASM
+ * 4. I could tear down all (*,G), (S,G,rpt) states. But that is an
+ * unnecessary sledgehammer and may not be particularly useful as it is
+ * likely the SPT switchover has already happened for flows along such
+ * RPTs.
+ * As for the RPT states it seems that the best thing to do is let them
+ * age
+ * out gracefully. As long as the FHR and LHR do the right thing RPTs
+ * will
+ * disappear in time for SSM groups.
+ */
+ pim_upstream_register_reevaluate(pim);
+ igmp_source_forward_reevaluate_all(pim);
+#endif
+}
+
+/*
+ * Prefix-list change notification: if the updated prefix list is the one
+ * configured as this instance's SSM range, re-evaluate group state.
+ */
+void pim_ssm_prefix_list_update(struct pim_instance *pim,
+ struct prefix_list *plist)
+{
+ struct pim_ssm *ssm = pim->ssm_info;
+
+ if (!ssm->plist_name
+ || strcmp(ssm->plist_name, prefix_list_name(plist))) {
+ /* not ours */
+ return;
+ }
+
+ pim_ssm_range_reevaluate(pim);
+}
+
+/* True if the group falls in the IANA-assigned default SSM range
+ * (232/8 for IPv4, ff3x::/32 for IPv6 - see pim_addr_ssm()). */
+static int pim_is_grp_standard_ssm(struct prefix *group)
+{
+ pim_addr addr = pim_addr_from_prefix(group);
+
+ return pim_addr_ssm(addr);
+}
+
+/*
+ * Is group_addr inside the operative SSM range?  Uses the configured
+ * prefix list when one is set, the standard range otherwise.  A
+ * configured-but-missing prefix list means "not SSM" (returns 0).
+ */
+int pim_is_grp_ssm(struct pim_instance *pim, pim_addr group_addr)
+{
+ struct pim_ssm *ssm;
+ struct prefix group;
+ struct prefix_list *plist;
+
+ pim_addr_to_prefix(&group, group_addr);
+
+ ssm = pim->ssm_info;
+ if (!ssm->plist_name) {
+ return pim_is_grp_standard_ssm(&group);
+ }
+
+ plist = prefix_list_lookup(PIM_AFI, ssm->plist_name);
+ if (!plist)
+ return 0;
+
+ return (prefix_list_apply_ext(plist, NULL, &group, true) ==
+ PREFIX_PERMIT);
+}
+
+/*
+ * Set (plist_name != NULL) or clear (plist_name == NULL) the prefix list
+ * defining the SSM range.  Returns PIM_SSM_ERR_NO_VRF on vrf mismatch,
+ * PIM_SSM_ERR_DUP if the same name is already configured, else
+ * PIM_SSM_ERR_NONE.  Triggers a range re-evaluation on actual change.
+ */
+int pim_ssm_range_set(struct pim_instance *pim, vrf_id_t vrf_id,
+ const char *plist_name)
+{
+ struct pim_ssm *ssm;
+ int change = 0;
+
+ if (vrf_id != pim->vrf->vrf_id)
+ return PIM_SSM_ERR_NO_VRF;
+
+ ssm = pim->ssm_info;
+ if (plist_name) {
+ if (ssm->plist_name) {
+ if (!strcmp(ssm->plist_name, plist_name))
+ return PIM_SSM_ERR_DUP;
+ XFREE(MTYPE_PIM_FILTER_NAME, ssm->plist_name);
+ }
+ ssm->plist_name = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist_name);
+ change = 1;
+ } else {
+ if (ssm->plist_name) {
+ change = 1;
+ XFREE(MTYPE_PIM_FILTER_NAME, ssm->plist_name);
+ }
+ }
+
+ if (change)
+ pim_ssm_range_reevaluate(pim);
+
+ return PIM_SSM_ERR_NONE;
+}
+
+/* Allocate per-instance SSM state (zeroed: no prefix list configured). */
+void *pim_ssm_init(void)
+{
+ struct pim_ssm *ssm;
+
+ ssm = XCALLOC(MTYPE_PIM_SSM_INFO, sizeof(*ssm));
+
+ return ssm;
+}
+
+/* Free per-instance SSM state; safe to call with NULL. */
+void pim_ssm_terminate(struct pim_ssm *ssm)
+{
+ if (!ssm)
+ return;
+
+ XFREE(MTYPE_PIM_FILTER_NAME, ssm->plist_name);
+
+ XFREE(MTYPE_PIM_SSM_INFO, ssm);
+}
diff --git a/pimd/pim_ssm.h b/pimd/pim_ssm.h
new file mode 100644
index 0000000..24a037e
--- /dev/null
+++ b/pimd/pim_ssm.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * IP SSM ranges for FRR
+ * Copyright (C) 2017 Cumulus Networks, Inc.
+ */
+#ifndef PIM_SSM_H
+#define PIM_SSM_H
+
+#define PIM_SSM_STANDARD_RANGE "232.0.0.0/8"
+
+struct pim_instance;
+
+/* SSM error codes */
+enum pim_ssm_err {
+ PIM_SSM_ERR_NONE = 0,
+ PIM_SSM_ERR_NO_VRF = -1,
+ PIM_SSM_ERR_DUP = -2,
+};
+
+struct pim_ssm {
+ char *plist_name; /* prefix list of group ranges */
+};
+
+void pim_ssm_prefix_list_update(struct pim_instance *pim,
+ struct prefix_list *plist);
+extern int pim_is_grp_ssm(struct pim_instance *pim, pim_addr group_addr);
+int pim_ssm_range_set(struct pim_instance *pim, vrf_id_t vrf_id,
+ const char *plist_name);
+void *pim_ssm_init(void);
+void pim_ssm_terminate(struct pim_ssm *ssm);
+#endif
diff --git a/pimd/pim_ssmpingd.c b/pimd/pim_ssmpingd.c
new file mode 100644
index 0000000..27dbb0d
--- /dev/null
+++ b/pimd/pim_ssmpingd.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "if.h"
+#include "log.h"
+#include "memory.h"
+#include "sockopt.h"
+#include "vrf.h"
+#include "lib_errors.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_ssmpingd.h"
+#include "pim_time.h"
+#include "pim_sock.h"
+#include "network.h"
+
+#if PIM_IPV == 4
+static const char *const PIM_SSMPINGD_REPLY_GROUP = "232.43.211.234";
+#else
+static const char *const PIM_SSMPINGD_REPLY_GROUP = "ff3e::4321:1234";
+#endif
+
+enum { PIM_SSMPINGD_REQUEST = 'Q', PIM_SSMPINGD_REPLY = 'A' };
+
+static void ssmpingd_read_on(struct ssmpingd_sock *ss);
+
+/*
+ * Per-instance ssmpingd setup: parse the well-known reply group into
+ * pim->ssmpingd_group_addr.  The listener list itself is created lazily
+ * by ssmpingd_new(); it must not exist yet here.
+ */
+void pim_ssmpingd_init(struct pim_instance *pim)
+{
+ int result;
+
+ assert(!pim->ssmpingd_list);
+
+ result = inet_pton(PIM_AF, PIM_SSMPINGD_REPLY_GROUP,
+ &pim->ssmpingd_group_addr);
+
+ assert(result > 0);
+}
+
+/* Tear down all ssmpingd listeners (list del callback frees each entry). */
+void pim_ssmpingd_destroy(struct pim_instance *pim)
+{
+ if (pim->ssmpingd_list)
+ list_delete(&pim->ssmpingd_list);
+}
+
+/* Linear lookup of a listener by its configured source address;
+ * returns NULL if the list does not exist or no entry matches. */
+static struct ssmpingd_sock *ssmpingd_find(struct pim_instance *pim,
+ pim_addr source_addr)
+{
+ struct listnode *node;
+ struct ssmpingd_sock *ss;
+
+ if (!pim->ssmpingd_list)
+ return 0;
+
+ for (ALL_LIST_ELEMENTS_RO(pim->ssmpingd_list, node, ss))
+ if (!pim_addr_cmp(source_addr, ss->source_addr))
+ return ss;
+
+ return 0;
+}
+
+/* Release a listener structure (does NOT close its socket). */
+static void ssmpingd_free(struct ssmpingd_sock *ss)
+{
+ XFREE(MTYPE_PIM_SSMPINGD, ss);
+}
+
+#if PIM_IPV == 4
+/*
+ * IPv4 variant: configure the ssmpingd UDP socket - destination-address
+ * reporting (IP_PKTINFO or IP_RECVDSTADDR), multicast loopback off,
+ * outgoing interface (by address) and multicast TTL.
+ * Returns 0 on success; on failure closes fd and returns nonzero.
+ */
+static inline int ssmpingd_setsockopt(int fd, pim_addr addr, int mttl)
+{
+ /* Needed to obtain destination address from recvmsg() */
+#if defined(HAVE_IP_PKTINFO)
+ /* Linux and Solaris IP_PKTINFO */
+ int opt = 1;
+ if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &opt, sizeof(opt))) {
+ zlog_warn(
+ "%s: could not set IP_PKTINFO on socket fd=%d: errno=%d: %s",
+ __func__, fd, errno, safe_strerror(errno));
+ }
+#elif defined(HAVE_IP_RECVDSTADDR)
+ /* BSD IP_RECVDSTADDR */
+ int opt = 1;
+ if (setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &opt, sizeof(opt))) {
+ zlog_warn(
+ "%s: could not set IP_RECVDSTADDR on socket fd=%d: errno=%d: %s",
+ __func__, fd, errno, safe_strerror(errno));
+ }
+#else
+ flog_err(
+ EC_LIB_DEVELOPMENT,
+ "%s %s: missing IP_PKTINFO and IP_RECVDSTADDR: unable to get dst addr from recvmsg()",
+ __FILE__, __func__);
+ close(fd);
+ return -1;
+#endif
+
+ if (setsockopt_ipv4_multicast_loop(fd, 0)) {
+ zlog_warn(
+ "%s: could not disable Multicast Loopback Option on socket fd=%d: errno=%d: %s",
+ __func__, fd, errno, safe_strerror(errno));
+ close(fd);
+ return PIM_SOCK_ERR_LOOP;
+ }
+
+ if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, (void *)&addr,
+ sizeof(addr))) {
+ zlog_warn(
+ "%s: could not set Outgoing Interface Option on socket fd=%d: errno=%d: %s",
+ __func__, fd, errno, safe_strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, (void *)&mttl,
+ sizeof(mttl))) {
+ zlog_warn(
+ "%s: could not set multicast TTL=%d on socket fd=%d: errno=%d: %s",
+ __func__, mttl, fd, errno, safe_strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ return 0;
+}
+#else
+/*
+ * IPv6 variant: same contract as above but via IPV6_PKTINFO, hop count
+ * and IPV6_MULTICAST_IF (which takes an address here, not an ifindex -
+ * NOTE(review): IPV6_MULTICAST_IF normally expects an interface index;
+ * confirm this matches the helpers' expectations).
+ */
+static inline int ssmpingd_setsockopt(int fd, pim_addr addr, int mttl)
+{
+ setsockopt_ipv6_pktinfo(fd, 1);
+ setsockopt_ipv6_multicast_hops(fd, mttl);
+
+ if (setsockopt_ipv6_multicast_loop(fd, 0)) {
+ zlog_warn(
+ "%s: could not disable Multicast Loopback Option on socket fd=%d: errno=%d: %s",
+ __func__, fd, errno, safe_strerror(errno));
+ close(fd);
+ return PIM_SOCK_ERR_LOOP;
+ }
+
+ if (setsockopt(fd, IPPROTO_IPV6, IPV6_MULTICAST_IF, (void *)&addr,
+ sizeof(addr))) {
+ zlog_warn(
+ "%s: could not set Outgoing Interface Option on socket fd=%d: errno=%d: %s",
+ __func__, fd, errno, safe_strerror(errno));
+ close(fd);
+ return -1;
+ }
+ return 0;
+}
+#endif
+
+
+/*
+ * Create, bind and configure the UDP socket a ssmpingd listener uses.
+ * Returns the fd on success, -1 on failure (fd closed on all error
+ * paths; ssmpingd_setsockopt closes it internally on its failures).
+ *
+ * NOTE(review): the bind address is taken from getsockname() on the
+ * freshly created, unbound socket - the 'port' parameter is never
+ * written into 'sockaddr', so this appears to bind to the wildcard
+ * address/port 0 rather than the requested port (4321).  'sockaddr' is
+ * also not zero-initialized before getsockname() fills it.  Confirm
+ * against the intended ssmping protocol behavior.
+ */
+static int ssmpingd_socket(pim_addr addr, int port, int mttl)
+{
+ struct sockaddr_storage sockaddr;
+ int fd;
+ int ret;
+ socklen_t len = sizeof(sockaddr);
+
+ fd = socket(PIM_AF, SOCK_DGRAM, IPPROTO_UDP);
+ if (fd < 0) {
+ flog_err_sys(EC_LIB_SOCKET,
+ "%s: could not create socket: errno=%d: %s",
+ __func__, errno, safe_strerror(errno));
+ return -1;
+ }
+
+ pim_socket_getsockname(fd, (struct sockaddr *)&sockaddr, &len);
+
+ if (bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr))) {
+ zlog_warn(
+ "%s: bind(fd=%d,addr=%pSUp,port=%d,len=%zu) failure: errno=%d: %s",
+ __func__, fd, &sockaddr, port, sizeof(sockaddr), errno,
+ safe_strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ set_nonblocking(fd);
+ sockopt_reuseaddr(fd);
+
+ ret = ssmpingd_setsockopt(fd, addr, mttl);
+ if (ret) {
+ /* fd already closed by ssmpingd_setsockopt() */
+ zlog_warn("ssmpingd_setsockopt failed");
+ return -1;
+ }
+
+ return fd;
+}
+
+/* Stop a listener: cancel its read event, close its socket (failure is
+ * warn-only), unlink it from the instance list and free it. */
+static void ssmpingd_delete(struct ssmpingd_sock *ss)
+{
+ assert(ss);
+
+ EVENT_OFF(ss->t_sock_read);
+
+ if (close(ss->sock_fd)) {
+ zlog_warn(
+ "%s: failure closing ssmpingd sock_fd=%d for source %pPA: errno=%d: %s",
+ __func__, ss->sock_fd, &ss->source_addr, errno,
+ safe_strerror(errno));
+ /* warning only */
+ }
+
+ listnode_delete(ss->pim->ssmpingd_list, ss);
+ ssmpingd_free(ss);
+}
+
+/* Best-effort non-blocking sendto(); short sends and errors are logged
+ * but not retried or propagated. */
+static void ssmpingd_sendto(struct ssmpingd_sock *ss, const uint8_t *buf,
+ int len, struct sockaddr_storage to)
+{
+ socklen_t tolen = sizeof(to);
+ int sent;
+
+ sent = sendto(ss->sock_fd, buf, len, MSG_DONTWAIT,
+ (struct sockaddr *)&to, tolen);
+ if (sent != len) {
+ if (sent < 0) {
+ zlog_warn(
+ "%s: sendto() failure to %pSUp,fd=%d len=%d: errno=%d: %s",
+ __func__, &to, ss->sock_fd, len, errno,
+ safe_strerror(errno));
+ } else {
+ zlog_warn(
+ "%s: sendto() partial to %pSUp, fd=%d len=%d: sent=%d",
+ __func__, &to, ss->sock_fd, len, sent);
+ }
+ }
+}
+
+/*
+ * Handle one incoming ssmping request: validate the type byte, flip it
+ * to a reply and answer twice - unicast back to the sender and
+ * multicast to the well-known reply group.  Returns 0 on success or on
+ * an ignored bad packet, -1 on receive failure.
+ */
+static int ssmpingd_read_msg(struct ssmpingd_sock *ss)
+{
+ struct interface *ifp;
+ struct sockaddr_storage from;
+ struct sockaddr_storage to;
+ socklen_t fromlen = sizeof(from);
+ socklen_t tolen = sizeof(to);
+ ifindex_t ifindex = -1;
+ uint8_t buf[1000];
+ int len;
+
+ ++ss->requests;
+
+ len = pim_socket_recvfromto(ss->sock_fd, buf, sizeof(buf), &from,
+ &fromlen, &to, &tolen, &ifindex);
+
+ if (len < 0) {
+ zlog_warn(
+ "%s: failure receiving ssmping for source %pPA on fd=%d: errno=%d: %s",
+ __func__, &ss->source_addr, ss->sock_fd, errno,
+ safe_strerror(errno));
+ return -1;
+ }
+
+ ifp = if_lookup_by_index(ifindex, ss->pim->vrf->vrf_id);
+
+ if (buf[0] != PIM_SSMPINGD_REQUEST) {
+ zlog_warn(
+ "%s: bad ssmping type=%d from %pSUp to %pSUp on interface %s ifindex=%d fd=%d src=%pPA",
+ __func__, buf[0], &from, &to,
+ ifp ? ifp->name : "<iface?>", ifindex, ss->sock_fd,
+ &ss->source_addr);
+ return 0;
+ }
+
+ if (PIM_DEBUG_SSMPINGD) {
+ zlog_debug(
+ "%s: recv ssmping from %pSUp, to %pSUp, on interface %s ifindex=%d fd=%d src=%pPA",
+ __func__, &from, &to, ifp ? ifp->name : "<iface?>",
+ ifindex, ss->sock_fd, &ss->source_addr);
+ }
+
+ buf[0] = PIM_SSMPINGD_REPLY;
+
+ /* unicast reply */
+ ssmpingd_sendto(ss, buf, len, from);
+
+ /* multicast reply */
+ /* NOTE(review): this copies a raw pim_addr over the START of the
+ * sockaddr_storage, which overlays sa_family/sin_port rather than
+ * the address field (sin_addr/sin6_addr) - relying on 'from' still
+ * holding a valid port looks fragile; confirm the intended layout. */
+ memcpy(&from, &ss->pim->ssmpingd_group_addr, sizeof(pim_addr));
+ ssmpingd_sendto(ss, buf, len, from);
+
+ return 0;
+}
+
+/* Read-event callback: process one message, then re-arm the read. */
+static void ssmpingd_sock_read(struct event *t)
+{
+ struct ssmpingd_sock *ss;
+
+ ss = EVENT_ARG(t);
+
+ ssmpingd_read_msg(ss);
+
+ /* Keep reading */
+ ssmpingd_read_on(ss);
+}
+
+/* (Re-)schedule the socket read event for this listener. */
+static void ssmpingd_read_on(struct ssmpingd_sock *ss)
+{
+ event_add_read(router->master, ssmpingd_sock_read, ss, ss->sock_fd,
+ &ss->t_sock_read);
+}
+
+/*
+ * Create a listener for source_addr: lazily create the instance list,
+ * open the socket (fixed port 4321, multicast TTL 64), register the
+ * entry and start reading.  Returns NULL on socket failure.
+ */
+static struct ssmpingd_sock *ssmpingd_new(struct pim_instance *pim,
+ pim_addr source_addr)
+{
+ struct ssmpingd_sock *ss;
+ int sock_fd;
+
+ if (!pim->ssmpingd_list) {
+ pim->ssmpingd_list = list_new();
+ pim->ssmpingd_list->del = (void (*)(void *))ssmpingd_free;
+ }
+
+ sock_fd =
+ ssmpingd_socket(source_addr, /* port: */ 4321, /* mTTL: */ 64);
+ if (sock_fd < 0) {
+ zlog_warn("%s: ssmpingd_socket() failure for source %pPA",
+ __func__, &source_addr);
+ return 0;
+ }
+
+ ss = XCALLOC(MTYPE_PIM_SSMPINGD, sizeof(*ss));
+
+ ss->pim = pim;
+ ss->sock_fd = sock_fd;
+ ss->t_sock_read = NULL;
+ ss->source_addr = source_addr;
+ ss->creation = pim_time_monotonic_sec();
+ ss->requests = 0;
+
+ listnode_add(pim->ssmpingd_list, ss);
+
+ ssmpingd_read_on(ss);
+
+ return ss;
+}
+
+/*
+ * Public API: start a ssmpingd listener for source_addr.  A duplicate
+ * request is silently accepted (returns 0); returns -1 if the listener
+ * could not be created.
+ */
+int pim_ssmpingd_start(struct pim_instance *pim, pim_addr source_addr)
+{
+ struct ssmpingd_sock *ss;
+
+ ss = ssmpingd_find(pim, source_addr);
+ if (ss) {
+ /* silently ignore request to recreate entry */
+ return 0;
+ }
+
+ zlog_info("%s: starting ssmpingd for source %pPAs", __func__,
+ &source_addr);
+
+ ss = ssmpingd_new(pim, source_addr);
+ if (!ss) {
+ zlog_warn("%s: ssmpingd_new() failure for source %pPAs",
+ __func__, &source_addr);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Public API: stop the listener for source_addr; -1 if none exists. */
+int pim_ssmpingd_stop(struct pim_instance *pim, pim_addr source_addr)
+{
+ struct ssmpingd_sock *ss;
+
+ ss = ssmpingd_find(pim, source_addr);
+ if (!ss) {
+ zlog_warn("%s: could not find ssmpingd for source %pPAs",
+ __func__, &source_addr);
+ return -1;
+ }
+
+ zlog_info("%s: stopping ssmpingd for source %pPAs", __func__,
+ &source_addr);
+
+ ssmpingd_delete(ss);
+
+ return 0;
+}
diff --git a/pimd/pim_ssmpingd.h b/pimd/pim_ssmpingd.h
new file mode 100644
index 0000000..71286e4
--- /dev/null
+++ b/pimd/pim_ssmpingd.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_SSMPINGD_H
+#define PIM_SSMPINGD_H
+
+#include <zebra.h>
+
+#include "if.h"
+
+#include "pim_iface.h"
+
+struct ssmpingd_sock {
+ struct pim_instance *pim;
+
+ int sock_fd; /* socket */
+ struct event *t_sock_read; /* thread for reading socket */
+ pim_addr source_addr; /* source address */
+ int64_t creation; /* timestamp of socket creation */
+ int64_t requests; /* counter */
+};
+
+void pim_ssmpingd_init(struct pim_instance *pim);
+void pim_ssmpingd_destroy(struct pim_instance *pim);
+int pim_ssmpingd_start(struct pim_instance *pim, pim_addr source_addr);
+int pim_ssmpingd_stop(struct pim_instance *pim, pim_addr source_addr);
+
+#endif /* PIM_SSMPINGD_H */
diff --git a/pimd/pim_static.c b/pimd/pim_static.c
new file mode 100644
index 0000000..b9effa2
--- /dev/null
+++ b/pimd/pim_static.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga: add the ability to configure multicast static routes
+ * Copyright (C) 2014 Nathan Bahr, ATCorp
+ */
+
+#include <zebra.h>
+
+#include "vty.h"
+#include "if.h"
+#include "log.h"
+#include "memory.h"
+#include "linklist.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_oil.h"
+#include "pim_static.h"
+#include "pim_time.h"
+#include "pim_str.h"
+#include "pim_iface.h"
+
+/* Free a static mroute entry (must already be unlinked from any list). */
+void pim_static_route_free(struct static_route *s_route)
+{
+ XFREE(MTYPE_PIM_STATIC_ROUTE, s_route);
+}
+
+/* Allocate a zeroed static mroute entry. */
+static struct static_route *static_route_alloc(void)
+{
+ return XCALLOC(MTYPE_PIM_STATIC_ROUTE, sizeof(struct static_route));
+}
+
+/*
+ * Build a new static mroute entry for (source, group) with input vif
+ * 'iif' and a single output vif 'oif', mirroring the settings into the
+ * embedded channel_oil (ref count 1, creation timestamp on the oif).
+ */
+static struct static_route *static_route_new(ifindex_t iif, ifindex_t oif,
+ pim_addr group,
+ pim_addr source)
+{
+ struct static_route *s_route;
+ s_route = static_route_alloc();
+
+ s_route->group = group;
+ s_route->source = source;
+ s_route->iif = iif;
+ s_route->oif_ttls[oif] = 1;
+ s_route->c_oil.oil_ref_count = 1;
+ *oil_origin(&s_route->c_oil) = source;
+ *oil_mcastgrp(&s_route->c_oil) = group;
+ *oil_incoming_vif(&s_route->c_oil) = iif;
+ oil_if_set(&s_route->c_oil, oif, 1);
+ s_route->c_oil.oif_creation[oif] = pim_time_monotonic_sec();
+
+ return s_route;
+}
+
+
+/*
+ * Add a static mroute (iif -> oif) for (source, group), creating a new
+ * entry or adding an output interface to an existing one, and commit it
+ * to the kernel MFC.  On kernel failure the in-memory state is rolled
+ * back to its prior contents.
+ *
+ * Returns: 0 ok, -1 kernel add failed, -2 invalid vif index,
+ * -4 looped entry (iif == oif).  NOTE(review): -3 is used both for the
+ * vrf-mismatch case and for "route already exists" - callers cannot
+ * distinguish them; consider separate codes.
+ */
+int pim_static_add(struct pim_instance *pim, struct interface *iif,
+ struct interface *oif, pim_addr group, pim_addr source)
+{
+ struct listnode *node = NULL;
+ struct static_route *s_route = NULL;
+ struct static_route *original_s_route = NULL;
+ struct pim_interface *pim_iif = iif ? iif->info : NULL;
+ struct pim_interface *pim_oif = oif ? oif->info : NULL;
+ ifindex_t iif_index = pim_iif ? pim_iif->mroute_vif_index : 0;
+ ifindex_t oif_index = pim_oif ? pim_oif->mroute_vif_index : 0;
+
+ /* vif index 0 (unset) and -1 (invalid) are both rejected */
+ if (!iif_index || !oif_index || iif_index == -1 || oif_index == -1) {
+ zlog_warn(
+ "%s %s: Unable to add static route: Invalid interface index(iif=%d,oif=%d)",
+ __FILE__, __func__, iif_index, oif_index);
+ return -2;
+ }
+
+#ifdef PIM_ENFORCE_LOOPFREE_MFC
+ if (iif_index == oif_index) {
+ /* looped MFC entry */
+ zlog_warn(
+ "%s %s: Unable to add static route: Looped MFC entry(iif=%d,oif=%d)",
+ __FILE__, __func__, iif_index, oif_index);
+ return -4;
+ }
+#endif
+ /* both interfaces must belong to the same VRF */
+ if (iif->vrf->vrf_id != oif->vrf->vrf_id) {
+ return -3;
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) {
+ if (!pim_addr_cmp(s_route->group, group) &&
+ !pim_addr_cmp(s_route->source, source) &&
+ (s_route->iif == iif_index)) {
+
+ if (s_route->oif_ttls[oif_index]) {
+ zlog_warn(
+ "%s %s: Unable to add static route: Route already exists (iif=%d,oif=%d,group=%pPAs,source=%pPAs)",
+ __FILE__, __func__, iif_index,
+ oif_index, &group, &source);
+ return -3;
+ }
+
+ /* Ok, from here on out we will be making changes to the
+ * s_route structure, but if
+ * for some reason we fail to commit these changes to
+ * the kernel, we want to be able
+ * restore the state of the list. So copy the node data
+ * and if need be, we can copy
+ * back if it fails.
+ */
+ original_s_route = static_route_alloc();
+ memcpy(original_s_route, s_route,
+ sizeof(struct static_route));
+
+ /* Route exists and has the same input interface, but
+ * adding a new output interface */
+ s_route->oif_ttls[oif_index] = 1;
+ oil_if_set(&s_route->c_oil, oif_index, 1);
+ s_route->c_oil.oif_creation[oif_index] =
+ pim_time_monotonic_sec();
+ ++s_route->c_oil.oil_ref_count;
+ break;
+ }
+ }
+
+ /* If node is null then we reached the end of the list without finding a
+ * match */
+ if (!node) {
+ s_route = static_route_new(iif_index, oif_index, group, source);
+ listnode_add(pim->static_routes, s_route);
+ }
+
+ s_route->c_oil.pim = pim;
+
+ if (pim_static_mroute_add(&s_route->c_oil, __func__)) {
+ zlog_warn(
+ "%s %s: Unable to add static route(iif=%d,oif=%d,group=%pPAs,source=%pPAs)",
+ __FILE__, __func__, iif_index, oif_index, &group,
+ &source);
+
+ /* Need to put s_route back to the way it was */
+ if (original_s_route) {
+ memcpy(s_route, original_s_route,
+ sizeof(struct static_route));
+ } else {
+ /* we never stored off a copy, so it must have been a
+ * fresh new route */
+ listnode_delete(pim->static_routes, s_route);
+ pim_static_route_free(s_route);
+ }
+
+ if (original_s_route) {
+ pim_static_route_free(original_s_route);
+ }
+
+ return -1;
+ }
+
+ /* Make sure we free the memory for the route copy if used */
+ if (original_s_route) {
+ pim_static_route_free(original_s_route);
+ }
+
+ if (PIM_DEBUG_STATIC) {
+ zlog_debug(
+ "%s: Static route added(iif=%d,oif=%d,group=%pPAs,source=%pPAs)",
+ __func__, iif_index, oif_index, &group,
+ &source);
+ }
+
+ return 0;
+}
+
+/*
+ * Remove output interface 'oif' from the static mroute (source, group)
+ * with input 'iif'.  When the last output goes away the whole route is
+ * deleted from the kernel and freed; otherwise the kernel entry is
+ * updated.  On kernel failure the in-memory oif flag is restored.
+ *
+ * Returns: 0 ok, -1 kernel update/delete failed, -2 invalid vif index,
+ * -3 route not found.
+ */
+int pim_static_del(struct pim_instance *pim, struct interface *iif,
+ struct interface *oif, pim_addr group, pim_addr source)
+{
+ struct listnode *node = NULL;
+ struct listnode *nextnode = NULL;
+ struct static_route *s_route = NULL;
+ struct pim_interface *pim_iif = iif ? iif->info : 0;
+ struct pim_interface *pim_oif = oif ? oif->info : 0;
+ ifindex_t iif_index = pim_iif ? pim_iif->mroute_vif_index : 0;
+ ifindex_t oif_index = pim_oif ? pim_oif->mroute_vif_index : 0;
+
+ if (!iif_index || !oif_index) {
+ zlog_warn(
+ "%s %s: Unable to remove static route: Invalid interface index(iif=%d,oif=%d)",
+ __FILE__, __func__, iif_index, oif_index);
+ return -2;
+ }
+
+ for (ALL_LIST_ELEMENTS(pim->static_routes, node, nextnode, s_route)) {
+ if (s_route->iif == iif_index
+ && !pim_addr_cmp(s_route->group, group)
+ && !pim_addr_cmp(s_route->source, source)
+ && s_route->oif_ttls[oif_index]) {
+ s_route->oif_ttls[oif_index] = 0;
+ oil_if_set(&s_route->c_oil, oif_index, 0);
+ --s_route->c_oil.oil_ref_count;
+
+ /* If there are no more outputs then delete the whole
+ * route, otherwise set the route with the new outputs
+ */
+ if (s_route->c_oil.oil_ref_count <= 0
+ ? pim_mroute_del(&s_route->c_oil, __func__)
+ : pim_static_mroute_add(&s_route->c_oil,
+ __func__)) {
+ zlog_warn(
+ "%s %s: Unable to remove static route(iif=%d,oif=%d,group=%pPAs,source=%pPAs)",
+ __FILE__, __func__, iif_index,
+ oif_index, &group, &source);
+
+ /* kernel refused: restore in-memory state */
+ s_route->oif_ttls[oif_index] = 1;
+ oil_if_set(&s_route->c_oil, oif_index, 1);
+ ++s_route->c_oil.oil_ref_count;
+
+ return -1;
+ }
+
+ s_route->c_oil.oif_creation[oif_index] = 0;
+
+ if (s_route->c_oil.oil_ref_count <= 0) {
+ listnode_delete(pim->static_routes, s_route);
+ pim_static_route_free(s_route);
+ }
+
+ if (PIM_DEBUG_STATIC) {
+ zlog_debug(
+ "%s: Static route removed(iif=%d,oif=%d,group=%pPAs,source=%pPAs)",
+ __func__, iif_index, oif_index,
+ &group, &source);
+ }
+
+ break;
+ }
+ }
+
+ /* node == NULL means the loop ran off the end without a match */
+ if (!node) {
+ zlog_warn(
+ "%s %s: Unable to remove static route: Route does not exist(iif=%d,oif=%d,group=%pPAs,source=%pPAs)",
+ __FILE__, __func__, iif_index, oif_index, &group,
+ &source);
+ return -3;
+ }
+
+ return 0;
+}
+
+/*
+ * Emit "ip/ipv6 mroute ..." config lines for every static route whose
+ * input interface is 'ifp'.  Returns the number of lines written.
+ *
+ * NOTE(review): pim_if_find_by_vif_index() can return NULL (e.g. for a
+ * stale vif index); oifp->name below would then dereference NULL -
+ * consider a guard before use.
+ */
+int pim_static_write_mroute(struct pim_instance *pim, struct vty *vty,
+ struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+ struct listnode *node;
+ struct static_route *sroute;
+ int count = 0;
+
+ if (!pim_ifp)
+ return 0;
+
+ for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, sroute)) {
+ if (sroute->iif == pim_ifp->mroute_vif_index) {
+ int i;
+ for (i = 0; i < MAXVIFS; i++)
+ if (sroute->oif_ttls[i]) {
+ struct interface *oifp =
+ pim_if_find_by_vif_index(pim,
+ i);
+ /* (*,G) routes omit the source */
+ if (pim_addr_is_any(sroute->source))
+ vty_out(vty,
+ " " PIM_AF_NAME " mroute %s %pPA\n",
+ oifp->name, &sroute->group);
+ else
+ vty_out(vty,
+ " " PIM_AF_NAME " mroute %s %pPA %pPA\n",
+ oifp->name, &sroute->group,
+ &sroute->source);
+ count++;
+ }
+ }
+ }
+
+ return count;
+}
diff --git a/pimd/pim_static.h b/pimd/pim_static.h
new file mode 100644
index 0000000..c868d02
--- /dev/null
+++ b/pimd/pim_static.h
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga: add the ability to configure multicast static routes
+ * Copyright (C) 2014 Nathan Bahr, ATCorp
+ */
+
+#ifndef PIM_STATIC_H_
+#define PIM_STATIC_H_
+
+#include <zebra.h>
+#include "pim_mroute.h"
+#include "pim_oil.h"
+#include "if.h"
+
+struct static_route {
+ /* Each static route is unique by these pair of addresses */
+ pim_addr group;
+ pim_addr source;
+
+ struct channel_oil c_oil;
+ ifindex_t iif;
+ unsigned char oif_ttls[MAXVIFS];
+};
+
+void pim_static_route_free(struct static_route *s_route);
+
+int pim_static_add(struct pim_instance *pim, struct interface *iif,
+ struct interface *oif, pim_addr group, pim_addr source);
+int pim_static_del(struct pim_instance *pim, struct interface *iif,
+ struct interface *oif, pim_addr group, pim_addr source);
+int pim_static_write_mroute(struct pim_instance *pim, struct vty *vty,
+ struct interface *ifp);
+
+#endif /* PIM_STATIC_H_ */
diff --git a/pimd/pim_str.h b/pimd/pim_str.h
new file mode 100644
index 0000000..029a9f4
--- /dev/null
+++ b/pimd/pim_str.h
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_STR_H
+#define PIM_STR_H
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+
+#include "prefix.h"
+#include "pim_addr.h"
+
+#if PIM_IPV == 4
+/*
+ * Longest possible length of a IPV4 (S,G) string is 34 bytes
+ * 123.123.123.123 = 16 * 2
+ * (,) = 3
+ * NULL Character at end = 1
+ * (123.123.123.123,123.123.123.123)
+ */
+#define PIM_SG_LEN PREFIX_SG_STR_LEN
+#else
+/*
+ * Longest possible length of a IPV6 (S,G) string is 94 bytes
+ * INET6_ADDRSTRLEN * 2 = 46 * 2
+ * (,) = 3
+ * NULL Character at end = 1
+ */
+#define PIM_SG_LEN 96
+#endif
+
+#define pim_inet4_dump prefix_mcast_inet4_dump
+
+void pim_inet4_dump(const char *onfail, struct in_addr addr, char *buf,
+ int buf_size);
+
+#endif
diff --git a/pimd/pim_tib.c b/pimd/pim_tib.c
new file mode 100644
index 0000000..4081786
--- /dev/null
+++ b/pimd/pim_tib.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * TIB (Tree Information Base) - just PIM <> IGMP/MLD glue for now
+ * Copyright (C) 2022 David Lamparter for NetDEF, Inc.
+ */
+
+#include <zebra.h>
+
+#include "pim_tib.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_iface.h"
+#include "pim_upstream.h"
+#include "pim_oil.h"
+#include "pim_nht.h"
+
+/*
+ * Resolve the input interface for (S,G) and create/refcount its
+ * channel_oil.  If no RP is configured, or no input interface can be
+ * resolved, a "dummy" oil (no valid iif) is still created so the
+ * membership can be tracked.  Returns NULL only in the looped-MFC case
+ * below.
+ */
+static struct channel_oil *
+tib_sg_oil_setup(struct pim_instance *pim, pim_sgaddr sg, struct interface *oif)
+{
+ struct pim_interface *pim_oif = oif->info;
+ int input_iface_vif_index = 0;
+ pim_addr vif_source;
+ struct prefix grp;
+ struct pim_nexthop nexthop;
+ struct pim_upstream *up = NULL;
+
+ if (!pim_rp_set_upstream_addr(pim, &vif_source, sg.src, sg.grp)) {
+ /* no PIM RP - create a dummy channel oil */
+ return pim_channel_oil_add(pim, &sg, __func__);
+ }
+
+ pim_addr_to_prefix(&grp, sg.grp);
+
+ /* prefer the existing upstream's cached nexthop when available */
+ up = pim_upstream_find(pim, &sg);
+ if (up) {
+ memcpy(&nexthop, &up->rpf.source_nexthop,
+ sizeof(struct pim_nexthop));
+ (void)pim_ecmp_nexthop_lookup(pim, &nexthop, vif_source, &grp,
+ 0);
+ if (nexthop.interface)
+ input_iface_vif_index = pim_if_find_vifindex_by_ifindex(
+ pim, nexthop.interface->ifindex);
+ } else
+ input_iface_vif_index =
+ pim_ecmp_fib_lookup_if_vif_index(pim, vif_source, &grp);
+
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: NHT %pSG vif_source %pPAs vif_index:%d",
+ __func__, &sg, &vif_source, input_iface_vif_index);
+
+ /* vif index < 1 means "no usable input interface" */
+ if (input_iface_vif_index < 1) {
+ if (PIM_DEBUG_GM_TRACE)
+ zlog_debug(
+ "%s %s: could not find input interface for %pSG",
+ __FILE__, __func__, &sg);
+
+ return pim_channel_oil_add(pim, &sg, __func__);
+ }
+
+ /*
+ * Protect IGMP against adding looped MFC entries created by both
+ * source and receiver attached to the same interface. See TODO T22.
+ * Block only when the intf is non DR DR must create upstream.
+ */
+ if ((input_iface_vif_index == pim_oif->mroute_vif_index) &&
+ !(PIM_I_am_DR(pim_oif))) {
+ /* ignore request for looped MFC entry */
+ if (PIM_DEBUG_GM_TRACE)
+ zlog_debug(
+ "%s: ignoring request for looped MFC entry (S,G)=%pSG: oif=%s vif_index=%d",
+ __func__, &sg, oif->name,
+ input_iface_vif_index);
+
+ return NULL;
+ }
+
+ return pim_channel_oil_add(pim, &sg, __func__);
+}
+
+/*
+ * IGMP/MLD -> PIM glue: a local member joined (S,G) on oif.  Sets up the
+ * channel_oil (cached in *oilp across calls), adds oif to it if we are
+ * DR (or dual-active), and records local membership in the per-interface
+ * (S,G) ifchannel state.  Returns false if anything prevents forwarding
+ * (not DR, oil setup failed, add_oif failed, membership add failed).
+ */
+bool tib_sg_gm_join(struct pim_instance *pim, pim_sgaddr sg,
+ struct interface *oif, struct channel_oil **oilp)
+{
+ struct pim_interface *pim_oif = oif->info;
+
+ if (!pim_oif) {
+ if (PIM_DEBUG_GM_TRACE)
+ zlog_debug("%s: multicast not enabled on oif=%s?",
+ __func__, oif->name);
+ return false;
+ }
+
+ if (!*oilp)
+ *oilp = tib_sg_oil_setup(pim, sg, oif);
+ if (!*oilp)
+ return false;
+
+ if (PIM_I_am_DR(pim_oif) || PIM_I_am_DualActive(pim_oif)) {
+ int result;
+
+ result = pim_channel_add_oif(*oilp, oif, PIM_OIF_FLAG_PROTO_GM,
+ __func__);
+ if (result) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_warn("%s: add_oif() failed with return=%d",
+ __func__, result);
+ return false;
+ }
+ } else {
+ if (PIM_DEBUG_GM_TRACE)
+ zlog_debug(
+ "%s: %pSG was received on %s interface but we are not DR for that interface",
+ __func__, &sg, oif->name);
+
+ return false;
+ }
+ /*
+ Feed IGMPv3-gathered local membership information into PIM
+ per-interface (S,G) state.
+ */
+ if (!pim_ifchannel_local_membership_add(oif, &sg, false /*is_vxlan*/)) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_warn(
+ "%s: Failure to add local membership for %pSG",
+ __func__, &sg);
+
+ /* roll back the oif we just added */
+ pim_channel_del_oif(*oilp, oif, PIM_OIF_FLAG_PROTO_GM,
+ __func__);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * IGMP/MLD -> PIM glue: the last local member left (S,G) on oif.
+ * Removes oif from the oil, drops local membership and releases the
+ * oil reference.  del_oif failure aborts early (see comment below).
+ */
+void tib_sg_gm_prune(struct pim_instance *pim, pim_sgaddr sg,
+ struct interface *oif, struct channel_oil **oilp)
+{
+ int result;
+
+ /*
+ It appears that in certain circumstances that
+ igmp_source_forward_stop is called when IGMP forwarding
+ was not enabled in oif_flags for this outgoing interface.
+ Possibly because of multiple calls. When that happens, we
+ enter the below if statement and this function returns early
+ which in turn triggers the calling function to assert.
+ Making the call to pim_channel_del_oif and ignoring the return code
+ fixes the issue without ill effect, similar to
+ pim_forward_stop below.
+ */
+ result = pim_channel_del_oif(*oilp, oif, PIM_OIF_FLAG_PROTO_GM,
+ __func__);
+ if (result) {
+ if (PIM_DEBUG_GM_TRACE)
+ zlog_debug(
+ "%s: pim_channel_del_oif() failed with return=%d",
+ __func__, result);
+ return;
+ }
+
+ /*
+ Feed IGMPv3-gathered local membership information into PIM
+ per-interface (S,G) state.
+ */
+ pim_ifchannel_local_membership_del(oif, &sg);
+
+ pim_channel_oil_del(*oilp, __func__);
+}
diff --git a/pimd/pim_tib.h b/pimd/pim_tib.h
new file mode 100644
index 0000000..081ad90
--- /dev/null
+++ b/pimd/pim_tib.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * TIB (Tree Information Base) - just PIM <> IGMP/MLD glue for now
+ * Copyright (C) 2022 David Lamparter for NetDEF, Inc.
+ */
+
+#ifndef _FRR_PIM_GLUE_H
+#define _FRR_PIM_GLUE_H
+
+#include "pim_addr.h"
+
+struct pim_instance;
+struct channel_oil;
+
+extern bool tib_sg_gm_join(struct pim_instance *pim, pim_sgaddr sg,
+ struct interface *oif, struct channel_oil **oilp);
+extern void tib_sg_gm_prune(struct pim_instance *pim, pim_sgaddr sg,
+ struct interface *oif, struct channel_oil **oilp);
+
+#endif /* _FRR_PIM_GLUE_H */
diff --git a/pimd/pim_time.c b/pimd/pim_time.c
new file mode 100644
index 0000000..205945e
--- /dev/null
+++ b/pimd/pim_time.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include <string.h>
+#include <sys/time.h>
+#include <time.h>
+
+#include "log.h"
+#include "frrevent.h"
+#include "lib_errors.h"
+
+#include "pim_time.h"
+
+static int gettime_monotonic(struct timeval *tv)
+{
+ int result;
+
+ result = gettimeofday(tv, 0);
+ if (result) {
+ flog_err_sys(EC_LIB_SYSTEM_CALL,
+ "%s: gettimeofday() failure: errno=%d: %s",
+ __func__, errno, safe_strerror(errno));
+ }
+
+ return result;
+}
+
+/*
+ pim_time_monotonic_sec():
+ number of seconds since some unspecified starting point
+*/
+int64_t pim_time_monotonic_sec(void)
+{
+ struct timeval now_tv;
+
+ if (gettime_monotonic(&now_tv)) {
+ flog_err_sys(EC_LIB_SYSTEM_CALL,
+ "%s: gettime_monotonic() failure: errno=%d: %s",
+ __func__, errno, safe_strerror(errno));
+ return -1;
+ }
+
+ return now_tv.tv_sec;
+}
+
+/*
+ pim_time_monotonic_dsec():
+ number of deciseconds since some unspecified starting point
+*/
+int64_t pim_time_monotonic_dsec(void)
+{
+ struct timeval now_tv;
+ int64_t now_dsec;
+
+ if (gettime_monotonic(&now_tv)) {
+ flog_err_sys(EC_LIB_SYSTEM_CALL,
+ "%s: gettime_monotonic() failure: errno=%d: %s",
+ __func__, errno, safe_strerror(errno));
+ return -1;
+ }
+
+ now_dsec = ((int64_t)now_tv.tv_sec) * 10
+ + ((int64_t)now_tv.tv_usec) / 100000;
+
+ return now_dsec;
+}
+
+int64_t pim_time_monotonic_usec(void)
+{
+ struct timeval now_tv;
+ int64_t now_dsec;
+
+ if (gettime_monotonic(&now_tv)) {
+ flog_err_sys(EC_LIB_SYSTEM_CALL,
+ "%s: gettime_monotonic() failure: errno=%d: %s",
+ __func__, errno, safe_strerror(errno));
+ return -1;
+ }
+
+ now_dsec =
+ ((int64_t)now_tv.tv_sec) * 1000000 + ((int64_t)now_tv.tv_usec);
+
+ return now_dsec;
+}
+
/*
 * Format 'sec' as zero-padded "MM:SS" into buf.
 *
 * buf:      destination buffer
 * buf_size: size of buf; asserted >= 5 (note: 5 printable chars plus the
 *           NUL need 6 bytes -- with buf_size == 5 the output is silently
 *           truncated, since snprintf's return is the would-be length)
 * sec:      duration in seconds
 *
 * Returns 0 when the expected 5-character "MM:SS" string was produced,
 * nonzero otherwise (snprintf error, or minutes >= 100 widening the field).
 *
 * Fix: the previous code compared the snprintf result against 8, the
 * length of "HH:MM:SS" (copied from pim_time_hhmmss), so it reported
 * failure for every normal "MM:SS" value.
 */
int pim_time_mmss(char *buf, int buf_size, long sec)
{
	long mm;
	int wr;

	assert(buf_size >= 5);

	mm = sec / 60;
	sec %= 60;

	wr = snprintf(buf, buf_size, "%02ld:%02ld", mm, sec);

	return wr != 5; /* strlen("MM:SS") == 5 */
}
+
/*
 * Render 'sec' as zero-padded "HH:MM:SS" into buf.
 * Returns 0 iff exactly the expected 8 characters were produced.
 */
static int pim_time_hhmmss(char *buf, int buf_size, long sec)
{
	long hh, mm, ss;
	int n;

	assert(buf_size >= 8);

	hh = sec / 3600;
	ss = sec % 3600;
	mm = ss / 60;
	ss %= 60;

	n = snprintf(buf, buf_size, "%02ld:%02ld:%02ld", hh, mm, ss);

	return n != 8;
}
+
/* Render the remaining time of t_timer as "MM:SS", or "--:--" if the
 * timer is not running. */
void pim_time_timer_to_mmss(char *buf, int buf_size, struct event *t_timer)
{
	if (!t_timer) {
		snprintf(buf, buf_size, "--:--");
		return;
	}

	pim_time_mmss(buf, buf_size, event_timer_remain_second(t_timer));
}
+
/* Render the remaining time of t_timer as "HH:MM:SS", or "--:--:--" if
 * the timer is not running. */
void pim_time_timer_to_hhmmss(char *buf, int buf_size, struct event *t_timer)
{
	if (!t_timer) {
		snprintf(buf, buf_size, "--:--:--");
		return;
	}

	pim_time_hhmmss(buf, buf_size, event_timer_remain_second(t_timer));
}
+
/* Format an uptime (seconds) as "HH:MM:SS" into buf. */
void pim_time_uptime(char *buf, int buf_size, int64_t uptime_sec)
{
	assert(buf_size >= 8);

	pim_time_hhmmss(buf, buf_size, (long)uptime_sec);
}
+
/* Format (now - begin) as "HH:MM:SS"; "--:--:--" when there is no valid
 * begin timestamp (begin <= 0). */
void pim_time_uptime_begin(char *buf, int buf_size, int64_t now, int64_t begin)
{
	if (begin <= 0) {
		snprintf(buf, buf_size, "--:--:--");
		return;
	}

	pim_time_uptime(buf, buf_size, now - begin);
}
+
/* Milliseconds left on t_timer; 0 when no timer thread is running
 * (i.e. the timer has expired). */
long pim_time_timer_remain_msec(struct event *t_timer)
{
	if (!t_timer)
		return 0;

	return event_timer_remain_msec(t_timer);
}
diff --git a/pimd/pim_time.h b/pimd/pim_time.h
new file mode 100644
index 0000000..6c0e073
--- /dev/null
+++ b/pimd/pim_time.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_TIME_H
+#define PIM_TIME_H
+
+#include <stdint.h>
+
+#include <zebra.h>
+#include "frrevent.h"
+
+int64_t pim_time_monotonic_sec(void);
+int64_t pim_time_monotonic_dsec(void);
+int64_t pim_time_monotonic_usec(void);
+int pim_time_mmss(char *buf, int buf_size, long sec);
+void pim_time_timer_to_mmss(char *buf, int buf_size, struct event *t);
+void pim_time_timer_to_hhmmss(char *buf, int buf_size, struct event *t);
+void pim_time_uptime(char *buf, int buf_size, int64_t uptime_sec);
+void pim_time_uptime_begin(char *buf, int buf_size, int64_t now, int64_t begin);
+long pim_time_timer_remain_msec(struct event *t_timer);
+
+#endif /* PIM_TIME_H */
diff --git a/pimd/pim_tlv.c b/pimd/pim_tlv.c
new file mode 100644
index 0000000..80d60b8
--- /dev/null
+++ b/pimd/pim_tlv.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "prefix.h"
+#include "if.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_int.h"
+#include "pim_tlv.h"
+#include "pim_str.h"
+#include "pim_msg.h"
+#include "pim_iface.h"
+#include "pim_addr.h"
+
+#if PIM_IPV == 4
+#define PIM_MSG_ADDRESS_FAMILY PIM_MSG_ADDRESS_FAMILY_IPV4
+#else
+#define PIM_MSG_ADDRESS_FAMILY PIM_MSG_ADDRESS_FAMILY_IPV6
+#endif
+
/*
 * Append a TLV carrying a single uint16 value in network byte order.
 *
 * Returns the cursor just past the appended option, or NULL when the
 * option would not fit before buf_pastend.
 *
 * Fix: write T, L and V with memcpy() instead of storing through a cast
 * uint16_t pointer -- 'buf' is an arbitrary byte cursor into a packet
 * buffer and may be misaligned, making the cast store undefined behavior
 * on strict-alignment targets (and a strict-aliasing violation).
 */
uint8_t *pim_tlv_append_uint16(uint8_t *buf, const uint8_t *buf_pastend,
			       uint16_t option_type, uint16_t option_value)
{
	uint16_t option_len = 2;
	uint16_t tmp;

	if ((buf + PIM_TLV_OPTION_SIZE(option_len)) > buf_pastend)
		return NULL;

	tmp = htons(option_type);
	memcpy(buf, &tmp, sizeof(tmp));
	buf += 2;
	tmp = htons(option_len);
	memcpy(buf, &tmp, sizeof(tmp));
	buf += 2;
	tmp = htons(option_value);
	memcpy(buf, &tmp, sizeof(tmp));
	buf += option_len;

	return buf;
}
+
/*
 * Append a TLV carrying two uint16 values in network byte order.
 *
 * Returns the cursor just past the appended option, or NULL when the
 * option would not fit before buf_pastend.
 *
 * Fix: use memcpy() rather than storing through a cast uint16_t pointer;
 * 'buf' may be misaligned (undefined behavior on strict-alignment
 * targets, strict-aliasing violation everywhere).
 */
uint8_t *pim_tlv_append_2uint16(uint8_t *buf, const uint8_t *buf_pastend,
				uint16_t option_type, uint16_t option_value1,
				uint16_t option_value2)
{
	uint16_t option_len = 4;
	uint16_t tmp;

	if ((buf + PIM_TLV_OPTION_SIZE(option_len)) > buf_pastend)
		return NULL;

	tmp = htons(option_type);
	memcpy(buf, &tmp, sizeof(tmp));
	buf += 2;
	tmp = htons(option_len);
	memcpy(buf, &tmp, sizeof(tmp));
	buf += 2;
	tmp = htons(option_value1);
	memcpy(buf, &tmp, sizeof(tmp));
	buf += 2;
	tmp = htons(option_value2);
	memcpy(buf, &tmp, sizeof(tmp));
	buf += 2;

	return buf;
}
+
/*
 * Append a TLV carrying a single uint32 value (written via
 * pim_write_uint32(), which handles byte order).
 *
 * Returns the cursor just past the appended option, or NULL when the
 * option would not fit before buf_pastend.
 *
 * Fix: write the T and L fields with memcpy() instead of storing through
 * a cast uint16_t pointer; 'buf' may be misaligned, making the cast
 * store undefined behavior on strict-alignment targets.
 */
uint8_t *pim_tlv_append_uint32(uint8_t *buf, const uint8_t *buf_pastend,
			       uint16_t option_type, uint32_t option_value)
{
	uint16_t option_len = 4;
	uint16_t tmp;

	if ((buf + PIM_TLV_OPTION_SIZE(option_len)) > buf_pastend)
		return NULL;

	tmp = htons(option_type);
	memcpy(buf, &tmp, sizeof(tmp));
	buf += 2;
	tmp = htons(option_len);
	memcpy(buf, &tmp, sizeof(tmp));
	buf += 2;
	pim_write_uint32(buf, option_value);
	buf += option_len;

	return buf;
}
+
+#define ucast_ipv4_encoding_len (2 + sizeof(struct in_addr))
+#define ucast_ipv6_encoding_len (2 + sizeof(struct in6_addr))
+
+/*
+ * An Encoded-Unicast address takes the following format:
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Addr Family | Encoding Type | Unicast Address
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+...
+ *
+ * Addr Family
+ * The PIM address family of the 'Unicast Address' field of this
+ * address.
+ *
+ * Values 0-127 are as assigned by the IANA for Internet Address *
+ * Families in [7]. Values 128-250 are reserved to be assigned by
+ * the IANA for PIM-specific Address Families. Values 251 though
+ * 255 are designated for private use. As there is no assignment
+ * authority for this space, collisions should be expected.
+ *
+ * Encoding Type
+ * The type of encoding used within a specific Address Family. The
+ * value '0' is reserved for this field and represents the native
+ * encoding of the Address Family.
+ *
+ * Unicast Address
+ * The unicast address as represented by the given Address Family
+ * and Encoding Type.
+ */
+int pim_encode_addr_ucast(uint8_t *buf, pim_addr addr)
+{
+ uint8_t *start = buf;
+
+ *buf++ = PIM_MSG_ADDRESS_FAMILY;
+ *buf++ = 0;
+ memcpy(buf, &addr, sizeof(addr));
+ buf += sizeof(addr);
+
+ return buf - start;
+}
+
+int pim_encode_addr_ucast_prefix(uint8_t *buf, struct prefix *p)
+{
+ switch (p->family) {
+ case AF_INET:
+ *buf = PIM_MSG_ADDRESS_FAMILY_IPV4; /* notice: AF_INET !=
+ PIM_MSG_ADDRESS_FAMILY_IPV4
+ */
+ ++buf;
+ *buf = 0; /* ucast IPv4 native encoding type (RFC
+ 4601: 4.9.1) */
+ ++buf;
+ memcpy(buf, &p->u.prefix4, sizeof(struct in_addr));
+ return ucast_ipv4_encoding_len;
+ case AF_INET6:
+ *buf = PIM_MSG_ADDRESS_FAMILY_IPV6;
+ ++buf;
+ *buf = 0;
+ ++buf;
+ memcpy(buf, &p->u.prefix6, sizeof(struct in6_addr));
+ return ucast_ipv6_encoding_len;
+ default:
+ return 0;
+ }
+}
+
+#define group_ipv4_encoding_len (4 + sizeof(struct in_addr))
+
+/*
+ * Encoded-Group addresses take the following format:
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Addr Family | Encoding Type |B| Reserved |Z| Mask Len |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Group multicast Address
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+...
+ *
+ * Addr Family
+ * Described above.
+ *
+ * Encoding Type
+ * Described above.
+ *
+ * [B]idirectional PIM
+ * Indicates the group range should use Bidirectional PIM [13].
+ * For PIM-SM defined in this specification, this bit MUST be zero.
+ *
+ * Reserved
+ * Transmitted as zero. Ignored upon receipt.
+ *
+ * Admin Scope [Z]one
+ * indicates the group range is an admin scope zone. This is used
+ * in the Bootstrap Router Mechanism [11] only. For all other
+ * purposes, this bit is set to zero and ignored on receipt.
+ *
+ * Mask Len
+ * The Mask length field is 8 bits. The value is the number of
+ * contiguous one bits that are left justified and used as a mask;
+ * when combined with the group address, it describes a range of
+ * groups. It is less than or equal to the address length in bits
+ * for the given Address Family and Encoding Type. If the message
+ * is sent for a single group, then the Mask length must equal the
+ * address length in bits for the given Address Family and Encoding
+ * Type (e.g., 32 for IPv4 native encoding, 128 for IPv6 native
+ * encoding).
+ *
+ * Group multicast Address
+ * Contains the group address.
+ */
+int pim_encode_addr_group(uint8_t *buf, afi_t afi, int bidir, int scope,
+ pim_addr group)
+{
+ uint8_t *start = buf;
+ uint8_t flags = 0;
+
+ flags |= bidir << 8;
+ flags |= scope;
+
+ *buf++ = PIM_MSG_ADDRESS_FAMILY;
+ *buf++ = 0;
+ *buf++ = flags;
+ *buf++ = sizeof(group) * 8;
+ memcpy(buf, &group, sizeof(group));
+ buf += sizeof(group);
+
+ return buf - start;
+}
+
/*
 * Append a PIM hello Address List TLV with the interface's secondary
 * addresses of the given family (the primary address is skipped).
 *
 * Returns the cursor just past the appended TLV, 'buf' unchanged when
 * there was nothing to encode, or 0 (NULL) on buffer overflow.
 */
uint8_t *pim_tlv_append_addrlist_ucast(uint8_t *buf, const uint8_t *buf_pastend,
				       struct interface *ifp, int family)
{
	struct listnode *node;
	uint16_t option_len = 0;
	uint8_t *curr;
	size_t uel;
	struct list *ifconnected = ifp->connected;
	struct pim_interface *pim_ifp = ifp->info;
	pim_addr addr;

	node = listhead(ifconnected);

	/* Empty address list ? */
	if (!node) {
		return buf;
	}

	if (family == AF_INET)
		uel = ucast_ipv4_encoding_len;
	else
		uel = ucast_ipv6_encoding_len;

	/* Scan secondary address list */
	curr = buf + 4; /* skip T and L */
	for (; node; node = listnextnode(node)) {
		struct connected *ifc = listgetdata(node);
		struct prefix *p = ifc->address;
		int l_encode;

		addr = pim_addr_from_prefix(p);
		if (!pim_addr_cmp(pim_ifp->primary_address, addr))
			/* don't add the primary address
			 * into the secondary address list */
			continue;

		/* NOTE(review): this overflow check runs before the family
		 * filter below, so a wrong-family address can still trigger
		 * the bail-out -- confirm this early return is intended */
		if ((curr + uel) > buf_pastend)
			return 0;

		if (p->family != family)
			continue;

		l_encode = pim_encode_addr_ucast_prefix(curr, p);
		curr += l_encode;
		option_len += l_encode;
	}

	if (PIM_DEBUG_PIM_TRACE_DETAIL) {
		/* NOTE(review): message says "IPv4" even when family is
		 * AF_INET6 */
		zlog_debug(
			"%s: number of encoded secondary unicast IPv4 addresses: %zu",
			__func__, option_len / uel);
	}

	if (option_len < 1) {
		/* Empty secondary unicast IPv4 address list */
		return buf;
	}

	/*
	 * Write T and L
	 */
	*(uint16_t *)buf = htons(PIM_MSG_OPTION_TYPE_ADDRESS_LIST);
	*(uint16_t *)(buf + 2) = htons(option_len);

	return curr;
}
+
+static int check_tlv_length(const char *label, const char *tlv_name,
+ const char *ifname, pim_addr src_addr,
+ int correct_len, int option_len)
+{
+ if (option_len != correct_len) {
+ zlog_warn(
+ "%s: PIM hello %s TLV with incorrect value size=%d correct=%d from %pPAs on interface %s",
+ label, tlv_name, option_len, correct_len, &src_addr,
+ ifname);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void check_tlv_redefinition_uint16(const char *label,
+ const char *tlv_name,
+ const char *ifname, pim_addr src_addr,
+ pim_hello_options options,
+ pim_hello_options opt_mask,
+ uint16_t new, uint16_t old)
+{
+ if (PIM_OPTION_IS_SET(options, opt_mask))
+ zlog_warn(
+ "%s: PIM hello TLV redefined %s=%u old=%u from %pPAs on interface %s",
+ label, tlv_name, new, old, &src_addr, ifname);
+}
+
+static void check_tlv_redefinition_uint32(const char *label,
+ const char *tlv_name,
+ const char *ifname, pim_addr src_addr,
+ pim_hello_options options,
+ pim_hello_options opt_mask,
+ uint32_t new, uint32_t old)
+{
+ if (PIM_OPTION_IS_SET(options, opt_mask))
+ zlog_warn(
+ "%s: PIM hello TLV redefined %s=%u old=%u from %pPAs on interface %s",
+ label, tlv_name, new, old, &src_addr, ifname);
+}
+
+static void check_tlv_redefinition_uint32_hex(
+ const char *label, const char *tlv_name, const char *ifname,
+ pim_addr src_addr, pim_hello_options options,
+ pim_hello_options opt_mask, uint32_t new, uint32_t old)
+{
+ if (PIM_OPTION_IS_SET(options, opt_mask))
+ zlog_warn(
+ "%s: PIM hello TLV redefined %s=%08x old=%08x from %pPAs on interface %s",
+ label, tlv_name, new, old, &src_addr, ifname);
+}
+
+int pim_tlv_parse_holdtime(const char *ifname, pim_addr src_addr,
+ pim_hello_options *hello_options,
+ uint16_t *hello_option_holdtime, uint16_t option_len,
+ const uint8_t *tlv_curr)
+{
+ const char *label = "holdtime";
+
+ if (check_tlv_length(__func__, label, ifname, src_addr,
+ sizeof(uint16_t), option_len)) {
+ return -1;
+ }
+
+ check_tlv_redefinition_uint16(__func__, label, ifname, src_addr,
+ *hello_options, PIM_OPTION_MASK_HOLDTIME,
+ PIM_TLV_GET_HOLDTIME(tlv_curr),
+ *hello_option_holdtime);
+
+ PIM_OPTION_SET(*hello_options, PIM_OPTION_MASK_HOLDTIME);
+
+ *hello_option_holdtime = PIM_TLV_GET_HOLDTIME(tlv_curr);
+
+ return 0;
+}
+
/*
 * Parse the LAN Prune Delay hello TLV (RFC 4601 4.3.3): a T bit plus
 * 15-bit propagation delay in the first 16-bit word, then a 16-bit
 * override interval.  Returns 0 on success, -1 on bad TLV length.
 */
int pim_tlv_parse_lan_prune_delay(const char *ifname, pim_addr src_addr,
				  pim_hello_options *hello_options,
				  uint16_t *hello_option_propagation_delay,
				  uint16_t *hello_option_override_interval,
				  uint16_t option_len, const uint8_t *tlv_curr)
{
	if (check_tlv_length(__func__, "lan_prune_delay", ifname, src_addr,
			     sizeof(uint32_t), option_len)) {
		return -1;
	}

	check_tlv_redefinition_uint16(__func__, "propagation_delay", ifname,
				      src_addr, *hello_options,
				      PIM_OPTION_MASK_LAN_PRUNE_DELAY,
				      PIM_TLV_GET_PROPAGATION_DELAY(tlv_curr),
				      *hello_option_propagation_delay);

	PIM_OPTION_SET(*hello_options, PIM_OPTION_MASK_LAN_PRUNE_DELAY);

	/* lower 15 bits of the first word are the propagation delay */
	*hello_option_propagation_delay =
		PIM_TLV_GET_PROPAGATION_DELAY(tlv_curr);
	/* top bit of the first byte is the T (can-disable-join-suppression)
	 * bit; mirror it into the hello options mask */
	if (PIM_TLV_GET_CAN_DISABLE_JOIN_SUPPRESSION(tlv_curr)) {
		PIM_OPTION_SET(*hello_options,
			       PIM_OPTION_MASK_CAN_DISABLE_JOIN_SUPPRESSION);
	} else {
		PIM_OPTION_UNSET(*hello_options,
				 PIM_OPTION_MASK_CAN_DISABLE_JOIN_SUPPRESSION);
	}
	/* step past the 2-byte propagation-delay word to reach the
	 * override-interval word */
	++tlv_curr;
	++tlv_curr;
	*hello_option_override_interval =
		PIM_TLV_GET_OVERRIDE_INTERVAL(tlv_curr);

	return 0;
}
+
+int pim_tlv_parse_dr_priority(const char *ifname, pim_addr src_addr,
+ pim_hello_options *hello_options,
+ uint32_t *hello_option_dr_priority,
+ uint16_t option_len, const uint8_t *tlv_curr)
+{
+ const char *label = "dr_priority";
+
+ if (check_tlv_length(__func__, label, ifname, src_addr,
+ sizeof(uint32_t), option_len)) {
+ return -1;
+ }
+
+ check_tlv_redefinition_uint32(
+ __func__, label, ifname, src_addr, *hello_options,
+ PIM_OPTION_MASK_DR_PRIORITY, PIM_TLV_GET_DR_PRIORITY(tlv_curr),
+ *hello_option_dr_priority);
+
+ PIM_OPTION_SET(*hello_options, PIM_OPTION_MASK_DR_PRIORITY);
+
+ *hello_option_dr_priority = PIM_TLV_GET_DR_PRIORITY(tlv_curr);
+
+ return 0;
+}
+
+int pim_tlv_parse_generation_id(const char *ifname, pim_addr src_addr,
+ pim_hello_options *hello_options,
+ uint32_t *hello_option_generation_id,
+ uint16_t option_len, const uint8_t *tlv_curr)
+{
+ const char *label = "generation_id";
+
+ if (check_tlv_length(__func__, label, ifname, src_addr,
+ sizeof(uint32_t), option_len)) {
+ return -1;
+ }
+
+ check_tlv_redefinition_uint32_hex(__func__, label, ifname, src_addr,
+ *hello_options,
+ PIM_OPTION_MASK_GENERATION_ID,
+ PIM_TLV_GET_GENERATION_ID(tlv_curr),
+ *hello_option_generation_id);
+
+ PIM_OPTION_SET(*hello_options, PIM_OPTION_MASK_GENERATION_ID);
+
+ *hello_option_generation_id = PIM_TLV_GET_GENERATION_ID(tlv_curr);
+
+ return 0;
+}
+
/*
 * Decode an RFC 4601 Encoded-Unicast address from buf into prefix *p.
 *
 * Returns the number of bytes consumed, or a negative error:
 *  -1 buffer too short for the fixed 3-byte minimum
 *  -2 unknown encoding type (only native encoding, type 0, is handled)
 *  -3 buffer too short for the address of the announced family
 *  -4 unknown address family
 */
int pim_parse_addr_ucast_prefix(struct prefix *p, const uint8_t *buf,
				int buf_size)
{
	const int ucast_encoding_min_len = 3; /* 1 family + 1 type + 1 addr */
	const uint8_t *addr;
	const uint8_t *pastend;
	int family;
	int type;

	if (buf_size < ucast_encoding_min_len) {
		zlog_warn(
			"%s: unicast address encoding overflow: left=%d needed=%d",
			__func__, buf_size, ucast_encoding_min_len);
		return -1;
	}

	addr = buf;
	pastend = buf + buf_size;

	family = *addr++;
	type = *addr++;

	if (type) {
		zlog_warn("%s: unknown unicast address encoding type=%d",
			  __func__, type);
		return -2;
	}

	switch (family) {
	case PIM_MSG_ADDRESS_FAMILY_IPV4:
		if ((addr + sizeof(struct in_addr)) > pastend) {
			zlog_warn(
				"%s: IPv4 unicast address overflow: left=%td needed=%zu",
				__func__, pastend - addr,
				sizeof(struct in_addr));
			return -3;
		}

		p->family = AF_INET; /* notice: AF_INET !=
					PIM_MSG_ADDRESS_FAMILY_IPV4 */
		memcpy(&p->u.prefix4, addr, sizeof(struct in_addr));
		p->prefixlen = IPV4_MAX_BITLEN;
		addr += sizeof(struct in_addr);

		break;
	case PIM_MSG_ADDRESS_FAMILY_IPV6:
		if ((addr + sizeof(struct in6_addr)) > pastend) {
			zlog_warn(
				"%s: IPv6 unicast address overflow: left=%td needed %zu",
				__func__, pastend - addr,
				sizeof(struct in6_addr));
			return -3;
		}

		p->family = AF_INET6;
		p->prefixlen = IPV6_MAX_BITLEN;
		memcpy(&p->u.prefix6, addr, sizeof(struct in6_addr));
		addr += sizeof(struct in6_addr);

		break;
	default: {
		/* NOTE(review): message ends with a dangling "from" -- a
		 * peer-address argument appears to have been dropped */
		zlog_warn("%s: unknown unicast address encoding family=%d from",
			  __func__, family);
		return -4;
	}
	}

	return addr - buf;
}
+
+int pim_parse_addr_ucast(pim_addr *out, const uint8_t *buf, int buf_size,
+ bool *wrong_af)
+{
+ struct prefix p;
+ int ret;
+
+ ret = pim_parse_addr_ucast_prefix(&p, buf, buf_size);
+ if (ret < 0)
+ return ret;
+
+ if (p.family != PIM_AF) {
+ *wrong_af = true;
+ return -5;
+ }
+
+ memcpy(out, &p.u.val, sizeof(*out));
+ return ret;
+}
+
/*
 * Decode an RFC 4601 Encoded-Group address from buf into sg->grp.
 *
 * Returns the number of bytes consumed, or a negative error:
 *  -1 buffer too short for the fixed 4-byte header
 *  -2 unknown encoding type (only native, type 0, accepted)
 *  -3 buffer too short for the group address
 *  -4 address family does not match this build's PIM address family
 *
 * NOTE(review): mask_len is read and logged but not range-checked here.
 */
int pim_parse_addr_group(pim_sgaddr *sg, const uint8_t *buf, int buf_size)
{
	const int grp_encoding_min_len =
		4; /* 1 family + 1 type + 1 flags/reserved + 1 mask_len */
	const uint8_t *addr;
	const uint8_t *pastend;
	int family;
	int type;
	int mask_len;

	if (buf_size < grp_encoding_min_len) {
		zlog_warn(
			"%s: group address encoding overflow: left=%d needed=%d",
			__func__, buf_size, grp_encoding_min_len);
		return -1;
	}

	addr = buf;
	pastend = buf + buf_size;

	family = *addr++;
	type = *addr++;
	++addr; /* skip b_reserved_z fields */
	mask_len = *addr++;

	if (type) {
		zlog_warn("%s: unknown group address encoding type=%d from",
			  __func__, type);
		return -2;
	}

	if (family != PIM_MSG_ADDRESS_FAMILY) {
		zlog_warn(
			"%s: unknown group address encoding family=%d mask_len=%d from",
			__func__, family, mask_len);
		return -4;
	}

	if ((addr + sizeof(sg->grp)) > pastend) {
		zlog_warn(
			"%s: group address overflow: left=%td needed=%zu from",
			__func__, pastend - addr, sizeof(sg->grp));
		return -3;
	}

	memcpy(&sg->grp, addr, sizeof(sg->grp));
	addr += sizeof(sg->grp);

	return addr - buf;
}
+
/*
 * Decode an RFC 4601 Encoded-Source address from buf into sg->src, and
 * the source flags octet into *flags.
 *
 * Returns the number of bytes consumed, or a negative error:
 *  -1 buffer too short for the fixed 4-byte header
 *  -2 unknown encoding type (only native, type 0, accepted)
 *  -3 buffer too short for the source address
 *  -4 mask length is not the full address length (RFC 4601 4.9.1 says
 *     such messages SHOULD be ignored)
 *  -5 address family does not match this build's PIM address family
 */
int pim_parse_addr_source(pim_sgaddr *sg, uint8_t *flags, const uint8_t *buf,
			  int buf_size)
{
	const int src_encoding_min_len =
		4; /* 1 family + 1 type + 1 flags + 1 mask_len */
	const uint8_t *addr;
	const uint8_t *pastend;
	int family;
	int type;
	int mask_len;

	if (buf_size < src_encoding_min_len) {
		zlog_warn(
			"%s: source address encoding overflow: left=%d needed=%d",
			__func__, buf_size, src_encoding_min_len);
		return -1;
	}

	addr = buf;
	pastend = buf + buf_size;

	family = *addr++;
	type = *addr++;
	*flags = *addr++;
	mask_len = *addr++;

	if (type) {
		zlog_warn(
			"%s: unknown source address encoding type=%d: %02x%02x%02x%02x",
			__func__, type, buf[0], buf[1], buf[2], buf[3]);
		return -2;
	}

	switch (family) {
	case PIM_MSG_ADDRESS_FAMILY:
		if ((addr + sizeof(sg->src)) > pastend) {
			zlog_warn(
				"%s: IP source address overflow: left=%td needed=%zu",
				__func__, pastend - addr, sizeof(sg->src));
			return -3;
		}

		memcpy(&sg->src, addr, sizeof(sg->src));

		/*
		  RFC 4601: 4.9.1  Encoded Source and Group Address Formats

		  Encoded-Source Address

		  The mask length MUST be equal to the mask length in bits for
		  the given Address Family and Encoding Type (32 for IPv4
		  native and 128 for IPv6 native).  A router SHOULD ignore any
		  messages received with any other mask length.
		*/
		if (mask_len != PIM_MAX_BITLEN) {
			zlog_warn("%s: IP bad source address mask: %d",
				  __func__, mask_len);
			return -4;
		}

		addr += sizeof(sg->src);

		break;
	default:
		zlog_warn(
			"%s: unknown source address encoding family=%d: %02x%02x%02x%02x",
			__func__, family, buf[0], buf[1], buf[2], buf[3]);
		return -5;
	}

	return addr - buf;
}
+
+#define FREE_ADDR_LIST(hello_option_addr_list) \
+ { \
+ if (hello_option_addr_list) { \
+ list_delete(&hello_option_addr_list); \
+ hello_option_addr_list = 0; \
+ } \
+ }
+
/*
 * Parse the Address List hello TLV: a sequence of Encoded-Unicast
 * secondary addresses.  Appends them (as prefixes) to
 * *hello_option_addr_list -- allocated on first use, freed again on parse
 * error -- and sets PIM_OPTION_MASK_ADDRESS_LIST on success.
 *
 * Returns 0 on success, -1 if any embedded address fails to parse.
 */
int pim_tlv_parse_addr_list(const char *ifname, pim_addr src_addr,
			    pim_hello_options *hello_options,
			    struct list **hello_option_addr_list,
			    uint16_t option_len, const uint8_t *tlv_curr)
{
	const uint8_t *addr;
	const uint8_t *pastend;

	assert(hello_option_addr_list);

	/*
	  Scan addr list
	*/
	addr = tlv_curr;
	pastend = tlv_curr + option_len;
	while (addr < pastend) {
		struct prefix tmp, src_pfx;
		int addr_offset;

		/*
		  Parse ucast addr
		*/
		addr_offset =
			pim_parse_addr_ucast_prefix(&tmp, addr, pastend - addr);
		if (addr_offset < 1) {
			zlog_warn(
				"%s: pim_parse_addr_ucast() failure: from %pPAs on %s",
				__func__, &src_addr, ifname);
			FREE_ADDR_LIST(*hello_option_addr_list);
			return -1;
		}
		addr += addr_offset;

		/*
		  Debug
		  NOTE(review): only the IPv4 case prints the address; the
		  AF_INET6 branch is intentionally(?) silent -- confirm
		*/
		if (PIM_DEBUG_PIM_TRACE) {
			switch (tmp.family) {
			case AF_INET: {
				char addr_str[INET_ADDRSTRLEN];
				pim_inet4_dump("<addr?>", tmp.u.prefix4,
					       addr_str, sizeof(addr_str));
				zlog_debug(
					"%s: PIM hello TLV option: list_old_size=%d IPv4 address %s from %pPAs on %s",
					__func__,
					*hello_option_addr_list
						? ((int)listcount(
							  *hello_option_addr_list))
						: -1,
					addr_str, &src_addr, ifname);
			} break;
			case AF_INET6:
				break;
			default:
				zlog_debug(
					"%s: PIM hello TLV option: list_old_size=%d UNKNOWN address family from %pPAs on %s",
					__func__,
					*hello_option_addr_list
						? ((int)listcount(
							  *hello_option_addr_list))
						: -1,
					&src_addr, ifname);
			}
		}

		/*
		  Exclude neighbor's primary address if incorrectly included in
		  the secondary address list
		*/
		pim_addr_to_prefix(&src_pfx, src_addr);
		if (!prefix_cmp(&tmp, &src_pfx)) {
			zlog_warn(
				"%s: ignoring primary address in secondary list from %pPAs on %s",
				__func__, &src_addr, ifname);
			continue;
		}

		/*
		  Allocate list if needed
		*/
		if (!*hello_option_addr_list) {
			*hello_option_addr_list = list_new();
			(*hello_option_addr_list)->del = prefix_free_lists;
		}

		/*
		  Attach addr to list
		*/
		{
			struct prefix *p;
			p = prefix_new();
			prefix_copy(p, &tmp);
			listnode_add(*hello_option_addr_list, p);
		}

	} /* while (addr < pastend) */

	/*
	  Mark hello option
	*/
	PIM_OPTION_SET(*hello_options, PIM_OPTION_MASK_ADDRESS_LIST);

	return 0;
}
diff --git a/pimd/pim_tlv.h b/pimd/pim_tlv.h
new file mode 100644
index 0000000..ea2af64
--- /dev/null
+++ b/pimd/pim_tlv.h
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_TLV_H
+#define PIM_TLV_H
+
+#include <zebra.h>
+
+#include "config.h"
+#include "if.h"
+#include "linklist.h"
+
+#define PIM_MSG_OPTION_TYPE_HOLDTIME (1)
+#define PIM_MSG_OPTION_TYPE_LAN_PRUNE_DELAY (2)
+#define PIM_MSG_OPTION_TYPE_DR_PRIORITY (19)
+#define PIM_MSG_OPTION_TYPE_GENERATION_ID (20)
+#define PIM_MSG_OPTION_TYPE_DM_STATE_REFRESH (21)
+#define PIM_MSG_OPTION_TYPE_ADDRESS_LIST (24)
+
+typedef uint32_t pim_hello_options;
+#define PIM_OPTION_MASK_HOLDTIME (1 << 0) /* recv holdtime */
+#define PIM_OPTION_MASK_LAN_PRUNE_DELAY (1 << 1) /* recv lan_prune_delay */
+#define PIM_OPTION_MASK_DR_PRIORITY (1 << 2) /* recv dr_priority */
+#define PIM_OPTION_MASK_GENERATION_ID (1 << 3) /* recv generation_id */
+#define PIM_OPTION_MASK_ADDRESS_LIST (1 << 4) /* recv secondary address list */
+#define PIM_OPTION_MASK_CAN_DISABLE_JOIN_SUPPRESSION (1 << 5) /* T bit value (valid if recv lan_prune_delay) */
+
+#define PIM_RPT_BIT_MASK (1 << 0)
+#define PIM_WILDCARD_BIT_MASK (1 << 1)
+
+#define PIM_OPTION_SET(options, option_mask) ((options) |= (option_mask))
+#define PIM_OPTION_UNSET(options, option_mask) ((options) &= ~(option_mask))
+#define PIM_OPTION_IS_SET(options, option_mask) ((options) & (option_mask))
+
+#define PIM_TLV_GET_UINT16(buf) \
+ ({ \
+ uint16_t _tmp; \
+ memcpy(&_tmp, (buf), sizeof(uint16_t)); \
+ ntohs(_tmp); \
+ })
+#define PIM_TLV_GET_UINT32(buf) \
+ ({ \
+ uint32_t _tmp; \
+ memcpy(&_tmp, (buf), sizeof(uint32_t)); \
+ ntohl(_tmp); \
+ })
+#define PIM_TLV_GET_TYPE(buf) PIM_TLV_GET_UINT16(buf)
+#define PIM_TLV_GET_LENGTH(buf) PIM_TLV_GET_UINT16(buf)
+#define PIM_TLV_GET_HOLDTIME(buf) PIM_TLV_GET_UINT16(buf)
+#define PIM_TLV_GET_PROPAGATION_DELAY(buf) (PIM_TLV_GET_UINT16(buf) & 0x7FFF)
+#define PIM_TLV_GET_OVERRIDE_INTERVAL(buf) PIM_TLV_GET_UINT16(buf)
+#define PIM_TLV_GET_CAN_DISABLE_JOIN_SUPPRESSION(buf) ((*(const uint8_t *)(buf)) & 0x80)
+#define PIM_TLV_GET_DR_PRIORITY(buf) PIM_TLV_GET_UINT32(buf)
+#define PIM_TLV_GET_GENERATION_ID(buf) PIM_TLV_GET_UINT32(buf)
+
+#define PIM_TLV_TYPE_SIZE (2)
+#define PIM_TLV_LENGTH_SIZE (2)
+#define PIM_TLV_MIN_SIZE (PIM_TLV_TYPE_SIZE + PIM_TLV_LENGTH_SIZE)
+#define PIM_TLV_OPTION_SIZE(option_len) (PIM_TLV_MIN_SIZE + (option_len))
+
+uint8_t *pim_tlv_append_uint16(uint8_t *buf, const uint8_t *buf_pastend,
+ uint16_t option_type, uint16_t option_value);
+uint8_t *pim_tlv_append_2uint16(uint8_t *buf, const uint8_t *buf_pastend,
+ uint16_t option_type, uint16_t option_value1,
+ uint16_t option_value2);
+uint8_t *pim_tlv_append_uint32(uint8_t *buf, const uint8_t *buf_pastend,
+ uint16_t option_type, uint32_t option_value);
+uint8_t *pim_tlv_append_addrlist_ucast(uint8_t *buf, const uint8_t *buf_pastend,
+ struct interface *ifp, int family);
+
+int pim_tlv_parse_holdtime(const char *ifname, pim_addr src_addr,
+ pim_hello_options *hello_options,
+ uint16_t *hello_option_holdtime, uint16_t option_len,
+ const uint8_t *tlv_curr);
+int pim_tlv_parse_lan_prune_delay(const char *ifname, pim_addr src_addr,
+ pim_hello_options *hello_options,
+ uint16_t *hello_option_propagation_delay,
+ uint16_t *hello_option_override_interval,
+ uint16_t option_len, const uint8_t *tlv_curr);
+int pim_tlv_parse_dr_priority(const char *ifname, pim_addr src_addr,
+ pim_hello_options *hello_options,
+ uint32_t *hello_option_dr_priority,
+ uint16_t option_len, const uint8_t *tlv_curr);
+int pim_tlv_parse_generation_id(const char *ifname, pim_addr src_addr,
+ pim_hello_options *hello_options,
+ uint32_t *hello_option_generation_id,
+ uint16_t option_len, const uint8_t *tlv_curr);
+int pim_tlv_parse_addr_list(const char *ifname, pim_addr src_addr,
+ pim_hello_options *hello_options,
+ struct list **hello_option_addr_list,
+ uint16_t option_len, const uint8_t *tlv_curr);
+
+int pim_encode_addr_ucast(uint8_t *buf, pim_addr addr);
+int pim_encode_addr_ucast_prefix(uint8_t *buf, struct prefix *p);
+int pim_encode_addr_group(uint8_t *buf, afi_t afi, int bidir, int scope,
+ pim_addr group);
+
+int pim_parse_addr_ucast(pim_addr *out, const uint8_t *buf, int buf_size,
+ bool *wrong_af);
+int pim_parse_addr_ucast_prefix(struct prefix *out, const uint8_t *buf,
+ int buf_size);
+int pim_parse_addr_group(pim_sgaddr *sg, const uint8_t *buf, int buf_size);
+int pim_parse_addr_source(pim_sgaddr *sg, uint8_t *flags, const uint8_t *buf,
+ int buf_size);
+
+#endif /* PIM_TLV_H */
diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c
new file mode 100644
index 0000000..e36bd82
--- /dev/null
+++ b/pimd/pim_upstream.c
@@ -0,0 +1,2174 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "zclient.h"
+#include "memory.h"
+#include "frrevent.h"
+#include "linklist.h"
+#include "vty.h"
+#include "plist.h"
+#include "hash.h"
+#include "jhash.h"
+#include "wheel.h"
+#include "network.h"
+
+#include "pimd.h"
+#include "pim_pim.h"
+#include "pim_str.h"
+#include "pim_time.h"
+#include "pim_iface.h"
+#include "pim_join.h"
+#include "pim_zlookup.h"
+#include "pim_upstream.h"
+#include "pim_ifchannel.h"
+#include "pim_neighbor.h"
+#include "pim_rpf.h"
+#include "pim_zebra.h"
+#include "pim_oil.h"
+#include "pim_macro.h"
+#include "pim_rp.h"
+#include "pim_register.h"
+#include "pim_msdp.h"
+#include "pim_jp_agg.h"
+#include "pim_nht.h"
+#include "pim_ssm.h"
+#include "pim_vxlan.h"
+#include "pim_mlag.h"
+
+static void join_timer_stop(struct pim_upstream *up);
+static void
+pim_upstream_update_assert_tracking_desired(struct pim_upstream *up);
+static bool pim_upstream_sg_running_proc(struct pim_upstream *up);
+
+/*
+ * A (*,G) or a (*,*) is going away
+ * remove the parent pointer from
+ * those pointing at us
+ */
+/* Detach every (S,G) child hanging off a (*,G)/(*,*) entry that is going
+ * away.  Children that exist only because of a local-host receiver
+ * (SRC_LHR) are dropped outright, which may free them (pim_upstream_del()
+ * returns NULL once the last reference is gone).
+ */
+static void pim_upstream_remove_children(struct pim_instance *pim,
+					 struct pim_upstream *up)
+{
+	struct pim_upstream *child;
+
+	if (!up->sources)
+		return;
+
+	while (!list_isempty(up->sources)) {
+		child = listnode_head(up->sources);
+		listnode_delete(up->sources, child);
+		if (PIM_UPSTREAM_FLAG_TEST_SRC_LHR(child->flags)) {
+			PIM_UPSTREAM_FLAG_UNSET_SRC_LHR(child->flags);
+			child = pim_upstream_del(pim, child, __func__);
+		}
+		if (child) {
+			child->parent = NULL;
+			/* child was inheriting its IIF via the RPT through
+			 * this parent; recompute now that parent is gone
+			 */
+			if (PIM_UPSTREAM_FLAG_TEST_USE_RPT(child->flags))
+				pim_upstream_mroute_iif_update(
+					child->channel_oil,
+					__func__);
+		}
+	}
+	list_delete(&up->sources);
+}
+
+/*
+ * A (*,G) or a (*,*) is being created
+ * Find the children that would point
+ * at us.
+ */
+static void pim_upstream_find_new_children(struct pim_instance *pim,
+					   struct pim_upstream *up)
+{
+	struct pim_upstream *child;
+
+	/* an (S,G) entry can never be a parent */
+	if (!pim_addr_is_any(up->sg.src) && !pim_addr_is_any(up->sg.grp))
+		return;
+
+	/* a full (*,*) wildcard is not handled here */
+	if (pim_addr_is_any(up->sg.src) && pim_addr_is_any(up->sg.grp))
+		return;
+
+	/* adopt every existing (S,G) for our group and re-evaluate the
+	 * child's IIF, which may now be inherited from this (*,G)
+	 */
+	frr_each (rb_pim_upstream, &pim->upstream_head, child) {
+		if (!pim_addr_is_any(up->sg.grp) &&
+		    !pim_addr_cmp(child->sg.grp, up->sg.grp) && (child != up)) {
+			child->parent = up;
+			listnode_add_sort(up->sources, child);
+			if (PIM_UPSTREAM_FLAG_TEST_USE_RPT(child->flags))
+				pim_upstream_mroute_iif_update(
+					child->channel_oil,
+					__func__);
+		}
+	}
+}
+
+/*
+ * If we have a (*,*) || (S,*) there is no parent
+ * If we have a (S,G), find the (*,G)
+ * If we have a (*,G), find the (*,*)
+ */
+/* Locate the parent entry for a new upstream and register the child on the
+ * parent's sources list.  Only the (S,G) -> (*,G) relationship is resolved
+ * here; (*,G) and (*,*) entries have no parent.  Returns the parent or
+ * NULL.
+ */
+static struct pim_upstream *pim_upstream_find_parent(struct pim_instance *pim,
+						     struct pim_upstream *child)
+{
+	pim_sgaddr any = child->sg;
+	struct pim_upstream *up = NULL;
+
+	// (S,G)
+	if (!pim_addr_is_any(child->sg.src) &&
+	    !pim_addr_is_any(child->sg.grp)) {
+		any.src = PIMADDR_ANY;
+		up = pim_upstream_find(pim, &any);
+
+		if (up)
+			listnode_add(up->sources, child);
+
+		/*
+		 * In case parent is MLAG entry copy the data to child
+		 */
+		if (up && PIM_UPSTREAM_FLAG_TEST_MLAG_INTERFACE(up->flags)) {
+			PIM_UPSTREAM_FLAG_SET_MLAG_INTERFACE(child->flags);
+			if (PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags))
+				PIM_UPSTREAM_FLAG_SET_MLAG_NON_DF(child->flags);
+			else
+				PIM_UPSTREAM_FLAG_UNSET_MLAG_NON_DF(
+					child->flags);
+		}
+
+		return up;
+	}
+
+	return NULL;
+}
+
+/* Break the two-way link between an upstream entry and its channel_oil,
+ * then drop the upstream's reference on the oil (which may or may not
+ * free it, depending on other holders).
+ */
+static void upstream_channel_oil_detach(struct pim_upstream *up)
+{
+	struct channel_oil *channel_oil = up->channel_oil;
+
+	if (channel_oil) {
+		/* Detaching from channel_oil, channel_oil may exist post del,
+		   but upstream would not keep reference of it
+		 */
+		channel_oil->up = NULL;
+		up->channel_oil = NULL;
+
+		/* attempt to delete channel_oil; if channel_oil is being held
+		 * because of other references cleanup info such as "Mute"
+		 * inferred from the parent upstream
+		 */
+		pim_channel_oil_upstream_deref(channel_oil);
+	}
+
+}
+
+/* Cancel all per-upstream timers (keepalive, register-stop, MSDP
+ * registration and join) ahead of entry teardown.
+ */
+static void pim_upstream_timers_stop(struct pim_upstream *up)
+{
+	EVENT_OFF(up->t_ka_timer);
+	EVENT_OFF(up->t_rs_timer);
+	EVENT_OFF(up->t_msdp_reg_timer);
+	EVENT_OFF(up->t_join_timer);
+}
+
+/* Drop one reference on @up.  If references remain the entry is returned
+ * unchanged; otherwise the entry is fully torn down (timers, joins,
+ * mroute, ifchannels, parent/child links, MSDP and NHT registrations)
+ * and freed, and NULL is returned.  Callers must use the return value.
+ */
+struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
+				      struct pim_upstream *up, const char *name)
+{
+	struct listnode *node, *nnode;
+	struct pim_ifchannel *ch;
+	bool notify_msdp = false;
+
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug(
+			"%s(%s): Delete %s[%s] ref count: %d, flags: %d c_oil ref count %d (Pre decrement)",
+			__func__, name, up->sg_str, pim->vrf->name,
+			up->ref_count, up->flags,
+			up->channel_oil->oil_ref_count);
+
+	assert(up->ref_count > 0);
+
+	--up->ref_count;
+
+	if (up->ref_count >= 1)
+		return up;
+
+	if (PIM_DEBUG_TRACE)
+		zlog_debug("pim_upstream free vrf:%s %s flags 0x%x",
+			   pim->vrf->name, up->sg_str, up->flags);
+
+	if (pim_up_mlag_is_local(up))
+		pim_mlag_up_local_del(pim, up);
+
+	pim_upstream_timers_stop(up);
+
+	/* if still joined, send a prune towards the RPF neighbor first */
+	if (up->join_state == PIM_UPSTREAM_JOINED) {
+		pim_jp_agg_single_upstream_send(&up->rpf, up, 0);
+
+		if (pim_addr_is_any(up->sg.src)) {
+			/* if a (*, G) entry in the joined state is being
+			 * deleted we
+			 * need to notify MSDP */
+			notify_msdp = true;
+		}
+	}
+
+	join_timer_stop(up);
+	pim_jp_agg_upstream_verification(up, false);
+	up->rpf.source_nexthop.interface = NULL;
+
+	if (!pim_addr_is_any(up->sg.src)) {
+		/* take the (S,G) off the keepalive scan wheel */
+		if (pim->upstream_sg_wheel)
+			wheel_remove_item(pim->upstream_sg_wheel, up);
+		notify_msdp = true;
+	}
+
+	pim_mroute_del(up->channel_oil, __func__);
+	upstream_channel_oil_detach(up);
+
+	for (ALL_LIST_ELEMENTS(up->ifchannels, node, nnode, ch))
+		pim_ifchannel_delete(ch);
+	list_delete(&up->ifchannels);
+
+	pim_upstream_remove_children(pim, up);
+	if (up->sources)
+		list_delete(&up->sources);
+
+	if (up->parent && up->parent->sources)
+		listnode_delete(up->parent->sources, up);
+	up->parent = NULL;
+
+	rb_pim_upstream_del(&pim->upstream_head, up);
+
+	if (notify_msdp) {
+		pim_msdp_up_del(pim, &up->sg);
+	}
+
+	/* When RP gets deleted, pim_rp_del() deregister addr with Zebra NHT
+	 * and assign up->upstream_addr as INADDR_ANY.
+	 * So before de-registering the upstream address, check if is not equal
+	 * to INADDR_ANY. This is done in order to avoid de-registering for
+	 * 255.255.255.255 which is maintained for some reason..
+	 */
+	if (!pim_addr_is_any(up->upstream_addr)) {
+		/* Deregister addr with Zebra NHT */
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug(
+				"%s: Deregister upstream %s addr %pPA with Zebra NHT",
+				__func__, up->sg_str, &up->upstream_addr);
+		pim_delete_tracked_nexthop(pim, up->upstream_addr, up, NULL);
+	}
+
+	XFREE(MTYPE_PIM_UPSTREAM, up);
+
+	return NULL;
+}
+
+/* Send a Join(S,G)/Join(*,G) towards the current RPF neighbor via the
+ * J/P aggregation layer.  A missing RPF interface aborts silently (debug
+ * logged); an unresolved RPF address only produces a warning.
+ */
+void pim_upstream_send_join(struct pim_upstream *up)
+{
+	if (!up->rpf.source_nexthop.interface) {
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug("%s: up %s RPF is not present", __func__,
+				   up->sg_str);
+		return;
+	}
+
+	if (PIM_DEBUG_PIM_TRACE) {
+		zlog_debug("%s: RPF'%s=%pPA(%s) for Interface %s", __func__,
+			   up->sg_str, &up->rpf.rpf_addr,
+			   pim_upstream_state2str(up->join_state),
+			   up->rpf.source_nexthop.interface->name);
+		if (pim_rpf_addr_is_inaddr_any(&up->rpf)) {
+			zlog_debug("%s: can't send join upstream: RPF'%s=%pPA",
+				   __func__, up->sg_str, &up->rpf.rpf_addr);
+			/* warning only */
+		}
+	}
+
+	/* send Join(S,G) to the current upstream neighbor */
+	pim_jp_agg_single_upstream_send(&up->rpf, up, 1 /* join */);
+}
+
+/* Periodic join timer expiry handler: refresh the upstream join (unless
+ * suppressed, see below) and re-arm the timer.
+ */
+static void on_join_timer(struct event *t)
+{
+	struct pim_upstream *up;
+
+	up = EVENT_ARG(t);
+
+	if (!up->rpf.source_nexthop.interface) {
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug("%s: up %s RPF is not present", __func__,
+				   up->sg_str);
+		return;
+	}
+
+	/*
+	 * In the case of a FHR we will not have anyone to send this to.
+	 */
+	if (PIM_UPSTREAM_FLAG_TEST_FHR(up->flags))
+		return;
+
+	/*
+	 * Don't send the join if the outgoing interface is a loopback
+	 * But since this might change leave the join timer running
+	 */
+	if (up->rpf.source_nexthop
+		    .interface && !if_is_loopback(up->rpf.source_nexthop.interface))
+		pim_upstream_send_join(up);
+
+	join_timer_start(up);
+}
+
+/* Stop join refreshes for @up: cancel the per-upstream timer and, when
+ * the upstream is tracked on the RPF neighbor's J/P aggregation list,
+ * remove it from there too.
+ */
+static void join_timer_stop(struct pim_upstream *up)
+{
+	struct pim_neighbor *nbr = NULL;
+
+	EVENT_OFF(up->t_join_timer);
+
+	if (up->rpf.source_nexthop.interface)
+		nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
+					up->rpf.rpf_addr, true);
+
+	if (nbr)
+		pim_jp_agg_remove_group(nbr->upstream_jp_agg, up, nbr);
+
+	pim_jp_agg_upstream_verification(up, false);
+}
+
+/* Arrange periodic join refreshes for @up.  If an RPF neighbor exists the
+ * upstream is tracked on that neighbor's J/P aggregation list (the
+ * neighbor's jp_timer drives the sends); otherwise fall back to the
+ * per-upstream t_join_timer with period router->t_periodic.
+ */
+void join_timer_start(struct pim_upstream *up)
+{
+	struct pim_neighbor *nbr = NULL;
+
+	if (up->rpf.source_nexthop.interface) {
+		nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
+					up->rpf.rpf_addr, true);
+
+		if (PIM_DEBUG_PIM_EVENTS) {
+			zlog_debug(
+				"%s: starting %d sec timer for upstream (S,G)=%s",
+				__func__, router->t_periodic, up->sg_str);
+		}
+	}
+
+	if (nbr)
+		pim_jp_agg_add_group(nbr->upstream_jp_agg, up, 1, nbr);
+	else {
+		EVENT_OFF(up->t_join_timer);
+		event_add_timer(router->master, on_join_timer, up,
+				router->t_periodic, &up->t_join_timer);
+	}
+	pim_jp_agg_upstream_verification(up, true);
+}
+
+/*
+ * This is only called when we are switching the upstream
+ * J/P from one neighbor to another
+ *
+ * As such we need to remove from the old list and
+ * add to the new list.
+ */
+void pim_upstream_join_timer_restart(struct pim_upstream *up,
+				     struct pim_rpf *old)
+{
+	/* join_timer_start() re-homes the upstream onto the new RPF
+	 * neighbor's aggregation list (or re-arms t_join_timer), so no
+	 * explicit timer stop is needed here.
+	 */
+	join_timer_start(up);
+}
+
+/* Re-arm the per-upstream join timer to fire in @interval_msec.  Used for
+ * suppression and t_override adjustments, which need sub-second
+ * granularity.
+ */
+static void pim_upstream_join_timer_restart_msec(struct pim_upstream *up,
+						 int interval_msec)
+{
+	if (PIM_DEBUG_PIM_EVENTS) {
+		zlog_debug("%s: restarting %d msec timer for upstream (S,G)=%s",
+			   __func__, interval_msec, up->sg_str);
+	}
+
+	EVENT_OFF(up->t_join_timer);
+	event_add_timer_msec(router->master, on_join_timer, up, interval_msec,
+			     &up->t_join_timer);
+}
+
+/* Apply a new global register-suppress time and propagate the derived RP
+ * keepalive period to every VRF instance that was still using the old
+ * derived default (manually configured values are left alone).
+ */
+void pim_update_suppress_timers(uint32_t suppress_time)
+{
+	struct pim_instance *pim;
+	struct vrf *vrf;
+	unsigned int old_rp_ka_time;
+
+	/* stash the old one so we know which values were manually configured */
+	old_rp_ka_time = (3 * router->register_suppress_time
+			  + router->register_probe_time);
+	router->register_suppress_time = suppress_time;
+
+	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+		pim = vrf->info;
+		if (!pim)
+			continue;
+
+		/* Only adjust if not manually configured */
+		if (pim->rp_keep_alive_time == old_rp_ka_time)
+			pim->rp_keep_alive_time = PIM_RP_KEEPALIVE_PERIOD;
+	}
+}
+
+/* A Join matching our RPF'(S,G) was seen from another router on the
+ * upstream interface: suppress our own periodic Join by stretching the
+ * join timer to t_suppressed (bounded by the received holdtime), per the
+ * upstream state machine suppression rules.
+ */
+void pim_upstream_join_suppress(struct pim_upstream *up, pim_addr rpf,
+				int holdtime)
+{
+	long t_joinsuppress_msec;
+	long join_timer_remain_msec = 0;
+	struct pim_neighbor *nbr = NULL;
+
+	if (!up->rpf.source_nexthop.interface) {
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug("%s: up %s RPF is not present", __func__,
+				   up->sg_str);
+		return;
+	}
+
+	t_joinsuppress_msec =
+		MIN(pim_if_t_suppressed_msec(up->rpf.source_nexthop.interface),
+		    1000 * holdtime);
+
+	if (up->t_join_timer)
+		join_timer_remain_msec =
+			pim_time_timer_remain_msec(up->t_join_timer);
+	else {
+		/* Remove it from jp agg from the nbr for suppression */
+		nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
+					up->rpf.rpf_addr, true);
+
+		if (nbr) {
+			/* tracked on the neighbor: its jp_timer is the
+			 * effective join timer
+			 */
+			join_timer_remain_msec =
+				pim_time_timer_remain_msec(nbr->jp_timer);
+		}
+	}
+
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug(
+			"%s %s: detected Join%s to RPF'(S,G)=%pPA: join_timer=%ld msec t_joinsuppress=%ld msec",
+			__FILE__, __func__, up->sg_str, &rpf,
+			join_timer_remain_msec, t_joinsuppress_msec);
+
+	if (join_timer_remain_msec < t_joinsuppress_msec) {
+		if (PIM_DEBUG_PIM_TRACE) {
+			zlog_debug(
+				"%s %s: suppressing Join(S,G)=%s for %ld msec",
+				__FILE__, __func__, up->sg_str,
+				t_joinsuppress_msec);
+		}
+
+		if (nbr)
+			pim_jp_agg_remove_group(nbr->upstream_jp_agg, up, nbr);
+
+		pim_upstream_join_timer_restart_msec(up, t_joinsuppress_msec);
+	}
+}
+
+/* A Prune matching our RPF'(S,G) was seen upstream: if our join timer
+ * would fire later than t_override, pull it in to t_override so the
+ * prune gets overridden in time (RFC 4601/7761 upstream override rules).
+ */
+void pim_upstream_join_timer_decrease_to_t_override(const char *debug_label,
+						    struct pim_upstream *up)
+{
+	long join_timer_remain_msec;
+	int t_override_msec;
+
+	if (!up->rpf.source_nexthop.interface) {
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug("%s: up %s RPF is not present", __func__,
+				   up->sg_str);
+		return;
+	}
+
+	t_override_msec =
+		pim_if_t_override_msec(up->rpf.source_nexthop.interface);
+
+	if (up->t_join_timer) {
+		join_timer_remain_msec =
+			pim_time_timer_remain_msec(up->t_join_timer);
+	} else {
+		/* upstream join tracked with neighbor jp timer */
+		struct pim_neighbor *nbr;
+
+		nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
+					up->rpf.rpf_addr, true);
+
+		if (nbr)
+			join_timer_remain_msec =
+				pim_time_timer_remain_msec(nbr->jp_timer);
+		else
+			/* Manipulate such that override takes place */
+			join_timer_remain_msec = t_override_msec + 1;
+	}
+
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug(
+			"%s: to RPF'%s=%pPA: join_timer=%ld msec t_override=%d msec",
+			debug_label, up->sg_str, &up->rpf.rpf_addr,
+			join_timer_remain_msec, t_override_msec);
+
+	if (join_timer_remain_msec > t_override_msec) {
+		if (PIM_DEBUG_PIM_TRACE) {
+			zlog_debug(
+				"%s: decreasing (S,G)=%s join timer to t_override=%d msec",
+				debug_label, up->sg_str, t_override_msec);
+		}
+
+		pim_upstream_join_timer_restart_msec(up, t_override_msec);
+	}
+}
+
+/* Start forwarding on every interface channel of @up whose (S,G) state
+ * places it in the outgoing interface list.
+ */
+static void forward_on(struct pim_upstream *up)
+{
+	struct listnode *chnode;
+	struct listnode *chnextnode;
+	struct pim_ifchannel *ch = NULL;
+
+	/* scan (S,G) state */
+	for (ALL_LIST_ELEMENTS(up->ifchannels, chnode, chnextnode, ch)) {
+		if (pim_macro_chisin_oiflist(ch))
+			pim_forward_start(ch);
+
+	} /* scan iface channel list */
+}
+
+/* Stop forwarding on all interface channels of @up unconditionally. */
+static void forward_off(struct pim_upstream *up)
+{
+	struct listnode *chnode;
+	struct listnode *chnextnode;
+	struct pim_ifchannel *ch;
+
+	/* scan per-interface (S,G) state */
+	for (ALL_LIST_ELEMENTS(up->ifchannels, chnode, chnextnode, ch)) {
+
+		pim_forward_stop(ch);
+
+	} /* scan iface channel list */
+}
+
+/* CouldRegister(S,G): true when this router should act as the registering
+ * first-hop router for the source — either forced via FORCE_PIMREG, or
+ * when we are DR on the RPF interface and the source is directly
+ * connected on it.  Returns 1/0.
+ */
+int pim_upstream_could_register(struct pim_upstream *up)
+{
+	struct pim_interface *pim_ifp = NULL;
+
+	/* FORCE_PIMREG is a generic flag to let an app like VxLAN-AA register
+	 * a source on an upstream entry even if the source is not directly
+	 * connected on the IIF.
+	 */
+	if (PIM_UPSTREAM_FLAG_TEST_FORCE_PIMREG(up->flags))
+		return 1;
+
+	if (up->rpf.source_nexthop.interface)
+		pim_ifp = up->rpf.source_nexthop.interface->info;
+	else {
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug("%s: up %s RPF is not present", __func__,
+				   up->sg_str);
+	}
+
+	if (pim_ifp && PIM_I_am_DR(pim_ifp)
+	    && pim_if_connected_to_source(up->rpf.source_nexthop.interface,
+					  up->sg.src))
+		return 1;
+
+	return 0;
+}
+
+/* Source registration is suppressed for SSM groups. When the SSM range changes
+ * we re-evaluate register setup for existing upstream entries */
+void pim_upstream_register_reevaluate(struct pim_instance *pim)
+{
+	struct pim_upstream *up;
+
+	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+		/* If FHR is set CouldRegister is True. Also check if the flow
+		 * is actually active; if it is not kat setup will trigger
+		 * source
+		 * registration whenever the flow becomes active. */
+		if (!PIM_UPSTREAM_FLAG_TEST_FHR(up->flags) ||
+		    !pim_upstream_is_kat_running(up))
+			continue;
+
+		if (pim_is_grp_ssm(pim, up->sg.grp)) {
+			/* clear the register state for SSM groups */
+			if (up->reg_state != PIM_REG_NOINFO) {
+				if (PIM_DEBUG_PIM_EVENTS)
+					zlog_debug(
+						"Clear register for %s as G is now SSM",
+						up->sg_str);
+				/* remove regiface from the OIL if it is there*/
+				pim_channel_del_oif(up->channel_oil,
+						    pim->regiface,
+						    PIM_OIF_FLAG_PROTO_PIM,
+						    __func__);
+				up->reg_state = PIM_REG_NOINFO;
+			}
+		} else {
+			/* register ASM sources with the RP */
+			if (up->reg_state == PIM_REG_NOINFO) {
+				if (PIM_DEBUG_PIM_EVENTS)
+					zlog_debug(
+						"Register %s as G is now ASM",
+						up->sg_str);
+				pim_channel_add_oif(up->channel_oil,
+						    pim->regiface,
+						    PIM_OIF_FLAG_PROTO_PIM,
+						    __func__);
+				up->reg_state = PIM_REG_JOIN;
+			}
+		}
+	}
+}
+
+/* RFC7761, Section 4.2 “Data Packet Forwarding Rules” says we should
+ * forward a S -
+ * 1. along the SPT if SPTbit is set
+ * 2. and along the RPT if SPTbit is not set
+ * If forwarding is hw accelerated i.e. control and dataplane components
+ * are separate you may not be able to reliably set SPT bit on intermediate
+ * routers while still forwarding on the (S,G,rpt).
+ *
+ * This function is a slight deviation on the RFC and uses "traffic-agnostic"
+ * criteria to decide between using the RPT vs. SPT for forwarding.
+ */
+void pim_upstream_update_use_rpt(struct pim_upstream *up,
+				 bool update_mroute)
+{
+	bool old_use_rpt;
+	bool new_use_rpt;
+
+	/* only meaningful for (S,G) entries */
+	if (pim_addr_is_any(up->sg.src))
+		return;
+
+	old_use_rpt = !!PIM_UPSTREAM_FLAG_TEST_USE_RPT(up->flags);
+
+	/* We will use the SPT (IIF=RPF_interface(S) if -
+	 * 1. We have decided to join the SPT
+	 * 2. We are FHR
+	 * 3. Source is directly connected
+	 * 4. We are RP (parent's IIF is lo or vrf-device)
+	 * In all other cases the source will stay along the RPT and
+	 * IIF=RPF_interface(RP).
+	 */
+	if (up->join_state == PIM_UPSTREAM_JOINED ||
+	    PIM_UPSTREAM_FLAG_TEST_FHR(up->flags) ||
+	    pim_if_connected_to_source(
+		    up->rpf.source_nexthop.interface,
+		    up->sg.src) ||
+	    /* XXX - need to switch this to a more efficient
+	     * lookup API
+	     */
+	    I_am_RP(up->pim, up->sg.grp))
+		/* use SPT */
+		PIM_UPSTREAM_FLAG_UNSET_USE_RPT(up->flags);
+	else
+		/* use RPT */
+		PIM_UPSTREAM_FLAG_SET_USE_RPT(up->flags);
+
+	new_use_rpt = !!PIM_UPSTREAM_FLAG_TEST_USE_RPT(up->flags);
+	if (old_use_rpt != new_use_rpt) {
+		if (PIM_DEBUG_PIM_EVENTS)
+			zlog_debug("%s switched from %s to %s", up->sg_str,
+				   old_use_rpt ? "RPT" : "SPT",
+				   new_use_rpt ? "RPT" : "SPT");
+		/* tree choice changed: reprogram the mroute if requested */
+		if (update_mroute)
+			pim_upstream_mroute_add(up->channel_oil, __func__);
+	}
+}
+
+/* some events like RP change require re-evaluation of SGrpt across
+ * all groups
+ */
+void pim_upstream_reeval_use_rpt(struct pim_instance *pim)
+{
+	struct pim_upstream *up;
+
+	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+		/* RPT-vs-SPT choice only applies to (S,G) entries */
+		if (pim_addr_is_any(up->sg.src))
+			continue;
+
+		pim_upstream_update_use_rpt(up, true /*update_mroute*/);
+	}
+}
+
+/* Transition @up's upstream Join/Prune state machine to @new_state and
+ * perform the side effects of the transition: assert-tracking updates,
+ * sending Joins or Prunes, register/keepalive setup when becoming JOINED,
+ * and SPT/RPT re-evaluation.  Statement order here follows the state
+ * machine and must not be rearranged.
+ */
+void pim_upstream_switch(struct pim_instance *pim, struct pim_upstream *up,
+			 enum pim_upstream_state new_state)
+{
+	enum pim_upstream_state old_state = up->join_state;
+
+	if (pim_addr_is_any(up->upstream_addr)) {
+		if (PIM_DEBUG_PIM_EVENTS)
+			zlog_debug("%s: RPF not configured for %s", __func__,
+				   up->sg_str);
+		return;
+	}
+
+	if (!up->rpf.source_nexthop.interface) {
+		if (PIM_DEBUG_PIM_EVENTS)
+			zlog_debug("%s: RP not reachable for %s", __func__,
+				   up->sg_str);
+		return;
+	}
+
+	if (PIM_DEBUG_PIM_EVENTS) {
+		zlog_debug("%s: PIM_UPSTREAM_%s: (S,G) old: %s new: %s",
+			   __func__, up->sg_str,
+			   pim_upstream_state2str(up->join_state),
+			   pim_upstream_state2str(new_state));
+	}
+
+	up->join_state = new_state;
+	if (old_state != new_state)
+		up->state_transition = pim_time_monotonic_sec();
+
+	pim_upstream_update_assert_tracking_desired(up);
+
+	if (new_state == PIM_UPSTREAM_JOINED) {
+		pim_upstream_inherited_olist_decide(pim, up);
+		if (old_state != PIM_UPSTREAM_JOINED) {
+			int old_fhr = PIM_UPSTREAM_FLAG_TEST_FHR(up->flags);
+
+			pim_msdp_up_join_state_changed(pim, up);
+			if (pim_upstream_could_register(up)) {
+				PIM_UPSTREAM_FLAG_SET_FHR(up->flags);
+				/* freshly became FHR for a locally sourced
+				 * stream: start KAT and the register process
+				 */
+				if (!old_fhr
+				    && PIM_UPSTREAM_FLAG_TEST_SRC_STREAM(
+					       up->flags)) {
+					pim_upstream_keep_alive_timer_start(
+						up, pim->keep_alive_time);
+					pim_register_join(up);
+				}
+			} else {
+				pim_upstream_send_join(up);
+				join_timer_start(up);
+			}
+		}
+		if (old_state != new_state)
+			pim_upstream_update_use_rpt(up, true /*update_mroute*/);
+	} else {
+		bool old_use_rpt;
+		bool new_use_rpt;
+		bool send_xg_jp = false;
+
+		forward_off(up);
+		/*
+		 * RFC 4601 Sec 4.5.7:
+		 * JoinDesired(S,G) -> False, set SPTbit to false.
+		 */
+		if (!pim_addr_is_any(up->sg.src))
+			up->sptbit = PIM_UPSTREAM_SPTBIT_FALSE;
+
+		if (old_state == PIM_UPSTREAM_JOINED)
+			pim_msdp_up_join_state_changed(pim, up);
+
+		if (old_state != new_state) {
+			old_use_rpt =
+				!!PIM_UPSTREAM_FLAG_TEST_USE_RPT(up->flags);
+			pim_upstream_update_use_rpt(up, true /*update_mroute*/);
+			new_use_rpt =
+				!!PIM_UPSTREAM_FLAG_TEST_USE_RPT(up->flags);
+			if (new_use_rpt &&
+			    (new_use_rpt != old_use_rpt) &&
+			    up->parent)
+				/* we have decided to switch from the SPT back
+				 * to the RPT which means we need to cancel
+				 * any previously sent SGrpt prunes immediately
+				 */
+				send_xg_jp = true;
+		}
+
+		/* IHR, Trigger SGRpt on *,G IIF to prune S,G from RPT towards
+		   RP.
+		   If I am RP for G then send S,G prune to its IIF. */
+		if (pim_upstream_is_sg_rpt(up) && up->parent &&
+		    !I_am_RP(pim, up->sg.grp))
+			send_xg_jp = true;
+
+		pim_jp_agg_single_upstream_send(&up->rpf, up, 0 /* prune */);
+
+		if (send_xg_jp) {
+			if (PIM_DEBUG_PIM_TRACE_DETAIL)
+				zlog_debug(
+					"re-join RPT; *,G IIF %s S,G IIF %s ",
+					up->parent->rpf.source_nexthop.interface ?
+					up->parent->rpf.source_nexthop.interface->name
+					: "Unknown",
+					up->rpf.source_nexthop.interface ?
+					up->rpf.source_nexthop.interface->name :
+					"Unknown");
+			pim_jp_agg_single_upstream_send(&up->parent->rpf,
+							up->parent,
+							1 /* (W,G) Join */);
+		}
+		join_timer_stop(up);
+	}
+}
+
+/* Ordering for upstream entries by (S,G); used by the RB-tree and sorted
+ * lists.
+ */
+int pim_upstream_compare(const struct pim_upstream *up1,
+			 const struct pim_upstream *up2)
+{
+	return pim_sgaddr_cmp(up1->sg, up2->sg);
+}
+
+/* Force @incoming as the upstream's IIF (static/NOCACHE cases) and reset
+ * the nexthop data to look like a directly connected route.
+ */
+void pim_upstream_fill_static_iif(struct pim_upstream *up,
+				  struct interface *incoming)
+{
+	up->rpf.source_nexthop.interface = incoming;
+
+	/* reset other parameters to matched a connected incoming interface */
+	up->rpf.source_nexthop.mrib_nexthop_addr = PIMADDR_ANY;
+	up->rpf.source_nexthop.mrib_metric_preference =
+		ZEBRA_CONNECT_DISTANCE_DEFAULT;
+	up->rpf.source_nexthop.mrib_route_metric = 0;
+	up->rpf.rpf_addr = PIMADDR_ANY;
+}
+
+/* Allocate and fully initialize a new upstream entry for @sg: RB-tree
+ * insertion, parent/child linkage, channel_oil creation, RPF resolution
+ * (or static IIF), keepalive wheel membership for (S,G), and MLAG
+ * advertisement.  Returns the new entry with ref_count 1.
+ */
+static struct pim_upstream *pim_upstream_new(struct pim_instance *pim,
+					     pim_sgaddr *sg,
+					     struct interface *incoming,
+					     int flags,
+					     struct pim_ifchannel *ch)
+{
+	enum pim_rpf_result rpf_result;
+	struct pim_interface *pim_ifp;
+	struct pim_upstream *up;
+
+	up = XCALLOC(MTYPE_PIM_UPSTREAM, sizeof(*up));
+
+	up->pim = pim;
+	up->sg = *sg;
+	snprintfrr(up->sg_str, sizeof(up->sg_str), "%pSG", sg);
+	if (ch)
+		ch->upstream = up;
+
+	rb_pim_upstream_add(&pim->upstream_head, up);
+	/* Set up->upstream_addr as INADDR_ANY, if RP is not
+	 * configured and retain the upstream data structure
+	 */
+	if (!pim_rp_set_upstream_addr(pim, &up->upstream_addr, sg->src,
+				      sg->grp)) {
+		if (PIM_DEBUG_PIM_TRACE)
+			zlog_debug("%s: Received a (*,G) with no RP configured",
+				   __func__);
+	}
+
+	up->parent = pim_upstream_find_parent(pim, up);
+	if (pim_addr_is_any(up->sg.src)) {
+		/* (*,G): keep a sorted list of (S,G) children */
+		up->sources = list_new();
+		up->sources->cmp =
+			(int (*)(void *, void *))pim_upstream_compare;
+	} else
+		up->sources = NULL;
+
+	pim_upstream_find_new_children(pim, up);
+	up->flags = flags;
+	up->ref_count = 1;
+	up->t_join_timer = NULL;
+	up->t_ka_timer = NULL;
+	up->t_rs_timer = NULL;
+	up->t_msdp_reg_timer = NULL;
+	up->join_state = PIM_UPSTREAM_NOTJOINED;
+	up->reg_state = PIM_REG_NOINFO;
+	up->state_transition = pim_time_monotonic_sec();
+	up->channel_oil = pim_channel_oil_add(pim, &up->sg, __func__);
+	up->sptbit = PIM_UPSTREAM_SPTBIT_FALSE;
+
+	/* start with an unresolved RPF and infinite assert metric */
+	up->rpf.source_nexthop.interface = NULL;
+	up->rpf.source_nexthop.mrib_nexthop_addr = PIMADDR_ANY;
+	up->rpf.source_nexthop.mrib_metric_preference =
+		router->infinite_assert_metric.metric_preference;
+	up->rpf.source_nexthop.mrib_route_metric =
+		router->infinite_assert_metric.route_metric;
+	up->rpf.rpf_addr = PIMADDR_ANY;
+	up->ifchannels = list_new();
+	up->ifchannels->cmp = (int (*)(void *, void *))pim_ifchannel_compare;
+
+	if (!pim_addr_is_any(up->sg.src)) {
+		/* (S,G) entries get keepalive tracking via the timer wheel */
+		wheel_add_item(pim->upstream_sg_wheel, up);
+
+		/* Inherit the DF role from the parent (*, G) entry for
+		 * VxLAN BUM groups
+		 */
+		if (up->parent
+		    && PIM_UPSTREAM_FLAG_TEST_MLAG_VXLAN(up->parent->flags)
+		    && PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->parent->flags)) {
+			PIM_UPSTREAM_FLAG_SET_MLAG_NON_DF(up->flags);
+			if (PIM_DEBUG_VXLAN)
+				zlog_debug(
+					"upstream %s inherited mlag non-df flag from parent",
+					up->sg_str);
+		}
+	}
+
+	if (PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags)
+	    || PIM_UPSTREAM_FLAG_TEST_SRC_NOCACHE(up->flags)) {
+		pim_upstream_fill_static_iif(up, incoming);
+		pim_ifp = up->rpf.source_nexthop.interface->info;
+		assert(pim_ifp);
+		pim_upstream_update_use_rpt(up,
+					    false /*update_mroute*/);
+		pim_upstream_mroute_iif_update(up->channel_oil, __func__);
+
+		if (PIM_UPSTREAM_FLAG_TEST_SRC_NOCACHE(up->flags)) {
+			/*
+			 * Set the right RPF so that future changes will
+			 * be right
+			 */
+			rpf_result = pim_rpf_update(pim, up, NULL, __func__);
+			pim_upstream_keep_alive_timer_start(
+				up, pim->keep_alive_time);
+		}
+	} else if (!pim_addr_is_any(up->upstream_addr)) {
+		pim_upstream_update_use_rpt(up,
+					    false /*update_mroute*/);
+		rpf_result = pim_rpf_update(pim, up, NULL, __func__);
+		if (rpf_result == PIM_RPF_FAILURE) {
+			up->channel_oil->oil_inherited_rescan = 1;
+			if (PIM_DEBUG_PIM_TRACE)
+				zlog_debug(
+					"%s: Attempting to create upstream(%s), Unable to RPF for source",
+					__func__, up->sg_str);
+		}
+
+		/* Consider a case where (S,G,rpt) prune is received and this
+		 * upstream is getting created due to that, then as per RFC
+		 * until prune pending time we need to behave same as NOINFO
+		 * state, therefore do not install if OIF is NULL until then
+		 * This is for PIM Conformance PIM-SM 16.3 fix
+		 * When the prune pending timer pop, this mroute will get
+		 * installed with none as OIF */
+		if (up->rpf.source_nexthop.interface &&
+		    !(pim_upstream_empty_inherited_olist(up) && (ch != NULL) &&
+		      PIM_IF_FLAG_TEST_S_G_RPT(ch->flags))) {
+			pim_upstream_mroute_iif_update(up->channel_oil,
+						       __func__);
+		}
+	}
+
+	/* send the entry to the MLAG peer */
+	/* XXX - duplicate send is possible here if pim_rpf_update
+	 * successfully resolved the nexthop
+	 */
+	if (pim_up_mlag_is_local(up)
+	    || PIM_UPSTREAM_FLAG_TEST_MLAG_INTERFACE(up->flags))
+		pim_mlag_up_local_add(pim, up);
+
+	if (PIM_DEBUG_PIM_TRACE) {
+		zlog_debug(
+			"%s: Created Upstream %s upstream_addr %pPAs ref count %d increment",
+			__func__, up->sg_str, &up->upstream_addr,
+			up->ref_count);
+	}
+
+	return up;
+}
+
+/* Cost this MLAG switch advertises for @up.  Non-MLAG entries cost
+ * infinity; entries whose RPF is the peerlink are penalized by the
+ * peerlink-plus metric (capped so the addition cannot exceed infinity).
+ */
+uint32_t pim_up_mlag_local_cost(struct pim_upstream *up)
+{
+	if (!(pim_up_mlag_is_local(up))
+	    && !(up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE))
+		return router->infinite_assert_metric.route_metric;
+
+	if ((up->rpf.source_nexthop.interface ==
+	     up->pim->vxlan.peerlink_rif) &&
+	    (up->rpf.source_nexthop.mrib_route_metric <
+	     (router->infinite_assert_metric.route_metric -
+	      PIM_UPSTREAM_MLAG_PEERLINK_PLUS_METRIC)))
+		return up->rpf.source_nexthop.mrib_route_metric +
+		       PIM_UPSTREAM_MLAG_PEERLINK_PLUS_METRIC;
+
+	return up->rpf.source_nexthop.mrib_route_metric;
+}
+
+/* Cost the MLAG peer advertised for @up; infinity unless the peer flag is
+ * set.
+ */
+uint32_t pim_up_mlag_peer_cost(struct pim_upstream *up)
+{
+	if (!(up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_PEER))
+		return router->infinite_assert_metric.route_metric;
+
+	return up->mlag.peer_mrib_metric;
+}
+
+/* Look up the upstream entry for @sg in the per-instance RB-tree; NULL if
+ * absent.  Does not take a reference.
+ */
+struct pim_upstream *pim_upstream_find(struct pim_instance *pim, pim_sgaddr *sg)
+{
+	struct pim_upstream lookup;
+	struct pim_upstream *up = NULL;
+
+	lookup.sg = *sg;
+	up = rb_pim_upstream_find(&pim->upstream_head, &lookup);
+	return up;
+}
+
+/* Convenience wrapper: find-or-create an upstream for @sg on the instance
+ * owning @incoming, without an associated ifchannel.
+ */
+struct pim_upstream *pim_upstream_find_or_add(pim_sgaddr *sg,
+					      struct interface *incoming,
+					      int flags, const char *name)
+{
+	struct pim_interface *pim_ifp = incoming->info;
+
+	return (pim_upstream_add(pim_ifp->pim, sg, incoming, flags, name,
+				 NULL));
+}
+
+/* Take an additional reference on an existing upstream, merging @flags
+ * into it.  Flag transitions that have side effects (MLAG VxLAN, FHR,
+ * MSDP source) are handled before the flags are merged.
+ */
+void pim_upstream_ref(struct pim_upstream *up, int flags, const char *name)
+{
+	/* if a local MLAG reference is being created we need to send the mroute
+	 * to the peer
+	 */
+	if (!PIM_UPSTREAM_FLAG_TEST_MLAG_VXLAN(up->flags) &&
+	    PIM_UPSTREAM_FLAG_TEST_MLAG_VXLAN(flags)) {
+		PIM_UPSTREAM_FLAG_SET_MLAG_VXLAN(up->flags);
+		pim_mlag_up_local_add(up->pim, up);
+	}
+
+	/* when we go from non-FHR to FHR we need to re-eval traffic
+	 * forwarding path
+	 */
+	if (!PIM_UPSTREAM_FLAG_TEST_FHR(up->flags) &&
+	    PIM_UPSTREAM_FLAG_TEST_FHR(flags)) {
+		PIM_UPSTREAM_FLAG_SET_FHR(up->flags);
+		pim_upstream_update_use_rpt(up, true /*update_mroute*/);
+	}
+
+	/* re-eval joinDesired; clearing peer-msdp-sa flag can
+	 * cause JD to change
+	 */
+	if (!PIM_UPSTREAM_FLAG_TEST_SRC_MSDP(up->flags) &&
+	    PIM_UPSTREAM_FLAG_TEST_SRC_MSDP(flags)) {
+		PIM_UPSTREAM_FLAG_SET_SRC_MSDP(up->flags);
+		pim_upstream_update_join_desired(up->pim, up);
+	}
+
+	up->flags |= flags;
+	++up->ref_count;
+	if (PIM_DEBUG_PIM_TRACE)
+		zlog_debug("%s(%s): upstream %s ref count %d increment",
+			   __func__, name, up->sg_str, up->ref_count);
+}
+
+/* Find-or-create entry point: returns the upstream for @sg, taking a
+ * reference on an existing entry (merging @flags) or creating a new one
+ * via pim_upstream_new().
+ */
+struct pim_upstream *pim_upstream_add(struct pim_instance *pim, pim_sgaddr *sg,
+				      struct interface *incoming, int flags,
+				      const char *name,
+				      struct pim_ifchannel *ch)
+{
+	struct pim_upstream *up = NULL;
+	int found = 0;
+
+	up = pim_upstream_find(pim, sg);
+	if (up) {
+		pim_upstream_ref(up, flags, name);
+		found = 1;
+	} else {
+		up = pim_upstream_new(pim, sg, incoming, flags, ch);
+	}
+
+	if (PIM_DEBUG_PIM_TRACE) {
+		zlog_debug(
+			"%s(%s): %s, iif %pPA (%s) found: %d: ref_count: %d",
+			__func__, name, up->sg_str, &up->rpf.rpf_addr,
+			up->rpf.source_nexthop.interface ? up->rpf.source_nexthop
+							   .interface->name
+							 : "Unknown",
+			found, up->ref_count);
+	}
+
+	return up;
+}
+
+/*
+ * Passed in up must be the upstream for ch. starch is NULL if no
+ * information
+ * This function is copied over from
+ * pim_upstream_evaluate_join_desired_interface but limited to
+ * parent (*,G)'s includes/joins.  Returns 1 when the interface may be
+ * inherited into the (S,G) OIL, 0 otherwise.
+ */
+int pim_upstream_eval_inherit_if(struct pim_upstream *up,
+				 struct pim_ifchannel *ch,
+				 struct pim_ifchannel *starch)
+{
+	/* if there is an explicit prune for this interface we cannot
+	 * add it to the OIL
+	 */
+	if (ch) {
+		if (PIM_IF_FLAG_TEST_S_G_RPT(ch->flags))
+			return 0;
+	}
+
+	/* Check if the OIF can be inherited from the (*,G) entry
+	 */
+	if (starch) {
+		if (!pim_macro_ch_lost_assert(starch)
+		    && pim_macro_chisin_joins_or_include(starch))
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Passed in up must be the upstream for ch. starch is NULL if no
+ * information.  Returns 1 when this interface contributes to the
+ * immediate/inherited OIL via either the (S,G) channel or the parent
+ * (*,G) channel, 0 otherwise.
+ */
+int pim_upstream_evaluate_join_desired_interface(struct pim_upstream *up,
+						 struct pim_ifchannel *ch,
+						 struct pim_ifchannel *starch)
+{
+	if (ch) {
+		/* an (S,G,rpt) prune on this interface excludes it */
+		if (PIM_IF_FLAG_TEST_S_G_RPT(ch->flags))
+			return 0;
+
+		if (!pim_macro_ch_lost_assert(ch)
+		    && pim_macro_chisin_joins_or_include(ch))
+			return 1;
+	}
+
+	/*
+	 * joins (*,G)
+	 */
+	if (starch) {
+		/* XXX: check on this with donald
+		 * we are looking for PIM_IF_FLAG_MASK_S_G_RPT in
+		 * upstream flags?
+		 */
+#if 0
+		if (PIM_IF_FLAG_TEST_S_G_RPT(starch->upstream->flags))
+			return 0;
+#endif
+
+		if (!pim_macro_ch_lost_assert(starch)
+		    && pim_macro_chisin_joins_or_include(starch))
+			return 1;
+	}
+
+	return 0;
+}
+
+/* Returns true if immediate OIL is empty and is used to evaluate
+ * JoinDesired. See pim_upstream_evaluate_join_desired.
+ */
+static bool pim_upstream_empty_immediate_olist(struct pim_instance *pim,
+					       struct pim_upstream *up)
+{
+	struct interface *ifp;
+	struct pim_ifchannel *ch;
+
+	FOR_ALL_INTERFACES (pim->vrf, ifp) {
+		/* skip interfaces not enabled for PIM */
+		if (!ifp->info)
+			continue;
+
+		ch = pim_ifchannel_find(ifp, &up->sg);
+		if (!ch)
+			continue;
+
+		/* If we have even one immediate OIF we can return with
+		 * not-empty
+		 */
+		if (pim_upstream_evaluate_join_desired_interface(up, ch,
+								 NULL /* starch */))
+			return false;
+	} /* scan iface channel list */
+
+	/* immediate_oil is empty */
+	return true;
+}
+
+
+/* True when this (S,G) was learned from an MSDP peer SA message. */
+static inline bool pim_upstream_is_msdp_peer_sa(struct pim_upstream *up)
+{
+	return PIM_UPSTREAM_FLAG_TEST_SRC_MSDP(up->flags);
+}
+
+/*
+ * bool JoinDesired(*,G) {
+ *     if (immediate_olist(*,G) != NULL)
+ *         return TRUE
+ *     else
+ *         return FALSE
+ * }
+ *
+ * bool JoinDesired(S,G) {
+ *     return( immediate_olist(S,G) != NULL
+ *             OR ( KeepaliveTimer(S,G) is running
+ *                  AND inherited_olist(S,G) != NULL ) )
+ * }
+ *
+ * Note: an MSDP-learned source counts the same as a running KAT here.
+ */
+bool pim_upstream_evaluate_join_desired(struct pim_instance *pim,
+					struct pim_upstream *up)
+{
+	bool empty_imm_oil;
+	bool empty_inh_oil;
+
+	empty_imm_oil = pim_upstream_empty_immediate_olist(pim, up);
+
+	/* (*,G) */
+	if (pim_addr_is_any(up->sg.src))
+		return !empty_imm_oil;
+
+	/* (S,G) */
+	if (!empty_imm_oil)
+		return true;
+	empty_inh_oil = pim_upstream_empty_inherited_olist(up);
+	if (!empty_inh_oil &&
+	    (pim_upstream_is_kat_running(up) ||
+	     pim_upstream_is_msdp_peer_sa(up)))
+		return true;
+
+	return false;
+}
+
/*
 * Recompute JoinDesired, cache it in the upstream flags, and drive the
 * upstream Joined/NotJoined state machine on a transition.
 * See also pim_upstream_evaluate_join_desired() above.
 */
void pim_upstream_update_join_desired(struct pim_instance *pim,
				      struct pim_upstream *up)
{
	int was_join_desired; /* boolean */
	int is_join_desired;  /* boolean */

	was_join_desired = PIM_UPSTREAM_FLAG_TEST_DR_JOIN_DESIRED(up->flags);

	is_join_desired = pim_upstream_evaluate_join_desired(pim, up);
	/* cache the freshly evaluated value in the entry's flags */
	if (is_join_desired)
		PIM_UPSTREAM_FLAG_SET_DR_JOIN_DESIRED(up->flags);
	else
		PIM_UPSTREAM_FLAG_UNSET_DR_JOIN_DESIRED(up->flags);

	/* switched from false to true */
	if (is_join_desired && (up->join_state == PIM_UPSTREAM_NOTJOINED)) {
		pim_upstream_switch(pim, up, PIM_UPSTREAM_JOINED);
		return;
	}

	/* switched from true to false */
	if (!is_join_desired && was_join_desired) {
		pim_upstream_switch(pim, up, PIM_UPSTREAM_NOTJOINED);
		return;
	}
}
+
/*
  RFC 4601 4.5.7. Sending (S,G) Join/Prune Messages
  Transitions from Joined State
  RPF'(S,G) GenID changes

  The upstream (S,G) state machine remains in Joined state.  If the
  Join Timer is set to expire in more than t_override seconds, reset
  it so that it expires after t_override seconds.
*/
void pim_upstream_rpf_genid_changed(struct pim_instance *pim,
				    pim_addr neigh_addr)
{
	struct pim_upstream *up;

	/*
	 * Scan all (S,G) upstreams searching for RPF'(S,G)=neigh_addr
	 */
	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		pim_addr rpf_addr;

		rpf_addr = up->rpf.rpf_addr;

		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: matching neigh=%pPA against upstream (S,G)=%s[%s] joined=%d rpf_addr=%pPA",
				__func__, &neigh_addr, up->sg_str,
				pim->vrf->name,
				up->join_state == PIM_UPSTREAM_JOINED,
				&rpf_addr);

		/* consider only (S,G) upstream in Joined state */
		if (up->join_state != PIM_UPSTREAM_JOINED)
			continue;

		/* match RPF'(S,G)=neigh_addr; pim_addr_cmp() != 0 means
		 * this upstream uses a different RPF neighbor
		 */
		if (pim_addr_cmp(rpf_addr, neigh_addr))
			continue;

		/* restarted neighbor: resend the Join within t_override */
		pim_upstream_join_timer_decrease_to_t_override(
			"RPF'(S,G) GenID change", up);
	}
}
+

/*
 * The RPF interface towards the source changed: re-run the assert
 * machinery on every per-interface (S,G) channel of this upstream.
 */
void pim_upstream_rpf_interface_changed(struct pim_upstream *up,
					struct interface *old_rpf_ifp)
{
	struct listnode *chnode;
	struct listnode *chnextnode;
	struct pim_ifchannel *ch;

	/* search all ifchannels */
	for (ALL_LIST_ELEMENTS(up->ifchannels, chnode, chnextnode, ch)) {
		if (ch->ifassert_state == PIM_IFASSERT_I_AM_LOSER) {
			if (
				/* RPF_interface(S) was NOT I */
				(old_rpf_ifp == ch->interface) &&
				/* RPF_interface(S) stopped being I */
				(ch->upstream->rpf.source_nexthop
					 .interface) &&
				(ch->upstream->rpf.source_nexthop
					 .interface != ch->interface)) {
				/* assert state machine action A5 */
				assert_action_a5(ch);
			}
		} /* PIM_IFASSERT_I_AM_LOSER */

		pim_ifchannel_update_assert_tracking_desired(ch);
	}
}
+
+void pim_upstream_update_could_assert(struct pim_upstream *up)
+{
+ struct listnode *chnode;
+ struct listnode *chnextnode;
+ struct pim_ifchannel *ch;
+
+ /* scan per-interface (S,G) state */
+ for (ALL_LIST_ELEMENTS(up->ifchannels, chnode, chnextnode, ch)) {
+ pim_ifchannel_update_could_assert(ch);
+ } /* scan iface channel list */
+}
+
+void pim_upstream_update_my_assert_metric(struct pim_upstream *up)
+{
+ struct listnode *chnode;
+ struct listnode *chnextnode;
+ struct pim_ifchannel *ch;
+
+ /* scan per-interface (S,G) state */
+ for (ALL_LIST_ELEMENTS(up->ifchannels, chnode, chnextnode, ch)) {
+ pim_ifchannel_update_my_assert_metric(ch);
+
+ } /* scan iface channel list */
+}
+
/*
 * Re-evaluate AssertTrackingDesired on every per-interface (S,G)
 * channel, skipping interfaces that are not configured for pim.
 */
static void pim_upstream_update_assert_tracking_desired(struct pim_upstream *up)
{
	struct listnode *chnode;
	struct listnode *chnextnode;
	struct pim_interface *pim_ifp;
	struct pim_ifchannel *ch;

	/* scan per-interface (S,G) state */
	for (ALL_LIST_ELEMENTS(up->ifchannels, chnode, chnextnode, ch)) {
		if (!ch->interface)
			continue;
		pim_ifp = ch->interface->info;
		if (!pim_ifp)
			continue;

		pim_ifchannel_update_assert_tracking_desired(ch);

	} /* scan iface channel list */
}
+
/* When kat is stopped CouldRegister goes to false so we need to
 * transition the (S, G) on FHR to NI state and remove reg tunnel
 * from the OIL */
static void pim_upstream_fhr_kat_expiry(struct pim_instance *pim,
					struct pim_upstream *up)
{
	/* nothing to do unless we are first-hop router for this flow */
	if (!PIM_UPSTREAM_FLAG_TEST_FHR(up->flags))
		return;

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("kat expired on %s; clear fhr reg state",
			   up->sg_str);

	/* stop reg-stop timer */
	EVENT_OFF(up->t_rs_timer);
	/* remove regiface from the OIL if it is there */
	pim_channel_del_oif(up->channel_oil, pim->regiface,
			    PIM_OIF_FLAG_PROTO_PIM, __func__);
	/* clear the register state */
	up->reg_state = PIM_REG_NOINFO;
	PIM_UPSTREAM_FLAG_UNSET_FHR(up->flags);
}
+
/* When kat is started CouldRegister can go to true. And if it does we
 * need to transition the (S, G) on FHR to JOINED state and add reg tunnel
 * to the OIL */
static void pim_upstream_fhr_kat_start(struct pim_upstream *up)
{
	if (pim_upstream_could_register(up)) {
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"kat started on %s; set fhr reg state to joined",
				up->sg_str);

		PIM_UPSTREAM_FLAG_SET_FHR(up->flags);
		if (up->reg_state == PIM_REG_NOINFO)
			pim_register_join(up);
		/* becoming FHR can change the RPT/SPT selection */
		pim_upstream_update_use_rpt(up, true /*update_mroute*/);
	}
}
+
/*
 * On an RP, the PMBR value must be cleared when the
 * Keepalive Timer expires
 * KAT expiry indicates that flow is inactive. If the flow was created or
 * maintained by activity now is the time to deref it.
 *
 * Returns the upstream entry if it is still alive after dropping the
 * activity references, or NULL if the entry was deleted.
 */
struct pim_upstream *pim_upstream_keep_alive_timer_proc(
	struct pim_upstream *up)
{
	struct pim_instance *pim;

	pim = up->channel_oil->pim;

	if (PIM_UPSTREAM_FLAG_TEST_DISABLE_KAT_EXPIRY(up->flags)) {
		/* if the router is a PIM vxlan encapsulator we prevent expiry
		 * of KAT as the mroute is pre-setup without any traffic
		 */
		pim_upstream_keep_alive_timer_start(up, pim->keep_alive_time);
		return up;
	}

	if (I_am_RP(pim, up->sg.grp)) {
		/*
		 * Handle Border Router
		 * We need to do more here :)
		 * But this is the start.
		 */
	}

	/* source is no longer active - pull the SA from MSDP's cache */
	pim_msdp_sa_local_del(pim, &up->sg);

	/* JoinDesired can change when KAT is started or stopped */
	pim_upstream_update_join_desired(pim, up);

	/* if entry was created because of activity we need to deref it */
	if (PIM_UPSTREAM_FLAG_TEST_SRC_STREAM(up->flags)) {
		pim_upstream_fhr_kat_expiry(pim, up);
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"kat expired on %s[%s]; remove stream reference",
				up->sg_str, pim->vrf->name);
		PIM_UPSTREAM_FLAG_UNSET_SRC_STREAM(up->flags);

		/* Return if upstream entry got deleted. */
		if (!pim_upstream_del(pim, up, __func__))
			return NULL;
	}
	if (PIM_UPSTREAM_FLAG_TEST_SRC_NOCACHE(up->flags)) {
		PIM_UPSTREAM_FLAG_UNSET_SRC_NOCACHE(up->flags);

		if (!pim_upstream_del(pim, up, __func__))
			return NULL;
	}

	/* upstream reference would have been added to track the local
	 * membership if it is LHR. We have to clear it when KAT expires.
	 * Otherwise would result in stale entry with uncleared ref count.
	 */
	if (PIM_UPSTREAM_FLAG_TEST_SRC_LHR(up->flags)) {
		/* grab the parent first: deleting the (S,G) below may be
		 * the last reference to it
		 */
		struct pim_upstream *parent = up->parent;

		PIM_UPSTREAM_FLAG_UNSET_SRC_LHR(up->flags);
		up = pim_upstream_del(pim, up, __func__);

		/* refresh the (*,G) Join/Prune now that the (S,G) may
		 * be gone
		 */
		if (parent) {
			pim_jp_agg_single_upstream_send(&parent->rpf, parent,
							true);
		}
	}

	return up;
}
/*
 * KAT expiry handler: pull fresh traffic counters first; only run the
 * expiry processing when no new activity restarted the timer.
 */
static void pim_upstream_keep_alive_timer(struct event *t)
{
	struct pim_upstream *up = EVENT_ARG(t);

	if (!pim_upstream_sg_running_proc(up))
		pim_upstream_keep_alive_timer_proc(up);
}
+
/*
 * (Re)start the keepalive timer for "time" seconds, then refresh MSDP
 * SA state and JoinDesired, both of which depend on KAT running.
 */
void pim_upstream_keep_alive_timer_start(struct pim_upstream *up, uint32_t time)
{
	/* starting KAT without a stream reference is unexpected; trace it */
	if (!PIM_UPSTREAM_FLAG_TEST_SRC_STREAM(up->flags)) {
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug("kat start on %s with no stream reference",
				   up->sg_str);
	}
	EVENT_OFF(up->t_ka_timer);
	event_add_timer(router->master, pim_upstream_keep_alive_timer, up, time,
			&up->t_ka_timer);

	/* any time keepalive is started against a SG we will have to
	 * re-evaluate our active source database */
	pim_msdp_sa_local_update(up);
	/* JoinDesired can change when KAT is started or stopped */
	pim_upstream_update_join_desired(up->pim, up);
}
+
/* MSDP on RP needs to know if a source is registerable to this RP */
static void pim_upstream_msdp_reg_timer(struct event *t)
{
	struct pim_upstream *up = EVENT_ARG(t);
	struct pim_instance *pim = up->channel_oil->pim;

	/* source is no longer active - pull the SA from MSDP's cache */
	pim_msdp_sa_local_del(pim, &up->sg);
}
+
/* (Re)arm the registerable-source timer and refresh the local MSDP SA;
 * the timer pulls the SA once registers stop arriving.
 */
void pim_upstream_msdp_reg_timer_start(struct pim_upstream *up)
{
	EVENT_OFF(up->t_msdp_reg_timer);
	event_add_timer(router->master, pim_upstream_msdp_reg_timer, up,
			PIM_MSDP_REG_RXED_PERIOD, &up->t_msdp_reg_timer);

	pim_msdp_sa_local_update(up);
}
+
+/*
+ * 4.2.1 Last-Hop Switchover to the SPT
+ *
+ * In Sparse-Mode PIM, last-hop routers join the shared tree towards the
+ * RP. Once traffic from sources to joined groups arrives at a last-hop
+ * router, it has the option of switching to receive the traffic on a
+ * shortest path tree (SPT).
+ *
+ * The decision for a router to switch to the SPT is controlled as
+ * follows:
+ *
+ * void
+ * CheckSwitchToSpt(S,G) {
+ * if ( ( pim_include(*,G) (-) pim_exclude(S,G)
+ * (+) pim_include(S,G) != NULL )
+ * AND SwitchToSptDesired(S,G) ) {
+ * # Note: Restarting the KAT will result in the SPT switch
+ * set KeepaliveTimer(S,G) to Keepalive_Period
+ * }
+ * }
+ *
+ * SwitchToSptDesired(S,G) is a policy function that is implementation
+ * defined. An "infinite threshold" policy can be implemented by making
+ * SwitchToSptDesired(S,G) return false all the time. A "switch on
+ * first packet" policy can be implemented by making
+ * SwitchToSptDesired(S,G) return true once a single packet has been
+ * received for the source and group.
+ */
+int pim_upstream_switch_to_spt_desired_on_rp(struct pim_instance *pim,
+ pim_sgaddr *sg)
+{
+ if (I_am_RP(pim, sg->grp))
+ return 1;
+
+ return 0;
+}
+
+int pim_upstream_is_sg_rpt(struct pim_upstream *up)
+{
+ struct listnode *chnode;
+ struct pim_ifchannel *ch;
+
+ for (ALL_LIST_ELEMENTS_RO(up->ifchannels, chnode, ch)) {
+ if (PIM_IF_FLAG_TEST_S_G_RPT(ch->flags))
+ return 1;
+ }
+
+ return 0;
+}
+/*
+ * After receiving a packet set SPTbit:
+ * void
+ * Update_SPTbit(S,G,iif) {
+ * if ( iif == RPF_interface(S)
+ * AND JoinDesired(S,G) == true
+ * AND ( DirectlyConnected(S) == true
+ * OR RPF_interface(S) != RPF_interface(RP(G))
+ * OR inherited_olist(S,G,rpt) == NULL
+ * OR ( ( RPF'(S,G) == RPF'(*,G) ) AND
+ * ( RPF'(S,G) != NULL ) )
+ * OR ( I_Am_Assert_Loser(S,G,iif) ) {
+ * Set SPTbit(S,G) to true
+ * }
+ * }
+ */
+void pim_upstream_set_sptbit(struct pim_upstream *up,
+ struct interface *incoming)
+{
+ struct pim_upstream *starup = up->parent;
+
+ // iif == RPF_interfvace(S)
+ if (up->rpf.source_nexthop.interface != incoming) {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug(
+ "%s: Incoming Interface: %s is different than RPF_interface(S) %s",
+ __func__, incoming->name,
+ up->rpf.source_nexthop.interface->name);
+ return;
+ }
+
+ // AND JoinDesired(S,G) == true
+ if (!pim_upstream_evaluate_join_desired(up->channel_oil->pim, up)) {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: %s Join is not Desired", __func__,
+ up->sg_str);
+ return;
+ }
+
+ // DirectlyConnected(S) == true
+ if (pim_if_connected_to_source(up->rpf.source_nexthop.interface,
+ up->sg.src)) {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: %s is directly connected to the source",
+ __func__, up->sg_str);
+ up->sptbit = PIM_UPSTREAM_SPTBIT_TRUE;
+ return;
+ }
+
+ // OR RPF_interface(S) != RPF_interface(RP(G))
+ if (!starup
+ || up->rpf.source_nexthop
+ .interface != starup->rpf.source_nexthop.interface) {
+ struct pim_upstream *starup = up->parent;
+
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug(
+ "%s: %s RPF_interface(S) != RPF_interface(RP(G))",
+ __func__, up->sg_str);
+ up->sptbit = PIM_UPSTREAM_SPTBIT_TRUE;
+
+ pim_jp_agg_single_upstream_send(&starup->rpf, starup, true);
+ return;
+ }
+
+ // OR inherited_olist(S,G,rpt) == NULL
+ if (pim_upstream_is_sg_rpt(up)
+ && pim_upstream_empty_inherited_olist(up)) {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: %s OR inherited_olist(S,G,rpt) == NULL",
+ __func__, up->sg_str);
+ up->sptbit = PIM_UPSTREAM_SPTBIT_TRUE;
+ return;
+ }
+
+ // OR ( ( RPF'(S,G) == RPF'(*,G) ) AND
+ // ( RPF'(S,G) != NULL ) )
+ if (up->parent && pim_rpf_is_same(&up->rpf, &up->parent->rpf)) {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: %s RPF'(S,G) is the same as RPF'(*,G)",
+ __func__, up->sg_str);
+ up->sptbit = PIM_UPSTREAM_SPTBIT_TRUE;
+ return;
+ }
+
+ return;
+}
+
+const char *pim_upstream_state2str(enum pim_upstream_state join_state)
+{
+ switch (join_state) {
+ case PIM_UPSTREAM_NOTJOINED:
+ return "NotJoined";
+ case PIM_UPSTREAM_JOINED:
+ return "Joined";
+ }
+ return "Unknown";
+}
+
+const char *pim_reg_state2str(enum pim_reg_state reg_state, char *state_str,
+ size_t state_str_len)
+{
+ switch (reg_state) {
+ case PIM_REG_NOINFO:
+ strlcpy(state_str, "RegNoInfo", state_str_len);
+ break;
+ case PIM_REG_JOIN:
+ strlcpy(state_str, "RegJoined", state_str_len);
+ break;
+ case PIM_REG_JOIN_PENDING:
+ strlcpy(state_str, "RegJoinPend", state_str_len);
+ break;
+ case PIM_REG_PRUNE:
+ strlcpy(state_str, "RegPrune", state_str_len);
+ break;
+ }
+ return state_str;
+}
+
/*
 * Register-Stop timer expiry: drive the per-upstream register state
 * machine (NOINFO / JOIN / JOIN_PENDING / PRUNE).
 */
static void pim_upstream_register_stop_timer(struct event *t)
{
	struct pim_interface *pim_ifp;
	struct pim_instance *pim;
	struct pim_upstream *up;

	up = EVENT_ARG(t);
	pim = up->channel_oil->pim;

	if (PIM_DEBUG_PIM_TRACE) {
		char state_str[PIM_REG_STATE_STR_LEN];

		zlog_debug("%s: (S,G)=%s[%s] upstream register stop timer %s",
			   __func__, up->sg_str, pim->vrf->name,
			   pim_reg_state2str(up->reg_state, state_str,
					     sizeof(state_str)));
	}

	switch (up->reg_state) {
	case PIM_REG_JOIN_PENDING:
		/* suppression expired without a new Register-Stop:
		 * resume registering via the reg tunnel
		 */
		up->reg_state = PIM_REG_JOIN;
		pim_channel_add_oif(up->channel_oil, pim->regiface,
				    PIM_OIF_FLAG_PROTO_PIM,
				    __func__);
		pim_vxlan_update_sg_reg_state(pim, up, true /*reg_join*/);
		break;
	case PIM_REG_JOIN:
		break;
	case PIM_REG_PRUNE:
		/* This is equivalent to Couldreg -> False */
		if (!up->rpf.source_nexthop.interface) {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug("%s: up %s RPF is not present",
					   __func__, up->sg_str);
			up->reg_state = PIM_REG_NOINFO;
			PIM_UPSTREAM_FLAG_UNSET_FHR(up->flags);
			return;
		}

		pim_ifp = up->rpf.source_nexthop.interface->info;
		if (!pim_ifp) {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s: Interface: %s is not configured for pim",
					__func__,
					up->rpf.source_nexthop.interface->name);
			return;
		}
		/* probe the RP with a NULL register before suppression
		 * runs out
		 */
		up->reg_state = PIM_REG_JOIN_PENDING;
		pim_upstream_start_register_stop_timer(up, 1);

		/* ...unless we are the RP and the flow has been idle
		 * longer than the keepalive period
		 */
		if (((up->channel_oil->cc.lastused / 100)
		     > pim->keep_alive_time)
		    && (I_am_RP(pim_ifp->pim, up->sg.grp))) {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s: Stop sending the register, because I am the RP and we haven't seen a packet in a while",
					__func__);
			return;
		}
		pim_null_register_send(up);
		break;
	case PIM_REG_NOINFO:
		break;
	}
}
+
/*
 * Arm the register-stop timer.  With null_register set, the interval is
 * the register-probe time; otherwise it is a jittered register-suppress
 * interval minus the probe time, so a NULL register probe goes out
 * before suppression would expire.
 */
void pim_upstream_start_register_stop_timer(struct pim_upstream *up,
					    int null_register)
{
	uint32_t time;

	EVENT_OFF(up->t_rs_timer);

	if (!null_register) {
		/* jitter in [0.5, 1.5] * Register_Suppression_Time */
		uint32_t lower = (0.5 * router->register_suppress_time);
		uint32_t upper = (1.5 * router->register_suppress_time);
		time = lower + (frr_weak_random() % (upper - lower + 1));
		/* Make sure we don't wrap around */
		if (time >= router->register_probe_time)
			time -= router->register_probe_time;
		else
			time = 0;
	} else
		time = router->register_probe_time;

	if (PIM_DEBUG_PIM_TRACE) {
		zlog_debug(
			"%s: (S,G)=%s Starting upstream register stop timer %d",
			__func__, up->sg_str, time);
	}
	event_add_timer(router->master, pim_upstream_register_stop_timer, up,
			time, &up->t_rs_timer);
}
+
/*
 * Walk every pim-enabled interface and add to the channel oil each one
 * on which JoinDesired holds for this (S,G) and/or its (*,G) parent.
 * Returns the number of outgoing interfaces added.
 */
int pim_upstream_inherited_olist_decide(struct pim_instance *pim,
					struct pim_upstream *up)
{
	struct interface *ifp;
	struct pim_ifchannel *ch, *starch;
	struct pim_upstream *starup = up->parent;
	int output_intf = 0;

	if (!up->rpf.source_nexthop.interface)
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug("%s: up %s RPF is not present", __func__,
				   up->sg_str);

	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp;

		if (!ifp->info)
			continue;

		ch = pim_ifchannel_find(ifp, &up->sg);

		/* the (*,G) channel on the same interface, if any */
		if (starup)
			starch = pim_ifchannel_find(ifp, &starup->sg);
		else
			starch = NULL;

		if (!ch && !starch)
			continue;

		pim_ifp = ifp->info;
		/* skip interfaces on which this MLAG entry is non-DF */
		if (PIM_I_am_DualActive(pim_ifp)
		    && PIM_UPSTREAM_FLAG_TEST_MLAG_INTERFACE(up->flags)
		    && (PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags)
			|| !PIM_UPSTREAM_FLAG_TEST_MLAG_PEER(up->flags)))
			continue;
		if (pim_upstream_evaluate_join_desired_interface(up, ch,
								 starch)) {
			/* record which protocol(s) own this oif */
			int flag = 0;

			if (!ch)
				flag = PIM_OIF_FLAG_PROTO_STAR;
			else {
				if (PIM_IF_FLAG_TEST_PROTO_IGMP(ch->flags))
					flag = PIM_OIF_FLAG_PROTO_GM;
				if (PIM_IF_FLAG_TEST_PROTO_PIM(ch->flags))
					flag |= PIM_OIF_FLAG_PROTO_PIM;
				if (starch)
					flag |= PIM_OIF_FLAG_PROTO_STAR;
			}

			pim_channel_add_oif(up->channel_oil, ifp, flag,
					    __func__);
			output_intf++;
		}
	}

	return output_intf;
}
+
/*
 * For a given upstream, determine the inherited_olist
 * and apply it.
 *
 * inherited_olist(S,G,rpt) =
 *	( joins(*,*,RP(G)) (+) joins(*,G) (-) prunes(S,G,rpt) )
 *	(+) ( pim_include(*,G) (-) pim_exclude(S,G))
 *	(-) ( lost_assert(*,G) (+) lost_assert(S,G,rpt) )
 *
 * inherited_olist(S,G) =
 *	inherited_olist(S,G,rpt) (+)
 *	joins(S,G) (+) pim_include(S,G) (-) lost_assert(S,G)
 *
 * return 1 if there are any output interfaces
 * return 0 if there are not any output interfaces
 */
int pim_upstream_inherited_olist(struct pim_instance *pim,
				 struct pim_upstream *up)
{
	int output_intf = pim_upstream_inherited_olist_decide(pim, up);

	/*
	 * If we have output_intf switch state to Join and work like normal
	 * If we don't have an output_intf that means we are probably a
	 * switch on a stick so turn on forwarding to just accept the
	 * incoming packets so we don't bother the other stuff!
	 */
	pim_upstream_update_join_desired(pim, up);

	if (!output_intf)
		forward_on(up);

	return output_intf;
}
+
/* Non-zero when the installed channel oil has no outgoing interfaces,
 * i.e. the inherited olist of this upstream is empty.
 */
int pim_upstream_empty_inherited_olist(struct pim_upstream *up)
{
	return pim_channel_oil_empty(up->channel_oil);
}
+
/*
 * When we have a new neighbor,
 * find upstreams that don't have their rpf_addr
 * set and see if the new neighbor allows
 * the join to be sent
 */
void pim_upstream_find_new_rpf(struct pim_instance *pim)
{
	struct pim_upstream *up;
	struct pim_rpf old;
	enum pim_rpf_result rpf_result;

	/*
	 * Scan all (S,G) upstreams searching for RPF'(S,G)=neigh_addr
	 */
	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->upstream_addr)) {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s: RP not configured for Upstream %s",
					__func__, up->sg_str);
			continue;
		}

		if (pim_rpf_addr_is_inaddr_any(&up->rpf)) {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s: Upstream %s without a path to send join, checking",
					__func__, up->sg_str);
			old.source_nexthop.interface =
				up->rpf.source_nexthop.interface;
			rpf_result = pim_rpf_update(pim, up, &old, __func__);
			/* tell zebra when the RPF changed or newly failed */
			if (rpf_result == PIM_RPF_CHANGED ||
			    (rpf_result == PIM_RPF_FAILURE &&
			     old.source_nexthop.interface))
				pim_zebra_upstream_rpf_changed(pim, up, &old);
			/* update kernel multicast forwarding cache (MFC) */
			pim_upstream_mroute_iif_update(up->channel_oil,
						       __func__);
		}
	}
	pim_zebra_update_all_interfaces(pim);
}
+
+unsigned int pim_upstream_hash_key(const void *arg)
+{
+ const struct pim_upstream *up = arg;
+
+ return pim_sgaddr_hash(up->sg, 0);
+}
+
/*
 * Instance teardown: delete every upstream entry, stop the timers of
 * any entry that survives the delete, then release the RB tree and the
 * timer wheel.
 */
void pim_upstream_terminate(struct pim_instance *pim)
{
	struct pim_upstream *up;

	while ((up = rb_pim_upstream_first(&pim->upstream_head))) {
		/* pim_upstream_del() returns NULL once the entry is
		 * actually freed; a non-NULL return means references
		 * remain and its timers must be stopped explicitly
		 */
		if (pim_upstream_del(pim, up, __func__))
			pim_upstream_timers_stop(up);
	}

	rb_pim_upstream_fini(&pim->upstream_head);

	if (pim->upstream_sg_wheel)
		wheel_delete(pim->upstream_sg_wheel);
	pim->upstream_sg_wheel = NULL;
}
+
+bool pim_upstream_equal(const void *arg1, const void *arg2)
+{
+ const struct pim_upstream *up1 = (const struct pim_upstream *)arg1;
+ const struct pim_upstream *up2 = (const struct pim_upstream *)arg2;
+
+ return !pim_sgaddr_cmp(up1->sg, up2->sg);
+}
+
+/* rfc4601:section-4.2:"Data Packet Forwarding Rules" defines
+ * the cases where kat has to be restarted on rxing traffic -
+ *
+ * if( DirectlyConnected(S) == true AND iif == RPF_interface(S) ) {
+ * set KeepaliveTimer(S,G) to Keepalive_Period
+ * # Note: a register state transition or UpstreamJPState(S,G)
+ * # transition may happen as a result of restarting
+ * # KeepaliveTimer, and must be dealt with here.
+ * }
+ * if( iif == RPF_interface(S) AND UpstreamJPState(S,G) == Joined AND
+ * inherited_olist(S,G) != NULL ) {
+ * set KeepaliveTimer(S,G) to Keepalive_Period
+ * }
+ */
+static bool pim_upstream_kat_start_ok(struct pim_upstream *up)
+{
+ struct channel_oil *c_oil = up->channel_oil;
+ struct interface *ifp = up->rpf.source_nexthop.interface;
+ struct pim_interface *pim_ifp;
+ struct pim_instance *pim = up->channel_oil->pim;
+
+ /* "iif == RPF_interface(S)" check is not easy to do as the info
+ * we get from the kernel/ASIC is really a "lookup/key hit".
+ * So we will do an approximate check here to avoid starting KAT
+ * because of (S,G,rpt) forwarding on a non-LHR.
+ */
+ if (!ifp)
+ return false;
+
+ pim_ifp = ifp->info;
+ if (pim_ifp->mroute_vif_index != *oil_incoming_vif(c_oil))
+ return false;
+
+ if (pim_if_connected_to_source(up->rpf.source_nexthop.interface,
+ up->sg.src)) {
+ return true;
+ }
+
+ if ((up->join_state == PIM_UPSTREAM_JOINED)
+ && !pim_upstream_empty_inherited_olist(up)) {
+ if (I_am_RP(pim, up->sg.grp))
+ return true;
+ }
+
+ return false;
+}
+
/*
 * Poll the kernel traffic counters for this (S,G) and restart KAT if
 * the flow is still active.  Returns true when KAT was (re)started.
 */
static bool pim_upstream_sg_running_proc(struct pim_upstream *up)
{
	bool rv = false;
	struct pim_instance *pim = up->pim;

	/* no mroute installed means no counters to inspect */
	if (!up->channel_oil->installed)
		return rv;

	pim_mroute_update_counters(up->channel_oil);

	/* Have we seen packets? */
	if ((up->channel_oil->cc.oldpktcnt >= up->channel_oil->cc.pktcnt)
	    && (up->channel_oil->cc.lastused / 100 > 30)) {
		if (PIM_DEBUG_PIM_TRACE) {
			zlog_debug(
				"%s[%s]: %s old packet count is equal or lastused is greater than 30, (%ld,%ld,%lld)",
				__func__, up->sg_str, pim->vrf->name,
				up->channel_oil->cc.oldpktcnt,
				up->channel_oil->cc.pktcnt,
				up->channel_oil->cc.lastused / 100);
		}
		return rv;
	}

	if (pim_upstream_kat_start_ok(up)) {
		/* Add a source reference to the stream if
		 * one doesn't already exist */
		if (!PIM_UPSTREAM_FLAG_TEST_SRC_STREAM(up->flags)) {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"source reference created on kat restart %s[%s]",
					up->sg_str, pim->vrf->name);

			pim_upstream_ref(up, PIM_UPSTREAM_FLAG_MASK_SRC_STREAM,
					 __func__);
			PIM_UPSTREAM_FLAG_SET_SRC_STREAM(up->flags);
			pim_upstream_fhr_kat_start(up);
		}
		pim_upstream_keep_alive_timer_start(up, pim->keep_alive_time);
		rv = true;
	} else if (PIM_UPSTREAM_FLAG_TEST_SRC_LHR(up->flags)) {
		/* on the LHR keep the entry alive while traffic flows */
		pim_upstream_keep_alive_timer_start(up, pim->keep_alive_time);
		rv = true;
	}

	/* traffic arrived on the RPF interface: SPTbit may become true */
	if ((up->sptbit != PIM_UPSTREAM_SPTBIT_TRUE) &&
	    (up->rpf.source_nexthop.interface)) {
		pim_upstream_set_sptbit(up, up->rpf.source_nexthop.interface);
		pim_upstream_update_could_assert(up);
	}

	return rv;
}
+
/*
 * Code to check and see if we've received packets on a S,G mroute
 * and if so to set the SPT bit appropriately.
 * Timer-wheel callback: invoked periodically for every upstream entry.
 */
static void pim_upstream_sg_running(void *arg)
{
	struct pim_upstream *up = (struct pim_upstream *)arg;
	struct pim_instance *pim = up->channel_oil->pim;

	/* No packet can have arrived here if this is the case */
	if (!up->channel_oil->installed) {
		if (PIM_DEBUG_TRACE)
			zlog_debug("%s: %s[%s] is not installed in mroute",
				   __func__, up->sg_str, pim->vrf->name);
		return;
	}

	/*
	 * This is a bit of a hack
	 * We've noted that we should rescan but
	 * we've missed the window for doing so in
	 * pim_zebra.c for some reason.  I am
	 * only doing this at this point in time
	 * to get us up and working for the moment
	 */
	if (up->channel_oil->oil_inherited_rescan) {
		if (PIM_DEBUG_TRACE)
			zlog_debug(
				"%s: Handling unscanned inherited_olist for %s[%s]",
				__func__, up->sg_str, pim->vrf->name);
		pim_upstream_inherited_olist_decide(pim, up);
		up->channel_oil->oil_inherited_rescan = 0;
	}

	pim_upstream_sg_running_proc(up);
}
+
+void pim_upstream_add_lhr_star_pimreg(struct pim_instance *pim)
+{
+ struct pim_upstream *up;
+
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ if (!pim_addr_is_any(up->sg.src))
+ continue;
+
+ if (!PIM_UPSTREAM_FLAG_TEST_CAN_BE_LHR(up->flags))
+ continue;
+
+ pim_channel_add_oif(up->channel_oil, pim->regiface,
+ PIM_OIF_FLAG_PROTO_GM, __func__);
+ }
+}
+
+void pim_upstream_spt_prefix_list_update(struct pim_instance *pim,
+ struct prefix_list *pl)
+{
+ const char *pname = prefix_list_name(pl);
+
+ if (pim->spt.plist && strcmp(pim->spt.plist, pname) == 0) {
+ pim_upstream_remove_lhr_star_pimreg(pim, pname);
+ }
+}
+
/*
 * nlist -> The new prefix list
 *
 * Per Group Application of pimreg to the OIL
 * If the prefix list tells us DENY then
 * we need to Switchover to SPT immediately
 * so add the pimreg.
 * If the prefix list tells us to ACCEPT then
 * we need to Never do the SPT so remove
 * the interface
 *
 */
void pim_upstream_remove_lhr_star_pimreg(struct pim_instance *pim,
					 const char *nlist)
{
	struct pim_upstream *up;
	struct prefix_list *np;
	struct prefix g;
	enum prefix_list_type apply_new;

	np = prefix_list_lookup(PIM_AFI, nlist);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		/* only (*,G) entries that can act as LHR are affected */
		if (!pim_addr_is_any(up->sg.src))
			continue;

		if (!PIM_UPSTREAM_FLAG_TEST_CAN_BE_LHR(up->flags))
			continue;

		/* no prefix-list at all: drop pimreg everywhere */
		if (!nlist) {
			pim_channel_del_oif(up->channel_oil, pim->regiface,
					    PIM_OIF_FLAG_PROTO_GM, __func__);
			continue;
		}
		pim_addr_to_prefix(&g, up->sg.grp);
		apply_new = prefix_list_apply_ext(np, NULL, &g, true);
		if (apply_new == PREFIX_DENY)
			pim_channel_add_oif(up->channel_oil, pim->regiface,
					    PIM_OIF_FLAG_PROTO_GM, __func__);
		else
			pim_channel_del_oif(up->channel_oil, pim->regiface,
					    PIM_OIF_FLAG_PROTO_GM, __func__);
	}
}
+
/*
 * Per-instance init: create the activity-polling timer wheel and the
 * upstream RB tree.
 */
void pim_upstream_init(struct pim_instance *pim)
{
	char name[64];

	snprintf(name, sizeof(name), "PIM %s Timer Wheel", pim->vrf->name);
	/* 31000/100 are the wheel period and slot count; presumably ms,
	 * giving roughly one pim_upstream_sg_running() visit per entry
	 * per period - confirm against wheel_init()
	 */
	pim->upstream_sg_wheel =
		wheel_init(router->master, 31000, 100, pim_upstream_hash_key,
			   pim_upstream_sg_running, name);

	rb_pim_upstream_init(&pim->upstream_head);
}
diff --git a/pimd/pim_upstream.h b/pimd/pim_upstream.h
new file mode 100644
index 0000000..4e0926e
--- /dev/null
+++ b/pimd/pim_upstream.h
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_UPSTREAM_H
+#define PIM_UPSTREAM_H
+
+#include <zebra.h>
+#include <prefix.h>
+#include "plist.h"
+
+#include "pim_rpf.h"
+#include "pim_str.h"
+#include "pim_ifchannel.h"
+
+#define PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED (1 << 0)
+#define PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED_UPDATED (1 << 1)
+#define PIM_UPSTREAM_FLAG_MASK_FHR (1 << 2)
+#define PIM_UPSTREAM_FLAG_MASK_SRC_IGMP (1 << 3)
+#define PIM_UPSTREAM_FLAG_MASK_SRC_PIM (1 << 4)
+#define PIM_UPSTREAM_FLAG_MASK_SRC_STREAM (1 << 5)
+#define PIM_UPSTREAM_FLAG_MASK_SRC_MSDP (1 << 6)
+#define PIM_UPSTREAM_FLAG_MASK_SEND_SG_RPT_PRUNE (1 << 7)
+#define PIM_UPSTREAM_FLAG_MASK_SRC_LHR (1 << 8)
+/* In the case of pim vxlan we prime the pump by registering the
+ * vxlan source and keeping the SPT (FHR-RP) alive by sending periodic
+ * NULL registers. So we need to prevent KAT expiry because of the
+ * lack of BUM traffic.
+ */
+#define PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY (1 << 9)
+/* for pim vxlan we need to pin the IIF to lo or MLAG-ISL on the
+ * originating VTEP. This flag allows that by setting IIF to the
+ * value specified and preventing next-hop-tracking on the entry
+ */
+#define PIM_UPSTREAM_FLAG_MASK_STATIC_IIF (1 << 10)
+#define PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL (1 << 11)
+/* Disable pimreg encapsulation for a flow */
+#define PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA (1 << 12)
+/* For some MDTs we need to register the router as a source even
+ * if the not DR or directly connected on the IIF. This is typically
+ * needed on a VxLAN-AA (MLAG) setup.
+ */
+#define PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG (1 << 13)
+/* VxLAN origination mroute - SG was registered by EVPN where S is the
+ * local VTEP IP and G is the BUM multicast group address
+ */
+#define PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG (1 << 14)
+/* VxLAN termination mroute - *G entry where G is the BUM multicast group
+ * address
+ */
+#define PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM (1 << 15)
+/* MLAG mroute - synced to the MLAG peer and subject to DF (designated
+ * forwarder) election
+ */
+#define PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN (1 << 16)
+/* MLAG mroute that lost the DF election with peer and is installed in
+ * a dormant state i.e. MLAG OIFs are removed from the MFC.
+ * In most cases the OIL is empty (but not always), simply
+ * blackholing the traffic pulled down to the LHR.
+ */
+#define PIM_UPSTREAM_FLAG_MASK_MLAG_NON_DF (1 << 17)
+/* MLAG mroute rxed from the peer MLAG switch */
+#define PIM_UPSTREAM_FLAG_MASK_MLAG_PEER (1 << 18)
+/*
+ * We are creating a non-joined upstream data structure
+ * for this S,G as that we want to have a channel oil
+ * associated with an upstream
+ */
+#define PIM_UPSTREAM_FLAG_MASK_SRC_NOCACHE (1 << 19)
+/* By default as SG entry will use the SPT for forwarding traffic
+ * unless it was setup as a result of a Prune(S,G,rpt) from a
+ * downstream router and has JoinDesired(S,G) as False.
+ * This flag is only relevant for (S,G) entries.
+ */
+#define PIM_UPSTREAM_FLAG_MASK_USE_RPT (1 << 20)
+/* PIM syncs upstream entries to peer nodes via MLAG in two cases:
+ * one is to support plain PIM redundancy and the other is to support
+ * PIM redundancy on MLAG member interfaces.
+ */
+#define PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE (1 << 21)
+
+
+#define PIM_UPSTREAM_FLAG_ALL 0xFFFFFFFF
+
+#define PIM_UPSTREAM_FLAG_TEST_DR_JOIN_DESIRED(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED)
+#define PIM_UPSTREAM_FLAG_TEST_DR_JOIN_DESIRED_UPDATED(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED_UPDATED)
+#define PIM_UPSTREAM_FLAG_TEST_FHR(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_FHR)
+#define PIM_UPSTREAM_FLAG_TEST_SRC_IGMP(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_SRC_IGMP)
+#define PIM_UPSTREAM_FLAG_TEST_SRC_PIM(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_SRC_PIM)
+#define PIM_UPSTREAM_FLAG_TEST_SRC_STREAM(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_SRC_STREAM)
+#define PIM_UPSTREAM_FLAG_TEST_SRC_MSDP(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_SRC_MSDP)
+#define PIM_UPSTREAM_FLAG_TEST_SEND_SG_RPT_PRUNE(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_SEND_SG_RPT_PRUNE)
+#define PIM_UPSTREAM_FLAG_TEST_SRC_LHR(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_SRC_LHR)
+#define PIM_UPSTREAM_FLAG_TEST_DISABLE_KAT_EXPIRY(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY)
+#define PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_STATIC_IIF)
+#define PIM_UPSTREAM_FLAG_TEST_ALLOW_IIF_IN_OIL(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL)
+#define PIM_UPSTREAM_FLAG_TEST_NO_PIMREG_DATA(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA)
+#define PIM_UPSTREAM_FLAG_TEST_FORCE_PIMREG(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG)
+#define PIM_UPSTREAM_FLAG_TEST_SRC_VXLAN_ORIG(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG)
+#define PIM_UPSTREAM_FLAG_TEST_SRC_VXLAN_TERM(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM)
+#define PIM_UPSTREAM_FLAG_TEST_SRC_VXLAN(flags) ((flags) & (PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG | PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM))
+#define PIM_UPSTREAM_FLAG_TEST_MLAG_VXLAN(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN)
+#define PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_MLAG_NON_DF)
+#define PIM_UPSTREAM_FLAG_TEST_MLAG_PEER(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_MLAG_PEER)
+#define PIM_UPSTREAM_FLAG_TEST_SRC_NOCACHE(flags) ((flags) &PIM_UPSTREAM_FLAG_MASK_SRC_NOCACHE)
+#define PIM_UPSTREAM_FLAG_TEST_USE_RPT(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_USE_RPT)
+#define PIM_UPSTREAM_FLAG_TEST_CAN_BE_LHR(flags) ((flags) & (PIM_UPSTREAM_FLAG_MASK_SRC_IGMP | PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM))
+#define PIM_UPSTREAM_FLAG_TEST_MLAG_INTERFACE(flags) ((flags)&PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE)
+
+#define PIM_UPSTREAM_FLAG_SET_DR_JOIN_DESIRED(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED)
+#define PIM_UPSTREAM_FLAG_SET_DR_JOIN_DESIRED_UPDATED(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED_UPDATED)
+#define PIM_UPSTREAM_FLAG_SET_FHR(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_FHR)
+#define PIM_UPSTREAM_FLAG_SET_SRC_IGMP(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_SRC_IGMP)
+#define PIM_UPSTREAM_FLAG_SET_SRC_PIM(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_SRC_PIM)
+#define PIM_UPSTREAM_FLAG_SET_SRC_STREAM(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_SRC_STREAM)
+#define PIM_UPSTREAM_FLAG_SET_SRC_MSDP(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_SRC_MSDP)
+#define PIM_UPSTREAM_FLAG_SET_SEND_SG_RPT_PRUNE(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_SEND_SG_RPT_PRUNE)
+#define PIM_UPSTREAM_FLAG_SET_SRC_LHR(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_SRC_LHR)
+#define PIM_UPSTREAM_FLAG_SET_DISABLE_KAT_EXPIRY(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY)
+#define PIM_UPSTREAM_FLAG_SET_STATIC_IIF(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_STATIC_IIF)
+#define PIM_UPSTREAM_FLAG_SET_ALLOW_IIF_IN_OIL(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL)
+#define PIM_UPSTREAM_FLAG_SET_NO_PIMREG_DATA(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA)
+#define PIM_UPSTREAM_FLAG_SET_FORCE_PIMREG(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG)
+#define PIM_UPSTREAM_FLAG_SET_SRC_VXLAN_ORIG(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG)
+#define PIM_UPSTREAM_FLAG_SET_SRC_VXLAN_TERM(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM)
+#define PIM_UPSTREAM_FLAG_SET_MLAG_VXLAN(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN)
+#define PIM_UPSTREAM_FLAG_SET_MLAG_NON_DF(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_MLAG_NON_DF)
+#define PIM_UPSTREAM_FLAG_SET_MLAG_PEER(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_MLAG_PEER)
+#define PIM_UPSTREAM_FLAG_SET_USE_RPT(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_USE_RPT)
+#define PIM_UPSTREAM_FLAG_SET_MLAG_INTERFACE(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE)
+
+#define PIM_UPSTREAM_FLAG_UNSET_DR_JOIN_DESIRED(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED)
+#define PIM_UPSTREAM_FLAG_UNSET_DR_JOIN_DESIRED_UPDATED(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED_UPDATED)
+#define PIM_UPSTREAM_FLAG_UNSET_FHR(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_FHR)
+#define PIM_UPSTREAM_FLAG_UNSET_SRC_IGMP(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SRC_IGMP)
+#define PIM_UPSTREAM_FLAG_UNSET_SRC_PIM(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SRC_PIM)
+#define PIM_UPSTREAM_FLAG_UNSET_SRC_STREAM(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SRC_STREAM)
+#define PIM_UPSTREAM_FLAG_UNSET_SRC_MSDP(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SRC_MSDP)
+#define PIM_UPSTREAM_FLAG_UNSET_SEND_SG_RPT_PRUNE(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SEND_SG_RPT_PRUNE)
+#define PIM_UPSTREAM_FLAG_UNSET_SRC_LHR(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SRC_LHR)
+#define PIM_UPSTREAM_FLAG_UNSET_DISABLE_KAT_EXPIRY(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY)
+#define PIM_UPSTREAM_FLAG_UNSET_STATIC_IIF(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_STATIC_IIF)
+#define PIM_UPSTREAM_FLAG_UNSET_ALLOW_IIF_IN_OIL(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL)
+#define PIM_UPSTREAM_FLAG_UNSET_NO_PIMREG_DATA(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA)
+#define PIM_UPSTREAM_FLAG_UNSET_FORCE_PIMREG(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG)
+#define PIM_UPSTREAM_FLAG_UNSET_SRC_VXLAN_ORIG(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG)
+#define PIM_UPSTREAM_FLAG_UNSET_SRC_VXLAN_TERM(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM)
+#define PIM_UPSTREAM_FLAG_UNSET_MLAG_VXLAN(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN)
+#define PIM_UPSTREAM_FLAG_UNSET_MLAG_NON_DF(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_MLAG_NON_DF)
+#define PIM_UPSTREAM_FLAG_UNSET_MLAG_PEER(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_MLAG_PEER)
+#define PIM_UPSTREAM_FLAG_UNSET_SRC_NOCACHE(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SRC_NOCACHE)
+#define PIM_UPSTREAM_FLAG_UNSET_USE_RPT(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_USE_RPT)
+#define PIM_UPSTREAM_FLAG_UNSET_MLAG_INTERFACE(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE)
+
+/* The RPF cost is incremented by 10 if the RPF interface is the peerlink-rif.
+ * This is used to force the MLAG switch with the lowest cost to the RPF
+ * to become the MLAG DF.
+ */
+#define PIM_UPSTREAM_MLAG_PEERLINK_PLUS_METRIC 10
+
/* Upstream join state: whether a Join has been sent toward the RPF
 * neighbor for this entry (driven by pim_upstream_switch()).
 */
enum pim_upstream_state {
	PIM_UPSTREAM_NOTJOINED,
	PIM_UPSTREAM_JOINED,
};

/* Register state for this (S,G); names match the RFC 7761 Register
 * state machine (NoInfo / Join / Join-Pending / Prune) — presumably
 * driven by Register-Stop handling, confirm against pim_register.c.
 */
enum pim_reg_state {
	PIM_REG_NOINFO,
	PIM_REG_JOIN,
	PIM_REG_JOIN_PENDING,
	PIM_REG_PRUNE,
};

/* SPTbit(S,G): whether traffic for this entry is arriving on the
 * shortest-path tree (set via pim_upstream_set_sptbit()).
 */
enum pim_upstream_sptbit {
	PIM_UPSTREAM_SPTBIT_FALSE,
	PIM_UPSTREAM_SPTBIT_TRUE
};

/* MLAG-related per-upstream data. */
struct pim_up_mlag {
	/* MRIB.metric(S) from the peer switch. This is used for DF election
	 * and switch with the lowest cost wins.
	 */
	uint32_t peer_mrib_metric;
};
+
+PREDECL_RBTREE_UNIQ(rb_pim_upstream);
+/*
+ Upstream (S,G) channel in Joined state
+ (S,G) in the "Not Joined" state is not represented
+ See RFC 4601: 4.5.7. Sending (S,G) Join/Prune Message
+
+ upstream_addr : Who we are talking to.
+ For (*, G), upstream_addr is RP address or INADDR_ANY(if RP not configured)
+ For (S, G), upstream_addr is source address
+
+ rpf: contains the nexthop information to whom we are talking to.
+
+ join_state: JOINED/NOTJOINED
+
+ In the case when FRR receives IGMP/PIM (*, G) join for group G and RP is not
+ configured, then create a pim_upstream with the below information.
+ pim_upstream->upstream address: INADDR_ANY
+ pim_upstream->rpf: Unknown
+ pim_upstream->state: NOTJOINED
+
+ When a new RP gets configured for G, find the corresponding pim upstream (*,G)
+ entries and update the upstream address as new RP address if it the better one
+ for the group G.
+
+ When RP becomes reachable, populate the nexthop information in
+ pim_upstream->rpf and update the state to JOINED.
+
+*/
struct pim_upstream {
	struct pim_instance *pim; /* owning PIM instance */
	struct rb_pim_upstream_item upstream_rb; /* node in pim->upstream_head */
	/* presumably the (*,G) entry an (S,G) inherits from — confirm */
	struct pim_upstream *parent;
	pim_addr upstream_addr; /* Who we are talking to */
	pim_addr upstream_register; /*Who we received a register from*/
	pim_sgaddr sg; /* (S,G) group key */
	char sg_str[PIM_SG_LEN]; /* presumably cached printable form of sg */
	uint32_t flags; /* PIM_UPSTREAM_FLAG_MASK_* bits */
	struct channel_oil *channel_oil; /* outgoing interface list */
	struct list *sources;
	struct list *ifchannels;
	/* Counter for Dual active ifchannels*/
	uint32_t dualactive_ifchannel_count;

	enum pim_upstream_state join_state;
	enum pim_reg_state reg_state;
	enum pim_upstream_sptbit sptbit;

	/* reference count; entry freed via pim_upstream_del() when it
	 * drops — confirm exact lifecycle in pim_upstream.c */
	int ref_count;

	struct pim_rpf rpf; /* nexthop toward upstream_addr */

	struct pim_up_mlag mlag;

	/* JT(S,G): periodic Join/Prune timer — TODO confirm */
	struct event *t_join_timer;

	/*
	 * RST(S,G)
	 */
	struct event *t_rs_timer;
#define PIM_REGISTER_SUPPRESSION_PERIOD (60)
#define PIM_REGISTER_PROBE_PERIOD (5)

	/*
	 * KAT(S,G)
	 */
	struct event *t_ka_timer;
#define PIM_KEEPALIVE_PERIOD (210)
#define PIM_RP_KEEPALIVE_PERIOD \
	(3 * router->register_suppress_time + router->register_probe_time)

	/* on the RP we restart a timer to indicate if registers are being rxed
	 * for
	 * SG. This is needed by MSDP to determine its local SA cache */
	struct event *t_msdp_reg_timer;
#define PIM_MSDP_REG_RXED_PERIOD (3 * (1.5 * router->register_suppress_time))

	int64_t state_transition; /* Record current state uptime */
};
+
+static inline bool pim_upstream_is_kat_running(struct pim_upstream *up)
+{
+ return (up->t_ka_timer != NULL);
+}
+
+static inline bool pim_up_mlag_is_local(struct pim_upstream *up)
+{
+ /* XXX: extend this to also return true if the channel-oil has
+ * any AA devices
+ */
+ return (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN
+ | PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE));
+}
+
+struct pim_upstream *pim_upstream_find(struct pim_instance *pim,
+ pim_sgaddr *sg);
+struct pim_upstream *pim_upstream_find_or_add(pim_sgaddr *sg,
+ struct interface *ifp, int flags,
+ const char *name);
+struct pim_upstream *pim_upstream_add(struct pim_instance *pim, pim_sgaddr *sg,
+ struct interface *ifp, int flags,
+ const char *name,
+ struct pim_ifchannel *ch);
+void pim_upstream_ref(struct pim_upstream *up,
+ int flags, const char *name);
+struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
+ struct pim_upstream *up,
+ const char *name);
+
+bool pim_upstream_evaluate_join_desired(struct pim_instance *pim,
+ struct pim_upstream *up);
+int pim_upstream_evaluate_join_desired_interface(struct pim_upstream *up,
+ struct pim_ifchannel *ch,
+ struct pim_ifchannel *starch);
+int pim_upstream_eval_inherit_if(struct pim_upstream *up,
+ struct pim_ifchannel *ch,
+ struct pim_ifchannel *starch);
+void pim_upstream_update_join_desired(struct pim_instance *pim,
+ struct pim_upstream *up);
+
+void pim_update_suppress_timers(uint32_t suppress_time);
+void pim_upstream_join_suppress(struct pim_upstream *up, pim_addr rpf,
+ int holdtime);
+
+void pim_upstream_join_timer_decrease_to_t_override(const char *debug_label,
+ struct pim_upstream *up);
+
+void pim_upstream_join_timer_restart(struct pim_upstream *up,
+ struct pim_rpf *old);
+void pim_upstream_rpf_genid_changed(struct pim_instance *pim,
+ pim_addr neigh_addr);
+void pim_upstream_rpf_interface_changed(struct pim_upstream *up,
+ struct interface *old_rpf_ifp);
+
+void pim_upstream_update_could_assert(struct pim_upstream *up);
+void pim_upstream_update_my_assert_metric(struct pim_upstream *up);
+
+void pim_upstream_keep_alive_timer_start(struct pim_upstream *up,
+ uint32_t time);
+
+int pim_upstream_switch_to_spt_desired_on_rp(struct pim_instance *pim,
+ pim_sgaddr *sg);
+#define SwitchToSptDesiredOnRp(pim, sg) pim_upstream_switch_to_spt_desired_on_rp (pim, sg)
+int pim_upstream_is_sg_rpt(struct pim_upstream *up);
+
+void pim_upstream_set_sptbit(struct pim_upstream *up,
+ struct interface *incoming);
+
+void pim_upstream_start_register_stop_timer(struct pim_upstream *up,
+ int null_register);
+
+void pim_upstream_send_join(struct pim_upstream *up);
+
+void pim_upstream_switch(struct pim_instance *pim, struct pim_upstream *up,
+ enum pim_upstream_state new_state);
+
+const char *pim_upstream_state2str(enum pim_upstream_state join_state);
+#define PIM_REG_STATE_STR_LEN 12
+const char *pim_reg_state2str(enum pim_reg_state state, char *state_str,
+ size_t state_str_len);
+
+int pim_upstream_inherited_olist_decide(struct pim_instance *pim,
+ struct pim_upstream *up);
+int pim_upstream_inherited_olist(struct pim_instance *pim,
+ struct pim_upstream *up);
+int pim_upstream_empty_inherited_olist(struct pim_upstream *up);
+
+void pim_upstream_find_new_rpf(struct pim_instance *pim);
+void pim_upstream_msdp_reg_timer_start(struct pim_upstream *up);
+
+void pim_upstream_init(struct pim_instance *pim);
+void pim_upstream_terminate(struct pim_instance *pim);
+
+void join_timer_start(struct pim_upstream *up);
+int pim_upstream_compare(const struct pim_upstream *up1,
+ const struct pim_upstream *up2);
+DECLARE_RBTREE_UNIQ(rb_pim_upstream, struct pim_upstream, upstream_rb,
+ pim_upstream_compare);
+
+void pim_upstream_register_reevaluate(struct pim_instance *pim);
+
+void pim_upstream_add_lhr_star_pimreg(struct pim_instance *pim);
+void pim_upstream_remove_lhr_star_pimreg(struct pim_instance *pim,
+ const char *nlist);
+
+void pim_upstream_spt_prefix_list_update(struct pim_instance *pim,
+ struct prefix_list *pl);
+
+unsigned int pim_upstream_hash_key(const void *arg);
+bool pim_upstream_equal(const void *arg1, const void *arg2);
+struct pim_upstream *pim_upstream_keep_alive_timer_proc(
+ struct pim_upstream *up);
+void pim_upstream_fill_static_iif(struct pim_upstream *up,
+ struct interface *incoming);
+void pim_upstream_update_use_rpt(struct pim_upstream *up,
+ bool update_mroute);
+uint32_t pim_up_mlag_local_cost(struct pim_upstream *up);
+uint32_t pim_up_mlag_peer_cost(struct pim_upstream *up);
+void pim_upstream_reeval_use_rpt(struct pim_instance *pim);
+int pim_upstream_could_register(struct pim_upstream *up);
+#endif /* PIM_UPSTREAM_H */
diff --git a/pimd/pim_util.c b/pimd/pim_util.c
new file mode 100644
index 0000000..657e84a
--- /dev/null
+++ b/pimd/pim_util.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "prefix.h"
+#include "plist.h"
+
+#include "pim_util.h"
+
+/*
+ RFC 3376: 4.1.7. QQIC (Querier's Query Interval Code)
+
+ If QQIC < 128, QQI = QQIC
+ If QQIC >= 128, QQI = (mant | 0x10) << (exp + 3)
+
+ 0 1 2 3 4 5 6 7
+ +-+-+-+-+-+-+-+-+
+ |1| exp | mant |
+ +-+-+-+-+-+-+-+-+
+
+ Since exp=0..7 then (exp+3)=3..10, then QQI has
+ one of the following bit patterns:
+
+ exp=0: QQI = 0000.0000.1MMM.M000
+ exp=1: QQI = 0000.0001.MMMM.0000
+ ...
+ exp=6: QQI = 001M.MMM0.0000.0000
+ exp=7: QQI = 01MM.MM00.0000.0000
+ --------- ---------
+ 0x4 0x0 0x0 0x0
+*/
/*
 * Encode a 16-bit value into the 8-bit floating-point format of
 * RFC 3376 4.1.7 (QQIC): values < 128 are stored verbatim; otherwise
 * the result is 1|exp(3)|mant(4) with value ~= (mant|0x10) << (exp+3).
 * See the bit-pattern table in the comment above.
 */
uint8_t igmp_msg_encode16to8(uint16_t value)
{
	uint8_t exp;
	uint16_t probe;
	uint16_t mant;

	/* Small values are representable directly. */
	if (value < 128)
		return (uint8_t)value;

	/* Scan bits 14..8 for the highest set bit; exp falls to 0 if
	 * none of them is set (i.e. only bit 7 or bit 15 is set). */
	exp = 7;
	probe = 0x4000;
	while (exp > 0 && !(probe & value)) {
		probe >>= 1;
		--exp;
	}

	/* Four mantissa bits immediately below the located bit. */
	mant = (uint16_t)((value >> (exp + 3)) & 0x000F);

	return (uint8_t)(0x80 | ((uint8_t)exp << 4) | (uint8_t)mant);
}
+
+/*
+ RFC 3376: 4.1.7. QQIC (Querier's Query Interval Code)
+
+ If QQIC < 128, QQI = QQIC
+ If QQIC >= 128, QQI = (mant | 0x10) << (exp + 3)
+
+ 0 1 2 3 4 5 6 7
+ +-+-+-+-+-+-+-+-+
+ |1| exp | mant |
+ +-+-+-+-+-+-+-+-+
+*/
/*
 * Decode an RFC 3376 4.1.7 (QQIC) 8-bit floating-point code back to a
 * 16-bit value: codes < 128 are literal; otherwise the code holds
 * 1|exp(3)|mant(4) and the value is (mant|0x10) << (exp+3).
 */
uint16_t igmp_msg_decode8to16(uint8_t code)
{
	uint16_t mant;
	uint8_t exp;

	if (code < 128)
		return code;

	mant = (uint16_t)(code & 0x0F);
	exp = (uint8_t)((code >> 4) & 0x07);

	return (uint16_t)((mant | 0x10) << (exp + 3));
}
+
/* Log a debug-level hex dump of the packet buffer 'buf' ('size' bytes),
 * prefixed with 'label' for context. */
void pim_pkt_dump(const char *label, const uint8_t *buf, int size)
{
	zlog_debug("%s: pkt dump size=%d", label, size);
	zlog_hexdump(buf, size);
}
+
/*
 * Check whether group_addr lies in 224.0.0.0/24, the IPv4 link-local
 * multicast range.
 *
 * Returns 1 if the address is inside 224.0.0.0/24, 0 otherwise.
 *
 * Rewritten as a direct mask comparison: the previous version lazily
 * parsed "224.0.0.0/24" into function-local statics on first call,
 * which is not thread-safe and could silently misbehave if the parse
 * failed; the mask form is equivalent and stateless.
 */
int pim_is_group_224_0_0_0_24(struct in_addr group_addr)
{
	/* /24 match: top 24 bits must equal 224.0.0.0 (0xe0000000). */
	return (ntohl(group_addr.s_addr) & 0xffffff00U) == 0xe0000000U;
}
+
/*
 * Check whether group_addr lies in 224.0.0.0/4, i.e. the whole IPv4
 * multicast (class D) range.
 *
 * Returns 1 if the address is multicast, 0 otherwise.
 *
 * Rewritten as a direct mask comparison: the previous version lazily
 * parsed "224.0.0.0/4" into function-local statics on first call,
 * which is not thread-safe and could silently misbehave if the parse
 * failed; the mask form is equivalent and stateless.
 */
int pim_is_group_224_4(struct in_addr group_addr)
{
	/* /4 match: high nibble must be 0xe (224..239). */
	return (ntohl(group_addr.s_addr) & 0xf0000000U) == 0xe0000000U;
}
+
+bool pim_is_group_filtered(struct pim_interface *pim_ifp, pim_addr *grp)
+{
+ struct prefix grp_pfx;
+ struct prefix_list *pl;
+
+ if (!pim_ifp->boundary_oil_plist)
+ return false;
+
+ pim_addr_to_prefix(&grp_pfx, *grp);
+
+ pl = prefix_list_lookup(PIM_AFI, pim_ifp->boundary_oil_plist);
+ return pl ? prefix_list_apply_ext(pl, NULL, &grp_pfx, true) ==
+ PREFIX_DENY
+ : false;
+}
+
+
+/* This function returns all multicast group */
+int pim_get_all_mcast_group(struct prefix *prefix)
+{
+#if PIM_IPV == 4
+ if (!str2prefix("224.0.0.0/4", prefix))
+ return 0;
+#else
+ if (!str2prefix("FF00::0/8", prefix))
+ return 0;
+#endif
+ return 1;
+}
+
+bool pim_addr_is_multicast(pim_addr addr)
+{
+#if PIM_IPV == 4
+ if (IN_MULTICAST(ntohl(addr.s_addr)))
+ return true;
+#else
+ if (IN6_IS_ADDR_MULTICAST(&addr))
+ return true;
+#endif
+ return false;
+}
diff --git a/pimd/pim_util.h b/pimd/pim_util.h
new file mode 100644
index 0000000..c882fe4
--- /dev/null
+++ b/pimd/pim_util.h
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_UTIL_H
+#define PIM_UTIL_H
+
+#include <stdint.h>
+
+#include <zebra.h>
+
+#include "checksum.h"
+#include "pimd.h"
+#include "pim_iface.h"
+
+uint8_t igmp_msg_encode16to8(uint16_t value);
+uint16_t igmp_msg_decode8to16(uint8_t code);
+
+void pim_pkt_dump(const char *label, const uint8_t *buf, int size);
+
+int pim_is_group_224_0_0_0_24(struct in_addr group_addr);
+int pim_is_group_224_4(struct in_addr group_addr);
+bool pim_is_group_filtered(struct pim_interface *pim_ifp, pim_addr *grp);
+int pim_get_all_mcast_group(struct prefix *prefix);
+bool pim_addr_is_multicast(pim_addr addr);
+#endif /* PIM_UTIL_H */
diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c
new file mode 100644
index 0000000..0f6547e
--- /dev/null
+++ b/pimd/pim_vty.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "if.h"
+#include "linklist.h"
+#include "prefix.h"
+#include "vty.h"
+#include "vrf.h"
+#include "plist.h"
+
+#include "pimd.h"
+#include "pim_vty.h"
+#include "pim_iface.h"
+#include "pim_str.h"
+#include "pim_ssmpingd.h"
+#include "pim_pim.h"
+#include "pim_oil.h"
+#include "pim_static.h"
+#include "pim_rp.h"
+#include "pim_msdp.h"
+#include "pim_ssm.h"
+#include "pim_bfd.h"
+#include "pim_bsm.h"
+#include "pim_vxlan.h"
+#include "pim6_mld.h"
+
/*
 * Write the currently enabled "debug ..." lines to the vty, as part of
 * configuration output.
 *
 * Returns the number of lines written.
 */
int pim_debug_config_write(struct vty *vty)
{
	int writes = 0;

	if (PIM_DEBUG_MSDP_EVENTS) {
		vty_out(vty, "debug msdp events\n");
		++writes;
	}
	if (PIM_DEBUG_MSDP_PACKETS) {
		vty_out(vty, "debug msdp packets\n");
		++writes;
	}
	if (PIM_DEBUG_MSDP_INTERNAL) {
		vty_out(vty, "debug msdp internal\n");
		++writes;
	}
	if (PIM_DEBUG_GM_EVENTS) {
		vty_out(vty, "debug " GM_AF_DBG " events\n");
		++writes;
	}
	if (PIM_DEBUG_GM_PACKETS) {
		vty_out(vty, "debug " GM_AF_DBG " packets\n");
		++writes;
	}
	/* PIM_DEBUG_GM_TRACE catches _DETAIL too */
	if (router->debugs & PIM_MASK_GM_TRACE) {
		vty_out(vty, "debug " GM_AF_DBG " trace\n");
		++writes;
	}
	if (PIM_DEBUG_GM_TRACE_DETAIL) {
		vty_out(vty, "debug " GM_AF_DBG " trace detail\n");
		++writes;
	}

	/* PIM_DEBUG_MROUTE catches _DETAIL too */
	if (router->debugs & PIM_MASK_MROUTE) {
		vty_out(vty, "debug " PIM_MROUTE_DBG "\n");
		++writes;
	}
	if (PIM_DEBUG_MROUTE_DETAIL) {
		vty_out(vty, "debug " PIM_MROUTE_DBG " detail\n");
		++writes;
	}

	if (PIM_DEBUG_MTRACE) {
		vty_out(vty, "debug mtrace\n");
		++writes;
	}

	if (PIM_DEBUG_PIM_EVENTS) {
		vty_out(vty, "debug " PIM_AF_DBG " events\n");
		++writes;
	}
	if (PIM_DEBUG_PIM_PACKETS) {
		vty_out(vty, "debug " PIM_AF_DBG " packets\n");
		++writes;
	}
	if (PIM_DEBUG_PIM_PACKETDUMP_SEND) {
		vty_out(vty, "debug " PIM_AF_DBG " packet-dump send\n");
		++writes;
	}
	if (PIM_DEBUG_PIM_PACKETDUMP_RECV) {
		vty_out(vty, "debug " PIM_AF_DBG " packet-dump receive\n");
		++writes;
	}

	/* PIM_DEBUG_PIM_TRACE catches _DETAIL too */
	if (router->debugs & PIM_MASK_PIM_TRACE) {
		vty_out(vty, "debug " PIM_AF_DBG " trace\n");
		++writes;
	}
	if (PIM_DEBUG_PIM_TRACE_DETAIL) {
		vty_out(vty, "debug " PIM_AF_DBG " trace detail\n");
		++writes;
	}

	if (PIM_DEBUG_ZEBRA) {
		vty_out(vty, "debug " PIM_AF_DBG " zebra\n");
		++writes;
	}

	if (PIM_DEBUG_MLAG) {
		vty_out(vty, "debug pim mlag\n");
		++writes;
	}

	if (PIM_DEBUG_BSM) {
		vty_out(vty, "debug " PIM_AF_DBG " bsm\n");
		++writes;
	}

	if (PIM_DEBUG_VXLAN) {
		vty_out(vty, "debug " PIM_AF_DBG " vxlan\n");
		++writes;
	}

	if (PIM_DEBUG_SSMPINGD) {
		vty_out(vty, "debug ssmpingd\n");
		++writes;
	}

	if (PIM_DEBUG_PIM_HELLO) {
		vty_out(vty, "debug " PIM_AF_DBG " packets hello\n");
		++writes;
	}

	if (PIM_DEBUG_PIM_J_P) {
		vty_out(vty, "debug " PIM_AF_DBG " packets joins\n");
		++writes;
	}

	if (PIM_DEBUG_PIM_REG) {
		vty_out(vty, "debug " PIM_AF_DBG " packets register\n");
		++writes;
	}

	if (PIM_DEBUG_STATIC) {
		vty_out(vty, "debug pim static\n");
		++writes;
	}

	if (PIM_DEBUG_PIM_NHT) {
		vty_out(vty, "debug " PIM_AF_DBG " nht\n");
		++writes;
	}

	if (PIM_DEBUG_PIM_NHT_RP) {
		vty_out(vty, "debug pim nht rp\n");
		++writes;
	}

	if (PIM_DEBUG_PIM_NHT_DETAIL) {
		vty_out(vty, "debug " PIM_AF_DBG " nht detail\n");
		++writes;
	}

	return writes;
}
+
/*
 * Write the non-default global (per-VRF) PIM configuration to the vty.
 * Non-default VRFs get one leading space per line — presumably because
 * they are written inside a "vrf" block; confirm at the caller.
 *
 * Returns the number of lines written.
 */
int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty)
{
	int writes = 0;
	struct pim_ssm *ssm = pim->ssm_info;
	char spaces[10];

	if (pim->vrf->vrf_id == VRF_DEFAULT)
		snprintf(spaces, sizeof(spaces), "%s", "");
	else
		snprintf(spaces, sizeof(spaces), "%s", " ");

	writes += pim_msdp_peer_config_write(vty, pim, spaces);
	writes += pim_msdp_config_write(pim, vty, spaces);

	if (!pim->send_v6_secondary) {
		vty_out(vty, "%sno ip pim send-v6-secondary\n", spaces);
		++writes;
	}

	writes += pim_rp_config_write(pim, vty, spaces);

	/* router-global knobs are only written for the default VRF */
	if (pim->vrf->vrf_id == VRF_DEFAULT) {
		if (router->register_suppress_time
		    != PIM_REGISTER_SUPPRESSION_TIME_DEFAULT) {
			vty_out(vty, "%s" PIM_AF_NAME " pim register-suppress-time %d\n",
				spaces, router->register_suppress_time);
			++writes;
		}
		if (router->t_periodic != PIM_DEFAULT_T_PERIODIC) {
			vty_out(vty, "%s" PIM_AF_NAME " pim join-prune-interval %d\n",
				spaces, router->t_periodic);
			++writes;
		}

		if (router->packet_process != PIM_DEFAULT_PACKET_PROCESS) {
			vty_out(vty, "%s" PIM_AF_NAME " pim packets %d\n", spaces,
				router->packet_process);
			++writes;
		}
	}
	if (pim->keep_alive_time != PIM_KEEPALIVE_PERIOD) {
		vty_out(vty, "%s" PIM_AF_NAME " pim keep-alive-timer %d\n",
			spaces, pim->keep_alive_time);
		++writes;
	}
	if (pim->rp_keep_alive_time != (unsigned int)PIM_RP_KEEPALIVE_PERIOD) {
		vty_out(vty, "%s" PIM_AF_NAME " pim rp keep-alive-timer %d\n",
			spaces, pim->rp_keep_alive_time);
		++writes;
	}
	/* NOTE(review): the next two commands use a literal "ip" rather
	 * than PIM_AF_NAME, so an IPv6 build would still print "ip" —
	 * confirm whether that is intended. */
	if (ssm->plist_name) {
		vty_out(vty, "%sip pim ssm prefix-list %s\n", spaces,
			ssm->plist_name);
		++writes;
	}
	if (pim->register_plist) {
		vty_out(vty, "%sip pim register-accept-list %s\n", spaces,
			pim->register_plist);
		++writes;
	}
	if (pim->spt.switchover == PIM_SPT_INFINITY) {
		if (pim->spt.plist)
			vty_out(vty,
				"%s" PIM_AF_NAME " pim spt-switchover infinity-and-beyond prefix-list %s\n",
				spaces, pim->spt.plist);
		else
			vty_out(vty,
				"%s" PIM_AF_NAME " pim spt-switchover infinity-and-beyond\n",
				spaces);
		++writes;
	}
	if (pim->ecmp_rebalance_enable) {
		vty_out(vty, "%sip pim ecmp rebalance\n", spaces);
		++writes;
	} else if (pim->ecmp_enable) {
		vty_out(vty, "%sip pim ecmp\n", spaces);
		++writes;
	}

	if (pim->gm_watermark_limit != 0) {
#if PIM_IPV == 4
		vty_out(vty, "%s" PIM_AF_NAME " igmp watermark-warn %u\n",
			spaces, pim->gm_watermark_limit);
#else
		vty_out(vty, "%s" PIM_AF_NAME " mld watermark-warn %u\n",
			spaces, pim->gm_watermark_limit);
#endif
		++writes;
	}

	if (pim->ssmpingd_list) {
		struct listnode *node;
		struct ssmpingd_sock *ss;
		++writes;
		for (ALL_LIST_ELEMENTS_RO(pim->ssmpingd_list, node, ss)) {
			vty_out(vty, "%s" PIM_AF_NAME " ssmpingd %pPA\n",
				spaces, &ss->source_addr);
			++writes;
		}
	}

	/* NOTE(review): a line is emitted here but 'writes' is not
	 * incremented — confirm whether the count is intentional. */
	if (pim->msdp.hold_time != PIM_MSDP_PEER_HOLD_TIME
	    || pim->msdp.keep_alive != PIM_MSDP_PEER_KA_TIME
	    || pim->msdp.connection_retry != PIM_MSDP_PEER_CONNECT_RETRY_TIME) {
		vty_out(vty, "%sip msdp timers %u %u", spaces,
			pim->msdp.hold_time, pim->msdp.keep_alive);
		if (pim->msdp.connection_retry
		    != PIM_MSDP_PEER_CONNECT_RETRY_TIME)
			vty_out(vty, " %u", pim->msdp.connection_retry);
		vty_out(vty, "\n");
	}

	return writes;
}
+
+#if PIM_IPV == 4
/*
 * Write the per-interface IGMP configuration lines (IPv4 build).
 *
 * Returns the caller-supplied 'writes' plus the number of lines written
 * here; because the baseline is included in the return value, callers
 * must assign the result (writes = ...), not add it (writes += ...).
 * NOTE(review): the call site in pim_config_write() currently uses +=,
 * double-counting the baseline — confirm and fix there.
 */
static int gm_config_write(struct vty *vty, int writes,
			   struct pim_interface *pim_ifp)
{
	/* IF ip igmp */
	if (pim_ifp->gm_enable) {
		vty_out(vty, " ip igmp\n");
		++writes;
	}

	/* ip igmp version */
	if (pim_ifp->igmp_version != IGMP_DEFAULT_VERSION) {
		vty_out(vty, " ip igmp version %d\n", pim_ifp->igmp_version);
		++writes;
	}

	/* IF ip igmp query-max-response-time */
	if (pim_ifp->gm_query_max_response_time_dsec !=
	    GM_QUERY_MAX_RESPONSE_TIME_DSEC) {
		vty_out(vty, " ip igmp query-max-response-time %d\n",
			pim_ifp->gm_query_max_response_time_dsec);
		++writes;
	}

	/* IF ip igmp query-interval */
	if (pim_ifp->gm_default_query_interval != GM_GENERAL_QUERY_INTERVAL) {
		vty_out(vty, " ip igmp query-interval %d\n",
			pim_ifp->gm_default_query_interval);
		++writes;
	}

	/* IF ip igmp last-member_query-count */
	if (pim_ifp->gm_last_member_query_count !=
	    GM_DEFAULT_ROBUSTNESS_VARIABLE) {
		vty_out(vty, " ip igmp last-member-query-count %d\n",
			pim_ifp->gm_last_member_query_count);
		++writes;
	}

	/* IF ip igmp last-member_query-interval */
	if (pim_ifp->gm_specific_query_max_response_time_dsec !=
	    GM_SPECIFIC_QUERY_MAX_RESPONSE_TIME_DSEC) {
		vty_out(vty, " ip igmp last-member-query-interval %d\n",
			pim_ifp->gm_specific_query_max_response_time_dsec);
		++writes;
	}

	/* IF ip igmp join */
	if (pim_ifp->gm_join_list) {
		struct listnode *node;
		struct gm_join *ij;
		for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_join_list, node, ij)) {
			if (pim_addr_is_any(ij->source_addr))
				vty_out(vty, " ip igmp join %pPAs\n",
					&ij->group_addr);
			else
				vty_out(vty, " ip igmp join %pPAs %pPAs\n",
					&ij->group_addr, &ij->source_addr);
			++writes;
		}
	}

	return writes;
}
+#else
+static int gm_config_write(struct vty *vty, int writes,
+ struct pim_interface *pim_ifp)
+{
+ /* IF ipv6 mld */
+ if (pim_ifp->gm_enable) {
+ vty_out(vty, " ipv6 mld\n");
+ ++writes;
+ }
+
+ if (pim_ifp->mld_version != MLD_DEFAULT_VERSION)
+ vty_out(vty, " ipv6 mld version %d\n", pim_ifp->mld_version);
+
+ /* IF ipv6 mld query-max-response-time */
+ if (pim_ifp->gm_query_max_response_time_dsec !=
+ GM_QUERY_MAX_RESPONSE_TIME_DSEC)
+ vty_out(vty, " ipv6 mld query-max-response-time %d\n",
+ pim_ifp->gm_query_max_response_time_dsec);
+
+ if (pim_ifp->gm_default_query_interval != GM_GENERAL_QUERY_INTERVAL)
+ vty_out(vty, " ipv6 mld query-interval %d\n",
+ pim_ifp->gm_default_query_interval);
+
+ /* IF ipv6 mld last-member_query-count */
+ if (pim_ifp->gm_last_member_query_count !=
+ GM_DEFAULT_ROBUSTNESS_VARIABLE)
+ vty_out(vty, " ipv6 mld last-member-query-count %d\n",
+ pim_ifp->gm_last_member_query_count);
+
+ /* IF ipv6 mld last-member_query-interval */
+ if (pim_ifp->gm_specific_query_max_response_time_dsec !=
+ GM_SPECIFIC_QUERY_MAX_RESPONSE_TIME_DSEC)
+ vty_out(vty, " ipv6 mld last-member-query-interval %d\n",
+ pim_ifp->gm_specific_query_max_response_time_dsec);
+
+ /* IF ipv6 mld join */
+ if (pim_ifp->gm_join_list) {
+ struct listnode *node;
+ struct gm_join *ij;
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_join_list, node, ij)) {
+ if (pim_addr_is_any(ij->source_addr))
+ vty_out(vty, " ipv6 mld join %pPAs\n",
+ &ij->group_addr);
+ else
+ vty_out(vty, " ipv6 mld join %pPAs %pPAs\n",
+ &ij->group_addr, &ij->source_addr);
+ ++writes;
+ }
+ }
+
+ return writes;
+}
+#endif
+
+int pim_config_write(struct vty *vty, int writes, struct interface *ifp,
+ struct pim_instance *pim)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (pim_ifp->pim_enable) {
+ vty_out(vty, " " PIM_AF_NAME " pim\n");
+ ++writes;
+ }
+
+ /* IF ip pim drpriority */
+ if (pim_ifp->pim_dr_priority != PIM_DEFAULT_DR_PRIORITY) {
+ vty_out(vty, " " PIM_AF_NAME " pim drpriority %u\n",
+ pim_ifp->pim_dr_priority);
+ ++writes;
+ }
+
+ /* IF ip pim hello */
+ if (pim_ifp->pim_hello_period != PIM_DEFAULT_HELLO_PERIOD) {
+ vty_out(vty, " " PIM_AF_NAME " pim hello %d", pim_ifp->pim_hello_period);
+ if (pim_ifp->pim_default_holdtime != -1)
+ vty_out(vty, " %d", pim_ifp->pim_default_holdtime);
+ vty_out(vty, "\n");
+ ++writes;
+ }
+
+ writes += gm_config_write(vty, writes, pim_ifp);
+
+ /* update source */
+ if (!pim_addr_is_any(pim_ifp->update_source)) {
+ vty_out(vty, " " PIM_AF_NAME " pim use-source %pPA\n",
+ &pim_ifp->update_source);
+ ++writes;
+ }
+
+ if (pim_ifp->activeactive)
+ vty_out(vty, " " PIM_AF_NAME " pim active-active\n");
+
+ /* boundary */
+ if (pim_ifp->boundary_oil_plist) {
+ vty_out(vty, " " PIM_AF_NAME " multicast boundary oil %s\n",
+ pim_ifp->boundary_oil_plist);
+ ++writes;
+ }
+
+ if (pim_ifp->pim_passive_enable) {
+ vty_out(vty, " " PIM_AF_NAME " pim passive\n");
+ ++writes;
+ }
+
+ writes += pim_static_write_mroute(pim, vty, ifp);
+ pim_bsm_write_config(vty, ifp);
+ ++writes;
+ pim_bfd_write_config(vty, ifp);
+ ++writes;
+
+ return writes;
+}
+
+int pim_interface_config_write(struct vty *vty)
+{
+ struct pim_instance *pim;
+ struct interface *ifp;
+ struct vrf *vrf;
+ int writes = 0;
+
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ pim = vrf->info;
+ if (!pim)
+ continue;
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ /* pim is enabled internally/implicitly on the vxlan
+ * termination device ipmr-lo. skip displaying that
+ * config to avoid confusion
+ */
+ if (pim_vxlan_is_term_dev_cfg(pim, ifp))
+ continue;
+
+ /* IF name */
+ if_vty_config_start(vty, ifp);
+
+ ++writes;
+
+ if (ifp->desc) {
+ vty_out(vty, " description %s\n", ifp->desc);
+ ++writes;
+ }
+
+ if (ifp->info) {
+ pim_config_write(vty, writes, ifp, pim);
+ }
+ if_vty_config_end(vty);
+
+ ++writes;
+ }
+ }
+
+ return writes;
+}
diff --git a/pimd/pim_vty.h b/pimd/pim_vty.h
new file mode 100644
index 0000000..84155af
--- /dev/null
+++ b/pimd/pim_vty.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_VTY_H
+#define PIM_VTY_H
+
+#include "vty.h"
+
+struct pim_instance;
+
+int pim_debug_config_write(struct vty *vty);
+int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty);
+int pim_interface_config_write(struct vty *vty);
+int pim_config_write(struct vty *vty, int writes, struct interface *ifp,
+ struct pim_instance *pim);
+#endif /* PIM_VTY_H */
diff --git a/pimd/pim_vxlan.c b/pimd/pim_vxlan.c
new file mode 100644
index 0000000..9650da8
--- /dev/null
+++ b/pimd/pim_vxlan.c
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* PIM support for VxLAN BUM flooding
+ *
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ */
+
+#include <zebra.h>
+
+#include <hash.h>
+#include <jhash.h>
+#include <log.h>
+#include <prefix.h>
+#include <vrf.h>
+
+#include "pimd.h"
+#include "pim_iface.h"
+#include "pim_memory.h"
+#include "pim_oil.h"
+#include "pim_register.h"
+#include "pim_str.h"
+#include "pim_upstream.h"
+#include "pim_ifchannel.h"
+#include "pim_nht.h"
+#include "pim_zebra.h"
+#include "pim_vxlan.h"
+#include "pim_mlag.h"
+
+/* pim-vxlan global info */
+struct pim_vxlan vxlan_info, *pim_vxlan_p = &vxlan_info;
+
+static void pim_vxlan_work_timer_setup(bool start);
+static void pim_vxlan_set_peerlink_rif(struct pim_instance *pim,
+ struct interface *ifp);
+
+/*
+ * The rp info has gone from no path to having a
+ * path. Let's immediately send out the null pim register
+ * as otherwise we will be sitting for up to 60 seconds waiting
+ * for it to pop. Which is not cool.
+ */
+void pim_vxlan_rp_info_is_alive(struct pim_instance *pim,
+ struct pim_rpf *rpg_changed)
+{
+ struct listnode *listnode;
+ struct pim_vxlan_sg *vxlan_sg;
+ struct pim_rpf *rpg;
+
+ /*
+ * No vxlan here, move along, nothing to see
+ */
+ if (!vxlan_info.work_list)
+ return;
+
+ for (listnode = vxlan_info.work_list->head; listnode;
+ listnode = listnode->next) {
+ vxlan_sg = listgetdata(listnode);
+
+ rpg = RP(pim, vxlan_sg->up->sg.grp);
+
+ /*
+ * If the rp is the same we should send
+ */
+ if (rpg == rpg_changed) {
+ zlog_debug("VXLAN RP INFO is alive sending");
+ pim_null_register_send(vxlan_sg->up);
+ }
+ }
+}
+
+/*************************** vxlan work list **********************************
+ * A work list is maintained for staggered generation of pim null register
+ * messages for vxlan SG entries that are in a reg_join state.
+ *
+ * A max of 500 NULL registers are generated at one shot. If paused, reg
+ * generation continues in the next second and so on till all register
+ * messages have been sent out. And the process is restarted every 60s.
+ *
+ * The purpose of this null register generation is to set up the SPT and
+ * maintain it independent of the presence of overlay BUM traffic.
+ ****************************************************************************/
+static void pim_vxlan_do_reg_work(void)
+{
+ struct listnode *listnode;
+ int work_cnt = 0;
+ struct pim_vxlan_sg *vxlan_sg;
+ static int sec_count;
+
+ ++sec_count;
+
+ if (sec_count > PIM_VXLAN_NULL_REG_INTERVAL) {
+ sec_count = 0;
+ listnode = vxlan_info.next_work ?
+ vxlan_info.next_work :
+ vxlan_info.work_list->head;
+ if (PIM_DEBUG_VXLAN && listnode)
+ zlog_debug("vxlan SG work %s",
+ vxlan_info.next_work ? "continues" : "starts");
+ } else {
+ listnode = vxlan_info.next_work;
+ }
+
+ for (; listnode; listnode = listnode->next) {
+ vxlan_sg = (struct pim_vxlan_sg *)listnode->data;
+
+ if (vxlan_sg->up && (vxlan_sg->up->reg_state == PIM_REG_JOIN)) {
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s periodic NULL register",
+ vxlan_sg->sg_str);
+
+ /*
+ * If we are on the work queue *and* the rpf
+ * has been lost on the vxlan_sg->up let's
+ * make sure that we don't send it.
+ */
+ if (vxlan_sg->up->rpf.source_nexthop.interface) {
+ pim_null_register_send(vxlan_sg->up);
+ ++work_cnt;
+ }
+ }
+
+ if (work_cnt > vxlan_info.max_work_cnt) {
+ vxlan_info.next_work = listnode->next;
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %d work items proc and pause",
+ work_cnt);
+ return;
+ }
+ }
+
+ if (work_cnt) {
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %d work items proc", work_cnt);
+ }
+ vxlan_info.next_work = NULL;
+}
+
+/* Staggered work related info is initialized when the first work comes
+ * along
+ */
+static void pim_vxlan_init_work(void)
+{
+ if (vxlan_info.flags & PIM_VXLANF_WORK_INITED)
+ return;
+
+ vxlan_info.max_work_cnt = PIM_VXLAN_WORK_MAX;
+ vxlan_info.flags |= PIM_VXLANF_WORK_INITED;
+ vxlan_info.work_list = list_new();
+ pim_vxlan_work_timer_setup(true/* start */);
+}
+
+static void pim_vxlan_add_work(struct pim_vxlan_sg *vxlan_sg)
+{
+ if (vxlan_sg->flags & PIM_VXLAN_SGF_DEL_IN_PROG) {
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s skip work list; del-in-prog",
+ vxlan_sg->sg_str);
+ return;
+ }
+
+ pim_vxlan_init_work();
+
+ /* already a part of the work list */
+ if (vxlan_sg->work_node)
+ return;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s work list add",
+ vxlan_sg->sg_str);
+ vxlan_sg->work_node = listnode_add(vxlan_info.work_list, vxlan_sg);
+ /* XXX: adjust max_work_cnt if needed */
+}
+
+static void pim_vxlan_del_work(struct pim_vxlan_sg *vxlan_sg)
+{
+ if (!vxlan_sg->work_node)
+ return;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s work list del",
+ vxlan_sg->sg_str);
+
+ if (vxlan_sg->work_node == vxlan_info.next_work)
+ vxlan_info.next_work = vxlan_sg->work_node->next;
+
+ list_delete_node(vxlan_info.work_list, vxlan_sg->work_node);
+ vxlan_sg->work_node = NULL;
+}
+
+void pim_vxlan_update_sg_reg_state(struct pim_instance *pim,
+ struct pim_upstream *up, bool reg_join)
+{
+ struct pim_vxlan_sg *vxlan_sg;
+
+ vxlan_sg = pim_vxlan_sg_find(pim, &up->sg);
+ if (!vxlan_sg)
+ return;
+
+ /* add the vxlan sg entry to a work list for periodic reg joins.
+ * the entry will stay in the list as long as the register state is
+ * PIM_REG_JOIN
+ */
+ if (reg_join)
+ pim_vxlan_add_work(vxlan_sg);
+ else
+ pim_vxlan_del_work(vxlan_sg);
+}
+
+static void pim_vxlan_work_timer_cb(struct event *t)
+{
+ pim_vxlan_do_reg_work();
+ pim_vxlan_work_timer_setup(true /* start */);
+}
+
+/* global 1second timer used for periodic processing */
+static void pim_vxlan_work_timer_setup(bool start)
+{
+ EVENT_OFF(vxlan_info.work_timer);
+ if (start)
+ event_add_timer(router->master, pim_vxlan_work_timer_cb, NULL,
+ PIM_VXLAN_WORK_TIME, &vxlan_info.work_timer);
+}
+
+/**************************** vxlan origination mroutes ***********************
+ * For every (local-vtep-ip, bum-mcast-grp) registered by evpn an origination
+ * mroute is setup by pimd. The purpose of this mroute is to forward vxlan
+ * encapsulated BUM (broadcast, unknown-unicast and unknown-multicast) packets
+ * over the underlay.
+ *
+ * Sample mroute (single VTEP):
+ * (27.0.0.7, 239.1.1.100) Iif: lo Oifs: uplink-1
+ *
+ * Sample mroute (anycast VTEP):
+ * (36.0.0.9, 239.1.1.100) Iif: peerlink-3.4094\
+ * Oifs: peerlink-3.4094 uplink-1
+ ***************************************************************************/
+static void pim_vxlan_orig_mr_up_del(struct pim_vxlan_sg *vxlan_sg)
+{
+ struct pim_upstream *up = vxlan_sg->up;
+
+ if (!up)
+ return;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s orig mroute-up del",
+ vxlan_sg->sg_str);
+
+ vxlan_sg->up = NULL;
+
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG) {
+ /* clear out all the vxlan properties */
+ up->flags &= ~(PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG |
+ PIM_UPSTREAM_FLAG_MASK_STATIC_IIF |
+ PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY |
+ PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG |
+ PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA |
+ PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL);
+
+ /* We bring things to a grinding halt by force expiring
+ * the kat. Doing this will also remove the reference we
+ * created as a "vxlan" source and delete the upstream entry
+ * if there are no other references.
+ */
+ if (PIM_UPSTREAM_FLAG_TEST_SRC_STREAM(up->flags)) {
+ EVENT_OFF(up->t_ka_timer);
+ up = pim_upstream_keep_alive_timer_proc(up);
+ } else {
+ /* this is really unexpected as we force vxlan
+ * origination mroutes to have active sources, but
+ * just in case
+ */
+ up = pim_upstream_del(vxlan_sg->pim, up, __func__);
+ }
+ /* if there are other references register the source
+ * for nht
+ */
+ if (up) {
+ enum pim_rpf_result r;
+
+ r = pim_rpf_update(vxlan_sg->pim, up, NULL, __func__);
+ if (r == PIM_RPF_FAILURE) {
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug(
+ "vxlan SG %s rpf_update failure",
+ vxlan_sg->sg_str);
+ }
+ }
+ }
+}
+
+static void pim_vxlan_orig_mr_up_iif_update(struct pim_vxlan_sg *vxlan_sg)
+{
+ /* update MFC with the new IIF */
+ pim_upstream_fill_static_iif(vxlan_sg->up, vxlan_sg->iif);
+ pim_upstream_mroute_iif_update(vxlan_sg->up->channel_oil, __func__);
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s orig mroute-up updated with iif %s",
+ vxlan_sg->sg_str,
+ vxlan_sg->iif?vxlan_sg->iif->name:"-");
+
+}
+
+/* For every VxLAN BUM multicast group we setup a SG-up that has the following
+ * "forced properties" -
+ * 1. Directly connected on a DR interface i.e. we must act as an FHR
+ * 2. We prime the pump i.e. no multicast data is needed to register this
+ * source with the FHR. To do that we send periodic null registers if
+ * the SG entry is in a register-join state. We also prevent expiry of
+ * KAT.
+ * 3. As this SG is setup without data there is no need to register encapsulate
+ * data traffic. This encapsulation is explicitly skipped for the following
+ * reasons -
+ * a) Many levels of encapsulation are needed, creating MTU discovery challenges.
+ * Overlay BUM is encapsulated in a vxlan/UDP/IP header and then
+ * encapsulated again in a pim-register header.
+ * b) On a vxlan-aa setup both switches rx a copy of each BUM packet. if
+ * they both reg encapsulated traffic the RP will accept the duplicates
+ * as there are no RPF checks for this encapsulated data.
+ * a), b) could be worked around if needed, but there is really no need because
+ * of (2) i.e. the pump is primed without data.
+ */
+static void pim_vxlan_orig_mr_up_add(struct pim_vxlan_sg *vxlan_sg)
+{
+ struct pim_upstream *up;
+ struct pim_interface *term_ifp;
+ int flags = 0;
+ struct pim_instance *pim = vxlan_sg->pim;
+
+ if (vxlan_sg->up) {
+ /* nothing to do */
+ return;
+ }
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s orig mroute-up add with iif %s",
+ vxlan_sg->sg_str,
+ vxlan_sg->iif?vxlan_sg->iif->name:"-");
+
+ PIM_UPSTREAM_FLAG_SET_SRC_VXLAN_ORIG(flags);
+ /* pin the IIF to lo or peerlink-subinterface and disable NHT */
+ PIM_UPSTREAM_FLAG_SET_STATIC_IIF(flags);
+ /* Fake traffic by setting SRC_STREAM and starting KAT */
+ /* We intentionally skip updating ref count for SRC_STREAM/FHR.
+ * Setting SRC_VXLAN should have already created a reference
+ * preventing the entry from being deleted
+ */
+ PIM_UPSTREAM_FLAG_SET_FHR(flags);
+ PIM_UPSTREAM_FLAG_SET_SRC_STREAM(flags);
+ /* Force pimreg even if non-DR. This is needed on a MLAG setup for
+ * VxLAN AA
+ */
+ PIM_UPSTREAM_FLAG_SET_FORCE_PIMREG(flags);
+ /* prevent KAT expiry. we want the MDT setup even if there is no BUM
+ * traffic
+ */
+ PIM_UPSTREAM_FLAG_SET_DISABLE_KAT_EXPIRY(flags);
+ /* SPT for vxlan BUM groups is primed and maintained via NULL
+ * registers so there is no need to reg-encapsulate
+ * vxlan-encapsulated overlay data traffic
+ */
+ PIM_UPSTREAM_FLAG_SET_NO_PIMREG_DATA(flags);
+ /* On a MLAG setup we force a copy to the MLAG peer while also
+ * accepting traffic from the peer. To do this we set peerlink-rif as
+ * the IIF and also add it to the OIL
+ */
+ PIM_UPSTREAM_FLAG_SET_ALLOW_IIF_IN_OIL(flags);
+
+ /* XXX: todo: defer pim_upstream add if pim is not enabled on the iif */
+ up = pim_upstream_find(vxlan_sg->pim, &vxlan_sg->sg);
+ if (up) {
+ /* if the iif is set to something other than the vxlan_sg->iif
+ * we must dereg the old nexthop and force to new "static"
+ * iif
+ */
+ if (!PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags)) {
+ pim_delete_tracked_nexthop(vxlan_sg->pim,
+ up->upstream_addr, up, NULL);
+ }
+ /* We are acting FHR; clear out use_rpt setting if any */
+ pim_upstream_update_use_rpt(up, false /*update_mroute*/);
+ pim_upstream_ref(up, flags, __func__);
+ vxlan_sg->up = up;
+ term_ifp = pim_vxlan_get_term_ifp(pim);
+ /* mute termination device on origination mroutes */
+ if (term_ifp)
+ pim_channel_update_oif_mute(up->channel_oil,
+ term_ifp);
+ pim_vxlan_orig_mr_up_iif_update(vxlan_sg);
+ /* mute pimreg on origination mroutes */
+ if (pim->regiface)
+ pim_channel_update_oif_mute(up->channel_oil,
+ pim->regiface->info);
+ } else {
+ up = pim_upstream_add(vxlan_sg->pim, &vxlan_sg->sg,
+ vxlan_sg->iif, flags, __func__, NULL);
+ vxlan_sg->up = up;
+ }
+
+ if (!up) {
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s orig mroute-up add failed",
+ vxlan_sg->sg_str);
+ return;
+ }
+
+ pim_upstream_keep_alive_timer_start(up, vxlan_sg->pim->keep_alive_time);
+
+ /* register the source with the RP */
+ switch (up->reg_state) {
+
+ case PIM_REG_NOINFO:
+ pim_register_join(up);
+ pim_null_register_send(up);
+ break;
+
+ case PIM_REG_JOIN:
+ /* if the pim upstream entry is already in reg-join state
+ * send null_register right away and add to the register
+ * worklist
+ */
+ pim_null_register_send(up);
+ pim_vxlan_update_sg_reg_state(pim, up, true);
+ break;
+
+ case PIM_REG_JOIN_PENDING:
+ case PIM_REG_PRUNE:
+ break;
+ }
+
+ /* update the inherited OIL */
+ pim_upstream_inherited_olist(vxlan_sg->pim, up);
+ if (!up->channel_oil->installed)
+ pim_upstream_mroute_add(up->channel_oil, __func__);
+}
+
+static void pim_vxlan_orig_mr_oif_add(struct pim_vxlan_sg *vxlan_sg)
+{
+ if (!vxlan_sg->up || !vxlan_sg->orig_oif)
+ return;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s oif %s add",
+ vxlan_sg->sg_str, vxlan_sg->orig_oif->name);
+
+ vxlan_sg->flags |= PIM_VXLAN_SGF_OIF_INSTALLED;
+ pim_channel_add_oif(vxlan_sg->up->channel_oil,
+ vxlan_sg->orig_oif, PIM_OIF_FLAG_PROTO_VXLAN,
+ __func__);
+}
+
+static void pim_vxlan_orig_mr_oif_del(struct pim_vxlan_sg *vxlan_sg)
+{
+ struct interface *orig_oif;
+
+ orig_oif = vxlan_sg->orig_oif;
+ vxlan_sg->orig_oif = NULL;
+
+ if (!(vxlan_sg->flags & PIM_VXLAN_SGF_OIF_INSTALLED))
+ return;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s oif %s del",
+ vxlan_sg->sg_str, orig_oif->name);
+
+ vxlan_sg->flags &= ~PIM_VXLAN_SGF_OIF_INSTALLED;
+ pim_channel_del_oif(vxlan_sg->up->channel_oil,
+ orig_oif, PIM_OIF_FLAG_PROTO_VXLAN, __func__);
+}
+
+static inline struct interface *pim_vxlan_orig_mr_oif_get(
+ struct pim_instance *pim)
+{
+ return (vxlan_mlag.flags & PIM_VXLAN_MLAGF_ENABLED) ?
+ pim->vxlan.peerlink_rif : NULL;
+}
+
+/* Single VTEPs: IIF for the vxlan-origination-mroutes is lo or vrf-dev (if
+ * the mroute is in a non-default vrf).
+ * Anycast VTEPs: IIF is the MLAG ISL/peerlink.
+ */
+static inline struct interface *pim_vxlan_orig_mr_iif_get(
+ struct pim_instance *pim)
+{
+ return ((vxlan_mlag.flags & PIM_VXLAN_MLAGF_ENABLED) &&
+ pim->vxlan.peerlink_rif) ?
+ pim->vxlan.peerlink_rif : pim->vxlan.default_iif;
+}
+
+static bool pim_vxlan_orig_mr_add_is_ok(struct pim_vxlan_sg *vxlan_sg)
+{
+ struct pim_interface *pim_ifp;
+
+ vxlan_sg->iif = pim_vxlan_orig_mr_iif_get(vxlan_sg->pim);
+ if (!vxlan_sg->iif)
+ return false;
+
+ pim_ifp = (struct pim_interface *)vxlan_sg->iif->info;
+ if (!pim_ifp || (pim_ifp->mroute_vif_index < 0))
+ return false;
+
+ return true;
+}
+
+static void pim_vxlan_orig_mr_install(struct pim_vxlan_sg *vxlan_sg)
+{
+ pim_vxlan_orig_mr_up_add(vxlan_sg);
+
+ vxlan_sg->orig_oif = pim_vxlan_orig_mr_oif_get(vxlan_sg->pim);
+ pim_vxlan_orig_mr_oif_add(vxlan_sg);
+}
+
+static void pim_vxlan_orig_mr_add(struct pim_vxlan_sg *vxlan_sg)
+{
+ if (!pim_vxlan_orig_mr_add_is_ok(vxlan_sg))
+ return;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s orig-mr add", vxlan_sg->sg_str);
+
+ pim_vxlan_orig_mr_install(vxlan_sg);
+}
+
+static void pim_vxlan_orig_mr_del(struct pim_vxlan_sg *vxlan_sg)
+{
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s orig-mr del", vxlan_sg->sg_str);
+
+ pim_vxlan_orig_mr_oif_del(vxlan_sg);
+ pim_vxlan_orig_mr_up_del(vxlan_sg);
+}
+
+static void pim_vxlan_orig_mr_iif_update(struct hash_bucket *bucket, void *arg)
+{
+ struct interface *ifp;
+ struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)bucket->data;
+ struct interface *old_iif = vxlan_sg->iif;
+
+ if (!pim_vxlan_is_orig_mroute(vxlan_sg))
+ return;
+
+ ifp = pim_vxlan_orig_mr_iif_get(vxlan_sg->pim);
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s iif changed from %s to %s",
+ vxlan_sg->sg_str,
+ old_iif ? old_iif->name : "-",
+ ifp ? ifp->name : "-");
+
+ if (pim_vxlan_orig_mr_add_is_ok(vxlan_sg)) {
+ if (vxlan_sg->up) {
+ /* upstream exists but iif changed */
+ pim_vxlan_orig_mr_up_iif_update(vxlan_sg);
+ } else {
+ /* install mroute */
+ pim_vxlan_orig_mr_install(vxlan_sg);
+ }
+ } else {
+ pim_vxlan_orig_mr_del(vxlan_sg);
+ }
+}
+
+/**************************** vxlan termination mroutes ***********************
+ * For every bum-mcast-grp registered by evpn a (*, G) termination
+ * mroute is setup by pimd. The purpose of this mroute is to pull down vxlan
+ * packets with the bum-mcast-grp dip from the underlay and terminate the
+ * tunnel. This is done by including the vxlan termination device (ipmr-lo) in
+ * its OIL. The vxlan de-capsulated packets are subject to subsequent overlay
+ * bridging.
+ *
+ * Sample mroute:
+ * (0.0.0.0, 239.1.1.100) Iif: uplink-1 Oifs: ipmr-lo, uplink-1
+ *****************************************************************************/
+struct pim_interface *pim_vxlan_get_term_ifp(struct pim_instance *pim)
+{
+ return pim->vxlan.term_if ?
+ (struct pim_interface *)pim->vxlan.term_if->info : NULL;
+}
+
+static void pim_vxlan_term_mr_oif_add(struct pim_vxlan_sg *vxlan_sg)
+{
+ if (vxlan_sg->flags & PIM_VXLAN_SGF_OIF_INSTALLED)
+ return;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s term-oif %s add",
+ vxlan_sg->sg_str, vxlan_sg->term_oif->name);
+
+ if (pim_ifchannel_local_membership_add(vxlan_sg->term_oif,
+ &vxlan_sg->sg, true /*is_vxlan */)) {
+ vxlan_sg->flags |= PIM_VXLAN_SGF_OIF_INSTALLED;
+ /* update the inherited OIL */
+ /* XXX - I don't see the inherited OIL updated when a local
+ * member is added. And that probably needs to be fixed. Till
+ * that happens we do a force update on the inherited OIL
+ * here.
+ */
+ pim_upstream_inherited_olist(vxlan_sg->pim, vxlan_sg->up);
+ } else {
+ zlog_warn("vxlan SG %s term-oif %s add failed",
+ vxlan_sg->sg_str, vxlan_sg->term_oif->name);
+ }
+}
+
+static void pim_vxlan_term_mr_oif_del(struct pim_vxlan_sg *vxlan_sg)
+{
+ if (!(vxlan_sg->flags & PIM_VXLAN_SGF_OIF_INSTALLED))
+ return;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s oif %s del",
+ vxlan_sg->sg_str, vxlan_sg->term_oif->name);
+
+ vxlan_sg->flags &= ~PIM_VXLAN_SGF_OIF_INSTALLED;
+ pim_ifchannel_local_membership_del(vxlan_sg->term_oif, &vxlan_sg->sg);
+ /* update the inherited OIL */
+ /* XXX - I don't see the inherited OIL updated when a local member
+ * is deleted. And that probably needs to be fixed. Till that happens
+ * we do a force update on the inherited OIL here.
+ */
+ pim_upstream_inherited_olist(vxlan_sg->pim, vxlan_sg->up);
+}
+
+static void pim_vxlan_update_sg_entry_mlag(struct pim_instance *pim,
+ struct pim_upstream *up, bool inherit)
+{
+ bool is_df = true;
+
+ if (inherit && up->parent &&
+ PIM_UPSTREAM_FLAG_TEST_MLAG_VXLAN(up->parent->flags) &&
+ PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->parent->flags))
+ is_df = false;
+
+ pim_mlag_up_df_role_update(pim, up, is_df, "inherit_xg_df");
+}
+
+/* We run MLAG DF election only on mroutes that have the termination
+ * device ipmr-lo in the immediate OIL. This is only (*, G) entries at the
+ * moment. For (S, G) entries that (with ipmr-lo in the inherited OIL) we
+ * inherit the DF role from the (*, G) entry.
+ */
+void pim_vxlan_inherit_mlag_flags(struct pim_instance *pim,
+ struct pim_upstream *up, bool inherit)
+{
+ struct listnode *listnode;
+ struct pim_upstream *child;
+
+ for (ALL_LIST_ELEMENTS_RO(up->sources, listnode,
+ child)) {
+ pim_vxlan_update_sg_entry_mlag(pim,
+ child, true /* inherit */);
+ }
+}
+
+static void pim_vxlan_term_mr_up_add(struct pim_vxlan_sg *vxlan_sg)
+{
+ struct pim_upstream *up;
+ int flags = 0;
+
+ if (vxlan_sg->up) {
+ /* nothing to do */
+ return;
+ }
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s term mroute-up add",
+ vxlan_sg->sg_str);
+
+ PIM_UPSTREAM_FLAG_SET_SRC_VXLAN_TERM(flags);
+ /* enable MLAG designated-forwarder election on termination mroutes */
+ PIM_UPSTREAM_FLAG_SET_MLAG_VXLAN(flags);
+
+ up = pim_upstream_add(vxlan_sg->pim, &vxlan_sg->sg, NULL /* iif */,
+ flags, __func__, NULL);
+ vxlan_sg->up = up;
+
+ if (!up) {
+ zlog_warn("vxlan SG %s term mroute-up add failed",
+ vxlan_sg->sg_str);
+ return;
+ }
+
+ /* update existing SG entries with the parent's MLAG flag */
+ pim_vxlan_inherit_mlag_flags(vxlan_sg->pim, up, true /*enable*/);
+}
+
+static void pim_vxlan_term_mr_up_del(struct pim_vxlan_sg *vxlan_sg)
+{
+ struct pim_upstream *up = vxlan_sg->up;
+
+ if (!up)
+ return;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s term mroute-up del",
+ vxlan_sg->sg_str);
+ vxlan_sg->up = NULL;
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM) {
+ /* update SG entries that are inheriting from this XG entry */
+ pim_vxlan_inherit_mlag_flags(vxlan_sg->pim, up,
+ false /*enable*/);
+ /* clear out all the vxlan related flags */
+ up->flags &= ~(PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM |
+ PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN);
+ pim_mlag_up_local_del(vxlan_sg->pim, up);
+ pim_upstream_del(vxlan_sg->pim, up, __func__);
+ }
+}
+
+static void pim_vxlan_term_mr_add(struct pim_vxlan_sg *vxlan_sg)
+{
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s term mroute add", vxlan_sg->sg_str);
+
+ vxlan_sg->term_oif = vxlan_sg->pim->vxlan.term_if;
+ if (!vxlan_sg->term_oif)
+ /* defer termination mroute till we have a termination device */
+ return;
+
+ pim_vxlan_term_mr_up_add(vxlan_sg);
+ /* set up local membership for the term-oif */
+ pim_vxlan_term_mr_oif_add(vxlan_sg);
+}
+
+static void pim_vxlan_term_mr_del(struct pim_vxlan_sg *vxlan_sg)
+{
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s term mroute del", vxlan_sg->sg_str);
+
+ /* remove local membership associated with the term oif */
+ pim_vxlan_term_mr_oif_del(vxlan_sg);
+ /* remove references to the upstream entry */
+ pim_vxlan_term_mr_up_del(vxlan_sg);
+}
+
+/************************** vxlan SG cache management ************************/
+static unsigned int pim_vxlan_sg_hash_key_make(const void *p)
+{
+ const struct pim_vxlan_sg *vxlan_sg = p;
+
+ return pim_sgaddr_hash(vxlan_sg->sg, 0);
+}
+
+static bool pim_vxlan_sg_hash_eq(const void *p1, const void *p2)
+{
+ const struct pim_vxlan_sg *sg1 = p1;
+ const struct pim_vxlan_sg *sg2 = p2;
+
+ return !pim_sgaddr_cmp(sg1->sg, sg2->sg);
+}
+
+static struct pim_vxlan_sg *pim_vxlan_sg_new(struct pim_instance *pim,
+ pim_sgaddr *sg)
+{
+ struct pim_vxlan_sg *vxlan_sg;
+
+ vxlan_sg = XCALLOC(MTYPE_PIM_VXLAN_SG, sizeof(*vxlan_sg));
+
+ vxlan_sg->pim = pim;
+ vxlan_sg->sg = *sg;
+ snprintfrr(vxlan_sg->sg_str, sizeof(vxlan_sg->sg_str), "%pSG", sg);
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s alloc", vxlan_sg->sg_str);
+
+ vxlan_sg = hash_get(pim->vxlan.sg_hash, vxlan_sg, hash_alloc_intern);
+
+ /* we register with the MLAG daemon in the first VxLAN SG and never
+ * de-register during that life of the pimd
+ */
+ if (pim->vxlan.sg_hash->count == 1) {
+ vxlan_mlag.flags |= PIM_VXLAN_MLAGF_DO_REG;
+ pim_mlag_register();
+ }
+
+ return vxlan_sg;
+}
+
+struct pim_vxlan_sg *pim_vxlan_sg_find(struct pim_instance *pim, pim_sgaddr *sg)
+{
+ struct pim_vxlan_sg lookup;
+
+ lookup.sg = *sg;
+ return hash_lookup(pim->vxlan.sg_hash, &lookup);
+}
+
+struct pim_vxlan_sg *pim_vxlan_sg_add(struct pim_instance *pim, pim_sgaddr *sg)
+{
+ struct pim_vxlan_sg *vxlan_sg;
+
+ vxlan_sg = pim_vxlan_sg_find(pim, sg);
+ if (vxlan_sg)
+ return vxlan_sg;
+
+ vxlan_sg = pim_vxlan_sg_new(pim, sg);
+
+ if (pim_vxlan_is_orig_mroute(vxlan_sg))
+ pim_vxlan_orig_mr_add(vxlan_sg);
+ else
+ pim_vxlan_term_mr_add(vxlan_sg);
+
+ return vxlan_sg;
+}
+
+static void pim_vxlan_sg_del_item(struct pim_vxlan_sg *vxlan_sg)
+{
+ vxlan_sg->flags |= PIM_VXLAN_SGF_DEL_IN_PROG;
+
+ pim_vxlan_del_work(vxlan_sg);
+
+ if (pim_vxlan_is_orig_mroute(vxlan_sg))
+ pim_vxlan_orig_mr_del(vxlan_sg);
+ else
+ pim_vxlan_term_mr_del(vxlan_sg);
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s free", vxlan_sg->sg_str);
+
+ XFREE(MTYPE_PIM_VXLAN_SG, vxlan_sg);
+}
+
+void pim_vxlan_sg_del(struct pim_instance *pim, pim_sgaddr *sg)
+{
+ struct pim_vxlan_sg *vxlan_sg;
+
+ vxlan_sg = pim_vxlan_sg_find(pim, sg);
+ if (!vxlan_sg)
+ return;
+
+ hash_release(pim->vxlan.sg_hash, vxlan_sg);
+ pim_vxlan_sg_del_item(vxlan_sg);
+}
+
+/******************************* MLAG handling *******************************/
+bool pim_vxlan_do_mlag_reg(void)
+{
+ return (vxlan_mlag.flags & PIM_VXLAN_MLAGF_DO_REG);
+}
+
+/* The peerlink sub-interface is added as an OIF to the origination-mroute.
+ * This is done to send a copy of the multicast-vxlan encapsulated traffic
+ * to the MLAG peer which may mroute it over the underlay if there are any
+ * interested receivers.
+ */
+static void pim_vxlan_sg_peerlink_oif_update(struct hash_bucket *bucket,
+ void *arg)
+{
+ struct interface *new_oif = (struct interface *)arg;
+ struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)bucket->data;
+
+ if (!pim_vxlan_is_orig_mroute(vxlan_sg))
+ return;
+
+ if (vxlan_sg->orig_oif == new_oif)
+ return;
+
+ pim_vxlan_orig_mr_oif_del(vxlan_sg);
+
+ vxlan_sg->orig_oif = new_oif;
+ pim_vxlan_orig_mr_oif_add(vxlan_sg);
+}
+
+/* In the case of anycast VTEPs the VTEP-PIP must be used as the
+ * register source.
+ */
+bool pim_vxlan_get_register_src(struct pim_instance *pim,
+ struct pim_upstream *up, struct in_addr *src_p)
+{
+ if (!(vxlan_mlag.flags & PIM_VXLAN_MLAGF_ENABLED))
+ return true;
+
+ /* if address is not available suppress the pim-register */
+ if (vxlan_mlag.reg_addr.s_addr == INADDR_ANY)
+ return false;
+
+ *src_p = vxlan_mlag.reg_addr;
+ return true;
+}
+
+void pim_vxlan_mlag_update(bool enable, bool peer_state, uint32_t role,
+ struct interface *peerlink_rif,
+ struct in_addr *reg_addr)
+{
+ struct pim_instance *pim;
+ char addr_buf[INET_ADDRSTRLEN];
+ struct pim_interface *pim_ifp = NULL;
+
+ if (PIM_DEBUG_VXLAN) {
+ inet_ntop(AF_INET, reg_addr,
+ addr_buf, INET_ADDRSTRLEN);
+ zlog_debug("vxlan MLAG update %s state %s role %d rif %s addr %s",
+ enable ? "enable" : "disable",
+ peer_state ? "up" : "down",
+ role,
+ peerlink_rif ? peerlink_rif->name : "-",
+ addr_buf);
+ }
+
+ /* XXX: for now vxlan termination is only possible in the default VRF
+ * when that changes this will need to change to iterate all VRFs
+ */
+ pim = pim_get_pim_instance(VRF_DEFAULT);
+
+ if (!pim) {
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("%s: Unable to find pim instance", __func__);
+ return;
+ }
+
+ if (enable)
+ vxlan_mlag.flags |= PIM_VXLAN_MLAGF_ENABLED;
+ else
+ vxlan_mlag.flags &= ~PIM_VXLAN_MLAGF_ENABLED;
+
+ if (vxlan_mlag.peerlink_rif != peerlink_rif)
+ vxlan_mlag.peerlink_rif = peerlink_rif;
+
+ vxlan_mlag.reg_addr = *reg_addr;
+ vxlan_mlag.peer_state = peer_state;
+ vxlan_mlag.role = role;
+
+ /* process changes */
+ if (vxlan_mlag.peerlink_rif)
+ pim_ifp = (struct pim_interface *)vxlan_mlag.peerlink_rif->info;
+ if ((vxlan_mlag.flags & PIM_VXLAN_MLAGF_ENABLED) &&
+ pim_ifp && (pim_ifp->mroute_vif_index > 0))
+ pim_vxlan_set_peerlink_rif(pim, peerlink_rif);
+ else
+ pim_vxlan_set_peerlink_rif(pim, NULL);
+}
+
+/****************************** misc callbacks *******************************/
+static void pim_vxlan_set_default_iif(struct pim_instance *pim,
+ struct interface *ifp)
+{
+ struct interface *old_iif;
+
+ if (pim->vxlan.default_iif == ifp)
+ return;
+
+ old_iif = pim->vxlan.default_iif;
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("%s: vxlan default iif changed from %s to %s",
+ __func__, old_iif ? old_iif->name : "-",
+ ifp ? ifp->name : "-");
+
+ old_iif = pim_vxlan_orig_mr_iif_get(pim);
+ pim->vxlan.default_iif = ifp;
+ ifp = pim_vxlan_orig_mr_iif_get(pim);
+ if (old_iif == ifp)
+ return;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("%s: vxlan orig iif changed from %s to %s", __func__,
+ old_iif ? old_iif->name : "-",
+ ifp ? ifp->name : "-");
+
+ /* add/del upstream entries for the existing vxlan SG when the
+ * interface becomes available
+ */
+ if (pim->vxlan.sg_hash)
+ hash_iterate(pim->vxlan.sg_hash,
+ pim_vxlan_orig_mr_iif_update, NULL);
+}
+
+static void pim_vxlan_up_cost_update(struct pim_instance *pim,
+ struct pim_upstream *up,
+ struct interface *old_peerlink_rif)
+{
+ if (!PIM_UPSTREAM_FLAG_TEST_MLAG_VXLAN(up->flags))
+ return;
+
+ if (up->rpf.source_nexthop.interface &&
+ ((up->rpf.source_nexthop.interface ==
+ pim->vxlan.peerlink_rif) ||
+ (up->rpf.source_nexthop.interface ==
+ old_peerlink_rif))) {
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("RPF cost adjust for %s on peerlink-rif (old: %s, new: %s) change",
+ up->sg_str,
+ old_peerlink_rif ?
+ old_peerlink_rif->name : "-",
+ pim->vxlan.peerlink_rif ?
+ pim->vxlan.peerlink_rif->name : "-");
+ pim_mlag_up_local_add(pim, up);
+ }
+}
+
+static void pim_vxlan_term_mr_cost_update(struct hash_bucket *bucket, void *arg)
+{
+ struct interface *old_peerlink_rif = (struct interface *)arg;
+ struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)bucket->data;
+ struct pim_upstream *up;
+ struct listnode *listnode;
+ struct pim_upstream *child;
+
+ if (pim_vxlan_is_orig_mroute(vxlan_sg))
+ return;
+
+ /* Lookup all XG and SG entries with RPF-interface peerlink_rif */
+ up = vxlan_sg->up;
+ if (!up)
+ return;
+
+ pim_vxlan_up_cost_update(vxlan_sg->pim, up,
+ old_peerlink_rif);
+
+ for (ALL_LIST_ELEMENTS_RO(up->sources, listnode,
+ child))
+ pim_vxlan_up_cost_update(vxlan_sg->pim, child,
+ old_peerlink_rif);
+}
+
+static void pim_vxlan_sg_peerlink_rif_update(struct hash_bucket *bucket,
+ void *arg)
+{
+ pim_vxlan_orig_mr_iif_update(bucket, NULL);
+ pim_vxlan_term_mr_cost_update(bucket, arg);
+}
+
+static void pim_vxlan_set_peerlink_rif(struct pim_instance *pim,
+ struct interface *ifp)
+{
+ struct interface *old_iif;
+ struct interface *new_iif;
+ struct interface *old_oif;
+ struct interface *new_oif;
+
+ if (pim->vxlan.peerlink_rif == ifp)
+ return;
+
+ old_iif = pim->vxlan.peerlink_rif;
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("%s: vxlan peerlink_rif changed from %s to %s",
+ __func__, old_iif ? old_iif->name : "-",
+ ifp ? ifp->name : "-");
+
+ old_iif = pim_vxlan_orig_mr_iif_get(pim);
+ old_oif = pim_vxlan_orig_mr_oif_get(pim);
+ pim->vxlan.peerlink_rif = ifp;
+
+ new_iif = pim_vxlan_orig_mr_iif_get(pim);
+ if (old_iif != new_iif) {
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("%s: vxlan orig iif changed from %s to %s",
+ __func__, old_iif ? old_iif->name : "-",
+ new_iif ? new_iif->name : "-");
+
+ /* add/del upstream entries for the existing vxlan SG when the
+ * interface becomes available
+ */
+ if (pim->vxlan.sg_hash)
+ hash_iterate(pim->vxlan.sg_hash,
+ pim_vxlan_sg_peerlink_rif_update,
+ old_iif);
+ }
+
+ new_oif = pim_vxlan_orig_mr_oif_get(pim);
+ if (old_oif != new_oif) {
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("%s: vxlan orig oif changed from %s to %s",
+ __func__, old_oif ? old_oif->name : "-",
+ new_oif ? new_oif->name : "-");
+ if (pim->vxlan.sg_hash)
+ hash_iterate(pim->vxlan.sg_hash,
+ pim_vxlan_sg_peerlink_oif_update,
+ new_oif);
+ }
+}
+
+static void pim_vxlan_term_mr_oif_update(struct hash_bucket *bucket, void *arg)
+{
+ struct interface *ifp = (struct interface *)arg;
+ struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)bucket->data;
+
+ if (pim_vxlan_is_orig_mroute(vxlan_sg))
+ return;
+
+ if (vxlan_sg->term_oif == ifp)
+ return;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s term oif changed from %s to %s",
+ vxlan_sg->sg_str,
+ vxlan_sg->term_oif ? vxlan_sg->term_oif->name : "-",
+ ifp ? ifp->name : "-");
+
+ pim_vxlan_term_mr_del(vxlan_sg);
+ vxlan_sg->term_oif = ifp;
+ pim_vxlan_term_mr_add(vxlan_sg);
+}
+
+static void pim_vxlan_term_oif_update(struct pim_instance *pim,
+ struct interface *ifp)
+{
+ if (pim->vxlan.term_if == ifp)
+ return;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan term oif changed from %s to %s",
+ pim->vxlan.term_if ? pim->vxlan.term_if->name : "-",
+ ifp ? ifp->name : "-");
+
+ pim->vxlan.term_if = ifp;
+ if (pim->vxlan.sg_hash)
+ hash_iterate(pim->vxlan.sg_hash,
+ pim_vxlan_term_mr_oif_update, ifp);
+}
+
+void pim_vxlan_add_vif(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+ struct pim_instance *pim = pim_ifp->pim;
+
+ if (pim->vrf->vrf_id != VRF_DEFAULT)
+ return;
+
+ if (if_is_loopback(ifp))
+ pim_vxlan_set_default_iif(pim, ifp);
+
+ if (vxlan_mlag.flags & PIM_VXLAN_MLAGF_ENABLED &&
+ (ifp == vxlan_mlag.peerlink_rif))
+ pim_vxlan_set_peerlink_rif(pim, ifp);
+
+ if (pim->vxlan.term_if_cfg == ifp)
+ pim_vxlan_term_oif_update(pim, ifp);
+}
+
+void pim_vxlan_del_vif(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+ struct pim_instance *pim = pim_ifp->pim;
+
+ if (pim->vrf->vrf_id != VRF_DEFAULT)
+ return;
+
+ if (pim->vxlan.default_iif == ifp)
+ pim_vxlan_set_default_iif(pim, NULL);
+
+ if (pim->vxlan.peerlink_rif == ifp)
+ pim_vxlan_set_peerlink_rif(pim, NULL);
+
+ if (pim->vxlan.term_if == ifp)
+ pim_vxlan_term_oif_update(pim, NULL);
+}
+
+/* enable pim implicitly on the termination device add */
+void pim_vxlan_add_term_dev(struct pim_instance *pim,
+ struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+
+ if (pim->vxlan.term_if_cfg == ifp)
+ return;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan term oif cfg changed from %s to %s",
+ pim->vxlan.term_if_cfg ?
+ pim->vxlan.term_if_cfg->name : "-",
+ ifp->name);
+
+ pim->vxlan.term_if_cfg = ifp;
+
+ /* enable pim on the term ifp */
+ pim_ifp = (struct pim_interface *)ifp->info;
+ if (pim_ifp) {
+ pim_ifp->pim_enable = true;
+ /* ifp is already oper up; activate it as a term dev */
+ if (pim_ifp->mroute_vif_index >= 0)
+ pim_vxlan_term_oif_update(pim, ifp);
+ } else {
+ /* ensure that pimreg exists before using the newly created
+ * vxlan termination device
+ */
+ pim_if_create_pimreg(pim);
+ (void)pim_if_new(ifp, false /*igmp*/, true /*pim*/,
+ false /*pimreg*/, true /*vxlan_term*/);
+ }
+}
+
+/* disable pim implicitly, if needed, on the termination device deletion */
+void pim_vxlan_del_term_dev(struct pim_instance *pim)
+{
+ struct interface *ifp = pim->vxlan.term_if_cfg;
+ struct pim_interface *pim_ifp;
+
+ if (PIM_DEBUG_VXLAN)
+ zlog_debug("vxlan term oif cfg changed from %s to -",
+ ifp->name);
+
+ pim->vxlan.term_if_cfg = NULL;
+
+ pim_ifp = (struct pim_interface *)ifp->info;
+ if (pim_ifp) {
+ pim_ifp->pim_enable = false;
+ if (!pim_ifp->gm_enable)
+ pim_if_delete(ifp);
+ }
+}
+
+void pim_vxlan_init(struct pim_instance *pim)
+{
+ char hash_name[64];
+
+ snprintf(hash_name, sizeof(hash_name),
+ "PIM %s vxlan SG hash", pim->vrf->name);
+ pim->vxlan.sg_hash = hash_create(pim_vxlan_sg_hash_key_make,
+ pim_vxlan_sg_hash_eq, hash_name);
+}
+
+void pim_vxlan_exit(struct pim_instance *pim)
+{
+ hash_clean_and_free(&pim->vxlan.sg_hash,
+ (void (*)(void *))pim_vxlan_sg_del_item);
+}
+
+void pim_vxlan_terminate(void)
+{
+ pim_vxlan_work_timer_setup(false);
+}
diff --git a/pimd/pim_vxlan.h b/pimd/pim_vxlan.h
new file mode 100644
index 0000000..5039bf6
--- /dev/null
+++ b/pimd/pim_vxlan.h
@@ -0,0 +1,143 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* PIM support for VxLAN BUM flooding
 *
 * Copyright (C) 2019 Cumulus Networks, Inc.
 */

#ifndef PIM_VXLAN_H
#define PIM_VXLAN_H

#include "pim_instance.h"

/* global timer used for miscellaneous staggered processing */
#define PIM_VXLAN_WORK_TIME 1
/* number of SG entries processed at one shot */
#define PIM_VXLAN_WORK_MAX 500
/* frequency of periodic NULL registers */
#define PIM_VXLAN_NULL_REG_INTERVAL 60 /* seconds */

/* shorthand for the global MLAG state inside struct pim_vxlan */
#define vxlan_mlag (vxlan_info.mlag)

enum pim_vxlan_sg_flags {
	PIM_VXLAN_SGF_NONE = 0,
	/* entry is being torn down */
	PIM_VXLAN_SGF_DEL_IN_PROG = (1 << 0),
	/* termination OIF has been added to the OIL */
	PIM_VXLAN_SGF_OIF_INSTALLED = (1 << 1)
};

/* one entry per zebra-pushed vxlan BUM mroute (origination or termination) */
struct pim_vxlan_sg {
	struct pim_instance *pim;

	/* key */
	pim_sgaddr sg;
	char sg_str[PIM_SG_LEN];

	enum pim_vxlan_sg_flags flags;
	struct pim_upstream *up;
	struct listnode *work_node; /* to pim_vxlan.work_list */

	/* termination info (only applicable to termination XG mroutes)
	 * term_oif - termination device ipmr-lo is added to the OIL
	 * as local/IGMP membership to allow termination of vxlan traffic
	 */
	struct interface *term_oif;

	/* origination info
	 * iif - lo/vrf or peerlink (on MLAG setups)
	 * orig_oif - added to the OIL to send encapsulated BUM traffic to
	 * the MLAG peer switch
	 */
	struct interface *iif;
	/* on a MLAG setup the peerlink is added as a static OIF */
	struct interface *orig_oif;
};

enum pim_vxlan_mlag_flags {
	PIM_VXLAN_MLAGF_NONE = 0,
	PIM_VXLAN_MLAGF_ENABLED = (1 << 0),
	/* this switch is responsible for sending the periodic NULL registers */
	PIM_VXLAN_MLAGF_DO_REG = (1 << 1)
};

struct pim_vxlan_mlag {
	enum pim_vxlan_mlag_flags flags;
	/* XXX - remove this variable from here */
	int role;
	bool peer_state;
	/* routed interface setup on top of MLAG peerlink */
	struct interface *peerlink_rif;
	struct in_addr reg_addr;
};

enum pim_vxlan_flags {
	PIM_VXLANF_NONE = 0,
	PIM_VXLANF_WORK_INITED = (1 << 0)
};

/* global (not per-instance) vxlan state, see pim_vxlan_p */
struct pim_vxlan {
	enum pim_vxlan_flags flags;

	/* staggered-work machinery: entries queue on work_list and are
	 * drained max_work_cnt at a time by work_timer
	 */
	struct event *work_timer;
	struct list *work_list;
	struct listnode *next_work;
	int max_work_cnt;

	struct pim_vxlan_mlag mlag;
};

/* zebra adds-
 * 1. one (S, G) entry where S=local-VTEP-IP and G==BUM-mcast-grp for
 * each BUM MDT. This is the origination entry.
 * 2. and one (*, G) entry for each MDT. This is the termination place holder.
 *
 * Note: This doesn't mean that only (*, G) mroutes are used for tunnel
 * termination. (S, G) mroutes with ipmr-lo in the OIL can also be
 * used for tunnel termination if SPT switchover happens; however such
 * SG entries are created by traffic and will NOT be a part of the vxlan SG
 * database.
 */
static inline bool pim_vxlan_is_orig_mroute(struct pim_vxlan_sg *vxlan_sg)
{
	return !pim_addr_is_any(vxlan_sg->sg.src);
}

/* true if the upstream's source is reachable via a local loopback,
 * i.e. the source IP is owned by this VTEP
 */
static inline bool pim_vxlan_is_local_sip(struct pim_upstream *up)
{
	return !pim_addr_is_any(up->sg.src) &&
	       up->rpf.source_nexthop.interface &&
	       if_is_loopback(up->rpf.source_nexthop.interface);
}

static inline bool pim_vxlan_is_term_dev_cfg(struct pim_instance *pim,
					     struct interface *ifp)
{
	return pim->vxlan.term_if_cfg == ifp;
}

extern struct pim_vxlan *pim_vxlan_p;
extern struct pim_vxlan_sg *pim_vxlan_sg_find(struct pim_instance *pim,
					      pim_sgaddr *sg);
extern struct pim_vxlan_sg *pim_vxlan_sg_add(struct pim_instance *pim,
					     pim_sgaddr *sg);
extern void pim_vxlan_sg_del(struct pim_instance *pim, pim_sgaddr *sg);
extern void pim_vxlan_update_sg_reg_state(struct pim_instance *pim,
					  struct pim_upstream *up, bool reg_join);
extern struct pim_interface *pim_vxlan_get_term_ifp(struct pim_instance *pim);
extern void pim_vxlan_add_vif(struct interface *ifp);
extern void pim_vxlan_del_vif(struct interface *ifp);
extern void pim_vxlan_add_term_dev(struct pim_instance *pim,
				   struct interface *ifp);
extern void pim_vxlan_del_term_dev(struct pim_instance *pim);
extern bool pim_vxlan_get_register_src(struct pim_instance *pim,
				       struct pim_upstream *up, struct in_addr *src_p);
extern void pim_vxlan_mlag_update(bool enable, bool peer_state, uint32_t role,
				  struct interface *peerlink_rif,
				  struct in_addr *reg_addr);
extern bool pim_vxlan_do_mlag_reg(void);
extern void pim_vxlan_inherit_mlag_flags(struct pim_instance *pim,
					 struct pim_upstream *up, bool inherit);

extern void pim_vxlan_rp_info_is_alive(struct pim_instance *pim,
				       struct pim_rpf *rpg_changed);

/* Shutdown of PIM stop the thread */
extern void pim_vxlan_terminate(void);
#endif /* PIM_VXLAN_H */
diff --git a/pimd/pim_vxlan_instance.h b/pimd/pim_vxlan_instance.h
new file mode 100644
index 0000000..65a5955
--- /dev/null
+++ b/pimd/pim_vxlan_instance.h
@@ -0,0 +1,37 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* PIM support for VxLAN BUM flooding
 *
 * Copyright (C) 2019 Cumulus Networks, Inc.
 *
 */

#ifndef PIM_VXLAN_INSTANCE_H
#define PIM_VXLAN_INSTANCE_H

/* pim termination device is expected to include the substring ipmr-lo */
#define PIM_VXLAN_TERM_DEV_NAME "ipmr-lo"

/* per-pim-instance (per-VRF) vxlan state, embedded in struct pim_instance */
struct pim_vxlan_instance {
	/* database of zebra-pushed vxlan SG entries (struct pim_vxlan_sg) */
	struct hash *sg_hash;

	/* this is lo for default instance and vrf-dev for non-default
	 * instances
	 */
	struct interface *default_iif;

	/* In a MLAG/VxLAN-AA setup the peerlink sub-interface (ISL-rif) is
	 * used as the IIF (presumably for origination entries -- the original
	 * comment was truncated here; confirm against pim_vxlan.c)
	 */
	struct interface *peerlink_rif;

	/* device used by the dataplane to terminate multicast encapsulated
	 * vxlan traffic
	 */
	struct interface *term_if_cfg;
	struct interface *term_if;
};

extern void pim_vxlan_init(struct pim_instance *pim);
extern void pim_vxlan_exit(struct pim_instance *pim);

#endif /* PIM_VXLAN_INSTANCE_H */
diff --git a/pimd/pim_zebra.c b/pimd/pim_zebra.c
new file mode 100644
index 0000000..62f00db
--- /dev/null
+++ b/pimd/pim_zebra.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "if.h"
+#include "log.h"
+#include "prefix.h"
+#include "zclient.h"
+#include "stream.h"
+#include "network.h"
+#include "vty.h"
+#include "plist.h"
+#include "lib/bfd.h"
+
+#include "pimd.h"
+#include "pim_pim.h"
+#include "pim_zebra.h"
+#include "pim_iface.h"
+#include "pim_str.h"
+#include "pim_oil.h"
+#include "pim_rpf.h"
+#include "pim_time.h"
+#include "pim_join.h"
+#include "pim_zlookup.h"
+#include "pim_ifchannel.h"
+#include "pim_rp.h"
+#include "pim_igmpv3.h"
+#include "pim_jp_agg.h"
+#include "pim_nht.h"
+#include "pim_ssm.h"
+#include "pim_vxlan.h"
+#include "pim_mlag.h"
+
+#undef PIM_DEBUG_IFADDR_DUMP
+#define PIM_DEBUG_IFADDR_DUMP
+
+struct zclient *zclient;
+
+
+/* Router-id update message from zebra. */
/* Router-id update message from zebra.
 * The message is decoded to keep the stream in sync, but the resulting
 * router-id is not used anywhere in this handler.
 */
static int pim_router_id_update_zebra(ZAPI_CALLBACK_ARGS)
{
	struct prefix router_id;

	zebra_router_id_update_read(zclient->ibuf, &router_id);

	return 0;
}
+
#ifdef PIM_DEBUG_IFADDR_DUMP
/* Debug helper: log every IPv4 address configured on the interface. */
static void dump_if_address(struct interface *ifp)
{
	struct listnode *node;
	struct connected *ifc;

	zlog_debug("%s %s: interface %s addresses:", __FILE__, __func__,
		   ifp->name);

	for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, ifc)) {
		struct prefix *addr_pfx = ifc->address;

		/* only IPv4 addresses are of interest here */
		if (addr_pfx->family != AF_INET)
			continue;

		zlog_debug("%s %s: interface %s address %pI4 %s", __FILE__,
			   __func__, ifp->name, &addr_pfx->u.prefix4,
			   CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY)
				   ? "secondary"
				   : "primary");
	}
}
#endif
+
/* ZAPI handler: an address was added on an interface.
 * Decodes the connected address, normalizes the secondary flag (IPv4 only),
 * and hands the address to the PIM interface machinery.
 */
static int pim_zebra_if_address_add(ZAPI_CALLBACK_ARGS)
{
	struct connected *c;
	struct prefix *p;
	struct pim_interface *pim_ifp;

	/*
	  zebra api notifies address adds/dels events by using the same call
	  interface_add_read below, see comments in lib/zclient.c

	  zebra_interface_address_read(ZEBRA_INTERFACE_ADDRESS_ADD, ...)
	  will add address to interface list by calling
	  connected_add_by_prefix()
	*/
	c = zebra_interface_address_read(cmd, zclient->ibuf, vrf_id);
	if (!c)
		return 0;

	pim_ifp = c->ifp->info;
	p = c->address;

	if (PIM_DEBUG_ZEBRA) {
		zlog_debug("%s: %s(%u) connected IP address %pFX flags %u %s",
			   __func__, c->ifp->name, vrf_id, p, c->flags,
			   CHECK_FLAG(c->flags, ZEBRA_IFA_SECONDARY)
				   ? "secondary"
				   : "primary");

#ifdef PIM_DEBUG_IFADDR_DUMP
		dump_if_address(c->ifp);
#endif
	}

#if PIM_IPV == 4
	/* On the IPv4 build: non-IPv4 addresses and any address other than
	 * the interface's primary are forced to "secondary" so PIM only
	 * treats a single address as primary.
	 */
	if (p->family != PIM_AF)
		SET_FLAG(c->flags, ZEBRA_IFA_SECONDARY);
	else if (!CHECK_FLAG(c->flags, ZEBRA_IFA_SECONDARY)) {
		/* trying to add primary address? */
		pim_addr primary_addr = pim_find_primary_addr(c->ifp);
		pim_addr addr = pim_addr_from_prefix(p);

		if (pim_addr_cmp(primary_addr, addr)) {
			if (PIM_DEBUG_ZEBRA)
				zlog_warn(
					"%s: %s : forcing secondary flag on %pFX",
					__func__, c->ifp->name, p);
			SET_FLAG(c->flags, ZEBRA_IFA_SECONDARY);
		}
	}
#else /* PIM_IPV != 4 */
	/* IPv6 build ignores addresses of the wrong family entirely */
	if (p->family != PIM_AF)
		return 0;
#endif

	pim_if_addr_add(c);
	if (pim_ifp) {
		struct pim_instance *pim;

		pim = pim_get_pim_instance(vrf_id);
		if (!pim) {
			if (PIM_DEBUG_ZEBRA)
				zlog_debug("%s: Unable to find pim instance",
					   __func__);
			return 0;
		}

		pim_ifp->pim = pim;

		/* a new address may make a configured RP reachable */
		pim_rp_check_on_if_add(pim_ifp);
	}

	if (if_is_loopback(c->ifp)) {
		struct vrf *vrf = vrf_lookup_by_id(vrf_id);
		struct interface *ifp;

		/* loopback address changes can affect the source address
		 * selection of every other operative interface in the VRF
		 */
		FOR_ALL_INTERFACES (vrf, ifp) {
			if (!if_is_loopback(ifp) && if_is_operative(ifp))
				pim_if_addr_add_all(ifp);
		}
	}
	return 0;
}
+
/* ZAPI handler: an address was removed from an interface.
 * Removes the address from PIM state and re-evaluates RP reachability,
 * then frees the decoded connected object.
 */
static int pim_zebra_if_address_del(ZAPI_CALLBACK_ARGS)
{
	struct connected *c;
	struct prefix *p;
	struct vrf *vrf = vrf_lookup_by_id(vrf_id);

	if (!vrf)
		return 0;

	/*
	  zebra api notifies address adds/dels events by using the same call
	  interface_add_read below, see comments in lib/zclient.c

	  zebra_interface_address_read(ZEBRA_INTERFACE_ADDRESS_DELETE, ...)
	  will remove address from interface list by calling
	  connected_delete_by_prefix()
	*/
	c = zebra_interface_address_read(cmd, zclient->ibuf, vrf_id);
	if (!c)
		return 0;

	p = c->address;

	if (PIM_DEBUG_ZEBRA) {
		zlog_debug(
			"%s: %s(%u) disconnected IP address %pFX flags %u %s",
			__func__, c->ifp->name, vrf_id, p, c->flags,
			CHECK_FLAG(c->flags, ZEBRA_IFA_SECONDARY)
				? "secondary"
				: "primary");
#ifdef PIM_DEBUG_IFADDR_DUMP
		dump_if_address(c->ifp);
#endif
	}

	if (p->family == PIM_AF) {
		struct pim_instance *pim;

		pim = vrf->info;
		pim_if_addr_del(c, 0);
		/* losing an address can change RP reachability and whether
		 * this router is itself an RP
		 */
		pim_rp_setup(pim);
		pim_i_am_rp_re_evaluate(pim);
	}

	/* the decoded connected object is owned by this handler */
	connected_free(&c);
	return 0;
}
+
/* Flush the pending upstream-switch lists of every PIM interface in the
 * instance: send the queued join/prune messages and clear the aggregation
 * groups.
 */
void pim_zebra_update_all_interfaces(struct pim_instance *pim)
{
	struct interface *ifp;

	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp = ifp->info;
		struct pim_iface_upstream_switch *us;
		struct listnode *node;

		/* interface not enabled for PIM */
		if (!pim_ifp)
			continue;

		for (ALL_LIST_ELEMENTS_RO(pim_ifp->upstream_switch_list, node,
					  us)) {
			struct pim_rpf rpf;

			/* build a temporary RPF pointing at the switch's
			 * upstream neighbor on this interface
			 */
			rpf.source_nexthop.interface = ifp;
			rpf.rpf_addr = us->address;
			pim_joinprune_send(&rpf, us->us);
			pim_jp_agg_clear_group(us->us);
		}
	}
}
+
/* React to a change of the upstream's RPF (reverse-path) information.
 * "old" holds the previous RPF; up->rpf already carries the new one.
 * Handles both the case where the old RPF had a resolved interface and
 * the case where it did not.
 */
void pim_zebra_upstream_rpf_changed(struct pim_instance *pim,
				    struct pim_upstream *up,
				    struct pim_rpf *old)
{
	if (old->source_nexthop.interface) {
		struct pim_neighbor *nbr;

		/* stop aggregating joins towards the old upstream neighbor */
		nbr = pim_neighbor_find(old->source_nexthop.interface,
					old->rpf_addr, true);

		if (nbr)
			pim_jp_agg_remove_group(nbr->upstream_jp_agg, up, nbr);

		/*
		 * We have detected a case where we might need
		 * to rescan the inherited o_list so do it.
		 */
		if (up->channel_oil->oil_inherited_rescan) {
			pim_upstream_inherited_olist_decide(pim, up);
			up->channel_oil->oil_inherited_rescan = 0;
		}

		if (up->join_state == PIM_UPSTREAM_JOINED) {
			/*
			 * If we come up real fast we can be here
			 * where the mroute has not been installed
			 * so install it.
			 */
			if (!up->channel_oil->installed)
				pim_upstream_mroute_add(up->channel_oil,
							__func__);

			/*
			 * RFC 4601: 4.5.7.  Sending (S,G)
			 * Join/Prune Messages
			 *
			 * Transitions from Joined State
			 *
			 * RPF'(S,G) changes not due to an Assert
			 *
			 * The upstream (S,G) state machine remains
			 * in Joined state. Send Join(S,G) to the new
			 * upstream neighbor, which is the new value
			 * of RPF'(S,G).  Send Prune(S,G) to the old
			 * upstream neighbor, which is the old value
			 * of RPF'(S,G).  Set the Join Timer (JT) to
			 * expire after t_periodic seconds.
			 */
			pim_jp_agg_switch_interface(old, &up->rpf, up);

			pim_upstream_join_timer_restart(up, old);
		} /* up->join_state == PIM_UPSTREAM_JOINED */
	}

	else {
		/* old RPF had no resolved interface: same rescan/switch
		 * logic, but there is no old neighbor to prune from
		 */
		/*
		 * We have detected a case where we might need
		 * to rescan the inherited o_list so do it.
		 */
		if (up->channel_oil->oil_inherited_rescan) {
			pim_upstream_inherited_olist_decide(pim, up);
			up->channel_oil->oil_inherited_rescan = 0;
		}

		if (up->join_state == PIM_UPSTREAM_JOINED)
			pim_jp_agg_switch_interface(old, &up->rpf, up);

		if (!up->channel_oil->installed)
			pim_upstream_mroute_add(up->channel_oil, __func__);
	}

	/* FIXME can join_desired actually be changed by pim_rpf_update()
	 * returning PIM_RPF_CHANGED ?
	 */
	pim_upstream_update_join_desired(pim, up);
}
+
/* ZAPI handler for ZEBRA_VXLAN_SG_ADD/DEL: decode the (S,G) pair pushed by
 * zebra for a vxlan BUM MDT and add/remove it from the vxlan SG database.
 */
__attribute__((unused))
static int pim_zebra_vxlan_sg_proc(ZAPI_CALLBACK_ARGS)
{
	struct stream *s;
	struct pim_instance *pim;
	pim_sgaddr sg;
	size_t prefixlen;

	pim = pim_get_pim_instance(vrf_id);
	if (!pim)
		return 0;

	s = zclient->ibuf;

	/* wire format: address length, then src and grp of that length.
	 * NOTE(review): prefixlen is not validated against sizeof(sg.src)
	 * before the stream_get() calls; presumably zebra always sends
	 * sizeof(pim_addr) -- confirm against the zebra-side encoder.
	 */
	prefixlen = stream_getl(s);
	stream_get(&sg.src, s, prefixlen);
	stream_get(&sg.grp, s, prefixlen);

	if (PIM_DEBUG_ZEBRA)
		zlog_debug("%u:recv SG %s %pSG", vrf_id,
			   (cmd == ZEBRA_VXLAN_SG_ADD) ? "add" : "del", &sg);

	if (cmd == ZEBRA_VXLAN_SG_ADD)
		pim_vxlan_sg_add(pim, &sg);
	else
		pim_vxlan_sg_del(pim, &sg);

	return 0;
}
+
+__attribute__((unused))
+static void pim_zebra_vxlan_replay(void)
+{
+ struct stream *s = NULL;
+
+ /* Check socket. */
+ if (!zclient || zclient->sock < 0)
+ return;
+
+ s = zclient->obuf;
+ stream_reset(s);
+
+ zclient_create_header(s, ZEBRA_VXLAN_SG_REPLAY, VRF_DEFAULT);
+ stream_putw_at(s, 0, stream_get_endp(s));
+
+ zclient_send_message(zclient);
+}
+
+void pim_scan_oil(struct pim_instance *pim)
+{
+ struct channel_oil *c_oil;
+
+ pim->scan_oil_last = pim_time_monotonic_sec();
+ ++pim->scan_oil_events;
+
+ frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil)
+ pim_upstream_mroute_iif_update(c_oil, __func__);
+}
+
+static void on_rpf_cache_refresh(struct event *t)
+{
+ struct pim_instance *pim = EVENT_ARG(t);
+
+ /* update kernel multicast forwarding cache (MFC) */
+ pim_scan_oil(pim);
+
+ pim->rpf_cache_refresh_last = pim_time_monotonic_sec();
+ ++pim->rpf_cache_refresh_events;
+
+ // It is called as part of pim_neighbor_add
+ // pim_rp_setup ();
+}
+
+void sched_rpf_cache_refresh(struct pim_instance *pim)
+{
+ ++pim->rpf_cache_refresh_requests;
+
+ pim_rpf_set_refresh_time(pim);
+
+ if (pim->rpf_cache_refresher) {
+ /* Refresh timer is already running */
+ return;
+ }
+
+ /* Start refresh timer */
+
+ if (PIM_DEBUG_ZEBRA) {
+ zlog_debug("%s: triggering %ld msec timer", __func__,
+ router->rpf_cache_refresh_delay_msec);
+ }
+
+ event_add_timer_msec(router->master, on_rpf_cache_refresh, pim,
+ router->rpf_cache_refresh_delay_msec,
+ &pim->rpf_cache_refresher);
+}
+
/* zclient "connected" callback: (re)register with zebra.
 * On the IPv4 build this also registers the BFD client and requests a
 * replay of the vxlan BUM groups.
 */
static void pim_zebra_connected(struct zclient *zclient)
{
#if PIM_IPV == 4
	/* Send the client registration */
	bfd_client_sendmsg(zclient, ZEBRA_BFD_CLIENT_REGISTER, router->vrf_id);
#endif

	zclient_send_reg_requests(zclient, router->vrf_id);

#if PIM_IPV == 4
	/* request for VxLAN BUM group addresses */
	pim_zebra_vxlan_replay();
#endif
}
+
/* zclient capabilities callback: stash zebra's advertised MLAG role and
 * ECMP limit on the global router object.
 */
static void pim_zebra_capabilities(struct zclient_capabilities *cap)
{
	router->mlag_role = cap->role;
	router->multipath = cap->ecmp;
}
+
/* ZAPI message dispatch table; indexed by zebra message type.
 * The vxlan and MLAG handlers are only wired up on the IPv4 build.
 */
static zclient_handler *const pim_handlers[] = {
	[ZEBRA_INTERFACE_ADDRESS_ADD] = pim_zebra_if_address_add,
	[ZEBRA_INTERFACE_ADDRESS_DELETE] = pim_zebra_if_address_del,

	[ZEBRA_NEXTHOP_UPDATE] = pim_parse_nexthop_update,
	[ZEBRA_ROUTER_ID_UPDATE] = pim_router_id_update_zebra,

#if PIM_IPV == 4
	[ZEBRA_VXLAN_SG_ADD] = pim_zebra_vxlan_sg_proc,
	[ZEBRA_VXLAN_SG_DEL] = pim_zebra_vxlan_sg_proc,

	[ZEBRA_MLAG_PROCESS_UP] = pim_zebra_mlag_process_up,
	[ZEBRA_MLAG_PROCESS_DOWN] = pim_zebra_mlag_process_down,
	[ZEBRA_MLAG_FORWARD_MSG] = pim_zebra_mlag_handle_msg,
#endif
};
+
+void pim_zebra_init(void)
+{
+ /* Socket for receiving updates from Zebra daemon */
+ zclient = zclient_new(router->master, &zclient_options_default,
+ pim_handlers, array_size(pim_handlers));
+
+ zclient->zebra_capabilities = pim_zebra_capabilities;
+ zclient->zebra_connected = pim_zebra_connected;
+
+ zclient_init(zclient, ZEBRA_ROUTE_PIM, 0, &pimd_privs);
+ if (PIM_DEBUG_PIM_TRACE) {
+ zlog_notice("%s: zclient socket initialized", __func__);
+ }
+
+ zclient_lookup_new();
+}
+
+void pim_forward_start(struct pim_ifchannel *ch)
+{
+ struct pim_upstream *up = ch->upstream;
+ uint32_t mask = 0;
+
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: (S,G)=%pSG oif=%s (%pPA)", __func__, &ch->sg,
+ ch->interface->name, &up->upstream_addr);
+
+ if (PIM_IF_FLAG_TEST_PROTO_IGMP(ch->flags))
+ mask = PIM_OIF_FLAG_PROTO_GM;
+
+ if (PIM_IF_FLAG_TEST_PROTO_PIM(ch->flags))
+ mask |= PIM_OIF_FLAG_PROTO_PIM;
+
+ pim_channel_add_oif(up->channel_oil, ch->interface,
+ mask, __func__);
+}
+
/* Remove (or demote) the ifchannel's interface in the upstream's OIL when
 * forwarding on this channel stops.
 */
void pim_forward_stop(struct pim_ifchannel *ch)
{
	struct pim_upstream *up = ch->upstream;

	if (PIM_DEBUG_PIM_TRACE) {
		zlog_debug("%s: (S,G)=%s oif=%s installed: %d",
			   __func__, ch->sg_str, ch->interface->name,
			   up->channel_oil->installed);
	}

	/*
	 * If a channel is being removed, check to see if we still need
	 * to inherit the interface.  If so make sure it is added in
	 */
	if (pim_upstream_evaluate_join_desired_interface(up, ch, ch->parent))
		pim_channel_add_oif(up->channel_oil, ch->interface,
				    PIM_OIF_FLAG_PROTO_PIM, __func__);
	else
		pim_channel_del_oif(up->channel_oil, ch->interface,
				    PIM_OIF_FLAG_PROTO_PIM, __func__);
}
+
+void pim_zebra_zclient_update(struct vty *vty)
+{
+ vty_out(vty, "Zclient update socket: ");
+
+ if (zclient) {
+ vty_out(vty, "%d failures=%d\n", zclient->sock, zclient->fail);
+ } else {
+ vty_out(vty, "<null zclient>\n");
+ }
+}
+
+struct zclient *pim_zebra_zclient_get(void)
+{
+ if (zclient)
+ return zclient;
+ else
+ return NULL;
+}
+
/* Ask zebra to move ifp under the given VRF master device. */
void pim_zebra_interface_set_master(struct interface *vrf,
				    struct interface *ifp)
{
	zclient_interface_set_master(zclient, vrf, ifp);
}
diff --git a/pimd/pim_zebra.h b/pimd/pim_zebra.h
new file mode 100644
index 0000000..0ef63f2
--- /dev/null
+++ b/pimd/pim_zebra.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_ZEBRA_H
+#define PIM_ZEBRA_H
+
+#include <zebra.h>
+#include "zclient.h"
+
+#include "pim_ifchannel.h"
+
+void pim_zebra_init(void);
+void pim_zebra_zclient_update(struct vty *vty);
+
+void pim_scan_oil(struct pim_instance *pim_matcher);
+
+void pim_forward_start(struct pim_ifchannel *ch);
+void pim_forward_stop(struct pim_ifchannel *ch);
+
+void sched_rpf_cache_refresh(struct pim_instance *pim);
+struct zclient *pim_zebra_zclient_get(void);
+
+void pim_zebra_update_all_interfaces(struct pim_instance *pim);
+void pim_zebra_upstream_rpf_changed(struct pim_instance *pim,
+ struct pim_upstream *up,
+ struct pim_rpf *old);
+
+void pim_zebra_interface_set_master(struct interface *vrf,
+ struct interface *ifp);
+#endif /* PIM_ZEBRA_H */
diff --git a/pimd/pim_zlookup.c b/pimd/pim_zlookup.c
new file mode 100644
index 0000000..6a026f9
--- /dev/null
+++ b/pimd/pim_zlookup.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "prefix.h"
+#include "zclient.h"
+#include "stream.h"
+#include "network.h"
+#include "frrevent.h"
+#include "prefix.h"
+#include "vty.h"
+#include "lib_errors.h"
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_iface.h"
+#include "pim_neighbor.h"
+#include "pim_pim.h"
+#include "pim_str.h"
+#include "pim_oil.h"
+#include "pim_zlookup.h"
+#include "pim_addr.h"
+
+static struct zclient *zlookup = NULL;
+struct event *zlookup_read;
+
+static void zclient_lookup_sched(struct zclient *zlookup, int delay);
+static void zclient_lookup_read_pipe(struct event *thread);
+
+/* Connect to zebra for nexthop lookup. */
/* Connect to zebra for nexthop lookup.
 * On any failure a retry is scheduled; on success the periodic keepalive
 * read is armed.
 */
static void zclient_lookup_connect(struct event *t)
{
	struct zclient *zlookup;

	zlookup = EVENT_ARG(t);

	/* already connected - nothing to do */
	if (zlookup->sock >= 0) {
		return;
	}

	if (zclient_socket_connect(zlookup) < 0) {
		++zlookup->fail;
		zlog_warn("%s: failure connecting zclient socket: failures=%d",
			  __func__, zlookup->fail);
	} else {
		zlookup->fail = 0; /* reset counter on connection */
	}

	/* the hello must go through for the session to be usable; tear the
	 * socket back down if it cannot be sent
	 */
	if (zclient_send_hello(zlookup) == ZCLIENT_SEND_FAILURE) {
		if (close(zlookup->sock)) {
			zlog_warn("%s: closing fd=%d: errno=%d %s", __func__,
				  zlookup->sock, errno, safe_strerror(errno));
		}
		zlookup->sock = -1;
	}

	if (zlookup->sock < 0) {
		/* Since last connect failed, retry within 10 secs */
		zclient_lookup_sched(zlookup, 10);
		return;
	}

	/* arm the periodic pipe read that keeps the session alive */
	event_add_timer(router->master, zclient_lookup_read_pipe, zlookup, 60,
			&zlookup_read);
}
+
+/* Schedule connection with delay. */
+static void zclient_lookup_sched(struct zclient *zlookup, int delay)
+{
+ event_add_timer(router->master, zclient_lookup_connect, zlookup, delay,
+ &zlookup->t_connect);
+
+ zlog_notice("%s: zclient lookup connection scheduled for %d seconds",
+ __func__, delay);
+}
+
+/* Schedule connection for now. */
+static void zclient_lookup_sched_now(struct zclient *zlookup)
+{
+ event_add_event(router->master, zclient_lookup_connect, zlookup, 0,
+ &zlookup->t_connect);
+
+ zlog_notice("%s: zclient lookup immediate connection scheduled",
+ __func__);
+}
+
+/* Schedule reconnection, if needed. */
+static void zclient_lookup_reconnect(struct zclient *zlookup)
+{
+ if (zlookup->t_connect) {
+ return;
+ }
+
+ zclient_lookup_sched_now(zlookup);
+}
+
+static void zclient_lookup_failed(struct zclient *zlookup)
+{
+ if (zlookup->sock >= 0) {
+ if (close(zlookup->sock)) {
+ zlog_warn("%s: closing fd=%d: errno=%d %s", __func__,
+ zlookup->sock, errno, safe_strerror(errno));
+ }
+ zlookup->sock = -1;
+ }
+
+ zclient_lookup_reconnect(zlookup);
+}
+
+void zclient_lookup_free(void)
+{
+ EVENT_OFF(zlookup_read);
+ zclient_stop(zlookup);
+ zclient_free(zlookup);
+ zlookup = NULL;
+}
+
/* Create the synchronous zclient used for MRIB nexthop lookups and
 * schedule its first connect.  On allocation failure zlookup stays NULL.
 */
void zclient_lookup_new(void)
{
	/* synchronous mode: requests block until zebra replies */
	struct zclient_options options = zclient_options_default;
	options.synchronous = true;

	zlookup = zclient_new(router->master, &options, NULL, 0);
	if (!zlookup) {
		flog_err(EC_LIB_ZAPI_SOCKET, "%s: zclient_new() failure",
			 __func__);
		return;
	}

	zlookup->sock = -1;
	zlookup->t_connect = NULL;
	zlookup->privs = &pimd_privs;

	zclient_lookup_sched_now(zlookup);

	zlog_notice("%s: zclient lookup socket initialized", __func__);
}
+
/* Read one ZEBRA_NEXTHOP_LOOKUP_MRIB reply from the synchronous lookup
 * socket and fill nexthop_tab[] (up to tab_size entries).
 *
 * Returns the number of nexthops stored, or a negative value on error.
 * Nexthops of the "other" address family are resolved where possible
 * (RFC 5549 style) or skipped with a warning.
 */
static int zclient_read_nexthop(struct pim_instance *pim,
				struct zclient *zlookup,
				struct pim_zlookup_nexthop nexthop_tab[],
				const int tab_size, pim_addr addr)
{
	int num_ifindex = 0;
	struct stream *s;
	uint16_t length;
	uint8_t marker;
	uint8_t version;
	vrf_id_t vrf_id;
	uint16_t command = 0;
	struct ipaddr raddr;
	uint8_t distance;
	uint32_t metric;
	int nexthop_num;
	int i, err;

	if (PIM_DEBUG_PIM_NHT_DETAIL)
		zlog_debug("%s: addr=%pPAs(%s)", __func__, &addr,
			   pim->vrf->name);

	s = zlookup->ibuf;

	/* skip any interleaved messages until the lookup reply arrives */
	while (command != ZEBRA_NEXTHOP_LOOKUP_MRIB) {
		stream_reset(s);
		err = zclient_read_header(s, zlookup->sock, &length, &marker,
					  &version, &vrf_id, &command);
		if (err < 0) {
			flog_err(EC_LIB_ZAPI_MISSMATCH,
				 "%s: zclient_read_header() failed", __func__);
			zclient_lookup_failed(zlookup);
			return -1;
		}

		if (command == ZEBRA_ERROR) {
			enum zebra_error_types error;

			zapi_error_decode(s, &error);
			/* Do nothing with it for now */
			return -1;
		}
	}

	/* reply echoes the queried address - sanity check it */
	stream_get_ipaddr(s, &raddr);

	if (raddr.ipa_type != PIM_IPADDR ||
	    pim_addr_cmp(raddr.ipaddr_pim, addr)) {
		zlog_warn("%s: address mismatch: addr=%pPA(%s) raddr=%pIA",
			  __func__, &addr, pim->vrf->name, &raddr);
		/* warning only */
	}

	distance = stream_getc(s);
	metric = stream_getl(s);
	nexthop_num = stream_getc(s);

	if (nexthop_num < 1 || nexthop_num > router->multipath) {
		if (PIM_DEBUG_PIM_NHT_DETAIL)
			zlog_debug("%s: socket %d bad nexthop_num=%d", __func__,
				   zlookup->sock, nexthop_num);
		return -6;
	}

	for (i = 0; i < nexthop_num; ++i) {
		vrf_id_t nexthop_vrf_id;
		enum nexthop_types_t nexthop_type;
		struct in_addr nh_ip4;
		struct in6_addr nh_ip6;
		ifindex_t nh_ifi;

		nexthop_vrf_id = stream_getl(s);
		nexthop_type = stream_getc(s);
		if (num_ifindex >= tab_size) {
			zlog_warn(
				"%s: found too many nexthop ifindexes (%d > %d) for address %pPAs(%s)",
				__func__, (num_ifindex + 1), tab_size, &addr,
				pim->vrf->name);
			return num_ifindex;
		}
		/* distance/metric apply to the route, so they are the same
		 * for every nexthop of this reply
		 */
		nexthop_tab[num_ifindex].protocol_distance = distance;
		nexthop_tab[num_ifindex].route_metric = metric;
		nexthop_tab[num_ifindex].vrf_id = nexthop_vrf_id;
		switch (nexthop_type) {
		case NEXTHOP_TYPE_IFINDEX:
			nexthop_tab[num_ifindex].ifindex = stream_getl(s);
			/*
			 * Connected route (i.e. no nexthop), use
			 * address passed in as PIM nexthop.  This will
			 * allow us to work in cases where we are
			 * trying to find a route for this box.
			 */
			nexthop_tab[num_ifindex].nexthop_addr = addr;
			++num_ifindex;
			break;
		case NEXTHOP_TYPE_IPV4_IFINDEX:
		case NEXTHOP_TYPE_IPV4:
			nh_ip4.s_addr = stream_get_ipv4(s);
			nh_ifi = stream_getl(s);
#if PIM_IPV == 4
			nexthop_tab[num_ifindex].nexthop_addr = nh_ip4;
			nexthop_tab[num_ifindex].ifindex = nh_ifi;
			++num_ifindex;
#else
			/* IPv6 build cannot use an IPv4 nexthop */
			zlog_warn(
				"cannot use IPv4 nexthop %pI4(%d) for IPv6 %pPA",
				&nh_ip4, nh_ifi, &addr);
#endif
			break;
		case NEXTHOP_TYPE_IPV6:
		case NEXTHOP_TYPE_IPV6_IFINDEX:
			stream_get(&nh_ip6, s, sizeof(nh_ip6));
			nh_ifi = stream_getl(s);

#if PIM_IPV == 6
			nexthop_tab[num_ifindex].nexthop_addr = nh_ip6;
			nexthop_tab[num_ifindex].ifindex = nh_ifi;
			++num_ifindex;
#else
			/* RFC 5549 v4-over-v6 nexthop handling:
			 * map the v6 nexthop back to the PIM neighbor's
			 * v4 source address on that interface
			 */

			/*
			 * If we are sending v6 secondary assume we receive v6
			 * secondary
			 */
			struct interface *ifp = if_lookup_by_index(
				nh_ifi,
				nexthop_vrf_id);

			if (!ifp)
				break;

			struct pim_neighbor *nbr;

			if (pim->send_v6_secondary) {
				struct prefix p;

				p.family = AF_INET6;
				p.prefixlen = IPV6_MAX_BITLEN;
				p.u.prefix6 = nh_ip6;

				nbr = pim_neighbor_find_by_secondary(ifp, &p);
			} else
				nbr = pim_neighbor_find_if(ifp);

			if (!nbr)
				break;

			nexthop_tab[num_ifindex].nexthop_addr =
				nbr->source_addr;
			nexthop_tab[num_ifindex].ifindex = nh_ifi;
			++num_ifindex;
#endif
			break;
		case NEXTHOP_TYPE_BLACKHOLE:
			/* do nothing */
			zlog_warn(
				"%s: found non-ifindex nexthop type=%d for address %pPAs(%s)",
				__func__, nexthop_type, &addr, pim->vrf->name);
			break;
		}
	}

	return num_ifindex;
}
+
/* Perform a single synchronous MRIB nexthop lookup for "addr": send the
 * request to zebra and parse the reply into nexthop_tab[].
 *
 * Returns the number of nexthops found, or a negative value on error
 * (socket not connected, VRF not ready, write failure, ...).
 */
static int zclient_lookup_nexthop_once(struct pim_instance *pim,
				       struct pim_zlookup_nexthop nexthop_tab[],
				       const int tab_size, pim_addr addr)
{
	struct stream *s;
	int ret;
	struct ipaddr ipaddr;

	if (PIM_DEBUG_PIM_NHT_DETAIL)
		zlog_debug("%s: addr=%pPAs(%s)", __func__, &addr,
			   pim->vrf->name);

	/* Check socket. */
	if (zlookup->sock < 0) {
		flog_err(EC_LIB_ZAPI_SOCKET,
			 "%s: zclient lookup socket is not connected",
			 __func__);
		zclient_lookup_failed(zlookup);
		return -1;
	}

	if (pim->vrf->vrf_id == VRF_UNKNOWN) {
		zlog_notice(
			"%s: VRF: %s does not fully exist yet, delaying lookup",
			__func__, pim->vrf->name);
		return -1;
	}

	ipaddr.ipa_type = PIM_IPADDR;
	ipaddr.ipaddr_pim = addr;

	/* build and send the lookup request */
	s = zlookup->obuf;
	stream_reset(s);
	zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP_MRIB, pim->vrf->vrf_id);
	stream_put_ipaddr(s, &ipaddr);
	stream_putw_at(s, 0, stream_get_endp(s));

	ret = writen(zlookup->sock, s->data, stream_get_endp(s));
	if (ret < 0) {
		flog_err(
			EC_LIB_SOCKET,
			"%s: writen() failure: %d writing to zclient lookup socket",
			__func__, errno);
		zclient_lookup_failed(zlookup);
		return -2;
	}
	if (ret == 0) {
		flog_err_sys(EC_LIB_SOCKET,
			     "%s: connection closed on zclient lookup socket",
			     __func__);
		zclient_lookup_failed(zlookup);
		return -3;
	}

	return zclient_read_nexthop(pim, zlookup, nexthop_tab, tab_size, addr);
}
+
+void zclient_lookup_read_pipe(struct event *thread)
+{
+ struct zclient *zlookup = EVENT_ARG(thread);
+ struct pim_instance *pim = pim_get_pim_instance(VRF_DEFAULT);
+ struct pim_zlookup_nexthop nexthop_tab[10];
+ pim_addr l = PIMADDR_ANY;
+
+ if (!pim) {
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: Unable to find pim instance", __func__);
+ return;
+ }
+
+ zclient_lookup_nexthop_once(pim, nexthop_tab, 10, l);
+ event_add_timer(router->master, zclient_lookup_read_pipe, zlookup, 60,
+ &zlookup_read);
+}
+
/* Resolve a (possibly recursive) MRIB nexthop for "addr".
 *
 * Repeats the lookup up to max_lookup times, following the returned
 * nexthop address each time, until a nexthop with a valid ifindex
 * (i.e. non-recursive) is found.  The metric/distance reported in
 * nexthop_tab[0] are always those of the ORIGINAL (first) lookup.
 *
 * Returns the number of nexthops on success, negative on failure.
 */
int zclient_lookup_nexthop(struct pim_instance *pim,
			   struct pim_zlookup_nexthop nexthop_tab[],
			   const int tab_size, pim_addr addr,
			   int max_lookup)
{
	int lookup;
	uint32_t route_metric = 0xFFFFFFFF;
	uint8_t protocol_distance = 0xFF;

	pim->nexthop_lookups++;

	for (lookup = 0; lookup < max_lookup; ++lookup) {
		int num_ifindex;
		int first_ifindex;
		pim_addr nexthop_addr;

		num_ifindex = zclient_lookup_nexthop_once(pim, nexthop_tab,
							  tab_size, addr);
		if (num_ifindex < 1) {
			if (PIM_DEBUG_PIM_NHT_DETAIL)
				zlog_debug(
					"%s: lookup=%d/%d: could not find nexthop ifindex for address %pPA(%s)",
					__func__, lookup, max_lookup, &addr,
					pim->vrf->name);
			return -1;
		}

		if (lookup < 1) {
			/* this is the non-recursive lookup - save original
			 * metric/distance */
			route_metric = nexthop_tab[0].route_metric;
			protocol_distance = nexthop_tab[0].protocol_distance;
		}

		/*
		 * FIXME: Non-recursive nexthop ensured only for first ifindex.
		 * However, recursive route lookup should really be fixed in
		 * zebra daemon.
		 * See also TODO T24.
		 *
		 * So Zebra for NEXTHOP_TYPE_IPV4 returns the ifindex now since
		 * it was being stored.  This Doesn't solve all cases of
		 * recursive lookup but for the most common types it does.
		 */
		first_ifindex = nexthop_tab[0].ifindex;
		nexthop_addr = nexthop_tab[0].nexthop_addr;
		if (first_ifindex > 0) {
			/* found: first ifindex is non-recursive nexthop */

			if (lookup > 0) {
				/* Report non-recursive success after first
				 * lookup */
				if (PIM_DEBUG_PIM_NHT)
					zlog_debug(
						"%s: lookup=%d/%d: found non-recursive ifindex=%d for address %pPA(%s) dist=%d met=%d",
						__func__, lookup, max_lookup,
						first_ifindex, &addr,
						pim->vrf->name,
						nexthop_tab[0]
							.protocol_distance,
						nexthop_tab[0].route_metric);

				/* use last address as nexthop address */
				nexthop_tab[0].nexthop_addr = addr;

				/* report original route metric/distance */
				nexthop_tab[0].route_metric = route_metric;
				nexthop_tab[0].protocol_distance =
					protocol_distance;
			}

			return num_ifindex;
		}

		if (PIM_DEBUG_PIM_NHT)
			zlog_debug(
				"%s: lookup=%d/%d: zebra returned recursive nexthop %pPAs for address %pPA(%s) dist=%d met=%d",
				__func__, lookup, max_lookup, &nexthop_addr,
				&addr, pim->vrf->name,
				nexthop_tab[0].protocol_distance,
				nexthop_tab[0].route_metric);

		addr = nexthop_addr; /* use nexthop
					addr for recursive lookup */

	} /* for (max_lookup) */

	if (PIM_DEBUG_PIM_NHT)
		zlog_warn(
			"%s: lookup=%d/%d: failure searching recursive nexthop ifindex for address %pPA(%s)",
			__func__, lookup, max_lookup, &addr, pim->vrf->name);

	return -2;
}
+
+void pim_zlookup_show_ip_multicast(struct vty *vty)
+{
+ vty_out(vty, "Zclient lookup socket: ");
+ if (zlookup) {
+ vty_out(vty, "%d failures=%d\n", zlookup->sock, zlookup->fail);
+ } else {
+ vty_out(vty, "<null zclient>\n");
+ }
+}
+
/* Synchronously query zebra for the kernel (S,G) mroute statistics of the
 * given channel oil and store the "last used" counter in c_oil->cc.lastused.
 *
 * Returns 0 on success, negative on error (no IIF, socket failure, or a
 * reply for a different (S,G)).
 */
int pim_zlookup_sg_statistics(struct channel_oil *c_oil)
{
	struct stream *s = zlookup->obuf;
	uint16_t command = 0;
	unsigned long long lastused;
	pim_sgaddr sg;
	int count = 0;
	int ret;
	pim_sgaddr more = {};
	struct interface *ifp =
		pim_if_find_by_vif_index(c_oil->pim, *oil_incoming_vif(c_oil));

	if (PIM_DEBUG_ZEBRA) {
		more.src = *oil_origin(c_oil);
		more.grp = *oil_mcastgrp(c_oil);
		zlog_debug("Sending Request for New Channel Oil Information%pSG VIIF %d(%s:%s)",
			   &more, *oil_incoming_vif(c_oil),
			   ifp ? ifp->name : "Unknown", c_oil->pim->vrf->name);
	}

	/* no incoming interface -> nothing to query */
	if (!ifp)
		return -1;

	/* request format: AF, source, group, IIF ifindex */
	stream_reset(s);
	zclient_create_header(s, ZEBRA_IPMR_ROUTE_STATS,
			      c_oil->pim->vrf->vrf_id);
	stream_putl(s, PIM_AF);
	stream_write(s, oil_origin(c_oil), sizeof(pim_addr));
	stream_write(s, oil_mcastgrp(c_oil), sizeof(pim_addr));
	stream_putl(s, ifp->ifindex);
	stream_putw_at(s, 0, stream_get_endp(s));

	count = stream_get_endp(s);
	ret = writen(zlookup->sock, s->data, count);
	if (ret <= 0) {
		flog_err(
			EC_LIB_SOCKET,
			"%s: writen() failure: %d writing to zclient lookup socket",
			__func__, errno);
		return -1;
	}

	s = zlookup->ibuf;

	/* skip any interleaved messages until the stats reply arrives */
	while (command != ZEBRA_IPMR_ROUTE_STATS) {
		int err;
		uint16_t length = 0;
		vrf_id_t vrf_id;
		uint8_t marker;
		uint8_t version;

		stream_reset(s);
		err = zclient_read_header(s, zlookup->sock, &length, &marker,
					  &version, &vrf_id, &command);
		if (err < 0) {
			flog_err(EC_LIB_ZAPI_MISSMATCH,
				 "%s: zclient_read_header() failed", __func__);
			zclient_lookup_failed(zlookup);
			return -1;
		}
	}

	/* the reply echoes the (S,G); make sure it matches what we asked */
	stream_get(&sg.src, s, sizeof(pim_addr));
	stream_get(&sg.grp, s, sizeof(pim_addr));

	more.src = *oil_origin(c_oil);
	more.grp = *oil_mcastgrp(c_oil);
	if (pim_sgaddr_cmp(sg, more)) {
		if (PIM_DEBUG_ZEBRA)
			flog_err(
				EC_LIB_ZAPI_MISSMATCH,
				"%s: Received wrong %pSG(%s) information requested",
				__func__, &more, c_oil->pim->vrf->name);
		zclient_lookup_failed(zlookup);
		return -3;
	}

	stream_get(&lastused, s, sizeof(lastused));
	/* signed success value from netlink_talk; currently unused */
	(void)stream_getl(s);

	c_oil->cc.lastused = lastused;

	return 0;
}
diff --git a/pimd/pim_zlookup.h b/pimd/pim_zlookup.h
new file mode 100644
index 0000000..ee2dd20
--- /dev/null
+++ b/pimd/pim_zlookup.h
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIM_ZLOOKUP_H
+#define PIM_ZLOOKUP_H
+
+#include <zebra.h>
+
+#include "zclient.h"
+
+#define PIM_NEXTHOP_LOOKUP_MAX (3) /* max. recursive route lookup */
+
+struct channel_oil;
+
+/* One next-hop entry returned by the synchronous zebra lookup socket. */
+struct pim_zlookup_nexthop {
+ vrf_id_t vrf_id; /* VRF the route was resolved in */
+ pim_addr nexthop_addr; /* next-hop gateway address */
+ ifindex_t ifindex; /* outgoing interface index */
+ uint32_t route_metric; /* metric of the resolving route */
+ uint8_t protocol_distance; /* administrative distance of the route */
+};
+
+void zclient_lookup_new(void);
+void zclient_lookup_free(void);
+
+int zclient_lookup_nexthop(struct pim_instance *pim,
+ struct pim_zlookup_nexthop nexthop_tab[],
+ const int tab_size, pim_addr addr,
+ int max_lookup);
+
+void pim_zlookup_show_ip_multicast(struct vty *vty);
+
+int pim_zlookup_sg_statistics(struct channel_oil *c_oil);
+#endif /* PIM_ZLOOKUP_H */
diff --git a/pimd/pim_zpthread.c b/pimd/pim_zpthread.c
new file mode 100644
index 0000000..d6b2621
--- /dev/null
+++ b/pimd/pim_zpthread.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+#include <lib/log.h>
+#include <lib/lib_errors.h>
+
+#include "pimd.h"
+#include "pim_instance.h"
+#include "pim_mlag.h"
+#include "pim_zebra.h"
+
+extern struct zclient *zclient;
+
+#define PIM_MLAG_POST_LIMIT 100
+
+int32_t mlag_bulk_cnt;
+
+/*
+ * Write an MLAG message header into router->mlag_stream:
+ * 32-bit message type, 16-bit payload length, 16-bit message count.
+ * MROUTE add/del types are rewritten to their *_BULK equivalents; for
+ * bulked messages the length/count fields are patched again just before
+ * the stream is flushed to zebra (see pim_mlag_zebra_flush_buffer()).
+ */
+static void pim_mlag_zebra_fill_header(enum mlag_msg_type msg_type)
+{
+ uint32_t fill_msg_type = msg_type;
+ uint16_t data_len = 0;
+ uint16_t msg_cnt = 1;
+
+ switch (msg_type) {
+ case MLAG_REGISTER:
+ case MLAG_DEREGISTER:
+ data_len = sizeof(struct mlag_msg);
+ break;
+ case MLAG_MROUTE_ADD:
+ data_len = sizeof(struct mlag_mroute_add);
+ fill_msg_type = MLAG_MROUTE_ADD_BULK;
+ break;
+ case MLAG_MROUTE_DEL:
+ data_len = sizeof(struct mlag_mroute_del);
+ fill_msg_type = MLAG_MROUTE_DEL_BULK;
+ break;
+ case MLAG_MSG_NONE:
+ return;
+ case MLAG_STATUS_UPDATE:
+ case MLAG_DUMP:
+ case MLAG_MROUTE_ADD_BULK:
+ case MLAG_MROUTE_DEL_BULK:
+ case MLAG_PIM_CFG_DUMP:
+ case MLAG_VXLAN_UPDATE:
+ case MLAG_PEER_FRR_STATUS:
+ data_len = 0;
+ break;
+ }
+
+ stream_reset(router->mlag_stream);
+ /* Add header */
+ stream_putl(router->mlag_stream, fill_msg_type);
+ /*
+ * In case of Bulk actual size & msg_cnt will be updated
+ * just before writing onto zebra
+ */
+ stream_putw(router->mlag_stream, data_len);
+ stream_putw(router->mlag_stream, msg_cnt);
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(":%s: msg_type: %d/%d len %d",
+ __func__, msg_type, fill_msg_type, data_len);
+}
+
+/*
+ * Flush the accumulated MLAG stream to zebra, then reset the stream and
+ * the bulk counter.  If more than one message was bulked, first patch the
+ * header's payload-length (stream offset 4) and message-count (offset 6)
+ * fields to reflect the number of bulked add/del entries.
+ */
+static void pim_mlag_zebra_flush_buffer(void)
+{
+ uint32_t msg_type;
+
+ /* Stream held bulked messages; update the header */
+ if (mlag_bulk_cnt > 1) {
+ /*
+ * No need to reset the pointer, below api reads from data[0]
+ */
+ STREAM_GETL(router->mlag_stream, msg_type);
+ if (msg_type == MLAG_MROUTE_ADD_BULK) {
+ stream_putw_at(
+ router->mlag_stream, 4,
+ (mlag_bulk_cnt * sizeof(struct mlag_mroute_add)));
+ stream_putw_at(router->mlag_stream, 6, mlag_bulk_cnt);
+ } else if (msg_type == MLAG_MROUTE_DEL_BULK) {
+ stream_putw_at(
+ router->mlag_stream, 4,
+ (mlag_bulk_cnt * sizeof(struct mlag_mroute_del)));
+ stream_putw_at(router->mlag_stream, 6, mlag_bulk_cnt);
+ } else {
+ flog_err(EC_LIB_ZAPI_ENCODE,
+ "unknown bulk message type %d bulk_count %d",
+ msg_type, mlag_bulk_cnt);
+ stream_reset(router->mlag_stream);
+ mlag_bulk_cnt = 0;
+ return;
+ }
+ }
+
+ zclient_send_mlag_data(zclient, router->mlag_stream);
+/* STREAM_GETL jumps here if the stream is too short to read the type */
+stream_failure:
+ stream_reset(router->mlag_stream);
+ mlag_bulk_cnt = 0;
+}
+
+/*
+ * Only mroute ADD & DELETE messages are bulked.
+ * The buffer is flushed when the current message type differs from the
+ * previous one, or when the type is not a route add/delete; the very
+ * first message of a run just starts a new bulk.
+ */
+
+/*
+ * Decide whether the pending MLAG stream must be flushed before the
+ * current message is appended, maintaining mlag_bulk_cnt along the way.
+ */
+static void pim_mlag_zebra_check_for_buffer_flush(uint32_t curr_msg_type,
+ uint32_t prev_msg_type)
+{
+ /* First Message, keep bulking */
+ if (prev_msg_type == MLAG_MSG_NONE) {
+ mlag_bulk_cnt = 1;
+ return;
+ }
+
+ /* msg type is route add & delete, keep bulking */
+ if (curr_msg_type == prev_msg_type
+ && (curr_msg_type == MLAG_MROUTE_ADD
+ || curr_msg_type == MLAG_MROUTE_DEL)) {
+ mlag_bulk_cnt++;
+ return;
+ }
+
+ /* Type changed or not bulkable: push what we have to zebra */
+ pim_mlag_zebra_flush_buffer();
+}
+
+/*
+ * This thread reads the client's data from the global queue, encodes it
+ * with protobuf and passes it on to the MLAG socket.
+ */
+/*
+ * Event handler: drain up to PIM_MLAG_POST_LIMIT messages from the MLAG
+ * FIFO into router->mlag_stream (bulking consecutive add/del messages),
+ * flush to zebra, and reschedule itself if the FIFO still has entries.
+ */
+static void pim_mlag_zthread_handler(struct event *event)
+{
+ struct stream *read_s;
+ uint32_t wr_count = 0;
+ uint32_t prev_msg_type = MLAG_MSG_NONE;
+ uint32_t curr_msg_type = MLAG_MSG_NONE;
+
+ router->zpthread_mlag_write = NULL;
+ wr_count = stream_fifo_count_safe(router->mlag_fifo);
+
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(":%s: Processing MLAG write, %d messages in queue",
+ __func__, wr_count);
+
+ if (wr_count == 0)
+ return;
+
+ for (wr_count = 0; wr_count < PIM_MLAG_POST_LIMIT; wr_count++) {
+ /* FIFO is empty; wait for a message to be added */
+ if (stream_fifo_count_safe(router->mlag_fifo) == 0)
+ break;
+
+ read_s = stream_fifo_pop_safe(router->mlag_fifo);
+ if (!read_s) {
+ zlog_debug(":%s: Got a NULL Messages, some thing wrong",
+ __func__);
+ break;
+ }
+ /*
+ * NOTE(review): on STREAM_GETL failure control jumps to
+ * stream_failure below without freeing read_s — confirm
+ * whether this leak path is reachable in practice.
+ */
+ STREAM_GETL(read_s, curr_msg_type);
+ /*
+ * Check for Buffer Overflow,
+ * MLAG Can't process more than 'PIM_MLAG_BUF_LIMIT' bytes
+ */
+ if (router->mlag_stream->endp + read_s->endp + ZEBRA_HEADER_SIZE
+ > MLAG_BUF_LIMIT)
+ pim_mlag_zebra_flush_buffer();
+
+ pim_mlag_zebra_check_for_buffer_flush(curr_msg_type,
+ prev_msg_type);
+
+ /*
+ * First message to Buffer, fill the Header
+ */
+ if (router->mlag_stream->endp == 0)
+ pim_mlag_zebra_fill_header(curr_msg_type);
+
+ /*
+ * add the data now (getp already sits past the type word)
+ */
+ stream_put(router->mlag_stream, read_s->data + read_s->getp,
+ read_s->endp - read_s->getp);
+
+ stream_free(read_s);
+ prev_msg_type = curr_msg_type;
+ }
+
+stream_failure:
+ /*
+ * we are here because
+ * 1. the queue might be empty
+ * 2. we crossed the max queue-read limit
+ * In any case flush the buffer towards zebra
+ */
+ pim_mlag_zebra_flush_buffer();
+
+ if (wr_count >= PIM_MLAG_POST_LIMIT)
+ pim_mlag_signal_zpthread();
+}
+
+
+/*
+ * Schedule the MLAG write handler on the main event loop (no-op if the
+ * event loop is not yet initialized).  Always returns 0.
+ */
+int pim_mlag_signal_zpthread(void)
+{
+ if (router->master) {
+ if (PIM_DEBUG_MLAG)
+ zlog_debug(":%s: Scheduling PIM MLAG write Thread",
+ __func__);
+ event_add_event(router->master, pim_mlag_zthread_handler, NULL,
+ 0, &router->zpthread_mlag_write);
+ }
+ return (0);
+}
diff --git a/pimd/pimd.c b/pimd/pimd.c
new file mode 100644
index 0000000..db61974
--- /dev/null
+++ b/pimd/pimd.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "memory.h"
+#include "if.h"
+#include "prefix.h"
+#include "vty.h"
+#include "plist.h"
+#include "hash.h"
+#include "jhash.h"
+#include "vrf.h"
+#include "lib_errors.h"
+#include "bfd.h"
+
+#include "pimd.h"
+#if PIM_IPV == 4
+#include "pim_cmd.h"
+#else
+#include "pim6_cmd.h"
+#endif
+#include "pim_str.h"
+#include "pim_oil.h"
+#include "pim_pim.h"
+#include "pim_ssmpingd.h"
+#include "pim_static.h"
+#include "pim_rp.h"
+#include "pim_ssm.h"
+#include "pim_vxlan.h"
+#include "pim_zlookup.h"
+#include "pim_zebra.h"
+#include "pim_mlag.h"
+
+#if MAXVIFS > 256
+CPP_NOTICE("Work needs to be done to make this work properly via the pim mroute socket\n");
+#endif /* MAXVIFS > 256 */
+
+#if PIM_IPV == 4
+const char *const PIM_ALL_SYSTEMS = MCAST_ALL_SYSTEMS;
+const char *const PIM_ALL_ROUTERS = MCAST_ALL_ROUTERS;
+const char *const PIM_ALL_PIM_ROUTERS = MCAST_ALL_PIM_ROUTERS;
+const char *const PIM_ALL_IGMP_ROUTERS = MCAST_ALL_IGMP_ROUTERS;
+#else
+const char *const PIM_ALL_SYSTEMS = "ff02::1";
+const char *const PIM_ALL_ROUTERS = "ff02::2";
+const char *const PIM_ALL_PIM_ROUTERS = "ff02::d";
+const char *const PIM_ALL_IGMP_ROUTERS = "ff02::16";
+#endif
+
+DEFINE_MTYPE_STATIC(PIMD, ROUTER, "PIM Router information");
+
+struct pim_router *router = NULL;
+pim_addr qpim_all_pim_routers_addr;
+
+/*
+ * Prefix-list change hook: propagate the updated prefix-list to the RP,
+ * SSM and SPT-switchover machinery of every PIM-enabled VRF.
+ */
+void pim_prefix_list_update(struct prefix_list *plist)
+{
+ struct pim_instance *pim;
+ struct vrf *vrf;
+
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ pim = vrf->info;
+ if (!pim) /* VRF without a PIM instance */
+ continue;
+
+ pim_rp_prefix_list_update(pim, plist);
+ pim_ssm_prefix_list_update(pim, plist);
+ pim_upstream_spt_prefix_list_update(pim, plist);
+ }
+}
+
+/* Release route-map state and the synchronous zebra lookup client. */
+static void pim_free(void)
+{
+ pim_route_map_terminate();
+
+ zclient_lookup_free();
+}
+
+/*
+ * Allocate and initialize the global PIM router object with protocol
+ * defaults (timers, ECMP width, infinite assert metric, register timers).
+ * Must run before any PIM instance is created.
+ */
+void pim_router_init(void)
+{
+ router = XCALLOC(MTYPE_ROUTER, sizeof(*router));
+
+ router->debugs = 0;
+ router->master = frr_init();
+ router->t_periodic = PIM_DEFAULT_T_PERIODIC;
+ router->multipath = MULTIPATH_NUM;
+
+ /*
+ RFC 4601: 4.6.3. Assert Metrics
+
+ assert_metric
+ infinite_assert_metric() {
+ return {1,infinity,infinity,0}
+ }
+ */
+ router->infinite_assert_metric.rpt_bit_flag = 1;
+ router->infinite_assert_metric.metric_preference =
+ PIM_ASSERT_METRIC_PREFERENCE_MAX;
+ router->infinite_assert_metric.route_metric =
+ PIM_ASSERT_ROUTE_METRIC_MAX;
+ router->infinite_assert_metric.ip_address = PIMADDR_ANY;
+ router->rpf_cache_refresh_delay_msec = 50;
+ router->register_suppress_time = PIM_REGISTER_SUPPRESSION_TIME_DEFAULT;
+ router->packet_process = PIM_DEFAULT_PACKET_PROCESS;
+ router->register_probe_time = PIM_REGISTER_PROBE_TIME_DEFAULT;
+ router->vrf_id = VRF_DEFAULT;
+ router->pim_mlag_intf_cnt = 0;
+ router->connected_to_mlag = false;
+}
+
+/* Free the global router object allocated by pim_router_init(). */
+void pim_router_terminate(void)
+{
+ XFREE(MTYPE_ROUTER, router);
+}
+
+/*
+ * One-time protocol initialization: resolve the all-PIM-routers group
+ * address for the active address family and register the CLI commands.
+ * Aborts (assert) if the compiled-in group literal cannot be parsed.
+ */
+void pim_init(void)
+{
+ if (!inet_pton(PIM_AF, PIM_ALL_PIM_ROUTERS,
+ &qpim_all_pim_routers_addr)) {
+ flog_err(
+ EC_LIB_SOCKET,
+ "%s %s: could not solve %s to group address: errno=%d: %s",
+ __FILE__, __func__, PIM_ALL_PIM_ROUTERS, errno,
+ safe_strerror(errno));
+ assert(0);
+ return; /* unreachable; keeps non-assert builds defined */
+ }
+
+ pim_cmd_init();
+}
+
+/*
+ * Orderly daemon shutdown: stop BFD integration, unhook prefix-lists,
+ * tear down VXLAN/VRF state, stop and free the zebra client, then
+ * release the remaining PIM and MLAG resources and finish FRR.
+ */
+void pim_terminate(void)
+{
+ struct zclient *zclient;
+
+ bfd_protocol_integration_set_shutdown(true);
+
+ /* reverse prefix_list_init */
+ prefix_list_add_hook(NULL);
+ prefix_list_delete_hook(NULL);
+ prefix_list_reset();
+
+ pim_vxlan_terminate();
+ pim_vrf_terminate();
+
+ zclient = pim_zebra_zclient_get();
+ if (zclient) {
+ zclient_stop(zclient);
+ zclient_free(zclient);
+ }
+
+ pim_free();
+ pim_mlag_terminate();
+ pim_router_terminate();
+
+ frr_fini();
+}
diff --git a/pimd/pimd.h b/pimd/pimd.h
new file mode 100644
index 0000000..9ec84fc
--- /dev/null
+++ b/pimd/pimd.h
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#ifndef PIMD_H
+#define PIMD_H
+
+#include <stdint.h>
+#include "zebra.h"
+#include "libfrr.h"
+#include "prefix.h"
+#include "vty.h"
+#include "plist.h"
+
+#include "pim_addr.h"
+#include "pim_str.h"
+#include "pim_memory.h"
+#include "pim_assert.h"
+
+#define PIMD_VTY_PORT 2611
+#define PIM6D_VTY_PORT 2622
+
+#define PIM_IP_PROTO_IGMP (2)
+#define PIM_IP_PROTO_PIM (103)
+#define PIM_IGMP_MIN_LEN (8)
+
+#define PIM_ENFORCE_LOOPFREE_MFC
+
+/*
+ * PIM MSG Header Format
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |PIM Ver| Type | Reserved | Checksum |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+#define PIM_MSG_HEADER_LEN (4)
+#define PIM_PIM_MIN_LEN PIM_MSG_HEADER_LEN
+
+#define PIM_ENCODED_IPV4_UCAST_SIZE (6)
+#define PIM_ENCODED_IPV4_GROUP_SIZE (8)
+#define PIM_ENCODED_IPV4_SOURCE_SIZE (8)
+
+/*
+ * J/P Message Format, Group Header
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Upstream Neighbor Address (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | Num groups | Holdtime |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Multicast Group Address 1 (Encoded-Group format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Number of Joined Sources | Number of Pruned Sources |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+#define PIM_JP_GROUP_HEADER_SIZE \
+ (PIM_ENCODED_IPV4_UCAST_SIZE + 1 + 1 + 2 + PIM_ENCODED_IPV4_GROUP_SIZE \
+ + 2 + 2)
+
+#define PIM_PROTO_VERSION (2)
+
+#define MCAST_ALL_SYSTEMS "224.0.0.1"
+#define MCAST_ALL_ROUTERS "224.0.0.2"
+#define MCAST_ALL_PIM_ROUTERS "224.0.0.13"
+#define MCAST_ALL_IGMP_ROUTERS "224.0.0.22"
+
+#define PIM_FORCE_BOOLEAN(expr) ((expr) != 0)
+
+#define PIM_NET_INADDR_ANY (htonl(INADDR_ANY))
+
+#define PIM_MASK_PIM_EVENTS (1 << 0)
+#define PIM_MASK_PIM_EVENTS_DETAIL (1 << 1)
+#define PIM_MASK_PIM_PACKETS (1 << 2)
+#define PIM_MASK_PIM_PACKETDUMP_SEND (1 << 3)
+#define PIM_MASK_PIM_PACKETDUMP_RECV (1 << 4)
+#define PIM_MASK_PIM_TRACE (1 << 5)
+#define PIM_MASK_PIM_TRACE_DETAIL (1 << 6)
+#define PIM_MASK_GM_EVENTS (1 << 7)
+#define PIM_MASK_GM_PACKETS (1 << 8)
+#define PIM_MASK_GM_TRACE (1 << 9)
+#define PIM_MASK_GM_TRACE_DETAIL (1 << 10)
+#define PIM_MASK_ZEBRA (1 << 11)
+#define PIM_MASK_SSMPINGD (1 << 12)
+#define PIM_MASK_MROUTE (1 << 13)
+#define PIM_MASK_MROUTE_DETAIL (1 << 14)
+#define PIM_MASK_PIM_HELLO (1 << 15)
+#define PIM_MASK_PIM_J_P (1 << 16)
+#define PIM_MASK_STATIC (1 << 17)
+#define PIM_MASK_PIM_REG (1 << 18)
+#define PIM_MASK_MSDP_EVENTS (1 << 19)
+#define PIM_MASK_MSDP_PACKETS (1 << 20)
+#define PIM_MASK_MSDP_INTERNAL (1 << 21)
+#define PIM_MASK_PIM_NHT (1 << 22)
+#define PIM_MASK_PIM_NHT_DETAIL (1 << 23)
+#define PIM_MASK_PIM_NHT_RP (1 << 24)
+#define PIM_MASK_MTRACE (1 << 25)
+#define PIM_MASK_VXLAN (1 << 26)
+#define PIM_MASK_BSM_PROC (1 << 27)
+#define PIM_MASK_MLAG (1 << 28)
+/* Remember 32 bits!!! */
+
+/* PIM error codes */
+#define PIM_SUCCESS 0
+#define PIM_GROUP_BAD_ADDRESS -2
+#define PIM_GROUP_OVERLAP -3
+#define PIM_GROUP_PFXLIST_OVERLAP -4
+#define PIM_RP_BAD_ADDRESS -5
+#define PIM_RP_NO_PATH -6
+#define PIM_RP_NOT_FOUND -7
+#define PIM_RP_PFXLIST_IN_USE -8
+#define PIM_IFACE_NOT_FOUND -9
+#define PIM_UPDATE_SOURCE_DUP -10
+#define PIM_GROUP_BAD_ADDR_MASK_COMBO -11
+
+extern const char *const PIM_ALL_SYSTEMS;
+extern const char *const PIM_ALL_ROUTERS;
+extern const char *const PIM_ALL_PIM_ROUTERS;
+extern const char *const PIM_ALL_IGMP_ROUTERS;
+
+extern struct zebra_privs_t pimd_privs;
+extern pim_addr qpim_all_pim_routers_addr;
+extern uint8_t qpim_ecmp_enable;
+extern uint8_t qpim_ecmp_rebalance_enable;
+
+#define PIM_DEFAULT_PACKET_PROCESS 3
+
+#define PIM_JP_HOLDTIME (router->t_periodic * 7 / 2)
+
+/*
+ * Register-Stop Timer (RST(S,G))
+ * Default values
+ */
+#define PIM_REGISTER_SUPPRESSION_TIME_DEFAULT (60)
+#define PIM_REGISTER_PROBE_TIME_DEFAULT (5)
+
+#define PIM_DEBUG_PIM_EVENTS (router->debugs & PIM_MASK_PIM_EVENTS)
+#define PIM_DEBUG_PIM_EVENTS_DETAIL \
+ (router->debugs & (PIM_MASK_PIM_EVENTS_DETAIL | PIM_MASK_PIM_EVENTS))
+#define PIM_DEBUG_PIM_PACKETS (router->debugs & PIM_MASK_PIM_PACKETS)
+#define PIM_DEBUG_PIM_PACKETDUMP_SEND \
+ (router->debugs & PIM_MASK_PIM_PACKETDUMP_SEND)
+#define PIM_DEBUG_PIM_PACKETDUMP_RECV \
+ (router->debugs & PIM_MASK_PIM_PACKETDUMP_RECV)
+#define PIM_DEBUG_PIM_TRACE \
+ (router->debugs & (PIM_MASK_PIM_TRACE | PIM_MASK_PIM_TRACE_DETAIL))
+#define PIM_DEBUG_PIM_TRACE_DETAIL \
+ (router->debugs & PIM_MASK_PIM_TRACE_DETAIL)
+#define PIM_DEBUG_GM_EVENTS (router->debugs & PIM_MASK_GM_EVENTS)
+#define PIM_DEBUG_GM_PACKETS (router->debugs & PIM_MASK_GM_PACKETS)
+#define PIM_DEBUG_GM_TRACE \
+ (router->debugs & (PIM_MASK_GM_TRACE | PIM_MASK_GM_TRACE_DETAIL))
+#define PIM_DEBUG_GM_TRACE_DETAIL (router->debugs & PIM_MASK_GM_TRACE_DETAIL)
+#define PIM_DEBUG_ZEBRA (router->debugs & PIM_MASK_ZEBRA)
+#define PIM_DEBUG_MLAG (router->debugs & PIM_MASK_MLAG)
+#define PIM_DEBUG_SSMPINGD (router->debugs & PIM_MASK_SSMPINGD)
+#define PIM_DEBUG_MROUTE \
+ (router->debugs & (PIM_MASK_MROUTE | PIM_MASK_MROUTE_DETAIL))
+#define PIM_DEBUG_MROUTE_DETAIL (router->debugs & PIM_MASK_MROUTE_DETAIL)
+#define PIM_DEBUG_PIM_HELLO (router->debugs & PIM_MASK_PIM_HELLO)
+#define PIM_DEBUG_PIM_J_P (router->debugs & PIM_MASK_PIM_J_P)
+#define PIM_DEBUG_PIM_REG (router->debugs & PIM_MASK_PIM_REG)
+#define PIM_DEBUG_STATIC (router->debugs & PIM_MASK_STATIC)
+#define PIM_DEBUG_MSDP_EVENTS (router->debugs & PIM_MASK_MSDP_EVENTS)
+#define PIM_DEBUG_MSDP_PACKETS (router->debugs & PIM_MASK_MSDP_PACKETS)
+#define PIM_DEBUG_MSDP_INTERNAL (router->debugs & PIM_MASK_MSDP_INTERNAL)
+#define PIM_DEBUG_PIM_NHT (router->debugs & PIM_MASK_PIM_NHT)
+#define PIM_DEBUG_PIM_NHT_DETAIL (router->debugs & PIM_MASK_PIM_NHT_DETAIL)
+#define PIM_DEBUG_PIM_NHT_RP (router->debugs & PIM_MASK_PIM_NHT_RP)
+#define PIM_DEBUG_MTRACE (router->debugs & PIM_MASK_MTRACE)
+#define PIM_DEBUG_VXLAN (router->debugs & PIM_MASK_VXLAN)
+#define PIM_DEBUG_BSM (router->debugs & PIM_MASK_BSM_PROC)
+
+#define PIM_DEBUG_EVENTS \
+ (router->debugs & (PIM_MASK_PIM_EVENTS | PIM_MASK_GM_EVENTS | \
+ PIM_MASK_MSDP_EVENTS | PIM_MASK_BSM_PROC))
+#define PIM_DEBUG_PACKETS \
+ (router->debugs & \
+ (PIM_MASK_PIM_PACKETS | PIM_MASK_GM_PACKETS | PIM_MASK_MSDP_PACKETS))
+#define PIM_DEBUG_TRACE \
+ (router->debugs & (PIM_MASK_PIM_TRACE | PIM_MASK_GM_TRACE))
+
+#define PIM_DO_DEBUG_PIM_EVENTS (router->debugs |= PIM_MASK_PIM_EVENTS)
+#define PIM_DO_DEBUG_PIM_PACKETS (router->debugs |= PIM_MASK_PIM_PACKETS)
+#define PIM_DO_DEBUG_PIM_PACKETDUMP_SEND \
+ (router->debugs |= PIM_MASK_PIM_PACKETDUMP_SEND)
+#define PIM_DO_DEBUG_PIM_PACKETDUMP_RECV \
+ (router->debugs |= PIM_MASK_PIM_PACKETDUMP_RECV)
+#define PIM_DO_DEBUG_PIM_TRACE (router->debugs |= PIM_MASK_PIM_TRACE)
+#define PIM_DO_DEBUG_PIM_TRACE_DETAIL \
+ (router->debugs |= PIM_MASK_PIM_TRACE_DETAIL)
+#define PIM_DO_DEBUG_GM_EVENTS (router->debugs |= PIM_MASK_GM_EVENTS)
+#define PIM_DO_DEBUG_GM_PACKETS (router->debugs |= PIM_MASK_GM_PACKETS)
+#define PIM_DO_DEBUG_GM_TRACE (router->debugs |= PIM_MASK_GM_TRACE)
+#define PIM_DO_DEBUG_GM_TRACE_DETAIL \
+ (router->debugs |= PIM_MASK_GM_TRACE_DETAIL)
+#define PIM_DO_DEBUG_ZEBRA (router->debugs |= PIM_MASK_ZEBRA)
+#define PIM_DO_DEBUG_MLAG (router->debugs |= PIM_MASK_MLAG)
+#define PIM_DO_DEBUG_SSMPINGD (router->debugs |= PIM_MASK_SSMPINGD)
+#define PIM_DO_DEBUG_MROUTE (router->debugs |= PIM_MASK_MROUTE)
+#define PIM_DO_DEBUG_MROUTE_DETAIL (router->debugs |= PIM_MASK_MROUTE_DETAIL)
+#define PIM_DO_DEBUG_BSM (router->debugs |= PIM_MASK_BSM_PROC)
+#define PIM_DO_DEBUG_PIM_HELLO (router->debugs |= PIM_MASK_PIM_HELLO)
+#define PIM_DO_DEBUG_PIM_J_P (router->debugs |= PIM_MASK_PIM_J_P)
+#define PIM_DO_DEBUG_PIM_REG (router->debugs |= PIM_MASK_PIM_REG)
+#define PIM_DO_DEBUG_STATIC (router->debugs |= PIM_MASK_STATIC)
+#define PIM_DO_DEBUG_MSDP_EVENTS (router->debugs |= PIM_MASK_MSDP_EVENTS)
+#define PIM_DO_DEBUG_MSDP_PACKETS (router->debugs |= PIM_MASK_MSDP_PACKETS)
+#define PIM_DO_DEBUG_MSDP_INTERNAL (router->debugs |= PIM_MASK_MSDP_INTERNAL)
+#define PIM_DO_DEBUG_PIM_NHT (router->debugs |= PIM_MASK_PIM_NHT)
+#define PIM_DO_DEBUG_PIM_NHT_DETAIL (router->debugs |= PIM_MASK_PIM_NHT_DETAIL)
+#define PIM_DO_DEBUG_PIM_NHT_RP (router->debugs |= PIM_MASK_PIM_NHT_RP)
+#define PIM_DO_DEBUG_MTRACE (router->debugs |= PIM_MASK_MTRACE)
+#define PIM_DO_DEBUG_VXLAN (router->debugs |= PIM_MASK_VXLAN)
+
+#define PIM_DONT_DEBUG_PIM_EVENTS (router->debugs &= ~PIM_MASK_PIM_EVENTS)
+#define PIM_DONT_DEBUG_PIM_PACKETS (router->debugs &= ~PIM_MASK_PIM_PACKETS)
+#define PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND \
+ (router->debugs &= ~PIM_MASK_PIM_PACKETDUMP_SEND)
+#define PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV \
+ (router->debugs &= ~PIM_MASK_PIM_PACKETDUMP_RECV)
+#define PIM_DONT_DEBUG_PIM_TRACE (router->debugs &= ~PIM_MASK_PIM_TRACE)
+#define PIM_DONT_DEBUG_PIM_TRACE_DETAIL \
+ (router->debugs &= ~PIM_MASK_PIM_TRACE_DETAIL)
+#define PIM_DONT_DEBUG_GM_EVENTS (router->debugs &= ~PIM_MASK_GM_EVENTS)
+#define PIM_DONT_DEBUG_GM_PACKETS (router->debugs &= ~PIM_MASK_GM_PACKETS)
+#define PIM_DONT_DEBUG_GM_TRACE (router->debugs &= ~PIM_MASK_GM_TRACE)
+#define PIM_DONT_DEBUG_GM_TRACE_DETAIL \
+ (router->debugs &= ~PIM_MASK_GM_TRACE_DETAIL)
+#define PIM_DONT_DEBUG_ZEBRA (router->debugs &= ~PIM_MASK_ZEBRA)
+#define PIM_DONT_DEBUG_MLAG (router->debugs &= ~PIM_MASK_MLAG)
+#define PIM_DONT_DEBUG_SSMPINGD (router->debugs &= ~PIM_MASK_SSMPINGD)
+#define PIM_DONT_DEBUG_MROUTE (router->debugs &= ~PIM_MASK_MROUTE)
+#define PIM_DONT_DEBUG_MROUTE_DETAIL (router->debugs &= ~PIM_MASK_MROUTE_DETAIL)
+#define PIM_DONT_DEBUG_PIM_HELLO (router->debugs &= ~PIM_MASK_PIM_HELLO)
+#define PIM_DONT_DEBUG_PIM_J_P (router->debugs &= ~PIM_MASK_PIM_J_P)
+#define PIM_DONT_DEBUG_PIM_REG (router->debugs &= ~PIM_MASK_PIM_REG)
+#define PIM_DONT_DEBUG_STATIC (router->debugs &= ~PIM_MASK_STATIC)
+#define PIM_DONT_DEBUG_MSDP_EVENTS (router->debugs &= ~PIM_MASK_MSDP_EVENTS)
+#define PIM_DONT_DEBUG_MSDP_PACKETS (router->debugs &= ~PIM_MASK_MSDP_PACKETS)
+#define PIM_DONT_DEBUG_MSDP_INTERNAL (router->debugs &= ~PIM_MASK_MSDP_INTERNAL)
+#define PIM_DONT_DEBUG_PIM_NHT (router->debugs &= ~PIM_MASK_PIM_NHT)
+#define PIM_DONT_DEBUG_PIM_NHT_DETAIL \
+ (router->debugs &= ~PIM_MASK_PIM_NHT_DETAIL)
+#define PIM_DONT_DEBUG_PIM_NHT_RP (router->debugs &= ~PIM_MASK_PIM_NHT_RP)
+#define PIM_DONT_DEBUG_MTRACE (router->debugs &= ~PIM_MASK_MTRACE)
+#define PIM_DONT_DEBUG_VXLAN (router->debugs &= ~PIM_MASK_VXLAN)
+#define PIM_DONT_DEBUG_BSM (router->debugs &= ~PIM_MASK_BSM_PROC)
+
+/* RFC 3376: 8.1. Robustness Variable - Default: 2 for IGMP */
+/* RFC 2710: 7.1. Robustness Variable - Default: 2 for MLD */
+#define GM_DEFAULT_ROBUSTNESS_VARIABLE 2
+
+/* RFC 3376: 8.2. Query Interval - Default: 125 seconds for IGMP */
+/* RFC 2710: 7.2. Query Interval - Default: 125 seconds for MLD */
+#define GM_GENERAL_QUERY_INTERVAL 125
+
+/* RFC 3376: 8.3. Query Response Interval - Default: 100 deciseconds for IGMP */
+/* RFC 2710: 7.3. Query Response Interval - Default: 100 deciseconds for MLD */
+#define GM_QUERY_MAX_RESPONSE_TIME_DSEC 100
+
+/* RFC 3376: 8.8. Last Member Query Interval - Default: 10 deciseconds for IGMP
+ */
+/* RFC 2710: 7.8. Last Listener Query Interval - Default: 10 deciseconds for MLD
+ */
+#define GM_SPECIFIC_QUERY_MAX_RESPONSE_TIME_DSEC 10
+
+void pim_router_init(void);
+void pim_router_terminate(void);
+
+void pim_init(void);
+void pim_terminate(void);
+
+extern void pim_route_map_init(void);
+extern void pim_route_map_terminate(void);
+void pim_prefix_list_update(struct prefix_list *plist);
+
+#endif /* PIMD_H */
diff --git a/pimd/subdir.am b/pimd/subdir.am
new file mode 100644
index 0000000..1e787a3
--- /dev/null
+++ b/pimd/subdir.am
@@ -0,0 +1,180 @@
+#
+# pimd
+#
+
+if PIMD
+sbin_PROGRAMS += pimd/pimd
+bin_PROGRAMS += pimd/mtracebis
+noinst_PROGRAMS += pimd/test_igmpv3_join
+vtysh_daemons += pimd
+vtysh_daemons += pim6d
+man8 += $(MANBUILD)/frr-pimd.8
+man8 += $(MANBUILD)/mtracebis.8
+endif
+
+pim_common = \
+ pimd/pim_addr.c \
+ pimd/pim_assert.c \
+ pimd/pim_bfd.c \
+ pimd/pim_bsm.c \
+ pimd/pim_cmd_common.c \
+ pimd/pim_errors.c \
+ pimd/pim_hello.c \
+ pimd/pim_iface.c \
+ pimd/pim_ifchannel.c \
+ pimd/pim_instance.c \
+ pimd/pim_int.c \
+ pimd/pim_join.c \
+ pimd/pim_jp_agg.c \
+ pimd/pim_macro.c \
+ pimd/pim_memory.c \
+ pimd/pim_mroute.c \
+ pimd/pim_msg.c \
+ pimd/pim_nb.c \
+ pimd/pim_nb_config.c \
+ pimd/pim_neighbor.c \
+ pimd/pim_nht.c \
+ pimd/pim_oil.c \
+ pimd/pim_pim.c \
+ pimd/pim_routemap.c \
+ pimd/pim_rp.c \
+ pimd/pim_rpf.c \
+ pimd/pim_sock.c \
+ pimd/pim_ssm.c \
+ pimd/pim_ssmpingd.c \
+ pimd/pim_static.c \
+ pimd/pim_tib.c \
+ pimd/pim_time.c \
+ pimd/pim_tlv.c \
+ pimd/pim_upstream.c \
+ pimd/pim_util.c \
+ pimd/pim_vty.c \
+ pimd/pim_zebra.c \
+ pimd/pim_zlookup.c \
+ pimd/pim_vxlan.c \
+ pimd/pim_register.c \
+ pimd/pimd.c \
+ # end
+
+pimd_pimd_SOURCES = \
+ $(pim_common) \
+ pimd/pim_cmd.c \
+ pimd/pim_igmp.c \
+ pimd/pim_igmp_mtrace.c \
+ pimd/pim_igmp_stats.c \
+ pimd/pim_igmpv2.c \
+ pimd/pim_igmpv3.c \
+ pimd/pim_main.c \
+ pimd/pim_mlag.c \
+ pimd/pim_msdp.c \
+ pimd/pim_msdp_packet.c \
+ pimd/pim_msdp_socket.c \
+ pimd/pim_signals.c \
+ pimd/pim_zpthread.c \
+ # end
+
+nodist_pimd_pimd_SOURCES = \
+ yang/frr-pim.yang.c \
+ yang/frr-pim-rp.yang.c \
+ yang/frr-gmp.yang.c \
+ # end
+
+pimd_pim6d_SOURCES = \
+ $(pim_common) \
+ pimd/pim6_main.c \
+ pimd/pim6_mld.c \
+ pimd/pim6_cmd.c \
+ # end
+
+nodist_pimd_pim6d_SOURCES = \
+ yang/frr-pim.yang.c \
+ yang/frr-pim-rp.yang.c \
+ yang/frr-gmp.yang.c \
+ # end
+
+noinst_HEADERS += \
+ pimd/pim_addr.h \
+ pimd/pim_assert.h \
+ pimd/pim_bfd.h \
+ pimd/pim_bsm.h \
+ pimd/pim_cmd.h \
+ pimd/pim_cmd_common.h \
+ pimd/pim_errors.h \
+ pimd/pim_hello.h \
+ pimd/pim_iface.h \
+ pimd/pim_ifchannel.h \
+ pimd/pim_igmp.h \
+ pimd/pim_igmp_join.h \
+ pimd/pim_igmp_mtrace.h \
+ pimd/pim_igmp_stats.h \
+ pimd/pim_igmpv2.h \
+ pimd/pim_igmpv3.h \
+ pimd/pim_instance.h \
+ pimd/pim_int.h \
+ pimd/pim_join.h \
+ pimd/pim_jp_agg.h \
+ pimd/pim_macro.h \
+ pimd/pim_memory.h \
+ pimd/pim_mlag.h \
+ pimd/pim_mroute.h \
+ pimd/pim_msdp.h \
+ pimd/pim_msdp_packet.h \
+ pimd/pim_msdp_socket.h \
+ pimd/pim_msg.h \
+ pimd/pim_nb.h \
+ pimd/pim_neighbor.h \
+ pimd/pim_nht.h \
+ pimd/pim_oil.h \
+ pimd/pim_pim.h \
+ pimd/pim_register.h \
+ pimd/pim_rp.h \
+ pimd/pim_rpf.h \
+ pimd/pim_signals.h \
+ pimd/pim_sock.h \
+ pimd/pim_ssm.h \
+ pimd/pim_ssmpingd.h \
+ pimd/pim_static.h \
+ pimd/pim_str.h \
+ pimd/pim_tib.h \
+ pimd/pim_time.h \
+ pimd/pim_tlv.h \
+ pimd/pim_upstream.h \
+ pimd/pim_util.h \
+ pimd/pim_vty.h \
+ pimd/pim_zebra.h \
+ pimd/pim_zlookup.h \
+ pimd/pim_vxlan.h \
+ pimd/pim_vxlan_instance.h \
+ pimd/pimd.h \
+ pimd/pim6_mld.h \
+ pimd/pim6_mld_protocol.h \
+ pimd/mtracebis_netlink.h \
+ pimd/mtracebis_routeget.h \
+ pimd/pim6_cmd.h \
+ # end
+
+clippy_scan += \
+ pimd/pim_cmd.c \
+ pimd/pim6_cmd.c \
+ pimd/pim6_mld.c \
+ # end
+
+pimd_pimd_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=4
+pimd_pimd_LDADD = lib/libfrr.la $(LIBCAP)
+
+if PIM6D
+sbin_PROGRAMS += pimd/pim6d
+pimd_pim6d_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=6
+pimd_pim6d_LDADD = lib/libfrr.la $(LIBCAP)
+endif
+
+pimd_test_igmpv3_join_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=4
+pimd_test_igmpv3_join_LDADD = lib/libfrr.la
+pimd_test_igmpv3_join_SOURCES = pimd/test_igmpv3_join.c
+
+pimd_mtracebis_LDADD = lib/libfrr.la
+pimd_mtracebis_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=4
+pimd_mtracebis_SOURCES = pimd/mtracebis.c \
+ pimd/mtracebis_netlink.c \
+ pimd/mtracebis_routeget.c \
+ # end
diff --git a/pimd/test_igmpv3_join.c b/pimd/test_igmpv3_join.c
new file mode 100644
index 0000000..926e453
--- /dev/null
+++ b/pimd/test_igmpv3_join.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PIM for Quagga
+ * Copyright (C) 2008 Everton da Silva Marques
+ */
+
+#include <zebra.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <arpa/inet.h>
+
+#include "if.h"
+#include "pim_igmp_join.h"
+
+const char *prog_name = 0;
+
+/*
+ * Resolve an interface name to its kernel ifindex via if_nameindex().
+ * Returns the ifindex on success, -1 on NULL input, lookup failure
+ * (errno preserved) or when the name is not found.
+ */
+static int iface_solve_index(const char *ifname)
+{
+ struct if_nameindex *ini;
+ ifindex_t ifindex = -1;
+ int i;
+
+ if (!ifname)
+ return -1;
+
+ ini = if_nameindex();
+ if (!ini) {
+ int err = errno; /* save errno across fprintf */
+ fprintf(stderr,
+ "%s: interface=%s: failure solving index: errno=%d: %s\n",
+ prog_name, ifname, err, strerror(err));
+ errno = err;
+ return -1;
+ }
+
+ /* array is terminated by an entry with if_index == 0 */
+ for (i = 0; ini[i].if_index; ++i) {
+ if (!strcmp(ini[i].if_name, ifname)) {
+ ifindex = ini[i].if_index;
+ break;
+ }
+ }
+
+ if_freenameindex(ini);
+
+ return ifindex;
+}
+
+/*
+ * Test utility: join IGMPv3 channel (S,G) on the given interface and
+ * hold the membership until stdin sees EOF/enter.
+ *
+ * usage: test_igmpv3_join <interface> <group> <source>
+ * Exits 1 on any usage, resolution or join failure; 0 otherwise.
+ */
+int main(int argc, const char *argv[])
+{
+ pim_addr group_addr;
+ pim_addr source_addr;
+ const char *ifname;
+ const char *group;
+ const char *source;
+ ifindex_t ifindex;
+ int result;
+ int fd;
+
+ prog_name = argv[0];
+
+ /* Validate arguments before opening any resources (the original
+ * created the socket first, leaving an fd open on usage errors). */
+ if (argc != 4) {
+ fprintf(stderr,
+ "usage: %s interface group source\n"
+ "example: %s eth0 232.1.1.1 1.1.1.1\n",
+ prog_name, prog_name);
+ exit(1);
+ }
+
+ fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ if (fd < 0) {
+ fprintf(stderr,
+ "%s: could not create socket: socket(): errno=%d: %s\n",
+ prog_name, errno, strerror(errno));
+ exit(1);
+ }
+
+ ifname = argv[1];
+ group = argv[2];
+ source = argv[3];
+
+ ifindex = iface_solve_index(ifname);
+ if (ifindex < 0) {
+ fprintf(stderr, "%s: could not find interface: %s\n", prog_name,
+ ifname);
+ exit(1);
+ }
+
+ result = inet_pton(AF_INET, group, &group_addr);
+ if (result <= 0) {
+ fprintf(stderr, "%s: bad group address: %s\n", prog_name,
+ group);
+ exit(1);
+ }
+
+ result = inet_pton(AF_INET, source, &source_addr);
+ if (result <= 0) {
+ fprintf(stderr, "%s: bad source address: %s\n", prog_name,
+ source);
+ exit(1);
+ }
+
+ result = pim_gm_join_source(fd, ifindex, group_addr, source_addr);
+ if (result) {
+ fprintf(stderr,
+ "%s: setsockopt(fd=%d) failure for IGMP group %s source %s ifindex %d on interface %s: errno=%d: %s\n",
+ prog_name, fd, group, source, ifindex, ifname, errno,
+ strerror(errno));
+ exit(1);
+ }
+
+ printf("%s: joined channel (S,G)=(%s,%s) on interface %s\n", prog_name,
+ source, group, ifname);
+
+ printf("%s: waiting...\n", prog_name);
+
+ if (getchar() == EOF)
+ fprintf(stderr, "getchar failure\n");
+
+ close(fd);
+
+ printf("%s: left channel (S,G)=(%s,%s) on interface %s\n", prog_name,
+ source, group, ifname);
+
+ exit(0);
+}