author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:35:05 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:31 +0000
commit     85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree       76267dbc9b9a130337be3640948fe397b04ac629 /net/bridge
parent     Adding upstream version 6.6.15. (diff)
Adding upstream version 6.7.7. (tag: upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'net/bridge')
-rw-r--r--  net/bridge/br.c                             |   1
-rw-r--r--  net/bridge/br_cfm_netlink.c                 |   2
-rw-r--r--  net/bridge/br_device.c                      |   3
-rw-r--r--  net/bridge/br_fdb.c                         |  71
-rw-r--r--  net/bridge/br_input.c                       |   2
-rw-r--r--  net/bridge/br_mdb.c                         | 184
-rw-r--r--  net/bridge/br_multicast.c                   |  25
-rw-r--r--  net/bridge/br_netfilter_hooks.c             |  98
-rw-r--r--  net/bridge/br_netfilter_ipv6.c              |   6
-rw-r--r--  net/bridge/br_netlink.c                     |  17
-rw-r--r--  net/bridge/br_private.h                     |  30
-rw-r--r--  net/bridge/br_switchdev.c                   |  84
-rw-r--r--  net/bridge/netfilter/ebtable_broute.c       |   1
-rw-r--r--  net/bridge/netfilter/ebtable_filter.c       |   1
-rw-r--r--  net/bridge/netfilter/ebtable_nat.c          |   1
-rw-r--r--  net/bridge/netfilter/ebtables.c             |   1
-rw-r--r--  net/bridge/netfilter/nf_conntrack_bridge.c  |   1
17 files changed, 409 insertions(+), 119 deletions(-)
diff --git a/net/bridge/br.c b/net/bridge/br.c index a6e94ceb7c..ac19b797db 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c @@ -477,3 +477,4 @@ module_exit(br_deinit) MODULE_LICENSE("GPL"); MODULE_VERSION(BR_VERSION); MODULE_ALIAS_RTNL_LINK("bridge"); +MODULE_DESCRIPTION("Ethernet bridge driver"); diff --git a/net/bridge/br_cfm_netlink.c b/net/bridge/br_cfm_netlink.c index 5c4c369f85..2faab44652 100644 --- a/net/bridge/br_cfm_netlink.c +++ b/net/bridge/br_cfm_netlink.c @@ -362,7 +362,7 @@ static int br_cc_ccm_tx_parse(struct net_bridge *br, struct nlattr *attr, memset(&tx_info, 0, sizeof(tx_info)); - instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_RDI_INSTANCE]); + instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_INSTANCE]); nla_memcpy(&tx_info.dmac.addr, tb[IFLA_BRIDGE_CFM_CC_CCM_TX_DMAC], sizeof(tx_info.dmac.addr)); diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 9a5ea06236..8f40de3af1 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -92,7 +92,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) goto out; } - mdst = br_mdb_get(brmctx, skb, vid); + mdst = br_mdb_entry_skb_get(brmctx, skb, vid); if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) br_multicast_flood(mdst, skb, brmctx, false, true); @@ -472,6 +472,7 @@ static const struct net_device_ops br_netdev_ops = { .ndo_mdb_add = br_mdb_add, .ndo_mdb_del = br_mdb_del, .ndo_mdb_dump = br_mdb_dump, + .ndo_mdb_get = br_mdb_get, .ndo_bridge_getlink = br_getlink, .ndo_bridge_setlink = br_setlink, .ndo_bridge_dellink = br_dellink, diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index e69a872bfc..c622de5ecc 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -329,11 +329,18 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f, hlist_del_init_rcu(&f->fdb_node); rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode, br_fdb_rht_params); + if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &f->flags)) + atomic_dec(&br->fdb_n_learned); fdb_notify(br, f, RTM_DELNEIGH, swdev_notify); call_rcu(&f->rcu, fdb_rcu_free); } -/* Delete a local entry if no other port had the same address. */ +/* Delete a local entry if no other port had the same address. + * + * This function should only be called on entries with BR_FDB_LOCAL set, + * so even with BR_FDB_ADDED_BY_USER cleared we never need to increase + * the accounting for dynamically learned entries again. 
+ */ static void fdb_delete_local(struct net_bridge *br, const struct net_bridge_port *p, struct net_bridge_fdb_entry *f) @@ -388,9 +395,20 @@ static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br, __u16 vid, unsigned long flags) { + bool learned = !test_bit(BR_FDB_ADDED_BY_USER, &flags) && + !test_bit(BR_FDB_LOCAL, &flags); + u32 max_learned = READ_ONCE(br->fdb_max_learned); struct net_bridge_fdb_entry *fdb; int err; + if (likely(learned)) { + int n_learned = atomic_read(&br->fdb_n_learned); + + if (unlikely(max_learned && n_learned >= max_learned)) + return NULL; + __set_bit(BR_FDB_DYNAMIC_LEARNED, &flags); + } + fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC); if (!fdb) return NULL; @@ -407,6 +425,9 @@ static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br, return NULL; } + if (likely(learned)) + atomic_inc(&br->fdb_n_learned); + hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list); return fdb; @@ -661,14 +682,30 @@ static int __fdb_flush_validate_ifindex(const struct net_bridge *br, return 0; } -int br_fdb_delete_bulk(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, u16 vid, +static const struct nla_policy br_fdb_del_bulk_policy[NDA_MAX + 1] = { + [NDA_VLAN] = NLA_POLICY_RANGE(NLA_U16, 1, VLAN_N_VID - 2), + [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), + [NDA_NDM_STATE_MASK] = { .type = NLA_U16 }, + [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 }, +}; + +int br_fdb_delete_bulk(struct nlmsghdr *nlh, struct net_device *dev, struct netlink_ext_ack *extack) { - u8 ndm_flags = ndm->ndm_flags & ~FDB_FLUSH_IGNORED_NDM_FLAGS; - struct net_bridge_fdb_flush_desc desc = { .vlan_id = vid }; + struct net_bridge_fdb_flush_desc desc = {}; + struct ndmsg *ndm = nlmsg_data(nlh); struct net_bridge_port *p = NULL; + struct nlattr *tb[NDA_MAX + 1]; struct net_bridge *br; + u8 ndm_flags; + int err; + + ndm_flags = ndm->ndm_flags & ~FDB_FLUSH_IGNORED_NDM_FLAGS; + + err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, + br_fdb_del_bulk_policy, extack); + if (err) + return err; if (netif_is_bridge_master(dev)) { br = netdev_priv(dev); @@ -681,6 +718,9 @@ int br_fdb_delete_bulk(struct ndmsg *ndm, struct nlattr *tb[], br = p->br; } + if (tb[NDA_VLAN]) + desc.vlan_id = nla_get_u16(tb[NDA_VLAN]); + if (ndm_flags & ~FDB_FLUSH_ALLOWED_NDM_FLAGS) { NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm flag bits set"); return -EINVAL; @@ -703,7 +743,7 @@ int br_fdb_delete_bulk(struct ndmsg *ndm, struct nlattr *tb[], desc.flags_mask |= __ndm_flags_to_fdb_flags(ndm_flags_mask); } if (tb[NDA_IFINDEX]) { - int err, ifidx = nla_get_s32(tb[NDA_IFINDEX]); + int ifidx = nla_get_s32(tb[NDA_IFINDEX]); err = __fdb_flush_validate_ifindex(br, ifidx, extack); if (err) @@ -893,8 +933,12 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, clear_bit(BR_FDB_LOCKED, &fdb->flags); } - if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags))) + if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags))) { set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); + if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, + &fdb->flags)) + atomic_dec(&br->fdb_n_learned); + } if (unlikely(fdb_modified)) { trace_br_fdb_update(br, source, addr, vid, flags); fdb_notify(br, fdb, RTM_NEWNEIGH, true); @@ -1056,7 +1100,8 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, if (!(flags & NLM_F_CREATE)) return -ENOENT; - fdb = fdb_create(br, source, addr, vid, 0); + fdb = fdb_create(br, source, addr, vid, + BIT(BR_FDB_ADDED_BY_USER)); if (!fdb) return -ENOMEM; @@ -1069,6 +1114,10 @@ static int 
fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, WRITE_ONCE(fdb->dst, source); modified = true; } + + set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); + if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &fdb->flags)) + atomic_dec(&br->fdb_n_learned); } if (fdb_to_nud(br, fdb) != state) { @@ -1100,8 +1149,6 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, if (fdb_handle_notify(fdb, notify)) modified = true; - set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); - fdb->used = jiffies; if (modified) { if (refresh) @@ -1445,6 +1492,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, if (!p) set_bit(BR_FDB_LOCAL, &fdb->flags); + if ((swdev_notify || !p) && + test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &fdb->flags)) + atomic_dec(&br->fdb_n_learned); + if (modified) fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); } diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index c729528b5e..f21097e734 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -175,7 +175,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb switch (pkt_type) { case BR_PKT_MULTICAST: - mdst = br_mdb_get(brmctx, skb, vid); + mdst = br_mdb_entry_skb_get(brmctx, skb, vid); if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) { if ((mdst && mdst->host_joined) || diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 7305f5f821..8cc526067b 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -323,9 +323,6 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, struct net_bridge_mdb_entry *mp; struct nlattr *nest, *nest2; - if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) - return 0; - nest = nla_nest_start_noflag(skb, MDBA_MDB); if (nest == NULL) return -EMSGSIZE; @@ -453,13 +450,15 @@ cancel: return -EMSGSIZE; } -static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg) +static size_t rtnl_mdb_nlmsg_pg_size(const struct net_bridge_port_group *pg) { - size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) + - nla_total_size(sizeof(struct br_mdb_entry)) + - nla_total_size(sizeof(u32)); struct net_bridge_group_src *ent; - size_t addr_size = 0; + size_t nlmsg_size, addr_size = 0; + + /* MDBA_MDB_ENTRY_INFO */ + nlmsg_size = nla_total_size(sizeof(struct br_mdb_entry)) + + /* MDBA_MDB_EATTR_TIMER */ + nla_total_size(sizeof(u32)); if (!pg) goto out; @@ -507,6 +506,17 @@ out: return nlmsg_size; } +static size_t rtnl_mdb_nlmsg_size(const struct net_bridge_port_group *pg) +{ + return NLMSG_ALIGN(sizeof(struct br_port_msg)) + + /* MDBA_MDB */ + nla_total_size(0) + + /* MDBA_MDB_ENTRY */ + nla_total_size(0) + + /* Port group entry */ + rtnl_mdb_nlmsg_pg_size(pg); +} + void br_mdb_notify(struct net_device *dev, struct net_bridge_mdb_entry *mp, struct net_bridge_port_group *pg, @@ -1401,3 +1411,161 @@ int br_mdb_del(struct net_device *dev, struct nlattr *tb[], br_mdb_config_fini(&cfg); return err; } + +static const struct nla_policy br_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = { + [MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY, + sizeof(struct in_addr), + sizeof(struct in6_addr)), +}; + +static int br_mdb_get_parse(struct net_device *dev, struct nlattr *tb[], + struct br_ip *group, struct netlink_ext_ack *extack) +{ + struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]); + struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1]; + int err; + + if (!tb[MDBA_GET_ENTRY_ATTRS]) { + __mdb_entry_to_br_ip(entry, group, NULL); + 
return 0; + } + + err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX, + tb[MDBA_GET_ENTRY_ATTRS], br_mdbe_attrs_get_pol, + extack); + if (err) + return err; + + if (mdbe_attrs[MDBE_ATTR_SOURCE] && + !is_valid_mdb_source(mdbe_attrs[MDBE_ATTR_SOURCE], + entry->addr.proto, extack)) + return -EINVAL; + + __mdb_entry_to_br_ip(entry, group, mdbe_attrs); + + return 0; +} + +static struct sk_buff * +br_mdb_get_reply_alloc(const struct net_bridge_mdb_entry *mp) +{ + struct net_bridge_port_group *pg; + size_t nlmsg_size; + + nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) + + /* MDBA_MDB */ + nla_total_size(0) + + /* MDBA_MDB_ENTRY */ + nla_total_size(0); + + if (mp->host_joined) + nlmsg_size += rtnl_mdb_nlmsg_pg_size(NULL); + + for (pg = mlock_dereference(mp->ports, mp->br); pg; + pg = mlock_dereference(pg->next, mp->br)) + nlmsg_size += rtnl_mdb_nlmsg_pg_size(pg); + + return nlmsg_new(nlmsg_size, GFP_ATOMIC); +} + +static int br_mdb_get_reply_fill(struct sk_buff *skb, + struct net_bridge_mdb_entry *mp, u32 portid, + u32 seq) +{ + struct nlattr *mdb_nest, *mdb_entry_nest; + struct net_bridge_port_group *pg; + struct br_port_msg *bpm; + struct nlmsghdr *nlh; + int err; + + nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0); + if (!nlh) + return -EMSGSIZE; + + bpm = nlmsg_data(nlh); + memset(bpm, 0, sizeof(*bpm)); + bpm->family = AF_BRIDGE; + bpm->ifindex = mp->br->dev->ifindex; + mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB); + if (!mdb_nest) { + err = -EMSGSIZE; + goto cancel; + } + mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY); + if (!mdb_entry_nest) { + err = -EMSGSIZE; + goto cancel; + } + + if (mp->host_joined) { + err = __mdb_fill_info(skb, mp, NULL); + if (err) + goto cancel; + } + + for (pg = mlock_dereference(mp->ports, mp->br); pg; + pg = mlock_dereference(pg->next, mp->br)) { + err = __mdb_fill_info(skb, mp, pg); + if (err) + goto cancel; + } + + nla_nest_end(skb, mdb_entry_nest); + nla_nest_end(skb, mdb_nest); + nlmsg_end(skb, nlh); + + return 0; + +cancel: + nlmsg_cancel(skb, nlh); + return err; +} + +int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq, + struct netlink_ext_ack *extack) +{ + struct net_bridge *br = netdev_priv(dev); + struct net_bridge_mdb_entry *mp; + struct sk_buff *skb; + struct br_ip group; + int err; + + err = br_mdb_get_parse(dev, tb, &group, extack); + if (err) + return err; + + /* Hold the multicast lock to ensure that the MDB entry does not change + * between the time the reply size is determined and when the reply is + * filled in. 
+ */ + spin_lock_bh(&br->multicast_lock); + + mp = br_mdb_ip_get(br, &group); + if (!mp) { + NL_SET_ERR_MSG_MOD(extack, "MDB entry not found"); + err = -ENOENT; + goto unlock; + } + + skb = br_mdb_get_reply_alloc(mp); + if (!skb) { + err = -ENOMEM; + goto unlock; + } + + err = br_mdb_get_reply_fill(skb, mp, portid, seq); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply"); + goto free; + } + + spin_unlock_bh(&br->multicast_lock); + + return rtnl_unicast(skb, dev_net(dev), portid); + +free: + kfree_skb(skb); +unlock: + spin_unlock_bh(&br->multicast_lock); + return err; +} diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 96d1fc78dd..2d7b732429 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -145,8 +145,9 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br, } #endif -struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx, - struct sk_buff *skb, u16 vid) +struct net_bridge_mdb_entry * +br_mdb_entry_skb_get(struct net_bridge_mcast *brmctx, struct sk_buff *skb, + u16 vid) { struct net_bridge *br = brmctx->br; struct br_ip ip; @@ -1761,6 +1762,10 @@ static void br_ip6_multicast_querier_expired(struct timer_list *t) } #endif +static void br_multicast_query_delay_expired(struct timer_list *t) +{ +} + static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx, struct br_ip *ip, struct sk_buff *skb) @@ -3197,7 +3202,7 @@ br_multicast_update_query_timer(struct net_bridge_mcast *brmctx, unsigned long max_delay) { if (!timer_pending(&query->timer)) - query->delay_time = jiffies + max_delay; + mod_timer(&query->delay_timer, jiffies + max_delay); mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval); } @@ -4040,13 +4045,11 @@ void br_multicast_ctx_init(struct net_bridge *br, brmctx->multicast_querier_interval = 255 * HZ; brmctx->multicast_membership_interval = 260 * HZ; - brmctx->ip4_other_query.delay_time = 0; brmctx->ip4_querier.port_ifidx = 0; seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock); brmctx->multicast_igmp_version = 2; #if IS_ENABLED(CONFIG_IPV6) brmctx->multicast_mld_version = 1; - brmctx->ip6_other_query.delay_time = 0; brmctx->ip6_querier.port_ifidx = 0; seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock); #endif @@ -4055,6 +4058,8 @@ void br_multicast_ctx_init(struct net_bridge *br, br_ip4_multicast_local_router_expired, 0); timer_setup(&brmctx->ip4_other_query.timer, br_ip4_multicast_querier_expired, 0); + timer_setup(&brmctx->ip4_other_query.delay_timer, + br_multicast_query_delay_expired, 0); timer_setup(&brmctx->ip4_own_query.timer, br_ip4_multicast_query_expired, 0); #if IS_ENABLED(CONFIG_IPV6) @@ -4062,6 +4067,8 @@ void br_multicast_ctx_init(struct net_bridge *br, br_ip6_multicast_local_router_expired, 0); timer_setup(&brmctx->ip6_other_query.timer, br_ip6_multicast_querier_expired, 0); + timer_setup(&brmctx->ip6_other_query.delay_timer, + br_multicast_query_delay_expired, 0); timer_setup(&brmctx->ip6_own_query.timer, br_ip6_multicast_query_expired, 0); #endif @@ -4196,10 +4203,12 @@ static void __br_multicast_stop(struct net_bridge_mcast *brmctx) { del_timer_sync(&brmctx->ip4_mc_router_timer); del_timer_sync(&brmctx->ip4_other_query.timer); + del_timer_sync(&brmctx->ip4_other_query.delay_timer); del_timer_sync(&brmctx->ip4_own_query.timer); #if IS_ENABLED(CONFIG_IPV6) del_timer_sync(&brmctx->ip6_mc_router_timer); del_timer_sync(&brmctx->ip6_other_query.timer); + 
del_timer_sync(&brmctx->ip6_other_query.delay_timer); del_timer_sync(&brmctx->ip6_own_query.timer); #endif } @@ -4642,13 +4651,15 @@ int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val) max_delay = brmctx->multicast_query_response_interval; if (!timer_pending(&brmctx->ip4_other_query.timer)) - brmctx->ip4_other_query.delay_time = jiffies + max_delay; + mod_timer(&brmctx->ip4_other_query.delay_timer, + jiffies + max_delay); br_multicast_start_querier(brmctx, &brmctx->ip4_own_query); #if IS_ENABLED(CONFIG_IPV6) if (!timer_pending(&brmctx->ip6_other_query.timer)) - brmctx->ip6_other_query.delay_time = jiffies + max_delay; + mod_timer(&brmctx->ip6_other_query.delay_timer, + jiffies + max_delay); br_multicast_start_querier(brmctx, &brmctx->ip6_own_query); #endif diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 92dae4c492..ed17208907 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -501,11 +501,11 @@ static unsigned int br_nf_pre_routing(void *priv, struct brnf_net *brnet; if (unlikely(!pskb_may_pull(skb, len))) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_PKT_TOO_SMALL, 0); p = br_port_get_rcu(state->in); if (p == NULL) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_DEV_READY, 0); br = p->br; brnet = net_generic(state->net, brnf_net_id); @@ -516,7 +516,7 @@ static unsigned int br_nf_pre_routing(void *priv, return NF_ACCEPT; if (!ipv6_mod_enabled()) { pr_warn_once("Module ipv6 is disabled, so call_ip6tables is not supported."); - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_IPV6DISABLED, 0); } nf_bridge_pull_encap_header_rcsum(skb); @@ -533,12 +533,12 @@ static unsigned int br_nf_pre_routing(void *priv, nf_bridge_pull_encap_header_rcsum(skb); if (br_validate_ipv4(state->net, skb)) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_IP_INHDR, 0); if (!nf_bridge_alloc(skb)) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_NOMEM, 0); if (!setup_pre_routing(skb, state->net)) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_DEV_READY, 0); nf_bridge = nf_bridge_info_get(skb); nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr; @@ -589,18 +589,12 @@ static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff } -/* This is the 'purely bridged' case. For IP, we pass the packet to - * netfilter with indev and outdev set to the bridge device, - * but we are still able to filter on the 'real' indev/outdev - * because of the physdev module. For ARP, indev and outdev are the - * bridge ports. */ -static unsigned int br_nf_forward_ip(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) +static unsigned int br_nf_forward_ip(struct sk_buff *skb, + const struct nf_hook_state *state, + u8 pf) { struct nf_bridge_info *nf_bridge; struct net_device *parent; - u_int8_t pf; nf_bridge = nf_bridge_info_get(skb); if (!nf_bridge) @@ -609,24 +603,15 @@ static unsigned int br_nf_forward_ip(void *priv, /* Need exclusive nf_bridge_info since we might have multiple * different physoutdevs. 
*/ if (!nf_bridge_unshare(skb)) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_NOMEM, 0); nf_bridge = nf_bridge_info_get(skb); if (!nf_bridge) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_NOMEM, 0); parent = bridge_parent(state->out); if (!parent) - return NF_DROP; - - if (IS_IP(skb) || is_vlan_ip(skb, state->net) || - is_pppoe_ip(skb, state->net)) - pf = NFPROTO_IPV4; - else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) || - is_pppoe_ipv6(skb, state->net)) - pf = NFPROTO_IPV6; - else - return NF_ACCEPT; + return NF_DROP_REASON(skb, SKB_DROP_REASON_DEV_READY, 0); nf_bridge_pull_encap_header(skb); @@ -637,21 +622,20 @@ static unsigned int br_nf_forward_ip(void *priv, if (pf == NFPROTO_IPV4) { if (br_validate_ipv4(state->net, skb)) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_IP_INHDR, 0); IPCB(skb)->frag_max_size = nf_bridge->frag_max_size; - } - - if (pf == NFPROTO_IPV6) { + skb->protocol = htons(ETH_P_IP); + } else if (pf == NFPROTO_IPV6) { if (br_validate_ipv6(state->net, skb)) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_IP_INHDR, 0); IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size; + skb->protocol = htons(ETH_P_IPV6); + } else { + WARN_ON_ONCE(1); + return NF_DROP; } nf_bridge->physoutdev = skb->dev; - if (pf == NFPROTO_IPV4) - skb->protocol = htons(ETH_P_IP); - else - skb->protocol = htons(ETH_P_IPV6); NF_HOOK(pf, NF_INET_FORWARD, state->net, NULL, skb, brnf_get_logical_dev(skb, state->in, state->net), @@ -660,8 +644,7 @@ static unsigned int br_nf_forward_ip(void *priv, return NF_STOLEN; } -static unsigned int br_nf_forward_arp(void *priv, - struct sk_buff *skb, +static unsigned int br_nf_forward_arp(struct sk_buff *skb, const struct nf_hook_state *state) { struct net_bridge_port *p; @@ -678,14 +661,11 @@ static unsigned int br_nf_forward_arp(void *priv, if (!brnet->call_arptables && !br_opt_get(br, BROPT_NF_CALL_ARPTABLES)) return NF_ACCEPT; - if (!IS_ARP(skb)) { - if (!is_vlan_arp(skb, state->net)) - return NF_ACCEPT; + if (is_vlan_arp(skb, state->net)) nf_bridge_pull_encap_header(skb); - } if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr)))) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_PKT_TOO_SMALL, 0); if (arp_hdr(skb)->ar_pln != 4) { if (is_vlan_arp(skb, state->net)) @@ -699,6 +679,28 @@ static unsigned int br_nf_forward_arp(void *priv, return NF_STOLEN; } +/* This is the 'purely bridged' case. For IP, we pass the packet to + * netfilter with indev and outdev set to the bridge device, + * but we are still able to filter on the 'real' indev/outdev + * because of the physdev module. For ARP, indev and outdev are the + * bridge ports. 
+ */ +static unsigned int br_nf_forward(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + if (IS_IP(skb) || is_vlan_ip(skb, state->net) || + is_pppoe_ip(skb, state->net)) + return br_nf_forward_ip(skb, state, NFPROTO_IPV4); + if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) || + is_pppoe_ipv6(skb, state->net)) + return br_nf_forward_ip(skb, state, NFPROTO_IPV6); + if (IS_ARP(skb) || is_vlan_arp(skb, state->net)) + return br_nf_forward_arp(skb, state); + + return NF_ACCEPT; +} + static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) { struct brnf_frag_data *data; @@ -850,7 +852,7 @@ static unsigned int br_nf_post_routing(void *priv, return NF_ACCEPT; if (!realoutdev) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_DEV_READY, 0); if (IS_IP(skb) || is_vlan_ip(skb, state->net) || is_pppoe_ip(skb, state->net)) @@ -963,13 +965,7 @@ static const struct nf_hook_ops br_nf_ops[] = { .priority = NF_BR_PRI_BRNF, }, { - .hook = br_nf_forward_ip, - .pf = NFPROTO_BRIDGE, - .hooknum = NF_BR_FORWARD, - .priority = NF_BR_PRI_BRNF - 1, - }, - { - .hook = br_nf_forward_arp, + .hook = br_nf_forward, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_FORWARD, .priority = NF_BR_PRI_BRNF, diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c index ad268bd19d..e0421eaa3a 100644 --- a/net/bridge/br_netfilter_ipv6.c +++ b/net/bridge/br_netfilter_ipv6.c @@ -167,13 +167,13 @@ unsigned int br_nf_pre_routing_ipv6(void *priv, struct nf_bridge_info *nf_bridge; if (br_validate_ipv6(state->net, skb)) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_IP_INHDR, 0); nf_bridge = nf_bridge_alloc(skb); if (!nf_bridge) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_NOMEM, 0); if (!setup_pre_routing(skb, state->net)) - return NF_DROP; + return NF_DROP_REASON(skb, SKB_DROP_REASON_DEV_READY, 0); nf_bridge = nf_bridge_info_get(skb); nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr; diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 10f0d33d8c..5ad4abfcb7 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1229,6 +1229,8 @@ static size_t br_port_get_slave_size(const struct net_device *brdev, } static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = { + [IFLA_BR_UNSPEC] = { .strict_start_type = + IFLA_BR_FDB_N_LEARNED }, [IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 }, [IFLA_BR_HELLO_TIME] = { .type = NLA_U32 }, [IFLA_BR_MAX_AGE] = { .type = NLA_U32 }, @@ -1265,6 +1267,8 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = { [IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 }, [IFLA_BR_MULTI_BOOLOPT] = NLA_POLICY_EXACT_LEN(sizeof(struct br_boolopt_multi)), + [IFLA_BR_FDB_N_LEARNED] = { .type = NLA_REJECT }, + [IFLA_BR_FDB_MAX_LEARNED] = { .type = NLA_U32 }, }; static int br_changelink(struct net_device *brdev, struct nlattr *tb[], @@ -1539,6 +1543,12 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], return err; } + if (data[IFLA_BR_FDB_MAX_LEARNED]) { + u32 val = nla_get_u32(data[IFLA_BR_FDB_MAX_LEARNED]); + + WRITE_ONCE(br->fdb_max_learned, val); + } + return 0; } @@ -1593,6 +1603,8 @@ static size_t br_get_size(const struct net_device *brdev) nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */ nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */ nla_total_size(ETH_ALEN) + /* IFLA_BR_GROUP_ADDR */ + nla_total_size(sizeof(u32)) + /* IFLA_BR_FDB_N_LEARNED */ + nla_total_size(sizeof(u32)) + /* IFLA_BR_FDB_MAX_LEARNED */ #ifdef 
CONFIG_BRIDGE_IGMP_SNOOPING nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_ROUTER */ nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_SNOOPING */ @@ -1668,7 +1680,10 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED, br->topology_change_detected) || nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) || - nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm)) + nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm) || + nla_put_u32(skb, IFLA_BR_FDB_N_LEARNED, + atomic_read(&br->fdb_n_learned)) || + nla_put_u32(skb, IFLA_BR_FDB_MAX_LEARNED, br->fdb_max_learned)) return -EMSGSIZE; #ifdef CONFIG_BRIDGE_VLAN_FILTERING diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index a1f4acfa69..f317d8295b 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -78,7 +78,7 @@ struct bridge_mcast_own_query { /* other querier */ struct bridge_mcast_other_query { struct timer_list timer; - unsigned long delay_time; + struct timer_list delay_timer; }; /* selected querier */ @@ -274,6 +274,7 @@ enum { BR_FDB_NOTIFY, BR_FDB_NOTIFY_INACTIVE, BR_FDB_LOCKED, + BR_FDB_DYNAMIC_LEARNED, }; struct net_bridge_fdb_key { @@ -555,6 +556,9 @@ struct net_bridge { struct kobject *ifobj; u32 auto_cnt; + atomic_t fdb_n_learned; + u32 fdb_max_learned; + #ifdef CONFIG_NET_SWITCHDEV /* Counter used to make sure that hardware domains get unique * identifiers in case a bridge spans multiple switchdev instances. @@ -847,8 +851,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, struct netlink_ext_ack *extack); -int br_fdb_delete_bulk(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, u16 vid, +int br_fdb_delete_bulk(struct nlmsghdr *nlh, struct net_device *dev, struct netlink_ext_ack *extack); int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 nlh_flags, @@ -952,8 +955,9 @@ int br_multicast_rcv(struct net_bridge_mcast **brmctx, struct net_bridge_mcast_port **pmctx, struct net_bridge_vlan *vlan, struct sk_buff *skb, u16 vid); -struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx, - struct sk_buff *skb, u16 vid); +struct net_bridge_mdb_entry * +br_mdb_entry_skb_get(struct net_bridge_mcast *brmctx, struct sk_buff *skb, + u16 vid); int br_multicast_add_port(struct net_bridge_port *port); void br_multicast_del_port(struct net_bridge_port *port); void br_multicast_enable_port(struct net_bridge_port *port); @@ -1018,6 +1022,8 @@ int br_mdb_del(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack); int br_mdb_dump(struct net_device *dev, struct sk_buff *skb, struct netlink_callback *cb); +int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq, + struct netlink_ext_ack *extack); void br_multicast_host_join(const struct net_bridge_mcast *brmctx, struct net_bridge_mdb_entry *mp, bool notify); void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify); @@ -1149,7 +1155,7 @@ __br_multicast_querier_exists(struct net_bridge_mcast *brmctx, own_querier_enabled = false; } - return time_is_before_jiffies(querier->delay_time) && + return !timer_pending(&querier->delay_timer) && (own_querier_enabled || timer_pending(&querier->timer)); } @@ -1342,8 +1348,9 @@ static inline int br_multicast_rcv(struct net_bridge_mcast **brmctx, return 0; } 
-static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx, - struct sk_buff *skb, u16 vid) +static inline struct net_bridge_mdb_entry * +br_mdb_entry_skb_get(struct net_bridge_mcast *brmctx, struct sk_buff *skb, + u16 vid) { return NULL; } @@ -1427,6 +1434,13 @@ static inline int br_mdb_dump(struct net_device *dev, struct sk_buff *skb, return 0; } +static inline int br_mdb_get(struct net_device *dev, struct nlattr *tb[], + u32 portid, u32 seq, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} + static inline int br_mdb_hash_init(struct net_bridge *br) { return 0; diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c index ee84e783e1..7b41ee8740 100644 --- a/net/bridge/br_switchdev.c +++ b/net/bridge/br_switchdev.c @@ -595,21 +595,40 @@ br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev, } static int br_switchdev_mdb_queue_one(struct list_head *mdb_list, + struct net_device *dev, + unsigned long action, enum switchdev_obj_id id, const struct net_bridge_mdb_entry *mp, struct net_device *orig_dev) { - struct switchdev_obj_port_mdb *mdb; + struct switchdev_obj_port_mdb mdb = { + .obj = { + .id = id, + .orig_dev = orig_dev, + }, + }; + struct switchdev_obj_port_mdb *pmdb; - mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC); - if (!mdb) - return -ENOMEM; + br_switchdev_mdb_populate(&mdb, mp); + + if (action == SWITCHDEV_PORT_OBJ_ADD && + switchdev_port_obj_act_is_deferred(dev, action, &mdb.obj)) { + /* This event is already in the deferred queue of + * events, so this replay must be elided, lest the + * driver receives duplicate events for it. This can + * only happen when replaying additions, since + * modifications are always immediately visible in + * br->mdb_list, whereas actual event delivery may be + * delayed. + */ + return 0; + } - mdb->obj.id = id; - mdb->obj.orig_dev = orig_dev; - br_switchdev_mdb_populate(mdb, mp); - list_add_tail(&mdb->obj.list, mdb_list); + pmdb = kmemdup(&mdb, sizeof(mdb), GFP_ATOMIC); + if (!pmdb) + return -ENOMEM; + list_add_tail(&pmdb->obj.list, mdb_list); return 0; } @@ -677,51 +696,50 @@ br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev, if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) return 0; - /* We cannot walk over br->mdb_list protected just by the rtnl_mutex, - * because the write-side protection is br->multicast_lock. But we - * need to emulate the [ blocking ] calling context of a regular - * switchdev event, so since both br->multicast_lock and RCU read side - * critical sections are atomic, we have no choice but to pick the RCU - * read side lock, queue up all our events, leave the critical section - * and notify switchdev from blocking context. + if (adding) + action = SWITCHDEV_PORT_OBJ_ADD; + else + action = SWITCHDEV_PORT_OBJ_DEL; + + /* br_switchdev_mdb_queue_one() will take care to not queue a + * replay of an event that is already pending in the switchdev + * deferred queue. In order to safely determine that, there + * must be no new deferred MDB notifications enqueued for the + * duration of the MDB scan. Therefore, grab the write-side + * lock to avoid racing with any concurrent IGMP/MLD snooping. 
*/ - rcu_read_lock(); + spin_lock_bh(&br->multicast_lock); - hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) { + hlist_for_each_entry(mp, &br->mdb_list, mdb_node) { struct net_bridge_port_group __rcu * const *pp; const struct net_bridge_port_group *p; if (mp->host_joined) { - err = br_switchdev_mdb_queue_one(&mdb_list, + err = br_switchdev_mdb_queue_one(&mdb_list, dev, action, SWITCHDEV_OBJ_ID_HOST_MDB, mp, br_dev); if (err) { - rcu_read_unlock(); + spin_unlock_bh(&br->multicast_lock); goto out_free_mdb; } } - for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL; + for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { if (p->key.port->dev != dev) continue; - err = br_switchdev_mdb_queue_one(&mdb_list, + err = br_switchdev_mdb_queue_one(&mdb_list, dev, action, SWITCHDEV_OBJ_ID_PORT_MDB, mp, dev); if (err) { - rcu_read_unlock(); + spin_unlock_bh(&br->multicast_lock); goto out_free_mdb; } } } - rcu_read_unlock(); - - if (adding) - action = SWITCHDEV_PORT_OBJ_ADD; - else - action = SWITCHDEV_PORT_OBJ_DEL; + spin_unlock_bh(&br->multicast_lock); list_for_each_entry(obj, &mdb_list, list) { err = br_switchdev_mdb_replay_one(nb, dev, @@ -786,6 +804,16 @@ static void nbp_switchdev_unsync_objs(struct net_bridge_port *p, br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL); br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL); + + /* Make sure that the device leaving this bridge has seen all + * relevant events before it is disassociated. In the normal + * case, when the device is directly attached to the bridge, + * this is covered by del_nbp(). If the association was indirect + * however, e.g. via a team or bond, and the device is leaving + * that intermediate device, then the bridge port remains in + * place. 
+ */ + switchdev_deferred_process(); } /* Let the bridge know that this port is offloaded, so that it can assign a diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index 8f19253024..7413602195 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c @@ -135,3 +135,4 @@ static void __exit ebtable_broute_fini(void) module_init(ebtable_broute_init); module_exit(ebtable_broute_fini); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Force packets to be routed instead of bridged"); diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index 278f324e67..dacd81b12e 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c @@ -116,3 +116,4 @@ static void __exit ebtable_filter_fini(void) module_init(ebtable_filter_init); module_exit(ebtable_filter_fini); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ebtables legacy filter table"); diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index 9066f7f376..0f2a8c6118 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c @@ -116,3 +116,4 @@ static void __exit ebtable_nat_fini(void) module_init(ebtable_nat_init); module_exit(ebtable_nat_fini); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ebtables legacy stateless nat table"); diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index aa23479b20..99d82676f7 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -2595,3 +2595,4 @@ EXPORT_SYMBOL(ebt_do_table); module_init(ebtables_init); module_exit(ebtables_fini); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ebtables legacy core"); diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c index 0fcf357ea7..abb090f94e 100644 --- a/net/bridge/netfilter/nf_conntrack_bridge.c +++ b/net/bridge/netfilter/nf_conntrack_bridge.c @@ -416,3 +416,4 @@ module_exit(nf_conntrack_l3proto_bridge_fini); MODULE_ALIAS("nf_conntrack-" __stringify(AF_BRIDGE)); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Bridge IPv4 and IPv6 connection tracking"); |
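The br_fdb.c hunks above introduce per-bridge accounting of dynamically learned FDB entries (an atomic fdb_n_learned counter and a BR_FDB_DYNAMIC_LEARNED flag) together with an optional ceiling, fdb_max_learned, configurable over netlink via IFLA_BR_FDB_MAX_LEARNED and reported back through IFLA_BR_FDB_N_LEARNED. The standalone C sketch below only illustrates that accounting scheme; it is not kernel code, the names fdb_accounting and fdb_may_learn are invented for the example, and the check and increment are folded together here, whereas fdb_create() increments the counter only after the entry has been inserted into the hash table.

/* Minimal userspace illustration (not kernel code) of the learned-entry
 * accounting added around fdb_create()/fdb_delete(): dynamic entries bump
 * an atomic counter and are refused once an optional ceiling is hit, while
 * user-added and local entries are never limited.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fdb_accounting {                 /* hypothetical, mirrors two net_bridge fields */
	atomic_uint n_learned;          /* like br->fdb_n_learned */
	unsigned int max_learned;       /* like br->fdb_max_learned; 0 means unlimited */
};

/* Decide whether a new entry may be created and account for it.
 * Simplified: the kernel increments only after the rhashtable insert succeeds.
 */
static bool fdb_may_learn(struct fdb_accounting *acc,
			  bool added_by_user, bool is_local)
{
	if (added_by_user || is_local)
		return true;            /* static/local entries bypass the limit */

	if (acc->max_learned &&
	    atomic_load(&acc->n_learned) >= acc->max_learned)
		return false;           /* ceiling reached, refuse like fdb_create() */

	atomic_fetch_add(&acc->n_learned, 1);
	return true;
}

int main(void)
{
	struct fdb_accounting acc = { .max_learned = 2 };

	for (int i = 0; i < 4; i++)
		printf("dynamic learn #%d: %s\n", i,
		       fdb_may_learn(&acc, false, false) ? "ok" : "refused");
	printf("user-added entry: %s\n",
	       fdb_may_learn(&acc, true, false) ? "ok" : "refused");
	return 0;
}

As in the patch, entries created with BR_FDB_ADDED_BY_USER or BR_FDB_LOCAL set never count against the ceiling, and a ceiling of 0 disables the limit entirely.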