author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
commit     dc50eab76b709d68175a358d6e23a5a3890764d3 (patch)
tree       c754d0390db060af0213ff994f0ac310e4cfd6e9 /drivers/net/ethernet/sfc
parent     Adding debian version 6.6.15-2. (diff)
download   linux-dc50eab76b709d68175a358d6e23a5a3890764d3.tar.xz
           linux-dc50eab76b709d68175a358d6e23a5a3890764d3.zip
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c          2
-rw-r--r--  drivers/net/ethernet/sfc/mae.c                   62
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c                   3
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c                   27
-rw-r--r--  drivers/net/ethernet/sfc/siena/efx_channels.c     2
-rw-r--r--  drivers/net/ethernet/sfc/tc.c                   337
-rw-r--r--  drivers/net/ethernet/sfc/tc.h                     8
-rw-r--r--  drivers/net/ethernet/sfc/tc_conntrack.c          91
8 files changed, 506 insertions, 26 deletions
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 8d2d7ea2eb..c9e17a8208 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -1260,7 +1260,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
 
 	spent = efx_process_channel(channel, budget);
 
-	xdp_do_flush_map();
+	xdp_do_flush();
 
 	if (spent < budget) {
 		if (efx_channel_has_rx_queue(channel) &&
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index c3e2b4a21d..10709d828a 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -1291,10 +1291,11 @@ int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
 	size_t outlen;
 	int rc;
 
-	MCDI_POPULATE_DWORD_4(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS,
+	MCDI_POPULATE_DWORD_5(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS,
 			      MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH, act->vlan_push,
 			      MAE_ACTION_SET_ALLOC_IN_VLAN_POP, act->vlan_pop,
 			      MAE_ACTION_SET_ALLOC_IN_DECAP, act->decap,
+			      MAE_ACTION_SET_ALLOC_IN_DO_NAT, act->do_nat,
 			      MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL,
 			      act->do_ttl_dec);
 
@@ -1705,8 +1706,10 @@ static int efx_mae_insert_lhs_outer_rule(struct efx_nic *efx,
 
 	/* action */
 	act = &rule->lhs_act;
-	MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE,
-		       MAE_MCDI_ENCAP_TYPE_NONE);
+	rc = efx_mae_encap_type_to_mae_type(act->tun_type);
+	if (rc < 0)
+		return rc;
+	MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE, rc);
 	/* We always inhibit CT lookup on TCP_INTERESTING_FLAGS, since the
 	 * SW path needs to process the packet to update the conntrack tables
 	 * on connection establishment (SYN) or termination (FIN, RST).
@@ -1734,9 +1737,60 @@ static int efx_mae_insert_lhs_outer_rule(struct efx_nic *efx,
 	return 0;
 }
 
+static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
+					   const struct efx_tc_match *match);
+
+static int efx_mae_insert_lhs_action_rule(struct efx_nic *efx,
+					  struct efx_tc_lhs_rule *rule,
+					  u32 prio)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN));
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN);
+	struct efx_tc_lhs_action *act = &rule->lhs_act;
+	MCDI_DECLARE_STRUCT_PTR(match_crit);
+	MCDI_DECLARE_STRUCT_PTR(response);
+	size_t outlen;
+	int rc;
+
+	match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA);
+	response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE);
+	MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID,
+			      MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+	MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID,
+			      MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL);
+	EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(response, MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL),
+			     MAE_ACTION_RULE_RESPONSE_DO_CT, !!act->zone,
+			     MAE_ACTION_RULE_RESPONSE_DO_RECIRC,
+			     act->rid && !act->zone,
+			     MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE,
+			     MAE_CT_VNI_MODE_ZERO,
+			     MAE_ACTION_RULE_RESPONSE_RECIRC_ID,
+			     act->rid ? act->rid->fw_id : 0,
+			     MAE_ACTION_RULE_RESPONSE_CT_DOMAIN,
+			     act->zone ? act->zone->zone : 0);
+	MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_COUNTER_ID,
+			      act->count ? act->count->cnt->fw_id :
+					   MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
+	MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio);
+	rc = efx_mae_populate_match_criteria(match_crit, &rule->match);
+	if (rc)
+		return rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < sizeof(outbuf))
+		return -EIO;
+	rule->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID);
+	return 0;
+}
+
 int efx_mae_insert_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule,
 			    u32 prio)
 {
+	if (rule->is_ar)
+		return efx_mae_insert_lhs_action_rule(efx, rule, prio);
 	return efx_mae_insert_lhs_outer_rule(efx, rule, prio);
 }
 
@@ -1770,6 +1824,8 @@ static int efx_mae_remove_lhs_outer_rule(struct efx_nic *efx,
 
 int efx_mae_remove_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule)
 {
+	if (rule->is_ar)
+		return efx_mae_delete_rule(efx, rule->fw_id);
 	return efx_mae_remove_lhs_outer_rule(efx, rule);
 }
 
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index d23da96273..7657850222 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -2205,10 +2205,9 @@ int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
 			goto out_free;
 		}
 
-		strncpy(desc,
+		strscpy(desc,
 			MCDI_PTR(outbuf, NVRAM_METADATA_OUT_DESCRIPTION),
 			MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen));
-		desc[MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)] = '\0';
 	} else {
 		desc[0] = '\0';
 	}
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index f54200f03e..b04fdbb8ae 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -108,11 +108,17 @@
 #define	PTP_MIN_LENGTH		63
 
 #define PTP_ADDR_IPV4		0xe0000181	/* 224.0.1.129 */
-#define PTP_ADDR_IPV6	{0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
-				0, 0x01, 0x81}	/* ff0e::181 */
+
+/* ff0e::181 */
+static const struct in6_addr ptp_addr_ipv6 = { { {
+	0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0x81 } } };
+
+/* 01-1B-19-00-00-00 */
+static const u8 ptp_addr_ether[ETH_ALEN] __aligned(2) = {
+	0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 };
+
 #define PTP_EVENT_PORT		319
 #define PTP_GENERAL_PORT	320
-#define PTP_ADDR_ETHER	{0x01, 0x1b, 0x19, 0, 0, 0} /* 01-1B-19-00-00-00 */
 
 /* Annoyingly the format of the version numbers are different between
  * versions 1 and 2 so it isn't possible to simply look for 1 or 2.
@@ -1296,7 +1302,7 @@ static int efx_ptp_insert_ipv4_filter(struct efx_nic *efx,
 
 static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx,
 				      struct list_head *filter_list,
-				      struct in6_addr *addr, u16 port,
+				      const struct in6_addr *addr, u16 port,
 				      unsigned long expiry)
 {
 	struct efx_filter_spec spec;
@@ -1309,11 +1315,10 @@ static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx,
 static int efx_ptp_insert_eth_multicast_filter(struct efx_nic *efx)
 {
 	struct efx_ptp_data *ptp = efx->ptp_data;
-	const u8 addr[ETH_ALEN] = PTP_ADDR_ETHER;
 	struct efx_filter_spec spec;
 
 	efx_ptp_init_filter(efx, &spec);
-	efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, addr);
+	efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, ptp_addr_ether);
 	spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
 	spec.ether_type = htons(ETH_P_1588);
 	return efx_ptp_insert_filter(efx, &ptp->rxfilters_mcast, &spec, 0);
@@ -1346,15 +1351,13 @@ static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
 	 * PTP over IPv6 and Ethernet
 	 */
 	if (efx_ptp_use_mac_tx_timestamps(efx)) {
-		struct in6_addr ipv6_addr = {{PTP_ADDR_IPV6}};
-
 		rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast,
-						&ipv6_addr, PTP_EVENT_PORT, 0);
+						&ptp_addr_ipv6, PTP_EVENT_PORT, 0);
 		if (rc < 0)
 			goto fail;
 
 		rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast,
-						&ipv6_addr, PTP_GENERAL_PORT, 0);
+						&ptp_addr_ipv6, PTP_GENERAL_PORT, 0);
 		if (rc < 0)
 			goto fail;
 
@@ -1379,9 +1382,7 @@ static bool efx_ptp_valid_unicast_event_pkt(struct sk_buff *skb)
 		       ip_hdr(skb)->protocol == IPPROTO_UDP &&
 		       udp_hdr(skb)->source == htons(PTP_EVENT_PORT);
 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
-		struct in6_addr mcast_addr = {{PTP_ADDR_IPV6}};
-
-		return !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &mcast_addr) &&
+		return !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &ptp_addr_ipv6) &&
 		       ipv6_hdr(skb)->nexthdr == IPPROTO_UDP &&
 		       udp_hdr(skb)->source == htons(PTP_EVENT_PORT);
 	}
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index 1776f7f8a7..a7346e965b 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -1285,7 +1285,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
 
 	spent = efx_process_channel(channel, budget);
 
-	xdp_do_flush_map();
+	xdp_do_flush();
 
 	if (spent < budget) {
 		if (efx_channel_has_rx_queue(channel) &&
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 30ebef8824..82e8891a61 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -642,6 +642,15 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx,
 				return -EEXIST;
 			}
 			break;
+		case EFX_TC_EM_PSEUDO_OR:
+			/* old EM corresponds to an OR that has to be unique
+			 * (it must not overlap with any other OR, whether
+			 * direct-EM or pseudo).
+			 */
+			NL_SET_ERR_MSG_FMT_MOD(extack,
+					       "%s encap match conflicts with existing pseudo(OR) entry",
+					       em_type ? "Pseudo" : "Direct");
+			return -EEXIST;
 		default: /* Unrecognised pseudo-type. Just say no */
 			NL_SET_ERR_MSG_FMT_MOD(extack, "%s encap match conflicts with existing pseudo(%d) entry",
@@ -872,6 +881,93 @@ static bool efx_tc_rule_is_lhs_rule(struct flow_rule *fr,
 	return false;
 }
 
+/* A foreign LHS rule has matches on enc_ keys at the TC layer (including an
+ * implied match on enc_ip_proto UDP).  Translate these into non-enc_ keys,
+ * so that we can use the same MAE machinery as local LHS rules (and so that
+ * the lhs_rules entries have uniform semantics).  It may seem odd to do it
+ * this way round, given that the corresponding fields in the MAE MCDIs are
+ * all ENC_, but (a) we don't have enc_L2 or enc_ip_proto in struct
+ * efx_tc_match_fields and (b) semantically an LHS rule doesn't have inner
+ * fields so it's just matching on *the* header rather than the outer header.
+ * Make sure that the non-enc_ keys were not already being matched on, as that
+ * would imply a rule that needed a triple lookup.  (Hardware can do that,
+ * with OR-AR-CT-AR, but it halves packet rate so we avoid it where possible;
+ * see efx_tc_flower_flhs_needs_ar().)
+ */
+static int efx_tc_flower_translate_flhs_match(struct efx_tc_match *match)
+{
+	int rc = 0;
+
+#define COPY_MASK_AND_VALUE(_key, _ekey)	({		\
+	if (match->mask._key) {					\
+		rc = -EOPNOTSUPP;				\
+	} else {						\
+		match->mask._key = match->mask._ekey;		\
+		match->mask._ekey = 0;				\
+		match->value._key = match->value._ekey;		\
+		match->value._ekey = 0;				\
+	}							\
+	rc;							\
+})
+#define COPY_FROM_ENC(_key)	COPY_MASK_AND_VALUE(_key, enc_##_key)
+	if (match->mask.ip_proto)
+		return -EOPNOTSUPP;
+	match->mask.ip_proto = ~0;
+	match->value.ip_proto = IPPROTO_UDP;
+	if (COPY_FROM_ENC(src_ip) || COPY_FROM_ENC(dst_ip))
+		return rc;
+#ifdef CONFIG_IPV6
+	if (!ipv6_addr_any(&match->mask.src_ip6))
+		return -EOPNOTSUPP;
+	match->mask.src_ip6 = match->mask.enc_src_ip6;
+	memset(&match->mask.enc_src_ip6, 0, sizeof(struct in6_addr));
+	if (!ipv6_addr_any(&match->mask.dst_ip6))
+		return -EOPNOTSUPP;
+	match->mask.dst_ip6 = match->mask.enc_dst_ip6;
+	memset(&match->mask.enc_dst_ip6, 0, sizeof(struct in6_addr));
+#endif
+	if (COPY_FROM_ENC(ip_tos) || COPY_FROM_ENC(ip_ttl))
+		return rc;
+	/* should really copy enc_ip_frag but we don't have that in
+	 * parse_match yet
+	 */
+	if (COPY_MASK_AND_VALUE(l4_sport, enc_sport) ||
+	    COPY_MASK_AND_VALUE(l4_dport, enc_dport))
+		return rc;
+	return 0;
+#undef COPY_FROM_ENC
+#undef COPY_MASK_AND_VALUE
+}
+
+/* If a foreign LHS rule wants to match on keys that are only available after
+ * encap header identification and parsing, then it can't be done in the Outer
+ * Rule lookup, because that lookup determines the encap type used to parse
+ * beyond the outer headers.  Thus, such rules must use the OR-AR-CT-AR lookup
+ * sequence, with an EM (struct efx_tc_encap_match) in the OR step.
+ * Return true iff the passed match requires this.
+ */
+static bool efx_tc_flower_flhs_needs_ar(struct efx_tc_match *match)
+{
+	/* matches on inner-header keys can't be done in OR */
+	return match->mask.eth_proto ||
+	       match->mask.vlan_tci[0] || match->mask.vlan_tci[1] ||
+	       match->mask.vlan_proto[0] || match->mask.vlan_proto[1] ||
+	       memchr_inv(match->mask.eth_saddr, 0, ETH_ALEN) ||
+	       memchr_inv(match->mask.eth_daddr, 0, ETH_ALEN) ||
+	       match->mask.ip_proto ||
+	       match->mask.ip_tos || match->mask.ip_ttl ||
+	       match->mask.src_ip || match->mask.dst_ip ||
+#ifdef CONFIG_IPV6
+	       !ipv6_addr_any(&match->mask.src_ip6) ||
+	       !ipv6_addr_any(&match->mask.dst_ip6) ||
+#endif
+	       match->mask.ip_frag || match->mask.ip_firstfrag ||
+	       match->mask.l4_sport || match->mask.l4_dport ||
+	       match->mask.tcp_flags ||
+	       /* nor can VNI */
+	       match->mask.enc_keyid;
+}
+
 static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
 					    struct flow_cls_offload *tc,
 					    struct flow_rule *fr,
@@ -882,9 +978,12 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
 	struct netlink_ext_ack *extack = tc->common.extack;
 	struct efx_tc_lhs_action *act = &rule->lhs_act;
 	const struct flow_action_entry *fa;
+	enum efx_tc_counter_type ctype;
 	bool pipe = true;
 	int i;
 
+	ctype = rule->is_ar ? EFX_TC_COUNTER_TYPE_AR : EFX_TC_COUNTER_TYPE_OR;
+
 	flow_action_for_each(i, fa, &fr->action) {
 		struct efx_tc_ct_zone *ct_zone;
 		struct efx_tc_recirc_id *rid;
@@ -917,7 +1016,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
 				return -EOPNOTSUPP;
 			}
 			cnt = efx_tc_flower_get_counter_index(efx, tc->cookie,
-							      EFX_TC_COUNTER_TYPE_OR);
+							      ctype);
 			if (IS_ERR(cnt)) {
 				NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter");
 				return PTR_ERR(cnt);
@@ -1354,6 +1453,222 @@ static int efx_tc_incomplete_mangle(struct efx_tc_mangler_state *mung,
 	return 0;
 }
 
+static int efx_tc_flower_replace_foreign_lhs_ar(struct efx_nic *efx,
+						struct flow_cls_offload *tc,
+						struct flow_rule *fr,
+						struct efx_tc_match *match,
+						struct net_device *net_dev)
+{
+	struct netlink_ext_ack *extack = tc->common.extack;
+	struct efx_tc_lhs_rule *rule, *old;
+	enum efx_encap_type type;
+	int rc;
+
+	type = efx_tc_indr_netdev_type(net_dev);
+	if (type == EFX_ENCAP_TYPE_NONE) {
+		NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device");
+		return -EOPNOTSUPP;
+	}
+
+	rc = efx_mae_check_encap_type_supported(efx, type);
+	if (rc) {
+		NL_SET_ERR_MSG_FMT_MOD(extack,
+				       "Firmware reports no support for %s encap match",
+				       efx_tc_encap_type_name(type));
+		return rc;
+	}
+	/* This is an Action Rule, so it needs a separate Encap Match in the
+	 * Outer Rule table.  Insert that now.
+	 */
+	rc = efx_tc_flower_record_encap_match(efx, match, type,
+					      EFX_TC_EM_DIRECT, 0, 0, extack);
+	if (rc)
+		return rc;
+
+	match->mask.recirc_id = 0xff;
+	if (match->mask.ct_state_trk && match->value.ct_state_trk) {
+		NL_SET_ERR_MSG_MOD(extack, "LHS rule can never match +trk");
+		rc = -EOPNOTSUPP;
+		goto release_encap_match;
+	}
+	/* LHS rules are always -trk, so we don't need to match on that */
+	match->mask.ct_state_trk = 0;
+	match->value.ct_state_trk = 0;
+	/* We must inhibit match on TCP SYN/FIN/RST, so that SW can see
+	 * the packet and update the conntrack table.
+	 * Outer Rules will do that with CT_TCP_FLAGS_INHIBIT, but Action
+	 * Rules don't have that; instead they support matching on
+	 * TCP_SYN_FIN_RST (aka TCP_INTERESTING_FLAGS), so use that.
+	 * This is only strictly needed if there will be a DO_CT action,
+	 * which we don't know yet, but typically there will be and it's
+	 * simpler not to bother checking here.
+	 */
+	match->mask.tcp_syn_fin_rst = true;
+
+	rc = efx_mae_match_check_caps(efx, &match->mask, extack);
+	if (rc)
+		goto release_encap_match;
+
+	rule = kzalloc(sizeof(*rule), GFP_USER);
+	if (!rule) {
+		rc = -ENOMEM;
+		goto release_encap_match;
+	}
+	rule->cookie = tc->cookie;
+	rule->is_ar = true;
+	old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht,
+						&rule->linkage,
+						efx_tc_lhs_rule_ht_params);
+	if (old) {
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Already offloaded rule (cookie %lx)\n", tc->cookie);
+		rc = -EEXIST;
+		NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
+		goto release;
+	}
+
+	/* Parse actions */
+	rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, net_dev, rule);
+	if (rc)
+		goto release;
+
+	rule->match = *match;
+	rule->lhs_act.tun_type = type;
+
+	rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC);
+	if (rc) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
+		goto release;
+	}
+	netif_dbg(efx, drv, efx->net_dev,
+		  "Successfully parsed lhs rule (cookie %lx)\n",
+		  tc->cookie);
+	return 0;
+
+release:
+	efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act);
+	if (!old)
+		rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage,
+				       efx_tc_lhs_rule_ht_params);
+	kfree(rule);
+release_encap_match:
+	if (match->encap)
+		efx_tc_flower_release_encap_match(efx, match->encap);
+	return rc;
+}
+
+static int efx_tc_flower_replace_foreign_lhs(struct efx_nic *efx,
+					     struct flow_cls_offload *tc,
+					     struct flow_rule *fr,
+					     struct efx_tc_match *match,
+					     struct net_device *net_dev)
+{
+	struct netlink_ext_ack *extack = tc->common.extack;
+	struct efx_tc_lhs_rule *rule, *old;
+	enum efx_encap_type type;
+	int rc;
+
+	if (tc->common.chain_index) {
+		NL_SET_ERR_MSG_MOD(extack, "LHS rule only allowed in chain 0");
+		return -EOPNOTSUPP;
+	}
+
+	if (!efx_tc_match_is_encap(&match->mask)) {
+		/* This is not a tunnel decap rule, ignore it */
+		netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign LHS filter without encap match\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (efx_tc_flower_flhs_needs_ar(match))
+		return efx_tc_flower_replace_foreign_lhs_ar(efx, tc, fr, match,
+							    net_dev);
+
+	type = efx_tc_indr_netdev_type(net_dev);
+	if (type == EFX_ENCAP_TYPE_NONE) {
+		NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device\n");
+		return -EOPNOTSUPP;
+	}
+
+	rc = efx_mae_check_encap_type_supported(efx, type);
+	if (rc) {
+		NL_SET_ERR_MSG_FMT_MOD(extack,
+				       "Firmware reports no support for %s encap match",
+				       efx_tc_encap_type_name(type));
+		return rc;
+	}
+	/* Reserve the outer tuple with a pseudo Encap Match */
+	rc = efx_tc_flower_record_encap_match(efx, match, type,
+					      EFX_TC_EM_PSEUDO_OR, 0, 0,
+					      extack);
+	if (rc)
+		return rc;
+
+	if (match->mask.ct_state_trk && match->value.ct_state_trk) {
+		NL_SET_ERR_MSG_MOD(extack, "LHS rule can never match +trk");
+		rc = -EOPNOTSUPP;
+		goto release_encap_match;
+	}
+	/* LHS rules are always -trk, so we don't need to match on that */
+	match->mask.ct_state_trk = 0;
+	match->value.ct_state_trk = 0;
+
+	rc = efx_tc_flower_translate_flhs_match(match);
+	if (rc) {
+		NL_SET_ERR_MSG_MOD(extack, "LHS rule cannot match on inner fields");
+		goto release_encap_match;
+	}
+
+	rc = efx_mae_match_check_caps_lhs(efx, &match->mask, extack);
+	if (rc)
+		goto release_encap_match;
+
+	rule = kzalloc(sizeof(*rule), GFP_USER);
+	if (!rule) {
+		rc = -ENOMEM;
+		goto release_encap_match;
+	}
+	rule->cookie = tc->cookie;
+	old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht,
+						&rule->linkage,
+						efx_tc_lhs_rule_ht_params);
+	if (old) {
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Already offloaded rule (cookie %lx)\n", tc->cookie);
+		rc = -EEXIST;
+		NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
+		goto release;
+	}
+
+	/* Parse actions */
+	rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, net_dev, rule);
+	if (rc)
+		goto release;
+
+	rule->match = *match;
+	rule->lhs_act.tun_type = type;
+
+	rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC);
+	if (rc) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
+		goto release;
+	}
+	netif_dbg(efx, drv, efx->net_dev,
+		  "Successfully parsed lhs rule (cookie %lx)\n",
+		  tc->cookie);
+	return 0;
+
+release:
+	efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act);
+	if (!old)
+		rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage,
+				       efx_tc_lhs_rule_ht_params);
+	kfree(rule);
+release_encap_match:
+	if (match->encap)
+		efx_tc_flower_release_encap_match(efx, match->encap);
+	return rc;
+}
+
 static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
 					 struct net_device *net_dev,
 					 struct flow_cls_offload *tc)
@@ -1371,7 +1686,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
 
 	/* Parse match */
 	memset(&match, 0, sizeof(match));
-	rc = efx_tc_flower_parse_match(efx, fr, &match, NULL);
+	rc = efx_tc_flower_parse_match(efx, fr, &match, extack);
 	if (rc)
 		return rc;
 	/* The rule as given to us doesn't specify a source netdevice.
@@ -1387,6 +1702,10 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
 	match.value.ingress_port = rc;
 	match.mask.ingress_port = ~0;
 
+	if (efx_tc_rule_is_lhs_rule(fr, &match))
+		return efx_tc_flower_replace_foreign_lhs(efx, tc, fr, &match,
+							 net_dev);
+
 	if (tc->common.chain_index) {
 		struct efx_tc_recirc_id *rid;
 
@@ -1416,6 +1735,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
 	if (match.mask.ct_state_est && !match.value.ct_state_est) {
 		if (match.value.tcp_syn_fin_rst) {
 			/* Can't offload this combination */
+			NL_SET_ERR_MSG_MOD(extack, "TCP flags and -est conflict for offload");
 			rc = -EOPNOTSUPP;
 			goto release;
 		}
@@ -1442,7 +1762,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
 		goto release;
 	}
 
-	rc = efx_mae_match_check_caps(efx, &match.mask, NULL);
+	rc = efx_mae_match_check_caps(efx, &match.mask, extack);
 	if (rc)
 		goto release;
 
@@ -1470,7 +1790,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
 						     extack);
 		if (rc)
 			goto release;
-	} else {
+	} else if (!tc->common.chain_index) {
 		/* This is not a tunnel decap rule, ignore it */
 		netif_dbg(efx, drv, efx->net_dev,
 			  "Ignoring foreign filter without encap match\n");
@@ -1530,6 +1850,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
 				goto release;
 			}
 			if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_COUNT)) {
+				NL_SET_ERR_MSG_MOD(extack, "Count action violates action order (can't happen)");
 				rc = -EOPNOTSUPP;
 				goto release;
 			}
@@ -2136,6 +2457,14 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
 			NL_SET_ERR_MSG_MOD(extack, "Cannot offload tunnel decap action without tunnel device");
 			rc = -EOPNOTSUPP;
 			goto release;
+		case FLOW_ACTION_CT:
+			if (fa->ct.action != TCA_CT_ACT_NAT) {
+				rc = -EOPNOTSUPP;
+				NL_SET_ERR_MSG_FMT_MOD(extack, "Can only offload CT 'nat' action in RHS rules, not %d", fa->ct.action);
+				goto release;
+			}
+			act->do_nat = 1;
+			break;
 		default:
 			NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u", fa->id);
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
index 4dd2c378fd..7b5190078b 100644
--- a/drivers/net/ethernet/sfc/tc.h
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -48,6 +48,7 @@ struct efx_tc_encap_action; /* see tc_encap_actions.h */
 * @vlan_push: the number of vlan headers to push
 * @vlan_pop: the number of vlan headers to pop
 * @decap: used to indicate a tunnel header decapsulation should take place
+ * @do_nat: perform NAT/NPT with values returned by conntrack match
 * @do_ttl_dec: used to indicate IP TTL / Hop Limit should be decremented
 * @deliver: used to indicate a deliver action should take place
 * @vlan_tci: tci fields for vlan push actions
@@ -68,6 +69,7 @@ struct efx_tc_action_set {
 	u16 vlan_push:2;
 	u16 vlan_pop:2;
 	u16 decap:1;
+	u16 do_nat:1;
 	u16 do_ttl_dec:1;
 	u16 deliver:1;
 	__be16 vlan_tci[2];
@@ -140,10 +142,14 @@ static inline bool efx_tc_match_is_encap(const struct efx_tc_match_fields *mask)
 *	The pseudo encap match may be referenced again by an encap match
 *	with different values for these fields, but all masks must match the
 *	first (stored in our child_* fields).
+ * @EFX_TC_EM_PSEUDO_OR: registered by an fLHS rule that fits in the OR
+ *	table.  The &struct efx_tc_lhs_rule already holds the HW OR entry.
+ *	Only one reference to this encap match may exist.
 */
 enum efx_tc_em_pseudo_type {
 	EFX_TC_EM_DIRECT,
 	EFX_TC_EM_PSEUDO_MASK,
+	EFX_TC_EM_PSEUDO_OR,
 };
 
 struct efx_tc_encap_match {
@@ -183,6 +189,7 @@ struct efx_tc_action_set_list {
 };
 
 struct efx_tc_lhs_action {
+	enum efx_encap_type tun_type;
 	struct efx_tc_recirc_id *rid;
 	struct efx_tc_ct_zone *zone;
 	struct efx_tc_counter_index *count;
@@ -203,6 +210,7 @@ struct efx_tc_lhs_rule {
 	struct efx_tc_lhs_action lhs_act;
 	struct rhash_head linkage;
 	u32 fw_id;
+	bool is_ar; /* Action Rule (for OR-AR-CT-AR sequence) */
 };
 
 enum efx_tc_rule_prios {
diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
index 44bb576703..d90206f271 100644
--- a/drivers/net/ethernet/sfc/tc_conntrack.c
+++ b/drivers/net/ethernet/sfc/tc_conntrack.c
@@ -276,10 +276,84 @@ static int efx_tc_ct_parse_match(struct efx_nic *efx, struct flow_rule *fr,
 	return 0;
 }
 
+/**
+ * struct efx_tc_ct_mangler_state - tracks which fields have been pedited
+ *
+ * @ipv4: IP source or destination addr has been set
+ * @tcpudp: TCP/UDP source or destination port has been set
+ */
+struct efx_tc_ct_mangler_state {
+	u8 ipv4:1;
+	u8 tcpudp:1;
+};
+
+static int efx_tc_ct_mangle(struct efx_nic *efx, struct efx_tc_ct_entry *conn,
+			    const struct flow_action_entry *fa,
+			    struct efx_tc_ct_mangler_state *mung)
+{
+	/* Is this the first mangle we've processed for this rule? */
+	bool first = !(mung->ipv4 || mung->tcpudp);
+	bool dnat = false;
+
+	switch (fa->mangle.htype) {
+	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+		switch (fa->mangle.offset) {
+		case offsetof(struct iphdr, daddr):
+			dnat = true;
+			fallthrough;
+		case offsetof(struct iphdr, saddr):
+			if (fa->mangle.mask)
+				return -EOPNOTSUPP;
+			conn->nat_ip = htonl(fa->mangle.val);
+			mung->ipv4 = 1;
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+		break;
+	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+		/* Both struct tcphdr and struct udphdr start with
+		 *	__be16 source;
+		 *	__be16 dest;
+		 * so we can use the same code for both.
+		 */
+		switch (fa->mangle.offset) {
+		case offsetof(struct tcphdr, dest):
+			BUILD_BUG_ON(offsetof(struct tcphdr, dest) !=
+				     offsetof(struct udphdr, dest));
+			dnat = true;
+			fallthrough;
+		case offsetof(struct tcphdr, source):
+			BUILD_BUG_ON(offsetof(struct tcphdr, source) !=
+				     offsetof(struct udphdr, source));
+			if (~fa->mangle.mask != 0xffff)
+				return -EOPNOTSUPP;
+			conn->l4_natport = htons(fa->mangle.val);
+			mung->tcpudp = 1;
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	/* first mangle tells us whether this is SNAT or DNAT;
+	 * subsequent mangles must match that
+	 */
+	if (first)
+		conn->dnat = dnat;
+	else if (conn->dnat != dnat)
+		return -EOPNOTSUPP;
+	return 0;
+}
+
 static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone,
 			     struct flow_cls_offload *tc)
 {
 	struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
+	struct efx_tc_ct_mangler_state mung = {};
 	struct efx_tc_ct_entry *conn, *old;
 	struct efx_nic *efx = ct_zone->efx;
 	const struct flow_action_entry *fa;
@@ -326,6 +400,17 @@ static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone,
 				goto release;
 			}
 			break;
+		case FLOW_ACTION_MANGLE:
+			if (conn->eth_proto != htons(ETH_P_IP)) {
+				netif_dbg(efx, drv, efx->net_dev,
+					  "NAT only supported for IPv4\n");
+				rc = -EOPNOTSUPP;
+				goto release;
+			}
+			rc = efx_tc_ct_mangle(efx, conn, fa, &mung);
+			if (rc)
+				goto release;
+			break;
 		default:
 			netif_dbg(efx, drv, efx->net_dev,
 				  "Unhandled action %u for conntrack\n", fa->id);
@@ -335,8 +420,10 @@ static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone,
 		goto release;
 	}
 	/* fill in defaults for unmangled values */
-	conn->nat_ip = conn->dnat ? conn->dst_ip : conn->src_ip;
-	conn->l4_natport = conn->dnat ? conn->l4_dport : conn->l4_sport;
+	if (!mung.ipv4)
+		conn->nat_ip = conn->dnat ? conn->dst_ip : conn->src_ip;
+	if (!mung.tcpudp)
+		conn->l4_natport = conn->dnat ? conn->l4_dport : conn->l4_sport;
 
 	cnt = efx_tc_flower_allocate_counter(efx, EFX_TC_COUNTER_TYPE_CT);
 	if (IS_ERR(cnt)) {