Diffstat (limited to 'drivers/net/ethernet/sfc/tc.c')
 drivers/net/ethernet/sfc/tc.c | 680 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 680 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
new file mode 100644
index 000000000..d312147cd
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <net/pkt_cls.h>
+#include "tc.h"
+#include "tc_bindings.h"
+#include "mae.h"
+#include "ef100_rep.h"
+#include "efx.h"
+
+#define EFX_EFV_PF NULL
+/* Look up the representor information (efv) for a device.
+ * May return NULL for the PF (us), or an error pointer for a device that
+ * isn't supported as a TC offload endpoint
+ */
+static struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx,
+ struct net_device *dev)
+{
+ struct efx_rep *efv;
+
+ if (!dev)
+ return ERR_PTR(-EOPNOTSUPP);
+ /* Is it us (the PF)? */
+ if (dev == efx->net_dev)
+ return EFX_EFV_PF;
+ /* Is it an efx vfrep at all? */
+ if (dev->netdev_ops != &efx_ef100_rep_netdev_ops)
+ return ERR_PTR(-EOPNOTSUPP);
+ /* Is it ours? We don't support TC rules that include another
+ * EF100's netdevices (not even on another port of the same NIC).
+ */
+ efv = netdev_priv(dev);
+ if (efv->parent != efx)
+ return ERR_PTR(-EOPNOTSUPP);
+ return efv;
+}
+
+/* Convert a driver-internal vport ID into an external device (wire or VF) */
+static s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv)
+{
+ u32 mport;
+
+ if (IS_ERR(efv))
+ return PTR_ERR(efv);
+ if (!efv) /* device is PF (us) */
+ efx_mae_mport_wire(efx, &mport);
+ else /* device is repr */
+ efx_mae_mport_mport(efx, efv->mport, &mport);
+ return mport;
+}
+
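+/* Offloaded rules live in a driver-side hashtable, keyed by the unsigned
+ * long cookie with which the TC core identifies the rule in subsequent
+ * callbacks.
+ */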
+static const struct rhashtable_params efx_tc_match_action_ht_params = {
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct efx_tc_flow_rule, cookie),
+ .head_offset = offsetof(struct efx_tc_flow_rule, linkage),
+};
+
+static void efx_tc_free_action_set(struct efx_nic *efx,
+ struct efx_tc_action_set *act, bool in_hw)
+{
+ /* Failure paths calling this on the 'running action' set in_hw=false,
+ * because if the alloc had succeeded we'd've put it in acts.list and
+ * not still have it in act.
+ */
+ if (in_hw) {
+ efx_mae_free_action_set(efx, act->fw_id);
+ /* in_hw is true iff we are on an acts.list; make sure to
+ * remove ourselves from that list before we are freed.
+ */
+ list_del(&act->list);
+ }
+ kfree(act);
+}
+
+static void efx_tc_free_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts,
+ bool in_hw)
+{
+ struct efx_tc_action_set *act, *next;
+
+ /* Failure paths set in_hw=false, because usually the acts didn't get
+ * to efx_mae_alloc_action_set_list(); if they did, the failure tree
+ * has a separate efx_mae_free_action_set_list() before calling us.
+ */
+ if (in_hw)
+ efx_mae_free_action_set_list(efx, acts);
+ /* Any act that's on the list will be in_hw even if the list isn't */
+ list_for_each_entry_safe(act, next, &acts->list, list)
+ efx_tc_free_action_set(efx, act, true);
+ /* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */
+}
+
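+/* Remove a rule from the hardware and release the firmware objects it
+ * owns. The caller remains responsible for removing the rule from the
+ * driver's hashtable and freeing the structure itself.
+ */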
+static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
+{
+ efx_mae_delete_rule(efx, rule->fw_id);
+
+ /* Release entries in subsidiary tables */
+ efx_tc_free_action_set_list(efx, &rule->acts, true);
+ rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+}
+
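+/* Destructor callback for rhashtable_free_and_destroy(): any rule still
+ * present at teardown was never the subject of a FLOW_CLS_DESTROY, so
+ * complain and clean it up here.
+ */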
+static void efx_tc_flow_free(void *ptr, void *arg)
+{
+ struct efx_tc_flow_rule *rule = ptr;
+ struct efx_nic *efx = arg;
+
+ netif_err(efx, drv, efx->net_dev,
+ "tc rule %lx still present at teardown, removing\n",
+ rule->cookie);
+
+ efx_mae_delete_rule(efx, rule->fw_id);
+
+ /* Release entries in subsidiary tables */
+ efx_tc_free_action_set_list(efx, &rule->acts, true);
+
+ kfree(rule);
+}
+
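+/* At this stage only the CONTROL and BASIC dissector keys are accepted,
+ * and both must be entirely unmasked, so effectively only match-all
+ * filters (plus the implicit ingress-port match set up by our caller)
+ * can be offloaded; e.g.
+ *   tc filter add dev $REP ingress protocol all flower skip_sw \
+ *       action mirred egress redirect dev $PF
+ * where $REP and $PF stand for the representor and PF netdevs.
+ */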
+static int efx_tc_flower_parse_match(struct efx_nic *efx,
+ struct flow_rule *rule,
+ struct efx_tc_match *match,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control fm;
+
+ flow_rule_match_control(rule, &fm);
+
+ if (fm.mask->flags) {
+ efx_tc_err(efx, "Unsupported match on control.flags %#x\n",
+ fm.mask->flags);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported match on control.flags");
+ return -EOPNOTSUPP;
+ }
+ }
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC))) {
+ efx_tc_err(efx, "Unsupported flower keys %#x\n", dissector->used_keys);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported flower keys encountered");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic fm;
+
+ flow_rule_match_basic(rule, &fm);
+ if (fm.mask->n_proto) {
+ EFX_TC_ERR_MSG(efx, extack, "Unsupported eth_proto match\n");
+ return -EOPNOTSUPP;
+ }
+ if (fm.mask->ip_proto) {
+ EFX_TC_ERR_MSG(efx, extack, "Unsupported ip_proto match\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
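+/* Handle FLOW_CLS_REPLACE: parse the match and actions, write the
+ * resulting action set(s) and rule to the MAE, and index the rule by its
+ * TC cookie.
+ */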
+static int efx_tc_flower_replace(struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct flow_cls_offload *tc,
+ struct efx_rep *efv)
+{
+ struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_flow_rule *rule = NULL, *old;
+ struct efx_tc_action_set *act = NULL;
+ const struct flow_action_entry *fa;
+ struct efx_rep *from_efv, *to_efv;
+ struct efx_tc_match match;
+ s64 rc;
+ int i;
+
+ if (!tc_can_offload_extack(efx->net_dev, extack))
+ return -EOPNOTSUPP;
+ if (WARN_ON(!efx->tc))
+ return -ENETDOWN;
+ if (WARN_ON(!efx->tc->up))
+ return -ENETDOWN;
+
+ from_efv = efx_tc_flower_lookup_efv(efx, net_dev);
+ if (IS_ERR(from_efv)) {
+ /* Might be a tunnel decap rule from an indirect block.
+ * Support for those not implemented yet.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ if (efv != from_efv) {
+ /* can't happen */
+ efx_tc_err(efx, "for %s efv is %snull but from_efv is %snull\n",
+ netdev_name(net_dev), efv ? "non-" : "",
+ from_efv ? "non-" : "");
+ if (efv)
+ NL_SET_ERR_MSG_MOD(extack, "vfrep filter has PF net_dev (can't happen)");
+ else
+ NL_SET_ERR_MSG_MOD(extack, "PF filter has vfrep net_dev (can't happen)");
+ return -EINVAL;
+ }
+
+ /* Parse match */
+ memset(&match, 0, sizeof(match));
+ rc = efx_tc_flower_external_mport(efx, from_efv);
+ if (rc < 0) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to identify ingress m-port");
+ return rc;
+ }
+ match.value.ingress_port = rc;
+ match.mask.ingress_port = ~0;
+ rc = efx_tc_flower_parse_match(efx, fr, &match, extack);
+ if (rc)
+ return rc;
+
+ if (tc->common.chain_index) {
+ EFX_TC_ERR_MSG(efx, extack, "No support for nonzero chain_index");
+ return -EOPNOTSUPP;
+ }
+ match.mask.recirc_id = 0xff;
+
+ rc = efx_mae_match_check_caps(efx, &match.mask, extack);
+ if (rc)
+ return rc;
+
+ rule = kzalloc(sizeof(*rule), GFP_USER);
+ if (!rule)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&rule->acts.list);
+ rule->cookie = tc->cookie;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
+ &rule->linkage,
+ efx_tc_match_action_ht_params);
+ if (old) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Already offloaded rule (cookie %lx)\n", tc->cookie);
+ rc = -EEXIST;
+ NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
+ goto release;
+ }
+
+ /* Parse actions */
+ act = kzalloc(sizeof(*act), GFP_USER);
+ if (!act) {
+ rc = -ENOMEM;
+ goto release;
+ }
+
+ flow_action_for_each(i, fa, &fr->action) {
+ struct efx_tc_action_set save;
+
+ if (!act) {
+ /* more actions after a non-pipe action */
+ EFX_TC_ERR_MSG(efx, extack, "Action follows non-pipe action");
+ rc = -EINVAL;
+ goto release;
+ }
+
+ switch (fa->id) {
+ case FLOW_ACTION_DROP:
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (drop)");
+ goto release;
+ }
+ list_add_tail(&act->list, &rule->acts.list);
+ act = NULL; /* end of the line */
+ break;
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_MIRRED:
+ save = *act;
+ to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
+ if (IS_ERR(to_efv)) {
+ EFX_TC_ERR_MSG(efx, extack, "Mirred egress device not on switch");
+ rc = PTR_ERR(to_efv);
+ goto release;
+ }
+ rc = efx_tc_flower_external_mport(efx, to_efv);
+ if (rc < 0) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to identify egress m-port");
+ goto release;
+ }
+ act->dest_mport = rc;
+ act->deliver = 1;
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (mirred)");
+ goto release;
+ }
+ list_add_tail(&act->list, &rule->acts.list);
+ act = NULL;
+ if (fa->id == FLOW_ACTION_REDIRECT)
+ break; /* end of the line */
+ /* Mirror, so continue on with saved act */
+ act = kzalloc(sizeof(*act), GFP_USER);
+ if (!act) {
+ rc = -ENOMEM;
+ goto release;
+ }
+ *act = save;
+ break;
+ default:
+ efx_tc_err(efx, "Unhandled action %u\n", fa->id);
+ rc = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+ goto release;
+ }
+ }
+
+ if (act) {
+ /* Not shot/redirected, so deliver to default dest */
+ if (from_efv == EFX_EFV_PF)
+ /* Rule applies to traffic from the wire,
+ * and default dest is thus the PF
+ */
+ efx_mae_mport_uplink(efx, &act->dest_mport);
+ else
+ /* Representor, so rule applies to traffic from
+ * representee, and default dest is thus the rep.
+ * All reps use the same mport for delivery
+ */
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
+ &act->dest_mport);
+ act->deliver = 1;
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (deliver)");
+ goto release;
+ }
+ list_add_tail(&act->list, &rule->acts.list);
+ act = NULL; /* Prevent double-free in error path */
+ }
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Successfully parsed filter (cookie %lx)\n",
+ tc->cookie);
+
+ rule->match = match;
+
+ rc = efx_mae_alloc_action_set_list(efx, &rule->acts);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set list to hw");
+ goto release;
+ }
+ rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC,
+ rule->acts.fw_id, &rule->fw_id);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to insert rule in hw");
+ goto release_acts;
+ }
+ return 0;
+
+release_acts:
+ efx_mae_free_action_set_list(efx, &rule->acts);
+release:
+ /* We failed to insert the rule, so free up any entries we created in
+ * subsidiary tables.
+ */
+ if (act)
+ efx_tc_free_action_set(efx, act, false);
+ if (rule) {
+ rhashtable_remove_fast(&efx->tc->match_action_ht,
+ &rule->linkage,
+ efx_tc_match_action_ht_params);
+ efx_tc_free_action_set_list(efx, &rule->acts, false);
+ }
+ kfree(rule);
+ return rc;
+}
+
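+/* Handle FLOW_CLS_DESTROY: look the rule up by its TC cookie, remove it
+ * from the hardware, then drop it from the driver's hashtable.
+ */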
+static int efx_tc_flower_destroy(struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct flow_cls_offload *tc)
+{
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_flow_rule *rule;
+
+ rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
+ efx_tc_match_action_ht_params);
+ if (!rule) {
+ /* Only log a message if we're the ingress device. Otherwise
+ * it's a foreign filter and we might just not have been
+ * interested (e.g. we might not have been the egress device
+ * either).
+ */
+ if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
+ netif_warn(efx, drv, efx->net_dev,
+ "Filter %lx not found to remove\n", tc->cookie);
+ NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
+ return -ENOENT;
+ }
+
+ /* Remove it from HW */
+ efx_tc_delete_rule(efx, rule);
+ /* Delete it from SW */
+ rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage,
+ efx_tc_match_action_ht_params);
+ netif_dbg(efx, drv, efx->net_dev, "Removed filter %lx\n", rule->cookie);
+ kfree(rule);
+ return 0;
+}
+
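+/* Entry point from the TC binding code. Dispatches on tc->command under
+ * the TC mutex, which serialises all flower offload operations.
+ */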
+int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
+ struct flow_cls_offload *tc, struct efx_rep *efv)
+{
+ int rc;
+
+ if (!efx->tc)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&efx->tc->mutex);
+ switch (tc->command) {
+ case FLOW_CLS_REPLACE:
+ rc = efx_tc_flower_replace(efx, net_dev, tc, efv);
+ break;
+ case FLOW_CLS_DESTROY:
+ rc = efx_tc_flower_destroy(efx, net_dev, tc);
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ mutex_unlock(&efx->tc->mutex);
+ return rc;
+}
+
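+/* Default rules match only on ingress port, at a lower priority
+ * (EFX_TC_PRIO_DFLT) than TC rules, and deliver to eg_port any traffic
+ * that no TC rule claimed.
+ */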
+static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
+ u32 eg_port, struct efx_tc_flow_rule *rule)
+{
+ struct efx_tc_action_set_list *acts = &rule->acts;
+ struct efx_tc_match *match = &rule->match;
+ struct efx_tc_action_set *act;
+ int rc;
+
+ match->value.ingress_port = ing_port;
+ match->mask.ingress_port = ~0;
+ act = kzalloc(sizeof(*act), GFP_KERNEL);
+ if (!act)
+ return -ENOMEM;
+ act->deliver = 1;
+ act->dest_mport = eg_port;
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc)
+ goto fail1;
+ EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
+ list_add_tail(&act->list, &acts->list);
+ rc = efx_mae_alloc_action_set_list(efx, acts);
+ if (rc)
+ goto fail2;
+ rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
+ acts->fw_id, &rule->fw_id);
+ if (rc)
+ goto fail3;
+ return 0;
+fail3:
+ efx_mae_free_action_set_list(efx, acts);
+fail2:
+ list_del(&act->list);
+ efx_mae_free_action_set(efx, act->fw_id);
+fail1:
+ kfree(act);
+ return rc;
+}
+
+static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
+{
+ struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_uplink(efx, &ing_port);
+ efx_mae_mport_wire(efx, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
+{
+ struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_wire(efx, &ing_port);
+ efx_mae_mport_uplink(efx, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
+{
+ struct efx_tc_flow_rule *rule = &efv->dflt;
+ struct efx_nic *efx = efv->parent;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_mport(efx, efv->mport, &ing_port);
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
+ struct efx_tc_flow_rule *rule)
+{
+ if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
+ efx_tc_delete_rule(efx, rule);
+ rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+}
+
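+/* Allocate a dedicated m-port to serve as the delivery destination for
+ * all representor default rules; its mport selector doubles as the vport
+ * ID used by the representor RX filters below.
+ */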
+static int efx_tc_configure_rep_mport(struct efx_nic *efx)
+{
+ u32 rep_mport_label;
+ int rc;
+
+ rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label);
+ if (rc)
+ return rc;
+ pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
+ efx->tc->reps_mport_id, rep_mport_label);
+ /* Use mport *selector* as vport ID */
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
+ &efx->tc->reps_mport_vport_id);
+ return 0;
+}
+
+static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
+{
+ efx_mae_free_mport(efx, efx->tc->reps_mport_id);
+ efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
+}
+
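+/* Install catch-all unicast and multicast RX filters on the
+ * representors' vport, so that traffic delivered to the reps m-port
+ * actually reaches the driver.
+ */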
+int efx_tc_insert_rep_filters(struct efx_nic *efx)
+{
+ struct efx_filter_spec promisc, allmulti;
+ int rc;
+
+ if (efx->type->is_vf)
+ return 0;
+ if (!efx->tc)
+ return 0;
+ efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
+ efx_filter_set_uc_def(&promisc);
+ efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
+ rc = efx_filter_insert_filter(efx, &promisc, false);
+ if (rc < 0)
+ return rc;
+ efx->tc->reps_filter_uc = rc;
+ efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
+ efx_filter_set_mc_def(&allmulti);
+ efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
+ rc = efx_filter_insert_filter(efx, &allmulti, false);
+ if (rc < 0)
+ return rc;
+ efx->tc->reps_filter_mc = rc;
+ return 0;
+}
+
+void efx_tc_remove_rep_filters(struct efx_nic *efx)
+{
+ if (efx->type->is_vf)
+ return;
+ if (!efx->tc)
+ return;
+ if (efx->tc->reps_filter_mc >= 0)
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc);
+ efx->tc->reps_filter_mc = -1;
+ if (efx->tc->reps_filter_uc >= 0)
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc);
+ efx->tc->reps_filter_uc = -1;
+}
+
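+/* Bring up TC offload: check MAE capabilities, install the default rules
+ * for the PF and the wire, set up the representor m-port, and register
+ * for indirect (e.g. tunnel netdev) block callbacks.
+ */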
+int efx_init_tc(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mae_get_caps(efx, efx->tc->caps);
+ if (rc)
+ return rc;
+ if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS)
+ /* Firmware supports some match fields the driver doesn't know
+ * about. Not fatal, unless any of those fields are required
+ * (MAE_FIELD_SUPPORTED_MATCH_ALWAYS), in which case we have no way
+ * to tell from here.
+ */
+ netif_warn(efx, probe, efx->net_dev,
+ "FW reports additional match fields %u\n",
+ efx->tc->caps->match_field_count);
+ if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) {
+ netif_err(efx, probe, efx->net_dev,
+ "Too few action prios supported (have %u, need %u)\n",
+ efx->tc->caps->action_prios, EFX_TC_PRIO__NUM);
+ return -EIO;
+ }
+ rc = efx_tc_configure_default_rule_pf(efx);
+ if (rc)
+ return rc;
+ rc = efx_tc_configure_default_rule_wire(efx);
+ if (rc)
+ return rc;
+ rc = efx_tc_configure_rep_mport(efx);
+ if (rc)
+ return rc;
+ rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
+ if (rc)
+ return rc;
+ efx->tc->up = true;
+ return 0;
+}
+
+void efx_fini_tc(struct efx_nic *efx)
+{
+ /* We can get called even if efx_init_struct_tc() failed */
+ if (!efx->tc)
+ return;
+ if (efx->tc->up)
+ flow_indr_dev_unregister(efx_tc_indr_setup_cb, efx, efx_tc_block_unbind);
+ efx_tc_deconfigure_rep_mport(efx);
+ efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
+ efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
+ efx->tc->up = false;
+}
+
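+/* Allocate and initialise the driver-side TC state; this is purely
+ * software setup, with no firmware interaction.
+ */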
+int efx_init_struct_tc(struct efx_nic *efx)
+{
+ int rc;
+
+ if (efx->type->is_vf)
+ return 0;
+
+ efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
+ if (!efx->tc)
+ return -ENOMEM;
+ efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL);
+ if (!efx->tc->caps) {
+ rc = -ENOMEM;
+ goto fail_alloc_caps;
+ }
+ INIT_LIST_HEAD(&efx->tc->block_list);
+
+ mutex_init(&efx->tc->mutex);
+ rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
+ if (rc < 0)
+ goto fail_match_action_ht;
+ efx->tc->reps_filter_uc = -1;
+ efx->tc->reps_filter_mc = -1;
+ INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
+ efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
+ efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ return 0;
+fail_match_action_ht:
+ mutex_destroy(&efx->tc->mutex);
+ kfree(efx->tc->caps);
+fail_alloc_caps:
+ kfree(efx->tc);
+ efx->tc = NULL;
+ return rc;
+}
+
+void efx_fini_struct_tc(struct efx_nic *efx)
+{
+ if (!efx->tc)
+ return;
+
+ mutex_lock(&efx->tc->mutex);
+ EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
+ MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+ EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
+ MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+ rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
+ efx);
+ mutex_unlock(&efx->tc->mutex);
+ mutex_destroy(&efx->tc->mutex);
+ kfree(efx->tc->caps);
+ kfree(efx->tc);
+ efx->tc = NULL;
+}