Diffstat (limited to 'drivers/net/ethernet/marvell/prestera')
32 files changed, 15650 insertions, 0 deletions
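The Kconfig hunk below introduces CONFIG_PRESTERA and CONFIG_PRESTERA_PCI. As a rough illustration (not part of the patch), a kernel .config fragment satisfying their stated dependencies and building both drivers as modules could look like:

CONFIG_NET_SWITCHDEV=y
CONFIG_VLAN_8021Q=m
CONFIG_BRIDGE=m
CONFIG_PRESTERA=m
CONFIG_PRESTERA_PCI=m

NET_DEVLINK and PHYLINK are pulled in automatically via select.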
diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig new file mode 100644 index 0000000000..f2f7663c3d --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/Kconfig @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Marvell Prestera drivers configuration +# + +config PRESTERA + tristate "Marvell Prestera Switch ASICs support" + depends on NET_SWITCHDEV && VLAN_8021Q + depends on BRIDGE || BRIDGE=n + select NET_DEVLINK + select PHYLINK + help + This driver supports Marvell Prestera Switch ASICs family. + + To compile this driver as a module, choose M here: the + module will be called prestera. + +config PRESTERA_PCI + tristate "PCI interface driver for Marvell Prestera Switch ASICs family" + depends on PCI && HAS_IOMEM && PRESTERA + default PRESTERA + help + This is implementation of PCI interface support for Marvell Prestera + Switch ASICs family. + + To compile this driver as a module, choose M here: the + module will be called prestera_pci. diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile new file mode 100644 index 0000000000..df14cee801 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PRESTERA) += prestera.o +prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \ + prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \ + prestera_switchdev.o prestera_acl.o prestera_flow.o \ + prestera_flower.o prestera_span.o prestera_counter.o \ + prestera_router.o prestera_router_hw.o prestera_matchall.o + +obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h new file mode 100644 index 0000000000..35554ee805 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera.h @@ -0,0 +1,417 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. 
*/ + +#ifndef _PRESTERA_H_ +#define _PRESTERA_H_ + +#include <linux/notifier.h> +#include <linux/skbuff.h> +#include <linux/workqueue.h> +#include <linux/phylink.h> +#include <net/devlink.h> +#include <uapi/linux/if_ether.h> + +#define PRESTERA_DRV_NAME "prestera" + +#define PRESTERA_DEFAULT_VID 1 + +struct prestera_fw_rev { + u16 maj; + u16 min; + u16 sub; +}; + +struct prestera_flood_domain { + struct prestera_switch *sw; + struct list_head flood_domain_port_list; + u32 idx; +}; + +struct prestera_mdb_entry { + struct prestera_switch *sw; + struct prestera_flood_domain *flood_domain; + unsigned char addr[ETH_ALEN]; + u16 vid; +}; + +struct prestera_flood_domain_port { + struct prestera_flood_domain *flood_domain; + struct net_device *dev; + struct list_head flood_domain_port_node; + u16 vid; +}; + +struct prestera_port_stats { + u64 good_octets_received; + u64 bad_octets_received; + u64 mac_trans_error; + u64 broadcast_frames_received; + u64 multicast_frames_received; + u64 frames_64_octets; + u64 frames_65_to_127_octets; + u64 frames_128_to_255_octets; + u64 frames_256_to_511_octets; + u64 frames_512_to_1023_octets; + u64 frames_1024_to_max_octets; + u64 excessive_collision; + u64 multicast_frames_sent; + u64 broadcast_frames_sent; + u64 fc_sent; + u64 fc_received; + u64 buffer_overrun; + u64 undersize; + u64 fragments; + u64 oversize; + u64 jabber; + u64 rx_error_frame_received; + u64 bad_crc; + u64 collisions; + u64 late_collision; + u64 unicast_frames_received; + u64 unicast_frames_sent; + u64 sent_multiple; + u64 sent_deferred; + u64 good_octets_sent; +}; + +#define PRESTERA_AP_PORT_MAX (10) + +struct prestera_port_caps { + u64 supp_link_modes; + u8 supp_fec; + u8 type; + u8 transceiver; +}; + +struct prestera_lag { + struct net_device *dev; + struct list_head members; + u16 member_count; + u16 lag_id; +}; + +struct prestera_flow_block; + +struct prestera_port_mac_state { + bool valid; + u32 mode; + u32 speed; + bool oper; + u8 duplex; + u8 fc; + u8 fec; +}; + +struct prestera_port_phy_state { + u64 lmode_bmap; + struct { + bool pause; + bool asym_pause; + } remote_fc; + u8 mdix; +}; + +struct prestera_port_mac_config { + u32 mode; + u32 speed; + bool admin; + u8 inband; + u8 duplex; + u8 fec; +}; + +struct prestera_port_phy_config { + u32 mode; + bool admin; + u8 mdix; +}; + +struct prestera_port { + struct net_device *dev; + struct prestera_switch *sw; + struct prestera_flow_block *ingress_flow_block; + struct prestera_flow_block *egress_flow_block; + struct devlink_port dl_port; + struct list_head lag_member; + struct prestera_lag *lag; + u32 id; + u32 hw_id; + u32 dev_id; + u16 fp_id; + u16 pvid; + bool autoneg; + u64 adver_link_modes; + u8 adver_fec; + struct prestera_port_caps caps; + struct list_head list; + struct list_head vlans_list; + struct { + struct prestera_port_stats stats; + struct delayed_work caching_dw; + } cached_hw_stats; + struct prestera_port_mac_config cfg_mac; + struct prestera_port_phy_config cfg_phy; + struct prestera_port_mac_state state_mac; + struct prestera_port_phy_state state_phy; + + struct phylink_config phy_config; + struct phylink *phy_link; + struct phylink_pcs phylink_pcs; + + /* protects state_mac */ + spinlock_t state_mac_lock; +}; + +struct prestera_device { + struct device *dev; + u8 __iomem *ctl_regs; + u8 __iomem *pp_regs; + struct prestera_fw_rev fw_rev; + void *priv; + + /* called by device driver to handle received packets */ + void (*recv_pkt)(struct prestera_device *dev); + + /* called by device driver to pass event up to the higher 
layer */ + int (*recv_msg)(struct prestera_device *dev, void *msg, size_t size); + + /* called by higher layer to send request to the firmware */ + int (*send_req)(struct prestera_device *dev, int qid, void *in_msg, + size_t in_size, void *out_msg, size_t out_size, + unsigned int wait); +}; + +enum prestera_event_type { + PRESTERA_EVENT_TYPE_UNSPEC, + + PRESTERA_EVENT_TYPE_PORT, + PRESTERA_EVENT_TYPE_FDB, + PRESTERA_EVENT_TYPE_RXTX, + + PRESTERA_EVENT_TYPE_MAX +}; + +enum prestera_rxtx_event_id { + PRESTERA_RXTX_EVENT_UNSPEC, + PRESTERA_RXTX_EVENT_RCV_PKT, +}; + +enum prestera_port_event_id { + PRESTERA_PORT_EVENT_UNSPEC, + PRESTERA_PORT_EVENT_MAC_STATE_CHANGED, +}; + +struct prestera_port_event { + u32 port_id; + union { + struct { + u32 mode; + u32 speed; + u8 oper; + u8 duplex; + u8 fc; + u8 fec; + } mac; + struct { + u64 lmode_bmap; + struct { + bool pause; + bool asym_pause; + } remote_fc; + u8 mdix; + } phy; + } data; +}; + +enum prestera_fdb_entry_type { + PRESTERA_FDB_ENTRY_TYPE_REG_PORT, + PRESTERA_FDB_ENTRY_TYPE_LAG, + PRESTERA_FDB_ENTRY_TYPE_MAX +}; + +enum prestera_fdb_event_id { + PRESTERA_FDB_EVENT_UNSPEC, + PRESTERA_FDB_EVENT_LEARNED, + PRESTERA_FDB_EVENT_AGED, +}; + +struct prestera_fdb_event { + enum prestera_fdb_entry_type type; + union { + u32 port_id; + u16 lag_id; + } dest; + u32 vid; + union { + u8 mac[ETH_ALEN]; + } data; +}; + +struct prestera_event { + u16 id; + union { + struct prestera_port_event port_evt; + struct prestera_fdb_event fdb_evt; + }; +}; + +enum prestera_if_type { + /* the interface is of port type (dev,port) */ + PRESTERA_IF_PORT_E = 0, + + /* the interface is of lag type (lag-id) */ + PRESTERA_IF_LAG_E = 1, + + /* the interface is of Vid type (vlan-id) */ + PRESTERA_IF_VID_E = 3, +}; + +struct prestera_iface { + enum prestera_if_type type; + struct { + u32 hw_dev_num; + u32 port_num; + } dev_port; + u32 hw_dev_num; + u16 vr_id; + u16 lag_id; + u16 vlan_id; +}; + +struct prestera_switchdev; +struct prestera_span; +struct prestera_rxtx; +struct prestera_trap_data; +struct prestera_acl; + +struct prestera_switch { + struct prestera_device *dev; + struct prestera_switchdev *swdev; + struct prestera_rxtx *rxtx; + struct prestera_acl *acl; + struct prestera_span *span; + struct list_head event_handlers; + struct notifier_block netdev_nb; + struct prestera_trap_data *trap_data; + char base_mac[ETH_ALEN]; + struct list_head port_list; + rwlock_t port_list_lock; + u32 port_count; + u32 mtu_min; + u32 mtu_max; + u8 id; + struct device_node *np; + struct prestera_router *router; + struct prestera_lag *lags; + struct prestera_counter *counter; + u8 lag_member_max; + u8 lag_max; + u32 size_tbl_router_nexthop; +}; + +struct prestera_router { + struct prestera_switch *sw; + struct list_head vr_list; + struct list_head rif_entry_list; + struct rhashtable nh_neigh_ht; + struct rhashtable nexthop_group_ht; + struct rhashtable fib_ht; + struct rhashtable kern_neigh_cache_ht; + struct rhashtable kern_fib_cache_ht; + struct notifier_block inetaddr_nb; + struct notifier_block inetaddr_valid_nb; + struct notifier_block fib_nb; + struct notifier_block netevent_nb; + u8 *nhgrp_hw_state_cache; /* Bitmap cached hw state of nhs */ + unsigned long nhgrp_hw_cache_kick; /* jiffies */ + struct { + struct delayed_work dw; + } neighs_update; +}; + +struct prestera_rxtx_params { + bool use_sdma; + u32 map_addr; +}; + +#define prestera_dev(sw) ((sw)->dev->dev) + +static inline void prestera_write(const struct prestera_switch *sw, + unsigned int reg, u32 val) +{ + writel(val, 
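/* pp_regs: ioremapped packet-processor register window */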
sw->dev->pp_regs + reg); +} + +static inline u32 prestera_read(const struct prestera_switch *sw, + unsigned int reg) +{ + return readl(sw->dev->pp_regs + reg); +} + +int prestera_device_register(struct prestera_device *dev); +void prestera_device_unregister(struct prestera_device *dev); + +struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw, + u32 dev_id, u32 hw_id); + +int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes); + +int prestera_router_init(struct prestera_switch *sw); +void prestera_router_fini(struct prestera_switch *sw); + +struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id); + +struct prestera_switch *prestera_switch_get(struct net_device *dev); + +int prestera_port_cfg_mac_read(struct prestera_port *port, + struct prestera_port_mac_config *cfg); + +int prestera_port_cfg_mac_write(struct prestera_port *port, + struct prestera_port_mac_config *cfg); + +struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev); + +void prestera_queue_work(struct work_struct *work); +void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay); +void prestera_queue_drain(void); + +int prestera_port_learning_set(struct prestera_port *port, bool learn_enable); +int prestera_port_uc_flood_set(struct prestera_port *port, bool flood); +int prestera_port_mc_flood_set(struct prestera_port *port, bool flood); + +int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked); + +int prestera_port_pvid_set(struct prestera_port *port, u16 vid); + +bool prestera_netdev_check(const struct net_device *dev); + +int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr); + +bool prestera_port_is_lag_member(const struct prestera_port *port); +int prestera_lag_id(struct prestera_switch *sw, + struct net_device *lag_dev, u16 *lag_id); + +struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id); + +u16 prestera_port_lag_id(const struct prestera_port *port); + +struct prestera_mdb_entry * +prestera_mdb_entry_create(struct prestera_switch *sw, + const unsigned char *addr, u16 vid); +void prestera_mdb_entry_destroy(struct prestera_mdb_entry *mdb_entry); + +struct prestera_flood_domain * +prestera_flood_domain_create(struct prestera_switch *sw); +void prestera_flood_domain_destroy(struct prestera_flood_domain *flood_domain); + +int +prestera_flood_domain_port_create(struct prestera_flood_domain *flood_domain, + struct net_device *dev, + u16 vid); +void +prestera_flood_domain_port_destroy(struct prestera_flood_domain_port *port); +struct prestera_flood_domain_port * +prestera_flood_domain_port_find(struct prestera_flood_domain *flood_domain, + struct net_device *dev, u16 vid); + +#endif /* _PRESTERA_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c new file mode 100644 index 0000000000..cba89fda50 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c @@ -0,0 +1,927 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2020-2021 Marvell International Ltd. 
All rights reserved */ + +#include <linux/rhashtable.h> + +#include "prestera_acl.h" +#include "prestera_flow.h" +#include "prestera_hw.h" +#include "prestera.h" + +#define ACL_KEYMASK_SIZE \ + (sizeof(__be32) * __PRESTERA_ACL_RULE_MATCH_TYPE_MAX) + +struct prestera_acl { + struct prestera_switch *sw; + struct list_head vtcam_list; + struct list_head rules; + struct rhashtable ruleset_ht; + struct rhashtable acl_rule_entry_ht; + struct idr uid; +}; + +struct prestera_acl_ruleset_ht_key { + struct prestera_flow_block *block; + u32 chain_index; +}; + +struct prestera_acl_rule_entry { + struct rhash_head ht_node; + struct prestera_acl_rule_entry_key key; + u32 hw_id; + u32 vtcam_id; + struct { + struct { + u8 valid:1; + } accept, drop, trap; + struct { + u8 valid:1; + struct prestera_acl_action_police i; + } police; + struct { + struct prestera_acl_action_jump i; + u8 valid:1; + } jump; + struct { + u32 id; + struct prestera_counter_block *block; + } counter; + }; +}; + +struct prestera_acl_ruleset { + struct rhash_head ht_node; /* Member of acl HT */ + struct prestera_acl_ruleset_ht_key ht_key; + struct rhashtable rule_ht; + struct prestera_acl *acl; + struct { + u32 min; + u32 max; + } prio; + unsigned long rule_count; + refcount_t refcount; + void *keymask; + u32 vtcam_id; + u32 index; + u16 pcl_id; + bool offload; + bool ingress; +}; + +struct prestera_acl_vtcam { + struct list_head list; + __be32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + refcount_t refcount; + u32 id; + bool is_keymask_set; + u8 lookup; + u8 direction; +}; + +static const struct rhashtable_params prestera_acl_ruleset_ht_params = { + .key_len = sizeof(struct prestera_acl_ruleset_ht_key), + .key_offset = offsetof(struct prestera_acl_ruleset, ht_key), + .head_offset = offsetof(struct prestera_acl_ruleset, ht_node), + .automatic_shrinking = true, +}; + +static const struct rhashtable_params prestera_acl_rule_ht_params = { + .key_len = sizeof(unsigned long), + .key_offset = offsetof(struct prestera_acl_rule, cookie), + .head_offset = offsetof(struct prestera_acl_rule, ht_node), + .automatic_shrinking = true, +}; + +static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = { + .key_offset = offsetof(struct prestera_acl_rule_entry, key), + .head_offset = offsetof(struct prestera_acl_rule_entry, ht_node), + .key_len = sizeof(struct prestera_acl_rule_entry_key), + .automatic_shrinking = true, +}; + +int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client) +{ + static const u32 ingress_client_map[] = { + PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0, + PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1, + PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2 + }; + + if (!ingress) { + /* prestera supports only one chain on egress */ + if (chain_index > 0) + return -EINVAL; + + *client = PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP; + return 0; + } + + if (chain_index >= ARRAY_SIZE(ingress_client_map)) + return -EINVAL; + + *client = ingress_client_map[chain_index]; + return 0; +} + +static bool prestera_acl_chain_is_supported(u32 chain_index, bool ingress) +{ + if (!ingress) + /* prestera supports only one chain on egress */ + return chain_index == 0; + + return (chain_index & ~PRESTERA_ACL_CHAIN_MASK) == 0; +} + +static struct prestera_acl_ruleset * +prestera_acl_ruleset_create(struct prestera_acl *acl, + struct prestera_flow_block *block, + u32 chain_index) +{ + struct prestera_acl_ruleset *ruleset; + u32 uid = 0; + int err; + + if (!prestera_acl_chain_is_supported(chain_index, block->ingress)) + return 
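/* chain not supported for this direction */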
ERR_PTR(-EINVAL); + + ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL); + if (!ruleset) + return ERR_PTR(-ENOMEM); + + ruleset->acl = acl; + ruleset->ingress = block->ingress; + ruleset->ht_key.block = block; + ruleset->ht_key.chain_index = chain_index; + refcount_set(&ruleset->refcount, 1); + + err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params); + if (err) + goto err_rhashtable_init; + + err = idr_alloc_u32(&acl->uid, NULL, &uid, U8_MAX, GFP_KERNEL); + if (err) + goto err_ruleset_create; + + /* make pcl-id based on uid */ + ruleset->pcl_id = PRESTERA_ACL_PCL_ID_MAKE((u8)uid, chain_index); + ruleset->index = uid; + + ruleset->prio.min = UINT_MAX; + ruleset->prio.max = 0; + + err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node, + prestera_acl_ruleset_ht_params); + if (err) + goto err_ruleset_ht_insert; + + return ruleset; + +err_ruleset_ht_insert: + idr_remove(&acl->uid, uid); +err_ruleset_create: + rhashtable_destroy(&ruleset->rule_ht); +err_rhashtable_init: + kfree(ruleset); + return ERR_PTR(err); +} + +int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, + void *keymask) +{ + ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL); + if (!ruleset->keymask) + return -ENOMEM; + + return 0; +} + +int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset) +{ + struct prestera_acl_iface iface; + u32 vtcam_id; + int dir; + int err; + + dir = ruleset->ingress ? + PRESTERA_HW_VTCAM_DIR_INGRESS : PRESTERA_HW_VTCAM_DIR_EGRESS; + + if (ruleset->offload) + return -EEXIST; + + err = prestera_acl_vtcam_id_get(ruleset->acl, + ruleset->ht_key.chain_index, + dir, + ruleset->keymask, &vtcam_id); + if (err) + goto err_vtcam_create; + + if (ruleset->ht_key.chain_index) { + /* for chain > 0, bind iface index to pcl-id to be able + * to jump from any other ruleset to this one using the index. 
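+ * Chain 0 itself is bound to ports directly when its first rule is
+ * added (see prestera_acl_rule_add()).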
+ */ + iface.index = ruleset->index; + iface.type = PRESTERA_ACL_IFACE_TYPE_INDEX; + err = prestera_hw_vtcam_iface_bind(ruleset->acl->sw, &iface, + vtcam_id, ruleset->pcl_id); + if (err) + goto err_ruleset_bind; + } + + ruleset->vtcam_id = vtcam_id; + ruleset->offload = true; + return 0; + +err_ruleset_bind: + prestera_acl_vtcam_id_put(ruleset->acl, ruleset->vtcam_id); +err_vtcam_create: + return err; +} + +static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset) +{ + struct prestera_acl *acl = ruleset->acl; + u8 uid = ruleset->pcl_id & PRESTERA_ACL_KEYMASK_PCL_ID_USER; + int err; + + rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, + prestera_acl_ruleset_ht_params); + + if (ruleset->offload) { + if (ruleset->ht_key.chain_index) { + struct prestera_acl_iface iface = { + .type = PRESTERA_ACL_IFACE_TYPE_INDEX, + .index = ruleset->index + }; + err = prestera_hw_vtcam_iface_unbind(acl->sw, &iface, + ruleset->vtcam_id); + WARN_ON(err); + } + WARN_ON(prestera_acl_vtcam_id_put(acl, ruleset->vtcam_id)); + } + + idr_remove(&acl->uid, uid); + rhashtable_destroy(&ruleset->rule_ht); + kfree(ruleset->keymask); + kfree(ruleset); +} + +static struct prestera_acl_ruleset * +__prestera_acl_ruleset_lookup(struct prestera_acl *acl, + struct prestera_flow_block *block, + u32 chain_index) +{ + struct prestera_acl_ruleset_ht_key ht_key; + + memset(&ht_key, 0, sizeof(ht_key)); + ht_key.block = block; + ht_key.chain_index = chain_index; + return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, + prestera_acl_ruleset_ht_params); +} + +struct prestera_acl_ruleset * +prestera_acl_ruleset_lookup(struct prestera_acl *acl, + struct prestera_flow_block *block, + u32 chain_index) +{ + struct prestera_acl_ruleset *ruleset; + + ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index); + if (!ruleset) + return ERR_PTR(-ENOENT); + + refcount_inc(&ruleset->refcount); + return ruleset; +} + +struct prestera_acl_ruleset * +prestera_acl_ruleset_get(struct prestera_acl *acl, + struct prestera_flow_block *block, + u32 chain_index) +{ + struct prestera_acl_ruleset *ruleset; + + ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index); + if (ruleset) { + refcount_inc(&ruleset->refcount); + return ruleset; + } + + return prestera_acl_ruleset_create(acl, block, chain_index); +} + +void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset) +{ + if (!refcount_dec_and_test(&ruleset->refcount)) + return; + + prestera_acl_ruleset_destroy(ruleset); +} + +int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset, + struct prestera_port *port) +{ + struct prestera_acl_iface iface = { + .type = PRESTERA_ACL_IFACE_TYPE_PORT, + .port = port + }; + + return prestera_hw_vtcam_iface_bind(port->sw, &iface, ruleset->vtcam_id, + ruleset->pcl_id); +} + +int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset, + struct prestera_port *port) +{ + struct prestera_acl_iface iface = { + .type = PRESTERA_ACL_IFACE_TYPE_PORT, + .port = port + }; + + return prestera_hw_vtcam_iface_unbind(port->sw, &iface, + ruleset->vtcam_id); +} + +static int prestera_acl_ruleset_block_bind(struct prestera_acl_ruleset *ruleset, + struct prestera_flow_block *block) +{ + struct prestera_flow_block_binding *binding; + int err; + + block->ruleset_zero = ruleset; + list_for_each_entry(binding, &block->binding_list, list) { + err = prestera_acl_ruleset_bind(ruleset, binding->port); + if (err) + goto rollback; + } + return 0; + +rollback: + list_for_each_entry_continue_reverse(binding, 
&block->binding_list, + list) + err = prestera_acl_ruleset_unbind(ruleset, binding->port); + block->ruleset_zero = NULL; + + return err; +} + +static void +prestera_acl_ruleset_block_unbind(struct prestera_acl_ruleset *ruleset, + struct prestera_flow_block *block) +{ + struct prestera_flow_block_binding *binding; + + list_for_each_entry(binding, &block->binding_list, list) + prestera_acl_ruleset_unbind(ruleset, binding->port); + block->ruleset_zero = NULL; +} + +static void +prestera_acl_ruleset_prio_refresh(struct prestera_acl *acl, + struct prestera_acl_ruleset *ruleset) +{ + struct prestera_acl_rule *rule; + + ruleset->prio.min = UINT_MAX; + ruleset->prio.max = 0; + + list_for_each_entry(rule, &acl->rules, list) { + if (ruleset->ingress != rule->ruleset->ingress) + continue; + if (ruleset->ht_key.chain_index != rule->chain_index) + continue; + + ruleset->prio.min = min(ruleset->prio.min, rule->priority); + ruleset->prio.max = max(ruleset->prio.max, rule->priority); + } +} + +void +prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, u16 pcl_id) +{ + struct prestera_acl_match *r_match = &rule->re_key.match; + __be16 pcl_id_mask = htons(PRESTERA_ACL_KEYMASK_PCL_ID); + __be16 pcl_id_key = htons(pcl_id); + + rule_match_set(r_match->key, PCL_ID, pcl_id_key); + rule_match_set(r_match->mask, PCL_ID, pcl_id_mask); +} + +struct prestera_acl_rule * +prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset, + unsigned long cookie) +{ + return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie, + prestera_acl_rule_ht_params); +} + +u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset) +{ + return ruleset->index; +} + +void prestera_acl_ruleset_prio_get(struct prestera_acl_ruleset *ruleset, + u32 *prio_min, u32 *prio_max) +{ + *prio_min = ruleset->prio.min; + *prio_max = ruleset->prio.max; +} + +bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset) +{ + return ruleset->offload; +} + +struct prestera_acl_rule * +prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset, + unsigned long cookie, u32 chain_index) +{ + struct prestera_acl_rule *rule; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return ERR_PTR(-ENOMEM); + + rule->ruleset = ruleset; + rule->cookie = cookie; + rule->chain_index = chain_index; + + refcount_inc(&ruleset->refcount); + + return rule; +} + +void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, + u32 priority) +{ + rule->priority = priority; +} + +void prestera_acl_rule_destroy(struct prestera_acl_rule *rule) +{ + if (rule->jump_ruleset) + /* release ruleset kept by jump action */ + prestera_acl_ruleset_put(rule->jump_ruleset); + + prestera_acl_ruleset_put(rule->ruleset); + kfree(rule); +} + +static void prestera_acl_ruleset_prio_update(struct prestera_acl_ruleset *ruleset, + u32 prio) +{ + ruleset->prio.min = min(ruleset->prio.min, prio); + ruleset->prio.max = max(ruleset->prio.max, prio); +} + +int prestera_acl_rule_add(struct prestera_switch *sw, + struct prestera_acl_rule *rule) +{ + int err; + struct prestera_acl_ruleset *ruleset = rule->ruleset; + struct prestera_flow_block *block = ruleset->ht_key.block; + + /* try to add rule to hash table first */ + err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node, + prestera_acl_rule_ht_params); + if (err) + goto err_ht_insert; + + prestera_acl_rule_keymask_pcl_id_set(rule, ruleset->pcl_id); + rule->re_arg.vtcam_id = ruleset->vtcam_id; + rule->re_key.prio = rule->priority; + + rule->re = 
prestera_acl_rule_entry_find(sw->acl, &rule->re_key); + err = WARN_ON(rule->re) ? -EEXIST : 0; + if (err) + goto err_rule_add; + + rule->re = prestera_acl_rule_entry_create(sw->acl, &rule->re_key, + &rule->re_arg); + err = !rule->re ? -EINVAL : 0; + if (err) + goto err_rule_add; + + /* bind the block (all ports) to chain index 0, rest of + * the chains are bound to goto action + */ + if (!ruleset->ht_key.chain_index && !ruleset->rule_count) { + err = prestera_acl_ruleset_block_bind(ruleset, block); + if (err) + goto err_acl_block_bind; + } + + list_add_tail(&rule->list, &sw->acl->rules); + ruleset->rule_count++; + prestera_acl_ruleset_prio_update(ruleset, rule->priority); + return 0; + +err_acl_block_bind: + prestera_acl_rule_entry_destroy(sw->acl, rule->re); +err_rule_add: + rule->re = NULL; + rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, + prestera_acl_rule_ht_params); +err_ht_insert: + return err; +} + +void prestera_acl_rule_del(struct prestera_switch *sw, + struct prestera_acl_rule *rule) +{ + struct prestera_acl_ruleset *ruleset = rule->ruleset; + struct prestera_flow_block *block = ruleset->ht_key.block; + + rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, + prestera_acl_rule_ht_params); + ruleset->rule_count--; + list_del(&rule->list); + + prestera_acl_rule_entry_destroy(sw->acl, rule->re); + prestera_acl_ruleset_prio_refresh(sw->acl, ruleset); + + /* unbind block (all ports) */ + if (!ruleset->ht_key.chain_index && !ruleset->rule_count) + prestera_acl_ruleset_block_unbind(ruleset, block); +} + +int prestera_acl_rule_get_stats(struct prestera_acl *acl, + struct prestera_acl_rule *rule, + u64 *packets, u64 *bytes, u64 *last_use) +{ + u64 current_packets; + u64 current_bytes; + int err; + + err = prestera_counter_stats_get(acl->sw->counter, + rule->re->counter.block, + rule->re->counter.id, + ¤t_packets, ¤t_bytes); + if (err) + return err; + + *packets = current_packets; + *bytes = current_bytes; + *last_use = jiffies; + + return 0; +} + +struct prestera_acl_rule_entry * +prestera_acl_rule_entry_find(struct prestera_acl *acl, + struct prestera_acl_rule_entry_key *key) +{ + return rhashtable_lookup_fast(&acl->acl_rule_entry_ht, key, + __prestera_acl_rule_entry_ht_params); +} + +static int __prestera_acl_rule_entry2hw_del(struct prestera_switch *sw, + struct prestera_acl_rule_entry *e) +{ + return prestera_hw_vtcam_rule_del(sw, e->vtcam_id, e->hw_id); +} + +static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw, + struct prestera_acl_rule_entry *e) +{ + struct prestera_acl_hw_action_info act_hw[PRESTERA_ACL_RULE_ACTION_MAX]; + int act_num; + + memset(&act_hw, 0, sizeof(act_hw)); + act_num = 0; + + /* accept */ + if (e->accept.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_ACCEPT; + act_num++; + } + /* drop */ + if (e->drop.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_DROP; + act_num++; + } + /* trap */ + if (e->trap.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP; + act_num++; + } + /* police */ + if (e->police.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_POLICE; + act_hw[act_num].police = e->police.i; + act_num++; + } + /* jump */ + if (e->jump.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_JUMP; + act_hw[act_num].jump = e->jump.i; + act_num++; + } + /* counter */ + if (e->counter.block) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_COUNT; + act_hw[act_num].count.id = e->counter.id; + act_num++; + } + + return prestera_hw_vtcam_rule_add(sw, e->vtcam_id, e->key.prio, + e->key.match.key, 
e->key.match.mask, + act_hw, act_num, &e->hw_id); +} + +static void +__prestera_acl_rule_entry_act_destruct(struct prestera_switch *sw, + struct prestera_acl_rule_entry *e) +{ + /* counter */ + prestera_counter_put(sw->counter, e->counter.block, e->counter.id); + /* police */ + if (e->police.valid) + prestera_hw_policer_release(sw, e->police.i.id); +} + +void prestera_acl_rule_entry_destroy(struct prestera_acl *acl, + struct prestera_acl_rule_entry *e) +{ + int ret; + + rhashtable_remove_fast(&acl->acl_rule_entry_ht, &e->ht_node, + __prestera_acl_rule_entry_ht_params); + + ret = __prestera_acl_rule_entry2hw_del(acl->sw, e); + WARN_ON(ret && ret != -ENODEV); + + __prestera_acl_rule_entry_act_destruct(acl->sw, e); + kfree(e); +} + +static int +__prestera_acl_rule_entry_act_construct(struct prestera_switch *sw, + struct prestera_acl_rule_entry *e, + struct prestera_acl_rule_entry_arg *arg) +{ + int err; + + /* accept */ + e->accept.valid = arg->accept.valid; + /* drop */ + e->drop.valid = arg->drop.valid; + /* trap */ + e->trap.valid = arg->trap.valid; + /* jump */ + e->jump.valid = arg->jump.valid; + e->jump.i = arg->jump.i; + /* police */ + if (arg->police.valid) { + u8 type = arg->police.ingress ? PRESTERA_POLICER_TYPE_INGRESS : + PRESTERA_POLICER_TYPE_EGRESS; + + err = prestera_hw_policer_create(sw, type, &e->police.i.id); + if (err) + goto err_out; + + err = prestera_hw_policer_sr_tcm_set(sw, e->police.i.id, + arg->police.rate, + arg->police.burst); + if (err) { + prestera_hw_policer_release(sw, e->police.i.id); + goto err_out; + } + e->police.valid = arg->police.valid; + } + /* counter */ + if (arg->count.valid) { + err = prestera_counter_get(sw->counter, arg->count.client, + &e->counter.block, + &e->counter.id); + if (err) + goto err_out; + } + + return 0; + +err_out: + __prestera_acl_rule_entry_act_destruct(sw, e); + return -EINVAL; +} + +struct prestera_acl_rule_entry * +prestera_acl_rule_entry_create(struct prestera_acl *acl, + struct prestera_acl_rule_entry_key *key, + struct prestera_acl_rule_entry_arg *arg) +{ + struct prestera_acl_rule_entry *e; + int err; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + goto err_kzalloc; + + memcpy(&e->key, key, sizeof(*key)); + e->vtcam_id = arg->vtcam_id; + err = __prestera_acl_rule_entry_act_construct(acl->sw, e, arg); + if (err) + goto err_act_construct; + + err = __prestera_acl_rule_entry2hw_add(acl->sw, e); + if (err) + goto err_hw_add; + + err = rhashtable_insert_fast(&acl->acl_rule_entry_ht, &e->ht_node, + __prestera_acl_rule_entry_ht_params); + if (err) + goto err_ht_insert; + + return e; + +err_ht_insert: + WARN_ON(__prestera_acl_rule_entry2hw_del(acl->sw, e)); +err_hw_add: + __prestera_acl_rule_entry_act_destruct(acl->sw, e); +err_act_construct: + kfree(e); +err_kzalloc: + return NULL; +} + +static int __prestera_acl_vtcam_id_try_fit(struct prestera_acl *acl, u8 lookup, + void *keymask, u32 *vtcam_id) +{ + struct prestera_acl_vtcam *vtcam; + int i; + + list_for_each_entry(vtcam, &acl->vtcam_list, list) { + if (lookup != vtcam->lookup) + continue; + + if (!keymask && !vtcam->is_keymask_set) + goto vtcam_found; + + if (!(keymask && vtcam->is_keymask_set)) + continue; + + /* try to fit with vtcam keymask */ + for (i = 0; i < __PRESTERA_ACL_RULE_MATCH_TYPE_MAX; i++) { + __be32 __keymask = ((__be32 *)keymask)[i]; + + if (!__keymask) + /* vtcam keymask in not interested */ + continue; + + if (__keymask & ~vtcam->keymask[i]) + /* keymask does not fit the vtcam keymask */ + break; + } + + if (i == __PRESTERA_ACL_RULE_MATCH_TYPE_MAX) + /* 
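the scan completed without a conflicting word, so the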
keymask fits vtcam keymask, return it */ + goto vtcam_found; + } + + /* nothing is found */ + return -ENOENT; + +vtcam_found: + refcount_inc(&vtcam->refcount); + *vtcam_id = vtcam->id; + return 0; +} + +int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir, + void *keymask, u32 *vtcam_id) +{ + struct prestera_acl_vtcam *vtcam; + u32 new_vtcam_id; + int err; + + /* find the vtcam that suits keymask. We do not expect to have + * a big number of vtcams, so, the list type for vtcam list is + * fine for now + */ + list_for_each_entry(vtcam, &acl->vtcam_list, list) { + if (lookup != vtcam->lookup || + dir != vtcam->direction) + continue; + + if (!keymask && !vtcam->is_keymask_set) { + refcount_inc(&vtcam->refcount); + goto vtcam_found; + } + + if (keymask && vtcam->is_keymask_set && + !memcmp(keymask, vtcam->keymask, sizeof(vtcam->keymask))) { + refcount_inc(&vtcam->refcount); + goto vtcam_found; + } + } + + /* vtcam not found, try to create new one */ + vtcam = kzalloc(sizeof(*vtcam), GFP_KERNEL); + if (!vtcam) + return -ENOMEM; + + err = prestera_hw_vtcam_create(acl->sw, lookup, keymask, &new_vtcam_id, + dir); + if (err) { + kfree(vtcam); + + /* cannot create new, try to fit into existing vtcam */ + if (__prestera_acl_vtcam_id_try_fit(acl, lookup, + keymask, &new_vtcam_id)) + return err; + + *vtcam_id = new_vtcam_id; + return 0; + } + + vtcam->direction = dir; + vtcam->id = new_vtcam_id; + vtcam->lookup = lookup; + if (keymask) { + memcpy(vtcam->keymask, keymask, sizeof(vtcam->keymask)); + vtcam->is_keymask_set = true; + } + refcount_set(&vtcam->refcount, 1); + list_add_rcu(&vtcam->list, &acl->vtcam_list); + +vtcam_found: + *vtcam_id = vtcam->id; + return 0; +} + +int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id) +{ + struct prestera_acl_vtcam *vtcam; + int err; + + list_for_each_entry(vtcam, &acl->vtcam_list, list) { + if (vtcam_id != vtcam->id) + continue; + + if (!refcount_dec_and_test(&vtcam->refcount)) + return 0; + + err = prestera_hw_vtcam_destroy(acl->sw, vtcam->id); + if (err && err != -ENODEV) { + refcount_set(&vtcam->refcount, 1); + return err; + } + + list_del(&vtcam->list); + kfree(vtcam); + return 0; + } + + return -ENOENT; +} + +int prestera_acl_init(struct prestera_switch *sw) +{ + struct prestera_acl *acl; + int err; + + acl = kzalloc(sizeof(*acl), GFP_KERNEL); + if (!acl) + return -ENOMEM; + + acl->sw = sw; + INIT_LIST_HEAD(&acl->rules); + INIT_LIST_HEAD(&acl->vtcam_list); + idr_init(&acl->uid); + + err = rhashtable_init(&acl->acl_rule_entry_ht, + &__prestera_acl_rule_entry_ht_params); + if (err) + goto err_acl_rule_entry_ht_init; + + err = rhashtable_init(&acl->ruleset_ht, + &prestera_acl_ruleset_ht_params); + if (err) + goto err_ruleset_ht_init; + + sw->acl = acl; + + return 0; + +err_ruleset_ht_init: + rhashtable_destroy(&acl->acl_rule_entry_ht); +err_acl_rule_entry_ht_init: + kfree(acl); + return err; +} + +void prestera_acl_fini(struct prestera_switch *sw) +{ + struct prestera_acl *acl = sw->acl; + + WARN_ON(!idr_is_empty(&acl->uid)); + idr_destroy(&acl->uid); + + WARN_ON(!list_empty(&acl->vtcam_list)); + WARN_ON(!list_empty(&acl->rules)); + + rhashtable_destroy(&acl->ruleset_ht); + rhashtable_destroy(&acl->acl_rule_entry_ht); + + kfree(acl); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h new file mode 100644 index 0000000000..a35cc0609a --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: 
BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_ACL_H_ +#define _PRESTERA_ACL_H_ + +#include <linux/types.h> +#include "prestera_counter.h" + +#define PRESTERA_ACL_KEYMASK_PCL_ID 0x3FF +#define PRESTERA_ACL_KEYMASK_PCL_ID_USER \ + (PRESTERA_ACL_KEYMASK_PCL_ID & 0x00FF) +#define PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN \ + (PRESTERA_ACL_KEYMASK_PCL_ID & 0xFF00) +#define PRESTERA_ACL_CHAIN_MASK \ + (PRESTERA_ACL_KEYMASK_PCL_ID >> 8) + +#define PRESTERA_ACL_PCL_ID_MAKE(uid, chain_id) \ + (((uid) & PRESTERA_ACL_KEYMASK_PCL_ID_USER) | \ + (((chain_id) << 8) & PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN)) + +#define rule_match_set_n(match_p, type, val_p, size) \ + memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \ + val_p, size) +#define rule_match_set(match_p, type, val) \ + memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \ + &(val), sizeof(val)) + +enum prestera_acl_match_type { + PRESTERA_ACL_RULE_MATCH_TYPE_PCL_ID, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_TYPE, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_0, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_1, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_0, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_1, + PRESTERA_ACL_RULE_MATCH_TYPE_IP_PROTO, + PRESTERA_ACL_RULE_MATCH_TYPE_SYS_PORT, + PRESTERA_ACL_RULE_MATCH_TYPE_SYS_DEV, + PRESTERA_ACL_RULE_MATCH_TYPE_IP_SRC, + PRESTERA_ACL_RULE_MATCH_TYPE_IP_DST, + PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_SRC, + PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_DST, + PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_SRC, + PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_DST, + PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_ID, + PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_TPID, + PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_TYPE, + PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_CODE, + + __PRESTERA_ACL_RULE_MATCH_TYPE_MAX +}; + +enum prestera_acl_rule_action { + PRESTERA_ACL_RULE_ACTION_ACCEPT = 0, + PRESTERA_ACL_RULE_ACTION_DROP = 1, + PRESTERA_ACL_RULE_ACTION_TRAP = 2, + PRESTERA_ACL_RULE_ACTION_JUMP = 5, + PRESTERA_ACL_RULE_ACTION_COUNT = 7, + PRESTERA_ACL_RULE_ACTION_POLICE = 8, + + PRESTERA_ACL_RULE_ACTION_MAX +}; + +enum { + PRESTERA_ACL_IFACE_TYPE_PORT, + PRESTERA_ACL_IFACE_TYPE_INDEX +}; + +struct prestera_acl_match { + __be32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + __be32 mask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; +}; + +struct prestera_acl_action_jump { + u32 index; +}; + +struct prestera_acl_action_police { + u32 id; +}; + +struct prestera_acl_action_count { + u32 id; +}; + +struct prestera_acl_rule_entry_key { + u32 prio; + struct prestera_acl_match match; +}; + +struct prestera_acl_hw_action_info { + enum prestera_acl_rule_action id; + union { + struct prestera_acl_action_police police; + struct prestera_acl_action_count count; + struct prestera_acl_action_jump jump; + }; +}; + +/* This struct (arg) used only to be passed as parameter for + * acl_rule_entry_create. Must be flat. 
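Its fields are copied into the new entry by prestera_acl_rule_entry_create().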
Can contain object keys, which will be + * resolved to object links, before saving to acl_rule_entry struct + */ +struct prestera_acl_rule_entry_arg { + u32 vtcam_id; + struct { + struct { + u8 valid:1; + } accept, drop, trap; + struct { + struct prestera_acl_action_jump i; + u8 valid:1; + } jump; + struct { + u8 valid:1; + u64 rate; + u64 burst; + bool ingress; + } police; + struct { + u8 valid:1; + u32 client; + } count; + }; +}; + +struct prestera_acl_rule { + struct rhash_head ht_node; /* Member of acl HT */ + struct list_head list; + struct prestera_acl_ruleset *ruleset; + struct prestera_acl_ruleset *jump_ruleset; + unsigned long cookie; + u32 chain_index; + u32 priority; + struct prestera_acl_rule_entry_key re_key; + struct prestera_acl_rule_entry_arg re_arg; + struct prestera_acl_rule_entry *re; +}; + +struct prestera_acl_iface { + union { + struct prestera_port *port; + u32 index; + }; + u8 type; +}; + +struct prestera_acl; +struct prestera_switch; +struct prestera_flow_block; + +int prestera_acl_init(struct prestera_switch *sw); +void prestera_acl_fini(struct prestera_switch *sw); + +struct prestera_acl_rule * +prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset, + unsigned long cookie, u32 chain_index); +void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, + u32 priority); +void prestera_acl_rule_destroy(struct prestera_acl_rule *rule); +struct prestera_acl_rule * +prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset, + unsigned long cookie); +int prestera_acl_rule_add(struct prestera_switch *sw, + struct prestera_acl_rule *rule); +void prestera_acl_rule_del(struct prestera_switch *sw, + struct prestera_acl_rule *rule); +int prestera_acl_rule_get_stats(struct prestera_acl *acl, + struct prestera_acl_rule *rule, + u64 *packets, u64 *bytes, u64 *last_use); +struct prestera_acl_rule_entry * +prestera_acl_rule_entry_find(struct prestera_acl *acl, + struct prestera_acl_rule_entry_key *key); +void prestera_acl_rule_entry_destroy(struct prestera_acl *acl, + struct prestera_acl_rule_entry *e); +struct prestera_acl_rule_entry * +prestera_acl_rule_entry_create(struct prestera_acl *acl, + struct prestera_acl_rule_entry_key *key, + struct prestera_acl_rule_entry_arg *arg); +struct prestera_acl_ruleset * +prestera_acl_ruleset_get(struct prestera_acl *acl, + struct prestera_flow_block *block, + u32 chain_index); +struct prestera_acl_ruleset * +prestera_acl_ruleset_lookup(struct prestera_acl *acl, + struct prestera_flow_block *block, + u32 chain_index); +int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, + void *keymask); +bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset); +int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset); +void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset); +int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset, + struct prestera_port *port); +int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset, + struct prestera_port *port); +u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset); +void prestera_acl_ruleset_prio_get(struct prestera_acl_ruleset *ruleset, + u32 *prio_min, u32 *prio_max); +void +prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, + u16 pcl_id); + +int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir, + void *keymask, u32 *vtcam_id); +int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id); +int prestera_acl_chain_to_client(u32 
chain_index, bool ingress, u32 *client); + +#endif /* _PRESTERA_ACL_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c new file mode 100644 index 0000000000..4cd53a2dae --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2021 Marvell International Ltd. All rights reserved */ + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_acl.h" +#include "prestera_counter.h" + +#define COUNTER_POLL_TIME (msecs_to_jiffies(1000)) +#define COUNTER_RESCHED_TIME (msecs_to_jiffies(50)) +#define COUNTER_BULK_SIZE (256) + +struct prestera_counter { + struct prestera_switch *sw; + struct delayed_work stats_dw; + struct mutex mtx; /* protect block_list */ + struct prestera_counter_block **block_list; + u32 total_read; + u32 block_list_len; + u32 curr_idx; + bool is_fetching; +}; + +struct prestera_counter_block { + struct list_head list; + u32 id; + u32 offset; + u32 num_counters; + u32 client; + struct idr counter_idr; + refcount_t refcnt; + struct mutex mtx; /* protect stats and counter_idr */ + struct prestera_counter_stats *stats; + u8 *counter_flag; + bool is_updating; + bool full; +}; + +enum { + COUNTER_FLAG_READY = 0, + COUNTER_FLAG_INVALID = 1 +}; + +static bool +prestera_counter_is_ready(struct prestera_counter_block *block, u32 id) +{ + return block->counter_flag[id - block->offset] == COUNTER_FLAG_READY; +} + +static void prestera_counter_lock(struct prestera_counter *counter) +{ + mutex_lock(&counter->mtx); +} + +static void prestera_counter_unlock(struct prestera_counter *counter) +{ + mutex_unlock(&counter->mtx); +} + +static void prestera_counter_block_lock(struct prestera_counter_block *block) +{ + mutex_lock(&block->mtx); +} + +static void prestera_counter_block_unlock(struct prestera_counter_block *block) +{ + mutex_unlock(&block->mtx); +} + +static bool prestera_counter_block_incref(struct prestera_counter_block *block) +{ + return refcount_inc_not_zero(&block->refcnt); +} + +static bool prestera_counter_block_decref(struct prestera_counter_block *block) +{ + return refcount_dec_and_test(&block->refcnt); +} + +/* must be called with prestera_counter_block_lock() */ +static void prestera_counter_stats_clear(struct prestera_counter_block *block, + u32 counter_id) +{ + memset(&block->stats[counter_id - block->offset], 0, + sizeof(*block->stats)); +} + +static struct prestera_counter_block * +prestera_counter_block_lookup_not_full(struct prestera_counter *counter, + u32 client) +{ + u32 i; + + prestera_counter_lock(counter); + for (i = 0; i < counter->block_list_len; i++) { + if (counter->block_list[i] && + counter->block_list[i]->client == client && + !counter->block_list[i]->full && + prestera_counter_block_incref(counter->block_list[i])) { + prestera_counter_unlock(counter); + return counter->block_list[i]; + } + } + prestera_counter_unlock(counter); + + return NULL; +} + +static int prestera_counter_block_list_add(struct prestera_counter *counter, + struct prestera_counter_block *block) +{ + struct prestera_counter_block **arr; + u32 i; + + prestera_counter_lock(counter); + + for (i = 0; i < counter->block_list_len; i++) { + if (counter->block_list[i]) + continue; + + counter->block_list[i] = block; + prestera_counter_unlock(counter); + return 0; + } + + arr = krealloc(counter->block_list, (counter->block_list_len + 1) * + sizeof(*counter->block_list), GFP_KERNEL); + if (!arr) { 
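+ /* krealloc() failed; the old array is still valid, just unlock */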
+ prestera_counter_unlock(counter); + return -ENOMEM; + } + + counter->block_list = arr; + counter->block_list[counter->block_list_len] = block; + counter->block_list_len++; + prestera_counter_unlock(counter); + return 0; +} + +static struct prestera_counter_block * +prestera_counter_block_get(struct prestera_counter *counter, u32 client) +{ + struct prestera_counter_block *block; + int err; + + block = prestera_counter_block_lookup_not_full(counter, client); + if (block) + return block; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return ERR_PTR(-ENOMEM); + + err = prestera_hw_counter_block_get(counter->sw, client, + &block->id, &block->offset, + &block->num_counters); + if (err) + goto err_block; + + block->stats = kcalloc(block->num_counters, + sizeof(*block->stats), GFP_KERNEL); + if (!block->stats) { + err = -ENOMEM; + goto err_stats; + } + + block->counter_flag = kcalloc(block->num_counters, + sizeof(*block->counter_flag), + GFP_KERNEL); + if (!block->counter_flag) { + err = -ENOMEM; + goto err_flag; + } + + block->client = client; + mutex_init(&block->mtx); + refcount_set(&block->refcnt, 1); + idr_init_base(&block->counter_idr, block->offset); + + err = prestera_counter_block_list_add(counter, block); + if (err) + goto err_list_add; + + return block; + +err_list_add: + idr_destroy(&block->counter_idr); + mutex_destroy(&block->mtx); + kfree(block->counter_flag); +err_flag: + kfree(block->stats); +err_stats: + prestera_hw_counter_block_release(counter->sw, block->id); +err_block: + kfree(block); + return ERR_PTR(err); +} + +static void prestera_counter_block_put(struct prestera_counter *counter, + struct prestera_counter_block *block) +{ + u32 i; + + if (!prestera_counter_block_decref(block)) + return; + + prestera_counter_lock(counter); + for (i = 0; i < counter->block_list_len; i++) { + if (counter->block_list[i] && + counter->block_list[i]->id == block->id) { + counter->block_list[i] = NULL; + break; + } + } + prestera_counter_unlock(counter); + + WARN_ON(!idr_is_empty(&block->counter_idr)); + + prestera_hw_counter_block_release(counter->sw, block->id); + idr_destroy(&block->counter_idr); + mutex_destroy(&block->mtx); + kfree(block->stats); + kfree(block); +} + +static int prestera_counter_get_vacant(struct prestera_counter_block *block, + u32 *id) +{ + int free_id; + + if (block->full) + return -ENOSPC; + + prestera_counter_block_lock(block); + free_id = idr_alloc_cyclic(&block->counter_idr, NULL, block->offset, + block->offset + block->num_counters, + GFP_KERNEL); + if (free_id < 0) { + if (free_id == -ENOSPC) + block->full = true; + + prestera_counter_block_unlock(block); + return free_id; + } + *id = free_id; + prestera_counter_block_unlock(block); + + return 0; +} + +int prestera_counter_get(struct prestera_counter *counter, u32 client, + struct prestera_counter_block **bl, u32 *counter_id) +{ + struct prestera_counter_block *block; + int err; + u32 id; + +get_next_block: + block = prestera_counter_block_get(counter, client); + if (IS_ERR(block)) + return PTR_ERR(block); + + err = prestera_counter_get_vacant(block, &id); + if (err) { + prestera_counter_block_put(counter, block); + + if (err == -ENOSPC) + goto get_next_block; + + return err; + } + + prestera_counter_block_lock(block); + if (block->is_updating) + block->counter_flag[id - block->offset] = COUNTER_FLAG_INVALID; + prestera_counter_block_unlock(block); + + *counter_id = id; + *bl = block; + + return 0; +} + +void prestera_counter_put(struct prestera_counter *counter, + struct 
prestera_counter_block *block, u32 counter_id) +{ + if (!block) + return; + + prestera_counter_block_lock(block); + idr_remove(&block->counter_idr, counter_id); + block->full = false; + prestera_counter_stats_clear(block, counter_id); + prestera_counter_block_unlock(block); + + prestera_hw_counter_clear(counter->sw, block->id, counter_id); + prestera_counter_block_put(counter, block); +} + +static u32 prestera_counter_block_idx_next(struct prestera_counter *counter, + u32 curr_idx) +{ + u32 idx, i, start = curr_idx + 1; + + prestera_counter_lock(counter); + for (i = 0; i < counter->block_list_len; i++) { + idx = (start + i) % counter->block_list_len; + if (!counter->block_list[idx]) + continue; + + prestera_counter_unlock(counter); + return idx; + } + prestera_counter_unlock(counter); + + return 0; +} + +static struct prestera_counter_block * +prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx) +{ + if (idx >= counter->block_list_len) + return NULL; + + prestera_counter_lock(counter); + + if (!counter->block_list[idx] || + !prestera_counter_block_incref(counter->block_list[idx])) { + prestera_counter_unlock(counter); + return NULL; + } + + prestera_counter_unlock(counter); + return counter->block_list[idx]; +} + +static void prestera_counter_stats_work(struct work_struct *work) +{ + struct delayed_work *dl_work = + container_of(work, struct delayed_work, work); + struct prestera_counter *counter = + container_of(dl_work, struct prestera_counter, stats_dw); + struct prestera_counter_block *block; + u32 resched_time = COUNTER_POLL_TIME; + u32 count = COUNTER_BULK_SIZE; + bool done = false; + int err; + u32 i; + + block = prestera_counter_block_get_by_idx(counter, counter->curr_idx); + if (!block) { + if (counter->is_fetching) + goto abort; + + goto next; + } + + if (!counter->is_fetching) { + err = prestera_hw_counter_trigger(counter->sw, block->id); + if (err) + goto abort; + + prestera_counter_block_lock(block); + block->is_updating = true; + prestera_counter_block_unlock(block); + + counter->is_fetching = true; + counter->total_read = 0; + resched_time = COUNTER_RESCHED_TIME; + goto resched; + } + + prestera_counter_block_lock(block); + err = prestera_hw_counters_get(counter->sw, counter->total_read, + &count, &done, + &block->stats[counter->total_read]); + prestera_counter_block_unlock(block); + if (err) + goto abort; + + counter->total_read += count; + if (!done || counter->total_read < block->num_counters) { + resched_time = COUNTER_RESCHED_TIME; + goto resched; + } + + for (i = 0; i < block->num_counters; i++) { + if (block->counter_flag[i] == COUNTER_FLAG_INVALID) { + prestera_counter_block_lock(block); + block->counter_flag[i] = COUNTER_FLAG_READY; + memset(&block->stats[i], 0, sizeof(*block->stats)); + prestera_counter_block_unlock(block); + } + } + + prestera_counter_block_lock(block); + block->is_updating = false; + prestera_counter_block_unlock(block); + + goto next; +abort: + prestera_hw_counter_abort(counter->sw); +next: + counter->is_fetching = false; + counter->curr_idx = + prestera_counter_block_idx_next(counter, counter->curr_idx); +resched: + if (block) + prestera_counter_block_put(counter, block); + + schedule_delayed_work(&counter->stats_dw, resched_time); +} + +/* Can be executed without rtnl_lock(). + * So pay attention when something changing. 
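+ * Readers serialize on the per-block mutex; a counter that is not
+ * ready yet simply reports zeroed stats.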
+ */ +int prestera_counter_stats_get(struct prestera_counter *counter, + struct prestera_counter_block *block, + u32 counter_id, u64 *packets, u64 *bytes) +{ + if (!block || !prestera_counter_is_ready(block, counter_id)) { + *packets = 0; + *bytes = 0; + return 0; + } + + prestera_counter_block_lock(block); + *packets = block->stats[counter_id - block->offset].packets; + *bytes = block->stats[counter_id - block->offset].bytes; + + prestera_counter_stats_clear(block, counter_id); + prestera_counter_block_unlock(block); + + return 0; +} + +int prestera_counter_init(struct prestera_switch *sw) +{ + struct prestera_counter *counter; + + counter = kzalloc(sizeof(*counter), GFP_KERNEL); + if (!counter) + return -ENOMEM; + + counter->block_list = kzalloc(sizeof(*counter->block_list), GFP_KERNEL); + if (!counter->block_list) { + kfree(counter); + return -ENOMEM; + } + + mutex_init(&counter->mtx); + counter->block_list_len = 1; + counter->sw = sw; + sw->counter = counter; + + INIT_DELAYED_WORK(&counter->stats_dw, prestera_counter_stats_work); + schedule_delayed_work(&counter->stats_dw, COUNTER_POLL_TIME); + + return 0; +} + +void prestera_counter_fini(struct prestera_switch *sw) +{ + struct prestera_counter *counter = sw->counter; + u32 i; + + cancel_delayed_work_sync(&counter->stats_dw); + + for (i = 0; i < counter->block_list_len; i++) + WARN_ON(counter->block_list[i]); + + mutex_destroy(&counter->mtx); + kfree(counter->block_list); + kfree(counter); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.h b/drivers/net/ethernet/marvell/prestera/prestera_counter.h new file mode 100644 index 0000000000..ad6b739077 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2021 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_COUNTER_H_ +#define _PRESTERA_COUNTER_H_ + +#include <linux/types.h> + +struct prestera_counter_stats { + u64 packets; + u64 bytes; +}; + +struct prestera_switch; +struct prestera_counter; +struct prestera_counter_block; + +int prestera_counter_init(struct prestera_switch *sw); +void prestera_counter_fini(struct prestera_switch *sw); + +int prestera_counter_get(struct prestera_counter *counter, u32 client, + struct prestera_counter_block **block, + u32 *counter_id); +void prestera_counter_put(struct prestera_counter *counter, + struct prestera_counter_block *block, u32 counter_id); +int prestera_counter_stats_get(struct prestera_counter *counter, + struct prestera_counter_block *block, + u32 counter_id, u64 *packets, u64 *bytes); + +#endif /* _PRESTERA_COUNTER_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c new file mode 100644 index 0000000000..2a4c9df4eb --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c @@ -0,0 +1,598 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. 
All rights reserved */ + +#include <net/devlink.h> + +#include "prestera_devlink.h" +#include "prestera_hw.h" + +/* All driver-specific traps must be documented in + * Documentation/networking/devlink/prestera.rst + */ +enum { + DEVLINK_PRESTERA_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX, + DEVLINK_PRESTERA_TRAP_ID_ARP_BC, + DEVLINK_PRESTERA_TRAP_ID_IS_IS, + DEVLINK_PRESTERA_TRAP_ID_OSPF, + DEVLINK_PRESTERA_TRAP_ID_IP_BC_MAC, + DEVLINK_PRESTERA_TRAP_ID_ROUTER_MC, + DEVLINK_PRESTERA_TRAP_ID_VRRP, + DEVLINK_PRESTERA_TRAP_ID_DHCP, + DEVLINK_PRESTERA_TRAP_ID_MAC_TO_ME, + DEVLINK_PRESTERA_TRAP_ID_IPV4_OPTIONS, + DEVLINK_PRESTERA_TRAP_ID_IP_DEFAULT_ROUTE, + DEVLINK_PRESTERA_TRAP_ID_IP_TO_ME, + DEVLINK_PRESTERA_TRAP_ID_IPV4_ICMP_REDIRECT, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_0, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_1, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_2, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_3, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_4, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_5, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_6, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_7, + DEVLINK_PRESTERA_TRAP_ID_BGP, + DEVLINK_PRESTERA_TRAP_ID_SSH, + DEVLINK_PRESTERA_TRAP_ID_TELNET, + DEVLINK_PRESTERA_TRAP_ID_ICMP, + DEVLINK_PRESTERA_TRAP_ID_MET_RED, + DEVLINK_PRESTERA_TRAP_ID_IP_SIP_IS_ZERO, + DEVLINK_PRESTERA_TRAP_ID_IP_UC_DIP_DA_MISMATCH, + DEVLINK_PRESTERA_TRAP_ID_ILLEGAL_IPV4_HDR, + DEVLINK_PRESTERA_TRAP_ID_ILLEGAL_IP_ADDR, + DEVLINK_PRESTERA_TRAP_ID_INVALID_SA, + DEVLINK_PRESTERA_TRAP_ID_LOCAL_PORT, + DEVLINK_PRESTERA_TRAP_ID_PORT_NO_VLAN, + DEVLINK_PRESTERA_TRAP_ID_RXDMA_DROP, +}; + +#define DEVLINK_PRESTERA_TRAP_NAME_ARP_BC \ + "arp_bc" +#define DEVLINK_PRESTERA_TRAP_NAME_IS_IS \ + "is_is" +#define DEVLINK_PRESTERA_TRAP_NAME_OSPF \ + "ospf" +#define DEVLINK_PRESTERA_TRAP_NAME_IP_BC_MAC \ + "ip_bc_mac" +#define DEVLINK_PRESTERA_TRAP_NAME_ROUTER_MC \ + "router_mc" +#define DEVLINK_PRESTERA_TRAP_NAME_VRRP \ + "vrrp" +#define DEVLINK_PRESTERA_TRAP_NAME_DHCP \ + "dhcp" +#define DEVLINK_PRESTERA_TRAP_NAME_MAC_TO_ME \ + "mac_to_me" +#define DEVLINK_PRESTERA_TRAP_NAME_IPV4_OPTIONS \ + "ipv4_options" +#define DEVLINK_PRESTERA_TRAP_NAME_IP_DEFAULT_ROUTE \ + "ip_default_route" +#define DEVLINK_PRESTERA_TRAP_NAME_IP_TO_ME \ + "ip_to_me" +#define DEVLINK_PRESTERA_TRAP_NAME_IPV4_ICMP_REDIRECT \ + "ipv4_icmp_redirect" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_0 \ + "acl_code_0" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_1 \ + "acl_code_1" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_2 \ + "acl_code_2" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_3 \ + "acl_code_3" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_4 \ + "acl_code_4" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_5 \ + "acl_code_5" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_6 \ + "acl_code_6" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_7 \ + "acl_code_7" +#define DEVLINK_PRESTERA_TRAP_NAME_BGP \ + "bgp" +#define DEVLINK_PRESTERA_TRAP_NAME_SSH \ + "ssh" +#define DEVLINK_PRESTERA_TRAP_NAME_TELNET \ + "telnet" +#define DEVLINK_PRESTERA_TRAP_NAME_ICMP \ + "icmp" +#define DEVLINK_PRESTERA_TRAP_NAME_RXDMA_DROP \ + "rxdma_drop" +#define DEVLINK_PRESTERA_TRAP_NAME_PORT_NO_VLAN \ + "port_no_vlan" +#define DEVLINK_PRESTERA_TRAP_NAME_LOCAL_PORT \ + "local_port" +#define DEVLINK_PRESTERA_TRAP_NAME_INVALID_SA \ + "invalid_sa" +#define DEVLINK_PRESTERA_TRAP_NAME_ILLEGAL_IP_ADDR \ + "illegal_ip_addr" +#define DEVLINK_PRESTERA_TRAP_NAME_ILLEGAL_IPV4_HDR \ + "illegal_ipv4_hdr" +#define DEVLINK_PRESTERA_TRAP_NAME_IP_UC_DIP_DA_MISMATCH \ + "ip_uc_dip_da_mismatch" +#define 
DEVLINK_PRESTERA_TRAP_NAME_IP_SIP_IS_ZERO \
+	"ip_sip_is_zero"
+#define DEVLINK_PRESTERA_TRAP_NAME_MET_RED \
+	"met_red"
+
+struct prestera_trap {
+	struct devlink_trap trap;
+	u8 cpu_code;
+};
+
+struct prestera_trap_item {
+	enum devlink_trap_action action;
+	void *trap_ctx;
+};
+
+struct prestera_trap_data {
+	struct prestera_switch *sw;
+	struct prestera_trap_item *trap_items_arr;
+	u32 traps_count;
+};
+
+#define PRESTERA_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+
+#define PRESTERA_TRAP_CONTROL(_id, _group_id, _action) \
+	DEVLINK_TRAP_GENERIC(CONTROL, _action, _id, \
+			     DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+			     PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_DRIVER_CONTROL(_id, _group_id) \
+	DEVLINK_TRAP_DRIVER(CONTROL, TRAP, DEVLINK_PRESTERA_TRAP_ID_##_id, \
+			    DEVLINK_PRESTERA_TRAP_NAME_##_id, \
+			    DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+			    PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_EXCEPTION(_id, _group_id) \
+	DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
+			     DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+			     PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_DRIVER_EXCEPTION(_id, _group_id) \
+	DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, DEVLINK_PRESTERA_TRAP_ID_##_id, \
+			    DEVLINK_PRESTERA_TRAP_NAME_##_id, \
+			    DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+			    PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_DRIVER_DROP(_id, _group_id) \
+	DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_PRESTERA_TRAP_ID_##_id, \
+			    DEVLINK_PRESTERA_TRAP_NAME_##_id, \
+			    DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+			    PRESTERA_TRAP_METADATA)
+
+static const struct devlink_trap_group prestera_trap_groups_arr[] = {
+	/* No policer is associated with the following groups (policer id == 0) */
+	DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(L3_EXCEPTIONS, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(NEIGH_DISCOVERY, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(ACL_TRAP, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(OSPF, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(STP, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(LACP, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(LLDP, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(VRRP, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(DHCP, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(BGP, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(LOCAL_DELIVERY, 0),
+	DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 0),
+};
+
+/* Initialize the trap list and associate each trap with its hardware CPU code.
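+ * On receive, prestera_devlink_trap_report() below resolves the trap item
+ * by this cpu_code, so e.g. an skb delivered with cpu_code 28 is reported
+ * against the generic LLDP trap in this table.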
*/ +static struct prestera_trap prestera_trap_items_arr[] = { + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ARP_BC, NEIGH_DISCOVERY), + .cpu_code = 5, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(IS_IS, LOCAL_DELIVERY), + .cpu_code = 13, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(OSPF, OSPF), + .cpu_code = 16, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(IP_BC_MAC, LOCAL_DELIVERY), + .cpu_code = 19, + }, + { + .trap = PRESTERA_TRAP_CONTROL(STP, STP, TRAP), + .cpu_code = 26, + }, + { + .trap = PRESTERA_TRAP_CONTROL(LACP, LACP, TRAP), + .cpu_code = 27, + }, + { + .trap = PRESTERA_TRAP_CONTROL(LLDP, LLDP, TRAP), + .cpu_code = 28, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ROUTER_MC, LOCAL_DELIVERY), + .cpu_code = 29, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(VRRP, VRRP), + .cpu_code = 30, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(DHCP, DHCP), + .cpu_code = 33, + }, + { + .trap = PRESTERA_TRAP_EXCEPTION(MTU_ERROR, L3_EXCEPTIONS), + .cpu_code = 63, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(MAC_TO_ME, LOCAL_DELIVERY), + .cpu_code = 65, + }, + { + .trap = PRESTERA_TRAP_EXCEPTION(TTL_ERROR, L3_EXCEPTIONS), + .cpu_code = 133, + }, + { + .trap = PRESTERA_TRAP_DRIVER_EXCEPTION(IPV4_OPTIONS, + L3_EXCEPTIONS), + .cpu_code = 141, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(IP_DEFAULT_ROUTE, + LOCAL_DELIVERY), + .cpu_code = 160, + }, + { + .trap = PRESTERA_TRAP_CONTROL(LOCAL_ROUTE, LOCAL_DELIVERY, + TRAP), + .cpu_code = 161, + }, + { + .trap = PRESTERA_TRAP_DRIVER_EXCEPTION(IPV4_ICMP_REDIRECT, + L3_EXCEPTIONS), + .cpu_code = 180, + }, + { + .trap = PRESTERA_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY, + TRAP), + .cpu_code = 188, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_0, ACL_TRAP), + .cpu_code = 192, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_1, ACL_TRAP), + .cpu_code = 193, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_2, ACL_TRAP), + .cpu_code = 194, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_3, ACL_TRAP), + .cpu_code = 195, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_4, ACL_TRAP), + .cpu_code = 196, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_5, ACL_TRAP), + .cpu_code = 197, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_6, ACL_TRAP), + .cpu_code = 198, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_7, ACL_TRAP), + .cpu_code = 199, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(BGP, BGP), + .cpu_code = 206, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(SSH, LOCAL_DELIVERY), + .cpu_code = 207, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(TELNET, LOCAL_DELIVERY), + .cpu_code = 208, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ICMP, LOCAL_DELIVERY), + .cpu_code = 209, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(RXDMA_DROP, BUFFER_DROPS), + .cpu_code = 37, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(PORT_NO_VLAN, L2_DROPS), + .cpu_code = 39, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(LOCAL_PORT, L2_DROPS), + .cpu_code = 56, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(INVALID_SA, L2_DROPS), + .cpu_code = 60, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(ILLEGAL_IP_ADDR, L3_DROPS), + .cpu_code = 136, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(ILLEGAL_IPV4_HDR, L3_DROPS), + .cpu_code = 137, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(IP_UC_DIP_DA_MISMATCH, + L3_DROPS), + .cpu_code = 138, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(IP_SIP_IS_ZERO, L3_DROPS), + .cpu_code = 145, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(MET_RED, BUFFER_DROPS), + .cpu_code = 
185, + }, +}; + +static int prestera_drop_counter_get(struct devlink *devlink, + const struct devlink_trap *trap, + u64 *p_drops); + +static int prestera_dl_info_get(struct devlink *dl, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct prestera_switch *sw = devlink_priv(dl); + char buf[16]; + + snprintf(buf, sizeof(buf), "%d.%d.%d", + sw->dev->fw_rev.maj, + sw->dev->fw_rev.min, + sw->dev->fw_rev.sub); + + return devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + buf); +} + +static int prestera_trap_init(struct devlink *devlink, + const struct devlink_trap *trap, void *trap_ctx); + +static int prestera_trap_action_set(struct devlink *devlink, + const struct devlink_trap *trap, + enum devlink_trap_action action, + struct netlink_ext_ack *extack); + +static const struct devlink_ops prestera_dl_ops = { + .info_get = prestera_dl_info_get, + .trap_init = prestera_trap_init, + .trap_action_set = prestera_trap_action_set, + .trap_drop_counter_get = prestera_drop_counter_get, +}; + +struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev) +{ + struct devlink *dl; + + dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch), + dev->dev); + + return devlink_priv(dl); +} + +void prestera_devlink_free(struct prestera_switch *sw) +{ + struct devlink *dl = priv_to_devlink(sw); + + devlink_free(dl); +} + +void prestera_devlink_register(struct prestera_switch *sw) +{ + struct devlink *dl = priv_to_devlink(sw); + + devlink_register(dl); +} + +void prestera_devlink_unregister(struct prestera_switch *sw) +{ + struct devlink *dl = priv_to_devlink(sw); + + devlink_unregister(dl); +} + +int prestera_devlink_port_register(struct prestera_port *port) +{ + struct prestera_switch *sw = port->sw; + struct devlink *dl = priv_to_devlink(sw); + struct devlink_port_attrs attrs = {}; + int err; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = port->fp_id; + attrs.switch_id.id_len = sizeof(sw->id); + memcpy(attrs.switch_id.id, &sw->id, attrs.switch_id.id_len); + + devlink_port_attrs_set(&port->dl_port, &attrs); + + err = devlink_port_register(dl, &port->dl_port, port->fp_id); + if (err) { + dev_err(prestera_dev(sw), "devlink_port_register failed: %d\n", err); + return err; + } + + return 0; +} + +void prestera_devlink_port_unregister(struct prestera_port *port) +{ + devlink_port_unregister(&port->dl_port); +} + +int prestera_devlink_traps_register(struct prestera_switch *sw) +{ + const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr); + const u32 traps_count = ARRAY_SIZE(prestera_trap_items_arr); + struct devlink *devlink = priv_to_devlink(sw); + struct prestera_trap_data *trap_data; + struct prestera_trap *prestera_trap; + int err, i; + + trap_data = kzalloc(sizeof(*trap_data), GFP_KERNEL); + if (!trap_data) + return -ENOMEM; + + trap_data->trap_items_arr = kcalloc(traps_count, + sizeof(struct prestera_trap_item), + GFP_KERNEL); + if (!trap_data->trap_items_arr) { + err = -ENOMEM; + goto err_trap_items_alloc; + } + + trap_data->sw = sw; + trap_data->traps_count = traps_count; + sw->trap_data = trap_data; + + err = devlink_trap_groups_register(devlink, prestera_trap_groups_arr, + groups_count); + if (err) + goto err_groups_register; + + for (i = 0; i < traps_count; i++) { + prestera_trap = &prestera_trap_items_arr[i]; + err = devlink_traps_register(devlink, &prestera_trap->trap, 1, + sw); + if (err) + goto err_trap_register; + } + + return 0; + +err_trap_register: + for (i--; i >= 0; i--) { + prestera_trap 
= &prestera_trap_items_arr[i]; + devlink_traps_unregister(devlink, &prestera_trap->trap, 1); + } + devlink_trap_groups_unregister(devlink, prestera_trap_groups_arr, + groups_count); +err_groups_register: + kfree(trap_data->trap_items_arr); +err_trap_items_alloc: + kfree(trap_data); + return err; +} + +static struct prestera_trap_item * +prestera_get_trap_item_by_cpu_code(struct prestera_switch *sw, u8 cpu_code) +{ + struct prestera_trap_data *trap_data = sw->trap_data; + struct prestera_trap *prestera_trap; + int i; + + for (i = 0; i < trap_data->traps_count; i++) { + prestera_trap = &prestera_trap_items_arr[i]; + if (cpu_code == prestera_trap->cpu_code) + return &trap_data->trap_items_arr[i]; + } + + return NULL; +} + +void prestera_devlink_trap_report(struct prestera_port *port, + struct sk_buff *skb, u8 cpu_code) +{ + struct prestera_trap_item *trap_item; + struct devlink *devlink; + + devlink = port->dl_port.devlink; + + trap_item = prestera_get_trap_item_by_cpu_code(port->sw, cpu_code); + if (unlikely(!trap_item)) + return; + + devlink_trap_report(devlink, skb, trap_item->trap_ctx, + &port->dl_port, NULL); +} + +static struct prestera_trap_item * +prestera_devlink_trap_item_lookup(struct prestera_switch *sw, u16 trap_id) +{ + struct prestera_trap_data *trap_data = sw->trap_data; + int i; + + for (i = 0; i < ARRAY_SIZE(prestera_trap_items_arr); i++) { + if (prestera_trap_items_arr[i].trap.id == trap_id) + return &trap_data->trap_items_arr[i]; + } + + return NULL; +} + +static int prestera_trap_init(struct devlink *devlink, + const struct devlink_trap *trap, void *trap_ctx) +{ + struct prestera_switch *sw = devlink_priv(devlink); + struct prestera_trap_item *trap_item; + + trap_item = prestera_devlink_trap_item_lookup(sw, trap->id); + if (WARN_ON(!trap_item)) + return -EINVAL; + + trap_item->trap_ctx = trap_ctx; + trap_item->action = trap->init_action; + + return 0; +} + +static int prestera_trap_action_set(struct devlink *devlink, + const struct devlink_trap *trap, + enum devlink_trap_action action, + struct netlink_ext_ack *extack) +{ + /* Currently, driver does not support trap action altering */ + return -EOPNOTSUPP; +} + +static int prestera_drop_counter_get(struct devlink *devlink, + const struct devlink_trap *trap, + u64 *p_drops) +{ + struct prestera_switch *sw = devlink_priv(devlink); + enum prestera_hw_cpu_code_cnt_t cpu_code_type = + PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP; + struct prestera_trap *prestera_trap = + container_of(trap, struct prestera_trap, trap); + + return prestera_hw_cpu_code_counters_get(sw, prestera_trap->cpu_code, + cpu_code_type, p_drops); +} + +void prestera_devlink_traps_unregister(struct prestera_switch *sw) +{ + struct prestera_trap_data *trap_data = sw->trap_data; + struct devlink *dl = priv_to_devlink(sw); + const struct devlink_trap *trap; + int i; + + for (i = 0; i < ARRAY_SIZE(prestera_trap_items_arr); ++i) { + trap = &prestera_trap_items_arr[i].trap; + devlink_traps_unregister(dl, trap, 1); + } + + devlink_trap_groups_unregister(dl, prestera_trap_groups_arr, + ARRAY_SIZE(prestera_trap_groups_arr)); + kfree(trap_data->trap_items_arr); + kfree(trap_data); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h new file mode 100644 index 0000000000..bf84ad6fd8 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. 
All rights reserved. */ + +#ifndef _PRESTERA_DEVLINK_H_ +#define _PRESTERA_DEVLINK_H_ + +#include "prestera.h" + +struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev); +void prestera_devlink_free(struct prestera_switch *sw); + +void prestera_devlink_register(struct prestera_switch *sw); +void prestera_devlink_unregister(struct prestera_switch *sw); + +int prestera_devlink_port_register(struct prestera_port *port); +void prestera_devlink_port_unregister(struct prestera_port *port); + +void prestera_devlink_trap_report(struct prestera_port *port, + struct sk_buff *skb, u8 cpu_code); +int prestera_devlink_traps_register(struct prestera_switch *sw); +void prestera_devlink_traps_unregister(struct prestera_switch *sw); + +#endif /* _PRESTERA_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.c b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c new file mode 100644 index 0000000000..b7e89c0ca5 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/errno.h> +#include <linux/string.h> + +#include "prestera_dsa.h" + +#define PRESTERA_DSA_W0_CMD GENMASK(31, 30) +#define PRESTERA_DSA_W0_IS_TAGGED BIT(29) +#define PRESTERA_DSA_W0_DEV_NUM GENMASK(28, 24) +#define PRESTERA_DSA_W0_PORT_NUM GENMASK(23, 19) +#define PRESTERA_DSA_W0_VPT GENMASK(15, 13) +#define PRESTERA_DSA_W0_EXT_BIT BIT(12) +#define PRESTERA_DSA_W0_VID GENMASK(11, 0) + +#define PRESTERA_DSA_W1_EXT_BIT BIT(31) +#define PRESTERA_DSA_W1_CFI_BIT BIT(30) +#define PRESTERA_DSA_W1_PORT_NUM GENMASK(11, 10) +#define PRESTERA_DSA_W1_MASK_CPU_CODE GENMASK(7, 0) + +#define PRESTERA_DSA_W2_EXT_BIT BIT(31) +#define PRESTERA_DSA_W2_PORT_NUM BIT(20) + +#define PRESTERA_DSA_W3_VID GENMASK(30, 27) +#define PRESTERA_DSA_W3_DST_EPORT GENMASK(23, 7) +#define PRESTERA_DSA_W3_DEV_NUM GENMASK(6, 0) + +#define PRESTERA_DSA_VID GENMASK(15, 12) +#define PRESTERA_DSA_DEV_NUM GENMASK(11, 5) + +int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf) +{ + __be32 *dsa_words = (__be32 *)dsa_buf; + enum prestera_dsa_cmd cmd; + u32 words[4]; + u32 field; + + words[0] = ntohl(dsa_words[0]); + words[1] = ntohl(dsa_words[1]); + words[2] = ntohl(dsa_words[2]); + words[3] = ntohl(dsa_words[3]); + + /* set the common parameters */ + cmd = (enum prestera_dsa_cmd)FIELD_GET(PRESTERA_DSA_W0_CMD, words[0]); + + /* only to CPU is supported */ + if (unlikely(cmd != PRESTERA_DSA_CMD_TO_CPU)) + return -EINVAL; + + if (FIELD_GET(PRESTERA_DSA_W0_EXT_BIT, words[0]) == 0) + return -EINVAL; + if (FIELD_GET(PRESTERA_DSA_W1_EXT_BIT, words[1]) == 0) + return -EINVAL; + if (FIELD_GET(PRESTERA_DSA_W2_EXT_BIT, words[2]) == 0) + return -EINVAL; + + field = FIELD_GET(PRESTERA_DSA_W3_VID, words[3]); + + dsa->vlan.is_tagged = FIELD_GET(PRESTERA_DSA_W0_IS_TAGGED, words[0]); + dsa->vlan.cfi_bit = FIELD_GET(PRESTERA_DSA_W1_CFI_BIT, words[1]); + dsa->vlan.vpt = FIELD_GET(PRESTERA_DSA_W0_VPT, words[0]); + dsa->vlan.vid = FIELD_GET(PRESTERA_DSA_W0_VID, words[0]); + dsa->vlan.vid &= ~PRESTERA_DSA_VID; + dsa->vlan.vid |= FIELD_PREP(PRESTERA_DSA_VID, field); + + field = FIELD_GET(PRESTERA_DSA_W3_DEV_NUM, words[3]); + + dsa->hw_dev_num = FIELD_GET(PRESTERA_DSA_W0_DEV_NUM, words[0]); + dsa->hw_dev_num |= FIELD_PREP(PRESTERA_DSA_DEV_NUM, field); + + dsa->port_num = (FIELD_GET(PRESTERA_DSA_W0_PORT_NUM, words[0]) << 0) 
| + (FIELD_GET(PRESTERA_DSA_W1_PORT_NUM, words[1]) << 5) | + (FIELD_GET(PRESTERA_DSA_W2_PORT_NUM, words[2]) << 7); + + dsa->cpu_code = FIELD_GET(PRESTERA_DSA_W1_MASK_CPU_CODE, words[1]); + + return 0; +} + +int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf) +{ + __be32 *dsa_words = (__be32 *)dsa_buf; + u32 dev_num = dsa->hw_dev_num; + u32 words[4] = { 0 }; + + words[0] |= FIELD_PREP(PRESTERA_DSA_W0_CMD, PRESTERA_DSA_CMD_FROM_CPU); + + words[0] |= FIELD_PREP(PRESTERA_DSA_W0_DEV_NUM, dev_num); + dev_num = FIELD_GET(PRESTERA_DSA_DEV_NUM, dev_num); + words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DEV_NUM, dev_num); + + words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DST_EPORT, dsa->port_num); + + words[0] |= FIELD_PREP(PRESTERA_DSA_W0_EXT_BIT, 1); + words[1] |= FIELD_PREP(PRESTERA_DSA_W1_EXT_BIT, 1); + words[2] |= FIELD_PREP(PRESTERA_DSA_W2_EXT_BIT, 1); + + dsa_words[0] = htonl(words[0]); + dsa_words[1] = htonl(words[1]); + dsa_words[2] = htonl(words[2]); + dsa_words[3] = htonl(words[3]); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.h b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h new file mode 100644 index 0000000000..c99342f475 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */ + +#ifndef __PRESTERA_DSA_H_ +#define __PRESTERA_DSA_H_ + +#include <linux/types.h> + +#define PRESTERA_DSA_HLEN 16 + +enum prestera_dsa_cmd { + /* DSA command is "To CPU" */ + PRESTERA_DSA_CMD_TO_CPU = 0, + + /* DSA command is "From CPU" */ + PRESTERA_DSA_CMD_FROM_CPU, +}; + +struct prestera_dsa_vlan { + u16 vid; + u8 vpt; + u8 cfi_bit; + bool is_tagged; +}; + +struct prestera_dsa { + struct prestera_dsa_vlan vlan; + u32 hw_dev_num; + u32 port_num; + u8 cpu_code; +}; + +int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf); +int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf); + +#endif /* _PRESTERA_DSA_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c new file mode 100644 index 0000000000..2f52daba58 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c @@ -0,0 +1,802 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. 
All rights reserved */ + +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> + +#include "prestera_ethtool.h" +#include "prestera.h" +#include "prestera_hw.h" + +#define PRESTERA_STATS_CNT \ + (sizeof(struct prestera_port_stats) / sizeof(u64)) +#define PRESTERA_STATS_IDX(name) \ + (offsetof(struct prestera_port_stats, name) / sizeof(u64)) +#define PRESTERA_STATS_FIELD(name) \ + [PRESTERA_STATS_IDX(name)] = __stringify(name) + +static const char driver_kind[] = "prestera"; + +static const struct prestera_link_mode { + enum ethtool_link_mode_bit_indices eth_mode; + u32 speed; + u64 pr_mask; + u8 duplex; + u8 port_type; +} port_link_modes[PRESTERA_LINK_MODE_MAX] = { + [PRESTERA_LINK_MODE_10baseT_Half] = { + .eth_mode = ETHTOOL_LINK_MODE_10baseT_Half_BIT, + .speed = 10, + .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Half, + .duplex = PRESTERA_PORT_DUPLEX_HALF, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_10baseT_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_10baseT_Full_BIT, + .speed = 10, + .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_100baseT_Half] = { + .eth_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT, + .speed = 100, + .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Half, + .duplex = PRESTERA_PORT_DUPLEX_HALF, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_100baseT_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_100baseT_Full_BIT, + .speed = 100, + .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_1000baseT_Half] = { + .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + .speed = 1000, + .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Half, + .duplex = PRESTERA_PORT_DUPLEX_HALF, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_1000baseT_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + .speed = 1000, + .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_1000baseX_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + .speed = 1000, + .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseX_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_1000baseKX_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + .speed = 1000, + .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseKX_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_2500baseX_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_2500baseX_Full_BIT, + .speed = 2500, + .pr_mask = 1 << PRESTERA_LINK_MODE_2500baseX_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + }, + [PRESTERA_LINK_MODE_10GbaseKR_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + .speed = 10000, + .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseKR_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_10GbaseSR_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + .speed = 10000, + .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseSR_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_10GbaseLR_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + .speed = 10000, + .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseLR_Full, + .duplex = 
PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_20GbaseKR2_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, + .speed = 20000, + .pr_mask = 1 << PRESTERA_LINK_MODE_20GbaseKR2_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_25GbaseCR_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + .speed = 25000, + .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseCR_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_DA, + }, + [PRESTERA_LINK_MODE_25GbaseKR_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + .speed = 25000, + .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseKR_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_25GbaseSR_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + .speed = 25000, + .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseSR_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_40GbaseKR4_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + .speed = 40000, + .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseKR4_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_40GbaseCR4_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + .speed = 40000, + .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseCR4_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_DA, + }, + [PRESTERA_LINK_MODE_40GbaseSR4_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + .speed = 40000, + .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseSR4_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_50GbaseCR2_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + .speed = 50000, + .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseCR2_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_DA, + }, + [PRESTERA_LINK_MODE_50GbaseKR2_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + .speed = 50000, + .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseKR2_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_50GbaseSR2_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + .speed = 50000, + .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseSR2_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_100GbaseKR4_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + .speed = 100000, + .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseKR4_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_TP, + }, + [PRESTERA_LINK_MODE_100GbaseSR4_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + .speed = 100000, + .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseSR4_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_FIBRE, + }, + [PRESTERA_LINK_MODE_100GbaseCR4_Full] = { + .eth_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + .speed = 100000, + .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseCR4_Full, + .duplex = PRESTERA_PORT_DUPLEX_FULL, + .port_type = PRESTERA_PORT_TYPE_DA, + } +}; + +static const struct prestera_fec { + u32 eth_fec; + enum ethtool_link_mode_bit_indices eth_mode; + u8 pr_fec; +} port_fec_caps[PRESTERA_PORT_FEC_MAX] = { + [PRESTERA_PORT_FEC_OFF] = { 
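+		/* Note: each entry in this table pairs one PRESTERA_PORT_FEC_*
+		 * capability bit (pr_fec) with its ethtool_fecparam flag and
+		 * link-mode bit; the port's supp_fec is tested against pr_fec.
+		 */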
+ .eth_fec = ETHTOOL_FEC_OFF, + .eth_mode = ETHTOOL_LINK_MODE_FEC_NONE_BIT, + .pr_fec = 1 << PRESTERA_PORT_FEC_OFF, + }, + [PRESTERA_PORT_FEC_BASER] = { + .eth_fec = ETHTOOL_FEC_BASER, + .eth_mode = ETHTOOL_LINK_MODE_FEC_BASER_BIT, + .pr_fec = 1 << PRESTERA_PORT_FEC_BASER, + }, + [PRESTERA_PORT_FEC_RS] = { + .eth_fec = ETHTOOL_FEC_RS, + .eth_mode = ETHTOOL_LINK_MODE_FEC_RS_BIT, + .pr_fec = 1 << PRESTERA_PORT_FEC_RS, + } +}; + +static const struct prestera_port_type { + enum ethtool_link_mode_bit_indices eth_mode; + u8 eth_type; +} port_types[PRESTERA_PORT_TYPE_MAX] = { + [PRESTERA_PORT_TYPE_NONE] = { + .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS, + .eth_type = PORT_NONE, + }, + [PRESTERA_PORT_TYPE_TP] = { + .eth_mode = ETHTOOL_LINK_MODE_TP_BIT, + .eth_type = PORT_TP, + }, + [PRESTERA_PORT_TYPE_AUI] = { + .eth_mode = ETHTOOL_LINK_MODE_AUI_BIT, + .eth_type = PORT_AUI, + }, + [PRESTERA_PORT_TYPE_MII] = { + .eth_mode = ETHTOOL_LINK_MODE_MII_BIT, + .eth_type = PORT_MII, + }, + [PRESTERA_PORT_TYPE_FIBRE] = { + .eth_mode = ETHTOOL_LINK_MODE_FIBRE_BIT, + .eth_type = PORT_FIBRE, + }, + [PRESTERA_PORT_TYPE_BNC] = { + .eth_mode = ETHTOOL_LINK_MODE_BNC_BIT, + .eth_type = PORT_BNC, + }, + [PRESTERA_PORT_TYPE_DA] = { + .eth_mode = ETHTOOL_LINK_MODE_TP_BIT, + .eth_type = PORT_TP, + }, + [PRESTERA_PORT_TYPE_OTHER] = { + .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS, + .eth_type = PORT_OTHER, + } +}; + +static const char prestera_cnt_name[PRESTERA_STATS_CNT][ETH_GSTRING_LEN] = { + PRESTERA_STATS_FIELD(good_octets_received), + PRESTERA_STATS_FIELD(bad_octets_received), + PRESTERA_STATS_FIELD(mac_trans_error), + PRESTERA_STATS_FIELD(broadcast_frames_received), + PRESTERA_STATS_FIELD(multicast_frames_received), + PRESTERA_STATS_FIELD(frames_64_octets), + PRESTERA_STATS_FIELD(frames_65_to_127_octets), + PRESTERA_STATS_FIELD(frames_128_to_255_octets), + PRESTERA_STATS_FIELD(frames_256_to_511_octets), + PRESTERA_STATS_FIELD(frames_512_to_1023_octets), + PRESTERA_STATS_FIELD(frames_1024_to_max_octets), + PRESTERA_STATS_FIELD(excessive_collision), + PRESTERA_STATS_FIELD(multicast_frames_sent), + PRESTERA_STATS_FIELD(broadcast_frames_sent), + PRESTERA_STATS_FIELD(fc_sent), + PRESTERA_STATS_FIELD(fc_received), + PRESTERA_STATS_FIELD(buffer_overrun), + PRESTERA_STATS_FIELD(undersize), + PRESTERA_STATS_FIELD(fragments), + PRESTERA_STATS_FIELD(oversize), + PRESTERA_STATS_FIELD(jabber), + PRESTERA_STATS_FIELD(rx_error_frame_received), + PRESTERA_STATS_FIELD(bad_crc), + PRESTERA_STATS_FIELD(collisions), + PRESTERA_STATS_FIELD(late_collision), + PRESTERA_STATS_FIELD(unicast_frames_received), + PRESTERA_STATS_FIELD(unicast_frames_sent), + PRESTERA_STATS_FIELD(sent_multiple), + PRESTERA_STATS_FIELD(sent_deferred), + PRESTERA_STATS_FIELD(good_octets_sent), +}; + +static void prestera_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct prestera_port *port = netdev_priv(dev); + struct prestera_switch *sw = port->sw; + + strscpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver)); + strscpy(drvinfo->bus_info, dev_name(prestera_dev(sw)), + sizeof(drvinfo->bus_info)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", + sw->dev->fw_rev.maj, + sw->dev->fw_rev.min, + sw->dev->fw_rev.sub); +} + +static u8 prestera_port_type_get(struct prestera_port *port) +{ + if (port->caps.type < PRESTERA_PORT_TYPE_MAX) + return port_types[port->caps.type].eth_type; + + return PORT_OTHER; +} + +static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd, + struct 
prestera_port *port) +{ + u32 new_mode = PRESTERA_LINK_MODE_MAX; + u32 type, mode; + + for (type = 0; type < PRESTERA_PORT_TYPE_MAX; type++) { + if (port_types[type].eth_type == ecmd->base.port && + test_bit(port_types[type].eth_mode, + ecmd->link_modes.supported)) { + break; + } + } + + if (type == port->caps.type) + return 0; + if (type != port->caps.type && ecmd->base.autoneg == AUTONEG_ENABLE) + return -EINVAL; + if (type == PRESTERA_PORT_TYPE_MAX) + return -EOPNOTSUPP; + + for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { + if ((port_link_modes[mode].pr_mask & + port->caps.supp_link_modes) && + type == port_link_modes[mode].port_type) { + new_mode = mode; + } + } + + if (new_mode >= PRESTERA_LINK_MODE_MAX) + return -EINVAL; + + port->caps.type = type; + port->autoneg = false; + + return 0; +} + +static void prestera_modes_to_eth(unsigned long *eth_modes, u64 link_modes, + u8 fec, u8 type) +{ + u32 mode; + + for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { + if ((port_link_modes[mode].pr_mask & link_modes) == 0) + continue; + + if (type != PRESTERA_PORT_TYPE_NONE && + port_link_modes[mode].port_type != type) + continue; + + __set_bit(port_link_modes[mode].eth_mode, eth_modes); + } + + for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) { + if ((port_fec_caps[mode].pr_fec & fec) == 0) + continue; + + __set_bit(port_fec_caps[mode].eth_mode, eth_modes); + } +} + +static void prestera_modes_from_eth(const unsigned long *eth_modes, + u64 *link_modes, u8 *fec, u8 type) +{ + u64 adver_modes = 0; + u32 fec_modes = 0; + u32 mode; + + for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { + if (!test_bit(port_link_modes[mode].eth_mode, eth_modes)) + continue; + + if (port_link_modes[mode].port_type != type) + continue; + + adver_modes |= port_link_modes[mode].pr_mask; + } + + for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) { + if (!test_bit(port_fec_caps[mode].eth_mode, eth_modes)) + continue; + + fec_modes |= port_fec_caps[mode].pr_fec; + } + + *link_modes = adver_modes; + *fec = fec_modes; +} + +static void prestera_port_supp_types_get(struct ethtool_link_ksettings *ecmd, + struct prestera_port *port) +{ + u32 mode; + u8 ptype; + + for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { + if ((port_link_modes[mode].pr_mask & + port->caps.supp_link_modes) == 0) + continue; + + ptype = port_link_modes[mode].port_type; + __set_bit(port_types[ptype].eth_mode, + ecmd->link_modes.supported); + } +} + +static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd, + struct prestera_port *port) +{ + struct prestera_port_phy_state *state = &port->state_phy; + bool asym_pause; + bool pause; + u64 bitmap; + int err; + + err = prestera_hw_port_phy_mode_get(port, NULL, &state->lmode_bmap, + &state->remote_fc.pause, + &state->remote_fc.asym_pause); + if (err) + netdev_warn(port->dev, "Remote link caps get failed %d", + port->caps.transceiver); + + bitmap = state->lmode_bmap; + + prestera_modes_to_eth(ecmd->link_modes.lp_advertising, + bitmap, 0, PRESTERA_PORT_TYPE_NONE); + + if (!bitmap_empty(ecmd->link_modes.lp_advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS)) { + ethtool_link_ksettings_add_link_mode(ecmd, + lp_advertising, + Autoneg); + } + + pause = state->remote_fc.pause; + asym_pause = state->remote_fc.asym_pause; + + if (pause) + ethtool_link_ksettings_add_link_mode(ecmd, + lp_advertising, + Pause); + if (asym_pause) + ethtool_link_ksettings_add_link_mode(ecmd, + lp_advertising, + Asym_Pause); +} + +static void prestera_port_link_mode_get(struct ethtool_link_ksettings 
*ecmd, + struct prestera_port *port) +{ + struct prestera_port_mac_state *state = &port->state_mac; + u32 speed; + u8 duplex; + int err; + + if (!port->state_mac.oper) + return; + + if (state->speed == SPEED_UNKNOWN || state->duplex == DUPLEX_UNKNOWN) { + err = prestera_hw_port_mac_mode_get(port, NULL, &speed, + &duplex, NULL); + if (err) { + state->speed = SPEED_UNKNOWN; + state->duplex = DUPLEX_UNKNOWN; + } else { + state->speed = speed; + state->duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ? + DUPLEX_FULL : DUPLEX_HALF; + } + } + + ecmd->base.speed = port->state_mac.speed; + ecmd->base.duplex = port->state_mac.duplex; +} + +static void prestera_port_mdix_get(struct ethtool_link_ksettings *ecmd, + struct prestera_port *port) +{ + struct prestera_port_phy_state *state = &port->state_phy; + + if (prestera_hw_port_phy_mode_get(port, + &state->mdix, NULL, NULL, NULL)) { + netdev_warn(port->dev, "MDIX params get failed"); + state->mdix = ETH_TP_MDI_INVALID; + } + + ecmd->base.eth_tp_mdix = port->state_phy.mdix; + ecmd->base.eth_tp_mdix_ctrl = port->cfg_phy.mdix; +} + +static int +prestera_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *ecmd) +{ + struct prestera_port *port = netdev_priv(dev); + + ethtool_link_ksettings_zero_link_mode(ecmd, supported); + ethtool_link_ksettings_zero_link_mode(ecmd, advertising); + ethtool_link_ksettings_zero_link_mode(ecmd, lp_advertising); + ecmd->base.speed = SPEED_UNKNOWN; + ecmd->base.duplex = DUPLEX_UNKNOWN; + + if (port->phy_link) + return phylink_ethtool_ksettings_get(port->phy_link, ecmd); + + ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + if (port->caps.type == PRESTERA_PORT_TYPE_TP) { + ethtool_link_ksettings_add_link_mode(ecmd, supported, Autoneg); + + if (netif_running(dev) && + (port->autoneg || + port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)) + ethtool_link_ksettings_add_link_mode(ecmd, advertising, + Autoneg); + } + + prestera_modes_to_eth(ecmd->link_modes.supported, + port->caps.supp_link_modes, + port->caps.supp_fec, + port->caps.type); + + prestera_port_supp_types_get(ecmd, port); + + if (netif_carrier_ok(dev)) + prestera_port_link_mode_get(ecmd, port); + + ecmd->base.port = prestera_port_type_get(port); + + if (port->autoneg) { + if (netif_running(dev)) + prestera_modes_to_eth(ecmd->link_modes.advertising, + port->adver_link_modes, + port->adver_fec, + port->caps.type); + + if (netif_carrier_ok(dev) && + port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) + prestera_port_remote_cap_get(ecmd, port); + } + + if (port->caps.type == PRESTERA_PORT_TYPE_TP && + port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) + prestera_port_mdix_get(ecmd, port); + + return 0; +} + +static int prestera_port_mdix_set(const struct ethtool_link_ksettings *ecmd, + struct prestera_port *port) +{ + if (ecmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_INVALID && + port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER && + port->caps.type == PRESTERA_PORT_TYPE_TP) { + port->cfg_phy.mdix = ecmd->base.eth_tp_mdix_ctrl; + return prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin, + port->autoneg, + port->cfg_phy.mode, + port->adver_link_modes, + port->cfg_phy.mdix); + } + return 0; + +} + +static int prestera_port_link_mode_set(struct prestera_port *port, + u32 speed, u8 duplex, u8 type) +{ + u32 new_mode = PRESTERA_LINK_MODE_MAX; + u32 mode; + int err; + + for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { + if (speed != SPEED_UNKNOWN && + speed != port_link_modes[mode].speed) + continue; + + if 
(duplex != DUPLEX_UNKNOWN && + duplex != port_link_modes[mode].duplex) + continue; + + if (!(port_link_modes[mode].pr_mask & + port->caps.supp_link_modes)) + continue; + + if (type != port_link_modes[mode].port_type) + continue; + + new_mode = mode; + break; + } + + if (new_mode == PRESTERA_LINK_MODE_MAX) + return -EOPNOTSUPP; + + err = prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin, + false, new_mode, 0, + port->cfg_phy.mdix); + if (err) + return err; + + port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF); + port->adver_link_modes = 0; + port->cfg_phy.mode = new_mode; + port->autoneg = false; + + return 0; +} + +static int +prestera_port_speed_duplex_set(const struct ethtool_link_ksettings *ecmd, + struct prestera_port *port) +{ + u8 duplex = DUPLEX_UNKNOWN; + + if (ecmd->base.duplex != DUPLEX_UNKNOWN) + duplex = ecmd->base.duplex == DUPLEX_FULL ? + PRESTERA_PORT_DUPLEX_FULL : PRESTERA_PORT_DUPLEX_HALF; + + return prestera_port_link_mode_set(port, ecmd->base.speed, duplex, + port->caps.type); +} + +static int +prestera_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd) +{ + struct prestera_port *port = netdev_priv(dev); + u64 adver_modes; + u8 adver_fec; + int err; + + if (port->phy_link) + return phylink_ethtool_ksettings_set(port->phy_link, ecmd); + + err = prestera_port_type_set(ecmd, port); + if (err) + return err; + + if (port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) { + err = prestera_port_mdix_set(ecmd, port); + if (err) + return err; + } + + prestera_modes_from_eth(ecmd->link_modes.advertising, &adver_modes, + &adver_fec, port->caps.type); + + if (ecmd->base.autoneg == AUTONEG_ENABLE) + err = prestera_port_autoneg_set(port, adver_modes); + else + err = prestera_port_speed_duplex_set(ecmd, port); + + return err; +} + +static int prestera_ethtool_get_fecparam(struct net_device *dev, + struct ethtool_fecparam *fecparam) +{ + struct prestera_port *port = netdev_priv(dev); + u8 active; + u32 mode; + int err; + + err = prestera_hw_port_mac_mode_get(port, NULL, NULL, NULL, &active); + if (err) + return err; + + fecparam->fec = 0; + + for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) { + if ((port_fec_caps[mode].pr_fec & port->caps.supp_fec) == 0) + continue; + + fecparam->fec |= port_fec_caps[mode].eth_fec; + } + + if (active < PRESTERA_PORT_FEC_MAX) + fecparam->active_fec = port_fec_caps[active].eth_fec; + else + fecparam->active_fec = ETHTOOL_FEC_AUTO; + + return 0; +} + +static int prestera_ethtool_set_fecparam(struct net_device *dev, + struct ethtool_fecparam *fecparam) +{ + struct prestera_port *port = netdev_priv(dev); + struct prestera_port_mac_config cfg_mac; + u32 mode; + u8 fec; + + if (port->autoneg) { + netdev_err(dev, "FEC set is not allowed while autoneg is on\n"); + return -EINVAL; + } + + if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) { + netdev_err(dev, "FEC set is not allowed on non-SFP ports\n"); + return -EINVAL; + } + + fec = PRESTERA_PORT_FEC_MAX; + for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) { + if ((port_fec_caps[mode].eth_fec & fecparam->fec) && + (port_fec_caps[mode].pr_fec & port->caps.supp_fec)) { + fec = mode; + break; + } + } + + prestera_port_cfg_mac_read(port, &cfg_mac); + + if (fec == cfg_mac.fec) + return 0; + + if (fec == PRESTERA_PORT_FEC_MAX) { + netdev_err(dev, "Unsupported FEC requested"); + return -EINVAL; + } + + cfg_mac.fec = fec; + + return prestera_port_cfg_mac_write(port, &cfg_mac); +} + +static int prestera_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + 
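+	/* Only the port-statistics string set is exposed; userspace reads one
+	 * u64 per prestera_port_stats field, e.g. via "ethtool -S <port>"
+	 * (port name illustrative).
+	 */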
switch (sset) { + case ETH_SS_STATS: + return PRESTERA_STATS_CNT; + default: + return -EOPNOTSUPP; + } +} + +static void prestera_ethtool_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + if (stringset != ETH_SS_STATS) + return; + + memcpy(data, prestera_cnt_name, sizeof(prestera_cnt_name)); +} + +static void prestera_ethtool_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct prestera_port *port = netdev_priv(dev); + struct prestera_port_stats *port_stats; + + port_stats = &port->cached_hw_stats.stats; + + memcpy(data, port_stats, sizeof(*port_stats)); +} + +static int prestera_ethtool_nway_reset(struct net_device *dev) +{ + struct prestera_port *port = netdev_priv(dev); + + if (netif_running(dev) && + port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER && + port->caps.type == PRESTERA_PORT_TYPE_TP) + return prestera_hw_port_autoneg_restart(port); + + return -EINVAL; +} + +const struct ethtool_ops prestera_ethtool_ops = { + .get_drvinfo = prestera_ethtool_get_drvinfo, + .get_link_ksettings = prestera_ethtool_get_link_ksettings, + .set_link_ksettings = prestera_ethtool_set_link_ksettings, + .get_fecparam = prestera_ethtool_get_fecparam, + .set_fecparam = prestera_ethtool_set_fecparam, + .get_sset_count = prestera_ethtool_get_sset_count, + .get_strings = prestera_ethtool_get_strings, + .get_ethtool_stats = prestera_ethtool_get_stats, + .get_link = ethtool_op_get_link, + .nway_reset = prestera_ethtool_nway_reset +}; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h new file mode 100644 index 0000000000..bd5600886b --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */ + +#ifndef __PRESTERA_ETHTOOL_H_ +#define __PRESTERA_ETHTOOL_H_ + +#include <linux/ethtool.h> + +struct prestera_port_event; +struct prestera_port; + +extern const struct ethtool_ops prestera_ethtool_ops; + +#endif /* _PRESTERA_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c new file mode 100644 index 0000000000..9f4267f326 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2020 Marvell International Ltd. 
All rights reserved */ + +#include <linux/kernel.h> +#include <linux/list.h> + +#include "prestera.h" +#include "prestera_acl.h" +#include "prestera_flow.h" +#include "prestera_flower.h" +#include "prestera_matchall.h" +#include "prestera_span.h" + +static LIST_HEAD(prestera_block_cb_list); + +static int prestera_flow_block_mall_cb(struct prestera_flow_block *block, + struct tc_cls_matchall_offload *f) +{ + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: + return prestera_mall_replace(block, f); + case TC_CLSMATCHALL_DESTROY: + prestera_mall_destroy(block); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int prestera_flow_block_flower_cb(struct prestera_flow_block *block, + struct flow_cls_offload *f) +{ + switch (f->command) { + case FLOW_CLS_REPLACE: + return prestera_flower_replace(block, f); + case FLOW_CLS_DESTROY: + prestera_flower_destroy(block, f); + return 0; + case FLOW_CLS_STATS: + return prestera_flower_stats(block, f); + case FLOW_CLS_TMPLT_CREATE: + return prestera_flower_tmplt_create(block, f); + case FLOW_CLS_TMPLT_DESTROY: + prestera_flower_tmplt_destroy(block, f); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int prestera_flow_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct prestera_flow_block *block = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return prestera_flow_block_flower_cb(block, type_data); + case TC_SETUP_CLSMATCHALL: + return prestera_flow_block_mall_cb(block, type_data); + default: + return -EOPNOTSUPP; + } +} + +static void prestera_flow_block_destroy(void *cb_priv) +{ + struct prestera_flow_block *block = cb_priv; + + prestera_flower_template_cleanup(block); + + WARN_ON(!list_empty(&block->template_list)); + WARN_ON(!list_empty(&block->binding_list)); + + kfree(block); +} + +static struct prestera_flow_block * +prestera_flow_block_create(struct prestera_switch *sw, + struct net *net, + bool ingress) +{ + struct prestera_flow_block *block; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return NULL; + + INIT_LIST_HEAD(&block->binding_list); + INIT_LIST_HEAD(&block->template_list); + block->net = net; + block->sw = sw; + block->mall.prio_min = UINT_MAX; + block->mall.prio_max = 0; + block->mall.bound = false; + block->ingress = ingress; + + return block; +} + +static void prestera_flow_block_release(void *cb_priv) +{ + struct prestera_flow_block *block = cb_priv; + + prestera_flow_block_destroy(block); +} + +static bool +prestera_flow_block_is_bound(const struct prestera_flow_block *block) +{ + return block->ruleset_zero; +} + +static struct prestera_flow_block_binding * +prestera_flow_block_lookup(struct prestera_flow_block *block, + struct prestera_port *port) +{ + struct prestera_flow_block_binding *binding; + + list_for_each_entry(binding, &block->binding_list, list) + if (binding->port == port) + return binding; + + return NULL; +} + +static int prestera_flow_block_bind(struct prestera_flow_block *block, + struct prestera_port *port) +{ + struct prestera_flow_block_binding *binding; + int err; + + binding = kzalloc(sizeof(*binding), GFP_KERNEL); + if (!binding) + return -ENOMEM; + + binding->span_id = PRESTERA_SPAN_INVALID_ID; + binding->port = port; + + if (prestera_flow_block_is_bound(block)) { + err = prestera_acl_ruleset_bind(block->ruleset_zero, port); + if (err) + goto err_ruleset_bind; + } + + list_add(&binding->list, &block->binding_list); + return 0; + +err_ruleset_bind: + kfree(binding); + return err; +} + +static int 
prestera_flow_block_unbind(struct prestera_flow_block *block, + struct prestera_port *port) +{ + struct prestera_flow_block_binding *binding; + + binding = prestera_flow_block_lookup(block, port); + if (!binding) + return -ENOENT; + + list_del(&binding->list); + + if (prestera_flow_block_is_bound(block)) + prestera_acl_ruleset_unbind(block->ruleset_zero, port); + + kfree(binding); + return 0; +} + +static struct prestera_flow_block * +prestera_flow_block_get(struct prestera_switch *sw, + struct flow_block_offload *f, + bool *register_block, + bool ingress) +{ + struct prestera_flow_block *block; + struct flow_block_cb *block_cb; + + block_cb = flow_block_cb_lookup(f->block, + prestera_flow_block_cb, sw); + if (!block_cb) { + block = prestera_flow_block_create(sw, f->net, ingress); + if (!block) + return ERR_PTR(-ENOMEM); + + block_cb = flow_block_cb_alloc(prestera_flow_block_cb, + sw, block, + prestera_flow_block_release); + if (IS_ERR(block_cb)) { + prestera_flow_block_destroy(block); + return ERR_CAST(block_cb); + } + + block->block_cb = block_cb; + *register_block = true; + } else { + block = flow_block_cb_priv(block_cb); + *register_block = false; + } + + flow_block_cb_incref(block_cb); + + return block; +} + +static void prestera_flow_block_put(struct prestera_flow_block *block) +{ + struct flow_block_cb *block_cb = block->block_cb; + + if (flow_block_cb_decref(block_cb)) + return; + + flow_block_cb_free(block_cb); + prestera_flow_block_destroy(block); +} + +static int prestera_setup_flow_block_bind(struct prestera_port *port, + struct flow_block_offload *f, bool ingress) +{ + struct prestera_switch *sw = port->sw; + struct prestera_flow_block *block; + struct flow_block_cb *block_cb; + bool register_block; + int err; + + block = prestera_flow_block_get(sw, f, ®ister_block, ingress); + if (IS_ERR(block)) + return PTR_ERR(block); + + block_cb = block->block_cb; + + err = prestera_flow_block_bind(block, port); + if (err) + goto err_block_bind; + + if (register_block) { + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, &prestera_block_cb_list); + } + + if (ingress) + port->ingress_flow_block = block; + else + port->egress_flow_block = block; + + return 0; + +err_block_bind: + prestera_flow_block_put(block); + + return err; +} + +static void prestera_setup_flow_block_unbind(struct prestera_port *port, + struct flow_block_offload *f, bool ingress) +{ + struct prestera_switch *sw = port->sw; + struct prestera_flow_block *block; + struct flow_block_cb *block_cb; + int err; + + block_cb = flow_block_cb_lookup(f->block, prestera_flow_block_cb, sw); + if (!block_cb) + return; + + block = flow_block_cb_priv(block_cb); + + prestera_mall_destroy(block); + + err = prestera_flow_block_unbind(block, port); + if (err) + goto error; + + if (!flow_block_cb_decref(block_cb)) { + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + } +error: + if (ingress) + port->ingress_flow_block = NULL; + else + port->egress_flow_block = NULL; +} + +static int prestera_setup_flow_block_clsact(struct prestera_port *port, + struct flow_block_offload *f, + bool ingress) +{ + f->driver_block_list = &prestera_block_cb_list; + + switch (f->command) { + case FLOW_BLOCK_BIND: + return prestera_setup_flow_block_bind(port, f, ingress); + case FLOW_BLOCK_UNBIND: + prestera_setup_flow_block_unbind(port, f, ingress); + return 0; + default: + return -EOPNOTSUPP; + } +} + +int prestera_flow_block_setup(struct prestera_port *port, + struct flow_block_offload *f) +{ + switch (f->binder_type) 
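+	/* only clsact ingress/egress block bindings can be offloaded */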
{ + case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS: + return prestera_setup_flow_block_clsact(port, f, true); + case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: + return prestera_setup_flow_block_clsact(port, f, false); + default: + return -EOPNOTSUPP; + } +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h new file mode 100644 index 0000000000..a85a3eb402 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_FLOW_H_ +#define _PRESTERA_FLOW_H_ + +#include <net/flow_offload.h> + +struct prestera_port; +struct prestera_switch; + +struct prestera_flow_block_binding { + struct list_head list; + struct prestera_port *port; + int span_id; +}; + +struct prestera_flow_block { + struct list_head binding_list; + struct prestera_switch *sw; + struct net *net; + struct prestera_acl_ruleset *ruleset_zero; + struct flow_block_cb *block_cb; + struct list_head template_list; + struct { + u32 prio_min; + u32 prio_max; + bool bound; + } mall; + unsigned int rule_count; + bool ingress; +}; + +int prestera_flow_block_setup(struct prestera_port *port, + struct flow_block_offload *f); + +#endif /* _PRESTERA_FLOW_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c new file mode 100644 index 0000000000..8b9455d8a4 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -0,0 +1,581 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */ + +#include "prestera.h" +#include "prestera_acl.h" +#include "prestera_flow.h" +#include "prestera_flower.h" +#include "prestera_matchall.h" + +struct prestera_flower_template { + struct prestera_acl_ruleset *ruleset; + struct list_head list; + u32 chain_index; +}; + +static void +prestera_flower_template_free(struct prestera_flower_template *template) +{ + prestera_acl_ruleset_put(template->ruleset); + list_del(&template->list); + kfree(template); +} + +void prestera_flower_template_cleanup(struct prestera_flow_block *block) +{ + struct prestera_flower_template *template, *tmp; + + /* put the reference to all rulesets kept in tmpl create */ + list_for_each_entry_safe(template, tmp, &block->template_list, list) + prestera_flower_template_free(template); +} + +static int +prestera_flower_parse_goto_action(struct prestera_flow_block *block, + struct prestera_acl_rule *rule, + u32 chain_index, + const struct flow_action_entry *act) +{ + struct prestera_acl_ruleset *ruleset; + + if (act->chain_index <= chain_index) + /* we can jump only forward */ + return -EINVAL; + + if (rule->re_arg.jump.valid) + return -EEXIST; + + ruleset = prestera_acl_ruleset_get(block->sw->acl, block, + act->chain_index); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + rule->re_arg.jump.valid = 1; + rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset); + + rule->jump_ruleset = ruleset; + + return 0; +} + +static int prestera_flower_parse_actions(struct prestera_flow_block *block, + struct prestera_acl_rule *rule, + struct flow_action *flow_action, + u32 chain_index, + struct netlink_ext_ack *extack) +{ + const struct flow_action_entry *act; + int err, i; + + /* whole struct (rule->re_arg) must be initialized with 0 */ + if (!flow_action_has_entries(flow_action)) + 
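+		/* no action entries: rule->re_arg stays all-zero, so the rule
+		 * is accepted as match-only with no actions programmed
+		 */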
return 0; + + if (!flow_action_mixed_hw_stats_check(flow_action, extack)) + return -EOPNOTSUPP; + + act = flow_action_first_entry_get(flow_action); + if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) { + /* Nothing to do */ + } else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) { + /* setup counter first */ + rule->re_arg.count.valid = true; + err = prestera_acl_chain_to_client(chain_index, block->ingress, + &rule->re_arg.count.client); + if (err) + return err; + } else { + NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type"); + return -EOPNOTSUPP; + } + + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_ACCEPT: + if (rule->re_arg.accept.valid) + return -EEXIST; + + rule->re_arg.accept.valid = 1; + break; + case FLOW_ACTION_DROP: + if (rule->re_arg.drop.valid) + return -EEXIST; + + rule->re_arg.drop.valid = 1; + break; + case FLOW_ACTION_TRAP: + if (rule->re_arg.trap.valid) + return -EEXIST; + + rule->re_arg.trap.valid = 1; + break; + case FLOW_ACTION_POLICE: + if (rule->re_arg.police.valid) + return -EEXIST; + + rule->re_arg.police.valid = 1; + rule->re_arg.police.rate = + act->police.rate_bytes_ps; + rule->re_arg.police.burst = act->police.burst; + rule->re_arg.police.ingress = block->ingress; + break; + case FLOW_ACTION_GOTO: + err = prestera_flower_parse_goto_action(block, rule, + chain_index, + act); + if (err) + return err; + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); + pr_err("Unsupported action\n"); + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int prestera_flower_parse_meta(struct prestera_acl_rule *rule, + struct flow_cls_offload *f, + struct prestera_flow_block *block) +{ + struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); + struct prestera_acl_match *r_match = &rule->re_key.match; + struct prestera_port *port; + struct net_device *ingress_dev; + struct flow_match_meta match; + __be16 key, mask; + + flow_rule_match_meta(f_rule, &match); + + if (match.mask->l2_miss) { + NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on \"l2_miss\""); + return -EOPNOTSUPP; + } + + if (match.mask->ingress_ifindex != 0xFFFFFFFF) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Unsupported ingress ifindex mask"); + return -EINVAL; + } + + ingress_dev = __dev_get_by_index(block->net, + match.key->ingress_ifindex); + if (!ingress_dev) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Can't find specified ingress port to match on"); + return -EINVAL; + } + + if (!prestera_netdev_check(ingress_dev)) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Can't match on switchdev ingress port"); + return -EINVAL; + } + port = netdev_priv(ingress_dev); + + mask = htons(0x1FFF << 3); + key = htons(port->hw_id << 3); + rule_match_set(r_match->key, SYS_PORT, key); + rule_match_set(r_match->mask, SYS_PORT, mask); + + mask = htons(0x3FF); + key = htons(port->dev_id); + rule_match_set(r_match->key, SYS_DEV, key); + rule_match_set(r_match->mask, SYS_DEV, mask); + + return 0; +} + +static int prestera_flower_parse(struct prestera_flow_block *block, + struct prestera_acl_rule *rule, + struct flow_cls_offload *f) +{ + struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); + struct flow_dissector *dissector = f_rule->match.dissector; + struct prestera_acl_match *r_match = &rule->re_key.match; + __be16 n_proto_mask = 0; + __be16 n_proto_key = 0; + u16 addr_type = 0; + u8 ip_proto = 0; + int err; + + if (dissector->used_keys & + ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | 
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS_RANGE) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) { + NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key"); + return -EOPNOTSUPP; + } + + prestera_acl_rule_priority_set(rule, f->common.prio); + + if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) { + err = prestera_flower_parse_meta(rule, f, block); + if (err) + return err; + } + + if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(f_rule, &match); + addr_type = match.key->addr_type; + } + + if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(f_rule, &match); + n_proto_key = match.key->n_proto; + n_proto_mask = match.mask->n_proto; + + if (ntohs(match.key->n_proto) == ETH_P_ALL) { + n_proto_key = 0; + n_proto_mask = 0; + } + + rule_match_set(r_match->key, ETH_TYPE, n_proto_key); + rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask); + + rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto); + rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto); + ip_proto = match.key->ip_proto; + } + + if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(f_rule, &match); + + /* DA key, mask */ + rule_match_set_n(r_match->key, + ETH_DMAC_0, &match.key->dst[0], 4); + rule_match_set_n(r_match->key, + ETH_DMAC_1, &match.key->dst[4], 2); + + rule_match_set_n(r_match->mask, + ETH_DMAC_0, &match.mask->dst[0], 4); + rule_match_set_n(r_match->mask, + ETH_DMAC_1, &match.mask->dst[4], 2); + + /* SA key, mask */ + rule_match_set_n(r_match->key, + ETH_SMAC_0, &match.key->src[0], 4); + rule_match_set_n(r_match->key, + ETH_SMAC_1, &match.key->src[4], 2); + + rule_match_set_n(r_match->mask, + ETH_SMAC_0, &match.mask->src[0], 4); + rule_match_set_n(r_match->mask, + ETH_SMAC_1, &match.mask->src[4], 2); + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(f_rule, &match); + + rule_match_set(r_match->key, IP_SRC, match.key->src); + rule_match_set(r_match->mask, IP_SRC, match.mask->src); + + rule_match_set(r_match->key, IP_DST, match.key->dst); + rule_match_set(r_match->mask, IP_DST, match.mask->dst); + } + + if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { + NL_SET_ERR_MSG_MOD + (f->common.extack, + "Only UDP and TCP keys are supported"); + return -EINVAL; + } + + flow_rule_match_ports(f_rule, &match); + + rule_match_set(r_match->key, L4_PORT_SRC, match.key->src); + rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src); + + rule_match_set(r_match->key, L4_PORT_DST, match.key->dst); + rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst); + } + + if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS_RANGE)) { + struct flow_match_ports_range match; + __be32 tp_key, tp_mask; + + flow_rule_match_ports_range(f_rule, &match); + + /* src port range (min, max) */ + tp_key = htonl(ntohs(match.key->tp_min.src) | + (ntohs(match.key->tp_max.src) << 16)); + tp_mask = htonl(ntohs(match.mask->tp_min.src) | + (ntohs(match.mask->tp_max.src) << 16)); + rule_match_set(r_match->key, L4_PORT_RANGE_SRC, tp_key); + 
rule_match_set(r_match->mask, L4_PORT_RANGE_SRC, tp_mask);
+
+ /* dst port range (min, max) */
+ tp_key = htonl(ntohs(match.key->tp_min.dst) |
+ (ntohs(match.key->tp_max.dst) << 16));
+ tp_mask = htonl(ntohs(match.mask->tp_min.dst) |
+ (ntohs(match.mask->tp_max.dst) << 16));
+ rule_match_set(r_match->key, L4_PORT_RANGE_DST, tp_key);
+ rule_match_set(r_match->mask, L4_PORT_RANGE_DST, tp_mask);
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(f_rule, &match);
+
+ if (match.mask->vlan_id != 0) {
+ __be16 key = cpu_to_be16(match.key->vlan_id);
+ __be16 mask = cpu_to_be16(match.mask->vlan_id);
+
+ rule_match_set(r_match->key, VLAN_ID, key);
+ rule_match_set(r_match->mask, VLAN_ID, mask);
+ }
+
+ rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
+ rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
+ struct flow_match_icmp match;
+
+ flow_rule_match_icmp(f_rule, &match);
+
+ rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
+ rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);
+
+ rule_match_set(r_match->key, ICMP_CODE, match.key->code);
+ rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
+ }
+
+ return prestera_flower_parse_actions(block, rule, &f->rule->action,
+ f->common.chain_index,
+ f->common.extack);
+}
+
+static int prestera_flower_prio_check(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ u32 mall_prio_min;
+ u32 mall_prio_max;
+ int err;
+
+ err = prestera_mall_prio_get(block, &mall_prio_min, &mall_prio_max);
+ if (err == -ENOENT)
+ /* No matchall filters installed on this chain. */
+ return 0;
+
+ if (err) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
+ return err;
+ }
+
+ if (f->common.prio <= mall_prio_max && block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Failed to add in front of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ if (f->common.prio >= mall_prio_min && !block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
+ u32 *prio_min, u32 *prio_max)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ prestera_acl_ruleset_prio_get(ruleset, prio_min, prio_max);
+ return 0;
+}
+
+int prestera_flower_replace(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl *acl = block->sw->acl;
+ struct prestera_acl_rule *rule;
+ int err;
+
+ err = prestera_flower_prio_check(block, f);
+ if (err)
+ return err;
+
+ ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ /* increments the ruleset reference */
+ rule = prestera_acl_rule_create(ruleset, f->cookie,
+ f->common.chain_index);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_rule_create;
+ }
+
+ err = prestera_flower_parse(block, rule, f);
+ if (err)
+ goto err_rule_add;
+
+ if (!prestera_acl_ruleset_is_offload(ruleset)) {
+ err = prestera_acl_ruleset_offload(ruleset);
+ if (err)
+ goto err_ruleset_offload;
+ }
+
+ err = prestera_acl_rule_add(block->sw, rule);
+ if (err)
+ goto err_rule_add;
+
+ prestera_acl_ruleset_put(ruleset);
+
+ return 0;
+
+err_ruleset_offload:
+err_rule_add:
+ prestera_acl_rule_destroy(rule);
+err_rule_create:
+ prestera_acl_ruleset_put(ruleset);
+ return err;
+}
+
+void prestera_flower_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_rule *rule;
+
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
+ f->common.chain_index);
+ if (IS_ERR(ruleset))
+ return;
+
+ rule = prestera_acl_rule_lookup(ruleset, f->cookie);
+ if (rule) {
+ prestera_acl_rule_del(block->sw, rule);
+ prestera_acl_rule_destroy(rule);
+ }
+ prestera_acl_ruleset_put(ruleset);
+}
+
+int prestera_flower_tmplt_create(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_flower_template *template;
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_rule rule;
+ int err;
+
+ memset(&rule, 0, sizeof(rule));
+ err = prestera_flower_parse(block, &rule, f);
+ if (err)
+ return err;
+
+ template = kmalloc(sizeof(*template), GFP_KERNEL);
+ if (!template) {
+ err = -ENOMEM;
+ goto err_malloc;
+ }
+
+ prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
+ f->common.chain_index);
+ if (IS_ERR_OR_NULL(ruleset)) {
+ err = -EINVAL;
+ goto err_ruleset_get;
+ }
+
+ /* preserve keymask/template to this ruleset */
+ err = prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+ if (err)
+ goto err_ruleset_keymask_set;
+
+ /* Ignore the error here: a template operation cannot be rejected,
+ * so keep the reference to the ruleset for rules to be added to
+ * that ruleset later. If the offload fails, the ruleset will be
+ * offloaded again when a new rule is added. It is also unlikely
+ * that the ruleset is already offloaded at this stage.
+ */ + prestera_acl_ruleset_offload(ruleset); + + /* keep the reference to the ruleset */ + template->ruleset = ruleset; + template->chain_index = f->common.chain_index; + list_add_rcu(&template->list, &block->template_list); + return 0; + +err_ruleset_keymask_set: + prestera_acl_ruleset_put(ruleset); +err_ruleset_get: + kfree(template); +err_malloc: + NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed"); + return err; +} + +void prestera_flower_tmplt_destroy(struct prestera_flow_block *block, + struct flow_cls_offload *f) +{ + struct prestera_flower_template *template, *tmp; + + list_for_each_entry_safe(template, tmp, &block->template_list, list) + if (template->chain_index == f->common.chain_index) { + /* put the reference to the ruleset kept in create */ + prestera_flower_template_free(template); + return; + } +} + +int prestera_flower_stats(struct prestera_flow_block *block, + struct flow_cls_offload *f) +{ + struct prestera_acl_ruleset *ruleset; + struct prestera_acl_rule *rule; + u64 packets; + u64 lastuse; + u64 bytes; + int err; + + ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, + f->common.chain_index); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + rule = prestera_acl_rule_lookup(ruleset, f->cookie); + if (!rule) { + err = -EINVAL; + goto err_rule_get_stats; + } + + err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets, + &bytes, &lastuse); + if (err) + goto err_rule_get_stats; + + flow_stats_update(&f->stats, bytes, packets, 0, lastuse, + FLOW_ACTION_HW_STATS_DELAYED); + +err_rule_get_stats: + prestera_acl_ruleset_put(ruleset); + return err; +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h new file mode 100644 index 0000000000..1181115fe6 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_FLOWER_H_ +#define _PRESTERA_FLOWER_H_ + +#include <net/pkt_cls.h> + +struct prestera_flow_block; + +int prestera_flower_replace(struct prestera_flow_block *block, + struct flow_cls_offload *f); +void prestera_flower_destroy(struct prestera_flow_block *block, + struct flow_cls_offload *f); +int prestera_flower_stats(struct prestera_flow_block *block, + struct flow_cls_offload *f); +int prestera_flower_tmplt_create(struct prestera_flow_block *block, + struct flow_cls_offload *f); +void prestera_flower_tmplt_destroy(struct prestera_flow_block *block, + struct flow_cls_offload *f); +void prestera_flower_template_cleanup(struct prestera_flow_block *block); +int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index, + u32 *prio_min, u32 *prio_max); + +#endif /* _PRESTERA_FLOWER_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c new file mode 100644 index 0000000000..fc6f7d2746 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c @@ -0,0 +1,2561 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. 
All rights reserved */ + +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <linux/ethtool.h> +#include <linux/list.h> + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_acl.h" +#include "prestera_counter.h" +#include "prestera_router_hw.h" + +#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000) + +#define PRESTERA_MIN_MTU 64 + +#define PRESTERA_MSG_CHUNK_SIZE 1024 + +enum prestera_cmd_type_t { + PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1, + PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2, + + PRESTERA_CMD_TYPE_PORT_ATTR_SET = 0x100, + PRESTERA_CMD_TYPE_PORT_ATTR_GET = 0x101, + PRESTERA_CMD_TYPE_PORT_INFO_GET = 0x110, + + PRESTERA_CMD_TYPE_VLAN_CREATE = 0x200, + PRESTERA_CMD_TYPE_VLAN_DELETE = 0x201, + PRESTERA_CMD_TYPE_VLAN_PORT_SET = 0x202, + PRESTERA_CMD_TYPE_VLAN_PVID_SET = 0x203, + + PRESTERA_CMD_TYPE_FDB_ADD = 0x300, + PRESTERA_CMD_TYPE_FDB_DELETE = 0x301, + PRESTERA_CMD_TYPE_FDB_FLUSH_PORT = 0x310, + PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN = 0x311, + PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN = 0x312, + + PRESTERA_CMD_TYPE_BRIDGE_CREATE = 0x400, + PRESTERA_CMD_TYPE_BRIDGE_DELETE = 0x401, + PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402, + PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403, + + PRESTERA_CMD_TYPE_COUNTER_GET = 0x510, + PRESTERA_CMD_TYPE_COUNTER_ABORT = 0x511, + PRESTERA_CMD_TYPE_COUNTER_TRIGGER = 0x512, + PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET = 0x513, + PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE = 0x514, + PRESTERA_CMD_TYPE_COUNTER_CLEAR = 0x515, + + PRESTERA_CMD_TYPE_VTCAM_CREATE = 0x540, + PRESTERA_CMD_TYPE_VTCAM_DESTROY = 0x541, + PRESTERA_CMD_TYPE_VTCAM_RULE_ADD = 0x550, + PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE = 0x551, + PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND = 0x560, + PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND = 0x561, + + PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600, + PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601, + PRESTERA_CMD_TYPE_ROUTER_LPM_ADD = 0x610, + PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE = 0x611, + PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET = 0x622, + PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET = 0x645, + PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD = 0x623, + PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE = 0x624, + PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630, + PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631, + + PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE = 0x700, + PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY = 0x701, + PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET = 0x702, + PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET = 0x703, + + PRESTERA_CMD_TYPE_MDB_CREATE = 0x704, + PRESTERA_CMD_TYPE_MDB_DESTROY = 0x705, + + PRESTERA_CMD_TYPE_RXTX_INIT = 0x800, + + PRESTERA_CMD_TYPE_LAG_MEMBER_ADD = 0x900, + PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE = 0x901, + PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE = 0x902, + PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE = 0x903, + + PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000, + + PRESTERA_CMD_TYPE_SPAN_GET = 0x1100, + PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND = 0x1101, + PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND = 0x1102, + PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103, + PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND = 0x1104, + PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND = 0x1105, + + PRESTERA_CMD_TYPE_POLICER_CREATE = 0x1500, + PRESTERA_CMD_TYPE_POLICER_RELEASE = 0x1501, + PRESTERA_CMD_TYPE_POLICER_SET = 0x1502, + + PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET = 0x2000, + + PRESTERA_CMD_TYPE_ACK = 0x10000, + PRESTERA_CMD_TYPE_MAX +}; + +enum { + PRESTERA_CMD_PORT_ATTR_ADMIN_STATE = 1, + PRESTERA_CMD_PORT_ATTR_MTU = 3, + PRESTERA_CMD_PORT_ATTR_MAC = 4, + PRESTERA_CMD_PORT_ATTR_SPEED = 5, + PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE = 6, + 
PRESTERA_CMD_PORT_ATTR_LEARNING = 7, + PRESTERA_CMD_PORT_ATTR_FLOOD = 8, + PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9, + PRESTERA_CMD_PORT_ATTR_LOCKED = 10, + PRESTERA_CMD_PORT_ATTR_PHY_MODE = 12, + PRESTERA_CMD_PORT_ATTR_TYPE = 13, + PRESTERA_CMD_PORT_ATTR_STATS = 17, + PRESTERA_CMD_PORT_ATTR_MAC_AUTONEG_RESTART = 18, + PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART = 19, + PRESTERA_CMD_PORT_ATTR_MAC_MODE = 22, +}; + +enum { + PRESTERA_CMD_SWITCH_ATTR_MAC = 1, + PRESTERA_CMD_SWITCH_ATTR_AGEING = 2, +}; + +enum { + PRESTERA_CMD_ACK_OK, + PRESTERA_CMD_ACK_FAILED, + + PRESTERA_CMD_ACK_MAX +}; + +enum { + PRESTERA_PORT_TP_NA, + PRESTERA_PORT_TP_MDI, + PRESTERA_PORT_TP_MDIX, + PRESTERA_PORT_TP_AUTO, +}; + +enum { + PRESTERA_PORT_FLOOD_TYPE_UC = 0, + PRESTERA_PORT_FLOOD_TYPE_MC = 1, +}; + +enum { + PRESTERA_PORT_GOOD_OCTETS_RCV_CNT, + PRESTERA_PORT_BAD_OCTETS_RCV_CNT, + PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT, + PRESTERA_PORT_BRDC_PKTS_RCV_CNT, + PRESTERA_PORT_MC_PKTS_RCV_CNT, + PRESTERA_PORT_PKTS_64L_CNT, + PRESTERA_PORT_PKTS_65TO127L_CNT, + PRESTERA_PORT_PKTS_128TO255L_CNT, + PRESTERA_PORT_PKTS_256TO511L_CNT, + PRESTERA_PORT_PKTS_512TO1023L_CNT, + PRESTERA_PORT_PKTS_1024TOMAXL_CNT, + PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT, + PRESTERA_PORT_MC_PKTS_SENT_CNT, + PRESTERA_PORT_BRDC_PKTS_SENT_CNT, + PRESTERA_PORT_FC_SENT_CNT, + PRESTERA_PORT_GOOD_FC_RCV_CNT, + PRESTERA_PORT_DROP_EVENTS_CNT, + PRESTERA_PORT_UNDERSIZE_PKTS_CNT, + PRESTERA_PORT_FRAGMENTS_PKTS_CNT, + PRESTERA_PORT_OVERSIZE_PKTS_CNT, + PRESTERA_PORT_JABBER_PKTS_CNT, + PRESTERA_PORT_MAC_RCV_ERROR_CNT, + PRESTERA_PORT_BAD_CRC_CNT, + PRESTERA_PORT_COLLISIONS_CNT, + PRESTERA_PORT_LATE_COLLISIONS_CNT, + PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT, + PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT, + PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT, + PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT, + PRESTERA_PORT_GOOD_OCTETS_SENT_CNT, + + PRESTERA_PORT_CNT_MAX +}; + +enum { + PRESTERA_FC_NONE, + PRESTERA_FC_SYMMETRIC, + PRESTERA_FC_ASYMMETRIC, + PRESTERA_FC_SYMM_ASYMM, +}; + +enum { + PRESTERA_POLICER_MODE_SR_TCM +}; + +enum { + PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT = 0, + PRESTERA_HW_FDB_ENTRY_TYPE_LAG = 1, + PRESTERA_HW_FDB_ENTRY_TYPE_MAX = 2, +}; + +struct prestera_fw_event_handler { + struct list_head list; + struct rcu_head rcu; + enum prestera_event_type type; + prestera_event_cb_t func; + void *arg; +}; + +enum { + PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_REG_PORT = 0, + PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG = 1, + PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_MAX = 2, +}; + +struct prestera_msg_cmd { + __le32 type; +}; + +struct prestera_msg_ret { + struct prestera_msg_cmd cmd; + __le32 status; +}; + +struct prestera_msg_common_req { + struct prestera_msg_cmd cmd; +}; + +struct prestera_msg_common_resp { + struct prestera_msg_ret ret; +}; + +struct prestera_msg_switch_attr_req { + struct prestera_msg_cmd cmd; + __le32 attr; + union { + __le32 ageing_timeout_ms; + struct { + u8 mac[ETH_ALEN]; + u8 __pad[2]; + }; + } param; +}; + +struct prestera_msg_switch_init_resp { + struct prestera_msg_ret ret; + __le32 port_count; + __le32 mtu_max; + __le32 size_tbl_router_nexthop; + u8 switch_id; + u8 lag_max; + u8 lag_member_max; +}; + +struct prestera_msg_event_port_param { + union { + struct { + __le32 mode; + __le32 speed; + u8 oper; + u8 duplex; + u8 fc; + u8 fec; + } mac; + struct { + __le64 lmode_bmap; + u8 mdix; + u8 fc; + u8 __pad[2]; + } __packed phy; /* make sure always 12 bytes size */ + }; +}; + +struct prestera_msg_port_cap_param { + __le64 link_mode; + u8 type; + u8 fec; + u8 fc; + u8 transceiver; +}; + 
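+/* Note on the firmware message layout below: all multi-byte fields are
+ * little-endian on the wire (__le16/__le32/__le64) and callers convert
+ * explicitly, e.g. req.mtu = __cpu_to_le32(mtu); explicit __pad members
+ * keep the struct sizes fixed, and prestera_hw_build_tests() asserts
+ * the expected sizes at compile time.
+ */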
+struct prestera_msg_port_flood_param { + u8 type; + u8 enable; + u8 __pad[2]; +}; + +union prestera_msg_port_param { + __le32 mtu; + __le32 speed; + __le32 link_mode; + u8 admin_state; + u8 oper_state; + u8 mac[ETH_ALEN]; + u8 accept_frm_type; + u8 learning; + u8 flood; + u8 type; + u8 duplex; + u8 fec; + u8 fc; + u8 br_locked; + union { + struct { + u8 admin; + u8 fc; + u8 ap_enable; + u8 __reserved[5]; + union { + struct { + __le32 mode; + __le32 speed; + u8 inband; + u8 duplex; + u8 fec; + u8 fec_supp; + } reg_mode; + struct { + __le32 mode; + __le32 speed; + u8 fec; + u8 fec_supp; + u8 __pad[2]; + } ap_modes[PRESTERA_AP_PORT_MAX]; + }; + } mac; + struct { + __le64 modes; + __le32 mode; + u8 admin; + u8 adv_enable; + u8 mdix; + u8 __pad; + } phy; + } link; + + struct prestera_msg_port_cap_param cap; + struct prestera_msg_port_flood_param flood_ext; + struct prestera_msg_event_port_param link_evt; +}; + +struct prestera_msg_port_attr_req { + struct prestera_msg_cmd cmd; + __le32 attr; + __le32 port; + __le32 dev; + union prestera_msg_port_param param; +}; + +struct prestera_msg_port_attr_resp { + struct prestera_msg_ret ret; + union prestera_msg_port_param param; +}; + +struct prestera_msg_port_stats_resp { + struct prestera_msg_ret ret; + __le64 stats[PRESTERA_PORT_CNT_MAX]; +}; + +struct prestera_msg_port_info_req { + struct prestera_msg_cmd cmd; + __le32 port; +}; + +struct prestera_msg_port_info_resp { + struct prestera_msg_ret ret; + __le32 hw_id; + __le32 dev_id; + __le16 fp_id; + u8 pad[2]; +}; + +struct prestera_msg_vlan_req { + struct prestera_msg_cmd cmd; + __le32 port; + __le32 dev; + __le16 vid; + u8 is_member; + u8 is_tagged; +}; + +struct prestera_msg_fdb_req { + struct prestera_msg_cmd cmd; + __le32 flush_mode; + union { + struct { + __le32 port; + __le32 dev; + }; + __le16 lag_id; + } dest; + __le16 vid; + u8 dest_type; + u8 dynamic; + u8 mac[ETH_ALEN]; + u8 __pad[2]; +}; + +struct prestera_msg_bridge_req { + struct prestera_msg_cmd cmd; + __le32 port; + __le32 dev; + __le16 bridge; + u8 pad[2]; +}; + +struct prestera_msg_bridge_resp { + struct prestera_msg_ret ret; + __le16 bridge; + u8 pad[2]; +}; + +struct prestera_msg_vtcam_create_req { + struct prestera_msg_cmd cmd; + __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + u8 direction; + u8 lookup; + u8 pad[2]; +}; + +struct prestera_msg_vtcam_destroy_req { + struct prestera_msg_cmd cmd; + __le32 vtcam_id; +}; + +struct prestera_msg_vtcam_rule_add_req { + struct prestera_msg_cmd cmd; + __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + __le32 vtcam_id; + __le32 prio; + __le32 n_act; +}; + +struct prestera_msg_vtcam_rule_del_req { + struct prestera_msg_cmd cmd; + __le32 vtcam_id; + __le32 id; +}; + +struct prestera_msg_vtcam_bind_req { + struct prestera_msg_cmd cmd; + union { + struct { + __le32 hw_id; + __le32 dev_id; + } port; + __le32 index; + }; + __le32 vtcam_id; + __le16 pcl_id; + __le16 type; +}; + +struct prestera_msg_vtcam_resp { + struct prestera_msg_ret ret; + __le32 vtcam_id; + __le32 rule_id; +}; + +struct prestera_msg_acl_action { + __le32 id; + __le32 __reserved; + union { + struct { + __le32 index; + } jump; + struct { + __le32 id; + } police; + struct { + __le32 id; + } count; + __le32 reserved[6]; + }; +}; + +struct prestera_msg_counter_req { + struct prestera_msg_cmd cmd; + __le32 client; + __le32 block_id; + __le32 num_counters; +}; + +struct prestera_msg_counter_stats { + __le64 packets; + __le64 bytes; +}; + +struct prestera_msg_counter_resp { 
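+ /* variable-length resp: the 'stats' flex array below carries
+ * 'num_counters' entries
+ */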
+ struct prestera_msg_ret ret; + __le32 block_id; + __le32 offset; + __le32 num_counters; + __le32 done; + struct prestera_msg_counter_stats stats[]; +}; + +struct prestera_msg_span_req { + struct prestera_msg_cmd cmd; + __le32 port; + __le32 dev; + u8 id; + u8 pad[3]; +}; + +struct prestera_msg_span_resp { + struct prestera_msg_ret ret; + u8 id; + u8 pad[3]; +}; + +struct prestera_msg_stp_req { + struct prestera_msg_cmd cmd; + __le32 port; + __le32 dev; + __le16 vid; + u8 state; + u8 __pad; +}; + +struct prestera_msg_rxtx_req { + struct prestera_msg_cmd cmd; + u8 use_sdma; + u8 pad[3]; +}; + +struct prestera_msg_rxtx_resp { + struct prestera_msg_ret ret; + __le32 map_addr; +}; + +struct prestera_msg_iface { + union { + struct { + __le32 dev; + __le32 port; + }; + __le16 lag_id; + }; + __le16 vr_id; + __le16 vid; + u8 type; + u8 __pad[3]; +}; + +struct prestera_msg_ip_addr { + union { + __be32 ipv4; + __be32 ipv6[4]; + } u; + u8 v; /* e.g. PRESTERA_IPV4 */ + u8 __pad[3]; +}; + +struct prestera_msg_nh { + struct prestera_msg_iface oif; + __le32 hw_id; + u8 mac[ETH_ALEN]; + u8 is_active; + u8 pad; +}; + +struct prestera_msg_rif_req { + struct prestera_msg_cmd cmd; + struct prestera_msg_iface iif; + __le32 mtu; + __le16 rif_id; + __le16 __reserved; + u8 mac[ETH_ALEN]; + u8 __pad[2]; +}; + +struct prestera_msg_rif_resp { + struct prestera_msg_ret ret; + __le16 rif_id; + u8 __pad[2]; +}; + +struct prestera_msg_lpm_req { + struct prestera_msg_cmd cmd; + struct prestera_msg_ip_addr dst; + __le32 grp_id; + __le32 dst_len; + __le16 vr_id; + u8 __pad[2]; +}; + +struct prestera_msg_nh_req { + struct prestera_msg_cmd cmd; + struct prestera_msg_nh nh[PRESTERA_NHGR_SIZE_MAX]; + __le32 size; + __le32 grp_id; +}; + +struct prestera_msg_nh_chunk_req { + struct prestera_msg_cmd cmd; + __le32 offset; +}; + +struct prestera_msg_nh_chunk_resp { + struct prestera_msg_ret ret; + u8 hw_state[PRESTERA_MSG_CHUNK_SIZE]; +}; + +struct prestera_msg_nh_grp_req { + struct prestera_msg_cmd cmd; + __le32 grp_id; + __le32 size; +}; + +struct prestera_msg_nh_grp_resp { + struct prestera_msg_ret ret; + __le32 grp_id; +}; + +struct prestera_msg_vr_req { + struct prestera_msg_cmd cmd; + __le16 vr_id; + u8 __pad[2]; +}; + +struct prestera_msg_vr_resp { + struct prestera_msg_ret ret; + __le16 vr_id; + u8 __pad[2]; +}; + +struct prestera_msg_lag_req { + struct prestera_msg_cmd cmd; + __le32 port; + __le32 dev; + __le16 lag_id; + u8 pad[2]; +}; + +struct prestera_msg_cpu_code_counter_req { + struct prestera_msg_cmd cmd; + u8 counter_type; + u8 code; + u8 pad[2]; +}; + +struct mvsw_msg_cpu_code_counter_ret { + struct prestera_msg_ret ret; + __le64 packet_count; +}; + +struct prestera_msg_policer_req { + struct prestera_msg_cmd cmd; + __le32 id; + union { + struct { + __le64 cir; + __le32 cbs; + } __packed sr_tcm; /* make sure always 12 bytes size */ + __le32 reserved[6]; + }; + u8 mode; + u8 type; + u8 pad[2]; +}; + +struct prestera_msg_policer_resp { + struct prestera_msg_ret ret; + __le32 id; +}; + +struct prestera_msg_event { + __le16 type; + __le16 id; +}; + +struct prestera_msg_event_port { + struct prestera_msg_event id; + __le32 port_id; + struct prestera_msg_event_port_param param; +}; + +union prestera_msg_event_fdb_param { + u8 mac[ETH_ALEN]; +}; + +struct prestera_msg_event_fdb { + struct prestera_msg_event id; + __le32 vid; + union { + __le32 port_id; + __le16 lag_id; + } dest; + union prestera_msg_event_fdb_param param; + u8 dest_type; +}; + +struct prestera_msg_flood_domain_create_req { + struct prestera_msg_cmd cmd; 
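+ /* no payload: the fw allocates the domain index and returns it
+ * in the create resp below
+ */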
+};
+
+struct prestera_msg_flood_domain_create_resp {
+ struct prestera_msg_ret ret;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_ports_set_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le32 ports_num;
+};
+
+struct prestera_msg_flood_domain_ports_reset_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_port {
+ union {
+ struct {
+ __le32 port_num;
+ __le32 dev_num;
+ };
+ __le16 lag_id;
+ };
+ __le16 vid;
+ __le16 port_type;
+};
+
+struct prestera_msg_mdb_create_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le16 vid;
+ u8 mac[ETH_ALEN];
+};
+
+struct prestera_msg_mdb_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le16 vid;
+ u8 mac[ETH_ALEN];
+};
+
+static void prestera_hw_build_tests(void)
+{
+ /* check requests */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_common_req) != 4);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_switch_attr_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_req) != 144);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vlan_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_fdb_req) != 28);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_span_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_stp_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_lag_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_cpu_code_counter_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_create_req) != 84);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_destroy_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_add_req) != 168);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_del_req) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_bind_req) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_action) != 32);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_policer_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_req) != 4);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_destroy_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_set_req) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_reset_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_create_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_destroy_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_req) != 124);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_req) != 12);
+
+ /* structures that are part of req/resp fw messages */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_port) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh) != 28);
+
+ /* check responses */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
+ BUILD_BUG_ON(sizeof(struct
prestera_msg_switch_init_resp) != 24); + BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_resp) != 136); + BUILD_BUG_ON(sizeof(struct prestera_msg_port_stats_resp) != 248); + BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_resp) != 20); + BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_span_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_resp) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_counter_resp) != 24); + BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_policer_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_resp) != 1032); + BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_resp) != 12); + + /* check events */ + BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20); + BUILD_BUG_ON(sizeof(struct prestera_msg_event_fdb) != 20); +} + +static u8 prestera_hw_mdix_to_eth(u8 mode); +static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause); + +static int __prestera_cmd_ret(struct prestera_switch *sw, + enum prestera_cmd_type_t type, + struct prestera_msg_cmd *cmd, size_t clen, + struct prestera_msg_ret *ret, size_t rlen, + int waitms) +{ + struct prestera_device *dev = sw->dev; + int err; + + cmd->type = __cpu_to_le32(type); + + err = dev->send_req(dev, 0, cmd, clen, ret, rlen, waitms); + if (err) + return err; + + if (ret->cmd.type != __cpu_to_le32(PRESTERA_CMD_TYPE_ACK)) + return -EBADE; + if (ret->status != __cpu_to_le32(PRESTERA_CMD_ACK_OK)) + return -EINVAL; + + return 0; +} + +static int prestera_cmd_ret(struct prestera_switch *sw, + enum prestera_cmd_type_t type, + struct prestera_msg_cmd *cmd, size_t clen, + struct prestera_msg_ret *ret, size_t rlen) +{ + return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, 0); +} + +static int prestera_cmd_ret_wait(struct prestera_switch *sw, + enum prestera_cmd_type_t type, + struct prestera_msg_cmd *cmd, size_t clen, + struct prestera_msg_ret *ret, size_t rlen, + int waitms) +{ + return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, waitms); +} + +static int prestera_cmd(struct prestera_switch *sw, + enum prestera_cmd_type_t type, + struct prestera_msg_cmd *cmd, size_t clen) +{ + struct prestera_msg_common_resp resp; + + return prestera_cmd_ret(sw, type, cmd, clen, &resp.ret, sizeof(resp)); +} + +static int prestera_fw_parse_port_evt(void *msg, struct prestera_event *evt) +{ + struct prestera_msg_event_port *hw_evt; + + hw_evt = (struct prestera_msg_event_port *)msg; + + evt->port_evt.port_id = __le32_to_cpu(hw_evt->port_id); + + if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) { + evt->port_evt.data.mac.oper = hw_evt->param.mac.oper; + evt->port_evt.data.mac.mode = + __le32_to_cpu(hw_evt->param.mac.mode); + evt->port_evt.data.mac.speed = + __le32_to_cpu(hw_evt->param.mac.speed); + evt->port_evt.data.mac.duplex = hw_evt->param.mac.duplex; + evt->port_evt.data.mac.fc = hw_evt->param.mac.fc; + evt->port_evt.data.mac.fec = hw_evt->param.mac.fec; + } else { + return -EINVAL; + } + + return 0; +} + +static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt) +{ + struct prestera_msg_event_fdb *hw_evt = msg; + + switch (hw_evt->dest_type) { + case PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT: + evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_REG_PORT; + 
evt->fdb_evt.dest.port_id = __le32_to_cpu(hw_evt->dest.port_id); + break; + case PRESTERA_HW_FDB_ENTRY_TYPE_LAG: + evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_LAG; + evt->fdb_evt.dest.lag_id = __le16_to_cpu(hw_evt->dest.lag_id); + break; + default: + return -EINVAL; + } + + evt->fdb_evt.vid = __le32_to_cpu(hw_evt->vid); + + ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac); + + return 0; +} + +static struct prestera_fw_evt_parser { + int (*func)(void *msg, struct prestera_event *evt); +} fw_event_parsers[PRESTERA_EVENT_TYPE_MAX] = { + [PRESTERA_EVENT_TYPE_PORT] = { .func = prestera_fw_parse_port_evt }, + [PRESTERA_EVENT_TYPE_FDB] = { .func = prestera_fw_parse_fdb_evt }, +}; + +static struct prestera_fw_event_handler * +__find_event_handler(const struct prestera_switch *sw, + enum prestera_event_type type) +{ + struct prestera_fw_event_handler *eh; + + list_for_each_entry_rcu(eh, &sw->event_handlers, list) { + if (eh->type == type) + return eh; + } + + return NULL; +} + +static int prestera_find_event_handler(const struct prestera_switch *sw, + enum prestera_event_type type, + struct prestera_fw_event_handler *eh) +{ + struct prestera_fw_event_handler *tmp; + int err = 0; + + rcu_read_lock(); + tmp = __find_event_handler(sw, type); + if (tmp) + *eh = *tmp; + else + err = -ENOENT; + rcu_read_unlock(); + + return err; +} + +static int prestera_evt_recv(struct prestera_device *dev, void *buf, size_t size) +{ + struct prestera_switch *sw = dev->priv; + struct prestera_msg_event *msg = buf; + struct prestera_fw_event_handler eh; + struct prestera_event evt; + u16 msg_type; + int err; + + msg_type = __le16_to_cpu(msg->type); + if (msg_type >= PRESTERA_EVENT_TYPE_MAX) + return -EINVAL; + if (!fw_event_parsers[msg_type].func) + return -ENOENT; + + err = prestera_find_event_handler(sw, msg_type, &eh); + if (err) + return err; + + evt.id = __le16_to_cpu(msg->id); + + err = fw_event_parsers[msg_type].func(buf, &evt); + if (err) + return err; + + eh.func(sw, &evt, eh.arg); + + return 0; +} + +static void prestera_pkt_recv(struct prestera_device *dev) +{ + struct prestera_switch *sw = dev->priv; + struct prestera_fw_event_handler eh; + struct prestera_event ev; + int err; + + ev.id = PRESTERA_RXTX_EVENT_RCV_PKT; + + err = prestera_find_event_handler(sw, PRESTERA_EVENT_TYPE_RXTX, &eh); + if (err) + return; + + eh.func(sw, &ev, eh.arg); +} + +static u8 prestera_hw_mdix_to_eth(u8 mode) +{ + switch (mode) { + case PRESTERA_PORT_TP_MDI: + return ETH_TP_MDI; + case PRESTERA_PORT_TP_MDIX: + return ETH_TP_MDI_X; + case PRESTERA_PORT_TP_AUTO: + return ETH_TP_MDI_AUTO; + default: + return ETH_TP_MDI_INVALID; + } +} + +static u8 prestera_hw_mdix_from_eth(u8 mode) +{ + switch (mode) { + case ETH_TP_MDI: + return PRESTERA_PORT_TP_MDI; + case ETH_TP_MDI_X: + return PRESTERA_PORT_TP_MDIX; + case ETH_TP_MDI_AUTO: + return PRESTERA_PORT_TP_AUTO; + default: + return PRESTERA_PORT_TP_NA; + } +} + +int prestera_hw_port_info_get(const struct prestera_port *port, + u32 *dev_id, u32 *hw_id, u16 *fp_id) +{ + struct prestera_msg_port_info_req req = { + .port = __cpu_to_le32(port->id), + }; + struct prestera_msg_port_info_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_INFO_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *dev_id = __le32_to_cpu(resp.dev_id); + *hw_id = __le32_to_cpu(resp.hw_id); + *fp_id = __le16_to_cpu(resp.fp_id); + + return 0; +} + +int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac) +{ + struct 
prestera_msg_switch_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_MAC), + }; + + ether_addr_copy(req.param.mac, mac); + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_switch_init(struct prestera_switch *sw) +{ + struct prestera_msg_switch_init_resp resp; + struct prestera_msg_common_req req; + int err; + + INIT_LIST_HEAD(&sw->event_handlers); + + prestera_hw_build_tests(); + + err = prestera_cmd_ret_wait(sw, PRESTERA_CMD_TYPE_SWITCH_INIT, + &req.cmd, sizeof(req), + &resp.ret, sizeof(resp), + PRESTERA_SWITCH_INIT_TIMEOUT_MS); + if (err) + return err; + + sw->dev->recv_msg = prestera_evt_recv; + sw->dev->recv_pkt = prestera_pkt_recv; + sw->port_count = __le32_to_cpu(resp.port_count); + sw->mtu_min = PRESTERA_MIN_MTU; + sw->mtu_max = __le32_to_cpu(resp.mtu_max); + sw->id = resp.switch_id; + sw->lag_member_max = resp.lag_member_max; + sw->lag_max = resp.lag_max; + sw->size_tbl_router_nexthop = + __le32_to_cpu(resp.size_tbl_router_nexthop); + + return 0; +} + +void prestera_hw_switch_fini(struct prestera_switch *sw) +{ + WARN_ON(!list_empty(&sw->event_handlers)); +} + +int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms) +{ + struct prestera_msg_switch_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_AGEING), + .param = { + .ageing_timeout_ms = __cpu_to_le32(ageing_ms), + }, + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_mac_mode_get(const struct prestera_port *port, + u32 *mode, u32 *speed, u8 *duplex, u8 *fec) +{ + struct prestera_msg_port_attr_resp resp; + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id) + }; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + if (mode) + *mode = __le32_to_cpu(resp.param.link_evt.mac.mode); + + if (speed) + *speed = __le32_to_cpu(resp.param.link_evt.mac.speed); + + if (duplex) + *duplex = resp.param.link_evt.mac.duplex; + + if (fec) + *fec = resp.param.link_evt.mac.fec; + + return err; +} + +int prestera_hw_port_mac_mode_set(const struct prestera_port *port, + bool admin, u32 mode, u8 inband, + u32 speed, u8 duplex, u8 fec) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .param = { + .link = { + .mac = { + .admin = admin, + .reg_mode.mode = __cpu_to_le32(mode), + .reg_mode.inband = inband, + .reg_mode.speed = __cpu_to_le32(speed), + .reg_mode.duplex = duplex, + .reg_mode.fec = fec + } + } + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_phy_mode_get(const struct prestera_port *port, + u8 *mdix, u64 *lmode_bmap, + bool *fc_pause, bool *fc_asym) +{ + struct prestera_msg_port_attr_resp resp; + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id) + }; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + if (mdix) + *mdix = prestera_hw_mdix_to_eth(resp.param.link_evt.phy.mdix); + + if (lmode_bmap) + 
*lmode_bmap = __le64_to_cpu(resp.param.link_evt.phy.lmode_bmap); + + if (fc_pause && fc_asym) + prestera_hw_remote_fc_to_eth(resp.param.link_evt.phy.fc, + fc_pause, fc_asym); + + return err; +} + +int prestera_hw_port_phy_mode_set(const struct prestera_port *port, + bool admin, bool adv, u32 mode, u64 modes, + u8 mdix) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .param = { + .link = { + .phy = { + .admin = admin, + .adv_enable = adv ? 1 : 0, + .mode = __cpu_to_le32(mode), + .modes = __cpu_to_le64(modes), + } + } + } + }; + + req.param.link.phy.mdix = prestera_hw_mdix_from_eth(mdix); + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MTU), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .param = { + .mtu = __cpu_to_le32(mtu), + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + }; + + ether_addr_copy(req.param.mac, mac); + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_accept_frm_type(struct prestera_port *port, + enum prestera_accept_frm_type type) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .param = { + .accept_frm_type = type, + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_cap_get(const struct prestera_port *port, + struct prestera_port_caps *caps) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_CAPABILITY), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + }; + struct prestera_msg_port_attr_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + caps->supp_link_modes = __le64_to_cpu(resp.param.cap.link_mode); + caps->transceiver = resp.param.cap.transceiver; + caps->supp_fec = resp.param.cap.fec; + caps->type = resp.param.cap.type; + + return err; +} + +static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause) +{ + switch (fc) { + case PRESTERA_FC_SYMMETRIC: + *pause = true; + *asym_pause = false; + break; + case PRESTERA_FC_ASYMMETRIC: + *pause = false; + *asym_pause = true; + break; + case PRESTERA_FC_SYMM_ASYMM: + *pause = true; + *asym_pause = true; + break; + default: + *pause = false; + *asym_pause = false; + } +} + +int prestera_hw_vtcam_create(struct prestera_switch *sw, + u8 lookup, const u32 *keymask, u32 *vtcam_id, + enum prestera_hw_vtcam_direction_t dir) +{ + int err; + struct prestera_msg_vtcam_resp resp; + struct prestera_msg_vtcam_create_req req = { + .lookup = lookup, + .direction = dir, + }; + + if (keymask) + memcpy(req.keymask, keymask, sizeof(req.keymask)); + else + 
memset(req.keymask, 0, sizeof(req.keymask)); + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_CREATE, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *vtcam_id = __le32_to_cpu(resp.vtcam_id); + return 0; +} + +int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id) +{ + struct prestera_msg_vtcam_destroy_req req = { + .vtcam_id = __cpu_to_le32(vtcam_id), + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_DESTROY, + &req.cmd, sizeof(req)); +} + +static int +prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action, + struct prestera_acl_hw_action_info *info) +{ + action->id = __cpu_to_le32(info->id); + + switch (info->id) { + case PRESTERA_ACL_RULE_ACTION_ACCEPT: + case PRESTERA_ACL_RULE_ACTION_DROP: + case PRESTERA_ACL_RULE_ACTION_TRAP: + /* just rule action id, no specific data */ + break; + case PRESTERA_ACL_RULE_ACTION_JUMP: + action->jump.index = __cpu_to_le32(info->jump.index); + break; + case PRESTERA_ACL_RULE_ACTION_POLICE: + action->police.id = __cpu_to_le32(info->police.id); + break; + case PRESTERA_ACL_RULE_ACTION_COUNT: + action->count.id = __cpu_to_le32(info->count.id); + break; + default: + return -EINVAL; + } + + return 0; +} + +int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, + u32 vtcam_id, u32 prio, void *key, void *keymask, + struct prestera_acl_hw_action_info *act, + u8 n_act, u32 *rule_id) +{ + struct prestera_msg_acl_action *actions_msg; + struct prestera_msg_vtcam_rule_add_req *req; + struct prestera_msg_vtcam_resp resp; + void *buff; + u32 size; + int err; + u8 i; + + size = sizeof(*req) + sizeof(*actions_msg) * n_act; + + buff = kzalloc(size, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + req = buff; + req->n_act = __cpu_to_le32(n_act); + actions_msg = buff + sizeof(*req); + + /* put acl matches into the message */ + memcpy(req->key, key, sizeof(req->key)); + memcpy(req->keymask, keymask, sizeof(req->keymask)); + + /* put acl actions into the message */ + for (i = 0; i < n_act; i++) { + err = prestera_acl_rule_add_put_action(&actions_msg[i], + &act[i]); + if (err) + goto free_buff; + } + + req->vtcam_id = __cpu_to_le32(vtcam_id); + req->prio = __cpu_to_le32(prio); + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_ADD, + &req->cmd, size, &resp.ret, sizeof(resp)); + if (err) + goto free_buff; + + *rule_id = __le32_to_cpu(resp.rule_id); +free_buff: + kfree(buff); + return err; +} + +int prestera_hw_vtcam_rule_del(struct prestera_switch *sw, + u32 vtcam_id, u32 rule_id) +{ + struct prestera_msg_vtcam_rule_del_req req = { + .vtcam_id = __cpu_to_le32(vtcam_id), + .id = __cpu_to_le32(rule_id) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw, + struct prestera_acl_iface *iface, + u32 vtcam_id, u16 pcl_id) +{ + struct prestera_msg_vtcam_bind_req req = { + .vtcam_id = __cpu_to_le32(vtcam_id), + .type = __cpu_to_le16(iface->type), + .pcl_id = __cpu_to_le16(pcl_id) + }; + + if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) { + req.port.dev_id = __cpu_to_le32(iface->port->dev_id); + req.port.hw_id = __cpu_to_le32(iface->port->hw_id); + } else { + req.index = __cpu_to_le32(iface->index); + } + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND, + &req.cmd, sizeof(req)); +} + +int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw, + struct prestera_acl_iface *iface, + u32 vtcam_id) +{ + struct prestera_msg_vtcam_bind_req req = { + .vtcam_id = 
__cpu_to_le32(vtcam_id), + .type = __cpu_to_le16(iface->type) + }; + + if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) { + req.port.dev_id = __cpu_to_le32(iface->port->dev_id); + req.port.hw_id = __cpu_to_le32(iface->port->hw_id); + } else { + req.index = __cpu_to_le32(iface->index); + } + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND, + &req.cmd, sizeof(req)); +} + +int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id) +{ + struct prestera_msg_span_resp resp; + struct prestera_msg_span_req req = { + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + }; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_SPAN_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *span_id = resp.id; + + return 0; +} + +int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id, + bool ingress) +{ + struct prestera_msg_span_req req = { + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .id = span_id, + }; + enum prestera_cmd_type_t cmd_type; + + if (ingress) + cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND; + else + cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND; + + return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req)); + +} + +int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress) +{ + struct prestera_msg_span_req req = { + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + }; + enum prestera_cmd_type_t cmd_type; + + if (ingress) + cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND; + else + cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND; + + return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req)); +} + +int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id) +{ + struct prestera_msg_span_req req = { + .id = span_id + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_SPAN_RELEASE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_TYPE), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + }; + struct prestera_msg_port_attr_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *type = resp.param.type; + + return 0; +} + +int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_SPEED), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + }; + struct prestera_msg_port_attr_resp resp; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *speed = __le32_to_cpu(resp.param.speed); + + return 0; +} + +int prestera_hw_port_autoneg_restart(struct prestera_port *port) +{ + struct prestera_msg_port_attr_req req = { + .attr = + __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_stats_get(const struct prestera_port *port, + struct prestera_port_stats *st) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_STATS), + .port = 
__cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + }; + struct prestera_msg_port_stats_resp resp; + __le64 *hw = resp.stats; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + st->good_octets_received = + __le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT]); + st->bad_octets_received = + __le64_to_cpu(hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT]); + st->mac_trans_error = + __le64_to_cpu(hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT]); + st->broadcast_frames_received = + __le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT]); + st->multicast_frames_received = + __le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_RCV_CNT]); + st->frames_64_octets = __le64_to_cpu(hw[PRESTERA_PORT_PKTS_64L_CNT]); + st->frames_65_to_127_octets = + __le64_to_cpu(hw[PRESTERA_PORT_PKTS_65TO127L_CNT]); + st->frames_128_to_255_octets = + __le64_to_cpu(hw[PRESTERA_PORT_PKTS_128TO255L_CNT]); + st->frames_256_to_511_octets = + __le64_to_cpu(hw[PRESTERA_PORT_PKTS_256TO511L_CNT]); + st->frames_512_to_1023_octets = + __le64_to_cpu(hw[PRESTERA_PORT_PKTS_512TO1023L_CNT]); + st->frames_1024_to_max_octets = + __le64_to_cpu(hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT]); + st->excessive_collision = + __le64_to_cpu(hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT]); + st->multicast_frames_sent = + __le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_SENT_CNT]); + st->broadcast_frames_sent = + __le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT]); + st->fc_sent = __le64_to_cpu(hw[PRESTERA_PORT_FC_SENT_CNT]); + st->fc_received = __le64_to_cpu(hw[PRESTERA_PORT_GOOD_FC_RCV_CNT]); + st->buffer_overrun = __le64_to_cpu(hw[PRESTERA_PORT_DROP_EVENTS_CNT]); + st->undersize = __le64_to_cpu(hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT]); + st->fragments = __le64_to_cpu(hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT]); + st->oversize = __le64_to_cpu(hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT]); + st->jabber = __le64_to_cpu(hw[PRESTERA_PORT_JABBER_PKTS_CNT]); + st->rx_error_frame_received = + __le64_to_cpu(hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT]); + st->bad_crc = __le64_to_cpu(hw[PRESTERA_PORT_BAD_CRC_CNT]); + st->collisions = __le64_to_cpu(hw[PRESTERA_PORT_COLLISIONS_CNT]); + st->late_collision = + __le64_to_cpu(hw[PRESTERA_PORT_LATE_COLLISIONS_CNT]); + st->unicast_frames_received = + __le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT]); + st->unicast_frames_sent = + __le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT]); + st->sent_multiple = + __le64_to_cpu(hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT]); + st->sent_deferred = + __le64_to_cpu(hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT]); + st->good_octets_sent = + __le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT]); + + return 0; +} + +int prestera_hw_port_learning_set(struct prestera_port *port, bool enable) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LEARNING), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .param = { + .learning = enable, + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .param = { + .flood_ext = { + .type = PRESTERA_PORT_FLOOD_TYPE_UC, + .enable = flood, + } + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, 
sizeof(req)); +} + +int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .param = { + .flood_ext = { + .type = PRESTERA_PORT_FLOOD_TYPE_MC, + .enable = flood, + } + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_port_br_locked_set(const struct prestera_port *port, + bool br_locked) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LOCKED), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .param = { + .br_locked = br_locked, + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid) +{ + struct prestera_msg_vlan_req req = { + .vid = __cpu_to_le16(vid), + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_CREATE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid) +{ + struct prestera_msg_vlan_req req = { + .vid = __cpu_to_le16(vid), + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_DELETE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid, + bool is_member, bool untagged) +{ + struct prestera_msg_vlan_req req = { + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .vid = __cpu_to_le16(vid), + .is_member = is_member, + .is_tagged = !untagged, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PORT_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid) +{ + struct prestera_msg_vlan_req req = { + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .vid = __cpu_to_le16(vid), + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PVID_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state) +{ + struct prestera_msg_stp_req req = { + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .vid = __cpu_to_le16(vid), + .state = state, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_STP_PORT_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac, + u16 vid, bool dynamic) +{ + struct prestera_msg_fdb_req req = { + .dest = { + .dev = __cpu_to_le32(port->dev_id), + .port = __cpu_to_le32(port->hw_id), + }, + .vid = __cpu_to_le16(vid), + .dynamic = dynamic, + }; + + ether_addr_copy(req.mac, mac); + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_ADD, + &req.cmd, sizeof(req)); +} + +int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac, + u16 vid) +{ + struct prestera_msg_fdb_req req = { + .dest = { + .dev = __cpu_to_le32(port->dev_id), + .port = __cpu_to_le32(port->hw_id), + }, + .vid = __cpu_to_le16(vid), + }; + + ether_addr_copy(req.mac, mac); + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_DELETE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id, + const unsigned char *mac, u16 vid, bool dynamic) +{ + struct prestera_msg_fdb_req req = { + .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, + .dest = { + .lag_id = __cpu_to_le16(lag_id), + }, + .vid = __cpu_to_le16(vid), + .dynamic = dynamic, + }; + 
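+ /* dest_type above selects the lag_id leg of the 'dest' union */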
+ ether_addr_copy(req.mac, mac); + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_ADD, + &req.cmd, sizeof(req)); +} + +int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id, + const unsigned char *mac, u16 vid) +{ + struct prestera_msg_fdb_req req = { + .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, + .dest = { + .lag_id = __cpu_to_le16(lag_id), + }, + .vid = __cpu_to_le16(vid), + }; + + ether_addr_copy(req.mac, mac); + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_DELETE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode) +{ + struct prestera_msg_fdb_req req = { + .dest = { + .dev = __cpu_to_le32(port->dev_id), + .port = __cpu_to_le32(port->hw_id), + }, + .flush_mode = __cpu_to_le32(mode), + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT, + &req.cmd, sizeof(req)); +} + +int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode) +{ + struct prestera_msg_fdb_req req = { + .vid = __cpu_to_le16(vid), + .flush_mode = __cpu_to_le32(mode), + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN, + &req.cmd, sizeof(req)); +} + +int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid, + u32 mode) +{ + struct prestera_msg_fdb_req req = { + .dest = { + .dev = __cpu_to_le32(port->dev_id), + .port = __cpu_to_le32(port->hw_id), + }, + .vid = __cpu_to_le16(vid), + .flush_mode = __cpu_to_le32(mode), + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN, + &req.cmd, sizeof(req)); +} + +int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id, + u32 mode) +{ + struct prestera_msg_fdb_req req = { + .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, + .dest = { + .lag_id = __cpu_to_le16(lag_id), + }, + .flush_mode = __cpu_to_le32(mode), + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT, + &req.cmd, sizeof(req)); +} + +int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw, + u16 lag_id, u16 vid, u32 mode) +{ + struct prestera_msg_fdb_req req = { + .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, + .dest = { + .lag_id = __cpu_to_le16(lag_id), + }, + .vid = __cpu_to_le16(vid), + .flush_mode = __cpu_to_le32(mode), + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN, + &req.cmd, sizeof(req)); +} + +int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id) +{ + struct prestera_msg_bridge_resp resp; + struct prestera_msg_bridge_req req; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_BRIDGE_CREATE, + &req.cmd, sizeof(req), + &resp.ret, sizeof(resp)); + if (err) + return err; + + *bridge_id = __le16_to_cpu(resp.bridge); + + return 0; +} + +int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id) +{ + struct prestera_msg_bridge_req req = { + .bridge = __cpu_to_le16(bridge_id), + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_BRIDGE_DELETE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id) +{ + struct prestera_msg_bridge_req req = { + .bridge = __cpu_to_le16(bridge_id), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD, + &req.cmd, sizeof(req)); +} + +int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id) +{ + struct prestera_msg_bridge_req req = { + .bridge = __cpu_to_le16(bridge_id), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + }; + + return prestera_cmd(port->sw, 
PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE, + &req.cmd, sizeof(req)); +} + +static int prestera_iface_to_msg(struct prestera_iface *iface, + struct prestera_msg_iface *msg_if) +{ + switch (iface->type) { + case PRESTERA_IF_PORT_E: + case PRESTERA_IF_VID_E: + msg_if->port = __cpu_to_le32(iface->dev_port.port_num); + msg_if->dev = __cpu_to_le32(iface->dev_port.hw_dev_num); + break; + case PRESTERA_IF_LAG_E: + msg_if->lag_id = __cpu_to_le16(iface->lag_id); + break; + default: + return -EOPNOTSUPP; + } + + msg_if->vr_id = __cpu_to_le16(iface->vr_id); + msg_if->vid = __cpu_to_le16(iface->vlan_id); + msg_if->type = iface->type; + return 0; +} + +int prestera_hw_rif_create(struct prestera_switch *sw, + struct prestera_iface *iif, u8 *mac, u16 *rif_id) +{ + struct prestera_msg_rif_resp resp; + struct prestera_msg_rif_req req; + int err; + + memcpy(req.mac, mac, ETH_ALEN); + + err = prestera_iface_to_msg(iif, &req.iif); + if (err) + return err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *rif_id = __le16_to_cpu(resp.rif_id); + return err; +} + +int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id, + struct prestera_iface *iif) +{ + struct prestera_msg_rif_req req = { + .rif_id = __cpu_to_le16(rif_id), + }; + int err; + + err = prestera_iface_to_msg(iif, &req.iif); + if (err) + return err; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE, &req.cmd, + sizeof(req)); +} + +int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id) +{ + struct prestera_msg_vr_resp resp; + struct prestera_msg_vr_req req; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *vr_id = __le16_to_cpu(resp.vr_id); + return err; +} + +int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id) +{ + struct prestera_msg_vr_req req = { + .vr_id = __cpu_to_le16(vr_id), + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_VR_DELETE, &req.cmd, + sizeof(req)); +} + +int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id, + __be32 dst, u32 dst_len, u32 grp_id) +{ + struct prestera_msg_lpm_req req = { + .dst_len = __cpu_to_le32(dst_len), + .vr_id = __cpu_to_le16(vr_id), + .grp_id = __cpu_to_le32(grp_id), + .dst.u.ipv4 = dst + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_ADD, &req.cmd, + sizeof(req)); +} + +int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id, + __be32 dst, u32 dst_len) +{ + struct prestera_msg_lpm_req req = { + .dst_len = __cpu_to_le32(dst_len), + .vr_id = __cpu_to_le16(vr_id), + .dst.u.ipv4 = dst + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE, &req.cmd, + sizeof(req)); +} + +int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count, + struct prestera_neigh_info *nhs, u32 grp_id) +{ + struct prestera_msg_nh_req req = { .size = __cpu_to_le32((u32)count), + .grp_id = __cpu_to_le32(grp_id) }; + int i, err; + + for (i = 0; i < count; i++) { + req.nh[i].is_active = nhs[i].connected; + memcpy(&req.nh[i].mac, nhs[i].ha, ETH_ALEN); + err = prestera_iface_to_msg(&nhs[i].iface, &req.nh[i].oif); + if (err) + return err; + } + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET, &req.cmd, + sizeof(req)); +} + +int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw, + u8 *hw_state, u32 buf_size /* Buffer in bytes */) +{ + static struct prestera_msg_nh_chunk_resp resp; + struct prestera_msg_nh_chunk_req req; + u32 
buf_offset; + int err; + + memset(&hw_state[0], 0, buf_size); + buf_offset = 0; + while (1) { + if (buf_offset >= buf_size) + break; + + memset(&req, 0, sizeof(req)); + req.offset = __cpu_to_le32(buf_offset * 8); /* 8 bits in u8 */ + err = prestera_cmd_ret(sw, + PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET, + &req.cmd, sizeof(req), &resp.ret, + sizeof(resp)); + if (err) + return err; + + memcpy(&hw_state[buf_offset], &resp.hw_state[0], + buf_offset + PRESTERA_MSG_CHUNK_SIZE > buf_size ? + buf_size - buf_offset : PRESTERA_MSG_CHUNK_SIZE); + buf_offset += PRESTERA_MSG_CHUNK_SIZE; + } + + return 0; +} + +int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count, + u32 *grp_id) +{ + struct prestera_msg_nh_grp_req req = { .size = __cpu_to_le32((u32)nh_count) }; + struct prestera_msg_nh_grp_resp resp; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *grp_id = __le32_to_cpu(resp.grp_id); + return err; +} + +int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count, + u32 grp_id) +{ + struct prestera_msg_nh_grp_req req = { + .grp_id = __cpu_to_le32(grp_id), + .size = __cpu_to_le32(nh_count) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_rxtx_init(struct prestera_switch *sw, + struct prestera_rxtx_params *params) +{ + struct prestera_msg_rxtx_resp resp; + struct prestera_msg_rxtx_req req; + int err; + + req.use_sdma = params->use_sdma; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_RXTX_INIT, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + params->map_addr = __le32_to_cpu(resp.map_addr); + + return 0; +} + +int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id) +{ + struct prestera_msg_lag_req req = { + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .lag_id = __cpu_to_le16(lag_id), + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_ADD, + &req.cmd, sizeof(req)); +} + +int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id) +{ + struct prestera_msg_lag_req req = { + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .lag_id = __cpu_to_le16(lag_id), + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id, + bool enable) +{ + struct prestera_msg_lag_req req = { + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .lag_id = __cpu_to_le16(lag_id), + }; + u32 cmd; + + cmd = enable ? 
PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE : + PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE; + + return prestera_cmd(port->sw, cmd, &req.cmd, sizeof(req)); +} + +int +prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code, + enum prestera_hw_cpu_code_cnt_t counter_type, + u64 *packet_count) +{ + struct prestera_msg_cpu_code_counter_req req = { + .counter_type = counter_type, + .code = code, + }; + struct mvsw_msg_cpu_code_counter_ret resp; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *packet_count = __le64_to_cpu(resp.packet_count); + + return 0; +} + +int prestera_hw_event_handler_register(struct prestera_switch *sw, + enum prestera_event_type type, + prestera_event_cb_t fn, + void *arg) +{ + struct prestera_fw_event_handler *eh; + + eh = __find_event_handler(sw, type); + if (eh) + return -EEXIST; + + eh = kmalloc(sizeof(*eh), GFP_KERNEL); + if (!eh) + return -ENOMEM; + + eh->type = type; + eh->func = fn; + eh->arg = arg; + + INIT_LIST_HEAD(&eh->list); + + list_add_rcu(&eh->list, &sw->event_handlers); + + return 0; +} + +void prestera_hw_event_handler_unregister(struct prestera_switch *sw, + enum prestera_event_type type, + prestera_event_cb_t fn) +{ + struct prestera_fw_event_handler *eh; + + eh = __find_event_handler(sw, type); + if (!eh) + return; + + list_del_rcu(&eh->list); + kfree_rcu(eh, rcu); +} + +int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id) +{ + struct prestera_msg_counter_req req = { + .block_id = __cpu_to_le32(block_id) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_TRIGGER, + &req.cmd, sizeof(req)); +} + +int prestera_hw_counter_abort(struct prestera_switch *sw) +{ + struct prestera_msg_counter_req req; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_ABORT, + &req.cmd, sizeof(req)); +} + +int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx, + u32 *len, bool *done, + struct prestera_counter_stats *stats) +{ + struct prestera_msg_counter_resp *resp; + struct prestera_msg_counter_req req = { + .block_id = __cpu_to_le32(idx), + .num_counters = __cpu_to_le32(*len), + }; + size_t size = struct_size(resp, stats, *len); + int err, i; + + resp = kmalloc(size, GFP_KERNEL); + if (!resp) + return -ENOMEM; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_GET, + &req.cmd, sizeof(req), &resp->ret, size); + if (err) + goto free_buff; + + for (i = 0; i < __le32_to_cpu(resp->num_counters); i++) { + stats[i].packets += __le64_to_cpu(resp->stats[i].packets); + stats[i].bytes += __le64_to_cpu(resp->stats[i].bytes); + } + + *len = __le32_to_cpu(resp->num_counters); + *done = __le32_to_cpu(resp->done); + +free_buff: + kfree(resp); + return err; +} + +int prestera_hw_counter_block_get(struct prestera_switch *sw, + u32 client, u32 *block_id, u32 *offset, + u32 *num_counters) +{ + struct prestera_msg_counter_resp resp; + struct prestera_msg_counter_req req = { + .client = __cpu_to_le32(client) + }; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *block_id = __le32_to_cpu(resp.block_id); + *offset = __le32_to_cpu(resp.offset); + *num_counters = __le32_to_cpu(resp.num_counters); + + return 0; +} + +int prestera_hw_counter_block_release(struct prestera_switch *sw, + u32 block_id) +{ + struct prestera_msg_counter_req req = { + .block_id = __cpu_to_le32(block_id) + }; + + return prestera_cmd(sw, 
PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id, + u32 counter_id) +{ + struct prestera_msg_counter_req req = { + .block_id = __cpu_to_le32(block_id), + .num_counters = __cpu_to_le32(counter_id) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_CLEAR, + &req.cmd, sizeof(req)); +} + +int prestera_hw_policer_create(struct prestera_switch *sw, u8 type, + u32 *policer_id) +{ + struct prestera_msg_policer_resp resp; + struct prestera_msg_policer_req req = { + .type = type + }; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_POLICER_CREATE, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *policer_id = __le32_to_cpu(resp.id); + return 0; +} + +int prestera_hw_policer_release(struct prestera_switch *sw, + u32 policer_id) +{ + struct prestera_msg_policer_req req = { + .id = __cpu_to_le32(policer_id) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_RELEASE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw, + u32 policer_id, u64 cir, u32 cbs) +{ + struct prestera_msg_policer_req req = { + .mode = PRESTERA_POLICER_MODE_SR_TCM, + .id = __cpu_to_le32(policer_id), + .sr_tcm = { + .cir = __cpu_to_le64(cir), + .cbs = __cpu_to_le32(cbs) + } + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_SET, + &req.cmd, sizeof(req)); +} + +int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain) +{ + struct prestera_msg_flood_domain_create_resp resp; + struct prestera_msg_flood_domain_create_req req; + int err; + + err = prestera_cmd_ret(domain->sw, + PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE, &req.cmd, + sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + domain->idx = __le32_to_cpu(resp.flood_domain_idx); + + return 0; +} + +int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain) +{ + struct prestera_msg_flood_domain_destroy_req req = { + .flood_domain_idx = __cpu_to_le32(domain->idx), + }; + + return prestera_cmd(domain->sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY, + &req.cmd, sizeof(req)); +} + +int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain) +{ + struct prestera_flood_domain_port *flood_domain_port; + struct prestera_msg_flood_domain_ports_set_req *req; + struct prestera_msg_flood_domain_port *ports; + struct prestera_switch *sw = domain->sw; + struct prestera_port *port; + u32 ports_num = 0; + int buf_size; + void *buff; + u16 lag_id; + int err; + + list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list, + flood_domain_port_node) + ports_num++; + + if (!ports_num) + return -EINVAL; + + buf_size = sizeof(*req) + sizeof(*ports) * ports_num; + + buff = kmalloc(buf_size, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + req = buff; + ports = buff + sizeof(*req); + + req->flood_domain_idx = __cpu_to_le32(domain->idx); + req->ports_num = __cpu_to_le32(ports_num); + + list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list, + flood_domain_port_node) { + if (netif_is_lag_master(flood_domain_port->dev)) { + if (prestera_lag_id(sw, flood_domain_port->dev, + &lag_id)) { + kfree(buff); + return -EINVAL; + } + + ports->port_type = + __cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG); + ports->lag_id = __cpu_to_le16(lag_id); + } else { + port = prestera_port_dev_lower_find(flood_domain_port->dev); + + ports->port_type = + __cpu_to_le16(PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT); + ports->dev_num = 
__cpu_to_le32(port->dev_id); + ports->port_num = __cpu_to_le32(port->hw_id); + } + + ports->vid = __cpu_to_le16(flood_domain_port->vid); + + ports++; + } + + err = prestera_cmd(sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET, + &req->cmd, buf_size); + + kfree(buff); + + return err; +} + +int prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain) +{ + struct prestera_msg_flood_domain_ports_reset_req req = { + .flood_domain_idx = __cpu_to_le32(domain->idx), + }; + + return prestera_cmd(domain->sw, + PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET, &req.cmd, + sizeof(req)); +} + +int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb) +{ + struct prestera_msg_mdb_create_req req = { + .flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx), + .vid = __cpu_to_le16(mdb->vid), + }; + + memcpy(req.mac, mdb->addr, ETH_ALEN); + + return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_CREATE, &req.cmd, + sizeof(req)); +} + +int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb) +{ + struct prestera_msg_mdb_destroy_req req = { + .flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx), + .vid = __cpu_to_le16(mdb->vid), + }; + + memcpy(req.mac, mdb->addr, ETH_ALEN); + + return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_DESTROY, &req.cmd, + sizeof(req)); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h new file mode 100644 index 0000000000..0a929279e1 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h @@ -0,0 +1,330 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_HW_H_ +#define _PRESTERA_HW_H_ + +#include <linux/types.h> +#include "prestera_acl.h" + +enum prestera_accept_frm_type { + PRESTERA_ACCEPT_FRAME_TYPE_TAGGED, + PRESTERA_ACCEPT_FRAME_TYPE_UNTAGGED, + PRESTERA_ACCEPT_FRAME_TYPE_ALL, +}; + +enum prestera_fdb_flush_mode { + PRESTERA_FDB_FLUSH_MODE_DYNAMIC = BIT(0), + PRESTERA_FDB_FLUSH_MODE_STATIC = BIT(1), + PRESTERA_FDB_FLUSH_MODE_ALL = PRESTERA_FDB_FLUSH_MODE_DYNAMIC + | PRESTERA_FDB_FLUSH_MODE_STATIC, +}; + +enum { + PRESTERA_MAC_MODE_INTERNAL, + PRESTERA_MAC_MODE_SGMII, + PRESTERA_MAC_MODE_1000BASE_X, + PRESTERA_MAC_MODE_KR, + PRESTERA_MAC_MODE_KR2, + PRESTERA_MAC_MODE_KR4, + PRESTERA_MAC_MODE_CR, + PRESTERA_MAC_MODE_CR2, + PRESTERA_MAC_MODE_CR4, + PRESTERA_MAC_MODE_SR_LR, + PRESTERA_MAC_MODE_SR_LR2, + PRESTERA_MAC_MODE_SR_LR4, + + PRESTERA_MAC_MODE_MAX +}; + +enum { + PRESTERA_LINK_MODE_10baseT_Half, + PRESTERA_LINK_MODE_10baseT_Full, + PRESTERA_LINK_MODE_100baseT_Half, + PRESTERA_LINK_MODE_100baseT_Full, + PRESTERA_LINK_MODE_1000baseT_Half, + PRESTERA_LINK_MODE_1000baseT_Full, + PRESTERA_LINK_MODE_1000baseX_Full, + PRESTERA_LINK_MODE_1000baseKX_Full, + PRESTERA_LINK_MODE_2500baseX_Full, + PRESTERA_LINK_MODE_10GbaseKR_Full, + PRESTERA_LINK_MODE_10GbaseSR_Full, + PRESTERA_LINK_MODE_10GbaseLR_Full, + PRESTERA_LINK_MODE_20GbaseKR2_Full, + PRESTERA_LINK_MODE_25GbaseCR_Full, + PRESTERA_LINK_MODE_25GbaseKR_Full, + PRESTERA_LINK_MODE_25GbaseSR_Full, + PRESTERA_LINK_MODE_40GbaseKR4_Full, + PRESTERA_LINK_MODE_40GbaseCR4_Full, + PRESTERA_LINK_MODE_40GbaseSR4_Full, + PRESTERA_LINK_MODE_50GbaseCR2_Full, + PRESTERA_LINK_MODE_50GbaseKR2_Full, + PRESTERA_LINK_MODE_50GbaseSR2_Full, + PRESTERA_LINK_MODE_100GbaseKR4_Full, + PRESTERA_LINK_MODE_100GbaseSR4_Full, + PRESTERA_LINK_MODE_100GbaseCR4_Full, + + PRESTERA_LINK_MODE_MAX +}; + +enum { + PRESTERA_PORT_TYPE_NONE, + PRESTERA_PORT_TYPE_TP, 
+ PRESTERA_PORT_TYPE_AUI, + PRESTERA_PORT_TYPE_MII, + PRESTERA_PORT_TYPE_FIBRE, + PRESTERA_PORT_TYPE_BNC, + PRESTERA_PORT_TYPE_DA, + PRESTERA_PORT_TYPE_OTHER, + + PRESTERA_PORT_TYPE_MAX +}; + +enum { + PRESTERA_PORT_TCVR_COPPER, + PRESTERA_PORT_TCVR_SFP, + + PRESTERA_PORT_TCVR_MAX +}; + +enum { + PRESTERA_PORT_FEC_OFF, + PRESTERA_PORT_FEC_BASER, + PRESTERA_PORT_FEC_RS, + + PRESTERA_PORT_FEC_MAX +}; + +enum { + PRESTERA_PORT_DUPLEX_HALF, + PRESTERA_PORT_DUPLEX_FULL, +}; + +enum { + PRESTERA_STP_DISABLED, + PRESTERA_STP_BLOCK_LISTEN, + PRESTERA_STP_LEARN, + PRESTERA_STP_FORWARD, +}; + +enum { + PRESTERA_POLICER_TYPE_INGRESS, + PRESTERA_POLICER_TYPE_EGRESS +}; + +enum prestera_hw_cpu_code_cnt_t { + PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP = 0, + PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1, +}; + +enum prestera_hw_vtcam_direction_t { + PRESTERA_HW_VTCAM_DIR_INGRESS = 0, + PRESTERA_HW_VTCAM_DIR_EGRESS = 1, +}; + +enum { + PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0 = 0, + PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1 = 1, + PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2 = 2, + PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP = 3, +}; + +struct prestera_switch; +struct prestera_port; +struct prestera_port_stats; +struct prestera_port_caps; +enum prestera_event_type; +struct prestera_event; + +typedef void (*prestera_event_cb_t) + (struct prestera_switch *sw, struct prestera_event *evt, void *arg); + +struct prestera_rxtx_params; +struct prestera_acl_hw_action_info; +struct prestera_acl_iface; +struct prestera_counter_stats; +struct prestera_iface; +struct prestera_flood_domain; +struct prestera_mdb_entry; +struct prestera_neigh_info; + +/* Switch API */ +int prestera_hw_switch_init(struct prestera_switch *sw); +void prestera_hw_switch_fini(struct prestera_switch *sw); +int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms); +int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac); + +/* Port API */ +int prestera_hw_port_info_get(const struct prestera_port *port, + u32 *dev_id, u32 *hw_id, u16 *fp_id); + +int prestera_hw_port_mac_mode_get(const struct prestera_port *port, + u32 *mode, u32 *speed, u8 *duplex, u8 *fec); +int prestera_hw_port_mac_mode_set(const struct prestera_port *port, + bool admin, u32 mode, u8 inband, + u32 speed, u8 duplex, u8 fec); +int prestera_hw_port_phy_mode_get(const struct prestera_port *port, + u8 *mdix, u64 *lmode_bmap, + bool *fc_pause, bool *fc_asym); +int prestera_hw_port_phy_mode_set(const struct prestera_port *port, + bool admin, bool adv, u32 mode, u64 modes, + u8 mdix); + +int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu); +int prestera_hw_port_mtu_get(const struct prestera_port *port, u32 *mtu); +int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac); +int prestera_hw_port_mac_get(const struct prestera_port *port, char *mac); +int prestera_hw_port_cap_get(const struct prestera_port *port, + struct prestera_port_caps *caps); +int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type); +int prestera_hw_port_autoneg_restart(struct prestera_port *port); +int prestera_hw_port_stats_get(const struct prestera_port *port, + struct prestera_port_stats *stats); +int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed); +int prestera_hw_port_learning_set(struct prestera_port *port, bool enable); +int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood); +int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood); +int 
prestera_hw_port_br_locked_set(const struct prestera_port *port, + bool br_locked); +int prestera_hw_port_accept_frm_type(struct prestera_port *port, + enum prestera_accept_frm_type type); +/* Vlan API */ +int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid); +int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid); +int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid, + bool is_member, bool untagged); +int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid); +int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state); + +/* FDB API */ +int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac, + u16 vid, bool dynamic); +int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac, + u16 vid); +int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode); +int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode); +int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid, + u32 mode); + +/* Bridge API */ +int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id); +int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id); +int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id); +int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id); + +/* vTCAM API */ +int prestera_hw_vtcam_create(struct prestera_switch *sw, + u8 lookup, const u32 *keymask, u32 *vtcam_id, + enum prestera_hw_vtcam_direction_t direction); +int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, u32 vtcam_id, + u32 prio, void *key, void *keymask, + struct prestera_acl_hw_action_info *act, + u8 n_act, u32 *rule_id); +int prestera_hw_vtcam_rule_del(struct prestera_switch *sw, + u32 vtcam_id, u32 rule_id); +int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id); +int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw, + struct prestera_acl_iface *iface, + u32 vtcam_id, u16 pcl_id); +int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw, + struct prestera_acl_iface *iface, + u32 vtcam_id); + +/* Counter API */ +int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id); +int prestera_hw_counter_abort(struct prestera_switch *sw); +int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx, + u32 *len, bool *done, + struct prestera_counter_stats *stats); +int prestera_hw_counter_block_get(struct prestera_switch *sw, + u32 client, u32 *block_id, u32 *offset, + u32 *num_counters); +int prestera_hw_counter_block_release(struct prestera_switch *sw, + u32 block_id); +int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id, + u32 counter_id); + +/* SPAN API */ +int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id); +int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id, + bool ingress); +int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress); +int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id); + +/* Router API */ +int prestera_hw_rif_create(struct prestera_switch *sw, + struct prestera_iface *iif, u8 *mac, u16 *rif_id); +int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id, + struct prestera_iface *iif); + +/* Virtual Router API */ +int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id); +int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id); + +/* LPM API */ +int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id, + __be32 dst, u32 
dst_len, u32 grp_id); +int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id, + __be32 dst, u32 dst_len); + +/* NH API */ +int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count, + struct prestera_neigh_info *nhs, u32 grp_id); +int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw, + u8 *hw_state, u32 buf_size /* Buffer in bytes */); +int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count, + u32 *grp_id); +int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count, + u32 grp_id); + +/* Event handlers */ +int prestera_hw_event_handler_register(struct prestera_switch *sw, + enum prestera_event_type type, + prestera_event_cb_t fn, + void *arg); +void prestera_hw_event_handler_unregister(struct prestera_switch *sw, + enum prestera_event_type type, + prestera_event_cb_t fn); + +/* RX/TX */ +int prestera_hw_rxtx_init(struct prestera_switch *sw, + struct prestera_rxtx_params *params); + +/* LAG API */ +int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id); +int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id); +int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id, + bool enable); +int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id, + const unsigned char *mac, u16 vid, bool dynamic); +int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id, + const unsigned char *mac, u16 vid); +int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id, + u32 mode); +int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw, + u16 lag_id, u16 vid, u32 mode); + +/* HW trap/drop counters API */ +int +prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code, + enum prestera_hw_cpu_code_cnt_t counter_type, + u64 *packet_count); + +/* Policer API */ +int prestera_hw_policer_create(struct prestera_switch *sw, u8 type, + u32 *policer_id); +int prestera_hw_policer_release(struct prestera_switch *sw, + u32 policer_id); +int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw, + u32 policer_id, u64 cir, u32 cbs); + +/* Flood domain / MDB API */ +int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain); +int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain); +int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain); +int prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain); + +int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb); +int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb); + +#endif /* _PRESTERA_HW_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c new file mode 100644 index 0000000000..4fb886c57c --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -0,0 +1,1525 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. 
All rights reserved */ + +#include <linux/etherdevice.h> +#include <linux/jiffies.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/netdev_features.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/if_vlan.h> +#include <linux/phylink.h> + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_acl.h" +#include "prestera_flow.h" +#include "prestera_span.h" +#include "prestera_rxtx.h" +#include "prestera_devlink.h" +#include "prestera_ethtool.h" +#include "prestera_counter.h" +#include "prestera_switchdev.h" + +#define PRESTERA_MTU_DEFAULT 1536 + +#define PRESTERA_STATS_DELAY_MS 1000 + +#define PRESTERA_MAC_ADDR_NUM_MAX 255 + +static struct workqueue_struct *prestera_wq; +static struct workqueue_struct *prestera_owq; + +void prestera_queue_work(struct work_struct *work) +{ + queue_work(prestera_owq, work); +} + +void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay) +{ + queue_delayed_work(prestera_wq, work, delay); +} + +void prestera_queue_drain(void) +{ + drain_workqueue(prestera_wq); + drain_workqueue(prestera_owq); +} + +int prestera_port_learning_set(struct prestera_port *port, bool learn) +{ + return prestera_hw_port_learning_set(port, learn); +} + +int prestera_port_uc_flood_set(struct prestera_port *port, bool flood) +{ + return prestera_hw_port_uc_flood_set(port, flood); +} + +int prestera_port_mc_flood_set(struct prestera_port *port, bool flood) +{ + return prestera_hw_port_mc_flood_set(port, flood); +} + +int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked) +{ + return prestera_hw_port_br_locked_set(port, br_locked); +} + +int prestera_port_pvid_set(struct prestera_port *port, u16 vid) +{ + enum prestera_accept_frm_type frm_type; + int err; + + frm_type = PRESTERA_ACCEPT_FRAME_TYPE_TAGGED; + + if (vid) { + err = prestera_hw_vlan_port_vid_set(port, vid); + if (err) + return err; + + frm_type = PRESTERA_ACCEPT_FRAME_TYPE_ALL; + } + + err = prestera_hw_port_accept_frm_type(port, frm_type); + if (err) { + /* roll back the hardware PVID and propagate the error instead + * of caching a PVID the hardware rejected + */ + if (frm_type == PRESTERA_ACCEPT_FRAME_TYPE_ALL) + prestera_hw_vlan_port_vid_set(port, port->pvid); + return err; + } + + port->pvid = vid; + return 0; +} + +struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw, + u32 dev_id, u32 hw_id) +{ + struct prestera_port *port = NULL, *tmp; + + read_lock(&sw->port_list_lock); + list_for_each_entry(tmp, &sw->port_list, list) { + if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) { + port = tmp; + break; + } + } + read_unlock(&sw->port_list_lock); + + return port; +} + +struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id) +{ + struct prestera_port *port = NULL, *tmp; + + read_lock(&sw->port_list_lock); + list_for_each_entry(tmp, &sw->port_list, list) { + if (tmp->id == id) { + port = tmp; + break; + } + } + read_unlock(&sw->port_list_lock); + + return port; +} + +struct prestera_switch *prestera_switch_get(struct net_device *dev) +{ + struct prestera_port *port; + + port = prestera_port_dev_lower_find(dev); + return port ? 
port->sw : NULL; +} + +int prestera_port_cfg_mac_read(struct prestera_port *port, + struct prestera_port_mac_config *cfg) +{ + *cfg = port->cfg_mac; + return 0; +} + +int prestera_port_cfg_mac_write(struct prestera_port *port, + struct prestera_port_mac_config *cfg) +{ + int err; + + err = prestera_hw_port_mac_mode_set(port, cfg->admin, + cfg->mode, cfg->inband, cfg->speed, + cfg->duplex, cfg->fec); + if (err) + return err; + + port->cfg_mac = *cfg; + return 0; +} + +static int prestera_port_open(struct net_device *dev) +{ + struct prestera_port *port = netdev_priv(dev); + struct prestera_port_mac_config cfg_mac; + int err = 0; + + if (port->phy_link) { + phylink_start(port->phy_link); + } else { + if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) { + err = prestera_port_cfg_mac_read(port, &cfg_mac); + if (!err) { + cfg_mac.admin = true; + err = prestera_port_cfg_mac_write(port, + &cfg_mac); + } + } else { + port->cfg_phy.admin = true; + err = prestera_hw_port_phy_mode_set(port, true, + port->autoneg, + port->cfg_phy.mode, + port->adver_link_modes, + port->cfg_phy.mdix); + } + } + + netif_start_queue(dev); + + return err; +} + +static int prestera_port_close(struct net_device *dev) +{ + struct prestera_port *port = netdev_priv(dev); + struct prestera_port_mac_config cfg_mac; + int err = 0; + + netif_stop_queue(dev); + + if (port->phy_link) { + phylink_stop(port->phy_link); + phylink_disconnect_phy(port->phy_link); + err = prestera_port_cfg_mac_read(port, &cfg_mac); + if (!err) { + cfg_mac.admin = false; + prestera_port_cfg_mac_write(port, &cfg_mac); + } + } else { + if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) { + err = prestera_port_cfg_mac_read(port, &cfg_mac); + if (!err) { + cfg_mac.admin = false; + prestera_port_cfg_mac_write(port, &cfg_mac); + } + } else { + port->cfg_phy.admin = false; + err = prestera_hw_port_phy_mode_set(port, false, port->autoneg, + port->cfg_phy.mode, + port->adver_link_modes, + port->cfg_phy.mdix); + } + } + + return err; +} + +static void +prestera_port_mac_state_cache_read(struct prestera_port *port, + struct prestera_port_mac_state *state) +{ + spin_lock(&port->state_mac_lock); + *state = port->state_mac; + spin_unlock(&port->state_mac_lock); +} + +static void +prestera_port_mac_state_cache_write(struct prestera_port *port, + struct prestera_port_mac_state *state) +{ + spin_lock(&port->state_mac_lock); + port->state_mac = *state; + spin_unlock(&port->state_mac_lock); +} + +static struct prestera_port *prestera_pcs_to_port(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct prestera_port, phylink_pcs); +} + +static void prestera_mac_config(struct phylink_config *config, + unsigned int an_mode, + const struct phylink_link_state *state) +{ +} + +static void prestera_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct prestera_port *port = netdev_priv(ndev); + struct prestera_port_mac_state state_mac; + + /* Invalidate. Parameters will update on next link event. 
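+ * prestera_pcs_get_state() treats an invalidated cache as link down, + * so the port is reported down until the firmware's next MAC event + * repopulates these fields.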
*/ + memset(&state_mac, 0, sizeof(state_mac)); + state_mac.valid = false; + prestera_port_mac_state_cache_write(port, &state_mac); +} + +static void prestera_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ +} + +static struct phylink_pcs * +prestera_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct net_device *dev = to_net_dev(config->dev); + struct prestera_port *port = netdev_priv(dev); + + return &port->phylink_pcs; +} + +static void prestera_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct prestera_port *port = container_of(pcs, struct prestera_port, + phylink_pcs); + struct prestera_port_mac_state smac; + + prestera_port_mac_state_cache_read(port, &smac); + + if (smac.valid) { + state->link = smac.oper ? 1 : 0; + /* AN is completed when the port is up */ + state->an_complete = (smac.oper && port->autoneg) ? 1 : 0; + state->speed = smac.speed; + state->duplex = smac.duplex; + } else { + state->link = 0; + state->an_complete = 0; + } +} + +static int prestera_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct prestera_port *port = prestera_pcs_to_port(pcs); + struct prestera_port_mac_config cfg_mac; + int err; + + err = prestera_port_cfg_mac_read(port, &cfg_mac); + if (err) + return err; + + cfg_mac.admin = true; + cfg_mac.fec = PRESTERA_PORT_FEC_OFF; + cfg_mac.inband = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED; + + switch (interface) { + case PHY_INTERFACE_MODE_10GBASER: + cfg_mac.speed = SPEED_10000; + cfg_mac.mode = PRESTERA_MAC_MODE_SR_LR; + break; + case PHY_INTERFACE_MODE_2500BASEX: + cfg_mac.speed = SPEED_2500; + cfg_mac.duplex = DUPLEX_FULL; + cfg_mac.mode = PRESTERA_MAC_MODE_SGMII; + break; + case PHY_INTERFACE_MODE_SGMII: + cfg_mac.mode = PRESTERA_MAC_MODE_SGMII; + break; + case PHY_INTERFACE_MODE_1000BASEX: + default: + cfg_mac.speed = SPEED_1000; + cfg_mac.duplex = DUPLEX_FULL; + cfg_mac.mode = PRESTERA_MAC_MODE_1000BASE_X; + break; + } + + err = prestera_port_cfg_mac_write(port, &cfg_mac); + if (err) + return err; + + return 0; +} + +static void prestera_pcs_an_restart(struct phylink_pcs *pcs) +{ + /* TODO: add 1000baseX AN restart support + * (The firmware cannot restart 1000baseX autoneg yet; once that + * support lands, this hook can be implemented, so for now it is + * intentionally empty.) 
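+ * phylink reaches this hook through ethtool nway_reset when inband + * autoneg is in use, so that operation is currently a no-op for + * 1000baseX links.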
+ */ +} + +static const struct phylink_mac_ops prestera_mac_ops = { + .mac_select_pcs = prestera_mac_select_pcs, + .mac_config = prestera_mac_config, + .mac_link_down = prestera_mac_link_down, + .mac_link_up = prestera_mac_link_up, +}; + +static const struct phylink_pcs_ops prestera_pcs_ops = { + .pcs_get_state = prestera_pcs_get_state, + .pcs_config = prestera_pcs_config, + .pcs_an_restart = prestera_pcs_an_restart, +}; + +static int prestera_port_sfp_bind(struct prestera_port *port) +{ + struct prestera_switch *sw = port->sw; + struct device_node *ports, *node; + struct fwnode_handle *fwnode; + struct phylink *phy_link; + int err = 0; + + if (!sw->np) + return 0; + + of_node_get(sw->np); + ports = of_find_node_by_name(sw->np, "ports"); + + for_each_child_of_node(ports, node) { + int num; + + err = of_property_read_u32(node, "prestera,port-num", &num); + if (err) { + dev_err(sw->dev->dev, + "device node %pOF has no valid prestera,port-num property: %d\n", + node, err); + goto out; + } + + if (port->fp_id != num) + continue; + + port->phylink_pcs.ops = &prestera_pcs_ops; + port->phylink_pcs.neg_mode = true; + + port->phy_config.dev = &port->dev->dev; + port->phy_config.type = PHYLINK_NETDEV; + + fwnode = of_fwnode_handle(node); + + __set_bit(PHY_INTERFACE_MODE_10GBASER, + port->phy_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + port->phy_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, + port->phy_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + port->phy_config.supported_interfaces); + + port->phy_config.mac_capabilities = + MAC_1000 | MAC_2500FD | MAC_10000FD; + + phy_link = phylink_create(&port->phy_config, fwnode, + PHY_INTERFACE_MODE_INTERNAL, + &prestera_mac_ops); + if (IS_ERR(phy_link)) { + netdev_err(port->dev, "failed to create phylink\n"); + err = PTR_ERR(phy_link); + goto out; + } + + port->phy_link = phy_link; + break; + } + +out: + of_node_put(node); + of_node_put(ports); + return err; +} + +static int prestera_port_sfp_unbind(struct prestera_port *port) +{ + if (port->phy_link) + phylink_destroy(port->phy_link); + + return 0; +} + +static netdev_tx_t prestera_port_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + return prestera_rxtx_xmit(netdev_priv(dev), skb); +} + +int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr) +{ + if (!is_valid_ether_addr(addr)) + return -EADDRNOTAVAIL; + + /* firmware requires that the port's MAC address contain the first + * 5 bytes of the base MAC address + */ + if (memcmp(port->sw->base_mac, addr, ETH_ALEN - 1)) + return -EINVAL; + + return 0; +} + +static int prestera_port_set_mac_address(struct net_device *dev, void *p) +{ + struct prestera_port *port = netdev_priv(dev); + struct sockaddr *addr = p; + int err; + + err = prestera_is_valid_mac_addr(port, addr->sa_data); + if (err) + return err; + + err = prestera_hw_port_mac_set(port, addr->sa_data); + if (err) + return err; + + eth_hw_addr_set(dev, addr->sa_data); + + return 0; +} + +static int prestera_port_change_mtu(struct net_device *dev, int mtu) +{ + struct prestera_port *port = netdev_priv(dev); + int err; + + err = prestera_hw_port_mtu_set(port, mtu); + if (err) + return err; + + dev->mtu = mtu; + + return 0; +} + +static void prestera_port_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct prestera_port *port = netdev_priv(dev); + struct prestera_port_stats *port_stats = &port->cached_hw_stats.stats; + + stats->rx_packets = port_stats->broadcast_frames_received + + 
port_stats->multicast_frames_received + + port_stats->unicast_frames_received; + + stats->tx_packets = port_stats->broadcast_frames_sent + + port_stats->multicast_frames_sent + + port_stats->unicast_frames_sent; + + stats->rx_bytes = port_stats->good_octets_received; + + stats->tx_bytes = port_stats->good_octets_sent; + + stats->rx_errors = port_stats->rx_error_frame_received; + stats->tx_errors = port_stats->mac_trans_error; + + stats->rx_dropped = port_stats->buffer_overrun; + stats->tx_dropped = 0; + + stats->multicast = port_stats->multicast_frames_received; + stats->collisions = port_stats->excessive_collision; + + stats->rx_crc_errors = port_stats->bad_crc; +} + +static void prestera_port_get_hw_stats(struct prestera_port *port) +{ + prestera_hw_port_stats_get(port, &port->cached_hw_stats.stats); +} + +static void prestera_port_stats_update(struct work_struct *work) +{ + struct prestera_port *port = + container_of(work, struct prestera_port, + cached_hw_stats.caching_dw.work); + + prestera_port_get_hw_stats(port); + + queue_delayed_work(prestera_wq, &port->cached_hw_stats.caching_dw, + msecs_to_jiffies(PRESTERA_STATS_DELAY_MS)); +} + +static int prestera_port_setup_tc(struct net_device *dev, + enum tc_setup_type type, + void *type_data) +{ + struct prestera_port *port = netdev_priv(dev); + + switch (type) { + case TC_SETUP_BLOCK: + return prestera_flow_block_setup(port, type_data); + default: + return -EOPNOTSUPP; + } +} + +static const struct net_device_ops prestera_netdev_ops = { + .ndo_open = prestera_port_open, + .ndo_stop = prestera_port_close, + .ndo_start_xmit = prestera_port_xmit, + .ndo_setup_tc = prestera_port_setup_tc, + .ndo_change_mtu = prestera_port_change_mtu, + .ndo_get_stats64 = prestera_port_get_stats64, + .ndo_set_mac_address = prestera_port_set_mac_address, +}; + +int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes) +{ + int err; + + if (port->autoneg && port->adver_link_modes == link_modes) + return 0; + + err = prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin, + true, 0, link_modes, + port->cfg_phy.mdix); + if (err) + return err; + + port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF); + port->adver_link_modes = link_modes; + port->cfg_phy.mode = 0; + port->autoneg = true; + + return 0; +} + +static void prestera_port_list_add(struct prestera_port *port) +{ + write_lock(&port->sw->port_list_lock); + list_add(&port->list, &port->sw->port_list); + write_unlock(&port->sw->port_list_lock); +} + +static void prestera_port_list_del(struct prestera_port *port) +{ + write_lock(&port->sw->port_list_lock); + list_del(&port->list); + write_unlock(&port->sw->port_list_lock); +} + +static int prestera_port_create(struct prestera_switch *sw, u32 id) +{ + struct prestera_port_mac_config cfg_mac; + struct prestera_port *port; + struct net_device *dev; + int err; + + dev = alloc_etherdev(sizeof(*port)); + if (!dev) + return -ENOMEM; + + port = netdev_priv(dev); + + INIT_LIST_HEAD(&port->vlans_list); + port->pvid = PRESTERA_DEFAULT_VID; + port->lag = NULL; + port->dev = dev; + port->id = id; + port->sw = sw; + + spin_lock_init(&port->state_mac_lock); + + err = prestera_hw_port_info_get(port, &port->dev_id, &port->hw_id, + &port->fp_id); + if (err) { + dev_err(prestera_dev(sw), "Failed to get port(%u) info\n", id); + goto err_port_info_get; + } + + err = prestera_devlink_port_register(port); + if (err) + goto err_dl_port_register; + + dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC; + dev->netdev_ops = &prestera_netdev_ops; + dev->ethtool_ops = 
&prestera_ethtool_ops; + SET_NETDEV_DEV(dev, sw->dev->dev); + SET_NETDEV_DEVLINK_PORT(dev, &port->dl_port); + + if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) + netif_carrier_off(dev); + + dev->mtu = min_t(unsigned int, sw->mtu_max, PRESTERA_MTU_DEFAULT); + dev->min_mtu = sw->mtu_min; + dev->max_mtu = sw->mtu_max; + + err = prestera_hw_port_mtu_set(port, dev->mtu); + if (err) { + dev_err(prestera_dev(sw), "Failed to set port(%u) mtu(%d)\n", + id, dev->mtu); + goto err_port_init; + } + + if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) { + err = -EINVAL; + goto err_port_init; + } + + eth_hw_addr_gen(dev, sw->base_mac, port->fp_id); + /* firmware requires that the port's MAC address consist of the first + * 5 bytes of the base MAC address + */ + if (memcmp(dev->dev_addr, sw->base_mac, ETH_ALEN - 1)) { + dev_warn(prestera_dev(sw), "Port MAC address wraps for port(%u)\n", id); + dev_addr_mod(dev, 0, sw->base_mac, ETH_ALEN - 1); + } + + err = prestera_hw_port_mac_set(port, dev->dev_addr); + if (err) { + dev_err(prestera_dev(sw), "Failed to set port(%u) mac addr\n", id); + goto err_port_init; + } + + err = prestera_hw_port_cap_get(port, &port->caps); + if (err) { + dev_err(prestera_dev(sw), "Failed to get port(%u) caps\n", id); + goto err_port_init; + } + + port->adver_link_modes = port->caps.supp_link_modes; + port->adver_fec = 0; + port->autoneg = true; + + /* initialize config mac */ + if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) { + cfg_mac.admin = true; + cfg_mac.mode = PRESTERA_MAC_MODE_INTERNAL; + } else { + cfg_mac.admin = false; + cfg_mac.mode = PRESTERA_MAC_MODE_MAX; + } + cfg_mac.inband = 0; + cfg_mac.speed = 0; + cfg_mac.duplex = DUPLEX_UNKNOWN; + cfg_mac.fec = PRESTERA_PORT_FEC_OFF; + + err = prestera_port_cfg_mac_write(port, &cfg_mac); + if (err) { + dev_err(prestera_dev(sw), + "Failed to set port(%u) mac mode\n", id); + goto err_port_init; + } + + /* initialize config phy (if the port has an integral PHY) */ + if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) { + port->cfg_phy.mdix = ETH_TP_MDI_AUTO; + port->cfg_phy.admin = false; + err = prestera_hw_port_phy_mode_set(port, + port->cfg_phy.admin, + false, 0, 0, + port->cfg_phy.mdix); + if (err) { + dev_err(prestera_dev(sw), + "Failed to set port(%u) phy mode\n", id); + goto err_port_init; + } + } + + err = prestera_rxtx_port_init(port); + if (err) + goto err_port_init; + + INIT_DELAYED_WORK(&port->cached_hw_stats.caching_dw, + &prestera_port_stats_update); + + prestera_port_list_add(port); + + err = register_netdev(dev); + if (err) + goto err_register_netdev; + + err = prestera_port_sfp_bind(port); + if (err) + goto err_sfp_bind; + + return 0; + +err_sfp_bind: + unregister_netdev(dev); +err_register_netdev: + prestera_port_list_del(port); +err_port_init: + prestera_devlink_port_unregister(port); +err_dl_port_register: +err_port_info_get: + free_netdev(dev); + return err; +} + +static void prestera_port_destroy(struct prestera_port *port) +{ + struct net_device *dev = port->dev; + + cancel_delayed_work_sync(&port->cached_hw_stats.caching_dw); + unregister_netdev(dev); + prestera_port_list_del(port); + prestera_devlink_port_unregister(port); + free_netdev(dev); +} + +static void prestera_destroy_ports(struct prestera_switch *sw) +{ + struct prestera_port *port, *tmp; + + list_for_each_entry_safe(port, tmp, &sw->port_list, list) + prestera_port_destroy(port); +} + +static int prestera_create_ports(struct prestera_switch *sw) +{ + struct prestera_port *port, *tmp; + u32 port_idx; + int err; + + for (port_idx = 0; port_idx < 
sw->port_count; port_idx++) { + err = prestera_port_create(sw, port_idx); + if (err) + goto err_port_create; + } + + return 0; + +err_port_create: + list_for_each_entry_safe(port, tmp, &sw->port_list, list) { + prestera_port_sfp_unbind(port); + prestera_port_destroy(port); + } + + return err; +} + +static void prestera_port_handle_event(struct prestera_switch *sw, + struct prestera_event *evt, void *arg) +{ + struct prestera_port_mac_state smac; + struct prestera_port_event *pevt; + struct delayed_work *caching_dw; + struct prestera_port *port; + + if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) { + pevt = &evt->port_evt; + port = prestera_find_port(sw, pevt->port_id); + if (!port || !port->dev) + return; + + caching_dw = &port->cached_hw_stats.caching_dw; + + memset(&smac, 0, sizeof(smac)); + smac.valid = true; + smac.oper = pevt->data.mac.oper; + if (smac.oper) { + smac.mode = pevt->data.mac.mode; + smac.speed = pevt->data.mac.speed; + smac.duplex = pevt->data.mac.duplex; + smac.fc = pevt->data.mac.fc; + smac.fec = pevt->data.mac.fec; + } + prestera_port_mac_state_cache_write(port, &smac); + + if (port->state_mac.oper) { + if (port->phy_link) + phylink_mac_change(port->phy_link, true); + else + netif_carrier_on(port->dev); + + if (!delayed_work_pending(caching_dw)) + queue_delayed_work(prestera_wq, caching_dw, 0); + } else { + if (port->phy_link) + phylink_mac_change(port->phy_link, false); + else if (netif_running(port->dev) && netif_carrier_ok(port->dev)) + netif_carrier_off(port->dev); + + if (delayed_work_pending(caching_dw)) + cancel_delayed_work(caching_dw); + } + } +} + +static int prestera_event_handlers_register(struct prestera_switch *sw) +{ + return prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_PORT, + prestera_port_handle_event, + NULL); +} + +static void prestera_event_handlers_unregister(struct prestera_switch *sw) +{ + prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_PORT, + prestera_port_handle_event); +} + +static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw) +{ + int ret = 0; + + if (sw->np) + ret = of_get_mac_address(sw->np, sw->base_mac); + if (!is_valid_ether_addr(sw->base_mac) || ret) { + eth_random_addr(sw->base_mac); + dev_info(prestera_dev(sw), "using random base mac address\n"); + } + + return prestera_hw_switch_mac_set(sw, sw->base_mac); +} + +struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id) +{ + return id < sw->lag_max ? 
&sw->lags[id] : NULL; +} + +static struct prestera_lag *prestera_lag_by_dev(struct prestera_switch *sw, + struct net_device *dev) +{ + struct prestera_lag *lag; + u16 id; + + for (id = 0; id < sw->lag_max; id++) { + lag = &sw->lags[id]; + if (lag->dev == dev) + return lag; + } + + return NULL; +} + +int prestera_lag_id(struct prestera_switch *sw, + struct net_device *lag_dev, u16 *lag_id) +{ + struct prestera_lag *lag; + int free_id = -1; + int id; + + for (id = 0; id < sw->lag_max; id++) { + lag = prestera_lag_by_id(sw, id); + if (lag->member_count) { + if (lag->dev == lag_dev) { + *lag_id = id; + return 0; + } + } else if (free_id < 0) { + free_id = id; + } + } + if (free_id < 0) + return -ENOSPC; + *lag_id = free_id; + return 0; +} + +static struct prestera_lag *prestera_lag_create(struct prestera_switch *sw, + struct net_device *lag_dev) +{ + struct prestera_lag *lag = NULL; + u16 id; + + for (id = 0; id < sw->lag_max; id++) { + lag = &sw->lags[id]; + if (!lag->dev) + break; + } + if (lag) { + INIT_LIST_HEAD(&lag->members); + lag->dev = lag_dev; + } + + return lag; +} + +static void prestera_lag_destroy(struct prestera_switch *sw, + struct prestera_lag *lag) +{ + WARN_ON(!list_empty(&lag->members)); + lag->member_count = 0; + lag->dev = NULL; +} + +static int prestera_lag_port_add(struct prestera_port *port, + struct net_device *lag_dev) +{ + struct prestera_switch *sw = port->sw; + struct prestera_lag *lag; + int err; + + lag = prestera_lag_by_dev(sw, lag_dev); + if (!lag) { + lag = prestera_lag_create(sw, lag_dev); + if (!lag) + return -ENOSPC; + } + + if (lag->member_count >= sw->lag_member_max) + return -ENOSPC; + + err = prestera_hw_lag_member_add(port, lag->lag_id); + if (err) { + if (!lag->member_count) + prestera_lag_destroy(sw, lag); + return err; + } + + list_add(&port->lag_member, &lag->members); + lag->member_count++; + port->lag = lag; + + return 0; +} + +static int prestera_lag_port_del(struct prestera_port *port) +{ + struct prestera_switch *sw = port->sw; + struct prestera_lag *lag = port->lag; + int err; + + if (!lag || !lag->member_count) + return -EINVAL; + + err = prestera_hw_lag_member_del(port, lag->lag_id); + if (err) + return err; + + list_del(&port->lag_member); + lag->member_count--; + port->lag = NULL; + + if (netif_is_bridge_port(lag->dev)) { + struct net_device *br_dev; + + br_dev = netdev_master_upper_dev_get(lag->dev); + + prestera_bridge_port_leave(br_dev, port); + } + + if (!lag->member_count) + prestera_lag_destroy(sw, lag); + + return 0; +} + +bool prestera_port_is_lag_member(const struct prestera_port *port) +{ + return !!port->lag; +} + +u16 prestera_port_lag_id(const struct prestera_port *port) +{ + return port->lag->lag_id; +} + +static int prestera_lag_init(struct prestera_switch *sw) +{ + u16 id; + + sw->lags = kcalloc(sw->lag_max, sizeof(*sw->lags), GFP_KERNEL); + if (!sw->lags) + return -ENOMEM; + + for (id = 0; id < sw->lag_max; id++) + sw->lags[id].lag_id = id; + + return 0; +} + +static void prestera_lag_fini(struct prestera_switch *sw) +{ + u8 idx; + + for (idx = 0; idx < sw->lag_max; idx++) + WARN_ON(sw->lags[idx].member_count); + + kfree(sw->lags); +} + +bool prestera_netdev_check(const struct net_device *dev) +{ + return dev->netdev_ops == &prestera_netdev_ops; +} + +static int prestera_lower_dev_walk(struct net_device *dev, + struct netdev_nested_priv *priv) +{ + struct prestera_port **pport = (struct prestera_port **)priv->data; + + if (prestera_netdev_check(dev)) { + *pport = netdev_priv(dev); + return 1; + } + + return 0; +} + 
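+/* Resolve the prestera port (if any) beneath an arbitrary upper device + * (bridge, bond or VLAN) by walking its lower devices with + * prestera_lower_dev_walk() above. A minimal usage sketch, assuming a + * hypothetical caller holding a bridge device: + * + * port = prestera_port_dev_lower_find(bridge_dev); + * if (!port) + * return 0; (no prestera port below this device) + */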
+struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev) +{ + struct prestera_port *port = NULL; + struct netdev_nested_priv priv = { + .data = (void *)&port, + }; + + if (prestera_netdev_check(dev)) + return netdev_priv(dev); + + netdev_walk_all_lower_dev(dev, prestera_lower_dev_walk, &priv); + + return port; +} + +static int prestera_netdev_port_lower_event(struct net_device *dev, + unsigned long event, void *ptr) +{ + struct netdev_notifier_changelowerstate_info *info = ptr; + struct netdev_lag_lower_state_info *lower_state_info; + struct prestera_port *port = netdev_priv(dev); + bool enabled; + + if (!netif_is_lag_port(dev)) + return 0; + if (!prestera_port_is_lag_member(port)) + return 0; + + lower_state_info = info->lower_state_info; + enabled = lower_state_info->link_up && lower_state_info->tx_enabled; + + return prestera_hw_lag_member_enable(port, port->lag->lag_id, enabled); +} + +static bool prestera_lag_master_check(struct net_device *lag_dev, + struct netdev_lag_upper_info *info, + struct netlink_ext_ack *ext_ack) +{ + if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + NL_SET_ERR_MSG_MOD(ext_ack, "Unsupported LAG Tx type"); + return false; + } + + return true; +} + +static int prestera_netdev_port_event(struct net_device *lower, + struct net_device *dev, + unsigned long event, void *ptr) +{ + struct netdev_notifier_info *info = ptr; + struct netdev_notifier_changeupper_info *cu_info; + struct prestera_port *port = netdev_priv(dev); + struct netlink_ext_ack *extack; + struct net_device *upper; + + extack = netdev_notifier_info_to_extack(info); + cu_info = container_of(info, + struct netdev_notifier_changeupper_info, + info); + + switch (event) { + case NETDEV_PRECHANGEUPPER: + upper = cu_info->upper_dev; + if (!netif_is_bridge_master(upper) && + !netif_is_lag_master(upper)) { + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); + return -EINVAL; + } + + if (!cu_info->linking) + break; + + if (netdev_has_any_upper_dev(upper)) { + NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved"); + return -EINVAL; + } + + if (netif_is_lag_master(upper) && + !prestera_lag_master_check(upper, cu_info->upper_info, extack)) + return -EOPNOTSUPP; + if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Master device is a LAG master and port has a VLAN"); + return -EINVAL; + } + if (netif_is_lag_port(dev) && is_vlan_dev(upper) && + !netif_is_lag_master(vlan_dev_real_dev(upper))) { + NL_SET_ERR_MSG_MOD(extack, + "Can not put a VLAN on a LAG port"); + return -EINVAL; + } + break; + + case NETDEV_CHANGEUPPER: + upper = cu_info->upper_dev; + if (netif_is_bridge_master(upper)) { + if (cu_info->linking) + return prestera_bridge_port_join(upper, port, + extack); + else + prestera_bridge_port_leave(upper, port); + } else if (netif_is_lag_master(upper)) { + if (cu_info->linking) + return prestera_lag_port_add(port, upper); + else + prestera_lag_port_del(port); + } + break; + + case NETDEV_CHANGELOWERSTATE: + return prestera_netdev_port_lower_event(dev, event, ptr); + } + + return 0; +} + +static int prestera_netdevice_lag_event(struct net_device *lag_dev, + unsigned long event, void *ptr) +{ + struct net_device *dev; + struct list_head *iter; + int err; + + netdev_for_each_lower_dev(lag_dev, dev, iter) { + if (prestera_netdev_check(dev)) { + err = prestera_netdev_port_event(lag_dev, dev, event, + ptr); + if (err) + return err; + } + } + + return 0; +} + +static int prestera_netdev_event_handler(struct notifier_block *nb, + unsigned long 
event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int err = 0; + + if (prestera_netdev_check(dev)) + err = prestera_netdev_port_event(dev, dev, event, ptr); + else if (netif_is_lag_master(dev)) + err = prestera_netdevice_lag_event(dev, event, ptr); + + return notifier_from_errno(err); +} + +struct prestera_mdb_entry * +prestera_mdb_entry_create(struct prestera_switch *sw, + const unsigned char *addr, u16 vid) +{ + struct prestera_flood_domain *flood_domain; + struct prestera_mdb_entry *mdb_entry; + + mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL); + if (!mdb_entry) + goto err_mdb_alloc; + + flood_domain = prestera_flood_domain_create(sw); + if (!flood_domain) + goto err_flood_domain_create; + + mdb_entry->sw = sw; + mdb_entry->vid = vid; + mdb_entry->flood_domain = flood_domain; + ether_addr_copy(mdb_entry->addr, addr); + + if (prestera_hw_mdb_create(mdb_entry)) + goto err_mdb_hw_create; + + return mdb_entry; + +err_mdb_hw_create: + prestera_flood_domain_destroy(flood_domain); +err_flood_domain_create: + kfree(mdb_entry); +err_mdb_alloc: + return NULL; +} + +void prestera_mdb_entry_destroy(struct prestera_mdb_entry *mdb_entry) +{ + prestera_hw_mdb_destroy(mdb_entry); + prestera_flood_domain_destroy(mdb_entry->flood_domain); + kfree(mdb_entry); +} + +struct prestera_flood_domain * +prestera_flood_domain_create(struct prestera_switch *sw) +{ + struct prestera_flood_domain *domain; + + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) + return NULL; + + domain->sw = sw; + + if (prestera_hw_flood_domain_create(domain)) { + kfree(domain); + return NULL; + } + + INIT_LIST_HEAD(&domain->flood_domain_port_list); + + return domain; +} + +void prestera_flood_domain_destroy(struct prestera_flood_domain *flood_domain) +{ + WARN_ON(!list_empty(&flood_domain->flood_domain_port_list)); + WARN_ON_ONCE(prestera_hw_flood_domain_destroy(flood_domain)); + kfree(flood_domain); +} + +int +prestera_flood_domain_port_create(struct prestera_flood_domain *flood_domain, + struct net_device *dev, + u16 vid) +{ + struct prestera_flood_domain_port *flood_domain_port; + bool is_first_port_in_list = false; + int err; + + flood_domain_port = kzalloc(sizeof(*flood_domain_port), GFP_KERNEL); + if (!flood_domain_port) { + err = -ENOMEM; + goto err_port_alloc; + } + + flood_domain_port->vid = vid; + + if (list_empty(&flood_domain->flood_domain_port_list)) + is_first_port_in_list = true; + + list_add(&flood_domain_port->flood_domain_port_node, + &flood_domain->flood_domain_port_list); + + flood_domain_port->flood_domain = flood_domain; + flood_domain_port->dev = dev; + + if (!is_first_port_in_list) { + err = prestera_hw_flood_domain_ports_reset(flood_domain); + if (err) + goto err_prestera_mdb_port_create_hw; + } + + err = prestera_hw_flood_domain_ports_set(flood_domain); + if (err) + goto err_prestera_mdb_port_create_hw; + + return 0; + +err_prestera_mdb_port_create_hw: + list_del(&flood_domain_port->flood_domain_port_node); + kfree(flood_domain_port); +err_port_alloc: + return err; +} + +void +prestera_flood_domain_port_destroy(struct prestera_flood_domain_port *port) +{ + struct prestera_flood_domain *flood_domain = port->flood_domain; + + list_del(&port->flood_domain_port_node); + + WARN_ON_ONCE(prestera_hw_flood_domain_ports_reset(flood_domain)); + + if (!list_empty(&flood_domain->flood_domain_port_list)) + WARN_ON_ONCE(prestera_hw_flood_domain_ports_set(flood_domain)); + + kfree(port); +} + +struct prestera_flood_domain_port * +prestera_flood_domain_port_find(struct 
prestera_flood_domain *flood_domain, + struct net_device *dev, u16 vid) +{ + struct prestera_flood_domain_port *flood_domain_port; + + list_for_each_entry(flood_domain_port, + &flood_domain->flood_domain_port_list, + flood_domain_port_node) + if (flood_domain_port->dev == dev && + vid == flood_domain_port->vid) + return flood_domain_port; + + return NULL; +} + +static int prestera_netdev_event_handler_register(struct prestera_switch *sw) +{ + sw->netdev_nb.notifier_call = prestera_netdev_event_handler; + + return register_netdevice_notifier(&sw->netdev_nb); +} + +static void prestera_netdev_event_handler_unregister(struct prestera_switch *sw) +{ + unregister_netdevice_notifier(&sw->netdev_nb); +} + +static int prestera_switch_init(struct prestera_switch *sw) +{ + int err; + + sw->np = sw->dev->dev->of_node; + + err = prestera_hw_switch_init(sw); + if (err) { + dev_err(prestera_dev(sw), "Failed to init Switch device\n"); + return err; + } + + rwlock_init(&sw->port_list_lock); + INIT_LIST_HEAD(&sw->port_list); + + err = prestera_switch_set_base_mac_addr(sw); + if (err) + return err; + + err = prestera_netdev_event_handler_register(sw); + if (err) + return err; + + err = prestera_router_init(sw); + if (err) + goto err_router_init; + + err = prestera_switchdev_init(sw); + if (err) + goto err_swdev_register; + + err = prestera_rxtx_switch_init(sw); + if (err) + goto err_rxtx_register; + + err = prestera_event_handlers_register(sw); + if (err) + goto err_handlers_register; + + err = prestera_counter_init(sw); + if (err) + goto err_counter_init; + + err = prestera_acl_init(sw); + if (err) + goto err_acl_init; + + err = prestera_span_init(sw); + if (err) + goto err_span_init; + + err = prestera_devlink_traps_register(sw); + if (err) + goto err_dl_register; + + err = prestera_lag_init(sw); + if (err) + goto err_lag_init; + + err = prestera_create_ports(sw); + if (err) + goto err_ports_create; + + prestera_devlink_register(sw); + return 0; + +err_ports_create: + prestera_lag_fini(sw); +err_lag_init: + prestera_devlink_traps_unregister(sw); +err_dl_register: + prestera_span_fini(sw); +err_span_init: + prestera_acl_fini(sw); +err_acl_init: + prestera_counter_fini(sw); +err_counter_init: + prestera_event_handlers_unregister(sw); +err_handlers_register: + prestera_rxtx_switch_fini(sw); +err_rxtx_register: + prestera_switchdev_fini(sw); +err_swdev_register: + prestera_router_fini(sw); +err_router_init: + prestera_netdev_event_handler_unregister(sw); + prestera_hw_switch_fini(sw); + + return err; +} + +static void prestera_switch_fini(struct prestera_switch *sw) +{ + prestera_devlink_unregister(sw); + prestera_destroy_ports(sw); + prestera_lag_fini(sw); + prestera_devlink_traps_unregister(sw); + prestera_span_fini(sw); + prestera_acl_fini(sw); + prestera_counter_fini(sw); + prestera_event_handlers_unregister(sw); + prestera_rxtx_switch_fini(sw); + prestera_switchdev_fini(sw); + prestera_router_fini(sw); + prestera_netdev_event_handler_unregister(sw); + prestera_hw_switch_fini(sw); + of_node_put(sw->np); +} + +int prestera_device_register(struct prestera_device *dev) +{ + struct prestera_switch *sw; + int err; + + sw = prestera_devlink_alloc(dev); + if (!sw) + return -ENOMEM; + + dev->priv = sw; + sw->dev = dev; + + err = prestera_switch_init(sw); + if (err) { + prestera_devlink_free(sw); + return err; + } + + return 0; +} +EXPORT_SYMBOL(prestera_device_register); + +void prestera_device_unregister(struct prestera_device *dev) +{ + struct prestera_switch *sw = dev->priv; + + prestera_switch_fini(sw); + 
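/* prestera_switch_fini() above has torn down the ports, notifiers and hw state that referenced the devlink instance, so freeing it below is safe */ +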
prestera_devlink_free(sw); +} +EXPORT_SYMBOL(prestera_device_unregister); + +static int __init prestera_module_init(void) +{ + prestera_wq = alloc_workqueue("prestera", 0, 0); + if (!prestera_wq) + return -ENOMEM; + + prestera_owq = alloc_ordered_workqueue("prestera_ordered", 0); + if (!prestera_owq) { + destroy_workqueue(prestera_wq); + return -ENOMEM; + } + + return 0; +} + +static void __exit prestera_module_exit(void) +{ + destroy_workqueue(prestera_wq); + destroy_workqueue(prestera_owq); +} + +module_init(prestera_module_init); +module_exit(prestera_module_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Marvell Prestera switch driver"); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.c b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c new file mode 100644 index 0000000000..1da9c1bc1e --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2022 Marvell International Ltd. All rights reserved */ + +#include <linux/kernel.h> +#include <linux/list.h> + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_flow.h" +#include "prestera_flower.h" +#include "prestera_matchall.h" +#include "prestera_span.h" + +static int prestera_mall_prio_check(struct prestera_flow_block *block, + struct tc_cls_matchall_offload *f) +{ + u32 flower_prio_min; + u32 flower_prio_max; + int err; + + err = prestera_flower_prio_get(block, f->common.chain_index, + &flower_prio_min, &flower_prio_max); + if (err == -ENOENT) + /* No flower filters installed on this chain. */ + return 0; + + if (err) { + NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities"); + return err; + } + + if (f->common.prio <= flower_prio_max && !block->ingress) { + NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules"); + return -EOPNOTSUPP; + } + if (f->common.prio >= flower_prio_min && block->ingress) { + NL_SET_ERR_MSG(f->common.extack, "Failed to add behind of existing flower rules"); + return -EOPNOTSUPP; + } + + return 0; +} + +int prestera_mall_prio_get(struct prestera_flow_block *block, + u32 *prio_min, u32 *prio_max) +{ + if (!block->mall.bound) + return -ENOENT; + + *prio_min = block->mall.prio_min; + *prio_max = block->mall.prio_max; + return 0; +} + +static void prestera_mall_prio_update(struct prestera_flow_block *block, + struct tc_cls_matchall_offload *f) +{ + block->mall.prio_min = min(block->mall.prio_min, f->common.prio); + block->mall.prio_max = max(block->mall.prio_max, f->common.prio); +} + +int prestera_mall_replace(struct prestera_flow_block *block, + struct tc_cls_matchall_offload *f) +{ + struct prestera_flow_block_binding *binding; + __be16 protocol = f->common.protocol; + struct flow_action_entry *act; + struct prestera_port *port; + int err; + + if (!flow_offload_has_one_action(&f->rule->action)) { + NL_SET_ERR_MSG(f->common.extack, + "Only singular actions are supported"); + return -EOPNOTSUPP; + } + + act = &f->rule->action.entries[0]; + + if (!prestera_netdev_check(act->dev)) { + NL_SET_ERR_MSG(f->common.extack, + "Only Marvell Prestera port is supported"); + return -EINVAL; + } + if (!tc_cls_can_offload_and_chain0(act->dev, &f->common)) + return -EOPNOTSUPP; + if (act->id != FLOW_ACTION_MIRRED) + return -EOPNOTSUPP; + if (protocol != htons(ETH_P_ALL)) + return -EOPNOTSUPP; + + err = prestera_mall_prio_check(block, f); + if (err) + return err; + + port = netdev_priv(act->dev); + + 
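/* bind the mirror (SPAN) session to every port binding of this block; -EEXIST is returned as-is, any other error unwinds the bindings installed so far via the rollback path below */ +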
list_for_each_entry(binding, &block->binding_list, list) { + err = prestera_span_rule_add(binding, port, block->ingress); + if (err == -EEXIST) + return err; + if (err) + goto rollback; + } + + prestera_mall_prio_update(block, f); + + block->mall.bound = true; + return 0; + +rollback: + list_for_each_entry_continue_reverse(binding, + &block->binding_list, list) + prestera_span_rule_del(binding, block->ingress); + return err; +} + +void prestera_mall_destroy(struct prestera_flow_block *block) +{ + struct prestera_flow_block_binding *binding; + + list_for_each_entry(binding, &block->binding_list, list) + prestera_span_rule_del(binding, block->ingress); + + block->mall.prio_min = UINT_MAX; + block->mall.prio_max = 0; + block->mall.bound = false; +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.h b/drivers/net/ethernet/marvell/prestera/prestera_matchall.h new file mode 100644 index 0000000000..fed08be802 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2022 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_MATCHALL_H_ +#define _PRESTERA_MATCHALL_H_ + +#include <net/pkt_cls.h> + +struct prestera_flow_block; + +int prestera_mall_replace(struct prestera_flow_block *block, + struct tc_cls_matchall_offload *f); +void prestera_mall_destroy(struct prestera_flow_block *block); +int prestera_mall_prio_get(struct prestera_flow_block *block, + u32 *prio_min, u32 *prio_max); + +#endif /* _PRESTERA_MATCHALL_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c new file mode 100644 index 0000000000..35857dc195 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c @@ -0,0 +1,981 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. 
All rights reserved */ + +#include <linux/bitfield.h> +#include <linux/circ_buf.h> +#include <linux/device.h> +#include <linux/firmware.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "prestera.h" + +#define PRESTERA_MSG_MAX_SIZE 1500 + +#define PRESTERA_SUPP_FW_MAJ_VER 4 +#define PRESTERA_SUPP_FW_MIN_VER 1 + +#define PRESTERA_PREV_FW_MAJ_VER 4 +#define PRESTERA_PREV_FW_MIN_VER 0 + +#define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img" +#define PRESTERA_FW_ARM64_PATH_FMT "mrvl/prestera/mvsw_prestera_fw_arm64-v%u.%u.img" + +#define PRESTERA_FW_HDR_MAGIC 0x351D9D06 +#define PRESTERA_FW_DL_TIMEOUT_MS 50000 +#define PRESTERA_FW_BLK_SZ 1024 + +#define PRESTERA_FW_VER_MAJ_MUL 1000000 +#define PRESTERA_FW_VER_MIN_MUL 1000 + +#define PRESTERA_FW_VER_MAJ(v) ((v) / PRESTERA_FW_VER_MAJ_MUL) + +#define PRESTERA_FW_VER_MIN(v) \ + (((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL)) / \ + PRESTERA_FW_VER_MIN_MUL) + +#define PRESTERA_FW_VER_PATCH(v) \ + ((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL) - \ + (PRESTERA_FW_VER_MIN(v) * PRESTERA_FW_VER_MIN_MUL)) + +enum prestera_pci_bar_t { + PRESTERA_PCI_BAR_FW = 2, + PRESTERA_PCI_BAR_PP = 4, +}; + +struct prestera_fw_header { + __be32 magic_number; + __be32 version_value; + u8 reserved[8]; +}; + +struct prestera_ldr_regs { + u32 ldr_ready; + u32 pad1; + + u32 ldr_img_size; + u32 ldr_ctl_flags; + + u32 ldr_buf_offs; + u32 ldr_buf_size; + + u32 ldr_buf_rd; + u32 pad2; + u32 ldr_buf_wr; + + u32 ldr_status; +}; + +#define PRESTERA_LDR_REG_OFFSET(f) offsetof(struct prestera_ldr_regs, f) + +#define PRESTERA_LDR_READY_MAGIC 0xf00dfeed + +#define PRESTERA_LDR_STATUS_IMG_DL BIT(0) +#define PRESTERA_LDR_STATUS_START_FW BIT(1) +#define PRESTERA_LDR_STATUS_INVALID_IMG BIT(2) +#define PRESTERA_LDR_STATUS_NOMEM BIT(3) + +#define PRESTERA_LDR_REG_BASE(fw) ((fw)->ldr_regs) +#define PRESTERA_LDR_REG_ADDR(fw, reg) (PRESTERA_LDR_REG_BASE(fw) + (reg)) + +/* fw loader registers */ +#define PRESTERA_LDR_READY_REG PRESTERA_LDR_REG_OFFSET(ldr_ready) +#define PRESTERA_LDR_IMG_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_img_size) +#define PRESTERA_LDR_CTL_REG PRESTERA_LDR_REG_OFFSET(ldr_ctl_flags) +#define PRESTERA_LDR_BUF_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_size) +#define PRESTERA_LDR_BUF_OFFS_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_offs) +#define PRESTERA_LDR_BUF_RD_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_rd) +#define PRESTERA_LDR_BUF_WR_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_wr) +#define PRESTERA_LDR_STATUS_REG PRESTERA_LDR_REG_OFFSET(ldr_status) + +#define PRESTERA_LDR_CTL_DL_START BIT(0) + +#define PRESTERA_EVT_QNUM_MAX 4 + +struct prestera_fw_evtq_regs { + u32 rd_idx; + u32 pad1; + u32 wr_idx; + u32 pad2; + u32 offs; + u32 len; +}; + +#define PRESTERA_CMD_QNUM_MAX 4 + +struct prestera_fw_cmdq_regs { + u32 req_ctl; + u32 req_len; + u32 rcv_ctl; + u32 rcv_len; + u32 offs; + u32 len; +}; + +struct prestera_fw_regs { + u32 fw_ready; + u32 cmd_offs; + u32 cmd_len; + u32 cmd_qnum; + u32 evt_offs; + u32 evt_qnum; + + u32 fw_status; + u32 rx_status; + + struct prestera_fw_cmdq_regs cmdq_list[PRESTERA_CMD_QNUM_MAX]; + struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX]; +}; + +#define PRESTERA_FW_REG_OFFSET(f) offsetof(struct prestera_fw_regs, f) + +#define PRESTERA_FW_READY_MAGIC 0xcafebabe + +/* fw registers */ +#define PRESTERA_FW_READY_REG PRESTERA_FW_REG_OFFSET(fw_ready) + +#define PRESTERA_CMD_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(cmd_offs) +#define PRESTERA_CMD_BUF_LEN_REG
PRESTERA_FW_REG_OFFSET(cmd_len) +#define PRESTERA_CMD_QNUM_REG PRESTERA_FW_REG_OFFSET(cmd_qnum) +#define PRESTERA_EVT_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(evt_offs) +#define PRESTERA_EVT_QNUM_REG PRESTERA_FW_REG_OFFSET(evt_qnum) + +#define PRESTERA_CMDQ_REG_OFFSET(q, f) \ + (PRESTERA_FW_REG_OFFSET(cmdq_list) + \ + (q) * sizeof(struct prestera_fw_cmdq_regs) + \ + offsetof(struct prestera_fw_cmdq_regs, f)) + +#define PRESTERA_CMDQ_REQ_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_ctl) +#define PRESTERA_CMDQ_REQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_len) +#define PRESTERA_CMDQ_RCV_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, rcv_ctl) +#define PRESTERA_CMDQ_RCV_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, rcv_len) +#define PRESTERA_CMDQ_OFFS_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, offs) +#define PRESTERA_CMDQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, len) + +#define PRESTERA_FW_STATUS_REG PRESTERA_FW_REG_OFFSET(fw_status) +#define PRESTERA_RX_STATUS_REG PRESTERA_FW_REG_OFFSET(rx_status) + +/* PRESTERA_CMD_REQ_CTL_REG flags */ +#define PRESTERA_CMD_F_REQ_SENT BIT(0) +#define PRESTERA_CMD_F_REPL_RCVD BIT(1) + +/* PRESTERA_CMD_RCV_CTL_REG flags */ +#define PRESTERA_CMD_F_REPL_SENT BIT(0) + +#define PRESTERA_FW_EVT_CTL_STATUS_MASK GENMASK(1, 0) + +#define PRESTERA_FW_EVT_CTL_STATUS_ON 0 +#define PRESTERA_FW_EVT_CTL_STATUS_OFF 1 + +#define PRESTERA_EVTQ_REG_OFFSET(q, f) \ + (PRESTERA_FW_REG_OFFSET(evtq_list) + \ + (q) * sizeof(struct prestera_fw_evtq_regs) + \ + offsetof(struct prestera_fw_evtq_regs, f)) + +#define PRESTERA_EVTQ_RD_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, rd_idx) +#define PRESTERA_EVTQ_WR_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, wr_idx) +#define PRESTERA_EVTQ_OFFS_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, offs) +#define PRESTERA_EVTQ_LEN_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, len) + +#define PRESTERA_FW_REG_BASE(fw) ((fw)->dev.ctl_regs) +#define PRESTERA_FW_REG_ADDR(fw, reg) PRESTERA_FW_REG_BASE((fw)) + (reg) + +#define PRESTERA_FW_CMD_DEFAULT_WAIT_MS 30000 +#define PRESTERA_FW_READY_WAIT_MS 20000 + +#define PRESTERA_DEV_ID_AC3X_98DX_55 0xC804 +#define PRESTERA_DEV_ID_AC3X_98DX_65 0xC80C +#define PRESTERA_DEV_ID_ALDRIN2 0xCC1E +#define PRESTERA_DEV_ID_98DX7312M 0x981F +#define PRESTERA_DEV_ID_98DX3500 0x9820 +#define PRESTERA_DEV_ID_98DX3501 0x9826 +#define PRESTERA_DEV_ID_98DX3510 0x9821 +#define PRESTERA_DEV_ID_98DX3520 0x9822 + +struct prestera_fw_evtq { + u8 __iomem *addr; + size_t len; +}; + +struct prestera_fw_cmdq { + /* serialize access to dev->send_req */ + struct mutex cmd_mtx; + u8 __iomem *addr; + size_t len; +}; + +struct prestera_fw { + struct prestera_fw_rev rev_supp; + const struct firmware *bin; + struct workqueue_struct *wq; + struct prestera_device dev; + struct pci_dev *pci_dev; + u8 __iomem *ldr_regs; + u8 __iomem *ldr_ring_buf; + u32 ldr_buf_len; + u32 ldr_wr_idx; + size_t cmd_mbox_len; + u8 __iomem *cmd_mbox; + struct prestera_fw_cmdq cmd_queue[PRESTERA_CMD_QNUM_MAX]; + u8 cmd_qnum; + struct prestera_fw_evtq evt_queue[PRESTERA_EVT_QNUM_MAX]; + u8 evt_qnum; + struct work_struct evt_work; + u8 __iomem *evt_buf; + u8 *evt_msg; +}; + +static int prestera_fw_load(struct prestera_fw *fw); + +static void prestera_fw_write(struct prestera_fw *fw, u32 reg, u32 val) +{ + writel(val, PRESTERA_FW_REG_ADDR(fw, reg)); +} + +static u32 prestera_fw_read(struct prestera_fw *fw, u32 reg) +{ + return readl(PRESTERA_FW_REG_ADDR(fw, reg)); +} + +static u32 prestera_fw_evtq_len(struct prestera_fw *fw, u8 qid) +{ + return fw->evt_queue[qid].len; +} + +static u32 prestera_fw_evtq_avail(struct prestera_fw *fw, u8 
qid) +{ + u32 wr_idx = prestera_fw_read(fw, PRESTERA_EVTQ_WR_IDX_REG(qid)); + u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); + + return CIRC_CNT(wr_idx, rd_idx, prestera_fw_evtq_len(fw, qid)); +} + +static void prestera_fw_evtq_rd_set(struct prestera_fw *fw, + u8 qid, u32 idx) +{ + u32 rd_idx = idx & (prestera_fw_evtq_len(fw, qid) - 1); + + prestera_fw_write(fw, PRESTERA_EVTQ_RD_IDX_REG(qid), rd_idx); +} + +static u8 __iomem *prestera_fw_evtq_buf(struct prestera_fw *fw, u8 qid) +{ + return fw->evt_queue[qid].addr; +} + +static u32 prestera_fw_evtq_read32(struct prestera_fw *fw, u8 qid) +{ + u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); + u32 val; + + val = readl(prestera_fw_evtq_buf(fw, qid) + rd_idx); + prestera_fw_evtq_rd_set(fw, qid, rd_idx + 4); + return val; +} + +static ssize_t prestera_fw_evtq_read_buf(struct prestera_fw *fw, + u8 qid, void *buf, size_t len) +{ + u32 idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); + u8 __iomem *evtq_addr = prestera_fw_evtq_buf(fw, qid); + u32 *buf32 = buf; + int i; + + for (i = 0; i < len / 4; buf32++, i++) { + *buf32 = readl_relaxed(evtq_addr + idx); + idx = (idx + 4) & (prestera_fw_evtq_len(fw, qid) - 1); + } + + prestera_fw_evtq_rd_set(fw, qid, idx); + + return i; +} + +static u8 prestera_fw_evtq_pick(struct prestera_fw *fw) +{ + int qid; + + for (qid = 0; qid < fw->evt_qnum; qid++) { + if (prestera_fw_evtq_avail(fw, qid) >= 4) + return qid; + } + + return PRESTERA_EVT_QNUM_MAX; +} + +static void prestera_fw_evt_ctl_status_set(struct prestera_fw *fw, u32 val) +{ + u32 status = prestera_fw_read(fw, PRESTERA_FW_STATUS_REG); + + u32p_replace_bits(&status, val, PRESTERA_FW_EVT_CTL_STATUS_MASK); + + prestera_fw_write(fw, PRESTERA_FW_STATUS_REG, status); +} + +static void prestera_fw_evt_work_fn(struct work_struct *work) +{ + struct prestera_fw *fw; + void *msg; + u8 qid; + + fw = container_of(work, struct prestera_fw, evt_work); + msg = fw->evt_msg; + + prestera_fw_evt_ctl_status_set(fw, PRESTERA_FW_EVT_CTL_STATUS_OFF); + + while ((qid = prestera_fw_evtq_pick(fw)) < PRESTERA_EVT_QNUM_MAX) { + u32 idx; + u32 len; + + len = prestera_fw_evtq_read32(fw, qid); + idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); + + WARN_ON(prestera_fw_evtq_avail(fw, qid) < len); + + if (WARN_ON(len > PRESTERA_MSG_MAX_SIZE)) { + prestera_fw_evtq_rd_set(fw, qid, idx + len); + continue; + } + + prestera_fw_evtq_read_buf(fw, qid, msg, len); + + if (fw->dev.recv_msg) + fw->dev.recv_msg(&fw->dev, msg, len); + } + + prestera_fw_evt_ctl_status_set(fw, PRESTERA_FW_EVT_CTL_STATUS_ON); +} + +static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp, + unsigned int waitms) +{ + u8 __iomem *addr = PRESTERA_FW_REG_ADDR(fw, reg); + u32 val; + + return readl_poll_timeout(addr, val, cmp == val, + 1 * USEC_PER_MSEC, waitms * USEC_PER_MSEC); +} + +static void prestera_fw_cmdq_lock(struct prestera_fw *fw, u8 qid) +{ + mutex_lock(&fw->cmd_queue[qid].cmd_mtx); +} + +static void prestera_fw_cmdq_unlock(struct prestera_fw *fw, u8 qid) +{ + mutex_unlock(&fw->cmd_queue[qid].cmd_mtx); +} + +static u32 prestera_fw_cmdq_len(struct prestera_fw *fw, u8 qid) +{ + return fw->cmd_queue[qid].len; +} + +static u8 __iomem *prestera_fw_cmdq_buf(struct prestera_fw *fw, u8 qid) +{ + return fw->cmd_queue[qid].addr; +} + +static int prestera_fw_cmd_send(struct prestera_fw *fw, int qid, + void *in_msg, size_t in_size, + void *out_msg, size_t out_size, + unsigned int waitms) +{ + u32 ret_size; + int err; + + if (!waitms) + waitms = 
PRESTERA_FW_CMD_DEFAULT_WAIT_MS; + + if (ALIGN(in_size, 4) > prestera_fw_cmdq_len(fw, qid)) + return -EMSGSIZE; + + /* wait for finish previous reply from FW */ + err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid), 0, 30); + if (err) { + dev_err(fw->dev.dev, "finish reply from FW is timed out\n"); + return err; + } + + prestera_fw_write(fw, PRESTERA_CMDQ_REQ_LEN_REG(qid), in_size); + + memcpy_toio(prestera_fw_cmdq_buf(fw, qid), in_msg, in_size); + + prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid), + PRESTERA_CMD_F_REQ_SENT); + + /* wait for reply from FW */ + err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid), + PRESTERA_CMD_F_REPL_SENT, waitms); + if (err) { + dev_err(fw->dev.dev, "reply from FW is timed out\n"); + goto cmd_exit; + } + + ret_size = prestera_fw_read(fw, PRESTERA_CMDQ_RCV_LEN_REG(qid)); + if (ret_size > out_size) { + dev_err(fw->dev.dev, "ret_size (%u) > out_len(%zu)\n", + ret_size, out_size); + err = -EMSGSIZE; + goto cmd_exit; + } + + memcpy_fromio(out_msg, + prestera_fw_cmdq_buf(fw, qid) + in_size, ret_size); + +cmd_exit: + prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid), + PRESTERA_CMD_F_REPL_RCVD); + return err; +} + +static int prestera_fw_send_req(struct prestera_device *dev, int qid, + void *in_msg, size_t in_size, void *out_msg, + size_t out_size, unsigned int waitms) +{ + struct prestera_fw *fw; + ssize_t ret; + + fw = container_of(dev, struct prestera_fw, dev); + + prestera_fw_cmdq_lock(fw, qid); + ret = prestera_fw_cmd_send(fw, qid, in_msg, in_size, out_msg, out_size, + waitms); + prestera_fw_cmdq_unlock(fw, qid); + + return ret; +} + +static int prestera_fw_init(struct prestera_fw *fw) +{ + u8 __iomem *base; + int err; + u8 qid; + + fw->dev.send_req = prestera_fw_send_req; + fw->ldr_regs = fw->dev.ctl_regs; + + err = prestera_fw_load(fw); + if (err) + return err; + + err = prestera_fw_wait_reg32(fw, PRESTERA_FW_READY_REG, + PRESTERA_FW_READY_MAGIC, + PRESTERA_FW_READY_WAIT_MS); + if (err) { + dev_err(fw->dev.dev, "FW failed to start\n"); + return err; + } + + base = fw->dev.ctl_regs; + + fw->cmd_mbox = base + prestera_fw_read(fw, PRESTERA_CMD_BUF_OFFS_REG); + fw->cmd_mbox_len = prestera_fw_read(fw, PRESTERA_CMD_BUF_LEN_REG); + fw->cmd_qnum = prestera_fw_read(fw, PRESTERA_CMD_QNUM_REG); + + for (qid = 0; qid < fw->cmd_qnum; qid++) { + u32 offs = prestera_fw_read(fw, PRESTERA_CMDQ_OFFS_REG(qid)); + struct prestera_fw_cmdq *cmdq = &fw->cmd_queue[qid]; + + cmdq->len = prestera_fw_read(fw, PRESTERA_CMDQ_LEN_REG(qid)); + cmdq->addr = fw->cmd_mbox + offs; + mutex_init(&cmdq->cmd_mtx); + } + + fw->evt_buf = base + prestera_fw_read(fw, PRESTERA_EVT_BUF_OFFS_REG); + fw->evt_qnum = prestera_fw_read(fw, PRESTERA_EVT_QNUM_REG); + fw->evt_msg = kmalloc(PRESTERA_MSG_MAX_SIZE, GFP_KERNEL); + if (!fw->evt_msg) + return -ENOMEM; + + for (qid = 0; qid < fw->evt_qnum; qid++) { + u32 offs = prestera_fw_read(fw, PRESTERA_EVTQ_OFFS_REG(qid)); + struct prestera_fw_evtq *evtq = &fw->evt_queue[qid]; + + evtq->len = prestera_fw_read(fw, PRESTERA_EVTQ_LEN_REG(qid)); + evtq->addr = fw->evt_buf + offs; + } + + return 0; +} + +static void prestera_fw_uninit(struct prestera_fw *fw) +{ + kfree(fw->evt_msg); +} + +static irqreturn_t prestera_pci_irq_handler(int irq, void *dev_id) +{ + struct prestera_fw *fw = dev_id; + + if (prestera_fw_read(fw, PRESTERA_RX_STATUS_REG)) { + prestera_fw_write(fw, PRESTERA_RX_STATUS_REG, 0); + + if (fw->dev.recv_pkt) + fw->dev.recv_pkt(&fw->dev); + } + + queue_work(fw->wq, &fw->evt_work); + + return IRQ_HANDLED; +} + +static 
void prestera_ldr_write(struct prestera_fw *fw, u32 reg, u32 val) +{ + writel(val, PRESTERA_LDR_REG_ADDR(fw, reg)); +} + +static u32 prestera_ldr_read(struct prestera_fw *fw, u32 reg) +{ + return readl(PRESTERA_LDR_REG_ADDR(fw, reg)); +} + +static int prestera_ldr_wait_reg32(struct prestera_fw *fw, + u32 reg, u32 cmp, unsigned int waitms) +{ + u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, reg); + u32 val; + + return readl_poll_timeout(addr, val, cmp == val, + 10 * USEC_PER_MSEC, waitms * USEC_PER_MSEC); +} + +static u32 prestera_ldr_wait_buf(struct prestera_fw *fw, size_t len) +{ + u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_BUF_RD_REG); + u32 buf_len = fw->ldr_buf_len; + u32 wr_idx = fw->ldr_wr_idx; + u32 rd_idx; + + return readl_poll_timeout(addr, rd_idx, + CIRC_SPACE(wr_idx, rd_idx, buf_len) >= len, + 1 * USEC_PER_MSEC, 100 * USEC_PER_MSEC); +} + +static int prestera_ldr_wait_dl_finish(struct prestera_fw *fw) +{ + u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_STATUS_REG); + unsigned long mask = ~(PRESTERA_LDR_STATUS_IMG_DL); + u32 val; + int err; + + err = readl_poll_timeout(addr, val, val & mask, 10 * USEC_PER_MSEC, + PRESTERA_FW_DL_TIMEOUT_MS * USEC_PER_MSEC); + if (err) { + dev_err(fw->dev.dev, "Timeout to load FW img [state=%d]", + prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG)); + return err; + } + + return 0; +} + +static void prestera_ldr_wr_idx_move(struct prestera_fw *fw, unsigned int n) +{ + fw->ldr_wr_idx = (fw->ldr_wr_idx + (n)) & (fw->ldr_buf_len - 1); +} + +static void prestera_ldr_wr_idx_commit(struct prestera_fw *fw) +{ + prestera_ldr_write(fw, PRESTERA_LDR_BUF_WR_REG, fw->ldr_wr_idx); +} + +static u8 __iomem *prestera_ldr_wr_ptr(struct prestera_fw *fw) +{ + return fw->ldr_ring_buf + fw->ldr_wr_idx; +} + +static int prestera_ldr_send(struct prestera_fw *fw, const u8 *buf, size_t len) +{ + int err; + int i; + + err = prestera_ldr_wait_buf(fw, len); + if (err) { + dev_err(fw->dev.dev, "failed wait for sending firmware\n"); + return err; + } + + for (i = 0; i < len; i += 4) { + writel_relaxed(*(u32 *)(buf + i), prestera_ldr_wr_ptr(fw)); + prestera_ldr_wr_idx_move(fw, 4); + } + + prestera_ldr_wr_idx_commit(fw); + return 0; +} + +static int prestera_ldr_fw_send(struct prestera_fw *fw, + const char *img, u32 fw_size) +{ + u32 status; + u32 pos; + int err; + + err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_STATUS_REG, + PRESTERA_LDR_STATUS_IMG_DL, + 5 * MSEC_PER_SEC); + if (err) { + dev_err(fw->dev.dev, "Loader is not ready to load image\n"); + return err; + } + + for (pos = 0; pos < fw_size; pos += PRESTERA_FW_BLK_SZ) { + if (pos + PRESTERA_FW_BLK_SZ > fw_size) + break; + + err = prestera_ldr_send(fw, img + pos, PRESTERA_FW_BLK_SZ); + if (err) + return err; + } + + if (pos < fw_size) { + err = prestera_ldr_send(fw, img + pos, fw_size - pos); + if (err) + return err; + } + + err = prestera_ldr_wait_dl_finish(fw); + if (err) + return err; + + status = prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG); + + switch (status) { + case PRESTERA_LDR_STATUS_INVALID_IMG: + dev_err(fw->dev.dev, "FW img has bad CRC\n"); + return -EINVAL; + case PRESTERA_LDR_STATUS_NOMEM: + dev_err(fw->dev.dev, "Loader has no enough mem\n"); + return -ENOMEM; + } + + return 0; +} + +static void prestera_fw_rev_parse(const struct prestera_fw_header *hdr, + struct prestera_fw_rev *rev) +{ + u32 version = be32_to_cpu(hdr->version_value); + + rev->maj = PRESTERA_FW_VER_MAJ(version); + rev->min = PRESTERA_FW_VER_MIN(version); + rev->sub = PRESTERA_FW_VER_PATCH(version); +} + +static int 
prestera_fw_rev_check(struct prestera_fw *fw) +{ + struct prestera_fw_rev *rev = &fw->dev.fw_rev; + + if (rev->maj == fw->rev_supp.maj && rev->min >= fw->rev_supp.min) + return 0; + + dev_err(fw->dev.dev, "Driver supports FW version only '%u.%u.x'", + fw->rev_supp.maj, fw->rev_supp.min); + + return -EINVAL; +} + +static int prestera_fw_hdr_parse(struct prestera_fw *fw) +{ + struct prestera_fw_rev *rev = &fw->dev.fw_rev; + struct prestera_fw_header *hdr; + u32 magic; + + hdr = (struct prestera_fw_header *)fw->bin->data; + + magic = be32_to_cpu(hdr->magic_number); + if (magic != PRESTERA_FW_HDR_MAGIC) { + dev_err(fw->dev.dev, "FW img hdr magic is invalid"); + return -EINVAL; + } + + prestera_fw_rev_parse(hdr, rev); + + dev_info(fw->dev.dev, "FW version '%u.%u.%u'\n", + rev->maj, rev->min, rev->sub); + + return prestera_fw_rev_check(fw); +} + +static const char *prestera_fw_path_fmt_get(struct prestera_fw *fw) +{ + switch (fw->pci_dev->device) { + case PRESTERA_DEV_ID_98DX3500: + case PRESTERA_DEV_ID_98DX3501: + case PRESTERA_DEV_ID_98DX3510: + case PRESTERA_DEV_ID_98DX3520: + return PRESTERA_FW_ARM64_PATH_FMT; + + default: + return PRESTERA_FW_PATH_FMT; + } +} + +static int prestera_fw_get(struct prestera_fw *fw) +{ + int ver_maj = PRESTERA_SUPP_FW_MAJ_VER; + int ver_min = PRESTERA_SUPP_FW_MIN_VER; + char fw_path[128]; + int err; + +pick_fw_ver: + snprintf(fw_path, sizeof(fw_path), prestera_fw_path_fmt_get(fw), + ver_maj, ver_min); + + err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev); + if (err) { + if (ver_maj != PRESTERA_PREV_FW_MAJ_VER || + ver_min != PRESTERA_PREV_FW_MIN_VER) { + ver_maj = PRESTERA_PREV_FW_MAJ_VER; + ver_min = PRESTERA_PREV_FW_MIN_VER; + + dev_warn(fw->dev.dev, + "missing latest %s firmware, fall-back to previous %u.%u version\n", + fw_path, ver_maj, ver_min); + + goto pick_fw_ver; + } else { + dev_err(fw->dev.dev, "failed to request previous firmware: %s\n", + fw_path); + return err; + } + } + + dev_info(fw->dev.dev, "Loading %s ...", fw_path); + + fw->rev_supp.maj = ver_maj; + fw->rev_supp.min = ver_min; + fw->rev_supp.sub = 0; + + return 0; +} + +static void prestera_fw_put(struct prestera_fw *fw) +{ + release_firmware(fw->bin); +} + +static int prestera_fw_load(struct prestera_fw *fw) +{ + size_t hlen = sizeof(struct prestera_fw_header); + int err; + + err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_READY_REG, + PRESTERA_LDR_READY_MAGIC, + 5 * MSEC_PER_SEC); + if (err) { + dev_err(fw->dev.dev, "waiting for FW loader is timed out"); + return err; + } + + fw->ldr_ring_buf = fw->ldr_regs + + prestera_ldr_read(fw, PRESTERA_LDR_BUF_OFFS_REG); + + fw->ldr_buf_len = + prestera_ldr_read(fw, PRESTERA_LDR_BUF_SIZE_REG); + + fw->ldr_wr_idx = 0; + + err = prestera_fw_get(fw); + if (err) + return err; + + err = prestera_fw_hdr_parse(fw); + if (err) { + dev_err(fw->dev.dev, "FW image header is invalid\n"); + goto out_release; + } + + prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, fw->bin->size - hlen); + prestera_ldr_write(fw, PRESTERA_LDR_CTL_REG, PRESTERA_LDR_CTL_DL_START); + + err = prestera_ldr_fw_send(fw, fw->bin->data + hlen, + fw->bin->size - hlen); + +out_release: + prestera_fw_put(fw); + return err; +} + +static bool prestera_pci_pp_use_bar2(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PRESTERA_DEV_ID_98DX7312M: + case PRESTERA_DEV_ID_98DX3500: + case PRESTERA_DEV_ID_98DX3501: + case PRESTERA_DEV_ID_98DX3510: + case PRESTERA_DEV_ID_98DX3520: + return true; + + default: + return false; + } +} + +static u32 prestera_pci_pp_bar2_offs(struct 
pci_dev *pdev) +{ + if (pci_resource_len(pdev, 2) == 0x1000000) + return 0x0; + else + return (pci_resource_len(pdev, 2) / 2); +} + +static u32 prestera_pci_fw_bar2_offs(struct pci_dev *pdev) +{ + if (pci_resource_len(pdev, 2) == 0x1000000) + return 0x400000; + else + return 0x0; +} + +static int prestera_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + const char *driver_name = dev_driver_string(&pdev->dev); + u8 __iomem *mem_addr, *pp_addr = NULL; + struct prestera_fw *fw; + int err; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pci_enable_device failed\n"); + goto err_pci_enable_device; + } + + err = pci_request_regions(pdev, driver_name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed\n"); + goto err_pci_request_regions; + } + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30)); + if (err) { + dev_err(&pdev->dev, "fail to set DMA mask\n"); + goto err_dma_mask; + } + + mem_addr = pcim_iomap(pdev, 2, 0); + if (!mem_addr) { + dev_err(&pdev->dev, "pci mem ioremap failed\n"); + err = -EIO; + goto err_mem_ioremap; + } + + /* AC5X devices use second half of BAR2 */ + if (prestera_pci_pp_use_bar2(pdev)) { + pp_addr = mem_addr + prestera_pci_pp_bar2_offs(pdev); + mem_addr = mem_addr + prestera_pci_fw_bar2_offs(pdev); + } else { + pp_addr = pcim_iomap(pdev, 4, 0); + if (!pp_addr) { + dev_err(&pdev->dev, "pp regs ioremap failed\n"); + err = -EIO; + goto err_pp_ioremap; + } + } + + pci_set_master(pdev); + + fw = devm_kzalloc(&pdev->dev, sizeof(*fw), GFP_KERNEL); + if (!fw) { + err = -ENOMEM; + goto err_pci_dev_alloc; + } + + fw->pci_dev = pdev; + fw->dev.ctl_regs = mem_addr; + fw->dev.pp_regs = pp_addr; + fw->dev.dev = &pdev->dev; + + pci_set_drvdata(pdev, fw); + + err = prestera_fw_init(fw); + if (err) + goto err_prestera_fw_init; + + dev_info(fw->dev.dev, "Prestera FW is ready\n"); + + fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1); + if (!fw->wq) { + err = -ENOMEM; + goto err_wq_alloc; + } + + INIT_WORK(&fw->evt_work, prestera_fw_evt_work_fn); + + err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (err < 0) { + dev_err(&pdev->dev, "MSI IRQ init failed\n"); + goto err_irq_alloc; + } + + err = request_irq(pci_irq_vector(pdev, 0), prestera_pci_irq_handler, + 0, driver_name, fw); + if (err) { + dev_err(&pdev->dev, "fail to request IRQ\n"); + goto err_request_irq; + } + + err = prestera_device_register(&fw->dev); + if (err) + goto err_prestera_dev_register; + + return 0; + +err_prestera_dev_register: + free_irq(pci_irq_vector(pdev, 0), fw); +err_request_irq: + pci_free_irq_vectors(pdev); +err_irq_alloc: + destroy_workqueue(fw->wq); +err_wq_alloc: + prestera_fw_uninit(fw); +err_prestera_fw_init: +err_pci_dev_alloc: +err_pp_ioremap: +err_mem_ioremap: +err_dma_mask: + pci_release_regions(pdev); +err_pci_request_regions: +err_pci_enable_device: + return err; +} + +static void prestera_pci_remove(struct pci_dev *pdev) +{ + struct prestera_fw *fw = pci_get_drvdata(pdev); + + prestera_device_unregister(&fw->dev); + free_irq(pci_irq_vector(pdev, 0), fw); + pci_free_irq_vectors(pdev); + destroy_workqueue(fw->wq); + prestera_fw_uninit(fw); + pci_release_regions(pdev); +} + +static const struct pci_device_id prestera_pci_devices[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_AC3X_98DX_55) }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_AC3X_98DX_65) }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_ALDRIN2) }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX7312M) }, + { 
PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3500) }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3501) }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3510) }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3520) }, + { } +}; +MODULE_DEVICE_TABLE(pci, prestera_pci_devices); + +static struct pci_driver prestera_pci_driver = { + .name = "Prestera DX", + .id_table = prestera_pci_devices, + .probe = prestera_pci_probe, + .remove = prestera_pci_remove, +}; +module_pci_driver(prestera_pci_driver); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Marvell Prestera switch PCI interface"); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c new file mode 100644 index 0000000000..de317179a7 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -0,0 +1,1645 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/inetdevice.h> +#include <net/inet_dscp.h> +#include <net/switchdev.h> +#include <linux/rhashtable.h> +#include <net/nexthop.h> +#include <net/arp.h> +#include <linux/if_vlan.h> +#include <linux/if_macvlan.h> +#include <net/netevent.h> + +#include "prestera.h" +#include "prestera_router_hw.h" + +#define PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH +#define PRESTERA_NH_PROBE_INTERVAL 5000 /* ms */ + +struct prestera_kern_neigh_cache_key { + struct prestera_ip_addr addr; + struct net_device *dev; +}; + +struct prestera_kern_neigh_cache { + struct prestera_kern_neigh_cache_key key; + struct rhash_head ht_node; + struct list_head kern_fib_cache_list; + /* Hold prepared nh_neigh info if is in_kernel */ + struct prestera_neigh_info nh_neigh_info; + /* Indicate if neighbour is reachable by direct route */ + bool reachable; + /* Lock cache if neigh is present in kernel */ + bool in_kernel; +}; + +struct prestera_kern_fib_cache_key { + struct prestera_ip_addr addr; + u32 prefix_len; + u32 kern_tb_id; /* tb_id from kernel (not fixed) */ +}; + +/* Subscribing on neighbours in kernel */ +struct prestera_kern_fib_cache { + struct prestera_kern_fib_cache_key key; + struct { + struct prestera_fib_key fib_key; + enum prestera_fib_type fib_type; + struct prestera_nexthop_group_key nh_grp_key; + } lpm_info; /* hold prepared lpm info */ + /* Indicate if route is not overlapped by another table */ + struct rhash_head ht_node; /* node of prestera_router */ + struct prestera_kern_neigh_cache_head { + struct prestera_kern_fib_cache *this; + struct list_head head; + struct prestera_kern_neigh_cache *n_cache; + } kern_neigh_cache_head[PRESTERA_NHGR_SIZE_MAX]; + union { + struct fib_notifier_info info; /* point to any of 4/6 */ + struct fib_entry_notifier_info fen4_info; + }; + bool reachable; +}; + +static const struct rhashtable_params __prestera_kern_neigh_cache_ht_params = { + .key_offset = offsetof(struct prestera_kern_neigh_cache, key), + .head_offset = offsetof(struct prestera_kern_neigh_cache, ht_node), + .key_len = sizeof(struct prestera_kern_neigh_cache_key), + .automatic_shrinking = true, +}; + +static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = { + .key_offset = offsetof(struct prestera_kern_fib_cache, key), + .head_offset = offsetof(struct prestera_kern_fib_cache, ht_node), + .key_len = sizeof(struct prestera_kern_fib_cache_key), + .automatic_shrinking = true, +}; + +/* This util to be 
used, to convert kernel rules for default vr in hw_vr */ +static u32 prestera_fix_tb_id(u32 tb_id) +{ + if (tb_id == RT_TABLE_UNSPEC || + tb_id == RT_TABLE_LOCAL || + tb_id == RT_TABLE_DEFAULT) + tb_id = RT_TABLE_MAIN; + + return tb_id; +} + +static void +prestera_util_fen_info2fib_cache_key(struct fib_notifier_info *info, + struct prestera_kern_fib_cache_key *key) +{ + struct fib_entry_notifier_info *fen_info = + container_of(info, struct fib_entry_notifier_info, info); + + memset(key, 0, sizeof(*key)); + key->addr.v = PRESTERA_IPV4; + key->addr.u.ipv4 = cpu_to_be32(fen_info->dst); + key->prefix_len = fen_info->dst_len; + key->kern_tb_id = fen_info->tb_id; +} + +static int prestera_util_nhc2nc_key(struct prestera_switch *sw, + struct fib_nh_common *nhc, + struct prestera_kern_neigh_cache_key *nk) +{ + memset(nk, 0, sizeof(*nk)); + if (nhc->nhc_gw_family == AF_INET) { + nk->addr.v = PRESTERA_IPV4; + nk->addr.u.ipv4 = nhc->nhc_gw.ipv4; + } else { + nk->addr.v = PRESTERA_IPV6; + nk->addr.u.ipv6 = nhc->nhc_gw.ipv6; + } + + nk->dev = nhc->nhc_dev; + return 0; +} + +static void +prestera_util_nc_key2nh_key(struct prestera_kern_neigh_cache_key *ck, + struct prestera_nh_neigh_key *nk) +{ + memset(nk, 0, sizeof(*nk)); + nk->addr = ck->addr; + nk->rif = (void *)ck->dev; +} + +static bool +prestera_util_nhc_eq_n_cache_key(struct prestera_switch *sw, + struct fib_nh_common *nhc, + struct prestera_kern_neigh_cache_key *nk) +{ + struct prestera_kern_neigh_cache_key tk; + int err; + + err = prestera_util_nhc2nc_key(sw, nhc, &tk); + if (err) + return false; + + if (memcmp(&tk, nk, sizeof(tk))) + return false; + + return true; +} + +static int +prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n, + struct prestera_kern_neigh_cache_key *key) +{ + memset(key, 0, sizeof(*key)); + if (n->tbl->family == AF_INET) { + key->addr.v = PRESTERA_IPV4; + key->addr.u.ipv4 = *(__be32 *)n->primary_key; + } else { + return -ENOENT; + } + + key->dev = n->dev; + + return 0; +} + +static bool __prestera_fi_is_direct(struct fib_info *fi) +{ + struct fib_nh_common *fib_nhc; + + if (fib_info_num_path(fi) == 1) { + fib_nhc = fib_info_nhc(fi, 0); + if (fib_nhc->nhc_gw_family == AF_UNSPEC) + return true; + } + + return false; +} + +static bool prestera_fi_is_direct(struct fib_info *fi) +{ + if (fi->fib_type != RTN_UNICAST) + return false; + + return __prestera_fi_is_direct(fi); +} + +static bool prestera_fi_is_nh(struct fib_info *fi) +{ + if (fi->fib_type != RTN_UNICAST) + return false; + + return !__prestera_fi_is_direct(fi); +} + +static bool __prestera_fi6_is_direct(struct fib6_info *fi) +{ + if (!fi->fib6_nh->nh_common.nhc_gw_family) + return true; + + return false; +} + +static bool prestera_fi6_is_direct(struct fib6_info *fi) +{ + if (fi->fib6_type != RTN_UNICAST) + return false; + + return __prestera_fi6_is_direct(fi); +} + +static bool prestera_fi6_is_nh(struct fib6_info *fi) +{ + if (fi->fib6_type != RTN_UNICAST) + return false; + + return !__prestera_fi6_is_direct(fi); +} + +static bool prestera_fib_info_is_direct(struct fib_notifier_info *info) +{ + struct fib6_entry_notifier_info *fen6_info = + container_of(info, struct fib6_entry_notifier_info, info); + struct fib_entry_notifier_info *fen_info = + container_of(info, struct fib_entry_notifier_info, info); + + if (info->family == AF_INET) + return prestera_fi_is_direct(fen_info->fi); + else + return prestera_fi6_is_direct(fen6_info->rt); +} + +static bool prestera_fib_info_is_nh(struct fib_notifier_info *info) +{ + struct fib6_entry_notifier_info 
*fen6_info = + container_of(info, struct fib6_entry_notifier_info, info); + struct fib_entry_notifier_info *fen_info = + container_of(info, struct fib_entry_notifier_info, info); + + if (info->family == AF_INET) + return prestera_fi_is_nh(fen_info->fi); + else + return prestera_fi6_is_nh(fen6_info->rt); +} + +/* must be called with rcu_read_lock() */ +static int prestera_util_kern_get_route(struct fib_result *res, u32 tb_id, + __be32 *addr) +{ + struct flowi4 fl4; + + /* TODO: walk through appropriate tables in kernel + * to know if the same prefix exists in several tables + */ + memset(&fl4, 0, sizeof(fl4)); + fl4.daddr = *addr; + return fib_lookup(&init_net, &fl4, res, 0 /* FIB_LOOKUP_NOREF */); +} + +static bool +__prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr, + struct net_device *dev) +{ + struct fib_nh_common *fib_nhc; + struct fib_result res; + bool reachable; + + reachable = false; + + if (!prestera_util_kern_get_route(&res, tb_id, addr)) + if (prestera_fi_is_direct(res.fi)) { + fib_nhc = fib_info_nhc(res.fi, 0); + if (dev == fib_nhc->nhc_dev) + reachable = true; + } + + return reachable; +} + +/* Check if neigh route is reachable */ +static bool +prestera_util_kern_n_is_reachable(u32 tb_id, + struct prestera_ip_addr *addr, + struct net_device *dev) +{ + if (addr->v == PRESTERA_IPV4) + return __prestera_util_kern_n_is_reachable_v4(tb_id, + &addr->u.ipv4, + dev); + else + return false; +} + +static void prestera_util_kern_set_neigh_offload(struct neighbour *n, + bool offloaded) +{ + if (offloaded) + n->flags |= NTF_OFFLOADED; + else + n->flags &= ~NTF_OFFLOADED; +} + +static void +prestera_util_kern_set_nh_offload(struct fib_nh_common *nhc, bool offloaded, bool trap) +{ + if (offloaded) + nhc->nhc_flags |= RTNH_F_OFFLOAD; + else + nhc->nhc_flags &= ~RTNH_F_OFFLOAD; + + if (trap) + nhc->nhc_flags |= RTNH_F_TRAP; + else + nhc->nhc_flags &= ~RTNH_F_TRAP; +} + +static struct fib_nh_common * +prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n) +{ + struct fib6_entry_notifier_info *fen6_info; + struct fib_entry_notifier_info *fen4_info; + struct fib6_info *iter; + + if (info->family == AF_INET) { + fen4_info = container_of(info, struct fib_entry_notifier_info, + info); + return fib_info_nhc(fen4_info->fi, n); + } else if (info->family == AF_INET6) { + fen6_info = container_of(info, struct fib6_entry_notifier_info, + info); + if (!n) + return &fen6_info->rt->fib6_nh->nh_common; + + list_for_each_entry(iter, &fen6_info->rt->fib6_siblings, + fib6_siblings) { + if (!--n) + return &iter->fib6_nh->nh_common; + } + } + + /* if the family is incorrect - then the upper functions have a BUG */ + /* if the requested index is not found - that is also a bug, because + * a valid index must be produced by nhs, which checks the list length + */ + WARN(1, "Invalid parameters passed to %s n=%d i=%p", + __func__, n, info); + return NULL; +} + +static int prestera_kern_fib_info_nhs(struct fib_notifier_info *info) +{ + struct fib6_entry_notifier_info *fen6_info; + struct fib_entry_notifier_info *fen4_info; + + if (info->family == AF_INET) { + fen4_info = container_of(info, struct fib_entry_notifier_info, + info); + return fib_info_num_path(fen4_info->fi); + } else if (info->family == AF_INET6) { + fen6_info = container_of(info, struct fib6_entry_notifier_info, + info); + return fen6_info->rt->fib6_nsiblings + 1; + } + + return 0; +} + +static unsigned char +prestera_kern_fib_info_type(struct fib_notifier_info *info) +{ + struct fib6_entry_notifier_info *fen6_info; + struct fib_entry_notifier_info
*fen4_info; + + if (info->family == AF_INET) { + fen4_info = container_of(info, struct fib_entry_notifier_info, + info); + return fen4_info->fi->fib_type; + } else if (info->family == AF_INET6) { + fen6_info = container_of(info, struct fib6_entry_notifier_info, + info); + /* TODO: ECMP in ipv6 is several routes. + * Every route has single nh. + */ + return fen6_info->rt->fib6_type; + } + + return RTN_UNSPEC; +} + +/* Decided, that uc_nh route with key==nh is obviously neighbour route */ +static bool +prestera_fib_node_util_is_neighbour(struct prestera_fib_node *fib_node) +{ + if (fib_node->info.type != PRESTERA_FIB_TYPE_UC_NH) + return false; + + if (fib_node->info.nh_grp->nh_neigh_head[1].neigh) + return false; + + if (!fib_node->info.nh_grp->nh_neigh_head[0].neigh) + return false; + + if (memcmp(&fib_node->info.nh_grp->nh_neigh_head[0].neigh->key.addr, + &fib_node->key.addr, sizeof(struct prestera_ip_addr))) + return false; + + return true; +} + +static int prestera_dev_if_type(const struct net_device *dev) +{ + struct macvlan_dev *vlan; + + if (is_vlan_dev(dev) && + netif_is_bridge_master(vlan_dev_real_dev(dev))) { + return PRESTERA_IF_VID_E; + } else if (netif_is_bridge_master(dev)) { + return PRESTERA_IF_VID_E; + } else if (netif_is_lag_master(dev)) { + return PRESTERA_IF_LAG_E; + } else if (netif_is_macvlan(dev)) { + vlan = netdev_priv(dev); + return prestera_dev_if_type(vlan->lowerdev); + } else { + return PRESTERA_IF_PORT_E; + } +} + +static int +prestera_neigh_iface_init(struct prestera_switch *sw, + struct prestera_iface *iface, + struct neighbour *n) +{ + struct prestera_port *port; + + iface->vlan_id = 0; /* TODO: vlan egress */ + iface->type = prestera_dev_if_type(n->dev); + if (iface->type != PRESTERA_IF_PORT_E) + return -EINVAL; + + if (!prestera_netdev_check(n->dev)) + return -EINVAL; + + port = netdev_priv(n->dev); + iface->dev_port.hw_dev_num = port->dev_id; + iface->dev_port.port_num = port->hw_id; + + return 0; +} + +static struct prestera_kern_neigh_cache * +prestera_kern_neigh_cache_find(struct prestera_switch *sw, + struct prestera_kern_neigh_cache_key *key) +{ + struct prestera_kern_neigh_cache *n_cache; + + n_cache = + rhashtable_lookup_fast(&sw->router->kern_neigh_cache_ht, key, + __prestera_kern_neigh_cache_ht_params); + return n_cache; +} + +static void +__prestera_kern_neigh_cache_destruct(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *n_cache) +{ + dev_put(n_cache->key.dev); +} + +static void +__prestera_kern_neigh_cache_destroy(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *n_cache) +{ + rhashtable_remove_fast(&sw->router->kern_neigh_cache_ht, + &n_cache->ht_node, + __prestera_kern_neigh_cache_ht_params); + __prestera_kern_neigh_cache_destruct(sw, n_cache); + kfree(n_cache); +} + +static struct prestera_kern_neigh_cache * +__prestera_kern_neigh_cache_create(struct prestera_switch *sw, + struct prestera_kern_neigh_cache_key *key) +{ + struct prestera_kern_neigh_cache *n_cache; + int err; + + n_cache = kzalloc(sizeof(*n_cache), GFP_KERNEL); + if (!n_cache) + goto err_kzalloc; + + memcpy(&n_cache->key, key, sizeof(*key)); + dev_hold(n_cache->key.dev); + + INIT_LIST_HEAD(&n_cache->kern_fib_cache_list); + err = rhashtable_insert_fast(&sw->router->kern_neigh_cache_ht, + &n_cache->ht_node, + __prestera_kern_neigh_cache_ht_params); + if (err) + goto err_ht_insert; + + return n_cache; + +err_ht_insert: + dev_put(n_cache->key.dev); + kfree(n_cache); +err_kzalloc: + return NULL; +} + +static struct prestera_kern_neigh_cache * 
+prestera_kern_neigh_cache_get(struct prestera_switch *sw, + struct prestera_kern_neigh_cache_key *key) +{ + struct prestera_kern_neigh_cache *n_cache; + + n_cache = prestera_kern_neigh_cache_find(sw, key); + if (!n_cache) + n_cache = __prestera_kern_neigh_cache_create(sw, key); + + return n_cache; +} + +static struct prestera_kern_neigh_cache * +prestera_kern_neigh_cache_put(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *n_cache) +{ + if (!n_cache->in_kernel && + list_empty(&n_cache->kern_fib_cache_list)) { + __prestera_kern_neigh_cache_destroy(sw, n_cache); + return NULL; + } + + return n_cache; +} + +static struct prestera_kern_fib_cache * +prestera_kern_fib_cache_find(struct prestera_switch *sw, + struct prestera_kern_fib_cache_key *key) +{ + struct prestera_kern_fib_cache *fib_cache; + + fib_cache = + rhashtable_lookup_fast(&sw->router->kern_fib_cache_ht, key, + __prestera_kern_fib_cache_ht_params); + return fib_cache; +} + +static void +__prestera_kern_fib_cache_destruct(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fib_cache) +{ + struct prestera_kern_neigh_cache *n_cache; + int i; + + for (i = 0; i < PRESTERA_NHGR_SIZE_MAX; i++) { + n_cache = fib_cache->kern_neigh_cache_head[i].n_cache; + if (n_cache) { + list_del(&fib_cache->kern_neigh_cache_head[i].head); + prestera_kern_neigh_cache_put(sw, n_cache); + } + } + + fib_info_put(fib_cache->fen4_info.fi); +} + +static void +prestera_kern_fib_cache_destroy(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fib_cache) +{ + rhashtable_remove_fast(&sw->router->kern_fib_cache_ht, + &fib_cache->ht_node, + __prestera_kern_fib_cache_ht_params); + __prestera_kern_fib_cache_destruct(sw, fib_cache); + kfree(fib_cache); +} + +static int +__prestera_kern_fib_cache_create_nhs(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc) +{ + struct prestera_kern_neigh_cache_key nc_key; + struct prestera_kern_neigh_cache *n_cache; + struct fib_nh_common *nhc; + int i, nhs, err; + + if (!prestera_fib_info_is_nh(&fc->info)) + return 0; + + nhs = prestera_kern_fib_info_nhs(&fc->info); + if (nhs > PRESTERA_NHGR_SIZE_MAX) + return 0; + + for (i = 0; i < nhs; i++) { + nhc = prestera_kern_fib_info_nhc(&fc->fen4_info.info, i); + err = prestera_util_nhc2nc_key(sw, nhc, &nc_key); + if (err) + return 0; + + n_cache = prestera_kern_neigh_cache_get(sw, &nc_key); + if (!n_cache) + return 0; + + fc->kern_neigh_cache_head[i].this = fc; + fc->kern_neigh_cache_head[i].n_cache = n_cache; + list_add(&fc->kern_neigh_cache_head[i].head, + &n_cache->kern_fib_cache_list); + } + + return 0; +} + +/* Operations on fi (offload, etc) must be wrapped in utils. + * This function just create storage. 
+ */ +static struct prestera_kern_fib_cache * +prestera_kern_fib_cache_create(struct prestera_switch *sw, + struct prestera_kern_fib_cache_key *key, + struct fib_notifier_info *info) +{ + struct fib_entry_notifier_info *fen_info = + container_of(info, struct fib_entry_notifier_info, info); + struct prestera_kern_fib_cache *fib_cache; + int err; + + fib_cache = kzalloc(sizeof(*fib_cache), GFP_KERNEL); + if (!fib_cache) + goto err_kzalloc; + + memcpy(&fib_cache->key, key, sizeof(*key)); + fib_info_hold(fen_info->fi); + memcpy(&fib_cache->fen4_info, fen_info, sizeof(*fen_info)); + + err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht, + &fib_cache->ht_node, + __prestera_kern_fib_cache_ht_params); + if (err) + goto err_ht_insert; + + /* Handle nexthops */ + err = __prestera_kern_fib_cache_create_nhs(sw, fib_cache); + if (err) + goto out; /* Not critical */ + +out: + return fib_cache; + +err_ht_insert: + fib_info_put(fen_info->fi); + kfree(fib_cache); +err_kzalloc: + return NULL; +} + +static void +__prestera_k_arb_fib_nh_offload_set(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fibc, + struct prestera_kern_neigh_cache *nc, + bool offloaded, bool trap) +{ + struct fib_nh_common *nhc; + int i, nhs; + + nhs = prestera_kern_fib_info_nhs(&fibc->info); + for (i = 0; i < nhs; i++) { + nhc = prestera_kern_fib_info_nhc(&fibc->info, i); + if (!nc) { + prestera_util_kern_set_nh_offload(nhc, offloaded, trap); + continue; + } + + if (prestera_util_nhc_eq_n_cache_key(sw, nhc, &nc->key)) { + prestera_util_kern_set_nh_offload(nhc, offloaded, trap); + break; + } + } +} + +static void +__prestera_k_arb_n_offload_set(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *nc, + bool offloaded) +{ + struct neighbour *n; + + n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, + nc->key.dev); + if (!n) + return; + + prestera_util_kern_set_neigh_offload(n, offloaded); + neigh_release(n); +} + +static void +__prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc, + bool fail, bool offload, bool trap) +{ + struct fib_rt_info fri; + + switch (fc->key.addr.v) { + case PRESTERA_IPV4: + fri.fi = fc->fen4_info.fi; + fri.tb_id = fc->key.kern_tb_id; + fri.dst = fc->key.addr.u.ipv4; + fri.dst_len = fc->key.prefix_len; + fri.dscp = fc->fen4_info.dscp; + fri.type = fc->fen4_info.type; + /* flags begin */ + fri.offload = offload; + fri.trap = trap; + fri.offload_failed = fail; + /* flags end */ + fib_alias_hw_flags_set(&init_net, &fri); + return; + case PRESTERA_IPV6: + /* TODO */ + return; + } +} + +static void +__prestera_k_arb_n_lpm_set(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *n_cache, + bool enabled) +{ + struct prestera_nexthop_group_key nh_grp_key; + struct prestera_kern_fib_cache_key fc_key; + struct prestera_kern_fib_cache *fib_cache; + struct prestera_fib_node *fib_node; + struct prestera_fib_key fib_key; + + /* Exception for fc with prefix 32: LPM entry is already used by fib */ + memset(&fc_key, 0, sizeof(fc_key)); + fc_key.addr = n_cache->key.addr; + fc_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v); + /* But better to use tb_id of route, which pointed to this neighbour. */ + /* We take it from rif, because rif inconsistent. + * Must be separated in_rif and out_rif. 
+ * Also note: for each fib pointed to this neigh should be separated + * neigh lpm entry (for each ingress vr) + */ + fc_key.kern_tb_id = l3mdev_fib_table(n_cache->key.dev); + fib_cache = prestera_kern_fib_cache_find(sw, &fc_key); + memset(&fib_key, 0, sizeof(fib_key)); + fib_key.addr = n_cache->key.addr; + fib_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v); + fib_key.tb_id = prestera_fix_tb_id(fc_key.kern_tb_id); + fib_node = prestera_fib_node_find(sw, &fib_key); + if (!fib_cache || !fib_cache->reachable) { + if (!enabled && fib_node) { + if (prestera_fib_node_util_is_neighbour(fib_node)) + prestera_fib_node_destroy(sw, fib_node); + return; + } + } + + if (enabled && !fib_node) { + memset(&nh_grp_key, 0, sizeof(nh_grp_key)); + prestera_util_nc_key2nh_key(&n_cache->key, + &nh_grp_key.neigh[0]); + fib_node = prestera_fib_node_create(sw, &fib_key, + PRESTERA_FIB_TYPE_UC_NH, + &nh_grp_key); + if (!fib_node) + pr_err("%s failed ip=%pI4n", "prestera_fib_node_create", + &fib_key.addr.u.ipv4); + return; + } +} + +static void +__prestera_k_arb_nc_kern_fib_fetch(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *nc) +{ + if (prestera_util_kern_n_is_reachable(l3mdev_fib_table(nc->key.dev), + &nc->key.addr, nc->key.dev)) + nc->reachable = true; + else + nc->reachable = false; +} + +/* Kernel neighbour -> neigh_cache info */ +static void +__prestera_k_arb_nc_kern_n_fetch(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *nc) +{ + struct neighbour *n; + int err; + + memset(&nc->nh_neigh_info, 0, sizeof(nc->nh_neigh_info)); + n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, nc->key.dev); + if (!n) + goto out; + + read_lock_bh(&n->lock); + if (n->nud_state & NUD_VALID && !n->dead) { + err = prestera_neigh_iface_init(sw, &nc->nh_neigh_info.iface, + n); + if (err) + goto n_read_out; + + memcpy(&nc->nh_neigh_info.ha[0], &n->ha[0], ETH_ALEN); + nc->nh_neigh_info.connected = true; + } +n_read_out: + read_unlock_bh(&n->lock); +out: + nc->in_kernel = nc->nh_neigh_info.connected; + if (n) + neigh_release(n); +} + +/* neigh_cache info -> lpm update */ +static void +__prestera_k_arb_nc_apply(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *nc) +{ + struct prestera_kern_neigh_cache_head *nhead; + struct prestera_nh_neigh_key nh_key; + struct prestera_nh_neigh *nh_neigh; + int err; + + __prestera_k_arb_n_lpm_set(sw, nc, nc->reachable && nc->in_kernel); + __prestera_k_arb_n_offload_set(sw, nc, nc->reachable && nc->in_kernel); + + prestera_util_nc_key2nh_key(&nc->key, &nh_key); + nh_neigh = prestera_nh_neigh_find(sw, &nh_key); + if (!nh_neigh) + goto out; + + /* Do hw update only if something changed to prevent nh flap */ + if (memcmp(&nc->nh_neigh_info, &nh_neigh->info, + sizeof(nh_neigh->info))) { + memcpy(&nh_neigh->info, &nc->nh_neigh_info, + sizeof(nh_neigh->info)); + err = prestera_nh_neigh_set(sw, nh_neigh); + if (err) { + pr_err("%s failed with err=%d ip=%pI4n mac=%pM", + "prestera_nh_neigh_set", err, + &nh_neigh->key.addr.u.ipv4, + &nh_neigh->info.ha[0]); + goto out; + } + } + +out: + list_for_each_entry(nhead, &nc->kern_fib_cache_list, head) { + __prestera_k_arb_fib_nh_offload_set(sw, nhead->this, nc, + nc->in_kernel, + !nc->in_kernel); + } +} + +static int +__prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc) +{ + struct fib_nh_common *nhc; + int nh_cnt; + + memset(&fc->lpm_info, 0, sizeof(fc->lpm_info)); + + switch (prestera_kern_fib_info_type(&fc->info)) { + case RTN_UNICAST: + if 
(prestera_fib_info_is_direct(&fc->info) && + fc->key.prefix_len == + PRESTERA_IP_ADDR_PLEN(fc->key.addr.v)) { + /* This is special case. + * When prefix is 32. Than we will have conflict in lpm + * for direct route - once TRAP added, there is no + * place for neighbour entry. So represent direct route + * with prefix 32, as NH. So neighbour will be resolved + * as nexthop of this route. + */ + nhc = prestera_kern_fib_info_nhc(&fc->info, 0); + fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_UC_NH; + fc->lpm_info.nh_grp_key.neigh[0].addr = + fc->key.addr; + fc->lpm_info.nh_grp_key.neigh[0].rif = + nhc->nhc_dev; + + break; + } + + /* We can also get nh_grp_key from fi. This will be correct to + * because cache not always represent, what actually written to + * lpm. But we use nh cache, as well for now (for this case). + */ + for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) { + if (!fc->kern_neigh_cache_head[nh_cnt].n_cache) + break; + + fc->lpm_info.nh_grp_key.neigh[nh_cnt].addr = + fc->kern_neigh_cache_head[nh_cnt].n_cache->key.addr; + fc->lpm_info.nh_grp_key.neigh[nh_cnt].rif = + fc->kern_neigh_cache_head[nh_cnt].n_cache->key.dev; + } + + fc->lpm_info.fib_type = nh_cnt ? + PRESTERA_FIB_TYPE_UC_NH : + PRESTERA_FIB_TYPE_TRAP; + break; + /* Unsupported. Leave it for kernel: */ + case RTN_BROADCAST: + case RTN_MULTICAST: + /* Routes we must trap by design: */ + case RTN_LOCAL: + case RTN_UNREACHABLE: + case RTN_PROHIBIT: + fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP; + break; + case RTN_BLACKHOLE: + fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_DROP; + break; + default: + dev_err(sw->dev->dev, "Unsupported fib_type"); + return -EOPNOTSUPP; + } + + fc->lpm_info.fib_key.addr = fc->key.addr; + fc->lpm_info.fib_key.prefix_len = fc->key.prefix_len; + fc->lpm_info.fib_key.tb_id = prestera_fix_tb_id(fc->key.kern_tb_id); + + return 0; +} + +static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc, + bool enabled) +{ + struct prestera_fib_node *fib_node; + + fib_node = prestera_fib_node_find(sw, &fc->lpm_info.fib_key); + if (fib_node) + prestera_fib_node_destroy(sw, fib_node); + + if (!enabled) + return 0; + + fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key, + fc->lpm_info.fib_type, + &fc->lpm_info.nh_grp_key); + + if (!fib_node) { + dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d", + &fc->key.addr.u.ipv4, fc->key.prefix_len, + fc->key.kern_tb_id); + return -ENOENT; + } + + return 0; +} + +static int __prestera_k_arb_fc_apply(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc) +{ + int err; + + err = __prestera_pr_k_arb_fc_lpm_info_calc(sw, fc); + if (err) + return err; + + err = __prestera_k_arb_f_lpm_set(sw, fc, fc->reachable); + if (err) { + __prestera_k_arb_fib_lpm_offload_set(sw, fc, + true, false, false); + return err; + } + + switch (fc->lpm_info.fib_type) { + case PRESTERA_FIB_TYPE_UC_NH: + __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, + fc->reachable, false); + break; + case PRESTERA_FIB_TYPE_TRAP: + __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, + false, fc->reachable); + break; + case PRESTERA_FIB_TYPE_DROP: + __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, true, + fc->reachable); + break; + case PRESTERA_FIB_TYPE_INVALID: + break; + } + + return 0; +} + +static struct prestera_kern_fib_cache * +__prestera_k_arb_util_fib_overlaps(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc) +{ + struct prestera_kern_fib_cache_key fc_key; + struct prestera_kern_fib_cache *rfc; + + /* 
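+ * Only the RT_TABLE_LOCAL/RT_TABLE_MAIN pair is considered here.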
TODO: parse kernel rules */ + rfc = NULL; + if (fc->key.kern_tb_id == RT_TABLE_LOCAL) { + memcpy(&fc_key, &fc->key, sizeof(fc_key)); + fc_key.kern_tb_id = RT_TABLE_MAIN; + rfc = prestera_kern_fib_cache_find(sw, &fc_key); + } + + return rfc; +} + +static struct prestera_kern_fib_cache * +__prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc) +{ + struct prestera_kern_fib_cache_key fc_key; + struct prestera_kern_fib_cache *rfc; + + /* TODO: parse kernel rules */ + rfc = NULL; + if (fc->key.kern_tb_id == RT_TABLE_MAIN) { + memcpy(&fc_key, &fc->key, sizeof(fc_key)); + fc_key.kern_tb_id = RT_TABLE_LOCAL; + rfc = prestera_kern_fib_cache_find(sw, &fc_key); + } + + return rfc; +} + +static void __prestera_k_arb_hw_state_upd(struct prestera_switch *sw, + struct prestera_kern_neigh_cache *nc) +{ + struct prestera_nh_neigh_key nh_key; + struct prestera_nh_neigh *nh_neigh; + struct neighbour *n; + bool hw_active; + + prestera_util_nc_key2nh_key(&nc->key, &nh_key); + nh_neigh = prestera_nh_neigh_find(sw, &nh_key); + if (!nh_neigh) { + pr_err("Cannot find nh_neigh for cached %pI4n", + &nc->key.addr.u.ipv4); + return; + } + + hw_active = prestera_nh_neigh_util_hw_state(sw, nh_neigh); + +#ifdef PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH + if (!hw_active && nc->in_kernel) + goto out; +#else /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */ + if (!hw_active) + goto out; +#endif /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */ + + if (nc->key.addr.v == PRESTERA_IPV4) { + n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, + nc->key.dev); + if (!n) + n = neigh_create(&arp_tbl, &nc->key.addr.u.ipv4, + nc->key.dev); + } else { + n = NULL; + } + + if (!IS_ERR(n) && n) { + neigh_event_send(n, NULL); + neigh_release(n); + } else { + pr_err("Cannot create neighbour %pI4n", &nc->key.addr.u.ipv4); + } + +out: + return; +} + +/* Propagate hw state to kernel */ +static void prestera_k_arb_hw_evt(struct prestera_switch *sw) +{ + struct prestera_kern_neigh_cache *n_cache; + struct rhashtable_iter iter; + + rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter); + rhashtable_walk_start(&iter); + while (1) { + n_cache = rhashtable_walk_next(&iter); + + if (!n_cache) + break; + + if (IS_ERR(n_cache)) + continue; + + rhashtable_walk_stop(&iter); + __prestera_k_arb_hw_state_upd(sw, n_cache); + rhashtable_walk_start(&iter); + } + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); +} + +/* Propagate kernel event to hw */ +static void prestera_k_arb_n_evt(struct prestera_switch *sw, + struct neighbour *n) +{ + struct prestera_kern_neigh_cache_key n_key; + struct prestera_kern_neigh_cache *n_cache; + int err; + + err = prestera_util_neigh2nc_key(sw, n, &n_key); + if (err) + return; + + n_cache = prestera_kern_neigh_cache_find(sw, &n_key); + if (!n_cache) { + n_cache = prestera_kern_neigh_cache_get(sw, &n_key); + if (!n_cache) + return; + __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache); + } + + __prestera_k_arb_nc_kern_n_fetch(sw, n_cache); + __prestera_k_arb_nc_apply(sw, n_cache); + + prestera_kern_neigh_cache_put(sw, n_cache); +} + +static void __prestera_k_arb_fib_evt2nc(struct prestera_switch *sw) +{ + struct prestera_kern_neigh_cache *n_cache; + struct rhashtable_iter iter; + + rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter); + rhashtable_walk_start(&iter); + while (1) { + n_cache = rhashtable_walk_next(&iter); + + if (!n_cache) + break; + + if (IS_ERR(n_cache)) + continue; + + rhashtable_walk_stop(&iter); + __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache); + 
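+ /* then push the refreshed reachability down to lpm/offload state */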
__prestera_k_arb_nc_apply(sw, n_cache); + rhashtable_walk_start(&iter); + } + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); +} + +static int +prestera_k_arb_fib_evt(struct prestera_switch *sw, + bool replace, /* replace or del */ + struct fib_notifier_info *info) +{ + struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */ + struct prestera_kern_fib_cache_key fc_key; + struct prestera_kern_fib_cache *fib_cache; + int err; + + prestera_util_fen_info2fib_cache_key(info, &fc_key); + fib_cache = prestera_kern_fib_cache_find(sw, &fc_key); + if (fib_cache) { + fib_cache->reachable = false; + err = __prestera_k_arb_fc_apply(sw, fib_cache); + if (err) + dev_err(sw->dev->dev, + "Applying destroyed fib_cache failed"); + + bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache); + tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache); + if (!tfib_cache && bfib_cache) { + bfib_cache->reachable = true; + err = __prestera_k_arb_fc_apply(sw, bfib_cache); + if (err) + dev_err(sw->dev->dev, + "Applying fib_cache btm failed"); + } + + prestera_kern_fib_cache_destroy(sw, fib_cache); + } + + if (replace) { + fib_cache = prestera_kern_fib_cache_create(sw, &fc_key, info); + if (!fib_cache) { + dev_err(sw->dev->dev, "fib_cache == NULL"); + return -ENOENT; + } + + bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache); + tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache); + if (!tfib_cache) + fib_cache->reachable = true; + + if (bfib_cache) { + bfib_cache->reachable = false; + err = __prestera_k_arb_fc_apply(sw, bfib_cache); + if (err) + dev_err(sw->dev->dev, + "Applying fib_cache btm failed"); + } + + err = __prestera_k_arb_fc_apply(sw, fib_cache); + if (err) + dev_err(sw->dev->dev, "Applying fib_cache failed"); + } + + /* Update all neighs to resolve overlapped and apply related */ + __prestera_k_arb_fib_evt2nc(sw); + + return 0; +} + +static void __prestera_k_arb_abort_neigh_ht_cb(void *ptr, void *arg) +{ + struct prestera_kern_neigh_cache *n_cache = ptr; + struct prestera_switch *sw = arg; + + if (!list_empty(&n_cache->kern_fib_cache_list)) { + WARN_ON(1); /* BUG */ + return; + } + __prestera_k_arb_n_offload_set(sw, n_cache, false); + n_cache->in_kernel = false; + /* No need to destroy lpm. + * It will be aborted by destroy_ht + */ + __prestera_kern_neigh_cache_destruct(sw, n_cache); + kfree(n_cache); +} + +static void __prestera_k_arb_abort_fib_ht_cb(void *ptr, void *arg) +{ + struct prestera_kern_fib_cache *fib_cache = ptr; + struct prestera_switch *sw = arg; + + __prestera_k_arb_fib_lpm_offload_set(sw, fib_cache, + false, false, + false); + __prestera_k_arb_fib_nh_offload_set(sw, fib_cache, NULL, + false, false); + /* No need to destroy lpm. + * It will be aborted by destroy_ht + */ + __prestera_kern_fib_cache_destruct(sw, fib_cache); + kfree(fib_cache); +} + +static void prestera_k_arb_abort(struct prestera_switch *sw) +{ + /* Function to remove all arbiter entries and related hw objects. */ + /* Sequence: + * 1) Clear arbiter tables, but don't touch hw + * 2) Clear hw + * We use such approach, because arbiter object is not directly mapped + * to hw. So deletion of one arbiter object may even lead to creation of + * hw object (e.g. in case of overlapped routes). 
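+ * (e.g. deleting a LOCAL-table route may make the overlapped MAIN-table + * route reachable again, which re-creates its lpm entry in hw.)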
+ */ + rhashtable_free_and_destroy(&sw->router->kern_fib_cache_ht, + __prestera_k_arb_abort_fib_ht_cb, + sw); + rhashtable_free_and_destroy(&sw->router->kern_neigh_cache_ht, + __prestera_k_arb_abort_neigh_ht_cb, + sw); +} + +static int __prestera_inetaddr_port_event(struct net_device *port_dev, + unsigned long event, + struct netlink_ext_ack *extack) +{ + struct prestera_port *port = netdev_priv(port_dev); + struct prestera_rif_entry_key re_key = {}; + struct prestera_rif_entry *re; + u32 kern_tb_id; + int err; + + err = prestera_is_valid_mac_addr(port, port_dev->dev_addr); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "RIF MAC must have the same prefix"); + return err; + } + + kern_tb_id = l3mdev_fib_table(port_dev); + re_key.iface.type = PRESTERA_IF_PORT_E; + re_key.iface.dev_port.hw_dev_num = port->dev_id; + re_key.iface.dev_port.port_num = port->hw_id; + re = prestera_rif_entry_find(port->sw, &re_key); + + switch (event) { + case NETDEV_UP: + if (re) { + NL_SET_ERR_MSG_MOD(extack, "RIF already exist"); + return -EEXIST; + } + re = prestera_rif_entry_create(port->sw, &re_key, + prestera_fix_tb_id(kern_tb_id), + port_dev->dev_addr); + if (!re) { + NL_SET_ERR_MSG_MOD(extack, "Can't create RIF"); + return -EINVAL; + } + dev_hold(port_dev); + break; + case NETDEV_DOWN: + if (!re) { + NL_SET_ERR_MSG_MOD(extack, "Can't find RIF"); + return -EEXIST; + } + prestera_rif_entry_destroy(port->sw, re); + dev_put(port_dev); + break; + } + + return 0; +} + +static int __prestera_inetaddr_event(struct prestera_switch *sw, + struct net_device *dev, + unsigned long event, + struct netlink_ext_ack *extack) +{ + if (!prestera_netdev_check(dev) || netif_is_any_bridge_port(dev) || + netif_is_lag_port(dev)) + return 0; + + return __prestera_inetaddr_port_event(dev, event, extack); +} + +static int __prestera_inetaddr_cb(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + struct net_device *dev = ifa->ifa_dev->dev; + struct prestera_router *router = container_of(nb, + struct prestera_router, + inetaddr_nb); + struct in_device *idev; + int err = 0; + + if (event != NETDEV_DOWN) + goto out; + + /* Ignore if this is not latest address */ + idev = __in_dev_get_rtnl(dev); + if (idev && idev->ifa_list) + goto out; + + err = __prestera_inetaddr_event(router->sw, dev, event, NULL); +out: + return notifier_from_errno(err); +} + +static int __prestera_inetaddr_valid_cb(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct in_validator_info *ivi = (struct in_validator_info *)ptr; + struct net_device *dev = ivi->ivi_dev->dev; + struct prestera_router *router = container_of(nb, + struct prestera_router, + inetaddr_valid_nb); + struct in_device *idev; + int err = 0; + + if (event != NETDEV_UP) + goto out; + + /* Ignore if this is not first address */ + idev = __in_dev_get_rtnl(dev); + if (idev && idev->ifa_list) + goto out; + + if (ipv4_is_multicast(ivi->ivi_addr)) { + NL_SET_ERR_MSG_MOD(ivi->extack, + "Multicast addr on RIF is not supported"); + err = -EINVAL; + goto out; + } + + err = __prestera_inetaddr_event(router->sw, dev, event, ivi->extack); +out: + return notifier_from_errno(err); +} + +struct prestera_fib_event_work { + struct work_struct work; + struct prestera_switch *sw; + struct fib_entry_notifier_info fen_info; + unsigned long event; +}; + +static void __prestera_router_fib_event_work(struct work_struct *work) +{ + struct prestera_fib_event_work *fib_work = + container_of(work, struct prestera_fib_event_work, work); + struct 
prestera_switch *sw = fib_work->sw; + int err; + + rtnl_lock(); + + switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: + err = prestera_k_arb_fib_evt(sw, true, + &fib_work->fen_info.info); + if (err) + goto err_out; + + break; + case FIB_EVENT_ENTRY_DEL: + err = prestera_k_arb_fib_evt(sw, false, + &fib_work->fen_info.info); + if (err) + goto err_out; + + break; + } + + goto out; + +err_out: + dev_err(sw->dev->dev, "Error when processing %pI4h/%d", + &fib_work->fen_info.dst, + fib_work->fen_info.dst_len); +out: + fib_info_put(fib_work->fen_info.fi); + rtnl_unlock(); + kfree(fib_work); +} + +/* Called with rcu_read_lock() */ +static int __prestera_router_fib_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct prestera_fib_event_work *fib_work; + struct fib_entry_notifier_info *fen_info; + struct fib_notifier_info *info = ptr; + struct prestera_router *router; + + if (info->family != AF_INET) + return NOTIFY_DONE; + + router = container_of(nb, struct prestera_router, fib_nb); + + switch (event) { + case FIB_EVENT_ENTRY_REPLACE: + case FIB_EVENT_ENTRY_DEL: + fen_info = container_of(info, struct fib_entry_notifier_info, + info); + if (!fen_info->fi) + return NOTIFY_DONE; + + fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); + if (WARN_ON(!fib_work)) + return NOTIFY_BAD; + + fib_info_hold(fen_info->fi); + fib_work->fen_info = *fen_info; + fib_work->event = event; + fib_work->sw = router->sw; + INIT_WORK(&fib_work->work, __prestera_router_fib_event_work); + prestera_queue_work(&fib_work->work); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_DONE; +} + +struct prestera_netevent_work { + struct work_struct work; + struct prestera_switch *sw; + struct neighbour *n; +}; + +static void prestera_router_neigh_event_work(struct work_struct *work) +{ + struct prestera_netevent_work *net_work = + container_of(work, struct prestera_netevent_work, work); + struct prestera_switch *sw = net_work->sw; + struct neighbour *n = net_work->n; + + /* neigh is not a hw-related object; it lives only in the kernel, so + * serialize the cache update with the kernel under rtnl. 
*/ + rtnl_lock(); + + prestera_k_arb_n_evt(sw, n); + + neigh_release(n); + rtnl_unlock(); + kfree(net_work); +} + +static int prestera_router_netevent_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct prestera_netevent_work *net_work; + struct prestera_router *router; + struct neighbour *n = ptr; + + router = container_of(nb, struct prestera_router, netevent_nb); + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + if (n->tbl->family != AF_INET) + return NOTIFY_DONE; + + net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC); + if (WARN_ON(!net_work)) + return NOTIFY_BAD; + + neigh_clone(n); + net_work->n = n; + net_work->sw = router->sw; + INIT_WORK(&net_work->work, prestera_router_neigh_event_work); + prestera_queue_work(&net_work->work); + } + + return NOTIFY_DONE; +} + +static void prestera_router_update_neighs_work(struct work_struct *work) +{ + struct prestera_router *router; + + router = container_of(work, struct prestera_router, + neighs_update.dw.work); + rtnl_lock(); + + prestera_k_arb_hw_evt(router->sw); + + rtnl_unlock(); + prestera_queue_delayed_work(&router->neighs_update.dw, + msecs_to_jiffies(PRESTERA_NH_PROBE_INTERVAL)); +} + +static int prestera_neigh_work_init(struct prestera_switch *sw) +{ + INIT_DELAYED_WORK(&sw->router->neighs_update.dw, + prestera_router_update_neighs_work); + prestera_queue_delayed_work(&sw->router->neighs_update.dw, 0); + return 0; +} + +static void prestera_neigh_work_fini(struct prestera_switch *sw) +{ + cancel_delayed_work_sync(&sw->router->neighs_update.dw); +} + +int prestera_router_init(struct prestera_switch *sw) +{ + struct prestera_router *router; + int err, nhgrp_cache_bytes; + + router = kzalloc(sizeof(*sw->router), GFP_KERNEL); + if (!router) + return -ENOMEM; + + sw->router = router; + router->sw = sw; + + err = prestera_router_hw_init(sw); + if (err) + goto err_router_lib_init; + + err = rhashtable_init(&router->kern_fib_cache_ht, + &__prestera_kern_fib_cache_ht_params); + if (err) + goto err_kern_fib_cache_ht_init; + + err = rhashtable_init(&router->kern_neigh_cache_ht, + &__prestera_kern_neigh_cache_ht_params); + if (err) + goto err_kern_neigh_cache_ht_init; + + nhgrp_cache_bytes = sw->size_tbl_router_nexthop / 8 + 1; + router->nhgrp_hw_state_cache = kzalloc(nhgrp_cache_bytes, GFP_KERNEL); + if (!router->nhgrp_hw_state_cache) { + err = -ENOMEM; + goto err_nh_state_cache_alloc; + } + + err = prestera_neigh_work_init(sw); + if (err) + goto err_neigh_work_init; + + router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb; + err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb); + if (err) + goto err_register_inetaddr_validator_notifier; + + router->inetaddr_nb.notifier_call = __prestera_inetaddr_cb; + err = register_inetaddr_notifier(&router->inetaddr_nb); + if (err) + goto err_register_inetaddr_notifier; + + router->netevent_nb.notifier_call = prestera_router_netevent_event; + err = register_netevent_notifier(&router->netevent_nb); + if (err) + goto err_register_netevent_notifier; + + router->fib_nb.notifier_call = __prestera_router_fib_event; + err = register_fib_notifier(&init_net, &router->fib_nb, + /* TODO: flush fib entries */ NULL, NULL); + if (err) + goto err_register_fib_notifier; + + return 0; + +err_register_fib_notifier: + unregister_netevent_notifier(&router->netevent_nb); +err_register_netevent_notifier: + unregister_inetaddr_notifier(&router->inetaddr_nb); +err_register_inetaddr_notifier: + unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb); 
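+ /* unwind continues below in reverse order of initialization */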
+err_register_inetaddr_validator_notifier: + prestera_neigh_work_fini(sw); +err_neigh_work_init: + kfree(router->nhgrp_hw_state_cache); +err_nh_state_cache_alloc: + rhashtable_destroy(&router->kern_neigh_cache_ht); +err_kern_neigh_cache_ht_init: + rhashtable_destroy(&router->kern_fib_cache_ht); +err_kern_fib_cache_ht_init: + prestera_router_hw_fini(sw); +err_router_lib_init: + kfree(sw->router); + return err; +} + +void prestera_router_fini(struct prestera_switch *sw) +{ + unregister_fib_notifier(&init_net, &sw->router->fib_nb); + unregister_netevent_notifier(&sw->router->netevent_nb); + unregister_inetaddr_notifier(&sw->router->inetaddr_nb); + unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb); + prestera_neigh_work_fini(sw); + prestera_queue_drain(); + + prestera_k_arb_abort(sw); + + kfree(sw->router->nhgrp_hw_state_cache); + rhashtable_destroy(&sw->router->kern_fib_cache_ht); + prestera_router_hw_fini(sw); + kfree(sw->router); + sw->router = NULL; +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c new file mode 100644 index 0000000000..02faaea2ae --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c @@ -0,0 +1,688 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */ + +#include <linux/rhashtable.h> + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_router_hw.h" +#include "prestera_acl.h" + +/* Nexthop is pointed + * to port (not rif) + * +-------+ + * +>|nexthop| + * | +-------+ + * | + * +--+ +-----++ + * +------->|vr|<-+ +>|nh_grp| + * | +--+ | | +------+ + * | | | + * +-+-------+ +--+---+-+ + * |rif_entry| |fib_node| + * +---------+ +--------+ + * Rif is Fib - is exit point + * used as + * entry point + * for vr in hw + */ + +#define PRESTERA_NHGR_UNUSED (0) +#define PRESTERA_NHGR_DROP (0xFFFFFFFF) +/* Need to merge it with router_manager */ +#define PRESTERA_NH_ACTIVE_JIFFER_FILTER 3000 /* ms */ + +static const struct rhashtable_params __prestera_fib_ht_params = { + .key_offset = offsetof(struct prestera_fib_node, key), + .head_offset = offsetof(struct prestera_fib_node, ht_node), + .key_len = sizeof(struct prestera_fib_key), + .automatic_shrinking = true, +}; + +static const struct rhashtable_params __prestera_nh_neigh_ht_params = { + .key_offset = offsetof(struct prestera_nh_neigh, key), + .key_len = sizeof(struct prestera_nh_neigh_key), + .head_offset = offsetof(struct prestera_nh_neigh, ht_node), +}; + +static const struct rhashtable_params __prestera_nexthop_group_ht_params = { + .key_offset = offsetof(struct prestera_nexthop_group, key), + .key_len = sizeof(struct prestera_nexthop_group_key), + .head_offset = offsetof(struct prestera_nexthop_group, ht_node), +}; + +static int prestera_nexthop_group_set(struct prestera_switch *sw, + struct prestera_nexthop_group *nh_grp); +static bool +prestera_nexthop_group_util_hw_state(struct prestera_switch *sw, + struct prestera_nexthop_group *nh_grp); +static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg); + +/* TODO: move to router.h as macros */ +static bool prestera_nh_neigh_key_is_valid(struct prestera_nh_neigh_key *key) +{ + return memchr_inv(key, 0, sizeof(*key)) ? 
true : false; +} + +int prestera_router_hw_init(struct prestera_switch *sw) +{ + int err; + + err = rhashtable_init(&sw->router->nh_neigh_ht, + &__prestera_nh_neigh_ht_params); + if (err) + goto err_nh_neigh_ht_init; + + err = rhashtable_init(&sw->router->nexthop_group_ht, + &__prestera_nexthop_group_ht_params); + if (err) + goto err_nexthop_grp_ht_init; + + err = rhashtable_init(&sw->router->fib_ht, + &__prestera_fib_ht_params); + if (err) + goto err_fib_ht_init; + + INIT_LIST_HEAD(&sw->router->vr_list); + INIT_LIST_HEAD(&sw->router->rif_entry_list); + + return 0; + +err_fib_ht_init: + rhashtable_destroy(&sw->router->nexthop_group_ht); +err_nexthop_grp_ht_init: + rhashtable_destroy(&sw->router->nh_neigh_ht); +err_nh_neigh_ht_init: + return 0; +} + +void prestera_router_hw_fini(struct prestera_switch *sw) +{ + rhashtable_free_and_destroy(&sw->router->fib_ht, + prestera_fib_node_destroy_ht_cb, sw); + WARN_ON(!list_empty(&sw->router->vr_list)); + WARN_ON(!list_empty(&sw->router->rif_entry_list)); + rhashtable_destroy(&sw->router->fib_ht); + rhashtable_destroy(&sw->router->nexthop_group_ht); + rhashtable_destroy(&sw->router->nh_neigh_ht); +} + +static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw, + u32 tb_id) +{ + struct prestera_vr *vr; + + list_for_each_entry(vr, &sw->router->vr_list, router_node) { + if (vr->tb_id == tb_id) + return vr; + } + + return NULL; +} + +static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw, + u32 tb_id, + struct netlink_ext_ack *extack) +{ + struct prestera_vr *vr; + int err; + + vr = kzalloc(sizeof(*vr), GFP_KERNEL); + if (!vr) { + err = -ENOMEM; + goto err_alloc_vr; + } + + vr->tb_id = tb_id; + + err = prestera_hw_vr_create(sw, &vr->hw_vr_id); + if (err) + goto err_hw_create; + + list_add(&vr->router_node, &sw->router->vr_list); + + return vr; + +err_hw_create: + kfree(vr); +err_alloc_vr: + return ERR_PTR(err); +} + +static void __prestera_vr_destroy(struct prestera_switch *sw, + struct prestera_vr *vr) +{ + list_del(&vr->router_node); + prestera_hw_vr_delete(sw, vr->hw_vr_id); + kfree(vr); +} + +static struct prestera_vr *prestera_vr_get(struct prestera_switch *sw, u32 tb_id, + struct netlink_ext_ack *extack) +{ + struct prestera_vr *vr; + + vr = __prestera_vr_find(sw, tb_id); + if (vr) { + refcount_inc(&vr->refcount); + } else { + vr = __prestera_vr_create(sw, tb_id, extack); + if (IS_ERR(vr)) + return ERR_CAST(vr); + + refcount_set(&vr->refcount, 1); + } + + return vr; +} + +static void prestera_vr_put(struct prestera_switch *sw, struct prestera_vr *vr) +{ + if (refcount_dec_and_test(&vr->refcount)) + __prestera_vr_destroy(sw, vr); +} + +/* iface is overhead struct. vr_id also can be removed. 
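+ * (only the iface fields copied in the helper below form the lookup + * key; vr_id is filled in from the entry's vr right before each hw + * call.)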
*/ +static int +__prestera_rif_entry_key_copy(const struct prestera_rif_entry_key *in, + struct prestera_rif_entry_key *out) +{ + memset(out, 0, sizeof(*out)); + + switch (in->iface.type) { + case PRESTERA_IF_PORT_E: + out->iface.dev_port.hw_dev_num = in->iface.dev_port.hw_dev_num; + out->iface.dev_port.port_num = in->iface.dev_port.port_num; + break; + case PRESTERA_IF_LAG_E: + out->iface.lag_id = in->iface.lag_id; + break; + case PRESTERA_IF_VID_E: + out->iface.vlan_id = in->iface.vlan_id; + break; + default: + WARN(1, "Unsupported iface type"); + return -EINVAL; + } + + out->iface.type = in->iface.type; + return 0; +} + +struct prestera_rif_entry * +prestera_rif_entry_find(const struct prestera_switch *sw, + const struct prestera_rif_entry_key *k) +{ + struct prestera_rif_entry *rif_entry; + struct prestera_rif_entry_key lk; /* lookup key */ + + if (__prestera_rif_entry_key_copy(k, &lk)) + return NULL; + + list_for_each_entry(rif_entry, &sw->router->rif_entry_list, + router_node) { + if (!memcmp(k, &rif_entry->key, sizeof(*k))) + return rif_entry; + } + + return NULL; +} + +void prestera_rif_entry_destroy(struct prestera_switch *sw, + struct prestera_rif_entry *e) +{ + struct prestera_iface iface; + + list_del(&e->router_node); + + memcpy(&iface, &e->key.iface, sizeof(iface)); + iface.vr_id = e->vr->hw_vr_id; + prestera_hw_rif_delete(sw, e->hw_id, &iface); + + prestera_vr_put(sw, e->vr); + kfree(e); +} + +struct prestera_rif_entry * +prestera_rif_entry_create(struct prestera_switch *sw, + struct prestera_rif_entry_key *k, + u32 tb_id, const unsigned char *addr) +{ + int err; + struct prestera_rif_entry *e; + struct prestera_iface iface; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + goto err_kzalloc; + + if (__prestera_rif_entry_key_copy(k, &e->key)) + goto err_key_copy; + + e->vr = prestera_vr_get(sw, tb_id, NULL); + if (IS_ERR(e->vr)) + goto err_vr_get; + + memcpy(&e->addr, addr, sizeof(e->addr)); + + /* HW */ + memcpy(&iface, &e->key.iface, sizeof(iface)); + iface.vr_id = e->vr->hw_vr_id; + err = prestera_hw_rif_create(sw, &iface, e->addr, &e->hw_id); + if (err) + goto err_hw_create; + + list_add(&e->router_node, &sw->router->rif_entry_list); + + return e; + +err_hw_create: + prestera_vr_put(sw, e->vr); +err_vr_get: +err_key_copy: + kfree(e); +err_kzalloc: + return NULL; +} + +static void __prestera_nh_neigh_destroy(struct prestera_switch *sw, + struct prestera_nh_neigh *neigh) +{ + rhashtable_remove_fast(&sw->router->nh_neigh_ht, + &neigh->ht_node, + __prestera_nh_neigh_ht_params); + kfree(neigh); +} + +static struct prestera_nh_neigh * +__prestera_nh_neigh_create(struct prestera_switch *sw, + struct prestera_nh_neigh_key *key) +{ + struct prestera_nh_neigh *neigh; + int err; + + neigh = kzalloc(sizeof(*neigh), GFP_KERNEL); + if (!neigh) + goto err_kzalloc; + + memcpy(&neigh->key, key, sizeof(*key)); + neigh->info.connected = false; + INIT_LIST_HEAD(&neigh->nexthop_group_list); + err = rhashtable_insert_fast(&sw->router->nh_neigh_ht, + &neigh->ht_node, + __prestera_nh_neigh_ht_params); + if (err) + goto err_rhashtable_insert; + + return neigh; + +err_rhashtable_insert: + kfree(neigh); +err_kzalloc: + return NULL; +} + +struct prestera_nh_neigh * +prestera_nh_neigh_find(struct prestera_switch *sw, + struct prestera_nh_neigh_key *key) +{ + struct prestera_nh_neigh *nh_neigh; + + nh_neigh = rhashtable_lookup_fast(&sw->router->nh_neigh_ht, + key, __prestera_nh_neigh_ht_params); + return nh_neigh; +} + +struct prestera_nh_neigh * +prestera_nh_neigh_get(struct prestera_switch *sw, + 
struct prestera_nh_neigh_key *key) +{ + struct prestera_nh_neigh *neigh; + + neigh = prestera_nh_neigh_find(sw, key); + if (!neigh) + return __prestera_nh_neigh_create(sw, key); + + return neigh; +} + +void prestera_nh_neigh_put(struct prestera_switch *sw, + struct prestera_nh_neigh *neigh) +{ + if (list_empty(&neigh->nexthop_group_list)) + __prestera_nh_neigh_destroy(sw, neigh); +} + +/* Updates new prestera_neigh_info */ +int prestera_nh_neigh_set(struct prestera_switch *sw, + struct prestera_nh_neigh *neigh) +{ + struct prestera_nh_neigh_head *nh_head; + struct prestera_nexthop_group *nh_grp; + int err; + + list_for_each_entry(nh_head, &neigh->nexthop_group_list, head) { + nh_grp = nh_head->this; + err = prestera_nexthop_group_set(sw, nh_grp); + if (err) + return err; + } + + return 0; +} + +bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw, + struct prestera_nh_neigh *nh_neigh) +{ + bool state; + struct prestera_nh_neigh_head *nh_head, *tmp; + + state = false; + list_for_each_entry_safe(nh_head, tmp, + &nh_neigh->nexthop_group_list, head) { + state = prestera_nexthop_group_util_hw_state(sw, nh_head->this); + if (state) + goto out; + } + +out: + return state; +} + +static struct prestera_nexthop_group * +__prestera_nexthop_group_create(struct prestera_switch *sw, + struct prestera_nexthop_group_key *key) +{ + struct prestera_nexthop_group *nh_grp; + struct prestera_nh_neigh *nh_neigh; + int nh_cnt, err, gid; + + nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL); + if (!nh_grp) + goto err_kzalloc; + + memcpy(&nh_grp->key, key, sizeof(*key)); + for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) { + if (!prestera_nh_neigh_key_is_valid(&nh_grp->key.neigh[nh_cnt])) + break; + + nh_neigh = prestera_nh_neigh_get(sw, + &nh_grp->key.neigh[nh_cnt]); + if (!nh_neigh) + goto err_nh_neigh_get; + + nh_grp->nh_neigh_head[nh_cnt].neigh = nh_neigh; + nh_grp->nh_neigh_head[nh_cnt].this = nh_grp; + list_add(&nh_grp->nh_neigh_head[nh_cnt].head, + &nh_neigh->nexthop_group_list); + } + + err = prestera_hw_nh_group_create(sw, nh_cnt, &nh_grp->grp_id); + if (err) + goto err_nh_group_create; + + err = prestera_nexthop_group_set(sw, nh_grp); + if (err) + goto err_nexthop_group_set; + + err = rhashtable_insert_fast(&sw->router->nexthop_group_ht, + &nh_grp->ht_node, + __prestera_nexthop_group_ht_params); + if (err) + goto err_ht_insert; + + /* reset cache for created group */ + gid = nh_grp->grp_id; + sw->router->nhgrp_hw_state_cache[gid / 8] &= ~BIT(gid % 8); + + return nh_grp; + +err_ht_insert: +err_nexthop_group_set: + prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id); +err_nh_group_create: +err_nh_neigh_get: + for (nh_cnt--; nh_cnt >= 0; nh_cnt--) { + list_del(&nh_grp->nh_neigh_head[nh_cnt].head); + prestera_nh_neigh_put(sw, nh_grp->nh_neigh_head[nh_cnt].neigh); + } + + kfree(nh_grp); +err_kzalloc: + return NULL; +} + +static void +__prestera_nexthop_group_destroy(struct prestera_switch *sw, + struct prestera_nexthop_group *nh_grp) +{ + struct prestera_nh_neigh *nh_neigh; + int nh_cnt; + + rhashtable_remove_fast(&sw->router->nexthop_group_ht, + &nh_grp->ht_node, + __prestera_nexthop_group_ht_params); + + for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) { + nh_neigh = nh_grp->nh_neigh_head[nh_cnt].neigh; + if (!nh_neigh) + break; + + list_del(&nh_grp->nh_neigh_head[nh_cnt].head); + prestera_nh_neigh_put(sw, nh_neigh); + } + + prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id); + kfree(nh_grp); +} + +static struct prestera_nexthop_group * 
+__prestera_nexthop_group_find(struct prestera_switch *sw, + struct prestera_nexthop_group_key *key) +{ + struct prestera_nexthop_group *nh_grp; + + nh_grp = rhashtable_lookup_fast(&sw->router->nexthop_group_ht, + key, __prestera_nexthop_group_ht_params); + return nh_grp; +} + +static struct prestera_nexthop_group * +prestera_nexthop_group_get(struct prestera_switch *sw, + struct prestera_nexthop_group_key *key) +{ + struct prestera_nexthop_group *nh_grp; + + nh_grp = __prestera_nexthop_group_find(sw, key); + if (nh_grp) { + refcount_inc(&nh_grp->refcount); + } else { + nh_grp = __prestera_nexthop_group_create(sw, key); + if (!nh_grp) + return ERR_PTR(-ENOMEM); + + refcount_set(&nh_grp->refcount, 1); + } + + return nh_grp; +} + +static void prestera_nexthop_group_put(struct prestera_switch *sw, + struct prestera_nexthop_group *nh_grp) +{ + if (refcount_dec_and_test(&nh_grp->refcount)) + __prestera_nexthop_group_destroy(sw, nh_grp); +} + +/* Updates with new nh_neigh's info */ +static int prestera_nexthop_group_set(struct prestera_switch *sw, + struct prestera_nexthop_group *nh_grp) +{ + struct prestera_neigh_info info[PRESTERA_NHGR_SIZE_MAX]; + struct prestera_nh_neigh *neigh; + int nh_cnt; + + memset(&info[0], 0, sizeof(info)); + for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) { + neigh = nh_grp->nh_neigh_head[nh_cnt].neigh; + if (!neigh) + break; + + memcpy(&info[nh_cnt], &neigh->info, sizeof(neigh->info)); + } + + return prestera_hw_nh_entries_set(sw, nh_cnt, &info[0], nh_grp->grp_id); +} + +static bool +prestera_nexthop_group_util_hw_state(struct prestera_switch *sw, + struct prestera_nexthop_group *nh_grp) +{ + int err; + u32 buf_size = sw->size_tbl_router_nexthop / 8 + 1; + u32 gid = nh_grp->grp_id; + u8 *cache = sw->router->nhgrp_hw_state_cache; + + /* Antijitter + * Prevent situation, when we read state of nh_grp twice in short time, + * and state bit is still cleared on second call. So just stuck active + * state for PRESTERA_NH_ACTIVE_JIFFER_FILTER, after last occurred. 
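+ * (i.e. re-read the hw bitmap at most once per filter window; within + * the window the cached bit is returned unchanged.)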
+ */ + if (!time_before(jiffies, sw->router->nhgrp_hw_cache_kick + + msecs_to_jiffies(PRESTERA_NH_ACTIVE_JIFFER_FILTER))) { + err = prestera_hw_nhgrp_blk_get(sw, cache, buf_size); + if (err) { + pr_err("Failed to get hw state nh_grp's"); + return false; + } + + sw->router->nhgrp_hw_cache_kick = jiffies; + } + + if (cache[gid / 8] & BIT(gid % 8)) + return true; + + return false; +} + +struct prestera_fib_node * +prestera_fib_node_find(struct prestera_switch *sw, struct prestera_fib_key *key) +{ + struct prestera_fib_node *fib_node; + + fib_node = rhashtable_lookup_fast(&sw->router->fib_ht, key, + __prestera_fib_ht_params); + return fib_node; +} + +static void __prestera_fib_node_destruct(struct prestera_switch *sw, + struct prestera_fib_node *fib_node) +{ + struct prestera_vr *vr; + + vr = fib_node->info.vr; + prestera_hw_lpm_del(sw, vr->hw_vr_id, fib_node->key.addr.u.ipv4, + fib_node->key.prefix_len); + switch (fib_node->info.type) { + case PRESTERA_FIB_TYPE_UC_NH: + prestera_nexthop_group_put(sw, fib_node->info.nh_grp); + break; + case PRESTERA_FIB_TYPE_TRAP: + break; + case PRESTERA_FIB_TYPE_DROP: + break; + default: + pr_err("Unknown fib_node->info.type = %d", + fib_node->info.type); + } + + prestera_vr_put(sw, vr); +} + +void prestera_fib_node_destroy(struct prestera_switch *sw, + struct prestera_fib_node *fib_node) +{ + __prestera_fib_node_destruct(sw, fib_node); + rhashtable_remove_fast(&sw->router->fib_ht, &fib_node->ht_node, + __prestera_fib_ht_params); + kfree(fib_node); +} + +static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg) +{ + struct prestera_fib_node *node = ptr; + struct prestera_switch *sw = arg; + + __prestera_fib_node_destruct(sw, node); + kfree(node); +} + +struct prestera_fib_node * +prestera_fib_node_create(struct prestera_switch *sw, + struct prestera_fib_key *key, + enum prestera_fib_type fib_type, + struct prestera_nexthop_group_key *nh_grp_key) +{ + struct prestera_fib_node *fib_node; + u32 grp_id; + struct prestera_vr *vr; + int err; + + fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL); + if (!fib_node) + goto err_kzalloc; + + memcpy(&fib_node->key, key, sizeof(*key)); + fib_node->info.type = fib_type; + + vr = prestera_vr_get(sw, key->tb_id, NULL); + if (IS_ERR(vr)) + goto err_vr_get; + + fib_node->info.vr = vr; + + switch (fib_type) { + case PRESTERA_FIB_TYPE_TRAP: + grp_id = PRESTERA_NHGR_UNUSED; + break; + case PRESTERA_FIB_TYPE_DROP: + grp_id = PRESTERA_NHGR_DROP; + break; + case PRESTERA_FIB_TYPE_UC_NH: + fib_node->info.nh_grp = prestera_nexthop_group_get(sw, + nh_grp_key); + if (IS_ERR(fib_node->info.nh_grp)) + goto err_nh_grp_get; + + grp_id = fib_node->info.nh_grp->grp_id; + break; + default: + pr_err("Unsupported fib_type %d", fib_type); + goto err_nh_grp_get; + } + + err = prestera_hw_lpm_add(sw, vr->hw_vr_id, key->addr.u.ipv4, + key->prefix_len, grp_id); + if (err) + goto err_lpm_add; + + err = rhashtable_insert_fast(&sw->router->fib_ht, &fib_node->ht_node, + __prestera_fib_ht_params); + if (err) + goto err_ht_insert; + + return fib_node; + +err_ht_insert: + prestera_hw_lpm_del(sw, vr->hw_vr_id, key->addr.u.ipv4, + key->prefix_len); +err_lpm_add: + if (fib_type == PRESTERA_FIB_TYPE_UC_NH) + prestera_nexthop_group_put(sw, fib_node->info.nh_grp); +err_nh_grp_get: + prestera_vr_put(sw, vr); +err_vr_get: + kfree(fib_node); +err_kzalloc: + return NULL; +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h new file mode 100644 index 0000000000..9ca97919c8 --- 
/dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_ROUTER_HW_H_ +#define _PRESTERA_ROUTER_HW_H_ + +struct prestera_vr { + struct list_head router_node; + refcount_t refcount; + u32 tb_id; /* key (kernel fib table id) */ + u16 hw_vr_id; /* virtual router ID */ + u8 __pad[2]; +}; + +struct prestera_rif_entry { + struct prestera_rif_entry_key { + struct prestera_iface iface; + } key; + struct prestera_vr *vr; + unsigned char addr[ETH_ALEN]; + u16 hw_id; /* rif_id */ + struct list_head router_node; /* ht */ +}; + +struct prestera_ip_addr { + union { + __be32 ipv4; + struct in6_addr ipv6; + } u; + enum { + PRESTERA_IPV4 = 0, + PRESTERA_IPV6 + } v; +#define PRESTERA_IP_ADDR_PLEN(V) ((V) == PRESTERA_IPV4 ? 32 : \ + /* (V) == PRESTERA_IPV6 ? */ 128 /* : 0 */) +}; + +struct prestera_nh_neigh_key { + struct prestera_ip_addr addr; + /* Seems like rif is obsolete, because there is iface in info ? + * Key can contain functional fields, or fields, which is used to + * filter duplicate objects on logical level (before you pass it to + * HW)... also key can be used to cover hardware restrictions. + * In our case rif - is logical interface (even can be VLAN), which + * is used in combination with IP address (which is also not related to + * hardware nexthop) to provide logical compression of created nexthops. + * You even can imagine, that rif+IPaddr is just cookie. + */ + /* struct prestera_rif *rif; */ + /* Use just as cookie, to divide ARP domains (in order with addr) */ + void *rif; +}; + +/* Used for hw call */ +struct prestera_neigh_info { + struct prestera_iface iface; + unsigned char ha[ETH_ALEN]; + u8 connected; /* bool. indicate, if mac/oif valid */ + u8 __pad[1]; +}; + +/* Used to notify nh about neigh change */ +struct prestera_nh_neigh { + struct prestera_nh_neigh_key key; + struct prestera_neigh_info info; + struct rhash_head ht_node; /* node of prestera_vr */ + struct list_head nexthop_group_list; +}; + +#define PRESTERA_NHGR_SIZE_MAX 4 + +struct prestera_nexthop_group { + struct prestera_nexthop_group_key { + struct prestera_nh_neigh_key neigh[PRESTERA_NHGR_SIZE_MAX]; + } key; + /* Store intermediate object here. + * This prevent overhead kzalloc call. + */ + /* nh_neigh is used only to notify nexthop_group */ + struct prestera_nh_neigh_head { + struct prestera_nexthop_group *this; + struct list_head head; + /* ptr to neigh is not necessary. 
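+ * (group membership itself only needs the embedded list head.)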
+ * It used to prevent lookup of nh_neigh by key (n) on destroy + */ + struct prestera_nh_neigh *neigh; + } nh_neigh_head[PRESTERA_NHGR_SIZE_MAX]; + struct rhash_head ht_node; /* node of prestera_vr */ + refcount_t refcount; + u32 grp_id; /* hw */ +}; + +struct prestera_fib_key { + struct prestera_ip_addr addr; + u32 prefix_len; + u32 tb_id; +}; + +struct prestera_fib_info { + struct prestera_vr *vr; + struct list_head vr_node; + enum prestera_fib_type { + PRESTERA_FIB_TYPE_INVALID = 0, + /* must be pointer to nh_grp id */ + PRESTERA_FIB_TYPE_UC_NH, + /* It can be connected route + * and will be overlapped with neighbours + */ + PRESTERA_FIB_TYPE_TRAP, + PRESTERA_FIB_TYPE_DROP + } type; + /* Valid only if type = UC_NH*/ + struct prestera_nexthop_group *nh_grp; +}; + +struct prestera_fib_node { + struct rhash_head ht_node; /* node of prestera_vr */ + struct prestera_fib_key key; + struct prestera_fib_info info; /* action related info */ +}; + +struct prestera_rif_entry * +prestera_rif_entry_find(const struct prestera_switch *sw, + const struct prestera_rif_entry_key *k); +void prestera_rif_entry_destroy(struct prestera_switch *sw, + struct prestera_rif_entry *e); +struct prestera_rif_entry * +prestera_rif_entry_create(struct prestera_switch *sw, + struct prestera_rif_entry_key *k, + u32 tb_id, const unsigned char *addr); +struct prestera_nh_neigh * +prestera_nh_neigh_find(struct prestera_switch *sw, + struct prestera_nh_neigh_key *key); +struct prestera_nh_neigh * +prestera_nh_neigh_get(struct prestera_switch *sw, + struct prestera_nh_neigh_key *key); +void prestera_nh_neigh_put(struct prestera_switch *sw, + struct prestera_nh_neigh *neigh); +int prestera_nh_neigh_set(struct prestera_switch *sw, + struct prestera_nh_neigh *neigh); +bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw, + struct prestera_nh_neigh *nh_neigh); +struct prestera_fib_node *prestera_fib_node_find(struct prestera_switch *sw, + struct prestera_fib_key *key); +void prestera_fib_node_destroy(struct prestera_switch *sw, + struct prestera_fib_node *fib_node); +struct prestera_fib_node * +prestera_fib_node_create(struct prestera_switch *sw, + struct prestera_fib_key *key, + enum prestera_fib_type fib_type, + struct prestera_nexthop_group_key *nh_grp_key); +int prestera_router_hw_init(struct prestera_switch *sw); +void prestera_router_hw_fini(struct prestera_switch *sw); + +#endif /* _PRESTERA_ROUTER_HW_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c new file mode 100644 index 0000000000..cc2a9ae794 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c @@ -0,0 +1,820 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. 
All rights reserved */ + +#include <linux/bitfield.h> +#include <linux/dmapool.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/platform_device.h> + +#include "prestera_dsa.h" +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_rxtx.h" +#include "prestera_devlink.h" + +#define PRESTERA_SDMA_WAIT_MUL 10 + +struct prestera_sdma_desc { + __le32 word1; + __le32 word2; + __le32 buff; + __le32 next; +} __packed __aligned(16); + +#define PRESTERA_SDMA_BUFF_SIZE_MAX 1544 + +#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \ + ((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0)) + +#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \ + ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31) + +#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \ + (PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN) + +#define PRESTERA_SDMA_RX_DESC_CPU_OWN 0 +#define PRESTERA_SDMA_RX_DESC_DMA_OWN 1 + +#define PRESTERA_SDMA_RX_QUEUE_NUM 8 + +#define PRESTERA_SDMA_RX_DESC_PER_Q 1000 + +#define PRESTERA_SDMA_TX_DESC_PER_Q 1000 +#define PRESTERA_SDMA_TX_MAX_BURST 64 + +#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \ + ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31) + +#define PRESTERA_SDMA_TX_DESC_CPU_OWN 0 +#define PRESTERA_SDMA_TX_DESC_DMA_OWN 1U + +#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \ + (PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN) + +#define PRESTERA_SDMA_TX_DESC_LAST BIT(20) +#define PRESTERA_SDMA_TX_DESC_FIRST BIT(21) +#define PRESTERA_SDMA_TX_DESC_CALC_CRC BIT(12) + +#define PRESTERA_SDMA_TX_DESC_SINGLE \ + (PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST) + +#define PRESTERA_SDMA_TX_DESC_INIT \ + (PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC) + +#define PRESTERA_SDMA_RX_INTR_MASK_REG 0x2814 +#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG 0x2680 +#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n) (0x260C + (n) * 16) + +#define PRESTERA_SDMA_TX_QUEUE_DESC_REG 0x26C0 +#define PRESTERA_SDMA_TX_QUEUE_START_REG 0x2868 + +struct prestera_sdma_buf { + struct prestera_sdma_desc *desc; + dma_addr_t desc_dma; + struct sk_buff *skb; + dma_addr_t buf_dma; + bool is_used; +}; + +struct prestera_rx_ring { + struct prestera_sdma_buf *bufs; + int next_rx; +}; + +struct prestera_tx_ring { + struct prestera_sdma_buf *bufs; + int next_tx; + int max_burst; + int burst; +}; + +struct prestera_sdma { + struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM]; + struct prestera_tx_ring tx_ring; + struct prestera_switch *sw; + struct dma_pool *desc_pool; + struct work_struct tx_work; + struct napi_struct rx_napi; + struct net_device napi_dev; + u32 map_addr; + u64 dma_mask; + /* protect SDMA with concurrent access from multiple CPUs */ + spinlock_t tx_lock; +}; + +struct prestera_rxtx { + struct prestera_sdma sdma; +}; + +static int prestera_sdma_buf_init(struct prestera_sdma *sdma, + struct prestera_sdma_buf *buf) +{ + struct prestera_sdma_desc *desc; + dma_addr_t dma; + + desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma); + if (!desc) + return -ENOMEM; + + buf->buf_dma = DMA_MAPPING_ERROR; + buf->desc_dma = dma; + buf->desc = desc; + buf->skb = NULL; + + return 0; +} + +static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa) +{ + return sdma->map_addr + pa; +} + +static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma, + struct prestera_sdma_desc *desc, + dma_addr_t buf) +{ + u32 word = le32_to_cpu(desc->word2); + + u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0)); + desc->word2 = 
cpu_to_le32(word); + + desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf)); + + /* make sure buffer is set before reset the descriptor */ + wmb(); + + desc->word1 = cpu_to_le32(0xA0000000); +} + +static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma, + struct prestera_sdma_desc *desc, + dma_addr_t next) +{ + desc->next = cpu_to_le32(prestera_sdma_map(sdma, next)); +} + +static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma, + struct prestera_sdma_buf *buf) +{ + struct device *dev = sdma->sw->dev->dev; + struct sk_buff *skb; + dma_addr_t dma; + + skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma)) + goto err_dma_map; + + if (buf->skb) + dma_unmap_single(dev, buf->buf_dma, buf->skb->len, + DMA_FROM_DEVICE); + + buf->buf_dma = dma; + buf->skb = skb; + + return 0; + +err_dma_map: + kfree_skb(skb); + + return -ENOMEM; +} + +static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma, + struct prestera_sdma_buf *buf) +{ + dma_addr_t buf_dma = buf->buf_dma; + struct sk_buff *skb = buf->skb; + u32 len = skb->len; + int err; + + err = prestera_sdma_rx_skb_alloc(sdma, buf); + if (err) { + buf->buf_dma = buf_dma; + buf->skb = skb; + + skb = alloc_skb(skb->len, GFP_ATOMIC); + if (skb) { + skb_put(skb, len); + skb_copy_from_linear_data(buf->skb, skb->data, len); + } + } + + prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma); + + return skb; +} + +static int prestera_rxtx_process_skb(struct prestera_sdma *sdma, + struct sk_buff *skb) +{ + struct prestera_port *port; + struct prestera_dsa dsa; + u32 hw_port, dev_id; + u8 cpu_code; + int err; + + skb_pull(skb, ETH_HLEN); + + /* ethertype field is part of the dsa header */ + err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN); + if (err) + return err; + + dev_id = dsa.hw_dev_num; + hw_port = dsa.port_num; + + port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port); + if (unlikely(!port)) { + dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port(%u, %u)\n", + dev_id, hw_port); + return -ENOENT; + } + + if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN))) + return -EINVAL; + + /* remove DSA tag and update checksum */ + skb_pull_rcsum(skb, PRESTERA_DSA_HLEN); + + memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN, + ETH_ALEN * 2); + + skb_push(skb, ETH_HLEN); + + skb->protocol = eth_type_trans(skb, port->dev); + + if (dsa.vlan.is_tagged) { + u16 tci = dsa.vlan.vid & VLAN_VID_MASK; + + tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT; + if (dsa.vlan.cfi_bit) + tci |= VLAN_CFI_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci); + } + + cpu_code = dsa.cpu_code; + prestera_devlink_trap_report(port, skb, cpu_code); + + return 0; +} + +static int prestera_sdma_next_rx_buf_idx(int buf_idx) +{ + return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q; +} + +static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget) +{ + int qnum = PRESTERA_SDMA_RX_QUEUE_NUM; + unsigned int rxq_done_map = 0; + struct prestera_sdma *sdma; + struct list_head rx_list; + unsigned int qmask; + int pkts_done = 0; + int q; + + qnum = PRESTERA_SDMA_RX_QUEUE_NUM; + qmask = GENMASK(qnum - 1, 0); + + INIT_LIST_HEAD(&rx_list); + + sdma = container_of(napi, struct prestera_sdma, rx_napi); + + while (pkts_done < budget && rxq_done_map != qmask) { + for (q = 0; q < qnum && pkts_done < budget; q++) { + struct prestera_rx_ring *ring = 
&sdma->rx_ring[q]; + struct prestera_sdma_desc *desc; + struct prestera_sdma_buf *buf; + int buf_idx = ring->next_rx; + struct sk_buff *skb; + + buf = &ring->bufs[buf_idx]; + desc = buf->desc; + + if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) { + rxq_done_map &= ~BIT(q); + } else { + rxq_done_map |= BIT(q); + continue; + } + + pkts_done++; + + __skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc)); + + skb = prestera_sdma_rx_skb_get(sdma, buf); + if (!skb) + goto rx_next_buf; + + if (unlikely(prestera_rxtx_process_skb(sdma, skb))) + goto rx_next_buf; + + list_add_tail(&skb->list, &rx_list); +rx_next_buf: + ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx); + } + } + + if (pkts_done < budget && napi_complete_done(napi, pkts_done)) + prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, + GENMASK(9, 2)); + + netif_receive_skb_list(&rx_list); + + return pkts_done; +} + +static void prestera_sdma_rx_fini(struct prestera_sdma *sdma) +{ + int qnum = PRESTERA_SDMA_RX_QUEUE_NUM; + int q, b; + + /* disable all rx queues */ + prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG, + GENMASK(15, 8)); + + for (q = 0; q < qnum; q++) { + struct prestera_rx_ring *ring = &sdma->rx_ring[q]; + + if (!ring->bufs) + break; + + for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) { + struct prestera_sdma_buf *buf = &ring->bufs[b]; + + if (buf->desc_dma) + dma_pool_free(sdma->desc_pool, buf->desc, + buf->desc_dma); + + if (!buf->skb) + continue; + + if (buf->buf_dma != DMA_MAPPING_ERROR) + dma_unmap_single(sdma->sw->dev->dev, + buf->buf_dma, buf->skb->len, + DMA_FROM_DEVICE); + kfree_skb(buf->skb); + } + } +} + +static int prestera_sdma_rx_init(struct prestera_sdma *sdma) +{ + int bnum = PRESTERA_SDMA_RX_DESC_PER_Q; + int qnum = PRESTERA_SDMA_RX_QUEUE_NUM; + int err; + int q; + + /* disable all rx queues */ + prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG, + GENMASK(15, 8)); + + for (q = 0; q < qnum; q++) { + struct prestera_sdma_buf *head, *tail, *next, *prev; + struct prestera_rx_ring *ring = &sdma->rx_ring[q]; + + ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL); + if (!ring->bufs) + return -ENOMEM; + + ring->next_rx = 0; + + tail = &ring->bufs[bnum - 1]; + head = &ring->bufs[0]; + next = head; + prev = next; + + do { + err = prestera_sdma_buf_init(sdma, next); + if (err) + return err; + + err = prestera_sdma_rx_skb_alloc(sdma, next); + if (err) + return err; + + prestera_sdma_rx_desc_init(sdma, next->desc, + next->buf_dma); + + prestera_sdma_rx_desc_set_next(sdma, prev->desc, + next->desc_dma); + + prev = next; + next++; + } while (prev != tail); + + /* join tail with head to make a circular list */ + prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma); + + prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q), + prestera_sdma_map(sdma, head->desc_dma)); + } + + /* make sure all rx descs are filled before enabling all rx queues */ + wmb(); + + prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG, + GENMASK(7, 0)); + + return 0; +} + +static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma, + struct prestera_sdma_desc *desc) +{ + desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT); + desc->word2 = 0; +} + +static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma, + struct prestera_sdma_desc *desc, + dma_addr_t next) +{ + desc->next = cpu_to_le32(prestera_sdma_map(sdma, next)); +} + +static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma, + struct prestera_sdma_desc *desc, + dma_addr_t buf, size_t len) +{ + u32 word 
= le32_to_cpu(desc->word2); + + u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16)); + + desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf)); + desc->word2 = cpu_to_le32(word); +} + +static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc) +{ + u32 word = le32_to_cpu(desc->word1); + + word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31; + + /* make sure everything is written before enabling xmit */ + wmb(); + + desc->word1 = cpu_to_le32(word); +} + +static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma, + struct prestera_sdma_buf *buf, + struct sk_buff *skb) +{ + struct device *dma_dev = sdma->sw->dev->dev; + dma_addr_t dma; + + dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, dma)) + return -ENOMEM; + + buf->buf_dma = dma; + buf->skb = skb; + + return 0; +} + +static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma, + struct prestera_sdma_buf *buf) +{ + struct device *dma_dev = sdma->sw->dev->dev; + + dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE); +} + +static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work) +{ + int bnum = PRESTERA_SDMA_TX_DESC_PER_Q; + struct prestera_tx_ring *tx_ring; + struct prestera_sdma *sdma; + int b; + + sdma = container_of(work, struct prestera_sdma, tx_work); + + tx_ring = &sdma->tx_ring; + + for (b = 0; b < bnum; b++) { + struct prestera_sdma_buf *buf = &tx_ring->bufs[b]; + + if (!buf->is_used) + continue; + + if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc)) + continue; + + prestera_sdma_tx_buf_unmap(sdma, buf); + dev_consume_skb_any(buf->skb); + buf->skb = NULL; + + /* make sure everything is cleaned up */ + wmb(); + + buf->is_used = false; + } +} + +static int prestera_sdma_tx_init(struct prestera_sdma *sdma) +{ + struct prestera_sdma_buf *head, *tail, *next, *prev; + struct prestera_tx_ring *tx_ring = &sdma->tx_ring; + int bnum = PRESTERA_SDMA_TX_DESC_PER_Q; + int err; + + INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn); + spin_lock_init(&sdma->tx_lock); + + tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL); + if (!tx_ring->bufs) + return -ENOMEM; + + tail = &tx_ring->bufs[bnum - 1]; + head = &tx_ring->bufs[0]; + next = head; + prev = next; + + tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST; + tx_ring->burst = tx_ring->max_burst; + tx_ring->next_tx = 0; + + do { + err = prestera_sdma_buf_init(sdma, next); + if (err) + return err; + + next->is_used = false; + + prestera_sdma_tx_desc_init(sdma, next->desc); + + prestera_sdma_tx_desc_set_next(sdma, prev->desc, + next->desc_dma); + + prev = next; + next++; + } while (prev != tail); + + /* join tail with head to make a circular list */ + prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma); + + /* make sure descriptors are written */ + wmb(); + + prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG, + prestera_sdma_map(sdma, head->desc_dma)); + + return 0; +} + +static void prestera_sdma_tx_fini(struct prestera_sdma *sdma) +{ + struct prestera_tx_ring *ring = &sdma->tx_ring; + int bnum = PRESTERA_SDMA_TX_DESC_PER_Q; + int b; + + cancel_work_sync(&sdma->tx_work); + + if (!ring->bufs) + return; + + for (b = 0; b < bnum; b++) { + struct prestera_sdma_buf *buf = &ring->bufs[b]; + + if (buf->desc) + dma_pool_free(sdma->desc_pool, buf->desc, + buf->desc_dma); + + if (!buf->skb) + continue; + + dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma, + buf->skb->len, DMA_TO_DEVICE); + + dev_consume_skb_any(buf->skb); + } +} + +static void
prestera_rxtx_handle_event(struct prestera_switch *sw, + struct prestera_event *evt, + void *arg) +{ + struct prestera_sdma *sdma = arg; + + if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT) + return; + + prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0); + napi_schedule(&sdma->rx_napi); +} + +static int prestera_sdma_switch_init(struct prestera_switch *sw) +{ + struct prestera_sdma *sdma = &sw->rxtx->sdma; + struct device *dev = sw->dev->dev; + struct prestera_rxtx_params p; + int err; + + p.use_sdma = true; + + err = prestera_hw_rxtx_init(sw, &p); + if (err) { + dev_err(dev, "failed to init rxtx by hw\n"); + return err; + } + + sdma->dma_mask = dma_get_mask(dev); + sdma->map_addr = p.map_addr; + sdma->sw = sw; + + sdma->desc_pool = dma_pool_create("desc_pool", dev, + sizeof(struct prestera_sdma_desc), + 16, 0); + if (!sdma->desc_pool) + return -ENOMEM; + + err = prestera_sdma_rx_init(sdma); + if (err) { + dev_err(dev, "failed to init rx ring\n"); + goto err_rx_init; + } + + err = prestera_sdma_tx_init(sdma); + if (err) { + dev_err(dev, "failed to init tx ring\n"); + goto err_tx_init; + } + + err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX, + prestera_rxtx_handle_event, + sdma); + if (err) + goto err_evt_register; + + init_dummy_netdev(&sdma->napi_dev); + + netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll); + napi_enable(&sdma->rx_napi); + + return 0; + +err_evt_register: +err_tx_init: + prestera_sdma_tx_fini(sdma); +err_rx_init: + prestera_sdma_rx_fini(sdma); + + dma_pool_destroy(sdma->desc_pool); + return err; +} + +static void prestera_sdma_switch_fini(struct prestera_switch *sw) +{ + struct prestera_sdma *sdma = &sw->rxtx->sdma; + + napi_disable(&sdma->rx_napi); + netif_napi_del(&sdma->rx_napi); + prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX, + prestera_rxtx_handle_event); + prestera_sdma_tx_fini(sdma); + prestera_sdma_rx_fini(sdma); + dma_pool_destroy(sdma->desc_pool); +} + +static bool prestera_sdma_is_ready(struct prestera_sdma *sdma) +{ + return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1); +} + +static int prestera_sdma_tx_wait(struct prestera_sdma *sdma, + struct prestera_tx_ring *tx_ring) +{ + int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst; + + do { + if (prestera_sdma_is_ready(sdma)) + return 0; + + udelay(1); + } while (--tx_wait_num); + + return -EBUSY; +} + +static void prestera_sdma_tx_start(struct prestera_sdma *sdma) +{ + prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1); + schedule_work(&sdma->tx_work); +} + +static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma, + struct sk_buff *skb) +{ + struct device *dma_dev = sdma->sw->dev->dev; + struct net_device *dev = skb->dev; + struct prestera_tx_ring *tx_ring; + struct prestera_sdma_buf *buf; + int err; + + spin_lock(&sdma->tx_lock); + + tx_ring = &sdma->tx_ring; + + buf = &tx_ring->bufs[tx_ring->next_tx]; + if (buf->is_used) { + schedule_work(&sdma->tx_work); + goto drop_skb; + } + + if (unlikely(eth_skb_pad(skb))) + goto drop_skb_nofree; + + err = prestera_sdma_tx_buf_map(sdma, buf, skb); + if (err) + goto drop_skb; + + prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len); + + dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len, + DMA_TO_DEVICE); + + if (tx_ring->burst) { + tx_ring->burst--; + } else { + tx_ring->burst = tx_ring->max_burst; + + err = prestera_sdma_tx_wait(sdma, tx_ring); + if (err) + goto drop_skb_unmap; + } + + tx_ring->next_tx = (tx_ring->next_tx + 1) % 
PRESTERA_SDMA_TX_DESC_PER_Q; + prestera_sdma_tx_desc_xmit(buf->desc); + buf->is_used = true; + + prestera_sdma_tx_start(sdma); + + goto tx_done; + +drop_skb_unmap: + prestera_sdma_tx_buf_unmap(sdma, buf); +drop_skb: + dev_consume_skb_any(skb); +drop_skb_nofree: + dev->stats.tx_dropped++; +tx_done: + spin_unlock(&sdma->tx_lock); + return NETDEV_TX_OK; +} + +int prestera_rxtx_switch_init(struct prestera_switch *sw) +{ + struct prestera_rxtx *rxtx; + int err; + + rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL); + if (!rxtx) + return -ENOMEM; + + sw->rxtx = rxtx; + + err = prestera_sdma_switch_init(sw); + if (err) + kfree(rxtx); + + return err; +} + +void prestera_rxtx_switch_fini(struct prestera_switch *sw) +{ + prestera_sdma_switch_fini(sw); + kfree(sw->rxtx); +} + +int prestera_rxtx_port_init(struct prestera_port *port) +{ + port->dev->needed_headroom = PRESTERA_DSA_HLEN; + return 0; +} + +netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb) +{ + struct prestera_dsa dsa; + + dsa.hw_dev_num = port->dev_id; + dsa.port_num = port->hw_id; + + if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0) + return NET_XMIT_DROP; + + skb_push(skb, PRESTERA_DSA_HLEN); + memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN); + + if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0) + return NET_XMIT_DROP; + + return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h new file mode 100644 index 0000000000..882a1225c3 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_RXTX_H_ +#define _PRESTERA_RXTX_H_ + +#include <linux/netdevice.h> + +struct prestera_switch; +struct prestera_port; + +int prestera_rxtx_switch_init(struct prestera_switch *sw); +void prestera_rxtx_switch_fini(struct prestera_switch *sw); + +int prestera_rxtx_port_init(struct prestera_port *port); + +netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb); + +#endif /* _PRESTERA_RXTX_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c new file mode 100644 index 0000000000..1005182ce3 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2020 Marvell International Ltd. 
All rights reserved */ + +#include <linux/kernel.h> +#include <linux/list.h> + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_acl.h" +#include "prestera_flow.h" +#include "prestera_span.h" + +struct prestera_span_entry { + struct list_head list; + struct prestera_port *port; + refcount_t ref_count; + u8 id; +}; + +struct prestera_span { + struct prestera_switch *sw; + struct list_head entries; +}; + +static struct prestera_span_entry * +prestera_span_entry_create(struct prestera_port *port, u8 span_id) +{ + struct prestera_span_entry *entry; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return ERR_PTR(-ENOMEM); + + refcount_set(&entry->ref_count, 1); + entry->port = port; + entry->id = span_id; + list_add_tail(&entry->list, &port->sw->span->entries); + + return entry; +} + +static void prestera_span_entry_del(struct prestera_span_entry *entry) +{ + list_del(&entry->list); + kfree(entry); +} + +static struct prestera_span_entry * +prestera_span_entry_find_by_id(struct prestera_span *span, u8 span_id) +{ + struct prestera_span_entry *entry; + + list_for_each_entry(entry, &span->entries, list) { + if (entry->id == span_id) + return entry; + } + + return NULL; +} + +static struct prestera_span_entry * +prestera_span_entry_find_by_port(struct prestera_span *span, + struct prestera_port *port) +{ + struct prestera_span_entry *entry; + + list_for_each_entry(entry, &span->entries, list) { + if (entry->port == port) + return entry; + } + + return NULL; +} + +static int prestera_span_get(struct prestera_port *port, u8 *span_id) +{ + u8 new_span_id; + struct prestera_switch *sw = port->sw; + struct prestera_span_entry *entry; + int err; + + entry = prestera_span_entry_find_by_port(sw->span, port); + if (entry) { + refcount_inc(&entry->ref_count); + *span_id = entry->id; + return 0; + } + + err = prestera_hw_span_get(port, &new_span_id); + if (err) + return err; + + entry = prestera_span_entry_create(port, new_span_id); + if (IS_ERR(entry)) { + prestera_hw_span_release(sw, new_span_id); + return PTR_ERR(entry); + } + + *span_id = new_span_id; + return 0; +} + +static int prestera_span_put(struct prestera_switch *sw, u8 span_id) +{ + struct prestera_span_entry *entry; + int err; + + entry = prestera_span_entry_find_by_id(sw->span, span_id); + if (!entry) + return -ENOENT; + + if (!refcount_dec_and_test(&entry->ref_count)) + return 0; + + err = prestera_hw_span_release(sw, span_id); + if (err) + return err; + + prestera_span_entry_del(entry); + return 0; +} + +int prestera_span_rule_add(struct prestera_flow_block_binding *binding, + struct prestera_port *to_port, + bool ingress) +{ + struct prestera_switch *sw = binding->port->sw; + u8 span_id; + int err; + + if (binding->span_id != PRESTERA_SPAN_INVALID_ID) + /* port already in mirroring */ + return -EEXIST; + + err = prestera_span_get(to_port, &span_id); + if (err) + return err; + + err = prestera_hw_span_bind(binding->port, span_id, ingress); + if (err) { + prestera_span_put(sw, span_id); + return err; + } + + binding->span_id = span_id; + return 0; +} + +int prestera_span_rule_del(struct prestera_flow_block_binding *binding, + bool ingress) +{ + int err; + + if (binding->span_id == PRESTERA_SPAN_INVALID_ID) + return -ENOENT; + + err = prestera_hw_span_unbind(binding->port, ingress); + if (err) + return err; + + err = prestera_span_put(binding->port->sw, binding->span_id); + if (err) + return err; + + binding->span_id = PRESTERA_SPAN_INVALID_ID; + return 0; +} + +int prestera_span_init(struct prestera_switch 
*sw) +{ + struct prestera_span *span; + + span = kzalloc(sizeof(*span), GFP_KERNEL); + if (!span) + return -ENOMEM; + + INIT_LIST_HEAD(&span->entries); + + sw->span = span; + span->sw = sw; + + return 0; +} + +void prestera_span_fini(struct prestera_switch *sw) +{ + struct prestera_span *span = sw->span; + + WARN_ON(!list_empty(&span->entries)); + kfree(span); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.h b/drivers/net/ethernet/marvell/prestera/prestera_span.h new file mode 100644 index 0000000000..493b68524b --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_span.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_SPAN_H_ +#define _PRESTERA_SPAN_H_ + +#include <net/pkt_cls.h> + +#define PRESTERA_SPAN_INVALID_ID -1 + +struct prestera_port; +struct prestera_switch; +struct prestera_flow_block_binding; + +int prestera_span_init(struct prestera_switch *sw); +void prestera_span_fini(struct prestera_switch *sw); + +int prestera_span_rule_add(struct prestera_flow_block_binding *binding, + struct prestera_port *to_port, + bool ingress); +int prestera_span_rule_del(struct prestera_flow_block_binding *binding, + bool ingress); + +#endif /* _PRESTERA_SPAN_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c new file mode 100644 index 0000000000..e548cd3258 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c @@ -0,0 +1,1918 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */ + +#include <linux/if_bridge.h> +#include <linux/if_vlan.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <net/netevent.h> +#include <net/switchdev.h> + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_switchdev.h" + +#define PRESTERA_VID_ALL (0xffff) + +#define PRESTERA_DEFAULT_AGEING_TIME_MS 300000 +#define PRESTERA_MAX_AGEING_TIME_MS 1000000000 +#define PRESTERA_MIN_AGEING_TIME_MS 32000 + +struct prestera_fdb_event_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct net_device *dev; + unsigned long event; +}; + +struct prestera_switchdev { + struct prestera_switch *sw; + struct list_head bridge_list; + bool bridge_8021q_exists; + struct notifier_block swdev_nb_blk; + struct notifier_block swdev_nb; +}; + +struct prestera_bridge { + struct list_head head; + struct net_device *dev; + struct prestera_switchdev *swdev; + struct list_head port_list; + struct list_head br_mdb_entry_list; + bool mrouter_exist; + bool vlan_enabled; + bool multicast_enabled; + u16 bridge_id; +}; + +struct prestera_bridge_port { + struct list_head head; + struct net_device *dev; + struct prestera_bridge *bridge; + struct list_head vlan_list; + struct list_head br_mdb_port_list; + refcount_t ref_count; + unsigned long flags; + bool mrouter; + u8 stp_state; +}; + +struct prestera_bridge_vlan { + struct list_head head; + struct list_head port_vlan_list; + u16 vid; +}; + +struct prestera_port_vlan { + struct list_head br_vlan_head; + struct list_head port_head; + struct prestera_port *port; + struct prestera_bridge_port *br_port; + u16 vid; +}; + +struct prestera_br_mdb_port { + struct prestera_bridge_port *br_port; + struct list_head br_mdb_port_node; +}; + +/* Software representation of MDB table. 
*/ +struct prestera_br_mdb_entry { + struct prestera_bridge *bridge; + struct prestera_mdb_entry *mdb; + struct list_head br_mdb_port_list; + struct list_head br_mdb_entry_node; + bool enabled; +}; + +static struct workqueue_struct *swdev_wq; + +static void prestera_bridge_port_put(struct prestera_bridge_port *br_port); + +static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid, + u8 state); + +static struct prestera_bridge * +prestera_bridge_find(const struct prestera_switch *sw, + const struct net_device *br_dev) +{ + struct prestera_bridge *bridge; + + list_for_each_entry(bridge, &sw->swdev->bridge_list, head) + if (bridge->dev == br_dev) + return bridge; + + return NULL; +} + +static struct prestera_bridge_port * +__prestera_bridge_port_find(const struct prestera_bridge *bridge, + const struct net_device *brport_dev) +{ + struct prestera_bridge_port *br_port; + + list_for_each_entry(br_port, &bridge->port_list, head) + if (br_port->dev == brport_dev) + return br_port; + + return NULL; +} + +static struct prestera_bridge_port * +prestera_bridge_port_find(struct prestera_switch *sw, + struct net_device *brport_dev) +{ + struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev); + struct prestera_bridge *bridge; + + if (!br_dev) + return NULL; + + bridge = prestera_bridge_find(sw, br_dev); + if (!bridge) + return NULL; + + return __prestera_bridge_port_find(bridge, brport_dev); +} + +static void +prestera_br_port_flags_reset(struct prestera_bridge_port *br_port, + struct prestera_port *port) +{ + prestera_port_uc_flood_set(port, false); + prestera_port_mc_flood_set(port, false); + prestera_port_learning_set(port, false); + prestera_port_br_locked_set(port, false); +} + +static int prestera_br_port_flags_set(struct prestera_bridge_port *br_port, + struct prestera_port *port) +{ + int err; + + err = prestera_port_uc_flood_set(port, br_port->flags & BR_FLOOD); + if (err) + goto err_out; + + err = prestera_port_mc_flood_set(port, br_port->flags & BR_MCAST_FLOOD); + if (err) + goto err_out; + + err = prestera_port_learning_set(port, br_port->flags & BR_LEARNING); + if (err) + goto err_out; + + err = prestera_port_br_locked_set(port, + br_port->flags & BR_PORT_LOCKED); + if (err) + goto err_out; + + return 0; + +err_out: + prestera_br_port_flags_reset(br_port, port); + return err; +} + +static struct prestera_bridge_vlan * +prestera_bridge_vlan_create(struct prestera_bridge_port *br_port, u16 vid) +{ + struct prestera_bridge_vlan *br_vlan; + + br_vlan = kzalloc(sizeof(*br_vlan), GFP_KERNEL); + if (!br_vlan) + return NULL; + + INIT_LIST_HEAD(&br_vlan->port_vlan_list); + br_vlan->vid = vid; + list_add(&br_vlan->head, &br_port->vlan_list); + + return br_vlan; +} + +static void prestera_bridge_vlan_destroy(struct prestera_bridge_vlan *br_vlan) +{ + list_del(&br_vlan->head); + WARN_ON(!list_empty(&br_vlan->port_vlan_list)); + kfree(br_vlan); +} + +static struct prestera_bridge_vlan * +prestera_bridge_vlan_by_vid(struct prestera_bridge_port *br_port, u16 vid) +{ + struct prestera_bridge_vlan *br_vlan; + + list_for_each_entry(br_vlan, &br_port->vlan_list, head) { + if (br_vlan->vid == vid) + return br_vlan; + } + + return NULL; +} + +static int prestera_bridge_vlan_port_count(struct prestera_bridge *bridge, + u16 vid) +{ + struct prestera_bridge_port *br_port; + struct prestera_bridge_vlan *br_vlan; + int count = 0; + + list_for_each_entry(br_port, &bridge->port_list, head) { + list_for_each_entry(br_vlan, &br_port->vlan_list, head) { + if (br_vlan->vid == vid) { + count += 
1; + break; + } + } + } + + return count; +} + +static void prestera_bridge_vlan_put(struct prestera_bridge_vlan *br_vlan) +{ + if (list_empty(&br_vlan->port_vlan_list)) + prestera_bridge_vlan_destroy(br_vlan); +} + +static struct prestera_port_vlan * +prestera_port_vlan_by_vid(struct prestera_port *port, u16 vid) +{ + struct prestera_port_vlan *port_vlan; + + list_for_each_entry(port_vlan, &port->vlans_list, port_head) { + if (port_vlan->vid == vid) + return port_vlan; + } + + return NULL; +} + +static struct prestera_port_vlan * +prestera_port_vlan_create(struct prestera_port *port, u16 vid, bool untagged) +{ + struct prestera_port_vlan *port_vlan; + int err; + + port_vlan = prestera_port_vlan_by_vid(port, vid); + if (port_vlan) + return ERR_PTR(-EEXIST); + + err = prestera_hw_vlan_port_set(port, vid, true, untagged); + if (err) + return ERR_PTR(err); + + port_vlan = kzalloc(sizeof(*port_vlan), GFP_KERNEL); + if (!port_vlan) { + err = -ENOMEM; + goto err_port_vlan_alloc; + } + + port_vlan->port = port; + port_vlan->vid = vid; + + list_add(&port_vlan->port_head, &port->vlans_list); + + return port_vlan; + +err_port_vlan_alloc: + prestera_hw_vlan_port_set(port, vid, false, false); + return ERR_PTR(err); +} + +static int prestera_fdb_add(struct prestera_port *port, + const unsigned char *mac, u16 vid, bool dynamic) +{ + if (prestera_port_is_lag_member(port)) + return prestera_hw_lag_fdb_add(port->sw, prestera_port_lag_id(port), + mac, vid, dynamic); + + return prestera_hw_fdb_add(port, mac, vid, dynamic); +} + +static int prestera_fdb_del(struct prestera_port *port, + const unsigned char *mac, u16 vid) +{ + if (prestera_port_is_lag_member(port)) + return prestera_hw_lag_fdb_del(port->sw, prestera_port_lag_id(port), + mac, vid); + else + return prestera_hw_fdb_del(port, mac, vid); +} + +static int prestera_fdb_flush_port_vlan(struct prestera_port *port, u16 vid, + u32 mode) +{ + if (prestera_port_is_lag_member(port)) + return prestera_hw_fdb_flush_lag_vlan(port->sw, prestera_port_lag_id(port), + vid, mode); + else + return prestera_hw_fdb_flush_port_vlan(port, vid, mode); +} + +static int prestera_fdb_flush_port(struct prestera_port *port, u32 mode) +{ + if (prestera_port_is_lag_member(port)) + return prestera_hw_fdb_flush_lag(port->sw, prestera_port_lag_id(port), + mode); + else + return prestera_hw_fdb_flush_port(port, mode); +} + +static void +prestera_mdb_port_del(struct prestera_mdb_entry *mdb, + struct net_device *orig_dev) +{ + struct prestera_flood_domain *fl_domain = mdb->flood_domain; + struct prestera_flood_domain_port *flood_domain_port; + + flood_domain_port = prestera_flood_domain_port_find(fl_domain, + orig_dev, + mdb->vid); + if (flood_domain_port) + prestera_flood_domain_port_destroy(flood_domain_port); +} + +static void +prestera_br_mdb_entry_put(struct prestera_br_mdb_entry *br_mdb) +{ + struct prestera_bridge_port *br_port; + + if (list_empty(&br_mdb->br_mdb_port_list)) { + list_for_each_entry(br_port, &br_mdb->bridge->port_list, head) + prestera_mdb_port_del(br_mdb->mdb, br_port->dev); + + prestera_mdb_entry_destroy(br_mdb->mdb); + list_del(&br_mdb->br_mdb_entry_node); + kfree(br_mdb); + } +} + +static void +prestera_br_mdb_port_del(struct prestera_br_mdb_entry *br_mdb, + struct prestera_bridge_port *br_port) +{ + struct prestera_br_mdb_port *br_mdb_port, *tmp; + + list_for_each_entry_safe(br_mdb_port, tmp, &br_mdb->br_mdb_port_list, + br_mdb_port_node) { + if (br_mdb_port->br_port == br_port) { + list_del(&br_mdb_port->br_mdb_port_node); + kfree(br_mdb_port); + } + } +} 
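(Aside: prestera_br_mdb_port_del() above frees entries while it is still walking br_mdb_port_list, which is why it uses list_for_each_entry_safe() rather than the plain iterator. The standalone userspace sketch below illustrates the same idiom with a minimal stand-in for <linux/list.h>; every name in it is illustrative and none of it is driver code.)

/* Minimal userspace sketch of the list_for_each_entry_safe idiom:
 * cache pos->next before freeing pos so the walk survives deletion.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

struct mdb_port {			/* stand-in for prestera_br_mdb_port */
	int port_id;
	struct list_head node;
};

int main(void)
{
	struct list_head ports, *pos, *tmp;
	struct mdb_port *p;
	int i;

	list_init(&ports);
	for (i = 0; i < 4; i++) {
		p = malloc(sizeof(*p));
		p->port_id = i;
		list_add_tail(&p->node, &ports);
	}

	/* "safe" walk: tmp holds the successor before pos is freed */
	for (pos = ports.next; pos != &ports; pos = tmp) {
		tmp = pos->next;
		p = container_of(pos, struct mdb_port, node);
		if (p->port_id == 2) {	/* drop a single member */
			list_del(&p->node);
			free(p);
		}
	}

	for (pos = ports.next; pos != &ports; pos = pos->next)
		printf("port %d\n",
		       container_of(pos, struct mdb_port, node)->port_id);
	return 0;
}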
+ +static void +prestera_mdb_flush_bridge_port(struct prestera_bridge_port *br_port) +{ + struct prestera_br_mdb_port *br_mdb_port, *tmp_port; + struct prestera_br_mdb_entry *br_mdb, *br_mdb_tmp; + struct prestera_bridge *br_dev = br_port->bridge; + + list_for_each_entry_safe(br_mdb, br_mdb_tmp, &br_dev->br_mdb_entry_list, + br_mdb_entry_node) { + list_for_each_entry_safe(br_mdb_port, tmp_port, + &br_mdb->br_mdb_port_list, + br_mdb_port_node) { + prestera_mdb_port_del(br_mdb->mdb, + br_mdb_port->br_port->dev); + prestera_br_mdb_port_del(br_mdb, br_mdb_port->br_port); + } + prestera_br_mdb_entry_put(br_mdb); + } +} + +static void +prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan) +{ + u32 fdb_flush_mode = PRESTERA_FDB_FLUSH_MODE_DYNAMIC; + struct prestera_port *port = port_vlan->port; + struct prestera_bridge_vlan *br_vlan; + struct prestera_bridge_port *br_port; + bool last_port, last_vlan; + u16 vid = port_vlan->vid; + int port_count; + + br_port = port_vlan->br_port; + port_count = prestera_bridge_vlan_port_count(br_port->bridge, vid); + br_vlan = prestera_bridge_vlan_by_vid(br_port, vid); + + last_vlan = list_is_singular(&br_port->vlan_list); + last_port = port_count == 1; + + if (last_vlan) + prestera_fdb_flush_port(port, fdb_flush_mode); + else if (last_port) + prestera_hw_fdb_flush_vlan(port->sw, vid, fdb_flush_mode); + else + prestera_fdb_flush_port_vlan(port, vid, fdb_flush_mode); + + prestera_mdb_flush_bridge_port(br_port); + + list_del(&port_vlan->br_vlan_head); + prestera_bridge_vlan_put(br_vlan); + prestera_bridge_port_put(br_port); + port_vlan->br_port = NULL; +} + +static void prestera_port_vlan_destroy(struct prestera_port_vlan *port_vlan) +{ + struct prestera_port *port = port_vlan->port; + u16 vid = port_vlan->vid; + + if (port_vlan->br_port) + prestera_port_vlan_bridge_leave(port_vlan); + + prestera_hw_vlan_port_set(port, vid, false, false); + list_del(&port_vlan->port_head); + kfree(port_vlan); +} + +static struct prestera_bridge * +prestera_bridge_create(struct prestera_switchdev *swdev, struct net_device *dev) +{ + bool vlan_enabled = br_vlan_enabled(dev); + struct prestera_bridge *bridge; + u16 bridge_id; + int err; + + if (vlan_enabled && swdev->bridge_8021q_exists) { + netdev_err(dev, "Only one VLAN-aware bridge is supported\n"); + return ERR_PTR(-EINVAL); + } + + bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); + if (!bridge) + return ERR_PTR(-ENOMEM); + + if (vlan_enabled) { + swdev->bridge_8021q_exists = true; + } else { + err = prestera_hw_bridge_create(swdev->sw, &bridge_id); + if (err) { + kfree(bridge); + return ERR_PTR(err); + } + + bridge->bridge_id = bridge_id; + } + + bridge->vlan_enabled = vlan_enabled; + bridge->swdev = swdev; + bridge->dev = dev; + bridge->multicast_enabled = br_multicast_enabled(dev); + + INIT_LIST_HEAD(&bridge->port_list); + INIT_LIST_HEAD(&bridge->br_mdb_entry_list); + + list_add(&bridge->head, &swdev->bridge_list); + + return bridge; +} + +static void prestera_bridge_destroy(struct prestera_bridge *bridge) +{ + struct prestera_switchdev *swdev = bridge->swdev; + + list_del(&bridge->head); + + if (bridge->vlan_enabled) + swdev->bridge_8021q_exists = false; + else + prestera_hw_bridge_delete(swdev->sw, bridge->bridge_id); + + WARN_ON(!list_empty(&bridge->br_mdb_entry_list)); + WARN_ON(!list_empty(&bridge->port_list)); + kfree(bridge); +} + +static void prestera_bridge_put(struct prestera_bridge *bridge) +{ + if (list_empty(&bridge->port_list)) + prestera_bridge_destroy(bridge); +} + +static +struct prestera_bridge 
*prestera_bridge_by_dev(struct prestera_switchdev *swdev, + const struct net_device *dev) +{ + struct prestera_bridge *bridge; + + list_for_each_entry(bridge, &swdev->bridge_list, head) + if (bridge->dev == dev) + return bridge; + + return NULL; +} + +static struct prestera_bridge_port * +__prestera_bridge_port_by_dev(struct prestera_bridge *bridge, + struct net_device *dev) +{ + struct prestera_bridge_port *br_port; + + list_for_each_entry(br_port, &bridge->port_list, head) { + if (br_port->dev == dev) + return br_port; + } + + return NULL; +} + +static int prestera_match_upper_bridge_dev(struct net_device *dev, + struct netdev_nested_priv *priv) +{ + if (netif_is_bridge_master(dev)) + priv->data = dev; + + return 0; +} + +static struct net_device *prestera_get_upper_bridge_dev(struct net_device *dev) +{ + struct netdev_nested_priv priv = { }; + + netdev_walk_all_upper_dev_rcu(dev, prestera_match_upper_bridge_dev, + &priv); + return priv.data; +} + +static struct prestera_bridge_port * +prestera_bridge_port_by_dev(struct prestera_switchdev *swdev, + struct net_device *dev) +{ + struct net_device *br_dev = prestera_get_upper_bridge_dev(dev); + struct prestera_bridge *bridge; + + if (!br_dev) + return NULL; + + bridge = prestera_bridge_by_dev(swdev, br_dev); + if (!bridge) + return NULL; + + return __prestera_bridge_port_by_dev(bridge, dev); +} + +static struct prestera_bridge_port * +prestera_bridge_port_create(struct prestera_bridge *bridge, + struct net_device *dev) +{ + struct prestera_bridge_port *br_port; + + br_port = kzalloc(sizeof(*br_port), GFP_KERNEL); + if (!br_port) + return NULL; + + br_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC | + BR_MCAST_FLOOD; + br_port->stp_state = BR_STATE_DISABLED; + refcount_set(&br_port->ref_count, 1); + br_port->bridge = bridge; + br_port->dev = dev; + + INIT_LIST_HEAD(&br_port->vlan_list); + list_add(&br_port->head, &bridge->port_list); + INIT_LIST_HEAD(&br_port->br_mdb_port_list); + + return br_port; +} + +static void +prestera_bridge_port_destroy(struct prestera_bridge_port *br_port) +{ + list_del(&br_port->head); + WARN_ON(!list_empty(&br_port->vlan_list)); + WARN_ON(!list_empty(&br_port->br_mdb_port_list)); + kfree(br_port); +} + +static void prestera_bridge_port_get(struct prestera_bridge_port *br_port) +{ + refcount_inc(&br_port->ref_count); +} + +static void prestera_bridge_port_put(struct prestera_bridge_port *br_port) +{ + struct prestera_bridge *bridge = br_port->bridge; + + if (refcount_dec_and_test(&br_port->ref_count)) { + prestera_bridge_port_destroy(br_port); + prestera_bridge_put(bridge); + } +} + +static struct prestera_bridge_port * +prestera_bridge_port_add(struct prestera_bridge *bridge, struct net_device *dev) +{ + struct prestera_bridge_port *br_port; + + br_port = __prestera_bridge_port_by_dev(bridge, dev); + if (br_port) { + prestera_bridge_port_get(br_port); + return br_port; + } + + br_port = prestera_bridge_port_create(bridge, dev); + if (!br_port) + return ERR_PTR(-ENOMEM); + + return br_port; +} + +static int +prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port) +{ + struct prestera_port *port = netdev_priv(br_port->dev); + struct prestera_bridge *bridge = br_port->bridge; + int err; + + err = prestera_hw_bridge_port_add(port, bridge->bridge_id); + if (err) + return err; + + err = prestera_br_port_flags_set(br_port, port); + if (err) + goto err_flags2port_set; + + return 0; + +err_flags2port_set: + prestera_hw_bridge_port_delete(port, bridge->bridge_id); + + return err; +} + +int 
prestera_bridge_port_join(struct net_device *br_dev, + struct prestera_port *port, + struct netlink_ext_ack *extack) +{ + struct prestera_switchdev *swdev = port->sw->swdev; + struct prestera_bridge_port *br_port; + struct prestera_bridge *bridge; + int err; + + bridge = prestera_bridge_by_dev(swdev, br_dev); + if (!bridge) { + bridge = prestera_bridge_create(swdev, br_dev); + if (IS_ERR(bridge)) + return PTR_ERR(bridge); + } + + br_port = prestera_bridge_port_add(bridge, port->dev); + if (IS_ERR(br_port)) { + prestera_bridge_put(bridge); + return PTR_ERR(br_port); + } + + err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL, + NULL, NULL, false, extack); + if (err) + goto err_switchdev_offload; + + if (bridge->vlan_enabled) + return 0; + + err = prestera_bridge_1d_port_join(br_port); + if (err) + goto err_port_join; + + return 0; + +err_port_join: + switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL); +err_switchdev_offload: + prestera_bridge_port_put(br_port); + return err; +} + +static void prestera_bridge_1q_port_leave(struct prestera_bridge_port *br_port) +{ + struct prestera_port *port = netdev_priv(br_port->dev); + + prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL); + prestera_port_pvid_set(port, PRESTERA_DEFAULT_VID); +} + +static void prestera_bridge_1d_port_leave(struct prestera_bridge_port *br_port) +{ + struct prestera_port *port = netdev_priv(br_port->dev); + + prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL); + prestera_hw_bridge_port_delete(port, br_port->bridge->bridge_id); +} + +static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid, + u8 state) +{ + u8 hw_state = state; + + switch (state) { + case BR_STATE_DISABLED: + hw_state = PRESTERA_STP_DISABLED; + break; + + case BR_STATE_BLOCKING: + case BR_STATE_LISTENING: + hw_state = PRESTERA_STP_BLOCK_LISTEN; + break; + + case BR_STATE_LEARNING: + hw_state = PRESTERA_STP_LEARN; + break; + + case BR_STATE_FORWARDING: + hw_state = PRESTERA_STP_FORWARD; + break; + + default: + return -EINVAL; + } + + return prestera_hw_vlan_port_stp_set(port, vid, hw_state); +} + +void prestera_bridge_port_leave(struct net_device *br_dev, + struct prestera_port *port) +{ + struct prestera_switchdev *swdev = port->sw->swdev; + struct prestera_bridge_port *br_port; + struct prestera_bridge *bridge; + + bridge = prestera_bridge_by_dev(swdev, br_dev); + if (!bridge) + return; + + br_port = __prestera_bridge_port_by_dev(bridge, port->dev); + if (!br_port) + return; + + bridge = br_port->bridge; + + if (bridge->vlan_enabled) + prestera_bridge_1q_port_leave(br_port); + else + prestera_bridge_1d_port_leave(br_port); + + switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL); + + prestera_mdb_flush_bridge_port(br_port); + + prestera_br_port_flags_reset(br_port, port); + prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING); + prestera_bridge_port_put(br_port); +} + +static int prestera_port_attr_br_flags_set(struct prestera_port *port, + struct net_device *dev, + struct switchdev_brport_flags flags) +{ + struct prestera_bridge_port *br_port; + + br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev); + if (!br_port) + return 0; + + br_port->flags &= ~flags.mask; + br_port->flags |= flags.val & flags.mask; + return prestera_br_port_flags_set(br_port, port); +} + +static int prestera_port_attr_br_ageing_set(struct prestera_port *port, + unsigned long ageing_clock_t) +{ + unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); + u32 ageing_time_ms = 
jiffies_to_msecs(ageing_jiffies); + struct prestera_switch *sw = port->sw; + + if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS || + ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS) + return -ERANGE; + + return prestera_hw_switch_ageing_set(sw, ageing_time_ms); +} + +static int prestera_port_attr_br_vlan_set(struct prestera_port *port, + struct net_device *dev, + bool vlan_enabled) +{ + struct prestera_switch *sw = port->sw; + struct prestera_bridge *bridge; + + bridge = prestera_bridge_by_dev(sw->swdev, dev); + if (WARN_ON(!bridge)) + return -EINVAL; + + if (bridge->vlan_enabled == vlan_enabled) + return 0; + + netdev_err(bridge->dev, "VLAN filtering can't be changed for existing bridge\n"); + + return -EINVAL; +} + +static int prestera_port_bridge_vlan_stp_set(struct prestera_port *port, + struct prestera_bridge_vlan *br_vlan, + u8 state) +{ + struct prestera_port_vlan *port_vlan; + + list_for_each_entry(port_vlan, &br_vlan->port_vlan_list, br_vlan_head) { + if (port_vlan->port != port) + continue; + + return prestera_port_vid_stp_set(port, br_vlan->vid, state); + } + + return 0; +} + +static int prestera_port_attr_stp_state_set(struct prestera_port *port, + struct net_device *dev, + u8 state) +{ + struct prestera_bridge_port *br_port; + struct prestera_bridge_vlan *br_vlan; + int err; + u16 vid; + + br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev); + if (!br_port) + return 0; + + if (!br_port->bridge->vlan_enabled) { + vid = br_port->bridge->bridge_id; + err = prestera_port_vid_stp_set(port, vid, state); + if (err) + goto err_port_stp_set; + } else { + list_for_each_entry(br_vlan, &br_port->vlan_list, head) { + err = prestera_port_bridge_vlan_stp_set(port, br_vlan, + state); + if (err) + goto err_port_vlan_stp_set; + } + } + + br_port->stp_state = state; + + return 0; + +err_port_vlan_stp_set: + list_for_each_entry_continue_reverse(br_vlan, &br_port->vlan_list, head) + prestera_port_bridge_vlan_stp_set(port, br_vlan, br_port->stp_state); + return err; + +err_port_stp_set: + prestera_port_vid_stp_set(port, vid, br_port->stp_state); + + return err; +} + +static int +prestera_br_port_lag_mdb_mc_enable_sync(struct prestera_bridge_port *br_port, + bool enabled) +{ + struct prestera_port *pr_port; + struct prestera_switch *sw; + u16 lag_id; + int err; + + pr_port = prestera_port_dev_lower_find(br_port->dev); + if (!pr_port) + return 0; + + sw = pr_port->sw; + err = prestera_lag_id(sw, br_port->dev, &lag_id); + if (err) + return err; + + list_for_each_entry(pr_port, &sw->port_list, list) { + if (pr_port->lag->lag_id == lag_id) { + err = prestera_port_mc_flood_set(pr_port, enabled); + if (err) + return err; + } + } + + return 0; +} + +static int prestera_br_mdb_mc_enable_sync(struct prestera_bridge *br_dev) +{ + struct prestera_bridge_port *br_port; + struct prestera_port *port; + bool enabled; + int err; + + /* if mrouter exists: + * - make sure every mrouter receives unreg mcast traffic; + * if mrouter doesn't exist: + * - make sure every port receives unreg mcast traffic; + */ + list_for_each_entry(br_port, &br_dev->port_list, head) { + if (br_dev->multicast_enabled && br_dev->mrouter_exist) + enabled = br_port->mrouter; + else + enabled = br_port->flags & BR_MCAST_FLOOD; + + if (netif_is_lag_master(br_port->dev)) { + err = prestera_br_port_lag_mdb_mc_enable_sync(br_port, + enabled); + if (err) + return err; + continue; + } + + port = prestera_port_dev_lower_find(br_port->dev); + if (!port) + continue; + + err = prestera_port_mc_flood_set(port, enabled); + if (err) + return err; + }
+ + return 0; +} + +static bool +prestera_br_mdb_port_is_member(struct prestera_br_mdb_entry *br_mdb, + struct net_device *orig_dev) +{ + struct prestera_br_mdb_port *tmp_port; + + list_for_each_entry(tmp_port, &br_mdb->br_mdb_port_list, + br_mdb_port_node) + if (tmp_port->br_port->dev == orig_dev) + return true; + + return false; +} + +static int +prestera_mdb_port_add(struct prestera_mdb_entry *mdb, + struct net_device *orig_dev, + const unsigned char addr[ETH_ALEN], u16 vid) +{ + struct prestera_flood_domain *flood_domain = mdb->flood_domain; + int err; + + if (!prestera_flood_domain_port_find(flood_domain, + orig_dev, vid)) { + err = prestera_flood_domain_port_create(flood_domain, orig_dev, + vid); + if (err) + return err; + } + + return 0; +} + +/* Sync bridge mdb (software table) with HW table (if MC is enabled). */ +static int prestera_br_mdb_sync(struct prestera_bridge *br_dev) +{ + struct prestera_br_mdb_port *br_mdb_port; + struct prestera_bridge_port *br_port; + struct prestera_br_mdb_entry *br_mdb; + struct prestera_mdb_entry *mdb; + struct prestera_port *pr_port; + int err = 0; + + if (!br_dev->multicast_enabled) + return 0; + + list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list, + br_mdb_entry_node) { + mdb = br_mdb->mdb; + /* Make sure every port that has explicitly been added to the + * mdb joins the specified group. + */ + list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list, + br_mdb_port_node) { + br_port = br_mdb_port->br_port; + pr_port = prestera_port_dev_lower_find(br_port->dev); + + /* Match only mdb and br_mdb ports that belong to the + * same broadcast domain. + */ + if (br_dev->vlan_enabled && + !prestera_port_vlan_by_vid(pr_port, + mdb->vid)) + continue; + + /* If the port is not in the MDB or there's no + * mrouter, clear the HW mdb. + */ + if (prestera_br_mdb_port_is_member(br_mdb, + br_mdb_port->br_port->dev) && + br_dev->mrouter_exist) + err = prestera_mdb_port_add(mdb, br_port->dev, + mdb->addr, + mdb->vid); + else + prestera_mdb_port_del(mdb, br_port->dev); + + if (err) + return err; + } + + /* Make sure that every mrouter port joins every MC group in + * the broadcast domain. If it's not an mrouter, it should + * leave. + */ + list_for_each_entry(br_port, &br_dev->port_list, head) { + pr_port = prestera_port_dev_lower_find(br_port->dev); + + /* Make sure the mrouter doesn't receive traffic from + * another broadcast domain (e.g. from a vlan which the + * mrouter port is not a member of). 
+ */ + if (br_dev->vlan_enabled && + !prestera_port_vlan_by_vid(pr_port, + mdb->vid)) + continue; + + if (br_port->mrouter) { + err = prestera_mdb_port_add(mdb, br_port->dev, + mdb->addr, + mdb->vid); + if (err) + return err; + } else if (!br_port->mrouter && + !prestera_br_mdb_port_is_member + (br_mdb, br_port->dev)) { + prestera_mdb_port_del(mdb, br_port->dev); + } + } + } + + return 0; +} + +static int +prestera_mdb_enable_set(struct prestera_br_mdb_entry *br_mdb, bool enable) +{ + int err; + + if (enable != br_mdb->enabled) { + if (enable) + err = prestera_hw_mdb_create(br_mdb->mdb); + else + err = prestera_hw_mdb_destroy(br_mdb->mdb); + + if (err) + return err; + + br_mdb->enabled = enable; + } + + return 0; +} + +static int +prestera_br_mdb_enable_set(struct prestera_bridge *br_dev, bool enable) +{ + struct prestera_br_mdb_entry *br_mdb; + int err; + + list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list, + br_mdb_entry_node) { + err = prestera_mdb_enable_set(br_mdb, enable); + if (err) + return err; + } + + return 0; +} + +static int prestera_port_attr_br_mc_disabled_set(struct prestera_port *port, + struct net_device *orig_dev, + bool mc_disabled) +{ + struct prestera_switch *sw = port->sw; + struct prestera_bridge *br_dev; + + br_dev = prestera_bridge_find(sw, orig_dev); + if (!br_dev) + return 0; + + br_dev->multicast_enabled = !mc_disabled; + + /* There's no point in re-enabling the mdb if the mrouter is missing. */ + WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled && + br_dev->mrouter_exist)); + + WARN_ON(prestera_br_mdb_sync(br_dev)); + + WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev)); + + return 0; +} + +static bool +prestera_bridge_mdb_mc_mrouter_exists(struct prestera_bridge *br_dev) +{ + struct prestera_bridge_port *br_port; + + list_for_each_entry(br_port, &br_dev->port_list, head) + if (br_port->mrouter) + return true; + + return false; +} + +static int +prestera_port_attr_mrouter_set(struct prestera_port *port, + struct net_device *orig_dev, + bool is_port_mrouter) +{ + struct prestera_bridge_port *br_port; + struct prestera_bridge *br_dev; + + br_port = prestera_bridge_port_find(port->sw, orig_dev); + if (!br_port) + return 0; + + br_dev = br_port->bridge; + br_port->mrouter = is_port_mrouter; + + br_dev->mrouter_exist = prestera_bridge_mdb_mc_mrouter_exists(br_dev); + + /* Enable MDB processing only if an mrouter exists and MC is enabled. + * If MC is enabled but there is no mrouter, the device floods all + * multicast traffic (even if the MDB table is not empty) using the + * bridge's flood capabilities (without the use of flood_domain). 
+ */ + WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled && + br_dev->mrouter_exist)); + + WARN_ON(prestera_br_mdb_sync(br_dev)); + + WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev)); + + return 0; +} + +static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx, + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack) +{ + struct prestera_port *port = netdev_priv(dev); + int err = 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + err = prestera_port_attr_stp_state_set(port, attr->orig_dev, + attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + if (attr->u.brport_flags.mask & + ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_PORT_LOCKED)) + err = -EINVAL; + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + err = prestera_port_attr_br_flags_set(port, attr->orig_dev, + attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + err = prestera_port_attr_br_ageing_set(port, + attr->u.ageing_time); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + err = prestera_port_attr_br_vlan_set(port, attr->orig_dev, + attr->u.vlan_filtering); + break; + case SWITCHDEV_ATTR_ID_PORT_MROUTER: + err = prestera_port_attr_mrouter_set(port, attr->orig_dev, + attr->u.mrouter); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: + err = prestera_port_attr_br_mc_disabled_set(port, attr->orig_dev, + attr->u.mc_disabled); + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + +static void +prestera_fdb_offload_notify(struct prestera_port *port, + struct switchdev_notifier_fdb_info *info) +{ + struct switchdev_notifier_fdb_info send_info = {}; + + send_info.addr = info->addr; + send_info.vid = info->vid; + send_info.offloaded = true; + + call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, port->dev, + &send_info.info, NULL); +} + +static int prestera_port_fdb_set(struct prestera_port *port, + struct switchdev_notifier_fdb_info *fdb_info, + bool adding) +{ + struct prestera_switch *sw = port->sw; + struct prestera_bridge_port *br_port; + struct prestera_bridge *bridge; + int err; + u16 vid; + + br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev); + if (!br_port) + return -EINVAL; + + bridge = br_port->bridge; + + if (bridge->vlan_enabled) + vid = fdb_info->vid; + else + vid = bridge->bridge_id; + + if (adding) + err = prestera_fdb_add(port, fdb_info->addr, vid, false); + else + err = prestera_fdb_del(port, fdb_info->addr, vid); + + return err; +} + +static void prestera_fdb_event_work(struct work_struct *work) +{ + struct switchdev_notifier_fdb_info *fdb_info; + struct prestera_fdb_event_work *swdev_work; + struct prestera_port *port; + struct net_device *dev; + int err; + + swdev_work = container_of(work, struct prestera_fdb_event_work, work); + dev = swdev_work->dev; + + rtnl_lock(); + + port = prestera_port_dev_lower_find(dev); + if (!port) + goto out_unlock; + + switch (swdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + fdb_info = &swdev_work->fdb_info; + if (!fdb_info->added_by_user || fdb_info->is_local) + break; + + err = prestera_port_fdb_set(port, fdb_info, true); + if (err) + break; + + prestera_fdb_offload_notify(port, fdb_info); + break; + + case SWITCHDEV_FDB_DEL_TO_DEVICE: + fdb_info = &swdev_work->fdb_info; + prestera_port_fdb_set(port, fdb_info, false); + break; + } + +out_unlock: + rtnl_unlock(); + + kfree(swdev_work->fdb_info.addr); + kfree(swdev_work); + dev_put(dev); +} + +static int prestera_switchdev_event(struct notifier_block *unused, + 
unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + struct switchdev_notifier_fdb_info *fdb_info; + struct switchdev_notifier_info *info = ptr; + struct prestera_fdb_event_work *swdev_work; + struct net_device *upper; + int err; + + if (event == SWITCHDEV_PORT_ATTR_SET) { + err = switchdev_handle_port_attr_set(dev, ptr, + prestera_netdev_check, + prestera_port_obj_attr_set); + return notifier_from_errno(err); + } + + if (!prestera_netdev_check(dev)) + return NOTIFY_DONE; + + upper = netdev_master_upper_dev_get_rcu(dev); + if (!upper) + return NOTIFY_DONE; + + if (!netif_is_bridge_master(upper)) + return NOTIFY_DONE; + + swdev_work = kzalloc(sizeof(*swdev_work), GFP_ATOMIC); + if (!swdev_work) + return NOTIFY_BAD; + + swdev_work->event = event; + swdev_work->dev = dev; + + switch (event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + fdb_info = container_of(info, + struct switchdev_notifier_fdb_info, + info); + + INIT_WORK(&swdev_work->work, prestera_fdb_event_work); + memcpy(&swdev_work->fdb_info, ptr, + sizeof(swdev_work->fdb_info)); + + swdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!swdev_work->fdb_info.addr) + goto out_bad; + + ether_addr_copy((u8 *)swdev_work->fdb_info.addr, + fdb_info->addr); + dev_hold(dev); + break; + + default: + kfree(swdev_work); + return NOTIFY_DONE; + } + + queue_work(swdev_wq, &swdev_work->work); + return NOTIFY_DONE; + +out_bad: + kfree(swdev_work); + return NOTIFY_BAD; +} + +static int +prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan, + struct prestera_bridge_port *br_port) +{ + struct prestera_port *port = port_vlan->port; + struct prestera_bridge_vlan *br_vlan; + u16 vid = port_vlan->vid; + int err; + + if (port_vlan->br_port) + return 0; + + err = prestera_br_port_flags_set(br_port, port); + if (err) + goto err_flags2port_set; + + err = prestera_port_vid_stp_set(port, vid, br_port->stp_state); + if (err) + goto err_port_vid_stp_set; + + br_vlan = prestera_bridge_vlan_by_vid(br_port, vid); + if (!br_vlan) { + br_vlan = prestera_bridge_vlan_create(br_port, vid); + if (!br_vlan) { + err = -ENOMEM; + goto err_bridge_vlan_get; + } + } + + list_add(&port_vlan->br_vlan_head, &br_vlan->port_vlan_list); + + prestera_bridge_port_get(br_port); + port_vlan->br_port = br_port; + + return 0; + +err_bridge_vlan_get: + prestera_port_vid_stp_set(port, vid, BR_STATE_FORWARDING); +err_port_vid_stp_set: + prestera_br_port_flags_reset(br_port, port); +err_flags2port_set: + return err; +} + +static int +prestera_bridge_port_vlan_add(struct prestera_port *port, + struct prestera_bridge_port *br_port, + u16 vid, bool is_untagged, bool is_pvid, + struct netlink_ext_ack *extack) +{ + struct prestera_port_vlan *port_vlan; + u16 old_pvid = port->pvid; + u16 pvid; + int err; + + if (is_pvid) + pvid = vid; + else + pvid = port->pvid == vid ? 
0 : port->pvid; + + port_vlan = prestera_port_vlan_by_vid(port, vid); + if (port_vlan && port_vlan->br_port != br_port) + return -EEXIST; + + if (!port_vlan) { + port_vlan = prestera_port_vlan_create(port, vid, is_untagged); + if (IS_ERR(port_vlan)) + return PTR_ERR(port_vlan); + } else { + err = prestera_hw_vlan_port_set(port, vid, true, is_untagged); + if (err) + goto err_port_vlan_set; + } + + err = prestera_port_pvid_set(port, pvid); + if (err) + goto err_port_pvid_set; + + err = prestera_port_vlan_bridge_join(port_vlan, br_port); + if (err) + goto err_port_vlan_bridge_join; + + return 0; + +err_port_vlan_bridge_join: + prestera_port_pvid_set(port, old_pvid); +err_port_pvid_set: + prestera_hw_vlan_port_set(port, vid, false, false); +err_port_vlan_set: + prestera_port_vlan_destroy(port_vlan); + + return err; +} + +static void +prestera_bridge_port_vlan_del(struct prestera_port *port, + struct prestera_bridge_port *br_port, u16 vid) +{ + u16 pvid = port->pvid == vid ? 0 : port->pvid; + struct prestera_port_vlan *port_vlan; + + port_vlan = prestera_port_vlan_by_vid(port, vid); + if (WARN_ON(!port_vlan)) + return; + + prestera_port_vlan_bridge_leave(port_vlan); + prestera_port_pvid_set(port, pvid); + prestera_port_vlan_destroy(port_vlan); +} + +static int prestera_port_vlans_add(struct prestera_port *port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + struct net_device *orig_dev = vlan->obj.orig_dev; + struct prestera_bridge_port *br_port; + struct prestera_switch *sw = port->sw; + struct prestera_bridge *bridge; + + if (netif_is_bridge_master(orig_dev)) + return 0; + + br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev); + if (WARN_ON(!br_port)) + return -EINVAL; + + bridge = br_port->bridge; + if (!bridge->vlan_enabled) + return 0; + + return prestera_bridge_port_vlan_add(port, br_port, + vlan->vid, flag_untagged, + flag_pvid, extack); +} + +static struct prestera_br_mdb_entry * +prestera_br_mdb_entry_create(struct prestera_switch *sw, + struct prestera_bridge *br_dev, + const unsigned char *addr, u16 vid) +{ + struct prestera_br_mdb_entry *br_mdb_entry; + struct prestera_mdb_entry *mdb_entry; + + br_mdb_entry = kzalloc(sizeof(*br_mdb_entry), GFP_KERNEL); + if (!br_mdb_entry) + return NULL; + + mdb_entry = prestera_mdb_entry_create(sw, addr, vid); + if (!mdb_entry) + goto err_mdb_alloc; + + br_mdb_entry->mdb = mdb_entry; + br_mdb_entry->bridge = br_dev; + br_mdb_entry->enabled = true; + INIT_LIST_HEAD(&br_mdb_entry->br_mdb_port_list); + + list_add(&br_mdb_entry->br_mdb_entry_node, &br_dev->br_mdb_entry_list); + + return br_mdb_entry; + +err_mdb_alloc: + kfree(br_mdb_entry); + return NULL; +} + +static int prestera_br_mdb_port_add(struct prestera_br_mdb_entry *br_mdb, + struct prestera_bridge_port *br_port) +{ + struct prestera_br_mdb_port *br_mdb_port; + + list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list, + br_mdb_port_node) + if (br_mdb_port->br_port == br_port) + return 0; + + br_mdb_port = kzalloc(sizeof(*br_mdb_port), GFP_KERNEL); + if (!br_mdb_port) + return -ENOMEM; + + br_mdb_port->br_port = br_port; + list_add(&br_mdb_port->br_mdb_port_node, + &br_mdb->br_mdb_port_list); + + return 0; +} + +static struct prestera_br_mdb_entry * +prestera_br_mdb_entry_find(struct prestera_bridge *br_dev, + const unsigned char *addr, u16 vid) +{ + struct prestera_br_mdb_entry *br_mdb; + + list_for_each_entry(br_mdb, 
&br_dev->br_mdb_entry_list, + br_mdb_entry_node) + if (ether_addr_equal(&br_mdb->mdb->addr[0], addr) && + vid == br_mdb->mdb->vid) + return br_mdb; + + return NULL; +} + +static struct prestera_br_mdb_entry * +prestera_br_mdb_entry_get(struct prestera_switch *sw, + struct prestera_bridge *br_dev, + const unsigned char *addr, u16 vid) +{ + struct prestera_br_mdb_entry *br_mdb; + + br_mdb = prestera_br_mdb_entry_find(br_dev, addr, vid); + if (br_mdb) + return br_mdb; + + return prestera_br_mdb_entry_create(sw, br_dev, addr, vid); +} + +static int +prestera_mdb_port_addr_obj_add(const struct switchdev_obj_port_mdb *mdb) +{ + struct prestera_br_mdb_entry *br_mdb; + struct prestera_bridge_port *br_port; + struct prestera_bridge *br_dev; + struct prestera_switch *sw; + struct prestera_port *port; + int err; + + sw = prestera_switch_get(mdb->obj.orig_dev); + port = prestera_port_dev_lower_find(mdb->obj.orig_dev); + + br_port = prestera_bridge_port_find(sw, mdb->obj.orig_dev); + if (!br_port) + return 0; + + br_dev = br_port->bridge; + + if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid)) + return 0; + + if (mdb->vid) + br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0], + mdb->vid); + else + br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0], + br_dev->bridge_id); + + if (!br_mdb) + return -ENOMEM; + + /* Make sure newly allocated MDB entry gets disabled if either MC is + * disabled, or the mrouter does not exist. + */ + WARN_ON(prestera_mdb_enable_set(br_mdb, br_dev->multicast_enabled && + br_dev->mrouter_exist)); + + err = prestera_br_mdb_port_add(br_mdb, br_port); + if (err) { + prestera_br_mdb_entry_put(br_mdb); + return err; + } + + err = prestera_br_mdb_sync(br_dev); + if (err) + return err; + + return 0; +} + +static int prestera_port_obj_add(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack) +{ + struct prestera_port *port = netdev_priv(dev); + const struct switchdev_obj_port_vlan *vlan; + const struct switchdev_obj_port_mdb *mdb; + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + return prestera_port_vlans_add(port, vlan, extack); + case SWITCHDEV_OBJ_ID_PORT_MDB: + mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + err = prestera_mdb_port_addr_obj_add(mdb); + break; + case SWITCHDEV_OBJ_ID_HOST_MDB: + fallthrough; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int prestera_port_vlans_del(struct prestera_port *port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct net_device *orig_dev = vlan->obj.orig_dev; + struct prestera_bridge_port *br_port; + struct prestera_switch *sw = port->sw; + + if (netif_is_bridge_master(orig_dev)) + return -EOPNOTSUPP; + + br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev); + if (WARN_ON(!br_port)) + return -EINVAL; + + if (!br_port->bridge->vlan_enabled) + return 0; + + prestera_bridge_port_vlan_del(port, br_port, vlan->vid); + + return 0; +} + +static int +prestera_mdb_port_addr_obj_del(struct prestera_port *port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct prestera_br_mdb_entry *br_mdb; + struct prestera_bridge_port *br_port; + struct prestera_bridge *br_dev; + int err; + + /* Bridge port no longer exists - and neither does this MDB entry */ + br_port = prestera_bridge_port_find(port->sw, mdb->obj.orig_dev); + if (!br_port) + return 0; + + /* Removing an MDB entry with a non-existent VLAN is not supported */ + if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid)) + return 0;
+ + br_dev = br_port->bridge; + + if (br_port->bridge->vlan_enabled) + br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0], + mdb->vid); + else + br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0], + br_port->bridge->bridge_id); + + if (!br_mdb) + return 0; + + /* Since this port might have been the last one in the MDB group, we + * have to remove the port from both the software and HW MDB, sync the + * MDB table, and then destroy the software MDB entry (if needed). + */ + prestera_br_mdb_port_del(br_mdb, br_port); + + prestera_br_mdb_entry_put(br_mdb); + + err = prestera_br_mdb_sync(br_dev); + if (err) + return err; + + return 0; +} + +static int prestera_port_obj_del(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj) +{ + struct prestera_port *port = netdev_priv(dev); + const struct switchdev_obj_port_mdb *mdb; + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + return prestera_port_vlans_del(port, SWITCHDEV_OBJ_PORT_VLAN(obj)); + case SWITCHDEV_OBJ_ID_PORT_MDB: + mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + err = prestera_mdb_port_addr_obj_del(port, mdb); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int prestera_switchdev_blk_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add(dev, ptr, + prestera_netdev_check, + prestera_port_obj_add); + break; + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del(dev, ptr, + prestera_netdev_check, + prestera_port_obj_del); + break; + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + prestera_netdev_check, + prestera_port_obj_attr_set); + break; + default: + return NOTIFY_DONE; + } + + return notifier_from_errno(err); +} + +static void prestera_fdb_event(struct prestera_switch *sw, + struct prestera_event *evt, void *arg) +{ + struct switchdev_notifier_fdb_info info = {}; + struct net_device *dev = NULL; + struct prestera_port *port; + struct prestera_lag *lag; + + switch (evt->fdb_evt.type) { + case PRESTERA_FDB_ENTRY_TYPE_REG_PORT: + port = prestera_find_port(sw, evt->fdb_evt.dest.port_id); + if (port) + dev = port->dev; + break; + case PRESTERA_FDB_ENTRY_TYPE_LAG: + lag = prestera_lag_by_id(sw, evt->fdb_evt.dest.lag_id); + if (lag) + dev = lag->dev; + break; + default: + return; + } + + if (!dev) + return; + + info.addr = evt->fdb_evt.data.mac; + info.vid = evt->fdb_evt.vid; + info.offloaded = true; + + rtnl_lock(); + + switch (evt->id) { + case PRESTERA_FDB_EVENT_LEARNED: + call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, + dev, &info.info, NULL); + break; + case PRESTERA_FDB_EVENT_AGED: + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, + dev, &info.info, NULL); + break; + } + + rtnl_unlock(); +} + +static int prestera_fdb_init(struct prestera_switch *sw) +{ + int err; + + err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_FDB, + prestera_fdb_event, NULL); + if (err) + return err; + + err = prestera_hw_switch_ageing_set(sw, PRESTERA_DEFAULT_AGEING_TIME_MS); + if (err) + goto err_ageing_set; + + return 0; + +err_ageing_set: + prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB, + prestera_fdb_event); + return err; +} + +static void prestera_fdb_fini(struct prestera_switch *sw) +{ + prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB, + prestera_fdb_event); +} + +static int
prestera_switchdev_handler_init(struct prestera_switchdev *swdev) +{ + int err; + + swdev->swdev_nb.notifier_call = prestera_switchdev_event; + err = register_switchdev_notifier(&swdev->swdev_nb); + if (err) + goto err_register_swdev_notifier; + + swdev->swdev_nb_blk.notifier_call = prestera_switchdev_blk_event; + err = register_switchdev_blocking_notifier(&swdev->swdev_nb_blk); + if (err) + goto err_register_blk_swdev_notifier; + + return 0; + +err_register_blk_swdev_notifier: + unregister_switchdev_notifier(&swdev->swdev_nb); +err_register_swdev_notifier: + destroy_workqueue(swdev_wq); + return err; +} + +static void prestera_switchdev_handler_fini(struct prestera_switchdev *swdev) +{ + unregister_switchdev_blocking_notifier(&swdev->swdev_nb_blk); + unregister_switchdev_notifier(&swdev->swdev_nb); +} + +int prestera_switchdev_init(struct prestera_switch *sw) +{ + struct prestera_switchdev *swdev; + int err; + + swdev = kzalloc(sizeof(*swdev), GFP_KERNEL); + if (!swdev) + return -ENOMEM; + + sw->swdev = swdev; + swdev->sw = sw; + + INIT_LIST_HEAD(&swdev->bridge_list); + + swdev_wq = alloc_ordered_workqueue("%s_ordered", 0, "prestera_br"); + if (!swdev_wq) { + err = -ENOMEM; + goto err_alloc_wq; + } + + err = prestera_switchdev_handler_init(swdev); + if (err) + goto err_swdev_init; + + err = prestera_fdb_init(sw); + if (err) + goto err_fdb_init; + + return 0; + +err_fdb_init: +err_swdev_init: + destroy_workqueue(swdev_wq); +err_alloc_wq: + kfree(swdev); + + return err; +} + +void prestera_switchdev_fini(struct prestera_switch *sw) +{ + struct prestera_switchdev *swdev = sw->swdev; + + prestera_fdb_fini(sw); + prestera_switchdev_handler_fini(swdev); + destroy_workqueue(swdev_wq); + kfree(swdev); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h new file mode 100644 index 0000000000..0e93fda3d9 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_SWITCHDEV_H_ +#define _PRESTERA_SWITCHDEV_H_ + +int prestera_switchdev_init(struct prestera_switch *sw); +void prestera_switchdev_fini(struct prestera_switch *sw); + +int prestera_bridge_port_join(struct net_device *br_dev, + struct prestera_port *port, + struct netlink_ext_ack *extack); + +void prestera_bridge_port_leave(struct net_device *br_dev, + struct prestera_port *port); + +#endif /* _PRESTERA_SWITCHDEV_H_ */
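(Aside: prestera_switchdev.h only declares prestera_bridge_port_join()/_leave(); their caller lives in prestera_main.c, which is outside this excerpt. The fragment below is a rough sketch of the usual NETDEV_CHANGEUPPER pattern such an API is driven by, assuming the standard netdevice notifier fields; it is an illustration, not code from this commit.)

/* Hypothetical caller sketch: react to a port being enslaved to or
 * released from a bridge.  The function name is invented; the notifier
 * helpers are standard kernel API.
 */
#include <linux/netdevice.h>
#include <linux/if_bridge.h>

#include "prestera.h"
#include "prestera_switchdev.h"

static int example_port_changeupper(struct prestera_port *port,
				    struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev;
	struct netlink_ext_ack *extack;

	if (!netif_is_bridge_master(upper))
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (info->linking)
		/* port->dev has been enslaved to the bridge: offload it */
		return prestera_bridge_port_join(upper, port, extack);

	/* port->dev has left the bridge: tear the offload down */
	prestera_bridge_port_leave(upper, port);

	return 0;
}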