author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit 2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree   848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/dsa/sja1105
parent Initial commit. (diff)
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/dsa/sja1105')
-rw-r--r--  drivers/net/dsa/sja1105/Kconfig                  |   51
-rw-r--r--  drivers/net/dsa/sja1105/Makefile                 |   25
-rw-r--r--  drivers/net/dsa/sja1105/sja1105.h                |  422
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_clocking.c       |  855
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_devlink.c        |  145
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_dynamic_config.c | 1413
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_dynamic_config.h |   41
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ethtool.c        |  629
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_flower.c         |  542
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c           | 3481
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_mdio.c           |  547
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.c            |  974
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.h            |  207
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_spi.c            |  977
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.c  | 1952
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.h  |  548
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_tas.c            |  897
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_tas.h            |  104
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_vl.c             |  802
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_vl.h             |   74
20 files changed, 14686 insertions(+), 0 deletions(-)
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
new file mode 100644
index 000000000..1291bba3f
--- /dev/null
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_SJA1105
+ tristate "NXP SJA1105 Ethernet switch family support"
+ depends on NET_DSA && SPI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select NET_DSA_TAG_SJA1105
+ select PCS_XPCS
+ select PACKING
+ select CRC32
+ help
+ This is the driver for the NXP SJA1105 (5-port) and SJA1110 (10-port)
+ automotive Ethernet switch family. These are managed over an SPI
+ interface. Probing is handled based on OF bindings and so is the
+ linkage to PHYLINK. The driver supports the following revisions:
+ - SJA1105E (Gen. 1, No TT-Ethernet)
+ - SJA1105T (Gen. 1, TT-Ethernet)
+ - SJA1105P (Gen. 2, No SGMII, No TT-Ethernet)
+ - SJA1105Q (Gen. 2, No SGMII, TT-Ethernet)
+ - SJA1105R (Gen. 2, SGMII, No TT-Ethernet)
+ - SJA1105S (Gen. 2, SGMII, TT-Ethernet)
+ - SJA1110A (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 10 ports)
+ - SJA1110B (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 9 ports)
+ - SJA1110C (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 7 ports)
+ - SJA1110D (Gen. 3, SGMII, TT-Ethernet, no 100base-TX PHY, 7 ports)
+
+config NET_DSA_SJA1105_PTP
+ bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch"
+ depends on NET_DSA_SJA1105
+ depends on PTP_1588_CLOCK
+ help
+ This enables support for timestamping and PTP clock manipulations in
+ the SJA1105 DSA driver.
+
+config NET_DSA_SJA1105_TAS
+ bool "Support for the Time-Aware Scheduler on NXP SJA1105"
+ depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO
+ depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m
+ depends on NET_DSA_SJA1105_PTP
+ help
+ This enables support for the TTEthernet-based egress scheduling
+ engine in the SJA1105 DSA driver, which is controlled using a
+ hardware offload of the tc-taprio qdisc.
+
+config NET_DSA_SJA1105_VL
+ bool "Support for Virtual Links on NXP SJA1105"
+ depends on NET_DSA_SJA1105_TAS
+ help
+ This enables support for flow classification using capable devices
+ (SJA1105T, SJA1105Q, SJA1105S). The following actions are supported:
+ - redirect, trap, drop
+ - time-based ingress policing, via the tc-gate action
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
new file mode 100644
index 000000000..40d69e6c0
--- /dev/null
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_SJA1105) += sja1105.o
+
+sja1105-objs := \
+ sja1105_spi.o \
+ sja1105_main.o \
+ sja1105_mdio.o \
+ sja1105_flower.o \
+ sja1105_ethtool.o \
+ sja1105_devlink.o \
+ sja1105_clocking.o \
+ sja1105_static_config.o \
+ sja1105_dynamic_config.o \
+
+ifdef CONFIG_NET_DSA_SJA1105_PTP
+sja1105-objs += sja1105_ptp.o
+endif
+
+ifdef CONFIG_NET_DSA_SJA1105_TAS
+sja1105-objs += sja1105_tas.o
+endif
+
+ifdef CONFIG_NET_DSA_SJA1105_VL
+sja1105-objs += sja1105_vl.o
+endif
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
new file mode 100644
index 000000000..06e0ebfe6
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_H
+#define _SJA1105_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <linux/dsa/sja1105.h>
+#include <linux/dsa/8021q.h>
+#include <net/dsa.h>
+#include <linux/mutex.h>
+#include "sja1105_static_config.h"
+
+#define SJA1105ET_FDB_BIN_SIZE 4
+/* The hardware value is in multiples of 10 ms.
+ * The passed parameter is in multiples of 1 ms.
+ */
+#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
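+/* Example (editor's illustration): the Linux bridge default ageing time of
+ * 300 seconds arrives here as 300000 ms, and SJA1105_AGEING_TIME_MS(300000)
+ * yields 30000, i.e. the value the hardware expects in 10 ms units.
+ */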
+#define SJA1105_NUM_L2_POLICERS SJA1110_MAX_L2_POLICING_COUNT
+
+/* Calculated assuming 1Gbps, where the clock has 125 MHz (8 ns period)
+ * To avoid floating point operations, we'll multiply the degrees by 10
+ * to get a "phase" and get 1 decimal point precision.
+ */
+#define SJA1105_RGMII_DELAY_PS_TO_PHASE(ps) \
+ (((ps) * 360) / 800)
+#define SJA1105_RGMII_DELAY_PHASE_TO_PS(phase) \
+ ((800 * (phase)) / 360)
+#define SJA1105_RGMII_DELAY_PHASE_TO_HW(phase) \
+ (((phase) - 738) / 9)
+#define SJA1105_RGMII_DELAY_PS_TO_HW(ps) \
+ SJA1105_RGMII_DELAY_PHASE_TO_HW(SJA1105_RGMII_DELAY_PS_TO_PHASE(ps))
+
+/* Valid range in degrees is a value between 73.8 and 101.7
+ * in 0.9 degree increments
+ */
+#define SJA1105_RGMII_DELAY_MIN_PS \
+ SJA1105_RGMII_DELAY_PHASE_TO_PS(738)
+#define SJA1105_RGMII_DELAY_MAX_PS \
+ SJA1105_RGMII_DELAY_PHASE_TO_PS(1017)
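+
+/* Worked example (editor's illustration): a requested delay of 2000 ps
+ * maps to a "phase" of 2000 * 360 / 800 = 900 (i.e. 90.0 degrees), which
+ * packs into the hardware value (900 - 738) / 9 = 18. The bounds above
+ * evaluate to 1640 ps (73.8 degrees) and 2260 ps (101.7 degrees).
+ */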
+
+typedef enum {
+ SPI_READ = 0,
+ SPI_WRITE = 1,
+} sja1105_spi_rw_mode_t;
+
+#include "sja1105_tas.h"
+#include "sja1105_ptp.h"
+
+enum sja1105_stats_area {
+ MAC,
+ HL1,
+ HL2,
+ ETHER,
+ __MAX_SJA1105_STATS_AREA,
+};
+
+/* Keeps the different addresses between E/T and P/Q/R/S */
+struct sja1105_regs {
+ u64 device_id;
+ u64 prod_id;
+ u64 status;
+ u64 port_control;
+ u64 rgu;
+ u64 vl_status;
+ u64 config;
+ u64 rmii_pll1;
+ u64 ptppinst;
+ u64 ptppindur;
+ u64 ptp_control;
+ u64 ptpclkval;
+ u64 ptpclkrate;
+ u64 ptpclkcorp;
+ u64 ptpsyncts;
+ u64 ptpschtm;
+ u64 ptpegr_ts[SJA1105_MAX_NUM_PORTS];
+ u64 pad_mii_tx[SJA1105_MAX_NUM_PORTS];
+ u64 pad_mii_rx[SJA1105_MAX_NUM_PORTS];
+ u64 pad_mii_id[SJA1105_MAX_NUM_PORTS];
+ u64 cgu_idiv[SJA1105_MAX_NUM_PORTS];
+ u64 mii_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 mii_rx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 mii_ext_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 mii_ext_rx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 rgmii_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 rmii_ref_clk[SJA1105_MAX_NUM_PORTS];
+ u64 rmii_ext_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 stats[__MAX_SJA1105_STATS_AREA][SJA1105_MAX_NUM_PORTS];
+ u64 mdio_100base_tx;
+ u64 mdio_100base_t1;
+ u64 pcs_base[SJA1105_MAX_NUM_PORTS];
+};
+
+struct sja1105_mdio_private {
+ struct sja1105_private *priv;
+};
+
+enum {
+ SJA1105_SPEED_AUTO,
+ SJA1105_SPEED_10MBPS,
+ SJA1105_SPEED_100MBPS,
+ SJA1105_SPEED_1000MBPS,
+ SJA1105_SPEED_2500MBPS,
+ SJA1105_SPEED_MAX,
+};
+
+enum sja1105_internal_phy_t {
+ SJA1105_NO_PHY = 0,
+ SJA1105_PHY_BASE_TX,
+ SJA1105_PHY_BASE_T1,
+};
+
+struct sja1105_info {
+ u64 device_id;
+ /* Needed for distinction between P and R, and between Q and S
+ * (since the parts with/without SGMII share the same
+ * switch core and device_id)
+ */
+ u64 part_no;
+ /* E/T and P/Q/R/S have partial timestamps of different sizes.
+ * They must be reconstructed on both families anyway to get the full
+ * 64-bit values back.
+ */
+ int ptp_ts_bits;
+ /* Also SPI commands are of different sizes to retrieve
+ * the egress timestamps.
+ */
+ int ptpegr_ts_bytes;
+ int num_cbs_shapers;
+ int max_frame_mem;
+ int num_ports;
+ bool multiple_cascade_ports;
+ /* Every {port, TXQ} has its own CBS shaper */
+ bool fixed_cbs_mapping;
+ enum dsa_tag_protocol tag_proto;
+ const struct sja1105_dynamic_table_ops *dyn_ops;
+ const struct sja1105_table_ops *static_ops;
+ const struct sja1105_regs *regs;
+ bool can_limit_mcast_flood;
+ int (*reset_cmd)(struct dsa_switch *ds);
+ int (*setup_rgmii_delay)(const void *ctx, int port);
+ /* Prototypes from include/net/dsa.h */
+ int (*fdb_add_cmd)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*fdb_del_cmd)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ void (*ptp_cmd_packing)(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
+ bool (*rxtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
+ void (*txtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
+ int (*clocking_setup)(struct sja1105_private *priv);
+ int (*pcs_mdio_read)(struct mii_bus *bus, int phy, int reg);
+ int (*pcs_mdio_write)(struct mii_bus *bus, int phy, int reg, u16 val);
+ int (*disable_microcontroller)(struct sja1105_private *priv);
+ const char *name;
+ bool supports_mii[SJA1105_MAX_NUM_PORTS];
+ bool supports_rmii[SJA1105_MAX_NUM_PORTS];
+ bool supports_rgmii[SJA1105_MAX_NUM_PORTS];
+ bool supports_sgmii[SJA1105_MAX_NUM_PORTS];
+ bool supports_2500basex[SJA1105_MAX_NUM_PORTS];
+ enum sja1105_internal_phy_t internal_phy[SJA1105_MAX_NUM_PORTS];
+ const u64 port_speed[SJA1105_SPEED_MAX];
+};
+
+enum sja1105_key_type {
+ SJA1105_KEY_BCAST,
+ SJA1105_KEY_TC,
+ SJA1105_KEY_VLAN_UNAWARE_VL,
+ SJA1105_KEY_VLAN_AWARE_VL,
+};
+
+struct sja1105_key {
+ enum sja1105_key_type type;
+
+ union {
+ /* SJA1105_KEY_TC */
+ struct {
+ int pcp;
+ } tc;
+
+ /* SJA1105_KEY_VLAN_UNAWARE_VL */
+ /* SJA1105_KEY_VLAN_AWARE_VL */
+ struct {
+ u64 dmac;
+ u16 vid;
+ u16 pcp;
+ } vl;
+ };
+};
+
+enum sja1105_rule_type {
+ SJA1105_RULE_BCAST_POLICER,
+ SJA1105_RULE_TC_POLICER,
+ SJA1105_RULE_VL,
+};
+
+enum sja1105_vl_type {
+ SJA1105_VL_NONCRITICAL,
+ SJA1105_VL_RATE_CONSTRAINED,
+ SJA1105_VL_TIME_TRIGGERED,
+};
+
+struct sja1105_rule {
+ struct list_head list;
+ unsigned long cookie;
+ unsigned long port_mask;
+ struct sja1105_key key;
+ enum sja1105_rule_type type;
+
+ /* Action */
+ union {
+ /* SJA1105_RULE_BCAST_POLICER */
+ struct {
+ int sharindx;
+ } bcast_pol;
+
+ /* SJA1105_RULE_TC_POLICER */
+ struct {
+ int sharindx;
+ } tc_pol;
+
+ /* SJA1105_RULE_VL */
+ struct {
+ enum sja1105_vl_type type;
+ unsigned long destports;
+ int sharindx;
+ int maxlen;
+ int ipv;
+ u64 base_time;
+ u64 cycle_time;
+ int num_entries;
+ struct action_gate_entry *entries;
+ struct flow_stats stats;
+ } vl;
+ };
+};
+
+struct sja1105_flow_block {
+ struct list_head rules;
+ bool l2_policer_used[SJA1105_NUM_L2_POLICERS];
+ int num_virtual_links;
+};
+
+struct sja1105_private {
+ struct sja1105_static_config static_config;
+ int rgmii_rx_delay_ps[SJA1105_MAX_NUM_PORTS];
+ int rgmii_tx_delay_ps[SJA1105_MAX_NUM_PORTS];
+ phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS];
+ bool fixed_link[SJA1105_MAX_NUM_PORTS];
+ unsigned long ucast_egress_floods;
+ unsigned long bcast_egress_floods;
+ unsigned long hwts_tx_en;
+ unsigned long hwts_rx_en;
+ const struct sja1105_info *info;
+ size_t max_xfer_len;
+ struct spi_device *spidev;
+ struct dsa_switch *ds;
+ u16 bridge_pvid[SJA1105_MAX_NUM_PORTS];
+ u16 tag_8021q_pvid[SJA1105_MAX_NUM_PORTS];
+ struct sja1105_flow_block flow_block;
+ /* Serializes transmission of management frames so that
+ * the switch doesn't confuse them with one another.
+ */
+ struct mutex mgmt_lock;
+ /* Serializes accesses to the FDB */
+ struct mutex fdb_lock;
+ /* PTP two-step TX timestamp ID, and its serialization lock */
+ spinlock_t ts_id_lock;
+ u8 ts_id;
+ /* Serializes access to the dynamic config interface */
+ struct mutex dynamic_config_lock;
+ struct devlink_region **regions;
+ struct sja1105_cbs_entry *cbs;
+ struct mii_bus *mdio_base_t1;
+ struct mii_bus *mdio_base_tx;
+ struct mii_bus *mdio_pcs;
+ struct dw_xpcs *xpcs[SJA1105_MAX_NUM_PORTS];
+ struct sja1105_ptp_data ptp_data;
+ struct sja1105_tas_data tas_data;
+};
+
+#include "sja1105_dynamic_config.h"
+
+struct sja1105_spi_message {
+ u64 access;
+ u64 read_count;
+ u64 address;
+};
+
+/* From sja1105_main.c */
+enum sja1105_reset_reason {
+ SJA1105_VLAN_FILTERING = 0,
+ SJA1105_AGEING_TIME,
+ SJA1105_SCHEDULING,
+ SJA1105_BEST_EFFORT_POLICING,
+ SJA1105_VIRTUAL_LINKS,
+};
+
+int sja1105_static_config_reload(struct sja1105_private *priv,
+ enum sja1105_reset_reason reason);
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ struct netlink_ext_ack *extack);
+void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
+
+/* From sja1105_mdio.c */
+int sja1105_mdiobus_register(struct dsa_switch *ds);
+void sja1105_mdiobus_unregister(struct dsa_switch *ds);
+int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
+int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
+int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
+int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
+
+/* From sja1105_devlink.c */
+int sja1105_devlink_setup(struct dsa_switch *ds);
+void sja1105_devlink_teardown(struct dsa_switch *ds);
+int sja1105_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+
+/* From sja1105_spi.c */
+int sja1105_xfer_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u8 *buf, size_t len);
+int sja1105_xfer_u32(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
+ struct ptp_system_timestamp *ptp_sts);
+int sja1105_xfer_u64(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
+ struct ptp_system_timestamp *ptp_sts);
+int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
+ void *config_buf, int buf_len);
+int sja1105_static_config_upload(struct sja1105_private *priv);
+int sja1105_inhibit_tx(const struct sja1105_private *priv,
+ unsigned long port_bitmap, bool tx_inhibited);
+
+extern const struct sja1105_info sja1105e_info;
+extern const struct sja1105_info sja1105t_info;
+extern const struct sja1105_info sja1105p_info;
+extern const struct sja1105_info sja1105q_info;
+extern const struct sja1105_info sja1105r_info;
+extern const struct sja1105_info sja1105s_info;
+extern const struct sja1105_info sja1110a_info;
+extern const struct sja1105_info sja1110b_info;
+extern const struct sja1105_info sja1110c_info;
+extern const struct sja1105_info sja1110d_info;
+
+/* From sja1105_clocking.c */
+
+typedef enum {
+ XMII_MAC = 0,
+ XMII_PHY = 1,
+} sja1105_mii_role_t;
+
+typedef enum {
+ XMII_MODE_MII = 0,
+ XMII_MODE_RMII = 1,
+ XMII_MODE_RGMII = 2,
+ XMII_MODE_SGMII = 3,
+} sja1105_phy_interface_t;
+
+int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port);
+int sja1110_setup_rgmii_delay(const void *ctx, int port);
+int sja1105_clocking_setup_port(struct sja1105_private *priv, int port);
+int sja1105_clocking_setup(struct sja1105_private *priv);
+int sja1110_disable_microcontroller(struct sja1105_private *priv);
+
+/* From sja1105_ethtool.c */
+void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data);
+void sja1105_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data);
+int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset);
+
+/* From sja1105_dynamic_config.c */
+int sja1105_dynamic_config_read(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry);
+int sja1105_dynamic_config_write(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry, bool keep);
+
+enum sja1105_iotag {
+ SJA1105_C_TAG = 0, /* Inner VLAN header */
+ SJA1105_S_TAG = 1, /* Outer VLAN header */
+};
+
+enum sja1110_vlan_type {
+ SJA1110_VLAN_INVALID = 0,
+ SJA1110_VLAN_C_TAG = 1, /* Single inner VLAN tag */
+ SJA1110_VLAN_S_TAG = 2, /* Single outer VLAN tag */
+ SJA1110_VLAN_D_TAG = 3, /* Double tagged, use outer tag for lookup */
+};
+
+enum sja1110_shaper_type {
+ SJA1110_LEAKY_BUCKET_SHAPER = 0,
+ SJA1110_CBS_SHAPER = 1,
+};
+
+u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid);
+int sja1105et_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105et_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+
+/* From sja1105_flower.c */
+int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+void sja1105_flower_setup(struct dsa_switch *ds);
+void sja1105_flower_teardown(struct dsa_switch *ds);
+struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
+ unsigned long cookie);
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
new file mode 100644
index 000000000..e3699f76f
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -0,0 +1,855 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright 2016-2018 NXP
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/packing.h>
+#include "sja1105.h"
+
+#define SJA1105_SIZE_CGU_CMD 4
+#define SJA1110_BASE_MCSS_CLK SJA1110_CGU_ADDR(0x70)
+#define SJA1110_BASE_TIMER_CLK SJA1110_CGU_ADDR(0x74)
+
+/* Common structure for CFG_PAD_MIIx_RX and CFG_PAD_MIIx_TX */
+struct sja1105_cfg_pad_mii {
+ u64 d32_os;
+ u64 d32_ih;
+ u64 d32_ipud;
+ u64 d10_ih;
+ u64 d10_os;
+ u64 d10_ipud;
+ u64 ctrl_os;
+ u64 ctrl_ih;
+ u64 ctrl_ipud;
+ u64 clk_os;
+ u64 clk_ih;
+ u64 clk_ipud;
+};
+
+struct sja1105_cfg_pad_mii_id {
+ u64 rxc_stable_ovr;
+ u64 rxc_delay;
+ u64 rxc_bypass;
+ u64 rxc_pd;
+ u64 txc_stable_ovr;
+ u64 txc_delay;
+ u64 txc_bypass;
+ u64 txc_pd;
+};
+
+/* UM10944 Table 82.
+ * IDIV_0_C to IDIV_4_C control registers
+ * (addr. 10000Bh to 10000Fh)
+ */
+struct sja1105_cgu_idiv {
+ u64 clksrc;
+ u64 autoblock;
+ u64 idiv;
+ u64 pd;
+};
+
+/* PLL_1_C control register
+ *
+ * SJA1105 E/T: UM10944 Table 81 (address 10000Ah)
+ * SJA1105 P/Q/R/S: UM11040 Table 116 (address 10000Ah)
+ */
+struct sja1105_cgu_pll_ctrl {
+ u64 pllclksrc;
+ u64 msel;
+ u64 autoblock;
+ u64 psel;
+ u64 direct;
+ u64 fbsel;
+ u64 bypass;
+ u64 pd;
+};
+
+struct sja1110_cgu_outclk {
+ u64 clksrc;
+ u64 autoblock;
+ u64 pd;
+};
+
+enum {
+ CLKSRC_MII0_TX_CLK = 0x00,
+ CLKSRC_MII0_RX_CLK = 0x01,
+ CLKSRC_MII1_TX_CLK = 0x02,
+ CLKSRC_MII1_RX_CLK = 0x03,
+ CLKSRC_MII2_TX_CLK = 0x04,
+ CLKSRC_MII2_RX_CLK = 0x05,
+ CLKSRC_MII3_TX_CLK = 0x06,
+ CLKSRC_MII3_RX_CLK = 0x07,
+ CLKSRC_MII4_TX_CLK = 0x08,
+ CLKSRC_MII4_RX_CLK = 0x09,
+ CLKSRC_PLL0 = 0x0B,
+ CLKSRC_PLL1 = 0x0E,
+ CLKSRC_IDIV0 = 0x11,
+ CLKSRC_IDIV1 = 0x12,
+ CLKSRC_IDIV2 = 0x13,
+ CLKSRC_IDIV3 = 0x14,
+ CLKSRC_IDIV4 = 0x15,
+};
+
+/* UM10944 Table 83.
+ * MIIx clock control registers 1 to 30
+ * (addresses 100013h to 100035h)
+ */
+struct sja1105_cgu_mii_ctrl {
+ u64 clksrc;
+ u64 autoblock;
+ u64 pd;
+};
+
+static void sja1105_cgu_idiv_packing(void *buf, struct sja1105_cgu_idiv *idiv,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &idiv->clksrc, 28, 24, size, op);
+ sja1105_packing(buf, &idiv->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &idiv->idiv, 5, 2, size, op);
+ sja1105_packing(buf, &idiv->pd, 0, 0, size, op);
+}
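+
+/* Editor's note: sja1105_packing() (shared with the static config code, not
+ * part of this file) packs *val into, or unpacks it from, the inclusive bit
+ * range [msb, lsb] of the buffer - e.g. the clksrc field above occupies bits
+ * 28..24 of the 4-byte IDIV control word.
+ */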
+
+static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
+ bool enabled, int factor)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = priv->ds->dev;
+ struct sja1105_cgu_idiv idiv;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ if (regs->cgu_idiv[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ if (enabled && factor != 1 && factor != 10) {
+ dev_err(dev, "idiv factor must be 1 or 10\n");
+ return -ERANGE;
+ }
+
+ /* Payload for packed_buf */
+ idiv.clksrc = 0x0A; /* 25MHz */
+ idiv.autoblock = 1; /* Block clk automatically */
+ idiv.idiv = factor - 1; /* Divide by 1 or 10 */
+ idiv.pd = enabled ? 0 : 1; /* Power down? */
+ sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->cgu_idiv[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static void
+sja1105_cgu_mii_control_packing(void *buf, struct sja1105_cgu_mii_ctrl *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->clksrc, 28, 24, size, op);
+ sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
+ int port, sja1105_mii_role_t role)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_tx_clk;
+ const int mac_clk_sources[] = {
+ CLKSRC_MII0_TX_CLK,
+ CLKSRC_MII1_TX_CLK,
+ CLKSRC_MII2_TX_CLK,
+ CLKSRC_MII3_TX_CLK,
+ CLKSRC_MII4_TX_CLK,
+ };
+ const int phy_clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int clksrc;
+
+ if (regs->mii_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ if (role == XMII_MAC)
+ clksrc = mac_clk_sources[port];
+ else
+ clksrc = phy_clk_sources[port];
+
+ /* Payload for packed_buf */
+ mii_tx_clk.clksrc = clksrc;
+ mii_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_rx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_MII0_RX_CLK,
+ CLKSRC_MII1_RX_CLK,
+ CLKSRC_MII2_RX_CLK,
+ CLKSRC_MII3_RX_CLK,
+ CLKSRC_MII4_RX_CLK,
+ };
+
+ if (regs->mii_rx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload for packed_buf */
+ mii_rx_clk.clksrc = clk_sources[port];
+ mii_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_rx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_ext_tx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+
+ if (regs->mii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload for packed_buf */
+ mii_ext_tx_clk.clksrc = clk_sources[port];
+ mii_ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_ext_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_ext_rx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+
+ if (regs->mii_ext_rx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload for packed_buf */
+ mii_ext_rx_clk.clksrc = clk_sources[port];
+ mii_ext_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_ext_rx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
+{
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ dev_dbg(dev, "Configuring MII-%s clocking\n",
+ (role == XMII_MAC) ? "MAC" : "PHY");
+ /* If role is MAC, disable IDIV
+ * If role is PHY, enable IDIV and configure for 1/1 divider
+ */
+ rc = sja1105_cgu_idiv_config(priv, port, (role == XMII_PHY), 1);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of MII_TX_CLK_n
+ * * If role is MAC, select TX_CLK_n
+ * * If role is PHY, select IDIV_n
+ */
+ rc = sja1105_cgu_mii_tx_clk_config(priv, port, role);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of MII_RX_CLK_n
+ * Select RX_CLK_n
+ */
+ rc = sja1105_cgu_mii_rx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+
+ if (role == XMII_PHY) {
+ /* Per MII spec, the PHY (which is us) drives the TX_CLK pin */
+
+ /* Configure CLKSRC of EXT_TX_CLK_n
+ * Select IDIV_n
+ */
+ rc = sja1105_cgu_mii_ext_tx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of EXT_RX_CLK_n
+ * Select IDIV_n
+ */
+ rc = sja1105_cgu_mii_ext_rx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+static void
+sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->pllclksrc, 28, 24, size, op);
+ sja1105_packing(buf, &cmd->msel, 23, 16, size, op);
+ sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &cmd->psel, 9, 8, size, op);
+ sja1105_packing(buf, &cmd->direct, 7, 7, size, op);
+ sja1105_packing(buf, &cmd->fbsel, 6, 6, size, op);
+ sja1105_packing(buf, &cmd->bypass, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
+ int port, u64 speed)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl txc;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int clksrc;
+
+ if (regs->rgmii_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
+ clksrc = CLKSRC_PLL0;
+ } else {
+ int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
+ CLKSRC_IDIV3, CLKSRC_IDIV4};
+ clksrc = clk_sources[port];
+ }
+
+ /* RGMII: 125MHz for 1000, 25MHz for 100, 2.5MHz for 10 */
+ txc.clksrc = clksrc;
+ /* Autoblock clk while changing clksrc */
+ txc.autoblock = 1;
+ /* Power Down off => enabled */
+ txc.pd = 0;
+ sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgmii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+/* AGU */
+static void
+sja1105_cfg_pad_mii_packing(void *buf, struct sja1105_cfg_pad_mii *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->d32_os, 28, 27, size, op);
+ sja1105_packing(buf, &cmd->d32_ih, 26, 26, size, op);
+ sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op);
+ sja1105_packing(buf, &cmd->d10_os, 20, 19, size, op);
+ sja1105_packing(buf, &cmd->d10_ih, 18, 18, size, op);
+ sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op);
+ sja1105_packing(buf, &cmd->ctrl_os, 12, 11, size, op);
+ sja1105_packing(buf, &cmd->ctrl_ih, 10, 10, size, op);
+ sja1105_packing(buf, &cmd->ctrl_ipud, 9, 8, size, op);
+ sja1105_packing(buf, &cmd->clk_os, 4, 3, size, op);
+ sja1105_packing(buf, &cmd->clk_ih, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->clk_ipud, 1, 0, size, op);
+}
+
+static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii pad_mii_tx = {0};
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ if (regs->pad_mii_tx[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload */
+ pad_mii_tx.d32_os = 3; /* TXD[3:2] output stage: */
+ /* high noise/high speed */
+ pad_mii_tx.d10_os = 3; /* TXD[1:0] output stage: */
+ /* high noise/high speed */
+ pad_mii_tx.d32_ipud = 2; /* TXD[3:2] input stage: */
+ /* plain input (default) */
+ pad_mii_tx.d10_ipud = 2; /* TXD[1:0] input stage: */
+ /* plain input (default) */
+ pad_mii_tx.ctrl_os = 3; /* TX_CTL / TX_ER output stage */
+ pad_mii_tx.ctrl_ipud = 2; /* TX_CTL / TX_ER input stage (default) */
+ pad_mii_tx.clk_os = 3; /* TX_CLK output stage */
+ pad_mii_tx.clk_ih = 0; /* TX_CLK input hysteresis (default) */
+ pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */
+ sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_tx, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_cfg_pad_rx_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii pad_mii_rx = {0};
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ if (regs->pad_mii_rx[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload */
+ pad_mii_rx.d32_ih = 0; /* RXD[3:2] input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.d32_ipud = 2; /* RXD[3:2] input weak pull-up/down */
+ /* plain input (default) */
+ pad_mii_rx.d10_ih = 0; /* RXD[1:0] input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.d10_ipud = 2; /* RXD[1:0] input weak pull-up/down */
+ /* plain input (default) */
+ pad_mii_rx.ctrl_ih = 0; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
+ /* input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.ctrl_ipud = 3; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
+ /* input stage weak pull-up/down: */
+ /* pull-down */
+ pad_mii_rx.clk_os = 2; /* RX_CLK/RXC output stage: */
+ /* medium noise/fast speed (default) */
+ pad_mii_rx.clk_ih = 0; /* RX_CLK/RXC input hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.clk_ipud = 2; /* RX_CLK/RXC input pull-up/down: */
+ /* plain input (default) */
+ sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_rx, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_rx[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static void
+sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_CGU_CMD;
+
+ sja1105_packing(buf, &cmd->rxc_stable_ovr, 15, 15, size, op);
+ sja1105_packing(buf, &cmd->rxc_delay, 14, 10, size, op);
+ sja1105_packing(buf, &cmd->rxc_bypass, 9, 9, size, op);
+ sja1105_packing(buf, &cmd->rxc_pd, 8, 8, size, op);
+ sja1105_packing(buf, &cmd->txc_stable_ovr, 7, 7, size, op);
+ sja1105_packing(buf, &cmd->txc_delay, 6, 2, size, op);
+ sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
+}
+
+static void
+sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_CGU_CMD;
+ u64 range = 4;
+
+ /* Fields RXC_RANGE and TXC_RANGE select the input frequency range:
+ * 0 = 2.5MHz
+ * 1 = 25MHz
+ * 2 = 50MHz
+ * 3 = 125MHz
+ * 4 = Automatically determined by port speed.
+ * There's no point in defining a structure different than the one for
+ * SJA1105, so just hardcode the frequency range to automatic, just as
+ * before.
+ */
+ sja1105_packing(buf, &cmd->rxc_stable_ovr, 26, 26, size, op);
+ sja1105_packing(buf, &cmd->rxc_delay, 25, 21, size, op);
+ sja1105_packing(buf, &range, 20, 18, size, op);
+ sja1105_packing(buf, &cmd->rxc_bypass, 17, 17, size, op);
+ sja1105_packing(buf, &cmd->rxc_pd, 16, 16, size, op);
+ sja1105_packing(buf, &cmd->txc_stable_ovr, 10, 10, size, op);
+ sja1105_packing(buf, &cmd->txc_delay, 9, 5, size, op);
+ sja1105_packing(buf, &range, 4, 2, size, op);
+ sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
+}
+
+/* The RGMII delay setup procedure is 2-step and gets called upon each
+ * .phylink_mac_config. Both are strategic.
+ * The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues
+ * with recovering from a frequency change of the link partner's RGMII clock.
+ * The easiest way to recover from this is to temporarily power down the TDL,
+ * as it will re-lock at the new frequency afterwards.
+ */
+int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
+{
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ int rx_delay = priv->rgmii_rx_delay_ps[port];
+ int tx_delay = priv->rgmii_tx_delay_ps[port];
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int rc;
+
+ if (rx_delay)
+ pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
+ if (tx_delay)
+ pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
+
+ /* Stage 1: Turn the RGMII delay lines off. */
+ pad_mii_id.rxc_bypass = 1;
+ pad_mii_id.rxc_pd = 1;
+ pad_mii_id.txc_bypass = 1;
+ pad_mii_id.txc_pd = 1;
+ sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
+
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+ if (rc < 0)
+ return rc;
+
+ /* Stage 2: Turn the RGMII delay lines on. */
+ if (rx_delay) {
+ pad_mii_id.rxc_bypass = 0;
+ pad_mii_id.rxc_pd = 0;
+ }
+ if (tx_delay) {
+ pad_mii_id.txc_bypass = 0;
+ pad_mii_id.txc_pd = 0;
+ }
+ sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+int sja1110_setup_rgmii_delay(const void *ctx, int port)
+{
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ int rx_delay = priv->rgmii_rx_delay_ps[port];
+ int tx_delay = priv->rgmii_tx_delay_ps[port];
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ pad_mii_id.rxc_pd = 1;
+ pad_mii_id.txc_pd = 1;
+
+ if (rx_delay) {
+ pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
+ /* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */
+ pad_mii_id.rxc_bypass = 1;
+ pad_mii_id.rxc_pd = 0;
+ }
+
+ if (tx_delay) {
+ pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
+ pad_mii_id.txc_bypass = 1;
+ pad_mii_id.txc_pd = 0;
+ }
+
+ sja1110_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
+{
+ struct device *dev = priv->ds->dev;
+ struct sja1105_mac_config_entry *mac;
+ u64 speed;
+ int rc;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+ speed = mac[port].speed;
+
+ dev_dbg(dev, "Configuring port %d RGMII at speed %lldMbps\n",
+ port, speed);
+
+ if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
+ /* 1000Mbps, IDIV disabled (125 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, false, 1);
+ } else if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) {
+ /* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, true, 1);
+ } else if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) {
+ /* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, true, 10);
+ } else if (speed == priv->info->port_speed[SJA1105_SPEED_AUTO]) {
+ /* Skip CGU configuration if there is no speed available
+ * (e.g. link is not established yet)
+ */
+ dev_dbg(dev, "Speed not available, skipping CGU config\n");
+ return 0;
+ } else {
+ rc = -EINVAL;
+ }
+
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure idiv\n");
+ return rc;
+ }
+ rc = sja1105_cgu_rgmii_tx_clk_config(priv, port, speed);
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure RGMII Tx clock\n");
+ return rc;
+ }
+ rc = sja1105_rgmii_cfg_pad_tx_config(priv, port);
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure Tx pad registers\n");
+ return rc;
+ }
+
+ if (!priv->info->setup_rgmii_delay)
+ return 0;
+
+ return priv->info->setup_rgmii_delay(priv, port);
+}
+
+static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl ref_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_MII0_TX_CLK,
+ CLKSRC_MII1_TX_CLK,
+ CLKSRC_MII2_TX_CLK,
+ CLKSRC_MII3_TX_CLK,
+ CLKSRC_MII4_TX_CLK,
+ };
+
+ if (regs->rmii_ref_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload for packed_buf */
+ ref_clk.clksrc = clk_sources[port];
+ ref_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ ref_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ref_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl ext_tx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ if (regs->rmii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload for packed_buf */
+ ext_tx_clk.clksrc = CLKSRC_PLL1;
+ ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ ext_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ struct sja1105_cgu_pll_ctrl pll = {0};
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ if (regs->rmii_pll1 == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* PLL1 must be enabled and output 50 MHz.
+ * This is done by writing first 0x0A010941 to
+ * the PLL_1_C register and then deasserting
+ * power down (PD) 0x0A010940.
+ */
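+ /* Cross-check (editor's note): against the bit layout in
+ * sja1105_cgu_pll_control_packing(), pllclksrc=0xA (bits 28:24),
+ * msel=1 (23:16), autoblock=1 (11), psel=1 (9:8), fbsel=1 (6) and
+ * pd=1 (0) sum to exactly 0x0A010941; clearing PD gives 0x0A010940.
+ */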
+
+ /* Step 1: PLL1 setup for 50 MHz */
+ pll.pllclksrc = 0xA;
+ pll.msel = 0x1;
+ pll.autoblock = 0x1;
+ pll.psel = 0x1;
+ pll.direct = 0x0;
+ pll.fbsel = 0x1;
+ pll.bypass = 0x0;
+ pll.pd = 0x1;
+
+ sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
+ SJA1105_SIZE_CGU_CMD);
+ if (rc < 0) {
+ dev_err(dev, "failed to configure PLL1 for 50MHz\n");
+ return rc;
+ }
+
+ /* Step 2: Enable PLL1 */
+ pll.pd = 0x0;
+
+ sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
+ SJA1105_SIZE_CGU_CMD);
+ if (rc < 0) {
+ dev_err(dev, "failed to enable PLL1\n");
+ return rc;
+ }
+ return rc;
+}
+
+static int sja1105_rmii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
+{
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ dev_dbg(dev, "Configuring RMII-%s clocking\n",
+ (role == XMII_MAC) ? "MAC" : "PHY");
+ /* AH1601.pdf chapter 2.5.1. Sources */
+ if (role == XMII_MAC) {
+ /* Configure and enable PLL1 for 50 MHz output */
+ rc = sja1105_cgu_rmii_pll_config(priv);
+ if (rc < 0)
+ return rc;
+ }
+ /* Disable IDIV for this port */
+ rc = sja1105_cgu_idiv_config(priv, port, false, 1);
+ if (rc < 0)
+ return rc;
+ /* Source to sink mappings */
+ rc = sja1105_cgu_rmii_ref_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ if (role == XMII_MAC) {
+ rc = sja1105_cgu_rmii_ext_tx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
+{
+ struct sja1105_xmii_params_entry *mii;
+ struct device *dev = priv->ds->dev;
+ sja1105_phy_interface_t phy_mode;
+ sja1105_mii_role_t role;
+ int rc;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+
+ /* RGMII etc */
+ phy_mode = mii->xmii_mode[port];
+ /* MAC or PHY, for applicable types (not RGMII) */
+ role = mii->phy_mac[port];
+
+ switch (phy_mode) {
+ case XMII_MODE_MII:
+ rc = sja1105_mii_clocking_setup(priv, port, role);
+ break;
+ case XMII_MODE_RMII:
+ rc = sja1105_rmii_clocking_setup(priv, port, role);
+ break;
+ case XMII_MODE_RGMII:
+ rc = sja1105_rgmii_clocking_setup(priv, port, role);
+ break;
+ case XMII_MODE_SGMII:
+ /* Nothing to do in the CGU for SGMII */
+ rc = 0;
+ break;
+ default:
+ dev_err(dev, "Invalid interface mode specified: %d\n",
+ phy_mode);
+ return -EINVAL;
+ }
+ if (rc) {
+ dev_err(dev, "Clocking setup for port %d failed: %d\n",
+ port, rc);
+ return rc;
+ }
+
+ /* Internally pull down the RX_DV/CRS_DV/RX_CTL and RX_ER inputs */
+ return sja1105_cfg_pad_rx_config(priv, port);
+}
+
+int sja1105_clocking_setup(struct sja1105_private *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ int port, rc;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ rc = sja1105_clocking_setup_port(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+static void
+sja1110_cgu_outclk_packing(void *buf, struct sja1110_cgu_outclk *outclk,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &outclk->clksrc, 27, 24, size, op);
+ sja1105_packing(buf, &outclk->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &outclk->pd, 0, 0, size, op);
+}
+
+int sja1110_disable_microcontroller(struct sja1105_private *priv)
+{
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ struct sja1110_cgu_outclk outclk_6_c = {
+ .clksrc = 0x3,
+ .pd = true,
+ };
+ struct sja1110_cgu_outclk outclk_7_c = {
+ .clksrc = 0x5,
+ .pd = true,
+ };
+ int rc;
+
+ /* Power down the BASE_TIMER_CLK to disable the watchdog timer */
+ sja1110_cgu_outclk_packing(packed_buf, &outclk_7_c, PACK);
+
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_TIMER_CLK,
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+ if (rc)
+ return rc;
+
+ /* Power down the BASE_MCSS_CLOCK to gate the microcontroller off */
+ sja1110_cgu_outclk_packing(packed_buf, &outclk_6_c, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_MCSS_CLK,
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
new file mode 100644
index 000000000..bdbbff2a7
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ * Copyright 2020 NXP
+ */
+#include "sja1105.h"
+
+/* Since devlink regions have a fixed size and the static config has a variable
+ * size, we need to calculate the maximum possible static config size by
+ * creating a dummy config with all table entries populated to the max, and get
+ * its packed length. This is done dynamically as opposed to simply hardcoding
+ * a number: since not all static config tables are implemented yet, computing
+ * it at runtime avoids a possible code desynchronization.
+ */
+static size_t sja1105_static_config_get_max_size(struct sja1105_private *priv)
+{
+ struct sja1105_static_config config;
+ enum sja1105_blk_idx blk_idx;
+ int rc;
+
+ rc = sja1105_static_config_init(&config,
+ priv->info->static_ops,
+ priv->info->device_id);
+ if (rc)
+ return 0;
+
+ for (blk_idx = 0; blk_idx < BLK_IDX_MAX; blk_idx++) {
+ struct sja1105_table *table = &config.tables[blk_idx];
+
+ table->entry_count = table->ops->max_entry_count;
+ }
+
+ return sja1105_static_config_get_length(&config);
+}
+
+static int
+sja1105_region_static_config_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct sja1105_private *priv = ds->priv;
+ size_t max_len, len;
+
+ len = sja1105_static_config_get_length(&priv->static_config);
+ max_len = sja1105_static_config_get_max_size(priv);
+
+ *data = kcalloc(max_len, sizeof(u8), GFP_KERNEL);
+ if (!*data)
+ return -ENOMEM;
+
+ return static_config_buf_prepare_for_upload(priv, *data, len);
+}
+
+static struct devlink_region_ops sja1105_region_static_config_ops = {
+ .name = "static-config",
+ .snapshot = sja1105_region_static_config_snapshot,
+ .destructor = kfree,
+};
+
+enum sja1105_region_id {
+ SJA1105_REGION_STATIC_CONFIG = 0,
+};
+
+struct sja1105_region {
+ const struct devlink_region_ops *ops;
+ size_t (*get_size)(struct sja1105_private *priv);
+};
+
+static struct sja1105_region sja1105_regions[] = {
+ [SJA1105_REGION_STATIC_CONFIG] = {
+ .ops = &sja1105_region_static_config_ops,
+ .get_size = sja1105_static_config_get_max_size,
+ },
+};
+
+static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
+{
+ int i, num_regions = ARRAY_SIZE(sja1105_regions);
+ struct sja1105_private *priv = ds->priv;
+ const struct devlink_region_ops *ops;
+ struct devlink_region *region;
+ u64 size;
+
+ priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *),
+ GFP_KERNEL);
+ if (!priv->regions)
+ return -ENOMEM;
+
+ for (i = 0; i < num_regions; i++) {
+ size = sja1105_regions[i].get_size(priv);
+ ops = sja1105_regions[i].ops;
+
+ region = dsa_devlink_region_create(ds, ops, 1, size);
+ if (IS_ERR(region)) {
+ while (--i >= 0)
+ dsa_devlink_region_destroy(priv->regions[i]);
+
+ kfree(priv->regions);
+ return PTR_ERR(region);
+ }
+
+ priv->regions[i] = region;
+ }
+
+ return 0;
+}
+
+static void sja1105_teardown_devlink_regions(struct dsa_switch *ds)
+{
+ int i, num_regions = ARRAY_SIZE(sja1105_regions);
+ struct sja1105_private *priv = ds->priv;
+
+ for (i = 0; i < num_regions; i++)
+ dsa_devlink_region_destroy(priv->regions[i]);
+
+ kfree(priv->regions);
+}
+
+int sja1105_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ rc = devlink_info_driver_name_put(req, "sja1105");
+ if (rc)
+ return rc;
+
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ priv->info->name);
+ return rc;
+}
+
+int sja1105_devlink_setup(struct dsa_switch *ds)
+{
+ return sja1105_setup_devlink_regions(ds);
+}
+
+void sja1105_devlink_teardown(struct dsa_switch *ds)
+{
+ sja1105_teardown_devlink_regions(ds);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
new file mode 100644
index 000000000..984c0e604
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -0,0 +1,1413 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+/* In the dynamic configuration interface, the switch exposes a register-like
+ * view of some of the static configuration tables.
+ * Many times the field organization of the dynamic tables is abbreviated (not
+ * all fields are dynamically reconfigurable) and different from the static
+ * ones, but the key reason for having it is that we can spare a switch reset
+ * for settings that can be changed dynamically.
+ *
+ * This file creates a per-switch-family abstraction called
+ * struct sja1105_dynamic_table_ops and two operations that work with it:
+ * - sja1105_dynamic_config_write
+ * - sja1105_dynamic_config_read
+ *
+ * Compared to the struct sja1105_table_ops from sja1105_static_config.c,
+ * the dynamic accessors work with a compound buffer:
+ *
+ * packed_buf
+ *
+ * |
+ * V
+ * +-----------------------------------------+------------------+
+ * | ENTRY BUFFER | COMMAND BUFFER |
+ * +-----------------------------------------+------------------+
+ *
+ * <----------------------- packed_size ------------------------>
+ *
+ * The ENTRY BUFFER may or may not have the same layout, or size, as its static
+ * configuration table entry counterpart. When it does, the same packing
+ * function is reused (bar exceptional cases - see
+ * sja1105pqrs_dyn_l2_lookup_entry_packing).
+ *
+ * The reason for the COMMAND BUFFER being at the end is to be able to send
+ * a dynamic write command through a single SPI burst. By the time the switch
+ * reacts to the command, the ENTRY BUFFER is already populated with the data
+ * sent by the core.
+ *
+ * The COMMAND BUFFER is always SJA1105_SIZE_DYN_CMD bytes (one 32-bit word) in
+ * size.
+ *
+ * Sometimes the ENTRY BUFFER does not really exist (when the number of fields
+ * that can be reconfigured is small), then the switch repurposes some of the
+ * unused 32 bits of the COMMAND BUFFER to hold ENTRY data.
+ *
+ * The key members of struct sja1105_dynamic_table_ops are:
+ * - .entry_packing: A function that deals with packing an ENTRY structure
+ * into an SPI buffer, or retrieving an ENTRY structure
+ * from one.
+ * The @packed_buf pointer it is given always points to
+ * the ENTRY portion of the buffer.
+ * - .cmd_packing: A function that deals with packing/unpacking the COMMAND
+ * structure to/from the SPI buffer.
+ * It is given the same @packed_buf pointer as .entry_packing,
+ * so most of the time, the @packed_buf points *behind* the
+ * COMMAND offset inside the buffer.
+ * To access the COMMAND portion of the buffer, the function
+ * knows its correct offset.
+ * Giving both functions the same pointer is handy because in
+ * extreme cases (see sja1105pqrs_dyn_l2_lookup_entry_packing)
+ * the .entry_packing is able to jump to the COMMAND portion,
+ * or vice-versa (sja1105pqrs_l2_lookup_cmd_packing).
+ * - .access: A bitmap of:
+ * OP_READ: Set if the hardware manual marks the ENTRY portion of the
+ * dynamic configuration table buffer as R (readable) after
+ * an SPI read command (the switch will populate the buffer).
+ * OP_WRITE: Set if the manual marks the ENTRY portion of the dynamic
+ * table buffer as W (writable) after an SPI write command
+ * (the switch will read the fields provided in the buffer).
+ * OP_DEL: Set if the manual says the VALIDENT bit is supported in the
+ * COMMAND portion of this dynamic config buffer (i.e. the
+ * specified entry can be invalidated through a SPI write
+ * command).
+ * OP_SEARCH: Set if the manual says that the index of an entry can
+ * be retrieved in the COMMAND portion of the buffer based
+ * on its ENTRY portion, as a result of a SPI write command.
+ * Only the TCAM-based FDB table on SJA1105 P/Q/R/S supports
+ * this.
+ * OP_VALID_ANYWAY: Reading some tables through the dynamic config
+ * interface is possible even if the VALIDENT bit is not
+ * set in the writeback. So don't error out in that case.
+ * - .max_entry_count: The number of entries, counting from zero, that can be
+ * reconfigured through the dynamic interface. If a static
+ * table can be reconfigured at all dynamically, this
+ * number always matches the maximum number of supported
+ * static entries.
+ * - .packed_size: The length in bytes of the compound ENTRY + COMMAND BUFFER.
+ * Note that sometimes the compound buffer may contain holes in
+ * it (see sja1105_vlan_lookup_cmd_packing). The @packed_buf is
+ * contiguous however, so @packed_size includes any unused
+ * bytes.
+ * - .addr: The base SPI address at which the buffer must be written to the
+ * switch's memory. When looking at the hardware manual, this must
+ * always match the lowest documented address for the ENTRY, and not
+ * that of the COMMAND, since the other 32-bit words will follow along
+ * at the correct addresses.
+ */
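+
+/* Putting the above together, a dynamic write is one compound SPI transfer
+ * (editor's sketch only; the real logic lives in
+ * sja1105_dynamic_config_write() further down in this file):
+ *
+ *	u8 buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
+ *	struct sja1105_dyn_cmd cmd = {0};
+ *
+ *	ops->entry_packing(buf, entry, PACK);	// ENTRY at offset 0
+ *	cmd.valid = true;
+ *	cmd.rdwrset = SPI_WRITE;
+ *	ops->cmd_packing(buf, &cmd, PACK);	// COMMAND behind the entry
+ *	sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, buf, ops->packed_size);
+ */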
+
+#define SJA1105_SIZE_DYN_CMD 4
+
+#define SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_LOOKUP_ENTRY)
+
+#define SJA1110_SIZE_VL_POLICING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_POLICING_ENTRY)
+
+#define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_L2_LOOKUP_ENTRY)
+
+#define SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY)
+
+#define SJA1110_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_L2_LOOKUP_ENTRY)
+
+#define SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + 4 + SJA1105_SIZE_VLAN_LOOKUP_ENTRY)
+
+#define SJA1110_SIZE_VLAN_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_VLAN_LOOKUP_ENTRY)
+
+#define SJA1105_SIZE_L2_FORWARDING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_FORWARDING_ENTRY)
+
+#define SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY)
+
+#define SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY)
+
+#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY)
+
+#define SJA1110_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY)
+
+#define SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY)
+
+#define SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_GENERAL_PARAMS_ENTRY)
+
+#define SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY)
+
+#define SJA1105_SIZE_RETAGGING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_RETAGGING_ENTRY)
+
+#define SJA1105ET_SIZE_CBS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_CBS_ENTRY)
+
+#define SJA1105PQRS_SIZE_CBS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_CBS_ENTRY)
+
+#define SJA1110_SIZE_XMII_PARAMS_DYN_CMD \
+ SJA1110_SIZE_XMII_PARAMS_ENTRY
+
+#define SJA1110_SIZE_L2_POLICING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_POLICING_ENTRY)
+
+#define SJA1110_SIZE_L2_FORWARDING_PARAMS_DYN_CMD \
+ SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY
+
+#define SJA1105_MAX_DYN_CMD_SIZE \
+ SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD
+
+struct sja1105_dyn_cmd {
+ bool search;
+ u64 valid;
+ u64 rdwrset;
+ u64 errors;
+ u64 valident;
+ u64 index;
+};
+
+enum sja1105_hostcmd {
+ SJA1105_HOSTCMD_SEARCH = 1,
+ SJA1105_HOSTCMD_READ = 2,
+ SJA1105_HOSTCMD_WRITE = 3,
+ SJA1105_HOSTCMD_INVALIDATE = 4,
+};
+
+/* Command and entry overlap */
+static void
+sja1105et_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(buf, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->index, 9, 0, size, op);
+}
+
+/* Command and entry are separate */
+static void
+sja1105pqrs_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 9, 0, size, op);
+}
+
+static void
+sja1110_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 11, 0, size, op);
+}
+
+static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const int size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD;
+
+ sja1105_packing(buf, &entry->egrmirr, 21, 17, size, op);
+ sja1105_packing(buf, &entry->ingrmirr, 16, 16, size, op);
+ return size;
+}
+
+static void
+sja1110_vl_policing_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+	u8 *p = buf + SJA1105_SIZE_VL_POLICING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->index, 11, 0, size, op);
+}
+
+static void
+sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op, int entry_size)
+{
+ const int size = SJA1105_SIZE_DYN_CMD;
+ u8 *p = buf + entry_size;
+ u64 hostcmd;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+
+ /* VALIDENT is supposed to indicate "keep or not", but in SJA1105 E/T,
+ * using it to delete a management route was unsupported. UM10944
+ * said about it:
+ *
+ * In case of a write access with the MGMTROUTE flag set,
+ * the flag will be ignored. It will always be found cleared
+ * for read accesses with the MGMTROUTE flag set.
+ *
+ * SJA1105 P/Q/R/S keeps the same behavior w.r.t. VALIDENT, but there
+ * is now another flag called HOSTCMD which does more stuff (quoting
+ * from UM11040):
+ *
+ * A write request is accepted only when HOSTCMD is set to write host
+ * or invalid. A read request is accepted only when HOSTCMD is set to
+ * search host or read host.
+ *
+ * So it is possible to translate a RDWRSET/VALIDENT combination into
+ * HOSTCMD so that we keep the dynamic command API in place, and
+ * at the same time achieve compatibility with the management route
+ * command structure.
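+	 *
+	 * For illustration, the translation implemented below is:
+	 *
+	 *   RDWRSET    SEARCH/VALIDENT    HOSTCMD
+	 *   SPI_READ   cmd->search        SJA1105_HOSTCMD_SEARCH
+	 *   SPI_READ   !cmd->search       SJA1105_HOSTCMD_READ
+	 *   SPI_WRITE  cmd->valident      SJA1105_HOSTCMD_WRITE
+	 *   SPI_WRITE  !cmd->valident     SJA1105_HOSTCMD_INVALIDATE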
+ */
+ if (cmd->rdwrset == SPI_READ) {
+ if (cmd->search)
+ hostcmd = SJA1105_HOSTCMD_SEARCH;
+ else
+ hostcmd = SJA1105_HOSTCMD_READ;
+ } else {
+ /* SPI_WRITE */
+ if (cmd->valident)
+ hostcmd = SJA1105_HOSTCMD_WRITE;
+ else
+ hostcmd = SJA1105_HOSTCMD_INVALIDATE;
+ }
+ sja1105_packing(p, &hostcmd, 25, 23, size, op);
+}
+
+static void
+sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ int entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+
+ sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
+
+ /* Hack - The hardware takes the 'index' field within
+ * struct sja1105_l2_lookup_entry as the index on which this command
+ * will operate. However it will ignore everything else, so 'index'
+ * is logically part of command but physically part of entry.
+ * Populate the 'index' entry field from within the command callback,
+ * such that our API doesn't need to ask for a full-blown entry
+ * structure when e.g. a delete is requested.
+ */
+ sja1105_packing(buf, &cmd->index, 15, 6, entry_size, op);
+}
+
+static void
+sja1110_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ int entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+
+ sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
+
+ sja1105_packing(buf, &cmd->index, 10, 1, entry_size, op);
+}
+
+/* The switch design is unfortunate enough that it makes our command/entry
+ * abstraction crumble apart.
+ *
+ * On P/Q/R/S, the switch tries to say whether a FDB entry
+ * is statically programmed or dynamically learned via a flag called LOCKEDS.
+ * The hardware manual says about this field:
+ *
+ * On write will specify the format of ENTRY.
+ * On read the flag will be found cleared at times the VALID flag is found
+ * set. The flag will also be found cleared in response to a read having the
+ * MGMTROUTE flag set. In response to a read with the MGMTROUTE flag
+ * cleared, the flag will be set if the most recent access operated on an entry
+ * that was either loaded by configuration or through dynamic reconfiguration
+ * (as opposed to automatically learned entries).
+ *
+ * The trouble with this flag is that it's part of the *command* to access the
+ * dynamic interface, and not part of the *entry* retrieved from it.
+ * Otherwise said, for a sja1105_dynamic_config_read, LOCKEDS is supposed to be
+ * an output from the switch into the command buffer, and for a
+ * sja1105_dynamic_config_write, the switch treats LOCKEDS as an input
+ * (hence we can write either static, or automatically learned entries, from
+ * the core).
+ * But the manual contradicts itself in the last phrase where it says that on
+ * read, LOCKEDS will be set to 1 for all FDB entries written through the
+ * dynamic interface (therefore, the value of LOCKEDS from the
+ * sja1105_dynamic_config_write is not really used for anything, it'll store a
+ * 1 anyway).
+ * This means you can't really write a FDB entry with LOCKEDS=0 (automatically
+ * learned) into the switch, which kind of makes sense.
+ * As for reading through the dynamic interface, it doesn't make too much sense
+ * to put LOCKEDS into the command, since the switch will inevitably have to
+ * ignore it (otherwise a command would be like "read the FDB entry 123, but
+ * only if it's dynamically learned" <- well how am I supposed to know?) and
+ * just use it as an output buffer for its findings. But guess what... that's
+ * what the entry buffer is for!
+ * Unfortunately, what really breaks this abstraction is the fact that it
+ * wasn't designed to account for the switch outputting entry-related data
+ * as writeback through the command buffer.
+ * However, whether a FDB entry is statically or dynamically learned *is* part
+ * of the entry and not the command data, no matter what the switch thinks.
+ * In order to do that, we'll need to wrap around the
+ * sja1105pqrs_l2_lookup_entry_packing from sja1105_static_config.c, and take
+ * a peek outside of the caller-supplied @buf (the entry buffer), to reach the
+ * command buffer.
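+ *
+ * As a sketch, the wrappers below rely on @buf also being the start of the
+ * packed SPI buffer, so the command word sits right after the entry image:
+ *
+ *   @buf                          @buf + <L2 lookup entry size>
+ *   [ L2 lookup entry .......... ][ command word, LOCKEDS at bit 28 ]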
+ */
+static size_t
+sja1105pqrs_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+ u8 *cmd = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+
+ return sja1105pqrs_l2_lookup_entry_packing(buf, entry_ptr, op);
+}
+
+static size_t sja1110_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+ u8 *cmd = buf + SJA1110_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+
+ return sja1110_l2_lookup_entry_packing(buf, entry_ptr, op);
+}
+
+static void
+sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+ /* Hack - see comments above. */
+ sja1105_packing(buf, &cmd->index, 29, 20,
+ SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op);
+}
+
+static size_t sja1105et_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+ u8 *cmd = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+
+ return sja1105et_l2_lookup_entry_packing(buf, entry_ptr, op);
+}
+
+static void
+sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ u64 mgmtroute = 1;
+
+ sja1105et_l2_lookup_cmd_packing(buf, cmd, op);
+ if (op == PACK)
+ sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
+}
+
+static size_t sja1105et_mgmt_route_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_mgmt_entry *entry = entry_ptr;
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+
+ /* UM10944: To specify if a PTP egress timestamp shall be captured on
+ * each port upon transmission of the frame, the LSB of VLANID in the
+ * ENTRY field provided by the host must be set.
+ * Bit 1 of VLANID then specifies the register where the timestamp for
+ * this port is stored in.
+ */
+ sja1105_packing(buf, &entry->tsreg, 85, 85, size, op);
+ sja1105_packing(buf, &entry->takets, 84, 84, size, op);
+ sja1105_packing(buf, &entry->macaddr, 83, 36, size, op);
+ sja1105_packing(buf, &entry->destports, 35, 31, size, op);
+ sja1105_packing(buf, &entry->enfport, 30, 30, size, op);
+ return size;
+}
+
+static void
+sja1105pqrs_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ u64 mgmtroute = 1;
+
+ sja1105pqrs_l2_lookup_cmd_packing(buf, cmd, op);
+ if (op == PACK)
+ sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
+}
+
+static size_t sja1105pqrs_mgmt_route_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_mgmt_entry *entry = entry_ptr;
+
+ /* In P/Q/R/S, enfport got renamed to mgmtvalid, but its purpose
+ * is the same (driver uses it to confirm that frame was sent).
+ * So just keep the name from E/T.
+ */
+ sja1105_packing(buf, &entry->tsreg, 71, 71, size, op);
+ sja1105_packing(buf, &entry->takets, 70, 70, size, op);
+ sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
+ sja1105_packing(buf, &entry->destports, 21, 17, size, op);
+ sja1105_packing(buf, &entry->enfport, 16, 16, size, op);
+ return size;
+}
+
+/* In E/T, entry is at addresses 0x27-0x28. There is a 4 byte gap at 0x29,
+ * and command is at 0x2a. Similarly in P/Q/R/S there is a 1 register gap
+ * between entry (0x2d, 0x2e) and command (0x30).
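+ *
+ * The resulting E/T packed buffer layout (gap bytes left zeroed):
+ *
+ *   [ VLAN_LOOKUP entry ][ 4-byte gap ][ 32-bit command word ]
+ *   ^buf                               ^buf + SJA1105_SIZE_VLAN_LOOKUP_ENTRY + 4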
+ */
+static void
+sja1105_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VLAN_LOOKUP_ENTRY + 4;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+ /* Hack - see comments above, applied for 'vlanid' field of
+ * struct sja1105_vlan_lookup_entry.
+ */
+ sja1105_packing(buf, &cmd->index, 38, 27,
+ SJA1105_SIZE_VLAN_LOOKUP_ENTRY, op);
+}
+
+/* In SJA1110 there is no gap between the command and the data, yay... */
+static void
+sja1110_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1110_SIZE_VLAN_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+ u64 type_entry = 0;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ /* Hack: treat 'vlanid' field of struct sja1105_vlan_lookup_entry as
+ * cmd->index.
+ */
+ sja1105_packing(buf, &cmd->index, 38, 27,
+ SJA1110_SIZE_VLAN_LOOKUP_ENTRY, op);
+
+ /* But the VALIDENT bit has disappeared, now we are supposed to
+	 * invalidate an entry through the TYPE_ENTRY field of the entry.
+	 * This is a hack to transform the non-zero value of the TYPE_ENTRY
+	 * field into a VALIDENT bit.
+ */
+ if (op == PACK && !cmd->valident) {
+ sja1105_packing(buf, &type_entry, 40, 39,
+ SJA1110_SIZE_VLAN_LOOKUP_ENTRY, PACK);
+ } else if (op == UNPACK) {
+ sja1105_packing(buf, &type_entry, 40, 39,
+ SJA1110_SIZE_VLAN_LOOKUP_ENTRY, UNPACK);
+ cmd->valident = !!type_entry;
+ }
+}
+
+static void
+sja1105_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 4, 0, size, op);
+}
+
+static void
+sja1110_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 4, 0, size, op);
+}
+
+static void
+sja1105et_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_DYN_CMD;
+ /* Yup, user manual definitions are reversed */
+ u8 *reg1 = buf + 4;
+
+ sja1105_packing(reg1, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(reg1, &cmd->index, 26, 24, size, op);
+}
+
+static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ /* Yup, user manual definitions are reversed */
+ u8 *reg1 = buf + 4;
+ u8 *reg2 = buf;
+
+ sja1105_packing(reg1, &entry->speed, 30, 29, size, op);
+ sja1105_packing(reg1, &entry->drpdtag, 23, 23, size, op);
+ sja1105_packing(reg1, &entry->drpuntag, 22, 22, size, op);
+ sja1105_packing(reg1, &entry->retag, 21, 21, size, op);
+ sja1105_packing(reg1, &entry->dyn_learn, 20, 20, size, op);
+ sja1105_packing(reg1, &entry->egress, 19, 19, size, op);
+ sja1105_packing(reg1, &entry->ingress, 18, 18, size, op);
+ sja1105_packing(reg1, &entry->ing_mirr, 17, 17, size, op);
+ sja1105_packing(reg1, &entry->egr_mirr, 16, 16, size, op);
+ sja1105_packing(reg1, &entry->vlanprio, 14, 12, size, op);
+ sja1105_packing(reg1, &entry->vlanid, 11, 0, size, op);
+ sja1105_packing(reg2, &entry->tp_delin, 31, 16, size, op);
+ sja1105_packing(reg2, &entry->tp_delout, 15, 0, size, op);
+ /* MAC configuration table entries which can't be reconfigured:
+ * top, base, enabled, ifg, maxage, drpnona664
+ */
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+static void
+sja1105pqrs_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY;
+ u8 *p = buf + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 2, 0, size, op);
+}
+
+static void
+sja1110_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 3, 0, size, op);
+}
+
+static void
+sja1105et_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ sja1105_packing(buf, &cmd->valid, 31, 31,
+ SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op);
+}
+
+static size_t
+sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->poly, 7, 0,
+ SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op);
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+static void
+sja1105pqrs_l2_lookup_params_cmd_packing(void *buf,
+ struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+}
+
+static void
+sja1110_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+}
+
+static void
+sja1105et_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD;
+
+ sja1105_packing(buf, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->errors, 30, 30, size, op);
+}
+
+static size_t
+sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_general_params_entry *entry = entry_ptr;
+ const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD;
+
+ sja1105_packing(buf, &entry->mirr_port, 2, 0, size, op);
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+static void
+sja1105pqrs_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 28, 28, size, op);
+}
+
+static void
+sja1110_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1110_SIZE_GENERAL_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+}
+
+static void
+sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+}
+
+static void
+sja1105_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_RETAGGING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->valident, 29, 29, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 28, 28, size, op);
+ sja1105_packing(p, &cmd->index, 5, 0, size, op);
+}
+
+static void
+sja1110_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_RETAGGING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 28, 28, size, op);
+ sja1105_packing(p, &cmd->index, 4, 0, size, op);
+}
+
+static void sja1105et_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_CBS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->index, 19, 16, size, op);
+}
+
+static size_t sja1105et_cbs_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_CBS_ENTRY;
+ struct sja1105_cbs_entry *entry = entry_ptr;
+ u8 *cmd = buf + size;
+ u32 *p = buf;
+
+ sja1105_packing(cmd, &entry->port, 5, 3, SJA1105_SIZE_DYN_CMD, op);
+ sja1105_packing(cmd, &entry->prio, 2, 0, SJA1105_SIZE_DYN_CMD, op);
+ sja1105_packing(p + 3, &entry->credit_lo, 31, 0, size, op);
+ sja1105_packing(p + 2, &entry->credit_hi, 31, 0, size, op);
+ sja1105_packing(p + 1, &entry->send_slope, 31, 0, size, op);
+ sja1105_packing(p + 0, &entry->idle_slope, 31, 0, size, op);
+ return size;
+}
+
+static void sja1105pqrs_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_CBS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 3, 0, size, op);
+}
+
+static void sja1110_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_CBS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 7, 0, size, op);
+}
+
+static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_CBS_ENTRY;
+ struct sja1105_cbs_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->port, 159, 157, size, op);
+ sja1105_packing(buf, &entry->prio, 156, 154, size, op);
+ sja1105_packing(buf, &entry->credit_lo, 153, 122, size, op);
+ sja1105_packing(buf, &entry->credit_hi, 121, 90, size, op);
+ sja1105_packing(buf, &entry->send_slope, 89, 58, size, op);
+ sja1105_packing(buf, &entry->idle_slope, 57, 26, size, op);
+ return size;
+}
+
+static size_t sja1110_cbs_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_CBS_ENTRY;
+ struct sja1105_cbs_entry *entry = entry_ptr;
+ u64 entry_type = SJA1110_CBS_SHAPER;
+
+ sja1105_packing(buf, &entry_type, 159, 159, size, op);
+ sja1105_packing(buf, &entry->credit_lo, 151, 120, size, op);
+ sja1105_packing(buf, &entry->credit_hi, 119, 88, size, op);
+ sja1105_packing(buf, &entry->send_slope, 87, 56, size, op);
+ sja1105_packing(buf, &entry->idle_slope, 55, 24, size, op);
+ return size;
+}
+
+static void sja1110_dummy_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+}
+
+static void
+sja1110_l2_policing_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_L2_POLICING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 6, 0, size, op);
+}
+
+#define OP_READ BIT(0)
+#define OP_WRITE BIT(1)
+#define OP_DEL BIT(2)
+#define OP_SEARCH BIT(3)
+#define OP_VALID_ANYWAY BIT(4)
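+
+/* Each dynamic table advertises the operations it supports through the
+ * .access mask below. OP_VALID_ANYWAY makes reads be treated as successful
+ * even when the hardware reports VALIDENT as cleared - see
+ * sja1105_dynamic_config_poll_valid().
+ */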
+
+/* SJA1105E/T: First generation */
+const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1105et_vl_lookup_entry_packing,
+ .cmd_packing = sja1105et_vl_lookup_cmd_packing,
+ .access = OP_WRITE,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = 0x35,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1105et_dyn_l2_lookup_entry_packing,
+ .cmd_packing = sja1105et_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x20,
+ },
+ [BLK_IDX_MGMT_ROUTE] = {
+ .entry_packing = sja1105et_mgmt_route_entry_packing,
+ .cmd_packing = sja1105et_mgmt_route_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .max_entry_count = SJA1105_NUM_PORTS,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x20,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1105_vlan_lookup_entry_packing,
+ .cmd_packing = sja1105_vlan_lookup_cmd_packing,
+ .access = (OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = 0x27,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1105_l2_forwarding_entry_packing,
+ .cmd_packing = sja1105_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1105et_mac_config_entry_packing,
+ .cmd_packing = sja1105et_mac_config_cmd_packing,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = 0x36,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1105et_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1105et_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = 0x38,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1105et_general_params_entry_packing,
+ .cmd_packing = sja1105et_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = 0x34,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1105_retagging_entry_packing,
+ .cmd_packing = sja1105_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = 0x31,
+ },
+ [BLK_IDX_CBS] = {
+ .entry_packing = sja1105et_cbs_entry_packing,
+ .cmd_packing = sja1105et_cbs_cmd_packing,
+ .max_entry_count = SJA1105ET_MAX_CBS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_CBS_DYN_CMD,
+ .addr = 0x2c,
+ },
+};
+
+/* SJA1105P/Q/R/S: Second generation */
+const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1105_vl_lookup_entry_packing,
+ .cmd_packing = sja1105pqrs_vl_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE),
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = 0x47,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1105pqrs_dyn_l2_lookup_entry_packing,
+ .cmd_packing = sja1105pqrs_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_MGMT_ROUTE] = {
+ .entry_packing = sja1105pqrs_mgmt_route_entry_packing,
+ .cmd_packing = sja1105pqrs_mgmt_route_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH | OP_VALID_ANYWAY),
+ .max_entry_count = SJA1105_NUM_PORTS,
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1105_vlan_lookup_entry_packing,
+ .cmd_packing = sja1105_vlan_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = 0x2D,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1105_l2_forwarding_entry_packing,
+ .cmd_packing = sja1105_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = 0x2A,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1105pqrs_mac_config_entry_packing,
+ .cmd_packing = sja1105pqrs_mac_config_cmd_packing,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = 0x4B,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1105pqrs_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = 0x54,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .entry_packing = sja1105pqrs_avb_params_entry_packing,
+ .cmd_packing = sja1105pqrs_avb_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD,
+ .addr = 0x8003,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1105pqrs_general_params_entry_packing,
+ .cmd_packing = sja1105pqrs_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = 0x3B,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1105_retagging_entry_packing,
+ .cmd_packing = sja1105_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = 0x38,
+ },
+ [BLK_IDX_CBS] = {
+ .entry_packing = sja1105pqrs_cbs_entry_packing,
+ .cmd_packing = sja1105pqrs_cbs_cmd_packing,
+ .max_entry_count = SJA1105PQRS_MAX_CBS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105PQRS_SIZE_CBS_DYN_CMD,
+ .addr = 0x32,
+ },
+};
+
+/* SJA1110: Third generation */
+const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1110_vl_lookup_entry_packing,
+ .cmd_packing = sja1110_vl_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .max_entry_count = SJA1110_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x124),
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .entry_packing = sja1110_vl_policing_entry_packing,
+ .cmd_packing = sja1110_vl_policing_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .max_entry_count = SJA1110_MAX_VL_POLICING_COUNT,
+ .packed_size = SJA1110_SIZE_VL_POLICING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x310),
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1110_dyn_l2_lookup_entry_packing,
+ .cmd_packing = sja1110_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1110_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x8c),
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1110_vlan_lookup_entry_packing,
+ .cmd_packing = sja1110_vlan_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1110_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xb4),
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1110_l2_forwarding_entry_packing,
+ .cmd_packing = sja1110_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1110_MAX_L2_FORWARDING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xa8),
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1110_mac_config_entry_packing,
+ .cmd_packing = sja1110_mac_config_cmd_packing,
+ .max_entry_count = SJA1110_MAX_MAC_CONFIG_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x134),
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1110_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1110_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x158),
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .entry_packing = sja1105pqrs_avb_params_entry_packing,
+ .cmd_packing = sja1105pqrs_avb_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x2000C),
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1110_general_params_entry_packing,
+ .cmd_packing = sja1110_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xe8),
+ },
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1110_retagging_entry_packing,
+ .cmd_packing = sja1110_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xdc),
+ },
+ [BLK_IDX_CBS] = {
+ .entry_packing = sja1110_cbs_entry_packing,
+ .cmd_packing = sja1110_cbs_cmd_packing,
+ .max_entry_count = SJA1110_MAX_CBS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105PQRS_SIZE_CBS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xc4),
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .entry_packing = sja1110_xmii_params_entry_packing,
+ .cmd_packing = sja1110_dummy_cmd_packing,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ .access = (OP_READ | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_XMII_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x3c),
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .entry_packing = sja1110_l2_policing_entry_packing,
+ .cmd_packing = sja1110_l2_policing_cmd_packing,
+ .max_entry_count = SJA1110_MAX_L2_POLICING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_L2_POLICING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x2fc),
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .entry_packing = sja1110_l2_forwarding_params_entry_packing,
+ .cmd_packing = sja1110_dummy_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ .access = (OP_READ | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_L2_FORWARDING_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x20000),
+ },
+};
+
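+/* Poll period and timeout for the VALID bit handshake: check every 10 us,
+ * give up after 100 ms (see sja1105_dynamic_config_wait_complete()).
+ */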
+#define SJA1105_DYNAMIC_CONFIG_SLEEP_US 10
+#define SJA1105_DYNAMIC_CONFIG_TIMEOUT_US 100000
+
+static int
+sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
+{
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
+ struct sja1105_dyn_cmd cmd = {};
+ int rc;
+
+ /* Read back the whole entry + command structure. */
+ rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
+ ops->packed_size);
+ if (rc)
+ return rc;
+
+ /* Unpack the command structure, and return it to the caller in case it
+ * needs to perform further checks on it (VALIDENT).
+ */
+ ops->cmd_packing(packed_buf, &cmd, UNPACK);
+
+ /* Hardware hasn't cleared VALID => still working on it */
+ if (cmd.valid)
+ return -EAGAIN;
+
+ if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+ return -ENOENT;
+
+ if (check_errors && cmd.errors)
+ return -EINVAL;
+
+ /* Don't dereference possibly NULL pointer - maybe caller
+ * only wanted to see whether the entry existed or not.
+ */
+ if (entry)
+ ops->entry_packing(packed_buf, entry, UNPACK);
+
+ return 0;
+}
+
+/* Poll the dynamic config entry's control area until the hardware has
+ * cleared the VALID bit, which means we have confirmation that it has
+ * finished processing the command.
+ */
+static int
+sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
+{
+ int err, rc;
+
+ err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
+ rc, rc != -EAGAIN,
+ SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+ SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+ false, priv, ops, entry, check_valident,
+ check_errors);
+ return err < 0 ? err : rc;
+}
+
+/* Provides read access to the settings through the dynamic interface
+ * of the switch.
+ * @blk_idx is used as a key to select from the sja1105_dynamic_table_ops.
+ * The selection is limited by the hardware with respect to which
+ * configuration blocks can be read through the dynamic interface.
+ * @index is used to retrieve a particular table entry. If negative
+ * (and if the @blk_idx supports the searching operation), a search
+ * is performed using the @entry parameter.
+ * @entry Type-casted to an unpacked structure that holds a table entry
+ * of the type specified in @blk_idx.
+ * Usually an output argument. If @index is negative, then this
+ * argument is used as input/output: it should be pre-populated
+ * with the element to search for. Entries which support the
+ * search operation will have an "index" field (not the @index
+ * argument to this function) and that is where the found index
+ * will be returned (or left unmodified - thus negative - if not
+ * found).
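+ *
+ * Illustrative usage only (a sketch, not a snippet from this driver);
+ * assumes a fully probed @priv, with addr and vid provided by the caller,
+ * and searches the FDB by MAC address and VLAN ID:
+ *
+ *	struct sja1105_l2_lookup_entry l2_lookup = {0};
+ *	int rc;
+ *
+ *	l2_lookup.macaddr = ether_addr_to_u64(addr);
+ *	l2_lookup.vlanid = vid;
+ *	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ *					 SJA1105_SEARCH, &l2_lookup);
+ *
+ * On success (rc == 0), l2_lookup is overwritten with the full entry,
+ * including the index at which it was found.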
+ */
+int sja1105_dynamic_config_read(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry)
+{
+ const struct sja1105_dynamic_table_ops *ops;
+ struct sja1105_dyn_cmd cmd = {0};
+ /* SPI payload buffer */
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
+ int rc;
+
+ if (blk_idx >= BLK_IDX_MAX_DYN)
+ return -ERANGE;
+
+ ops = &priv->info->dyn_ops[blk_idx];
+
+ if (index >= 0 && index >= ops->max_entry_count)
+ return -ERANGE;
+ if (index < 0 && !(ops->access & OP_SEARCH))
+ return -EOPNOTSUPP;
+ if (!(ops->access & OP_READ))
+ return -EOPNOTSUPP;
+ if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
+ return -ERANGE;
+ if (!ops->cmd_packing)
+ return -EOPNOTSUPP;
+ if (!ops->entry_packing)
+ return -EOPNOTSUPP;
+
+ cmd.valid = true; /* Trigger action on table entry */
+ cmd.rdwrset = SPI_READ; /* Action is read */
+ if (index < 0) {
+		/* Avoid copying a signed negative number to a u64 */
+ cmd.index = 0;
+ cmd.search = true;
+ } else {
+ cmd.index = index;
+ cmd.search = false;
+ }
+ cmd.valident = true;
+ ops->cmd_packing(packed_buf, &cmd, PACK);
+
+ if (cmd.search)
+ ops->entry_packing(packed_buf, entry, PACK);
+
+ /* Send SPI write operation: read config table entry */
+ mutex_lock(&priv->dynamic_config_lock);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ ops->packed_size);
+ if (rc < 0)
+ goto out;
+
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);
+
+ return rc;
+}
+
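+/* Provides write access to the settings through the dynamic interface of
+ * the switch. Follows the same conventions as sja1105_dynamic_config_read()
+ * above, except that @index must be a valid entry index, and @keep selects
+ * between updating the entry (true) and invalidating it (false); the latter
+ * is only accepted for tables whose .access mask includes OP_DEL.
+ */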
+int sja1105_dynamic_config_write(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry, bool keep)
+{
+ const struct sja1105_dynamic_table_ops *ops;
+ struct sja1105_dyn_cmd cmd = {0};
+ /* SPI payload buffer */
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
+ int rc;
+
+ if (blk_idx >= BLK_IDX_MAX_DYN)
+ return -ERANGE;
+
+ ops = &priv->info->dyn_ops[blk_idx];
+
+ if (index >= ops->max_entry_count)
+ return -ERANGE;
+ if (index < 0)
+ return -ERANGE;
+ if (!(ops->access & OP_WRITE))
+ return -EOPNOTSUPP;
+ if (!keep && !(ops->access & OP_DEL))
+ return -EOPNOTSUPP;
+ if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
+ return -ERANGE;
+
+ cmd.valident = keep; /* If false, deletes entry */
+ cmd.valid = true; /* Trigger action on table entry */
+ cmd.rdwrset = SPI_WRITE; /* Action is write */
+ cmd.index = index;
+
+ if (!ops->cmd_packing)
+ return -EOPNOTSUPP;
+ ops->cmd_packing(packed_buf, &cmd, PACK);
+
+ if (!ops->entry_packing)
+ return -EOPNOTSUPP;
+ /* Don't dereference potentially NULL pointer if just
+ * deleting a table entry is what was requested. For cases
+	 * where the 'index' field is physically part of the entry structure,
+ * and needed here, we deal with that in the cmd_packing callback.
+ */
+ if (keep)
+ ops->entry_packing(packed_buf, entry, PACK);
+
+	/* Send SPI write operation: write config table entry */
+ mutex_lock(&priv->dynamic_config_lock);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ ops->packed_size);
+ if (rc < 0)
+ goto out;
+
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);
+
+ return rc;
+}
+
+static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if ((crc ^ byte) & (1 << 7)) {
+ crc <<= 1;
+ crc ^= poly;
+ } else {
+ crc <<= 1;
+ }
+ byte <<= 1;
+ }
+ return crc;
+}
+
+/* CRC8 algorithm with non-reversed input, non-reversed output,
+ * no input xor and no output xor. Code customized for receiving
+ * the SJA1105 E/T FDB keys (vlanid, macaddr) as input. CRC polynomial
+ * is also received as argument in the Koopman notation that the switch
+ * hardware stores it in.
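+ *
+ * For example, assuming a POLY field value of 0x97 (Koopman), the
+ * conversion below yields (u8)(1 + (0x97 << 1)) = 0x2f, i.e. the
+ * polynomial x^8 + x^5 + x^3 + x^2 + x + 1.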
+ */
+u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_params_entry *l2_lookup_params =
+ priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS].entries;
+ u64 input, poly_koopman = l2_lookup_params->poly;
+ /* Convert polynomial from Koopman to 'normal' notation */
+ u8 poly = (u8)(1 + (poly_koopman << 1));
+ u8 crc = 0; /* seed */
+ int i;
+
+ input = ((u64)vid << 48) | ether_addr_to_u64(addr);
+
+ /* Mask the eight bytes starting from MSB one at a time */
+ for (i = 56; i >= 0; i -= 8) {
+ u8 byte = (input & (0xffull << i)) >> i;
+
+ crc = sja1105_crc8_add(crc, byte, poly);
+ }
+ return crc;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
new file mode 100644
index 000000000..a1472f80a
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_DYNAMIC_CONFIG_H
+#define _SJA1105_DYNAMIC_CONFIG_H
+
+#include "sja1105.h"
+#include <linux/packing.h>
+
+/* Special index that can be used for sja1105_dynamic_config_read */
+#define SJA1105_SEARCH -1
+
+struct sja1105_dyn_cmd;
+
+struct sja1105_dynamic_table_ops {
+ /* This returns size_t just to keep same prototype as the
+ * static config ops, of which we are reusing some functions.
+ */
+ size_t (*entry_packing)(void *buf, void *entry_ptr, enum packing_op op);
+ void (*cmd_packing)(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op);
+ size_t max_entry_count;
+ size_t packed_size;
+ u64 addr;
+ u8 access;
+};
+
+struct sja1105_mgmt_entry {
+ u64 tsreg;
+ u64 takets;
+ u64 macaddr;
+ u64 destports;
+ u64 enfport;
+ u64 index;
+};
+
+extern const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN];
+extern const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN];
+extern const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN];
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
new file mode 100644
index 000000000..decc6c931
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+enum sja1105_counter_index {
+ __SJA1105_COUNTER_UNUSED,
+ /* MAC */
+ N_RUNT,
+ N_SOFERR,
+ N_ALIGNERR,
+ N_MIIERR,
+ TYPEERR,
+ SIZEERR,
+ TCTIMEOUT,
+ PRIORERR,
+ NOMASTER,
+ MEMOV,
+ MEMERR,
+ INVTYP,
+ INTCYOV,
+ DOMERR,
+ PCFBAGDROP,
+ SPCPRIOR,
+ AGEPRIOR,
+ PORTDROP,
+ LENDROP,
+ BAGDROP,
+ POLICEERR,
+ DRPNONA664ERR,
+ SPCERR,
+ AGEDRP,
+ /* HL1 */
+ N_N664ERR,
+ N_VLANERR,
+ N_UNRELEASED,
+ N_SIZEERR,
+ N_CRCERR,
+ N_VLNOTFOUND,
+ N_CTPOLERR,
+ N_POLERR,
+ N_RXFRM,
+ N_RXBYTE,
+ N_TXFRM,
+ N_TXBYTE,
+ /* HL2 */
+ N_QFULL,
+ N_PART_DROP,
+ N_EGR_DISABLED,
+ N_NOT_REACH,
+ __MAX_SJA1105ET_PORT_COUNTER,
+ /* P/Q/R/S only */
+ /* ETHER */
+ N_DROPS_NOLEARN = __MAX_SJA1105ET_PORT_COUNTER,
+ N_DROPS_NOROUTE,
+ N_DROPS_ILL_DTAG,
+ N_DROPS_DTAG,
+ N_DROPS_SOTAG,
+ N_DROPS_SITAG,
+ N_DROPS_UTAG,
+ N_TX_BYTES_1024_2047,
+ N_TX_BYTES_512_1023,
+ N_TX_BYTES_256_511,
+ N_TX_BYTES_128_255,
+ N_TX_BYTES_65_127,
+ N_TX_BYTES_64,
+ N_TX_MCAST,
+ N_TX_BCAST,
+ N_RX_BYTES_1024_2047,
+ N_RX_BYTES_512_1023,
+ N_RX_BYTES_256_511,
+ N_RX_BYTES_128_255,
+ N_RX_BYTES_65_127,
+ N_RX_BYTES_64,
+ N_RX_MCAST,
+ N_RX_BCAST,
+ __MAX_SJA1105PQRS_PORT_COUNTER,
+};
+
+struct sja1105_port_counter {
+ enum sja1105_stats_area area;
+ const char name[ETH_GSTRING_LEN];
+ int offset;
+ int start;
+ int end;
+ bool is_64bit;
+};
+
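+/* Example of how to read the table below: "n_runt" occupies bits 31:24 of
+ * the 32-bit word at offset 0 within the per-port MAC stats area, so
+ * sja1105_port_counter_read() fetches 4 bytes from
+ * priv->info->regs->stats[MAC][port] + 0x0 and unpacks bits 31..24.
+ */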
+static const struct sja1105_port_counter sja1105_port_counters[] = {
+ /* MAC-Level Diagnostic Counters */
+ [N_RUNT] = {
+ .area = MAC,
+ .name = "n_runt",
+ .offset = 0,
+ .start = 31,
+ .end = 24,
+ },
+ [N_SOFERR] = {
+ .area = MAC,
+ .name = "n_soferr",
+ .offset = 0x0,
+ .start = 23,
+ .end = 16,
+ },
+ [N_ALIGNERR] = {
+ .area = MAC,
+ .name = "n_alignerr",
+ .offset = 0x0,
+ .start = 15,
+ .end = 8,
+ },
+ [N_MIIERR] = {
+ .area = MAC,
+ .name = "n_miierr",
+ .offset = 0x0,
+ .start = 7,
+ .end = 0,
+ },
+ /* MAC-Level Diagnostic Flags */
+ [TYPEERR] = {
+ .area = MAC,
+ .name = "typeerr",
+ .offset = 0x1,
+ .start = 27,
+ .end = 27,
+ },
+ [SIZEERR] = {
+ .area = MAC,
+ .name = "sizeerr",
+ .offset = 0x1,
+ .start = 26,
+ .end = 26,
+ },
+ [TCTIMEOUT] = {
+ .area = MAC,
+ .name = "tctimeout",
+ .offset = 0x1,
+ .start = 25,
+ .end = 25,
+ },
+ [PRIORERR] = {
+ .area = MAC,
+ .name = "priorerr",
+ .offset = 0x1,
+ .start = 24,
+ .end = 24,
+ },
+ [NOMASTER] = {
+ .area = MAC,
+ .name = "nomaster",
+ .offset = 0x1,
+ .start = 23,
+ .end = 23,
+ },
+ [MEMOV] = {
+ .area = MAC,
+ .name = "memov",
+ .offset = 0x1,
+ .start = 22,
+ .end = 22,
+ },
+ [MEMERR] = {
+ .area = MAC,
+ .name = "memerr",
+ .offset = 0x1,
+ .start = 21,
+ .end = 21,
+ },
+ [INVTYP] = {
+ .area = MAC,
+ .name = "invtyp",
+ .offset = 0x1,
+ .start = 19,
+ .end = 19,
+ },
+ [INTCYOV] = {
+ .area = MAC,
+ .name = "intcyov",
+ .offset = 0x1,
+ .start = 18,
+ .end = 18,
+ },
+ [DOMERR] = {
+ .area = MAC,
+ .name = "domerr",
+ .offset = 0x1,
+ .start = 17,
+ .end = 17,
+ },
+ [PCFBAGDROP] = {
+ .area = MAC,
+ .name = "pcfbagdrop",
+ .offset = 0x1,
+ .start = 16,
+ .end = 16,
+ },
+ [SPCPRIOR] = {
+ .area = MAC,
+ .name = "spcprior",
+ .offset = 0x1,
+ .start = 15,
+ .end = 12,
+ },
+ [AGEPRIOR] = {
+ .area = MAC,
+ .name = "ageprior",
+ .offset = 0x1,
+ .start = 11,
+ .end = 8,
+ },
+ [PORTDROP] = {
+ .area = MAC,
+ .name = "portdrop",
+ .offset = 0x1,
+ .start = 6,
+ .end = 6,
+ },
+ [LENDROP] = {
+ .area = MAC,
+ .name = "lendrop",
+ .offset = 0x1,
+ .start = 5,
+ .end = 5,
+ },
+ [BAGDROP] = {
+ .area = MAC,
+ .name = "bagdrop",
+ .offset = 0x1,
+ .start = 4,
+ .end = 4,
+ },
+ [POLICEERR] = {
+ .area = MAC,
+ .name = "policeerr",
+ .offset = 0x1,
+ .start = 3,
+ .end = 3,
+ },
+ [DRPNONA664ERR] = {
+ .area = MAC,
+ .name = "drpnona664err",
+ .offset = 0x1,
+ .start = 2,
+ .end = 2,
+ },
+ [SPCERR] = {
+ .area = MAC,
+ .name = "spcerr",
+ .offset = 0x1,
+ .start = 1,
+ .end = 1,
+ },
+ [AGEDRP] = {
+ .area = MAC,
+ .name = "agedrp",
+ .offset = 0x1,
+ .start = 0,
+ .end = 0,
+ },
+ /* High-Level Diagnostic Counters */
+ [N_N664ERR] = {
+ .area = HL1,
+ .name = "n_n664err",
+ .offset = 0xF,
+ .start = 31,
+ .end = 0,
+ },
+ [N_VLANERR] = {
+ .area = HL1,
+ .name = "n_vlanerr",
+ .offset = 0xE,
+ .start = 31,
+ .end = 0,
+ },
+ [N_UNRELEASED] = {
+ .area = HL1,
+ .name = "n_unreleased",
+ .offset = 0xD,
+ .start = 31,
+ .end = 0,
+ },
+ [N_SIZEERR] = {
+ .area = HL1,
+ .name = "n_sizeerr",
+ .offset = 0xC,
+ .start = 31,
+ .end = 0,
+ },
+ [N_CRCERR] = {
+ .area = HL1,
+ .name = "n_crcerr",
+ .offset = 0xB,
+ .start = 31,
+ .end = 0,
+ },
+ [N_VLNOTFOUND] = {
+ .area = HL1,
+ .name = "n_vlnotfound",
+ .offset = 0xA,
+ .start = 31,
+ .end = 0,
+ },
+ [N_CTPOLERR] = {
+ .area = HL1,
+ .name = "n_ctpolerr",
+ .offset = 0x9,
+ .start = 31,
+ .end = 0,
+ },
+ [N_POLERR] = {
+ .area = HL1,
+ .name = "n_polerr",
+ .offset = 0x8,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RXFRM] = {
+ .area = HL1,
+ .name = "n_rxfrm",
+ .offset = 0x6,
+ .start = 31,
+ .end = 0,
+ .is_64bit = true,
+ },
+ [N_RXBYTE] = {
+ .area = HL1,
+ .name = "n_rxbyte",
+ .offset = 0x4,
+ .start = 31,
+ .end = 0,
+ .is_64bit = true,
+ },
+ [N_TXFRM] = {
+ .area = HL1,
+ .name = "n_txfrm",
+ .offset = 0x2,
+ .start = 31,
+ .end = 0,
+ .is_64bit = true,
+ },
+ [N_TXBYTE] = {
+ .area = HL1,
+ .name = "n_txbyte",
+ .offset = 0x0,
+ .start = 31,
+ .end = 0,
+ .is_64bit = true,
+ },
+ [N_QFULL] = {
+ .area = HL2,
+ .name = "n_qfull",
+ .offset = 0x3,
+ .start = 31,
+ .end = 0,
+ },
+ [N_PART_DROP] = {
+ .area = HL2,
+ .name = "n_part_drop",
+ .offset = 0x2,
+ .start = 31,
+ .end = 0,
+ },
+ [N_EGR_DISABLED] = {
+ .area = HL2,
+ .name = "n_egr_disabled",
+ .offset = 0x1,
+ .start = 31,
+ .end = 0,
+ },
+ [N_NOT_REACH] = {
+ .area = HL2,
+ .name = "n_not_reach",
+ .offset = 0x0,
+ .start = 31,
+ .end = 0,
+ },
+ /* Ether Stats */
+ [N_DROPS_NOLEARN] = {
+ .area = ETHER,
+ .name = "n_drops_nolearn",
+ .offset = 0x16,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_NOROUTE] = {
+ .area = ETHER,
+ .name = "n_drops_noroute",
+ .offset = 0x15,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_ILL_DTAG] = {
+ .area = ETHER,
+ .name = "n_drops_ill_dtag",
+ .offset = 0x14,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_DTAG] = {
+ .area = ETHER,
+ .name = "n_drops_dtag",
+ .offset = 0x13,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_SOTAG] = {
+ .area = ETHER,
+ .name = "n_drops_sotag",
+ .offset = 0x12,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_SITAG] = {
+ .area = ETHER,
+ .name = "n_drops_sitag",
+ .offset = 0x11,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_UTAG] = {
+ .area = ETHER,
+ .name = "n_drops_utag",
+ .offset = 0x10,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_1024_2047] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_1024_2047",
+ .offset = 0x0F,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_512_1023] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_512_1023",
+ .offset = 0x0E,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_256_511] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_256_511",
+ .offset = 0x0D,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_128_255] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_128_255",
+ .offset = 0x0C,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_65_127] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_65_127",
+ .offset = 0x0B,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_64] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_64",
+ .offset = 0x0A,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_MCAST] = {
+ .area = ETHER,
+ .name = "n_tx_mcast",
+ .offset = 0x09,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BCAST] = {
+ .area = ETHER,
+ .name = "n_tx_bcast",
+ .offset = 0x08,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_1024_2047] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_1024_2047",
+ .offset = 0x07,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_512_1023] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_512_1023",
+ .offset = 0x06,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_256_511] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_256_511",
+ .offset = 0x05,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_128_255] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_128_255",
+ .offset = 0x04,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_65_127] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_65_127",
+ .offset = 0x03,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_64] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_64",
+ .offset = 0x02,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_MCAST] = {
+ .area = ETHER,
+ .name = "n_rx_mcast",
+ .offset = 0x01,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BCAST] = {
+ .area = ETHER,
+ .name = "n_rx_bcast",
+ .offset = 0x00,
+ .start = 31,
+ .end = 0,
+ },
+};
+
+static int sja1105_port_counter_read(struct sja1105_private *priv, int port,
+ enum sja1105_counter_index idx, u64 *ctr)
+{
+ const struct sja1105_port_counter *c = &sja1105_port_counters[idx];
+ size_t size = c->is_64bit ? 8 : 4;
+ u8 buf[8] = {0};
+ u64 regs;
+ int rc;
+
+ regs = priv->info->regs->stats[c->area][port];
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs + c->offset, buf, size);
+ if (rc)
+ return rc;
+
+ sja1105_unpack(buf, ctr, c->start, c->end, size);
+
+ return 0;
+}
+
+void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ enum sja1105_counter_index max_ctr, i;
+ int rc, k = 0;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ max_ctr = __MAX_SJA1105ET_PORT_COUNTER;
+ else
+ max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
+
+	for (i = 0; i < max_ctr; i++) {
+		if (!strlen(sja1105_port_counters[i].name))
+			continue;
+
+		rc = sja1105_port_counter_read(priv, port, i, &data[k++]);
+ if (rc) {
+ dev_err(ds->dev,
+ "Failed to read port %d counters: %d\n",
+ port, rc);
+ break;
+ }
+ }
+}
+
+void sja1105_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ enum sja1105_counter_index max_ctr, i;
+ char *p = data;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ max_ctr = __MAX_SJA1105ET_PORT_COUNTER;
+ else
+ max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
+
+	for (i = 0; i < max_ctr; i++) {
+		if (!strlen(sja1105_port_counters[i].name))
+			continue;
+
+		strscpy(p, sja1105_port_counters[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct sja1105_private *priv = ds->priv;
+ enum sja1105_counter_index max_ctr, i;
+ int sset_count = 0;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ max_ctr = __MAX_SJA1105ET_PORT_COUNTER;
+ else
+ max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
+
+ for (i = 0; i < max_ctr; i++) {
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
+ sset_count++;
+ }
+
+ return sset_count;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
new file mode 100644
index 000000000..fad5afe38
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2020 NXP
+ */
+#include "sja1105.h"
+#include "sja1105_vl.h"
+
+struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
+ unsigned long cookie)
+{
+ struct sja1105_rule *rule;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list)
+ if (rule->cookie == cookie)
+ return rule;
+
+ return NULL;
+}
+
+static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
+{
+ int i;
+
+ for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
+ if (!priv->flow_block.l2_policer_used[i])
+ return i;
+
+ return -1;
+}
+
+static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie, int port,
+ u64 rate_bytes_per_sec,
+ u32 burst)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct sja1105_l2_policing_entry *policing;
+ struct dsa_switch *ds = priv->ds;
+ bool new_rule = false;
+ unsigned long p;
+ int rc;
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_BCAST_POLICER;
+ rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
+ rule->key.type = SJA1105_KEY_BCAST;
+ new_rule = true;
+ }
+
+ if (rule->bcast_pol.sharindx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[(ds->num_ports * SJA1105_NUM_TC) + port].sharindx != port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port already has a broadcast policer");
+ rc = -EEXIST;
+ goto out;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ /* Make the broadcast policers of all ports attached to this block
+ * point to the newly allocated policer
+ */
+ for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
+ int bcast = (ds->num_ports * SJA1105_NUM_TC) + p;
+
+ policing[bcast].sharindx = rule->bcast_pol.sharindx;
+ }
+
+ policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
+ 512, 1000000);
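+	/* Note: bytes/sec * 512 / 10^6 equals bits/sec / 15625, which
+	 * suggests the hardware RATE field is expressed in units of
+	 * 15.625 kbps.
+	 */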
+ policing[rule->bcast_pol.sharindx].smax = burst;
+
+ /* TODO: support per-flow MTU */
+ policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
+ ETH_FCS_LEN;
+
+ rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+
+out:
+ if (rc == 0 && new_rule) {
+ priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
+ list_add(&rule->list, &priv->flow_block.rules);
+ } else if (new_rule) {
+ kfree(rule);
+ }
+
+ return rc;
+}
+
+static int sja1105_setup_tc_policer(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie, int port, int tc,
+ u64 rate_bytes_per_sec,
+ u32 burst)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct sja1105_l2_policing_entry *policing;
+ bool new_rule = false;
+ unsigned long p;
+ int rc;
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_TC_POLICER;
+ rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
+ rule->key.type = SJA1105_KEY_TC;
+ rule->key.tc.pcp = tc;
+ new_rule = true;
+ }
+
+ if (rule->tc_pol.sharindx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port-TC pair already has an L2 policer");
+ rc = -EEXIST;
+ goto out;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ /* Make the policers for traffic class @tc of all ports attached to
+ * this block point to the newly allocated policer
+ */
+ for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
+ int index = (p * SJA1105_NUM_TC) + tc;
+
+ policing[index].sharindx = rule->tc_pol.sharindx;
+ }
+
+ policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
+ 512, 1000000);
+ policing[rule->tc_pol.sharindx].smax = burst;
+
+ /* TODO: support per-flow MTU */
+ policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
+ ETH_FCS_LEN;
+
+ rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+
+out:
+ if (rc == 0 && new_rule) {
+ priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
+ list_add(&rule->list, &priv->flow_block.rules);
+ } else if (new_rule) {
+ kfree(rule);
+ }
+
+ return rc;
+}
+
+static int sja1105_flower_policer(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key,
+ u64 rate_bytes_per_sec,
+ u32 burst)
+{
+ switch (key->type) {
+ case SJA1105_KEY_BCAST:
+ return sja1105_setup_bcast_policer(priv, extack, cookie, port,
+ rate_bytes_per_sec, burst);
+ case SJA1105_KEY_TC:
+ return sja1105_setup_tc_policer(priv, extack, cookie, port,
+ key->tc.pcp, rate_bytes_per_sec,
+ burst);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int sja1105_flower_parse_key(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ struct sja1105_key *key)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ bool is_bcast_dmac = false;
+ u64 dmac = U64_MAX;
+ u16 vid = U16_MAX;
+ u16 pcp = U16_MAX;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ if (match.key->n_proto) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on protocol not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!ether_addr_equal_masked(match.key->src, null,
+ match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ether_addr_equal(match.mask->dst, bcast)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ dmac = ether_addr_to_u64(match.key->dst);
+ is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.mask->vlan_id &&
+ match.mask->vlan_id != VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on VID is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->vlan_priority &&
+ match.mask->vlan_priority != 0x7) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on PCP is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->vlan_id)
+ vid = match.key->vlan_id;
+ if (match.mask->vlan_priority)
+ pcp = match.key->vlan_priority;
+ }
+
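+ /* Map the matched fields to one of the driver's key types: a broadcast
+ * DMAC matched alone selects the broadcast policer key, a PCP matched
+ * alone selects a traffic class key, and DMAC-based matches (with or
+ * without VID and PCP) become virtual link keys.
+ */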
+ if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
+ key->type = SJA1105_KEY_BCAST;
+ return 0;
+ }
+ if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
+ key->type = SJA1105_KEY_TC;
+ key->tc.pcp = pcp;
+ return 0;
+ }
+ if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
+ key->type = SJA1105_KEY_VLAN_AWARE_VL;
+ key->vl.dmac = dmac;
+ key->vl.vid = vid;
+ key->vl.pcp = pcp;
+ return 0;
+ }
+ if (dmac != U64_MAX) {
+ key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
+ key->vl.dmac = dmac;
+ return 0;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
+ return -EOPNOTSUPP;
+}
+
+static int sja1105_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
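+/* Illustrative usage (hypothetical interface name swp2) that reaches this
+ * entry point with a broadcast policer rule:
+ *
+ *   tc qdisc add dev swp2 clsact
+ *   tc filter add dev swp2 ingress flower skip_sw \
+ *           dst_mac ff:ff:ff:ff:ff:ff \
+ *           action police rate 10mbit burst 64k conform-exceed drop/pipe
+ *
+ * The broadcast DMAC parses to SJA1105_KEY_BCAST, so the police action is
+ * handled by sja1105_setup_bcast_policer() above.
+ */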
+int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct sja1105_private *priv = ds->priv;
+ const struct flow_action_entry *act;
+ unsigned long cookie = cls->cookie;
+ bool routing_rule = false;
+ struct sja1105_key key;
+ bool gate_rule = false;
+ bool vl_rule = false;
+ int rc, i;
+
+ rc = sja1105_flower_parse_key(priv, extack, cls, &key);
+ if (rc)
+ return rc;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ rc = sja1105_policer_validate(&rule->action, act, extack);
+ if (rc)
+ goto out;
+
+ rc = sja1105_flower_policer(priv, port, extack, cookie,
+ &key,
+ act->police.rate_bytes_ps,
+ act->police.burst);
+ if (rc)
+ goto out;
+ break;
+ case FLOW_ACTION_TRAP: {
+ int cpu = dsa_upstream_port(ds, port);
+
+ routing_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, BIT(cpu), true);
+ if (rc)
+ goto out;
+ break;
+ }
+ case FLOW_ACTION_REDIRECT: {
+ struct dsa_port *to_dp;
+
+ to_dp = dsa_port_from_netdev(act->dev);
+ if (IS_ERR(to_dp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a switch port");
+ return -EOPNOTSUPP;
+ }
+
+ routing_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, BIT(to_dp->index), true);
+ if (rc)
+ goto out;
+ break;
+ }
+ case FLOW_ACTION_DROP:
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, 0, false);
+ if (rc)
+ goto out;
+ break;
+ case FLOW_ACTION_GATE:
+ gate_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_gate(priv, port, extack, cookie,
+ &key, act->hw_index,
+ act->gate.prio,
+ act->gate.basetime,
+ act->gate.cycletime,
+ act->gate.cycletimeext,
+ act->gate.num_entries,
+ act->gate.entries);
+ if (rc)
+ goto out;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ if (vl_rule && !rc) {
+ /* Delay scheduling configuration until DESTPORTS has been
+ * populated by all other actions.
+ */
+ if (gate_rule) {
+ if (!routing_rule) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload gate action together with redirect or trap");
+ return -EOPNOTSUPP;
+ }
+ rc = sja1105_init_scheduling(priv);
+ if (rc)
+ goto out;
+ }
+
+ rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
+ }
+
+out:
+ return rc;
+}
+
+int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
+ struct sja1105_l2_policing_entry *policing;
+ int old_sharindx;
+
+ if (!rule)
+ return 0;
+
+ if (rule->type == SJA1105_RULE_VL)
+ return sja1105_vl_delete(priv, port, rule, cls->common.extack);
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (rule->type == SJA1105_RULE_BCAST_POLICER) {
+ int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
+
+ old_sharindx = policing[bcast].sharindx;
+ policing[bcast].sharindx = port;
+ } else if (rule->type == SJA1105_RULE_TC_POLICER) {
+ int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;
+
+ old_sharindx = policing[index].sharindx;
+ policing[index].sharindx = port;
+ } else {
+ return -EINVAL;
+ }
+
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ priv->flow_block.l2_policer_used[old_sharindx] = false;
+ list_del(&rule->list);
+ kfree(rule);
+ }
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
+int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
+
+ if (!rule)
+ return 0;
+
+ if (rule->type != SJA1105_RULE_VL)
+ return 0;
+
+ return sja1105_vl_stats(priv, port, rule, &cls->stats,
+ cls->common.extack);
+}
+
+void sja1105_flower_setup(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ int port;
+
+ INIT_LIST_HEAD(&priv->flow_block.rules);
+
+ for (port = 0; port < ds->num_ports; port++)
+ priv->flow_block.l2_policer_used[port] = true;
+}
+
+void sja1105_flower_teardown(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule;
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &priv->flow_block.rules) {
+ rule = list_entry(pos, struct sja1105_rule, list);
+ list_del(&rule->list);
+ kfree(rule);
+ }
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
new file mode 100644
index 000000000..f1f1368e8
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -0,0 +1,3481 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/spi/spi.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/phylink.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_device.h>
+#include <linux/pcs/pcs-xpcs.h>
+#include <linux/netdev_features.h>
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/dsa/8021q.h>
+#include "sja1105.h"
+#include "sja1105_tas.h"
+
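+/* A u64-packed DMAC with only bit 40 set: the multicast (I/G) bit of the
+ * first octet. Used as both value and mask below, it matches every
+ * multicast address.
+ */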
+#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
+
+/* Configure the optional reset pin and bring up switch */
+static int sja1105_hw_reset(struct device *dev, unsigned int pulse_len,
+ unsigned int startup_delay)
+{
+ struct gpio_desc *gpio;
+
+ gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (!gpio)
+ return 0;
+
+ gpiod_set_value_cansleep(gpio, 1);
+ /* Wait for minimum reset pulse length */
+ msleep(pulse_len);
+ gpiod_set_value_cansleep(gpio, 0);
+ /* Wait until chip is ready after reset */
+ msleep(startup_delay);
+
+ gpiod_put(gpio);
+
+ return 0;
+}
+
+static void
+sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
+ int from, int to, bool allow)
+{
+ if (allow)
+ l2_fwd[from].reach_port |= BIT(to);
+ else
+ l2_fwd[from].reach_port &= ~BIT(to);
+}
+
+static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd,
+ int from, int to)
+{
+ return !!(l2_fwd[from].reach_port & BIT(to));
+}
+
+static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ int count, i;
+
+ vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
+ count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
+
+ for (i = 0; i < count; i++)
+ if (vlan[i].vlanid == vid)
+ return i;
+
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+static int sja1105_drop_untagged(struct dsa_switch *ds, int port, bool drop)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ if (mac[port].drpuntag == drop)
+ return 0;
+
+ mac[port].drpuntag = drop;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
+{
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ if (mac[port].vlanid == pvid)
+ return 0;
+
+ mac[port].vlanid = pvid;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_commit_pvid(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_vlan_lookup_entry *vlan;
+ bool drop_untagged = false;
+ int match, rc;
+ u16 pvid;
+
+ if (br && br_vlan_enabled(br))
+ pvid = priv->bridge_pvid[port];
+ else
+ pvid = priv->tag_8021q_pvid[port];
+
+ rc = sja1105_pvid_apply(priv, port, pvid);
+ if (rc)
+ return rc;
+
+ /* Only force dropping of untagged packets when the port is under a
+ * VLAN-aware bridge. When the tag_8021q pvid is used, we are
+ * deliberately removing the RX VLAN from the port's VMEMB_PORT list,
+ * to prevent DSA tag spoofing from the link partner. Untagged packets
+ * are the only ones that should be received with tag_8021q, so
+ * definitely don't drop them.
+ */
+ if (pvid == priv->bridge_pvid[port]) {
+ vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
+
+ match = sja1105_is_vlan_configured(priv, pvid);
+
+ if (match < 0 || !(vlan[match].vmemb_port & BIT(port)))
+ drop_untagged = true;
+ }
+
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ drop_untagged = true;
+
+ return sja1105_drop_untagged(ds, port, drop_untagged);
+}
+
+static int sja1105_init_mac_settings(struct sja1105_private *priv)
+{
+ struct sja1105_mac_config_entry default_mac = {
+ /* Enable all 8 priority queues on egress.
+ * Every queue i holds top[i] - base[i] frames.
+ * Sum of top[i] - base[i] is 511 (max hardware limit).
+ */
+ .top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
+ .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
+ .enabled = {true, true, true, true, true, true, true, true},
+ /* Keep standard IFG of 12 bytes on egress. */
+ .ifg = 0,
+ /* Always put the MAC speed in automatic mode, where it can be
+ * adjusted at runtime by PHYLINK.
+ */
+ .speed = priv->info->port_speed[SJA1105_SPEED_AUTO],
+ /* No static correction for 1-step 1588 events */
+ .tp_delin = 0,
+ .tp_delout = 0,
+ /* Disable aging for critical TTEthernet traffic */
+ .maxage = 0xFF,
+ /* Internal VLAN (pvid) to apply to untagged ingress */
+ .vlanprio = 0,
+ .vlanid = 1,
+ .ing_mirr = false,
+ .egr_mirr = false,
+ /* Don't drop traffic with an EtherType other than ETH_P_IP */
+ .drpnona664 = false,
+ /* Don't drop double-tagged traffic */
+ .drpdtag = false,
+ /* Don't drop untagged traffic */
+ .drpuntag = false,
+ /* Don't retag 802.1p (VID 0) traffic with the pvid */
+ .retag = false,
+ /* Disable learning and I/O on user ports by default -
+ * STP will enable it.
+ */
+ .dyn_learn = false,
+ .egress = false,
+ .ingress = false,
+ };
+ struct sja1105_mac_config_entry *mac;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ struct dsa_port *dp;
+
+ table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
+
+ /* Discard previous MAC Configuration Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ mac = table->entries;
+
+ list_for_each_entry(dp, &ds->dst->ports, list) {
+ if (dp->ds != ds)
+ continue;
+
+ mac[dp->index] = default_mac;
+
+ /* Let sja1105_bridge_stp_state_set() keep address learning
+ * enabled for the DSA ports. CPU ports use software-assisted
+ * learning to ensure that only FDB entries belonging to the
+ * bridge are learned, and that they are learned towards all
+ * CPU ports in a cross-chip topology if multiple CPU ports
+ * exist.
+ */
+ if (dsa_port_is_dsa(dp))
+ dp->learning = true;
+
+ /* Disallow untagged packets from being received on the
+ * CPU and DSA ports.
+ */
+ if (dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))
+ mac[dp->index].drpuntag = true;
+ }
+
+ return 0;
+}
+
+static int sja1105_init_mii_settings(struct sja1105_private *priv)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct sja1105_xmii_params_entry *mii;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
+
+ /* Discard previous xMII Mode Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ /* Override table based on PHYLINK DT bindings */
+ table->entry_count = table->ops->max_entry_count;
+
+ mii = table->entries;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ sja1105_mii_role_t role = XMII_MAC;
+
+ if (dsa_is_unused_port(priv->ds, i))
+ continue;
+
+ switch (priv->phy_mode[i]) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ if (priv->info->internal_phy[i] == SJA1105_NO_PHY)
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_MII;
+ if (priv->info->internal_phy[i] == SJA1105_PHY_BASE_TX)
+ mii->special[i] = true;
+
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ role = XMII_PHY;
+ fallthrough;
+ case PHY_INTERFACE_MODE_MII:
+ if (!priv->info->supports_mii[i])
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_MII;
+ break;
+ case PHY_INTERFACE_MODE_REVRMII:
+ role = XMII_PHY;
+ fallthrough;
+ case PHY_INTERFACE_MODE_RMII:
+ if (!priv->info->supports_rmii[i])
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_RMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (!priv->info->supports_rgmii[i])
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (!priv->info->supports_sgmii[i])
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_SGMII;
+ mii->special[i] = true;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (!priv->info->supports_2500basex[i])
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_SGMII;
+ mii->special[i] = true;
+ break;
+unsupported:
+ default:
+ dev_err(dev, "Unsupported PHY mode %s on port %d!\n",
+ phy_modes(priv->phy_mode[i]), i);
+ return -EINVAL;
+ }
+
+ mii->phy_mac[i] = role;
+ }
+ return 0;
+}
+
+static int sja1105_init_static_fdb(struct sja1105_private *priv)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int port;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+
+ /* We only populate the FDB table through dynamic L2 Address Lookup
+ * entries, except for a special entry at the end which is a catch-all
+ * for unknown multicast and will be used to control flooding domain.
+ */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ if (!priv->info->can_limit_mcast_flood)
+ return 0;
+
+ table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = 1;
+ l2_lookup = table->entries;
+
+ /* All L2 multicast addresses have an odd first octet */
+ l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST;
+ l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST;
+ l2_lookup[0].lockeds = true;
+ l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1;
+
+ /* Flood multicast to every port by default */
+ for (port = 0; port < priv->ds->num_ports; port++)
+ if (!dsa_is_unused_port(priv->ds, port))
+ l2_lookup[0].destports |= BIT(port);
+
+ return 0;
+}
+
+static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
+{
+ struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
+ /* Learned FDB entries are forgotten after 300 seconds */
+ .maxage = SJA1105_AGEING_TIME_MS(300000),
+ /* All entries within a FDB bin are available for learning */
+ .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
+ /* And the P/Q/R/S equivalent setting: */
+ .start_dynspc = 0,
+ /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
+ .poly = 0x97,
+ /* Always use Independent VLAN Learning (IVL) */
+ .shared_learn = false,
+ /* Don't discard management traffic based on ENFPORT -
+ * we don't perform SMAC port enforcement anyway, so
+ * what we are setting here doesn't matter.
+ */
+ .no_enf_hostprt = false,
+ /* Don't learn SMAC for mac_fltres1 and mac_fltres0.
+ * Maybe correlate with no_linklocal_learn from bridge driver?
+ */
+ .no_mgmt_learn = true,
+ /* P/Q/R/S only */
+ .use_static = true,
+ /* Dynamically learned FDB entries can overwrite other (older)
+ * dynamic FDB entries
+ */
+ .owr_dyn = true,
+ .drpnolearn = true,
+ };
+ struct dsa_switch *ds = priv->ds;
+ int port, num_used_ports = 0;
+ struct sja1105_table *table;
+ u64 max_fdb_entries;
+
+ for (port = 0; port < ds->num_ports; port++)
+ if (!dsa_is_unused_port(ds, port))
+ num_used_ports++;
+
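+ /* Statically split the FDB capacity equally between all ports in use */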
+ max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / num_used_ports;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ default_l2_lookup_params.maxaddrp[port] = max_fdb_entries;
+ }
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ /* This table only has a single entry */
+ ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
+ default_l2_lookup_params;
+
+ return 0;
+}
+
+/* Set up a default VLAN for untagged traffic injected from the CPU
+ * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
+ * All DT-defined ports are members of this VLAN, and there are no
+ * restrictions on forwarding (since the CPU selects the destination).
+ * Frames from this VLAN will always be transmitted as untagged, and
+ * neither the bridge nor the 8021q module can create this VLAN ID.
+ */
+static int sja1105_init_static_vlan(struct sja1105_private *priv)
+{
+ struct sja1105_table *table;
+ struct sja1105_vlan_lookup_entry pvid = {
+ .type_entry = SJA1110_VLAN_D_TAG,
+ .ving_mirr = 0,
+ .vegr_mirr = 0,
+ .vmemb_port = 0,
+ .vlan_bc = 0,
+ .tag_port = 0,
+ .vlanid = SJA1105_DEFAULT_VLAN,
+ };
+ struct dsa_switch *ds = priv->ds;
+ int port;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kzalloc(table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = 1;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ pvid.vmemb_port |= BIT(port);
+ pvid.vlan_bc |= BIT(port);
+ pvid.tag_port &= ~BIT(port);
+
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ priv->tag_8021q_pvid[port] = SJA1105_DEFAULT_VLAN;
+ priv->bridge_pvid[port] = SJA1105_DEFAULT_VLAN;
+ }
+ }
+
+ ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
+ return 0;
+}
+
+static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_entry *l2fwd;
+ struct dsa_switch *ds = priv->ds;
+ struct dsa_switch_tree *dst;
+ struct sja1105_table *table;
+ struct dsa_link *dl;
+ int port, tc;
+ int from, to;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ l2fwd = table->entries;
+
+ /* The first ds->num_ports entries in the L2 Forwarding Table define
+ * the forwarding rules and the VLAN PCP to ingress queue mapping.
+ * Set up the ingress queue mapping first.
+ */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+ l2fwd[port].vlan_pmap[tc] = tc;
+ }
+
+ /* Then manage the forwarding domain for user ports. These can forward
+ * only to the always-on domain (CPU port and DSA links)
+ */
+ for (from = 0; from < ds->num_ports; from++) {
+ if (!dsa_is_user_port(ds, from))
+ continue;
+
+ for (to = 0; to < ds->num_ports; to++) {
+ if (!dsa_is_cpu_port(ds, to) &&
+ !dsa_is_dsa_port(ds, to))
+ continue;
+
+ l2fwd[from].bc_domain |= BIT(to);
+ l2fwd[from].fl_domain |= BIT(to);
+
+ sja1105_port_allow_traffic(l2fwd, from, to, true);
+ }
+ }
+
+ /* Then manage the forwarding domain for DSA links and CPU ports (the
+ * always-on domain). These can send packets to any enabled port except
+ * themselves.
+ */
+ for (from = 0; from < ds->num_ports; from++) {
+ if (!dsa_is_cpu_port(ds, from) && !dsa_is_dsa_port(ds, from))
+ continue;
+
+ for (to = 0; to < ds->num_ports; to++) {
+ if (dsa_is_unused_port(ds, to))
+ continue;
+
+ if (from == to)
+ continue;
+
+ l2fwd[from].bc_domain |= BIT(to);
+ l2fwd[from].fl_domain |= BIT(to);
+
+ sja1105_port_allow_traffic(l2fwd, from, to, true);
+ }
+ }
+
+ /* In odd topologies ("H" connections where there is a DSA link to
+ * another switch which also has its own CPU port), TX packets can loop
+ * back into the system (they are flooded from CPU port 1 to the DSA
+ * link, and from there to CPU port 2). Prevent this from happening by
+ * cutting RX from DSA links towards our CPU port, if the remote switch
+ * has its own CPU port and therefore doesn't need ours for network
+ * stack termination.
+ */
+ dst = ds->dst;
+
+ list_for_each_entry(dl, &dst->rtable, list) {
+ if (dl->dp->ds != ds || dl->link_dp->cpu_dp == dl->dp->cpu_dp)
+ continue;
+
+ from = dl->dp->index;
+ to = dsa_upstream_port(ds, from);
+
+ dev_warn(ds->dev,
+ "H topology detected, cutting RX from DSA link %d to CPU port %d to prevent TX packet loops\n",
+ from, to);
+
+ sja1105_port_allow_traffic(l2fwd, from, to, false);
+
+ l2fwd[from].bc_domain &= ~BIT(to);
+ l2fwd[from].fl_domain &= ~BIT(to);
+ }
+
+ /* Finally, manage the egress flooding domain. All ports start up with
+ * flooding enabled, including the CPU port and DSA links.
+ */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ priv->ucast_egress_floods |= BIT(port);
+ priv->bcast_egress_floods |= BIT(port);
+ }
+
+ /* Next 8 entries define VLAN PCP mapping from ingress to egress.
+ * Create a one-to-one mapping.
+ */
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++) {
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ l2fwd[ds->num_ports + tc].vlan_pmap[port] = tc;
+ }
+
+ l2fwd[ds->num_ports + tc].type_egrpcp2outputq = true;
+ }
+
+ return 0;
+}
+
+static int sja1110_init_pcp_remapping(struct sja1105_private *priv)
+{
+ struct sja1110_pcp_remapping_entry *pcp_remap;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int port, tc;
+
+ table = &priv->static_config.tables[BLK_IDX_PCP_REMAPPING];
+
+ /* Nothing to do for SJA1105 */
+ if (!table->ops->max_entry_count)
+ return 0;
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ pcp_remap = table->entries;
+
+ /* Repeat the configuration done for vlan_pmap */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+ pcp_remap[port].egrpcp[tc] = tc;
+ }
+
+ return 0;
+}
+
+static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_params_entry *l2fwd_params;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ /* This table only has a single entry */
+ l2fwd_params = table->entries;
+
+ /* Disallow dynamic reconfiguration of vlan_pmap */
+ l2fwd_params->max_dynp = 0;
+ /* Use a single memory partition for all ingress queues */
+ l2fwd_params->part_spc[0] = priv->info->max_frame_mem;
+
+ return 0;
+}
+
+void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
+ struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
+ l2_fwd_params = table->entries;
+ l2_fwd_params->part_spc[0] = SJA1105_MAX_FRAME_MEMORY;
+
+ /* If we have any critical-traffic virtual links, we need to reserve
+ * some frame buffer memory for them. At the moment, hardcode the value
+ * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
+ * remaining for best-effort traffic. TODO: figure out a more flexible
+ * way to perform the frame buffer partitioning.
+ */
+ if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
+ return;
+
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ vl_fwd_params = table->entries;
+
+ l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
+ vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
+}
+
+/* SJA1110 TDMACONFIGIDX values:
+ *
+ * | 100 Mbps ports | 1Gbps ports | 2.5Gbps ports | Disabled ports
+ * -----+----------------+---------------+---------------+---------------
+ * 0 | 0, [5:10] | [1:2] | [3:4] | retag
+ * 1 |0, [5:10], retag| [1:2] | [3:4] | -
+ * 2 | 0, [5:10] | [1:3], retag | 4 | -
+ * 3 | 0, [5:10] |[1:2], 4, retag| 3 | -
+ * 4 | 0, 2, [5:10] | 1, retag | [3:4] | -
+ * 5 | 0, 1, [5:10] | 2, retag | [3:4] | -
+ * 14 | 0, [5:10] | [1:4], retag | - | -
+ * 15 | [5:10] | [0:4], retag | - | -
+ */
+static void sja1110_select_tdmaconfigidx(struct sja1105_private *priv)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_table *table;
+ bool port_1_is_base_tx;
+ bool port_3_is_2500;
+ bool port_4_is_2500;
+ u64 tdmaconfigidx;
+
+ if (priv->info->device_id != SJA1110_DEVICE_ID)
+ return;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+
+ /* All the settings below are "as opposed to SGMII", which is the
+ * other pinmuxing option.
+ */
+ port_1_is_base_tx = priv->phy_mode[1] == PHY_INTERFACE_MODE_INTERNAL;
+ port_3_is_2500 = priv->phy_mode[3] == PHY_INTERFACE_MODE_2500BASEX;
+ port_4_is_2500 = priv->phy_mode[4] == PHY_INTERFACE_MODE_2500BASEX;
+
+ if (port_1_is_base_tx)
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 5;
+ else if (port_3_is_2500 && port_4_is_2500)
+ /* Retagging port will operate at 100 Mbps */
+ tdmaconfigidx = 1;
+ else if (port_3_is_2500)
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 3;
+ else if (port_4_is_2500)
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 2;
+ else
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 14;
+
+ general_params->tdmaconfigidx = tdmaconfigidx;
+}
+
+static int sja1105_init_topology(struct sja1105_private *priv,
+ struct sja1105_general_params_entry *general_params)
+{
+ struct dsa_switch *ds = priv->ds;
+ int port;
+
+ /* The host port is the destination for traffic matching mac_fltres1
+ * and mac_fltres0 on all ports except itself. Default to an invalid
+ * value.
+ */
+ general_params->host_port = ds->num_ports;
+
+ /* Link-local traffic received on casc_port will be forwarded
+ * to host_port without embedding the source port and device ID
+ * info in the destination MAC address, and no RX timestamps will be
+ * taken either (presumably because it is a cascaded port and a
+ * downstream SJA switch already did that).
+ * To disable the feature, we need to do different things depending on
+ * switch generation. On SJA1105 we need to set an invalid port, while
+ * on SJA1110, which supports multiple cascaded ports, this field is a
+ * bitmask and must be left zero.
+ */
+ if (!priv->info->multiple_cascade_ports)
+ general_params->casc_port = ds->num_ports;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ bool is_upstream = dsa_is_upstream_port(ds, port);
+ bool is_dsa_link = dsa_is_dsa_port(ds, port);
+
+ /* Upstream ports can be dedicated CPU ports or
+ * upstream-facing DSA links
+ */
+ if (is_upstream) {
+ if (general_params->host_port == ds->num_ports) {
+ general_params->host_port = port;
+ } else {
+ dev_err(ds->dev,
+ "Port %llu is already a host port, configuring %d as one too is not supported\n",
+ general_params->host_port, port);
+ return -EINVAL;
+ }
+ }
+
+ /* Cascade ports are downstream-facing DSA links */
+ if (is_dsa_link && !is_upstream) {
+ if (priv->info->multiple_cascade_ports) {
+ general_params->casc_port |= BIT(port);
+ } else if (general_params->casc_port == ds->num_ports) {
+ general_params->casc_port = port;
+ } else {
+ dev_err(ds->dev,
+ "Port %llu is already a cascade port, configuring %d as one too is not supported\n",
+ general_params->casc_port, port);
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (general_params->host_port == ds->num_ports) {
+ dev_err(ds->dev, "No host port configured\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sja1105_init_general_params(struct sja1105_private *priv)
+{
+ struct sja1105_general_params_entry default_general_params = {
+ /* Allow dynamic changing of the mirror port */
+ .mirr_ptacu = true,
+ .switchid = priv->ds->index,
+ /* Priority queue for link-local management frames
+ * (both ingress to and egress from CPU - PTP, STP etc)
+ */
+ .hostprio = 7,
+ .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
+ .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
+ .incl_srcpt1 = true,
+ .send_meta1 = true,
+ .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
+ .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
+ .incl_srcpt0 = true,
+ .send_meta0 = true,
+ /* Default to an invalid value */
+ .mirr_port = priv->ds->num_ports,
+ /* No TTEthernet */
+ .vllupformat = SJA1105_VL_FORMAT_PSFP,
+ .vlmarker = 0,
+ .vlmask = 0,
+ /* Only update correctionField for 1-step PTP (L2 transport) */
+ .ignore2stf = 0,
+ /* Forcefully disable VLAN filtering by telling
+ * the switch that VLAN has a different EtherType.
+ */
+ .tpid = ETH_P_SJA1105,
+ .tpid2 = ETH_P_SJA1105,
+ /* Enable the TTEthernet engine on SJA1110 */
+ .tte_en = true,
+ /* Set up the EtherType for control packets on SJA1110 */
+ .header_type = ETH_P_SJA1110,
+ };
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_table *table;
+ int rc;
+
+ rc = sja1105_init_topology(priv, &default_general_params);
+ if (rc)
+ return rc;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ general_params = table->entries;
+
+ /* This table only has a single entry */
+ general_params[0] = default_general_params;
+
+ sja1110_select_tdmaconfigidx(priv);
+
+ return 0;
+}
+
+static int sja1105_init_avb_params(struct sja1105_private *priv)
+{
+ struct sja1105_avb_params_entry *avb;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
+
+ /* Discard previous AVB Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ avb = table->entries;
+
+ /* Configure the MAC addresses for meta frames */
+ avb->destmeta = SJA1105_META_DMAC;
+ avb->srcmeta = SJA1105_META_SMAC;
+ /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
+ * default. This is because there might be boards with a hardware
+ * layout where enabling the pin as output might cause an electrical
+ * clash. On E/T the pin is always an output, which the board designers
+ * probably already knew, so even if there are going to be electrical
+ * issues, there's nothing we can do.
+ */
+ avb->cas_master = false;
+
+ return 0;
+}
+
+/* The L2 policing table is 2-stage. The table is looked up for each frame
+ * according to the ingress port, whether it was broadcast or not, and the
+ * classified traffic class (given by VLAN PCP). This portion of the lookup is
+ * fixed, and gives access to the SHARINDX, an indirection register pointing
+ * within the policing table itself, which is used to resolve the policer that
+ * will be used for this frame.
+ *
+ * Stage 1 Stage 2
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 0 |SHARINDX| | Policer 0: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 1 |SHARINDX| | Policer 1: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 2: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 7 |SHARINDX| | Policer 3: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 1 TC 0 |SHARINDX| | Policer 4: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 5: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 1 TC 7 |SHARINDX| | Policer 6: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 7: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 4 TC 7 |SHARINDX| ...
+ * +------------+--------+
+ * |Port 0 BCAST|SHARINDX| ...
+ * +------------+--------+
+ * |Port 1 BCAST|SHARINDX| ...
+ * +------------+--------+
+ * ... ...
+ * +------------+--------+ +---------------------------------+
+ * |Port 4 BCAST|SHARINDX| | Policer 44: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ *
+ * In this driver, we shall use policers 0-4 as statically allocated port
+ * (matchall) policers. So we need to make the SHARINDX for all lookups
+ * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
+ * lookup) equal.
+ * The remaining policers (40) shall be dynamically allocated for flower
+ * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
+ */
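+/* Convert a link speed in Mbps to hardware rate units of 15.625 kbps
+ * (64 units per Mbps).
+ */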
+#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
+
+static int sja1105_init_l2_policing(struct sja1105_private *priv)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int port, tc;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
+
+ /* Discard previous L2 Policing Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ policing = table->entries;
+
+ /* Setup shared indices for the matchall policers */
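+ /* Table layout: entries [0, N*8) are the per-port, per-TC policers,
+ * [N*8, N*9) the per-port broadcast policers and, on SJA1110 only,
+ * [N*9, N*10) the per-port multicast policers (N == ds->num_ports).
+ */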
+ for (port = 0; port < ds->num_ports; port++) {
+ int mcast = (ds->num_ports * (SJA1105_NUM_TC + 1)) + port;
+ int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
+
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+ policing[port * SJA1105_NUM_TC + tc].sharindx = port;
+
+ policing[bcast].sharindx = port;
+ /* Only SJA1110 has multicast policers */
+ if (mcast < table->ops->max_entry_count)
+ policing[mcast].sharindx = port;
+ }
+
+ /* Setup the matchall policer parameters */
+ for (port = 0; port < ds->num_ports; port++) {
+ int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+
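+ /* Frames on the CPU and DSA ports carry an extra (tag_8021q) VLAN
+ * header
+ */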
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ mtu += VLAN_HLEN;
+
+ policing[port].smax = 65535; /* Burst size in bytes */
+ policing[port].rate = SJA1105_RATE_MBPS(1000);
+ policing[port].maxlen = mtu;
+ policing[port].partition = 0;
+ }
+
+ return 0;
+}
+
+static int sja1105_static_config_load(struct sja1105_private *priv)
+{
+ int rc;
+
+ sja1105_static_config_free(&priv->static_config);
+ rc = sja1105_static_config_init(&priv->static_config,
+ priv->info->static_ops,
+ priv->info->device_id);
+ if (rc)
+ return rc;
+
+ /* Build static configuration */
+ rc = sja1105_init_mac_settings(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_mii_settings(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_static_fdb(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_static_vlan(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_lookup_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_forwarding(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_forwarding_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_policing(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_general_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_avb_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1110_init_pcp_remapping(priv);
+ if (rc < 0)
+ return rc;
+
+ /* Send initial configuration to hardware via SPI */
+ return sja1105_static_config_upload(priv);
+}
+
+/* This is the "new way" for a MAC driver to configure its RGMII delay lines,
+ * based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps"
+ * properties. It has the advantage of working with fixed links and with PHYs
+ * that apply RGMII delays too, and the MAC driver needs not perform any
+ * special checks.
+ *
+ * Previously we were acting upon the "phy-mode" property when we were
+ * operating in fixed-link, basically acting as a PHY, but with a reversed
+ * interpretation: PHY_INTERFACE_MODE_RGMII_TXID means that the MAC should
+ * behave as if it is connected to a PHY which has applied RGMII delays in the
+ * TX direction. So if anything, RX delays should have been added by the MAC,
+ * but we were adding TX delays.
+ *
+ * If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall
+ * back to the legacy behavior and apply delays on fixed-link ports based on
+ * the reverse interpretation of the phy-mode. This is a deviation from the
+ * expected default behavior which is to simply apply no delays. To achieve
+ * that behavior with the new bindings, it is mandatory to specify
+ * "{rx,tx}-internal-delay-ps" with a value of 0.
+ */
+static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, int port,
+ struct device_node *port_dn)
+{
+ phy_interface_t phy_mode = priv->phy_mode[port];
+ struct device *dev = &priv->spidev->dev;
+ int rx_delay = -1, tx_delay = -1;
+
+ if (!phy_interface_mode_is_rgmii(phy_mode))
+ return 0;
+
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
+
+ if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) {
+ dev_warn(dev,
+ "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
+ "please update device tree to specify \"rx-internal-delay-ps\" and "
+ "\"tx-internal-delay-ps\"",
+ port);
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ rx_delay = 2000;
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ tx_delay = 2000;
+ }
+
+ if (rx_delay < 0)
+ rx_delay = 0;
+ if (tx_delay < 0)
+ tx_delay = 0;
+
+ if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) {
+ dev_err(dev, "Chip cannot apply RGMII delays\n");
+ return -EINVAL;
+ }
+
+ if ((rx_delay && rx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
+ (tx_delay && tx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
+ (rx_delay > SJA1105_RGMII_DELAY_MAX_PS) ||
+ (tx_delay > SJA1105_RGMII_DELAY_MAX_PS)) {
+ dev_err(dev,
+ "port %d RGMII delay values out of range, must be between %d and %d ps\n",
+ port, SJA1105_RGMII_DELAY_MIN_PS, SJA1105_RGMII_DELAY_MAX_PS);
+ return -ERANGE;
+ }
+
+ priv->rgmii_rx_delay_ps[port] = rx_delay;
+ priv->rgmii_tx_delay_ps[port] = tx_delay;
+
+ return 0;
+}
+
+static int sja1105_parse_ports_node(struct sja1105_private *priv,
+ struct device_node *ports_node)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct device_node *child;
+
+ for_each_available_child_of_node(ports_node, child) {
+ struct device_node *phy_node;
+ phy_interface_t phy_mode;
+ u32 index;
+ int err;
+
+ /* Get switch port number from DT */
+ if (of_property_read_u32(child, "reg", &index) < 0) {
+ dev_err(dev, "Port number not defined in device tree "
+ "(property \"reg\")\n");
+ of_node_put(child);
+ return -ENODEV;
+ }
+
+ /* Get PHY mode from DT */
+ err = of_get_phy_mode(child, &phy_mode);
+ if (err) {
+ dev_err(dev, "Failed to read phy-mode or "
+ "phy-interface-type property for port %d\n",
+ index);
+ of_node_put(child);
+ return -ENODEV;
+ }
+
+ phy_node = of_parse_phandle(child, "phy-handle", 0);
+ if (!phy_node) {
+ if (!of_phy_is_fixed_link(child)) {
+ dev_err(dev, "phy-handle or fixed-link "
+ "properties missing!\n");
+ of_node_put(child);
+ return -ENODEV;
+ }
+ /* phy-handle is missing, but fixed-link isn't.
+ * So it's a fixed link. Default to PHY role.
+ */
+ priv->fixed_link[index] = true;
+ } else {
+ of_node_put(phy_node);
+ }
+
+ priv->phy_mode[index] = phy_mode;
+
+ err = sja1105_parse_rgmii_delays(priv, index, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_parse_dt(struct sja1105_private *priv)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct device_node *switch_node = dev->of_node;
+ struct device_node *ports_node;
+ int rc;
+
+ ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node)
+ ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
+ if (!ports_node) {
+ dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
+ return -ENODEV;
+ }
+
+ rc = sja1105_parse_ports_node(priv, ports_node);
+ of_node_put(ports_node);
+
+ return rc;
+}
+
+/* Convert link speed from SJA1105 to ethtool encoding */
+static int sja1105_port_speed_to_ethtool(struct sja1105_private *priv,
+ u64 speed)
+{
+ if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS])
+ return SPEED_10;
+ if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS])
+ return SPEED_100;
+ if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS])
+ return SPEED_1000;
+ if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS])
+ return SPEED_2500;
+ return SPEED_UNKNOWN;
+}
+
+/* Set link speed in the MAC configuration for a specific port. */
+static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
+ int speed_mbps)
+{
+ struct sja1105_mac_config_entry *mac;
+ struct device *dev = priv->ds->dev;
+ u64 speed;
+ int rc;
+
+ /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
+ * tables. On E/T, MAC reconfig tables are not readable, only writable.
+ * We have to *know* what the MAC looks like. For the sake of keeping
+ * the code common, we'll use the static configuration tables as a
+ * reasonable approximation for both E/T and P/Q/R/S.
+ */
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ switch (speed_mbps) {
+ case SPEED_UNKNOWN:
+ /* PHYLINK called sja1105_mac_config() to inform us about
+ * the state->interface, but AN has not completed and the
+ * speed is not yet valid. UM10944.pdf says that setting
+ * SJA1105_SPEED_AUTO at runtime disables the port, so that is
+ * ok for power consumption in case AN will never complete -
+ * otherwise PHYLINK should come back with a new update.
+ */
+ speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
+ break;
+ case SPEED_10:
+ speed = priv->info->port_speed[SJA1105_SPEED_10MBPS];
+ break;
+ case SPEED_100:
+ speed = priv->info->port_speed[SJA1105_SPEED_100MBPS];
+ break;
+ case SPEED_1000:
+ speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
+ break;
+ case SPEED_2500:
+ speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
+ break;
+ default:
+ dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
+ return -EINVAL;
+ }
+
+ /* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
+ * table, since this will be used for the clocking setup, and we no
+ * longer need to store it in the static config (already told hardware
+ * we want auto during upload phase).
+ * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
+ * we need to configure the PCS only (if even that).
+ */
+ if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
+ mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
+ else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX)
+ mac[port].speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
+ else
+ mac[port].speed = speed;
+
+ /* Write to the dynamic reconfiguration tables */
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+ if (rc < 0) {
+ dev_err(dev, "Failed to write MAC config: %d\n", rc);
+ return rc;
+ }
+
+ /* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
+ * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
+ * RMII no change of the clock setup is required. Actually, changing
+ * the clock setup does interrupt the clock signal for a certain time
+ * which causes trouble for all PHYs relying on this signal.
+ */
+ if (!phy_interface_mode_is_rgmii(priv->phy_mode[port]))
+ return 0;
+
+ return sja1105_clocking_setup_port(priv, port);
+}
+
+static struct phylink_pcs *
+sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct dw_xpcs *xpcs = priv->xpcs[port];
+
+ if (xpcs)
+ return &xpcs->pcs;
+
+ return NULL;
+}
+
+static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ sja1105_inhibit_tx(ds->priv, BIT(port), true);
+}
+
+static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ sja1105_adjust_port_config(priv, port, speed);
+
+ sja1105_inhibit_tx(priv, BIT(port), false);
+}
+
+static void sja1105_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_xmii_params_entry *mii;
+ phy_interface_t phy_mode;
+
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
+
+ phy_mode = priv->phy_mode[port];
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
+ /* Changing the PHY mode on SERDES ports is possible and makes
+ * sense, because that is done through the XPCS. We allow
+ * changes between SGMII and 2500base-X.
+ */
+ if (priv->info->supports_sgmii[port])
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+
+ if (priv->info->supports_2500basex[port])
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ } else {
+ /* The SJA1105 MAC programming model is through the static
+ * config (the xMII Mode table cannot be dynamically
+ * reconfigured), and we have to program that early.
+ */
+ __set_bit(phy_mode, config->supported_interfaces);
+ }
+
+ /* The MAC does not support pause frames, and also doesn't
+ * support half-duplex traffic modes.
+ */
+ config->mac_capabilities = MAC_10FD | MAC_100FD;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+ if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
+ mii->xmii_mode[port] == XMII_MODE_SGMII)
+ config->mac_capabilities |= MAC_1000FD;
+
+ if (priv->info->supports_2500basex[port])
+ config->mac_capabilities |= MAC_2500FD;
+}
+
+static int
+sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
+ const struct sja1105_l2_lookup_entry *requested)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+ l2_lookup = table->entries;
+
+ for (i = 0; i < table->entry_count; i++)
+ if (l2_lookup[i].macaddr == requested->macaddr &&
+ l2_lookup[i].vlanid == requested->vlanid &&
+ l2_lookup[i].destports & BIT(port))
+ return i;
+
+ return -1;
+}
+
+/* We want FDB entries added statically through the bridge command to persist
+ * across switch resets, which are a common thing during normal SJA1105
+ * operation. So we have to back them up in the static configuration tables
+ * and hence apply them on next static config upload... yay!
+ */
+static int
+sja1105_static_fdb_change(struct sja1105_private *priv, int port,
+ const struct sja1105_l2_lookup_entry *requested,
+ bool keep)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int rc, match;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+
+ match = sja1105_find_static_fdb_entry(priv, port, requested);
+ if (match < 0) {
+ /* Can't delete a missing entry. */
+ if (!keep)
+ return 0;
+
+ /* No match => new entry */
+ rc = sja1105_table_resize(table, table->entry_count + 1);
+ if (rc)
+ return rc;
+
+ match = table->entry_count - 1;
+ }
+
+ /* Assign pointer after the resize (it may be new memory) */
+ l2_lookup = table->entries;
+
+ /* We have a match.
+ * If the job was to add this FDB entry, it's already done (mostly
+ * anyway, since the port forwarding mask may have changed, case in
+ * which we update it).
+ * Otherwise we have to delete it.
+ */
+ if (keep) {
+ l2_lookup[match] = *requested;
+ return 0;
+ }
+
+ /* To remove, the strategy is to overwrite the element with
+ * the last one, and then reduce the array size by 1
+ */
+ l2_lookup[match] = l2_lookup[table->entry_count - 1];
+ return sja1105_table_resize(table, table->entry_count - 1);
+}
+
+/* First-generation switches have a 4-way set associative TCAM that
+ * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
+ * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
+ * For the placement of a newly learnt FDB entry, the switch selects the bin
+ * based on a hash function, and the way within that bin incrementally.
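+ * For example, bin 5 way 2 corresponds to FDB index 5 * 4 + 2 = 22.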
+ */
+static int sja1105et_fdb_index(int bin, int way)
+{
+ return bin * SJA1105ET_FDB_BIN_SIZE + way;
+}
+
+static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
+ const u8 *addr, u16 vid,
+ struct sja1105_l2_lookup_entry *match,
+ int *last_unused)
+{
+ int way;
+
+ for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ int index = sja1105et_fdb_index(bin, way);
+
+ /* Skip unused entries, optionally marking them
+ * into the return value
+ */
+ if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ index, &l2_lookup)) {
+ if (last_unused)
+ *last_unused = way;
+ continue;
+ }
+
+ if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
+ l2_lookup.vlanid == vid) {
+ if (match)
+ *match = l2_lookup;
+ return way;
+ }
+ }
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+int sja1105et_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
+ struct sja1105_private *priv = ds->priv;
+ struct device *dev = ds->dev;
+ int last_unused = -1;
+ int start, end, i;
+ int bin, way, rc;
+
+ bin = sja1105et_fdb_hash(priv, addr, vid);
+
+ way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, &last_unused);
+ if (way >= 0) {
+ /* We have an FDB entry. If our port is already in its destination
+ * mask and the entry is static (locked), there is nothing to do.
+ * Otherwise we rewrite the entry, adding this port to it.
+ */
+ if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds)
+ return 0;
+ l2_lookup.destports |= BIT(port);
+ } else {
+ int index = sja1105et_fdb_index(bin, way);
+
+ /* We don't have an FDB entry. We construct a new one and
+ * try to find a place for it within the FDB table.
+ */
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.destports = BIT(port);
+ l2_lookup.vlanid = vid;
+
+ if (last_unused >= 0) {
+ way = last_unused;
+ } else {
+ /* Bin is full, need to evict somebody.
+ * Choose victim at random. If you get these messages
+ * often, you may need to consider changing the
+ * distribution function:
+ * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
+ */
+ get_random_bytes(&way, sizeof(u8));
+ way %= SJA1105ET_FDB_BIN_SIZE;
+ dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
+ bin, addr, way);
+ /* Evict entry */
+ sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ index, NULL, false);
+ }
+ }
+ l2_lookup.lockeds = true;
+ l2_lookup.index = sja1105et_fdb_index(bin, way);
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup,
+ true);
+ if (rc < 0)
+ return rc;
+
+ /* Invalidate a dynamically learned entry if that exists */
+ start = sja1105et_fdb_index(bin, 0);
+ end = sja1105et_fdb_index(bin, way);
+
+ for (i = start; i < end; i++) {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, &tmp);
+ if (rc == -ENOENT)
+ continue;
+ if (rc)
+ return rc;
+
+ if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid)
+ continue;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ i, NULL, false);
+ if (rc)
+ return rc;
+
+ break;
+ }
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
+}
+
+int sja1105et_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ int index, bin, way, rc;
+ bool keep;
+
+ bin = sja1105et_fdb_hash(priv, addr, vid);
+ way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, NULL);
+ if (way < 0)
+ return 0;
+ index = sja1105et_fdb_index(bin, way);
+
+ /* We have an FDB entry. Is our port in the destination mask? If yes,
+ * we need to remove it. If the resulting port mask becomes empty, we
+ * need to completely evict the FDB entry.
+ * Otherwise we just write it back.
+ */
+ l2_lookup.destports &= ~BIT(port);
+
+ if (l2_lookup.destports)
+ keep = true;
+ else
+ keep = false;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ index, &l2_lookup, keep);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
+}
+
+int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
+ struct sja1105_private *priv = ds->priv;
+ int rc, i;
+
+ /* Search for an existing entry in the FDB table */
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.vlanid = vid;
+ l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.destports = BIT(port);
+
+ tmp = l2_lookup;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &tmp);
+ if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) {
+ /* Found a static entry and this port is already in the entry's
+ * port mask => job done
+ */
+ if ((tmp.destports & BIT(port)) && tmp.lockeds)
+ return 0;
+
+ l2_lookup = tmp;
+
+ /* l2_lookup.index is populated by the switch in case it
+ * found something.
+ */
+ l2_lookup.destports |= BIT(port);
+ goto skip_finding_an_index;
+ }
+
+ /* Not found, so try to find an unused spot in the FDB.
+ * This is slightly inefficient because the strategy is to probe
+ * every possible position from 0 to 1023 over SPI.
+ */
+ for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, NULL);
+ if (rc < 0)
+ break;
+ }
+ if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
+ dev_err(ds->dev, "FDB is full, cannot add entry.\n");
+ return -EINVAL;
+ }
+ l2_lookup.index = i;
+
+skip_finding_an_index:
+ l2_lookup.lockeds = true;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup,
+ true);
+ if (rc < 0)
+ return rc;
+
+ /* The switch learns dynamic entries and looks up the FDB left to
+ * right. It is possible that our addition was concurrent with the
+ * dynamic learning of the same address, so now that the static entry
+ * has been installed, we are certain that address learning for this
+ * particular address has been turned off, so the dynamic entry either
+ * is in the FDB at an index smaller than the static one, or isn't (it
+ * can also be at a larger index, but in that case it is inactive
+ * because the static FDB entry will match first, and the dynamic one
+ * will eventually age out). Search for a dynamically learned address
+ * prior to our static one and invalidate it.
+ */
+ tmp = l2_lookup;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &tmp);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "port %d failed to read back entry for %pM vid %d: %pe\n",
+ port, addr, vid, ERR_PTR(rc));
+ return rc;
+ }
+
+ if (tmp.index < l2_lookup.index) {
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ tmp.index, NULL, false);
+ if (rc < 0)
+ return rc;
+ }
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
+}
+
+int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ bool keep;
+ int rc;
+
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.vlanid = vid;
+ l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.destports = BIT(port);
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &l2_lookup);
+ if (rc < 0)
+ return 0;
+
+ l2_lookup.destports &= ~BIT(port);
+
+ /* Decide whether we remove just this port from the FDB entry,
+ * or if we remove it completely.
+ */
+ if (l2_lookup.destports)
+ keep = true;
+ else
+ keep = false;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup, keep);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
+}
+
+static int sja1105_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_lock(&priv->fdb_lock);
+ rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
+}
+
+static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return priv->info->fdb_del_cmd(ds, port, addr, vid);
+}
+
+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ mutex_lock(&priv->fdb_lock);
+ rc = __sja1105_fdb_del(ds, port, addr, vid, db);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
+}
+
+static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct device *dev = ds->dev;
+ int i;
+
+ for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ u8 macaddr[ETH_ALEN];
+ int rc;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, &l2_lookup);
+ /* No fdb entry at i, not an issue */
+ if (rc == -ENOENT)
+ continue;
+ if (rc) {
+ dev_err(dev, "Failed to dump FDB: %d\n", rc);
+ return rc;
+ }
+
+ /* FDB dump callback is per port. This means we have to
+ * disregard a valid entry if it's not for this port, even if
+ * only to revisit it later. This is inefficient because the
+ * 1024-sized FDB table needs to be traversed 4 times through
+ * SPI during a 'bridge fdb show' command.
+ */
+ if (!(l2_lookup.destports & BIT(port)))
+ continue;
+
+ u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+
+ /* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
+ * only wants to see unicast
+ */
+ if (is_multicast_ether_addr(macaddr))
+ continue;
+
+ /* We need to hide the dsa_8021q VLANs from the user. */
+ if (vid_is_dsa_8021q(l2_lookup.vlanid))
+ l2_lookup.vlanid = 0;
+ rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static void sja1105_fast_age(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct sja1105_private *priv = ds->priv;
+ struct dsa_db db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = {
+ .dev = dsa_port_bridge_dev_get(dp),
+ .num = dsa_port_bridge_num_get(dp),
+ },
+ };
+ int i;
+
+ mutex_lock(&priv->fdb_lock);
+
+ for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ u8 macaddr[ETH_ALEN];
+ int rc;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, &l2_lookup);
+ /* No fdb entry at i, not an issue */
+ if (rc == -ENOENT)
+ continue;
+ if (rc) {
+ dev_err(ds->dev, "Failed to read FDB: %pe\n",
+ ERR_PTR(rc));
+ break;
+ }
+
+ if (!(l2_lookup.destports & BIT(port)))
+ continue;
+
+ /* Don't delete static FDB entries */
+ if (l2_lookup.lockeds)
+ continue;
+
+ u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+
+ rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
+ if (rc) {
+ dev_err(ds->dev,
+ "Failed to delete FDB entry %pM vid %lld: %pe\n",
+ macaddr, l2_lookup.vlanid, ERR_PTR(rc));
+ break;
+ }
+ }
+
+ mutex_unlock(&priv->fdb_lock);
+}
+
+static int sja1105_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db);
+}
+
+static int sja1105_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db);
+}
+
+/* Common function for unicast and broadcast flood configuration.
+ * Flooding is configured between each {ingress, egress} port pair, and since
+ * the bridge's semantics are those of "egress flooding", it means we must
+ * enable flooding towards this port from all ingress ports that are in the
+ * same forwarding domain.
+ */
+static int sja1105_manage_flood_domains(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_entry *l2_fwd;
+ struct dsa_switch *ds = priv->ds;
+ int from, to, rc;
+
+ l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
+
+ for (from = 0; from < ds->num_ports; from++) {
+ u64 fl_domain = 0, bc_domain = 0;
+
+ for (to = 0; to < priv->ds->num_ports; to++) {
+ if (!sja1105_can_forward(l2_fwd, from, to))
+ continue;
+
+ if (priv->ucast_egress_floods & BIT(to))
+ fl_domain |= BIT(to);
+ if (priv->bcast_egress_floods & BIT(to))
+ bc_domain |= BIT(to);
+ }
+
+ /* Nothing changed, nothing to do */
+ if (l2_fwd[from].fl_domain == fl_domain &&
+ l2_fwd[from].bc_domain == bc_domain)
+ continue;
+
+ l2_fwd[from].fl_domain = fl_domain;
+ l2_fwd[from].bc_domain = bc_domain;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ from, &l2_fwd[from], true);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
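+
+/* Example (illustrative): on a switch where ports 1 and 2 may forward to
+ * each other and only port 2 has BR_FLOOD set, the loop above recomputes
+ * port 1's fl_domain as BIT(2); ports outside the forwarding domain
+ * contribute nothing regardless of their flood settings.
+ */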
+
+static int sja1105_bridge_member(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge, bool member)
+{
+ struct sja1105_l2_forwarding_entry *l2_fwd;
+ struct sja1105_private *priv = ds->priv;
+ int i, rc;
+
+ l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ /* Add this port to the forwarding matrix of the
+ * other ports in the same bridge, and vice versa.
+ */
+ if (!dsa_is_user_port(ds, i))
+ continue;
+ /* For the ports already under the bridge, only one thing needs
+ * to be done, and that is to add this port to their
+ * reachability domain. So we can perform the SPI write for
+ * them immediately. However, for this port itself (the one
+ * that is new to the bridge), we need to add all other ports
+ * to its reachability domain. So we do that incrementally in
+ * this loop, and perform the SPI write only at the end, once
+ * the domain contains all other bridge ports.
+ */
+ if (i == port)
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+ sja1105_port_allow_traffic(l2_fwd, i, port, member);
+ sja1105_port_allow_traffic(l2_fwd, port, i, member);
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ i, &l2_fwd[i], true);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ port, &l2_fwd[port], true);
+ if (rc)
+ return rc;
+
+ rc = sja1105_commit_pvid(ds, port);
+ if (rc)
+ return rc;
+
+ return sja1105_manage_flood_domains(priv);
+}
+
+static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ /* From UM10944's description of DRPDTAG (why put this there?):
+ * "Management traffic flows to the port regardless of the state
+ * of the INGRESS flag". So BPDUs are still allowed to pass.
+ * For now there is no difference between DISABLED and BLOCKING.
+ */
+ mac[port].ingress = false;
+ mac[port].egress = false;
+ mac[port].dyn_learn = false;
+ break;
+ case BR_STATE_LISTENING:
+ mac[port].ingress = true;
+ mac[port].egress = false;
+ mac[port].dyn_learn = false;
+ break;
+ case BR_STATE_LEARNING:
+ mac[port].ingress = true;
+ mac[port].egress = false;
+ mac[port].dyn_learn = dp->learning;
+ break;
+ case BR_STATE_FORWARDING:
+ mac[port].ingress = true;
+ mac[port].egress = true;
+ mac[port].dyn_learn = dp->learning;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ rc = sja1105_bridge_member(ds, port, bridge, true);
+ if (rc)
+ return rc;
+
+ rc = dsa_tag_8021q_bridge_join(ds, port, bridge);
+ if (rc) {
+ sja1105_bridge_member(ds, port, bridge, false);
+ return rc;
+ }
+
+ *tx_fwd_offload = true;
+
+ return 0;
+}
+
+static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ dsa_tag_8021q_bridge_leave(ds, port, bridge);
+ sja1105_bridge_member(ds, port, bridge, false);
+}
+
+#define BYTES_PER_KBIT (1000LL / 8)
+/* Port 0 (the uC port) does not have CBS shapers */
+#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
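+/* Example (illustrative, assuming SJA1105_NUM_TC == 8): on SJA1110 the
+ * shaper for port 2, traffic class 3 sits at fixed index
+ * (2 - 1) * 8 + 3 = 11, while port 0 would yield a negative index, which
+ * the bounds check in sja1105_find_cbs_shaper() below rejects.
+ */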
+
+static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
+ int port, int prio)
+{
+ int i;
+
+ if (priv->info->fixed_cbs_mapping) {
+ i = SJA1110_FIXED_CBS(port, prio);
+ if (i >= 0 && i < priv->info->num_cbs_shapers)
+ return i;
+
+ return -1;
+ }
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
+ return i;
+
+ return -1;
+}
+
+static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
+{
+ int i;
+
+ if (priv->info->fixed_cbs_mapping)
+ return -1;
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
+ return i;
+
+ return -1;
+}
+
+static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
+ int prio)
+{
+ int i;
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++) {
+ struct sja1105_cbs_entry *cbs = &priv->cbs[i];
+
+ if (cbs->port == port && cbs->prio == prio) {
+ memset(cbs, 0, sizeof(*cbs));
+ return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
+ i, cbs, true);
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
+ struct tc_cbs_qopt_offload *offload)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_cbs_entry *cbs;
+ s64 port_transmit_rate_kbps;
+ int index;
+
+ if (!offload->enable)
+ return sja1105_delete_cbs_shaper(priv, port, offload->queue);
+
+ /* The user may be replacing an existing shaper */
+ index = sja1105_find_cbs_shaper(priv, port, offload->queue);
+ if (index < 0) {
+ /* That isn't the case - see if we can allocate a new one */
+ index = sja1105_find_unused_cbs_shaper(priv);
+ if (index < 0)
+ return -ENOSPC;
+ }
+
+ cbs = &priv->cbs[index];
+ cbs->port = port;
+ cbs->prio = offload->queue;
+ /* locredit and sendslope are negative by definition. In hardware,
+ * positive values must be provided, and the negative sign is implicit.
+ */
+ cbs->credit_hi = offload->hicredit;
+ cbs->credit_lo = abs(offload->locredit);
+ /* User space is in kbits/sec, while the hardware in bytes/sec times
+ * link speed. Since the given offload->sendslope is good only for the
+ * current link speed anyway, and user space is likely to reprogram it
+ * when that changes, don't even bother to track the port's link speed,
+ * but deduce the port transmit rate from idleslope - sendslope.
+ */
+ port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
+ cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
+ port_transmit_rate_kbps);
+ cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
+ port_transmit_rate_kbps);
+ /* Convert the negative values from 64-bit 2's complement
+ * to 32-bit 2's complement (for the case of 0x80000000 whose
+ * negative is still negative).
+ */
+ cbs->credit_lo &= GENMASK_ULL(31, 0);
+ cbs->send_slope &= GENMASK_ULL(31, 0);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
+ true);
+}
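+
+/* Worked example (illustrative numbers): for a tc-cbs offload with
+ * idleslope = 20000 and sendslope = -80000 (in kbits/s), the deduced port
+ * transmit rate is 20000 - (-80000) = 100000 kbits/s. With
+ * BYTES_PER_KBIT == 125, this yields:
+ *
+ *   idle_slope = 20000 * 125 / 100000 = 25
+ *   send_slope = |-80000| * 125 / 100000 = 100
+ *
+ * i.e. both slopes are rescaled to bytes/s per unit of link speed, as the
+ * comments above describe.
+ */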
+
+static int sja1105_reload_cbs(struct sja1105_private *priv)
+{
+ int rc = 0, i;
+
+ /* The credit based shapers are only allocated if
+ * CONFIG_NET_SCH_CBS is enabled.
+ */
+ if (!priv->cbs)
+ return 0;
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++) {
+ struct sja1105_cbs_entry *cbs = &priv->cbs[i];
+
+ if (!cbs->idle_slope && !cbs->send_slope)
+ continue;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
+ true);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static const char * const sja1105_reset_reasons[] = {
+ [SJA1105_VLAN_FILTERING] = "VLAN filtering",
+ [SJA1105_AGEING_TIME] = "Ageing time",
+ [SJA1105_SCHEDULING] = "Time-aware scheduling",
+ [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
+ [SJA1105_VIRTUAL_LINKS] = "Virtual links",
+};
+
+/* For situations where we need to change a setting at runtime that is only
+ * available through the static configuration, resetting the switch in order
+ * to upload the new static config is unavoidable. Back up the settings we
+ * modify at runtime (currently only MAC) and restore them after uploading,
+ * such that this operation is relatively seamless.
+ */
+int sja1105_static_config_reload(struct sja1105_private *priv,
+ enum sja1105_reset_reason reason)
+{
+ struct ptp_system_timestamp ptp_sts_before;
+ struct ptp_system_timestamp ptp_sts_after;
+ int speed_mbps[SJA1105_MAX_NUM_PORTS];
+ u16 bmcr[SJA1105_MAX_NUM_PORTS] = {0};
+ struct sja1105_mac_config_entry *mac;
+ struct dsa_switch *ds = priv->ds;
+ s64 t1, t2, t3, t4;
+ s64 t12, t34;
+ int rc, i;
+ s64 now;
+
+ mutex_lock(&priv->fdb_lock);
+ mutex_lock(&priv->mgmt_lock);
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ /* Back up the dynamic link speed changed by sja1105_adjust_port_config
+ * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
+ * switch wants to see in the static config in order to allow us to
+ * change it through the dynamic interface later.
+ */
+ for (i = 0; i < ds->num_ports; i++) {
+ speed_mbps[i] = sja1105_port_speed_to_ethtool(priv,
+ mac[i].speed);
+ mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
+
+ if (priv->xpcs[i])
+ bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i,
+ MDIO_MMD_VEND2, MDIO_CTRL1);
+ }
+
+ /* No PTP operations can run right now */
+ mutex_lock(&priv->ptp_data.lock);
+
+ rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
+ if (rc < 0) {
+ mutex_unlock(&priv->ptp_data.lock);
+ goto out;
+ }
+
+ /* Reset switch and send updated static configuration */
+ rc = sja1105_static_config_upload(priv);
+ if (rc < 0) {
+ mutex_unlock(&priv->ptp_data.lock);
+ goto out;
+ }
+
+ rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
+ if (rc < 0) {
+ mutex_unlock(&priv->ptp_data.lock);
+ goto out;
+ }
+
+ t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
+ t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
+ t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
+ t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
+ /* Mid point, corresponds to pre-reset PTPCLKVAL */
+ t12 = t1 + (t2 - t1) / 2;
+ /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
+ t34 = t3 + (t4 - t3) / 2;
+ /* Advance PTPCLKVAL by the time it took since its readout */
+ now += (t34 - t12);
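+ /* Numerically (illustrative): t1 = 1000, t2 = 1010 => t12 = 1005;
+ * t3 = 5000, t4 = 5020 => t34 = 5010. The reset took 4005 ns from the
+ * perspective of the system clock, so the pre-reset PTPCLKVAL readout
+ * is advanced by that amount before being written back.
+ */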
+
+ __sja1105_ptp_adjtime(ds, now);
+
+ mutex_unlock(&priv->ptp_data.lock);
+
+ dev_info(priv->ds->dev,
+ "Reset switch and programmed static config. Reason: %s\n",
+ sja1105_reset_reasons[reason]);
+
+ /* Configure the CGU (PLLs) for MII and RMII PHYs.
+ * For these interfaces there is no dynamic configuration
+ * needed, since PLLs have same settings at all speeds.
+ */
+ if (priv->info->clocking_setup) {
+ rc = priv->info->clocking_setup(priv);
+ if (rc < 0)
+ goto out;
+ }
+
+ for (i = 0; i < ds->num_ports; i++) {
+ struct dw_xpcs *xpcs = priv->xpcs[i];
+ unsigned int mode;
+
+ rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
+ if (rc < 0)
+ goto out;
+
+ if (!xpcs)
+ continue;
+
+ if (bmcr[i] & BMCR_ANENABLE)
+ mode = MLO_AN_INBAND;
+ else if (priv->fixed_link[i])
+ mode = MLO_AN_FIXED;
+ else
+ mode = MLO_AN_PHY;
+
+ rc = xpcs_do_config(xpcs, priv->phy_mode[i], mode, NULL);
+ if (rc < 0)
+ goto out;
+
+ if (!phylink_autoneg_inband(mode)) {
+ int speed = SPEED_UNKNOWN;
+
+ if (priv->phy_mode[i] == PHY_INTERFACE_MODE_2500BASEX)
+ speed = SPEED_2500;
+ else if (bmcr[i] & BMCR_SPEED1000)
+ speed = SPEED_1000;
+ else if (bmcr[i] & BMCR_SPEED100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ xpcs_link_up(&xpcs->pcs, mode, priv->phy_mode[i],
+ speed, DUPLEX_FULL);
+ }
+ }
+
+ rc = sja1105_reload_cbs(priv);
+ if (rc < 0)
+ goto out;
+out:
+ mutex_unlock(&priv->mgmt_lock);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
+}
+
+static enum dsa_tag_protocol
+sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ return priv->info->tag_proto;
+}
+
+/* The TPID setting belongs to the General Parameters table,
+ * which can only be partially reconfigured at runtime (and not the TPID).
+ * So a switch reset is required.
+ */
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_table *table;
+ struct sja1105_rule *rule;
+ u16 tpid, tpid2;
+ int rc;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type == SJA1105_RULE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change VLAN filtering with active VL rules");
+ return -EBUSY;
+ }
+ }
+
+ if (enabled) {
+ /* Enable VLAN filtering. */
+ tpid = ETH_P_8021Q;
+ tpid2 = ETH_P_8021AD;
+ } else {
+ /* Disable VLAN filtering. */
+ tpid = ETH_P_SJA1105;
+ tpid2 = ETH_P_SJA1105;
+ }
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+ /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
+ general_params->tpid = tpid;
+ /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
+ general_params->tpid2 = tpid2;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ rc = sja1105_commit_pvid(ds, port);
+ if (rc)
+ return rc;
+ }
+
+ rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
+ if (rc)
+ NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype");
+
+ return rc;
+}
+
+static int sja1105_vlan_add(struct sja1105_private *priv, int port, u16 vid,
+ u16 flags, bool allowed_ingress)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ struct sja1105_table *table;
+ int match, rc;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ match = sja1105_is_vlan_configured(priv, vid);
+ if (match < 0) {
+ rc = sja1105_table_resize(table, table->entry_count + 1);
+ if (rc)
+ return rc;
+ match = table->entry_count - 1;
+ }
+
+ /* Assign pointer after the resize (it may be new memory) */
+ vlan = table->entries;
+
+ vlan[match].type_entry = SJA1110_VLAN_D_TAG;
+ vlan[match].vlanid = vid;
+ vlan[match].vlan_bc |= BIT(port);
+
+ if (allowed_ingress)
+ vlan[match].vmemb_port |= BIT(port);
+ else
+ vlan[match].vmemb_port &= ~BIT(port);
+
+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
+ vlan[match].tag_port &= ~BIT(port);
+ else
+ vlan[match].tag_port |= BIT(port);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
+ &vlan[match], true);
+}
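+
+/* Example (illustrative): adding VID 100 on port 2 with
+ * BRIDGE_VLAN_INFO_UNTAGGED and allowed_ingress == true sets
+ * vmemb_port |= BIT(2) (port 2 accepts VID 100 on ingress),
+ * vlan_bc |= BIT(2) (port 2 joins the VLAN's broadcast domain) and clears
+ * bit 2 of tag_port (frames leave port 2 untagged).
+ */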
+
+static int sja1105_vlan_del(struct sja1105_private *priv, int port, u16 vid)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ struct sja1105_table *table;
+ bool keep = true;
+ int match, rc;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ match = sja1105_is_vlan_configured(priv, vid);
+ /* Can't delete a missing entry. */
+ if (match < 0)
+ return 0;
+
+ /* No resize takes place on the deletion path, so just assign the pointer */
+ vlan = table->entries;
+
+ vlan[match].vlanid = vid;
+ vlan[match].vlan_bc &= ~BIT(port);
+ vlan[match].vmemb_port &= ~BIT(port);
+ /* Also unset tag_port, just so we don't have a confusing bitmap
+ * (no practical purpose).
+ */
+ vlan[match].tag_port &= ~BIT(port);
+
+ /* If there's no port left as member of this VLAN,
+ * it's time for it to go.
+ */
+ if (!vlan[match].vmemb_port)
+ keep = false;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
+ &vlan[match], keep);
+ if (rc < 0)
+ return rc;
+
+ if (!keep)
+ return sja1105_table_delete_entry(table, match);
+
+ return 0;
+}
+
+static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+ u16 flags = vlan->flags;
+ int rc;
+
+ /* Be sure to deny alterations to the configuration done by tag_8021q.
+ */
+ if (vid_is_dsa_8021q(vlan->vid)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Range 3072-4095 reserved for dsa_8021q operation");
+ return -EBUSY;
+ }
+
+ /* Always install bridge VLANs as egress-tagged on CPU and DSA ports */
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ flags = 0;
+
+ rc = sja1105_vlan_add(priv, port, vlan->vid, flags, true);
+ if (rc)
+ return rc;
+
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ priv->bridge_pvid[port] = vlan->vid;
+
+ return sja1105_commit_pvid(ds, port);
+}
+
+static int sja1105_bridge_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ rc = sja1105_vlan_del(priv, port, vlan->vid);
+ if (rc)
+ return rc;
+
+ /* In case the pvid was deleted, make sure that untagged packets will
+ * be dropped.
+ */
+ return sja1105_commit_pvid(ds, port);
+}
+
+static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
+{
+ struct sja1105_private *priv = ds->priv;
+ bool allowed_ingress = true;
+ int rc;
+
+ /* Prevent attackers from trying to inject a DSA tag from
+ * the outside world.
+ */
+ if (dsa_is_user_port(ds, port))
+ allowed_ingress = false;
+
+ rc = sja1105_vlan_add(priv, port, vid, flags, allowed_ingress);
+ if (rc)
+ return rc;
+
+ if (flags & BRIDGE_VLAN_INFO_PVID)
+ priv->tag_8021q_pvid[port] = vid;
+
+ return sja1105_commit_pvid(ds, port);
+}
+
+static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ return sja1105_vlan_del(priv, port, vid);
+}
+
+static int sja1105_prechangeupper(struct dsa_switch *ds, int port,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct netlink_ext_ack *extack = info->info.extack;
+ struct net_device *upper = info->upper_dev;
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp;
+
+ if (is_vlan_dev(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "8021q uppers are not supported");
+ return -EBUSY;
+ }
+
+ if (netif_is_bridge_master(upper)) {
+ list_for_each_entry(dp, &dst->ports, list) {
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+ if (br && br != upper && br_vlan_enabled(br)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one VLAN-aware bridge is supported");
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
+ struct sk_buff *skb, bool takets)
+{
+ struct sja1105_mgmt_entry mgmt_route = {0};
+ struct sja1105_private *priv = ds->priv;
+ struct ethhdr *hdr;
+ int timeout = 10;
+ int rc;
+
+ hdr = eth_hdr(skb);
+
+ mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
+ mgmt_route.destports = BIT(port);
+ mgmt_route.enfport = 1;
+ mgmt_route.tsreg = 0;
+ mgmt_route.takets = takets;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route, true);
+ if (rc < 0) {
+ kfree_skb(skb);
+ return rc;
+ }
+
+ /* Transfer skb to the host port. */
+ dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
+
+ /* Wait until the switch has processed the frame */
+ do {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route);
+ if (rc < 0) {
+ dev_err_ratelimited(priv->ds->dev,
+ "failed to poll for mgmt route\n");
+ continue;
+ }
+
+ /* UM10944: The ENFPORT flag of the respective entry is
+ * cleared when a match is found. The host can use this
+ * flag as an acknowledgment.
+ */
+ cpu_relax();
+ } while (mgmt_route.enfport && --timeout);
+
+ if (!timeout) {
+ /* Clean up the management route so that a follow-up
+ * frame may not match on it by mistake.
+ * This is only supported in hardware on P/Q/R/S; on E/T it is
+ * a no-op and we silently discard the -EOPNOTSUPP.
+ */
+ sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route, false);
+ dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
+ }
+
+ return NETDEV_TX_OK;
+}
+
+#define work_to_xmit_work(w) \
+ container_of((w), struct sja1105_deferred_xmit_work, work)
+
+/* Deferred work is unfortunately necessary because setting up the management
+ * route cannot be done from atomic context (the SPI transfer takes a
+ * sleepable lock on the bus).
+ */
+static void sja1105_port_deferred_xmit(struct kthread_work *work)
+{
+ struct sja1105_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+ struct sk_buff *clone, *skb = xmit_work->skb;
+ struct dsa_switch *ds = xmit_work->dp->ds;
+ struct sja1105_private *priv = ds->priv;
+ int port = xmit_work->dp->index;
+
+ clone = SJA1105_SKB_CB(skb)->clone;
+
+ mutex_lock(&priv->mgmt_lock);
+
+ sja1105_mgmt_xmit(ds, port, 0, skb, !!clone);
+
+ /* The clone, if any, was made by dsa_skb_tx_timestamp */
+ if (clone)
+ sja1105_ptp_txtstamp_skb(ds, port, clone);
+
+ mutex_unlock(&priv->mgmt_lock);
+
+ kfree(xmit_work);
+}
+
+static int sja1105_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *tagger_data;
+
+ if (proto != priv->info->tag_proto)
+ return -EPROTONOSUPPORT;
+
+ tagger_data = sja1105_tagger_data(ds);
+ tagger_data->xmit_work_fn = sja1105_port_deferred_xmit;
+ tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp;
+
+ return 0;
+}
+
+/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
+ * which cannot be reconfigured at runtime. So a switch reset is required.
+ */
+static int sja1105_set_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ struct sja1105_l2_lookup_params_entry *l2_lookup_params;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_table *table;
+ unsigned int maxage;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+ l2_lookup_params = table->entries;
+
+ maxage = SJA1105_AGEING_TIME_MS(ageing_time);
+
+ if (l2_lookup_params->maxage == maxage)
+ return 0;
+
+ l2_lookup_params->maxage = maxage;
+
+ return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
+}
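+
+/* Example (illustrative; assumes SJA1105_AGEING_TIME_MS() converts the
+ * bridge's milliseconds into the hardware's 10 ms units): an ageing time of
+ * 300 seconds (300000 ms) becomes MAXAGE = 30000, and the expensive static
+ * config reload is only triggered when that value actually changes.
+ */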
+
+static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ new_mtu += VLAN_HLEN;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[port].maxlen == new_mtu)
+ return 0;
+
+ policing[port].maxlen = new_mtu;
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
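+
+/* Worked example: for new_mtu == 1500, MAXLEN becomes
+ * 1500 + VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4) = 1522 bytes on user ports,
+ * plus another VLAN_HLEN (4) = 1526 bytes on CPU/DSA ports, to make room
+ * for the tag_8021q VLAN header. sja1105_get_max_mtu() below inverts the
+ * same arithmetic: 2043 - 18 - 4 = 2021 bytes of maximum MTU.
+ */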
+
+static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
+static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return sja1105_setup_tc_taprio(ds, port, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return sja1105_setup_tc_cbs(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* We have a single mirror (@to) port, but can configure ingress and egress
+ * mirroring on all other (@from) ports.
+ * We need to allow mirroring rules only as long as the @to port is always the
+ * same, and we need to unset the @to port from mirr_port only when there is no
+ * mirroring rule that references it.
+ */
+static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
+ bool ingress, bool enabled)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_mac_config_entry *mac;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ bool already_enabled;
+ u64 new_mirr_port;
+ int rc;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ already_enabled = (general_params->mirr_port != ds->num_ports);
+ if (already_enabled && enabled && general_params->mirr_port != to) {
+ dev_err(priv->ds->dev,
+ "Delete mirroring rules towards port %llu first\n",
+ general_params->mirr_port);
+ return -EBUSY;
+ }
+
+ new_mirr_port = to;
+ if (!enabled) {
+ bool keep = false;
+ int port;
+
+ /* Anybody still referencing mirr_port? */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (mac[port].ing_mirr || mac[port].egr_mirr) {
+ keep = true;
+ break;
+ }
+ }
+ /* Unset already_enabled for next time */
+ if (!keep)
+ new_mirr_port = ds->num_ports;
+ }
+ if (new_mirr_port != general_params->mirr_port) {
+ general_params->mirr_port = new_mirr_port;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
+ 0, general_params, true);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (ingress)
+ mac[from].ing_mirr = enabled;
+ else
+ mac[from].egr_mirr = enabled;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
+ &mac[from], true);
+}
+
+static int sja1105_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
+ ingress, true);
+}
+
+static void sja1105_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
+ mirror->ingress, false);
+}
+
+static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_policer_tc_entry *policer)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ /* In hardware, every 8 microseconds the credit level is incremented by
+ * the value of RATE bytes divided by 64, up to a maximum of SMAX
+ * bytes.
+ */
+ policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
+ 1000000);
+ policing[port].smax = policer->burst;
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
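+
+/* Worked example (illustrative): a 100 Mbit/s policer arrives as
+ * rate_bytes_per_sec == 12500000, so RATE = 512 * 12500000 / 1000000 = 6400.
+ * Cross-checking against the hardware model above: every 8 us the credit
+ * grows by RATE / 64 = 100 bytes, i.e. 100 * 125000 = 12500000 bytes/s,
+ * matching the requested rate.
+ */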
+
+static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ policing[port].rate = SJA1105_RATE_MBPS(1000);
+ policing[port].smax = 65535;
+
+ sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
+static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
+ bool enabled)
+{
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ mac[port].dyn_learn = enabled;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_FLOOD) {
+ if (flags.val & BR_FLOOD)
+ priv->ucast_egress_floods |= BIT(to);
+ else
+ priv->ucast_egress_floods &= ~BIT(to);
+ }
+
+ if (flags.mask & BR_BCAST_FLOOD) {
+ if (flags.val & BR_BCAST_FLOOD)
+ priv->bcast_egress_floods |= BIT(to);
+ else
+ priv->bcast_egress_floods &= ~BIT(to);
+ }
+
+ return sja1105_manage_flood_domains(priv);
+}
+
+static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int match, rc;
+
+ mutex_lock(&priv->fdb_lock);
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+ l2_lookup = table->entries;
+
+ for (match = 0; match < table->entry_count; match++)
+ if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
+ l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
+ break;
+
+ if (match == table->entry_count) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not find FDB entry for unknown multicast");
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ if (flags.val & BR_MCAST_FLOOD)
+ l2_lookup[match].destports |= BIT(to);
+ else
+ l2_lookup[match].destports &= ~BIT(to);
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup[match].index,
+ &l2_lookup[match], true);
+out:
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
+}
+
+static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD) &&
+ !priv->info->can_limit_mcast_flood) {
+ bool multicast = !!(flags.val & BR_MCAST_FLOOD);
+ bool unicast = !!(flags.val & BR_FLOOD);
+
+ if (unicast != multicast) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "This chip cannot configure multicast flooding independently of unicast");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ if (flags.mask & BR_LEARNING) {
+ bool learn_ena = !!(flags.val & BR_LEARNING);
+
+ rc = sja1105_port_set_learning(priv, port, learn_ena);
+ if (rc)
+ return rc;
+ }
+
+ if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
+ rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
+ if (rc)
+ return rc;
+ }
+
+ /* For chips that can't offload BR_MCAST_FLOOD independently, there
+ * is nothing to do here; we already ensured the configuration is in
+ * sync by offloading BR_FLOOD.
+ */
+ if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
+ rc = sja1105_port_mcast_flood(priv, port, flags,
+ extack);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/* The programming model for the SJA1105 switch is "all-at-once" via static
+ * configuration tables. Some of these can be dynamically modified at runtime,
+ * but not the xMII mode parameters table.
+ * Furthermore, some PHYs may not have crystals for generating their clocks
+ * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
+ * ref_clk pin. So port clocking needs to be initialized early, before
+ * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
+ * Setting the correct PHY link speed does not matter at this point.
+ * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
+ * bindings are not yet parsed by DSA core. We need to parse early so that we
+ * can populate the xMII mode parameters table.
+ */
+static int sja1105_setup(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ if (priv->info->disable_microcontroller) {
+ rc = priv->info->disable_microcontroller(priv);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "Failed to disable microcontroller: %pe\n",
+ ERR_PTR(rc));
+ return rc;
+ }
+ }
+
+ /* Create and send configuration down to device */
+ rc = sja1105_static_config_load(priv);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to load static config: %d\n", rc);
+ return rc;
+ }
+
+ /* Configure the CGU (PHY link modes and speeds) */
+ if (priv->info->clocking_setup) {
+ rc = priv->info->clocking_setup(priv);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "Failed to configure MII clocking: %pe\n",
+ ERR_PTR(rc));
+ goto out_static_config_free;
+ }
+ }
+
+ sja1105_tas_setup(ds);
+ sja1105_flower_setup(ds);
+
+ rc = sja1105_ptp_clock_register(ds);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
+ goto out_flower_teardown;
+ }
+
+ rc = sja1105_mdiobus_register(ds);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to register MDIO bus: %pe\n",
+ ERR_PTR(rc));
+ goto out_ptp_clock_unregister;
+ }
+
+ rc = sja1105_devlink_setup(ds);
+ if (rc < 0)
+ goto out_mdiobus_unregister;
+
+ rtnl_lock();
+ rc = dsa_tag_8021q_register(ds, htons(ETH_P_8021Q));
+ rtnl_unlock();
+ if (rc)
+ goto out_devlink_teardown;
+
+ /* On SJA1105, VLAN filtering per se is always enabled in hardware.
+ * The only thing we can do to disable it is lie about what the 802.1Q
+ * EtherType is.
+ * So it will still try to apply VLAN filtering, but all ingress
+ * traffic (except frames received with EtherType of ETH_P_SJA1105)
+ * will be internally tagged with a distorted VLAN header where the
+ * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
+ */
+ ds->vlan_filtering_is_global = true;
+ ds->untag_bridge_pvid = true;
+ ds->fdb_isolation = true;
+ /* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
+ ds->max_num_bridges = 7;
+
+ /* Advertise the 8 egress queues */
+ ds->num_tx_queues = SJA1105_NUM_TC;
+
+ ds->mtu_enforcement_ingress = true;
+ ds->assisted_learning_on_cpu_port = true;
+
+ return 0;
+
+out_devlink_teardown:
+ sja1105_devlink_teardown(ds);
+out_mdiobus_unregister:
+ sja1105_mdiobus_unregister(ds);
+out_ptp_clock_unregister:
+ sja1105_ptp_clock_unregister(ds);
+out_flower_teardown:
+ sja1105_flower_teardown(ds);
+ sja1105_tas_teardown(ds);
+out_static_config_free:
+ sja1105_static_config_free(&priv->static_config);
+
+ return rc;
+}
+
+static void sja1105_teardown(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ rtnl_lock();
+ dsa_tag_8021q_unregister(ds);
+ rtnl_unlock();
+
+ sja1105_devlink_teardown(ds);
+ sja1105_mdiobus_unregister(ds);
+ sja1105_ptp_clock_unregister(ds);
+ sja1105_flower_teardown(ds);
+ sja1105_tas_teardown(ds);
+ sja1105_static_config_free(&priv->static_config);
+}
+
+static const struct dsa_switch_ops sja1105_switch_ops = {
+ .get_tag_protocol = sja1105_get_tag_protocol,
+ .connect_tag_protocol = sja1105_connect_tag_protocol,
+ .setup = sja1105_setup,
+ .teardown = sja1105_teardown,
+ .set_ageing_time = sja1105_set_ageing_time,
+ .port_change_mtu = sja1105_change_mtu,
+ .port_max_mtu = sja1105_get_max_mtu,
+ .phylink_get_caps = sja1105_phylink_get_caps,
+ .phylink_mac_select_pcs = sja1105_mac_select_pcs,
+ .phylink_mac_link_up = sja1105_mac_link_up,
+ .phylink_mac_link_down = sja1105_mac_link_down,
+ .get_strings = sja1105_get_strings,
+ .get_ethtool_stats = sja1105_get_ethtool_stats,
+ .get_sset_count = sja1105_get_sset_count,
+ .get_ts_info = sja1105_get_ts_info,
+ .port_fdb_dump = sja1105_fdb_dump,
+ .port_fdb_add = sja1105_fdb_add,
+ .port_fdb_del = sja1105_fdb_del,
+ .port_fast_age = sja1105_fast_age,
+ .port_bridge_join = sja1105_bridge_join,
+ .port_bridge_leave = sja1105_bridge_leave,
+ .port_pre_bridge_flags = sja1105_port_pre_bridge_flags,
+ .port_bridge_flags = sja1105_port_bridge_flags,
+ .port_stp_state_set = sja1105_bridge_stp_state_set,
+ .port_vlan_filtering = sja1105_vlan_filtering,
+ .port_vlan_add = sja1105_bridge_vlan_add,
+ .port_vlan_del = sja1105_bridge_vlan_del,
+ .port_mdb_add = sja1105_mdb_add,
+ .port_mdb_del = sja1105_mdb_del,
+ .port_hwtstamp_get = sja1105_hwtstamp_get,
+ .port_hwtstamp_set = sja1105_hwtstamp_set,
+ .port_rxtstamp = sja1105_port_rxtstamp,
+ .port_txtstamp = sja1105_port_txtstamp,
+ .port_setup_tc = sja1105_port_setup_tc,
+ .port_mirror_add = sja1105_mirror_add,
+ .port_mirror_del = sja1105_mirror_del,
+ .port_policer_add = sja1105_port_policer_add,
+ .port_policer_del = sja1105_port_policer_del,
+ .cls_flower_add = sja1105_cls_flower_add,
+ .cls_flower_del = sja1105_cls_flower_del,
+ .cls_flower_stats = sja1105_cls_flower_stats,
+ .devlink_info_get = sja1105_devlink_info_get,
+ .tag_8021q_vlan_add = sja1105_dsa_8021q_vlan_add,
+ .tag_8021q_vlan_del = sja1105_dsa_8021q_vlan_del,
+ .port_prechangeupper = sja1105_prechangeupper,
+};
+
+static const struct of_device_id sja1105_dt_ids[];
+
+static int sja1105_check_device_id(struct sja1105_private *priv)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
+ struct device *dev = &priv->spidev->dev;
+ const struct of_device_id *match;
+ u32 device_id;
+ u64 part_no;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
+ NULL);
+ if (rc < 0)
+ return rc;
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
+ SJA1105_SIZE_DEVICE_ID);
+ if (rc < 0)
+ return rc;
+
+ sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
+
+ for (match = sja1105_dt_ids; match->compatible[0]; match++) {
+ const struct sja1105_info *info = match->data;
+
+ /* Is the chip we probed present in our match table at all? */
+ if (info->device_id != device_id || info->part_no != part_no)
+ continue;
+
+ /* But is it what's in the device tree? */
+ if (priv->info->device_id != device_id ||
+ priv->info->part_no != part_no) {
+ dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
+ priv->info->name, info->name);
+ /* It isn't. No problem, pick that up. */
+ priv->info = info;
+ }
+
+ return 0;
+ }
+
+ dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
+ device_id, part_no);
+
+ return -ENODEV;
+}
+
+static int sja1105_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct sja1105_private *priv;
+ size_t max_xfer, max_msg;
+ struct dsa_switch *ds;
+ int rc;
+
+ if (!dev->of_node) {
+ dev_err(dev, "No DTS bindings for SJA1105 driver\n");
+ return -EINVAL;
+ }
+
+ rc = sja1105_hw_reset(dev, 1, 1);
+ if (rc)
+ return rc;
+
+ priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Populate our driver private structure (priv) based on
+ * the device tree node that was probed (spi)
+ */
+ priv->spidev = spi;
+ spi_set_drvdata(spi, priv);
+
+ /* Configure the SPI bus */
+ spi->bits_per_word = 8;
+ rc = spi_setup(spi);
+ if (rc < 0) {
+ dev_err(dev, "Could not init SPI\n");
+ return rc;
+ }
+
+ /* In sja1105_xfer, we send spi_messages composed of two spi_transfers:
+ * a small one for the message header and another one for the current
+ * chunk of the packed buffer.
+ * Check that the restrictions imposed by the SPI controller are
+ * respected: the chunk buffer is smaller than the max transfer size,
+ * and the total length of the chunk plus its message header is smaller
+ * than the max message size.
+ * We do that during probe time since the maximum transfer size is a
+ * runtime invariant.
+ */
+ max_xfer = spi_max_transfer_size(spi);
+ max_msg = spi_max_message_size(spi);
+
+ /* We need to send at least one 64-bit word of SPI payload per message
+ * in order to be able to make useful progress.
+ */
+ if (max_msg < SJA1105_SIZE_SPI_MSG_HEADER + 8) {
+ dev_err(dev, "SPI master cannot send large enough buffers, aborting\n");
+ return -EINVAL;
+ }
+
+ priv->max_xfer_len = SJA1105_SIZE_SPI_MSG_MAXLEN;
+ if (priv->max_xfer_len > max_xfer)
+ priv->max_xfer_len = max_xfer;
+ if (priv->max_xfer_len > max_msg - SJA1105_SIZE_SPI_MSG_HEADER)
+ priv->max_xfer_len = max_msg - SJA1105_SIZE_SPI_MSG_HEADER;
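+ /* Example (illustrative; assumes SJA1105_SIZE_SPI_MSG_MAXLEN == 256
+ * and a 4-byte SJA1105_SIZE_SPI_MSG_HEADER): on a controller with
+ * max_xfer == 4096 but max_msg == 192, the chunk length is clamped to
+ * 192 - 4 = 188 bytes, so header plus chunk always fit in one message.
+ */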
+
+ priv->info = of_device_get_match_data(dev);
+
+ /* Detect hardware device */
+ rc = sja1105_check_device_id(priv);
+ if (rc < 0) {
+ dev_err(dev, "Device ID check failed: %d\n", rc);
+ return rc;
+ }
+
+ dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
+
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ ds->dev = dev;
+ ds->num_ports = priv->info->num_ports;
+ ds->ops = &sja1105_switch_ops;
+ ds->priv = priv;
+ priv->ds = ds;
+
+ mutex_init(&priv->ptp_data.lock);
+ mutex_init(&priv->dynamic_config_lock);
+ mutex_init(&priv->mgmt_lock);
+ mutex_init(&priv->fdb_lock);
+ spin_lock_init(&priv->ts_id_lock);
+
+ rc = sja1105_parse_dt(priv);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
+ return rc;
+ }
+
+ if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
+ priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
+ sizeof(struct sja1105_cbs_entry),
+ GFP_KERNEL);
+ if (!priv->cbs)
+ return -ENOMEM;
+ }
+
+ return dsa_register_switch(priv->ds);
+}
+
+static void sja1105_remove(struct spi_device *spi)
+{
+ struct sja1105_private *priv = spi_get_drvdata(spi);
+
+ if (!priv)
+ return;
+
+ dsa_unregister_switch(priv->ds);
+}
+
+static void sja1105_shutdown(struct spi_device *spi)
+{
+ struct sja1105_private *priv = spi_get_drvdata(spi);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ spi_set_drvdata(spi, NULL);
+}
+
+static const struct of_device_id sja1105_dt_ids[] = {
+ { .compatible = "nxp,sja1105e", .data = &sja1105e_info },
+ { .compatible = "nxp,sja1105t", .data = &sja1105t_info },
+ { .compatible = "nxp,sja1105p", .data = &sja1105p_info },
+ { .compatible = "nxp,sja1105q", .data = &sja1105q_info },
+ { .compatible = "nxp,sja1105r", .data = &sja1105r_info },
+ { .compatible = "nxp,sja1105s", .data = &sja1105s_info },
+ { .compatible = "nxp,sja1110a", .data = &sja1110a_info },
+ { .compatible = "nxp,sja1110b", .data = &sja1110b_info },
+ { .compatible = "nxp,sja1110c", .data = &sja1110c_info },
+ { .compatible = "nxp,sja1110d", .data = &sja1110d_info },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
+
+static const struct spi_device_id sja1105_spi_ids[] = {
+ { "sja1105e" },
+ { "sja1105t" },
+ { "sja1105p" },
+ { "sja1105q" },
+ { "sja1105r" },
+ { "sja1105s" },
+ { "sja1110a" },
+ { "sja1110b" },
+ { "sja1110c" },
+ { "sja1110d" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
+
+static struct spi_driver sja1105_driver = {
+ .driver = {
+ .name = "sja1105",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(sja1105_dt_ids),
+ },
+ .id_table = sja1105_spi_ids,
+ .probe = sja1105_probe,
+ .remove = sja1105_remove,
+ .shutdown = sja1105_shutdown,
+};
+
+module_spi_driver(sja1105_driver);
+
+MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
+MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
+MODULE_DESCRIPTION("SJA1105 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
new file mode 100644
index 000000000..4059fcc8c
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2021 NXP
+ */
+#include <linux/pcs/pcs-xpcs.h>
+#include <linux/of_mdio.h>
+#include "sja1105.h"
+
+#define SJA1110_PCS_BANK_REG SJA1110_SPI_ADDR(0x3fc)
+
+int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+ int rc;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+
+ if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
+ return 0xffff;
+
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
+ return NXP_SJA1105_XPCS_ID >> 16;
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID2)
+ return NXP_SJA1105_XPCS_ID & GENMASK(15, 0);
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ tmp = val;
+
+ if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
+ return -EINVAL;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+}
+
+int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ int offset, bank;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+ int rc;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
+ return -ENODEV;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
+ return NXP_SJA1110_XPCS_ID >> 16;
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID2)
+ return NXP_SJA1110_XPCS_ID & GENMASK(15, 0);
+
+ bank = addr >> 8;
+ offset = addr & GENMASK(7, 0);
+
+ /* This addressing scheme reserves register 0xff for the bank address
+ * register, so that can never be addressed.
+ */
+ if (WARN_ON(offset == 0xff))
+ return -ENODEV;
+
+ tmp = bank;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE,
+ regs->pcs_base[phy] + SJA1110_PCS_BANK_REG,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->pcs_base[phy] + offset,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
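+
+/* Example (illustrative): a read of MDIO_MMD_VEND2 (0x1f) register 0x0001
+ * encodes addr = (0x1f << 16) | 0x0001 = 0x1f0001, hence bank = 0x1f00 and
+ * offset = 0x01. The access then costs two SPI operations: a write of the
+ * bank to SJA1110_PCS_BANK_REG, followed by a read at pcs_base + offset.
+ */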
+
+int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ int offset, bank;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+ int rc;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
+ return -ENODEV;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+
+ bank = addr >> 8;
+ offset = addr & GENMASK(7, 0);
+
+ /* This addressing scheme reserves register 0xff for the bank address
+ * register, so that can never be addressed.
+ */
+ if (WARN_ON(offset == 0xff))
+ return -ENODEV;
+
+ tmp = bank;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE,
+ regs->pcs_base[phy] + SJA1110_PCS_BANK_REG,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ tmp = val;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->pcs_base[phy] + offset,
+ &tmp, NULL);
+}
+
+enum sja1105_mdio_opcode {
+ SJA1105_C45_ADDR = 0,
+ SJA1105_C22 = 1,
+ SJA1105_C45_DATA = 2,
+ SJA1105_C45_DATA_AUTOINC = 3,
+};
+
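+/* Compose an SPI address inside the 100base-T1 MDIO register window.
+ * As encoded below, the PHY address occupies bits 11:7, the MDIO opcode
+ * bits 6:5, and the register (C22) or MMD (C45) index bits 4:0.
+ */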
+static u64 sja1105_base_t1_encode_addr(struct sja1105_private *priv,
+ int phy, enum sja1105_mdio_opcode op,
+ int xad)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return regs->mdio_100base_t1 | (phy << 7) | (op << 5) | (xad << 0);
+}
+
+static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
+
+ if (reg & MII_ADDR_C45) {
+ u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
+ mmd);
+
+ tmp = reg & MII_REGADDR_C45_MASK;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
+ mmd);
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+ }
+
+ /* Clause 22 read */
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+static int sja1105_base_t1_mdio_write(struct mii_bus *bus, int phy, int reg,
+ u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
+
+ if (reg & MII_ADDR_C45) {
+ u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
+ mmd);
+
+ tmp = reg & MII_REGADDR_C45_MASK;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
+ mmd);
+
+ tmp = val & 0xffff;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+ }
+
+ /* Clause 22 write */
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+
+ tmp = val & 0xffff;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+}
+
+static int sja1105_base_tx_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 tmp;
+ int rc;
+
+ if (reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->mdio_100base_tx + reg,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+static int sja1105_base_tx_mdio_write(struct mii_bus *bus, int phy, int reg,
+ u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 tmp = val;
+
+ if (reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->mdio_100base_tx + reg,
+ &tmp, NULL);
+}
+
+static int sja1105_mdiobus_base_tx_register(struct sja1105_private *priv,
+ struct device_node *mdio_node)
+{
+ struct sja1105_mdio_private *mdio_priv;
+ struct device_node *np;
+ struct mii_bus *bus;
+ int rc = 0;
+
+ np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-tx-mdio");
+ if (!np)
+ return 0;
+
+ if (!of_device_is_available(np))
+ goto out_put_np;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus) {
+ rc = -ENOMEM;
+ goto out_put_np;
+ }
+
+ bus->name = "SJA1110 100base-TX MDIO bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-tx",
+ dev_name(priv->ds->dev));
+ bus->read = sja1105_base_tx_mdio_read;
+ bus->write = sja1105_base_tx_mdio_write;
+ bus->parent = priv->ds->dev;
+ mdio_priv = bus->priv;
+ mdio_priv->priv = priv;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc) {
+ mdiobus_free(bus);
+ goto out_put_np;
+ }
+
+ priv->mdio_base_tx = bus;
+
+out_put_np:
+ of_node_put(np);
+
+ return rc;
+}
+
+static void sja1105_mdiobus_base_tx_unregister(struct sja1105_private *priv)
+{
+ if (!priv->mdio_base_tx)
+ return;
+
+ mdiobus_unregister(priv->mdio_base_tx);
+ mdiobus_free(priv->mdio_base_tx);
+ priv->mdio_base_tx = NULL;
+}
+
+static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
+ struct device_node *mdio_node)
+{
+ struct sja1105_mdio_private *mdio_priv;
+ struct device_node *np;
+ struct mii_bus *bus;
+ int rc = 0;
+
+ np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-t1-mdio");
+ if (!np)
+ return 0;
+
+ if (!of_device_is_available(np))
+ goto out_put_np;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus) {
+ rc = -ENOMEM;
+ goto out_put_np;
+ }
+
+ bus->name = "SJA1110 100base-T1 MDIO bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-t1",
+ dev_name(priv->ds->dev));
+ bus->read = sja1105_base_t1_mdio_read;
+ bus->write = sja1105_base_t1_mdio_write;
+ bus->parent = priv->ds->dev;
+ mdio_priv = bus->priv;
+ mdio_priv->priv = priv;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc) {
+ mdiobus_free(bus);
+ goto out_put_np;
+ }
+
+ priv->mdio_base_t1 = bus;
+
+out_put_np:
+ of_node_put(np);
+
+ return rc;
+}
+
+static void sja1105_mdiobus_base_t1_unregister(struct sja1105_private *priv)
+{
+ if (!priv->mdio_base_t1)
+ return;
+
+ mdiobus_unregister(priv->mdio_base_t1);
+ mdiobus_free(priv->mdio_base_t1);
+ priv->mdio_base_t1 = NULL;
+}
+
+static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
+{
+ struct sja1105_mdio_private *mdio_priv;
+ struct dsa_switch *ds = priv->ds;
+ struct mii_bus *bus;
+ int rc = 0;
+ int port;
+
+ if (!priv->info->pcs_mdio_read || !priv->info->pcs_mdio_write)
+ return 0;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "SJA1105 PCS MDIO bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
+ dev_name(ds->dev));
+ bus->read = priv->info->pcs_mdio_read;
+ bus->write = priv->info->pcs_mdio_write;
+ bus->parent = ds->dev;
+ /* There is no PHY on this MDIO bus => mask out all PHY addresses
+ * from auto probing.
+ */
+ bus->phy_mask = ~0;
+ mdio_priv = bus->priv;
+ mdio_priv->priv = priv;
+
+ rc = mdiobus_register(bus);
+ if (rc) {
+ mdiobus_free(bus);
+ return rc;
+ }
+
+ for (port = 0; port < ds->num_ports; port++) {
+ struct mdio_device *mdiodev;
+ struct dw_xpcs *xpcs;
+
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ if (priv->phy_mode[port] != PHY_INTERFACE_MODE_SGMII &&
+ priv->phy_mode[port] != PHY_INTERFACE_MODE_2500BASEX)
+ continue;
+
+ mdiodev = mdio_device_create(bus, port);
+ if (IS_ERR(mdiodev)) {
+ rc = PTR_ERR(mdiodev);
+ goto out_pcs_free;
+ }
+
+ xpcs = xpcs_create(mdiodev, priv->phy_mode[port]);
+ if (IS_ERR(xpcs)) {
+ rc = PTR_ERR(xpcs);
+ goto out_pcs_free;
+ }
+
+ priv->xpcs[port] = xpcs;
+ }
+
+ priv->mdio_pcs = bus;
+
+ return 0;
+
+out_pcs_free:
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!priv->xpcs[port])
+ continue;
+
+ mdio_device_free(priv->xpcs[port]->mdiodev);
+ xpcs_destroy(priv->xpcs[port]);
+ priv->xpcs[port] = NULL;
+ }
+
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+
+ return rc;
+}
+
+static void sja1105_mdiobus_pcs_unregister(struct sja1105_private *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ int port;
+
+ if (!priv->mdio_pcs)
+ return;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!priv->xpcs[port])
+ continue;
+
+ mdio_device_free(priv->xpcs[port]->mdiodev);
+ xpcs_destroy(priv->xpcs[port]);
+ priv->xpcs[port] = NULL;
+ }
+
+ mdiobus_unregister(priv->mdio_pcs);
+ mdiobus_free(priv->mdio_pcs);
+ priv->mdio_pcs = NULL;
+}
+
+int sja1105_mdiobus_register(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device_node *switch_node = ds->dev->of_node;
+ struct device_node *mdio_node;
+ int rc;
+
+ rc = sja1105_mdiobus_pcs_register(priv);
+ if (rc)
+ return rc;
+
+ mdio_node = of_get_child_by_name(switch_node, "mdios");
+ if (!mdio_node)
+ return 0;
+
+ if (!of_device_is_available(mdio_node))
+ goto out_put_mdio_node;
+
+ if (regs->mdio_100base_tx != SJA1105_RSV_ADDR) {
+ rc = sja1105_mdiobus_base_tx_register(priv, mdio_node);
+ if (rc)
+ goto err_put_mdio_node;
+ }
+
+ if (regs->mdio_100base_t1 != SJA1105_RSV_ADDR) {
+ rc = sja1105_mdiobus_base_t1_register(priv, mdio_node);
+ if (rc)
+ goto err_free_base_tx_mdiobus;
+ }
+
+out_put_mdio_node:
+ of_node_put(mdio_node);
+
+ return 0;
+
+err_free_base_tx_mdiobus:
+ sja1105_mdiobus_base_tx_unregister(priv);
+err_put_mdio_node:
+ of_node_put(mdio_node);
+ sja1105_mdiobus_pcs_unregister(priv);
+
+ return rc;
+}
+
+void sja1105_mdiobus_unregister(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ sja1105_mdiobus_base_t1_unregister(priv);
+ sja1105_mdiobus_base_tx_unregister(priv);
+ sja1105_mdiobus_pcs_unregister(priv);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
new file mode 100644
index 000000000..a7d41e781
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -0,0 +1,974 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/spi/spi.h>
+#include "sja1105.h"
+
+/* The adjfine API clamps ppb between [-32,768,000, 32,768,000], and
+ * therefore scaled_ppm between [-2,147,483,648, 2,147,483,647].
+ * Set the maximum supported ppb to a round value smaller than the maximum.
+ *
+ * In percentage terms, this is a +/- 3.2% (0.032x) adjustment of the
+ * free-running counter (0.968x to 1.032x).
+ */
+#define SJA1105_MAX_ADJ_PPB 32000000
+#define SJA1105_SIZE_PTP_CMD 4
+
+/* PTPSYNCTS has no interrupt or update mechanism, because the intended
+ * hardware use case is for the timestamp to be collected synchronously,
+ * immediately after the CAS_MASTER SJA1105 switch has performed a CASSYNC
+ * one-shot toggle (no return to level) on the PTP_CLK pin. When used as a
+ * generic extts source, the PTPSYNCTS register needs polling and a comparison
+ * with the old value. The polling interval is configured as the Nyquist rate
+ * of a signal with 50% duty cycle and 1Hz frequency, which is sadly all that
+ * this hardware can do (but may be enough for some setups). Anything of higher
+ * frequency than 1 Hz will be lost, since there is no timestamp FIFO.
+ */
+#define SJA1105_EXTTS_INTERVAL (HZ / 6)
+
+/*               This range is actually +/- SJA1105_MAX_ADJ_PPB
+ *               divided by 1000 (ppb -> ppm) and with a 16-bit
+ *               "fractional" part (actually fixed point).
+ *                                   |
+ *                                   v
+ * Convert scaled_ppm from the +/- ((10^6) << 16) range
+ * into the +/- (1 << 31) range.
+ *
+ * This forgoes a "ppb" numeric representation (up to NSEC_PER_SEC)
+ * and defines the scaling factor between scaled_ppm and the actual
+ * frequency adjustments of the PHC.
+ *
+ * ptpclkrate = scaled_ppm * 2^31 / (10^6 * 2^16)
+ * simplifies to
+ * ptpclkrate = scaled_ppm * 2^9 / 5^6
+ */
+#define SJA1105_CC_MULT_NUM (1 << 9)
+#define SJA1105_CC_MULT_DEM 15625
+#define SJA1105_CC_MULT 0x80000000
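+
+/* Worked example of the scaling above: scaled_ppm = 65536 (+1 ppm)
+ * gives 65536 * 512 / 15625 = 2147, so the programmed ptpclkrate is
+ * 0x80000000 + 2147 -- consistent with 2^31 * 10^-6 ~= 2147.48.
+ */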
+
+enum sja1105_ptp_clk_mode {
+ PTP_ADD_MODE = 1,
+ PTP_SET_MODE = 0,
+};
+
+#define extts_to_data(t) \
+ container_of((t), struct sja1105_ptp_data, extts_timer)
+#define ptp_caps_to_data(d) \
+ container_of((d), struct sja1105_ptp_data, caps)
+#define ptp_data_to_sja1105(d) \
+ container_of((d), struct sja1105_private, ptp_data)
+
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->hwts_tx_en &= ~BIT(port);
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->hwts_tx_en |= BIT(port);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->hwts_rx_en &= ~BIT(port);
+ break;
+ default:
+ priv->hwts_rx_en |= BIT(port);
+ break;
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+ return 0;
+}
+
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ if (priv->hwts_tx_en & BIT(port))
+ config.tx_type = HWTSTAMP_TX_ON;
+ else
+ config.tx_type = HWTSTAMP_TX_OFF;
+ if (priv->hwts_rx_en & BIT(port))
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+int sja1105_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ /* Called during cleanup */
+ if (!ptp_data->clock)
+ return -ENODEV;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
+ info->phc_index = ptp_clock_index(ptp_data->clock);
+ return 0;
+}
+
+void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_PTP_CMD;
+ /* No need to keep this as part of the structure */
+ u64 valid = 1;
+
+ sja1105_packing(buf, &valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->startptpcp, 28, 28, size, op);
+ sja1105_packing(buf, &cmd->stopptpcp, 27, 27, size, op);
+ sja1105_packing(buf, &cmd->resptp, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->corrclk4ts, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
+}
+
+void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_PTP_CMD;
+ /* No need to keep this as part of the structure */
+ u64 valid = 1;
+
+ sja1105_packing(buf, &valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->startptpcp, 28, 28, size, op);
+ sja1105_packing(buf, &cmd->stopptpcp, 27, 27, size, op);
+ sja1105_packing(buf, &cmd->resptp, 3, 3, size, op);
+ sja1105_packing(buf, &cmd->corrclk4ts, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
+}
+
+int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw)
+{
+ const struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
+ int rc;
+
+ if (rw == SPI_WRITE)
+ priv->info->ptp_cmd_packing(buf, cmd, PACK);
+
+ rc = sja1105_xfer_buf(priv, rw, regs->ptp_control, buf,
+ SJA1105_SIZE_PTP_CMD);
+
+ if (rw == SPI_READ)
+ priv->info->ptp_cmd_packing(buf, cmd, UNPACK);
+
+ return rc;
+}
+
+/* The switch returns partial timestamps (24 bits for SJA1105 E/T, which wrap
+ * around in 0.135 seconds, and 32 bits for P/Q/R/S, wrapping around in 34.35
+ * seconds).
+ *
+ * This receives the RX or TX MAC timestamps, provided by hardware as
+ * the lower bits of the cycle counter, sampled at the time the timestamp was
+ * collected.
+ *
+ * To reconstruct into a full 64-bit-wide timestamp, the cycle counter is
+ * read and the high-order bits are filled in.
+ *
+ * Must be called within one wraparound period of the partial timestamp since
+ * it was generated by the MAC.
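+ *
+ * Worked example with 24-bit timestamps (mask 0xffffff): if now =
+ * 0x5000010 and ts_partial = 0xfffff0, the low bits of now (0x000010)
+ * are <= ts_partial, so the counter wrapped after the timestamp was
+ * taken and the reconstructed value is 0x4fffff0, not 0x5fffff0.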
+ */
+static u64 sja1105_tstamp_reconstruct(struct dsa_switch *ds, u64 now,
+ u64 ts_partial)
+{
+ struct sja1105_private *priv = ds->priv;
+ u64 partial_tstamp_mask = CYCLECOUNTER_MASK(priv->info->ptp_ts_bits);
+ u64 ts_reconstructed;
+
+ ts_reconstructed = (now & ~partial_tstamp_mask) | ts_partial;
+
+ /* Check lower bits of current cycle counter against the timestamp.
+ * If the current cycle counter is lower than the partial timestamp,
+ * then wraparound surely occurred and must be accounted for.
+ */
+ if ((now & partial_tstamp_mask) <= ts_partial)
+ ts_reconstructed -= (partial_tstamp_mask + 1);
+
+ return ts_reconstructed;
+}
+
+/* Reads the SPI interface for an egress timestamp generated by the switch
+ * for frames sent using management routes.
+ *
+ * SJA1105 E/T layout of the 4-byte SPI payload:
+ *
+ *  31    23    15    7     0
+ *  |     |     |     |     |
+ *  +-----+-----+-----+     ^
+ *         ^                |
+ *         |                |
+ *  24-bit timestamp    Update bit
+ *
+ *
+ * SJA1105 P/Q/R/S layout of the 8-byte SPI payload:
+ *
+ *  31    23    15    7     0     63    55    47    39    32
+ *  |     |     |     |     |     |     |     |     |     |
+ *                           ^     +-----+-----+-----+-----+
+ *                           |                ^
+ *                           |                |
+ *                      Update bit       32-bit timestamp
+ *
+ * Notice that the update bit is in the same place.
+ * To have common code for E/T and P/Q/R/S for reading the timestamp,
+ * we need to juggle the offset and the bit indices.
+ */
+static int sja1105_ptpegr_ts_poll(struct dsa_switch *ds, int port, u64 *ts)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ int tstamp_bit_start, tstamp_bit_end;
+ int timeout = 10;
+ u8 packed_buf[8];
+ u64 update;
+ int rc;
+
+ do {
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->ptpegr_ts[port],
+ packed_buf, priv->info->ptpegr_ts_bytes);
+ if (rc < 0)
+ return rc;
+
+ sja1105_unpack(packed_buf, &update, 0, 0,
+ priv->info->ptpegr_ts_bytes);
+ if (update)
+ break;
+
+ usleep_range(10, 50);
+ } while (--timeout);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ /* Point the end bit to the second 32-bit word on P/Q/R/S,
+ * no-op on E/T.
+ */
+ tstamp_bit_end = (priv->info->ptpegr_ts_bytes - 4) * 8;
+ /* Shift the 24-bit timestamp on E/T to be collected from 31:8.
+ * No-op on P/Q/R/S.
+ */
+ tstamp_bit_end += 32 - priv->info->ptp_ts_bits;
+ tstamp_bit_start = tstamp_bit_end + priv->info->ptp_ts_bits - 1;
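+	/* Per the layouts above, this works out to bits 31:8 on E/T
+	 * (4 bytes, 24-bit timestamp) and bits 63:32 on P/Q/R/S
+	 * (8 bytes, 32-bit timestamp).
+	 */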
+
+ *ts = 0;
+
+ sja1105_unpack(packed_buf, ts, tstamp_bit_start, tstamp_bit_end,
+ priv->info->ptpegr_ts_bytes);
+
+ return 0;
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptpclkval_read(struct sja1105_private *priv, u64 *ticks,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return sja1105_xfer_u64(priv, SPI_READ, regs->ptpclkval, ticks,
+ ptp_sts);
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpclkval, &ticks,
+ ptp_sts);
+}
+
+static void sja1105_extts_poll(struct sja1105_private *priv)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct ptp_clock_event event;
+ u64 ptpsyncts = 0;
+ int rc;
+
+ rc = sja1105_xfer_u64(priv, SPI_READ, regs->ptpsyncts, &ptpsyncts,
+ NULL);
+ if (rc < 0)
+ dev_err_ratelimited(priv->ds->dev,
+ "Failed to read PTPSYNCTS: %d\n", rc);
+
+ if (ptpsyncts && ptp_data->ptpsyncts != ptpsyncts) {
+ event.index = 0;
+ event.type = PTP_CLOCK_EXTTS;
+ event.timestamp = ns_to_ktime(sja1105_ticks_to_ns(ptpsyncts));
+ ptp_clock_event(ptp_data->clock, &event);
+
+ ptp_data->ptpsyncts = ptpsyncts;
+ }
+}
+
+static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ struct dsa_switch *ds = priv->ds;
+ struct sk_buff *skb;
+
+ mutex_lock(&ptp_data->lock);
+
+ while ((skb = skb_dequeue(&ptp_data->skb_rxtstamp_queue)) != NULL) {
+ struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
+ u64 ticks, ts;
+ int rc;
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ kfree_skb(skb);
+ continue;
+ }
+
+ *shwt = (struct skb_shared_hwtstamps) {0};
+
+ ts = SJA1105_SKB_CB(skb)->tstamp;
+ ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
+
+ shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+ netif_rx(skb);
+ }
+
+ if (ptp_data->extts_enabled)
+ sja1105_extts_poll(priv);
+
+ mutex_unlock(&ptp_data->lock);
+
+ /* Don't restart */
+ return -1;
+}
+
+bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (!(priv->hwts_rx_en & BIT(port)))
+ return false;
+
+ /* We need to read the full PTP clock to reconstruct the Rx
+ * timestamp. For that we need a sleepable context.
+ */
+ skb_queue_tail(&ptp_data->skb_rxtstamp_queue, skb);
+ ptp_schedule_worker(ptp_data->clock, 0);
+ return true;
+}
+
+bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
+ u64 ts = SJA1105_SKB_CB(skb)->tstamp;
+
+ *shwt = (struct skb_shared_hwtstamps) {0};
+
+ shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+
+ /* Don't defer */
+ return false;
+}
+
+/* Called from dsa_skb_defer_rx_timestamp */
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ return priv->info->rxtstamp(ds, port, skb);
+}
+
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shwt = {0};
+
+ /* We don't care about RX timestamps on the CPU port */
+ if (dir == SJA1110_META_TSTAMP_RX)
+ return;
+
+ spin_lock(&ptp_data->skb_txtstamp_queue.lock);
+
+ skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) {
+ if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
+ continue;
+
+ __skb_unlink(skb, &ptp_data->skb_txtstamp_queue);
+ skb_match = skb;
+
+ break;
+ }
+
+ spin_unlock(&ptp_data->skb_txtstamp_queue.lock);
+
+ if (WARN_ON(!skb_match))
+ return;
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
+ skb_complete_tx_timestamp(skb_match, &shwt);
+}
+
+/* In addition to cloning the skb which is done by the common
+ * sja1105_port_txtstamp, we need to generate a timestamp ID and save the
+ * packet to the TX timestamping queue.
+ */
+void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ u8 ts_id;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ spin_lock(&priv->ts_id_lock);
+
+ ts_id = priv->ts_id;
+ /* Deal automatically with 8-bit wraparound */
+ priv->ts_id++;
+
+ SJA1105_SKB_CB(clone)->ts_id = ts_id;
+
+ spin_unlock(&priv->ts_id_lock);
+
+ skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone);
+}
+
+/* Called from dsa_skb_tx_timestamp. This callback is just to clone
+ * the skb and have it available in SJA1105_SKB_CB in the .port_deferred_xmit
+ * callback, where we will timestamp it synchronously.
+ */
+void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sk_buff *clone;
+
+ if (!(priv->hwts_tx_en & BIT(port)))
+ return;
+
+ clone = skb_clone_sk(skb);
+ if (!clone)
+ return;
+
+ SJA1105_SKB_CB(skb)->clone = clone;
+
+ if (priv->info->txtstamp)
+ priv->info->txtstamp(ds, port, skb);
+}
+
+static int sja1105_ptp_reset(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sja1105_ptp_cmd cmd = ptp_data->cmd;
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ cmd.resptp = 1;
+
+ dev_dbg(ds->dev, "Resetting PTP clock\n");
+ rc = sja1105_ptp_commit(ds, &cmd, SPI_WRITE);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+/* Caller must hold ptp_data->lock */
+int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_private *priv = ds->priv;
+ u64 ticks;
+ int rc;
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, ptp_sts);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ return rc;
+ }
+
+ *ns = sja1105_ticks_to_ns(ticks);
+
+ return 0;
+}
+
+static int sja1105_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ u64 now = 0;
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = __sja1105_ptp_gettimex(priv->ds, &now, ptp_sts);
+ *ts = ns_to_timespec64(now);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptp_mode_set(struct sja1105_private *priv,
+ enum sja1105_ptp_clk_mode mode)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (ptp_data->cmd.ptpclkadd == mode)
+ return 0;
+
+ ptp_data->cmd.ptpclkadd = mode;
+
+ return sja1105_ptp_commit(priv->ds, &ptp_data->cmd, SPI_WRITE);
+}
+
+/* Write to PTPCLKVAL while PTPCLKADD is 0 */
+int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_private *priv = ds->priv;
+ u64 ticks = ns_to_sja1105_ticks(ns);
+ int rc;
+
+ rc = sja1105_ptp_mode_set(priv, PTP_SET_MODE);
+ if (rc < 0) {
+ dev_err(priv->ds->dev, "Failed to put PTPCLK in set mode\n");
+ return rc;
+ }
+
+ rc = sja1105_ptpclkval_write(priv, ticks, ptp_sts);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ return rc;
+}
+
+static int sja1105_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ u64 ns = timespec64_to_ns(ts);
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = __sja1105_ptp_settime(priv->ds, ns, NULL);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+static int sja1105_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 clkrate32;
+ s64 clkrate;
+ int rc;
+
+ clkrate = (s64)scaled_ppm * SJA1105_CC_MULT_NUM;
+ clkrate = div_s64(clkrate, SJA1105_CC_MULT_DEM);
+
+ /* Take a +/- value and re-center it around 2^31. */
+ clkrate = SJA1105_CC_MULT + clkrate;
+ WARN_ON(abs(clkrate) >= GENMASK_ULL(31, 0));
+ clkrate32 = clkrate;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkrate, &clkrate32,
+ NULL);
+
+ sja1105_tas_adjfreq(priv->ds);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+/* Write to PTPCLKVAL while PTPCLKADD is 1 */
+int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta)
+{
+ struct sja1105_private *priv = ds->priv;
+ s64 ticks = ns_to_sja1105_ticks(delta);
+ int rc;
+
+ rc = sja1105_ptp_mode_set(priv, PTP_ADD_MODE);
+ if (rc < 0) {
+ dev_err(priv->ds->dev, "Failed to put PTPCLK in add mode\n");
+ return rc;
+ }
+
+ rc = sja1105_ptpclkval_write(priv, ticks, NULL);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ return rc;
+}
+
+static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = __sja1105_ptp_adjtime(priv->ds, delta);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+static void sja1105_ptp_extts_setup_timer(struct sja1105_ptp_data *ptp_data)
+{
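+	/* Round the expiry up to the next multiple of the polling
+	 * interval, so that wakeups stay on a fixed grid no matter
+	 * when this is called.
+	 */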
+ unsigned long expires = ((jiffies / SJA1105_EXTTS_INTERVAL) + 1) *
+ SJA1105_EXTTS_INTERVAL;
+
+ mod_timer(&ptp_data->extts_timer, expires);
+}
+
+static void sja1105_ptp_extts_timer(struct timer_list *t)
+{
+ struct sja1105_ptp_data *ptp_data = extts_to_data(t);
+
+ ptp_schedule_worker(ptp_data->clock, 0);
+
+ sja1105_ptp_extts_setup_timer(ptp_data);
+}
+
+static int sja1105_change_ptp_clk_pin_func(struct sja1105_private *priv,
+ enum ptp_pin_function func)
+{
+ struct sja1105_avb_params_entry *avb;
+ enum ptp_pin_function old_func;
+
+ avb = priv->static_config.tables[BLK_IDX_AVB_PARAMS].entries;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID ||
+ avb->cas_master)
+ old_func = PTP_PF_PEROUT;
+ else
+ old_func = PTP_PF_EXTTS;
+
+ if (func == old_func)
+ return 0;
+
+ avb->cas_master = (func == PTP_PF_PEROUT);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_AVB_PARAMS, 0, avb,
+ true);
+}
+
+/* The PTP_CLK pin may be configured to toggle with a 50% duty cycle and a
+ * frequency f:
+ *
+ *          NSEC_PER_SEC
+ * f = ----------------------
+ *     (PTPPINDUR * 8 ns) * 2
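+ *
+ * For example, PTPPINDUR = 62500000 gives a half-period of
+ * 62500000 * 8 ns = 0.5 s, i.e. f = 1 Hz.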
+ */
+static int sja1105_per_out_enable(struct sja1105_private *priv,
+ struct ptp_perout_request *perout,
+ bool on)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_ptp_cmd cmd = ptp_data->cmd;
+ int rc;
+
+ /* We only support one channel */
+ if (perout->index != 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (perout->flags)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_PEROUT);
+ if (rc)
+ goto out;
+
+ if (on) {
+ struct timespec64 pin_duration_ts = {
+ .tv_sec = perout->period.sec,
+ .tv_nsec = perout->period.nsec,
+ };
+ struct timespec64 pin_start_ts = {
+ .tv_sec = perout->start.sec,
+ .tv_nsec = perout->start.nsec,
+ };
+ u64 pin_duration = timespec64_to_ns(&pin_duration_ts);
+ u64 pin_start = timespec64_to_ns(&pin_start_ts);
+ u32 pin_duration32;
+ u64 now;
+
+ /* ptppindur: 32 bit register which holds the interval between
+ * 2 edges on PTP_CLK. So check for truncation which happens
+ * at periods larger than around 68.7 seconds.
+ */
+ pin_duration = ns_to_sja1105_ticks(pin_duration / 2);
+ if (pin_duration > U32_MAX) {
+ rc = -ERANGE;
+ goto out;
+ }
+ pin_duration32 = pin_duration;
+
+ /* ptppins: 64 bit register which needs to hold a PTP time
+ * larger than the current time, otherwise the startptpcp
+ * command won't do anything. So advance the current time
+ * by a number of periods in a way that won't alter the
+ * phase offset.
+ */
+ rc = __sja1105_ptp_gettimex(priv->ds, &now, NULL);
+ if (rc < 0)
+ goto out;
+
+ pin_start = future_base_time(pin_start, pin_duration,
+ now + 1ull * NSEC_PER_SEC);
+ pin_start = ns_to_sja1105_ticks(pin_start);
+
+ rc = sja1105_xfer_u64(priv, SPI_WRITE, regs->ptppinst,
+ &pin_start, NULL);
+ if (rc < 0)
+ goto out;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptppindur,
+ &pin_duration32, NULL);
+ if (rc < 0)
+ goto out;
+ }
+
+ if (on)
+ cmd.startptpcp = true;
+ else
+ cmd.stopptpcp = true;
+
+ rc = sja1105_ptp_commit(priv->ds, &cmd, SPI_WRITE);
+
+out:
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+static int sja1105_extts_enable(struct sja1105_private *priv,
+ struct ptp_extts_request *extts,
+ bool on)
+{
+ int rc;
+
+ /* We only support one channel */
+ if (extts->index != 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (extts->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* We can only enable time stamping on both edges, sadly. */
+ if ((extts->flags & PTP_STRICT_FLAGS) &&
+ (extts->flags & PTP_ENABLE_FEATURE) &&
+ (extts->flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
+ rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_EXTTS);
+ if (rc)
+ return rc;
+
+ priv->ptp_data.extts_enabled = on;
+
+ if (on)
+ sja1105_ptp_extts_setup_timer(&priv->ptp_data);
+ else
+ del_timer_sync(&priv->ptp_data.extts_timer);
+
+ return 0;
+}
+
+static int sja1105_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ int rc = -EOPNOTSUPP;
+
+ if (req->type == PTP_CLK_REQ_PEROUT)
+ rc = sja1105_per_out_enable(priv, &req->perout, on);
+ else if (req->type == PTP_CLK_REQ_EXTTS)
+ rc = sja1105_extts_enable(priv, &req->extts, on);
+
+ return rc;
+}
+
+static int sja1105_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+
+ if (chan != 0 || pin != 0)
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_EXTTS:
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return -1;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+static struct ptp_pin_desc sja1105_ptp_pin = {
+ .name = "ptp_clk",
+ .index = 0,
+ .func = PTP_PF_NONE,
+};
+
+int sja1105_ptp_clock_register(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ ptp_data->caps = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "SJA1105 PHC",
+ .adjfine = sja1105_ptp_adjfine,
+ .adjtime = sja1105_ptp_adjtime,
+ .gettimex64 = sja1105_ptp_gettimex,
+ .settime64 = sja1105_ptp_settime,
+ .enable = sja1105_ptp_enable,
+ .verify = sja1105_ptp_verify_pin,
+ .do_aux_work = sja1105_rxtstamp_work,
+ .max_adj = SJA1105_MAX_ADJ_PPB,
+ .pin_config = &sja1105_ptp_pin,
+ .n_pins = 1,
+ .n_ext_ts = 1,
+ .n_per_out = 1,
+ };
+
+ /* Only used on SJA1105 */
+ skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
+ /* Only used on SJA1110 */
+ skb_queue_head_init(&ptp_data->skb_txtstamp_queue);
+
+ ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return PTR_ERR(ptp_data->clock);
+
+ ptp_data->cmd.corrclk4ts = true;
+ ptp_data->cmd.ptpclkadd = PTP_SET_MODE;
+
+ timer_setup(&ptp_data->extts_timer, sja1105_ptp_extts_timer, 0);
+
+ return sja1105_ptp_reset(ds);
+}
+
+void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return;
+
+ del_timer_sync(&ptp_data->extts_timer);
+ ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_txtstamp_queue);
+ skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
+ ptp_clock_unregister(ptp_data->clock);
+ ptp_data->clock = NULL;
+}
+
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port,
+ struct sk_buff *skb)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct skb_shared_hwtstamps shwt = {0};
+ u64 ticks, ts;
+ int rc;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_ptpegr_ts_poll(ds, port, &ts);
+ if (rc < 0) {
+ dev_err(ds->dev, "timed out polling for tstamp\n");
+ kfree_skb(skb);
+ goto out;
+ }
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ kfree_skb(skb);
+ goto out;
+ }
+
+ ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+ skb_complete_tx_timestamp(skb, &shwt);
+
+out:
+ mutex_unlock(&ptp_data->lock);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
new file mode 100644
index 000000000..416461ee9
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_PTP_H
+#define _SJA1105_PTP_H
+
+#include <linux/timer.h>
+
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
+
+/* Timestamps are in units of 8 ns clock ticks (equivalent to
+ * a fixed 125 MHz clock).
+ */
+#define SJA1105_TICK_NS 8
+
+static inline s64 ns_to_sja1105_ticks(s64 ns)
+{
+ return ns / SJA1105_TICK_NS;
+}
+
+static inline s64 sja1105_ticks_to_ns(s64 ticks)
+{
+ return ticks * SJA1105_TICK_NS;
+}
+
+/* Calculate the first base_time in the future that satisfies this
+ * relationship:
+ *
+ * future_base_time = base_time + N x cycle_time >= now, or
+ *
+ *      now - base_time
+ * N >= ---------------
+ *         cycle_time
+ *
+ * Because N is an integer, the ceiling value of the above "a / b" ratio
+ * is in fact precisely the floor value of "(a + b - 1) / b", which is
+ * easier to calculate only having integer division tools.
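+ *
+ * Worked example: base_time = 3, cycle_time = 5, now = 14 gives
+ * N = (11 + 4) / 5 = 3, hence 3 + 3 * 5 = 18, the first cycle start
+ * at or after "now".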
+ */
+static inline s64 future_base_time(s64 base_time, s64 cycle_time, s64 now)
+{
+ s64 a, b, n;
+
+ if (base_time >= now)
+ return base_time;
+
+ a = now - base_time;
+ b = cycle_time;
+ n = div_s64(a + b - 1, b);
+
+ return base_time + n * cycle_time;
+}
+
+/* This is not a preprocessor macro because the "ns" argument may or may not be
+ * s64 at caller side. This ensures it is properly type-cast before div_s64.
+ */
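+/* Delta timestamps (e.g. in the time-aware scheduler tables) are
+ * expressed in units of 200 ns.
+ */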
+static inline s64 ns_to_sja1105_delta(s64 ns)
+{
+ return div_s64(ns, 200);
+}
+
+static inline s64 sja1105_delta_to_ns(s64 delta)
+{
+ return delta * 200;
+}
+
+struct sja1105_ptp_cmd {
+ u64 startptpcp; /* start toggling PTP_CLK pin */
+ u64 stopptpcp; /* stop toggling PTP_CLK pin */
+ u64 ptpstrtsch; /* start schedule */
+ u64 ptpstopsch; /* stop schedule */
+ u64 resptp; /* reset */
+ u64 corrclk4ts; /* use the corrected clock for timestamps */
+ u64 ptpclkadd; /* enum sja1105_ptp_clk_mode */
+};
+
+struct sja1105_ptp_data {
+ struct timer_list extts_timer;
+ /* Used only on SJA1105 to reconstruct partial timestamps */
+ struct sk_buff_head skb_rxtstamp_queue;
+ /* Used on SJA1110 where meta frames are generated only for
+ * 2-step TX timestamps
+ */
+ struct sk_buff_head skb_txtstamp_queue;
+ struct ptp_clock_info caps;
+ struct ptp_clock *clock;
+ struct sja1105_ptp_cmd cmd;
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+ bool extts_enabled;
+ u64 ptpsyncts;
+};
+
+int sja1105_ptp_clock_register(struct dsa_switch *ds);
+
+void sja1105_ptp_clock_unregister(struct dsa_switch *ds);
+
+void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
+
+void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
+
+int sja1105_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *ts);
+
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *clone);
+
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type);
+
+void sja1105_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb);
+
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
+
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+
+int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *sts);
+
+int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts);
+
+int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta);
+
+int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw);
+
+bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp);
+
+#else
+
+struct sja1105_ptp_cmd;
+
+/* Structures cannot be empty in C. Bah!
+ * Keep the mutex as the only element, which is a bit more difficult to
+ * refactor out of sja1105_main.c anyway.
+ */
+struct sja1105_ptp_data {
+ struct mutex lock;
+};
+
+static inline int sja1105_ptp_clock_register(struct dsa_switch *ds)
+{
+ return 0;
+}
+
+static inline void sja1105_ptp_clock_unregister(struct dsa_switch *ds) { }
+
+static inline void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *clone)
+{
+}
+
+static inline int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *sts)
+{
+ return 0;
+}
+
+static inline int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ return 0;
+}
+
+static inline int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta)
+{
+ return 0;
+}
+
+static inline int sja1105_ptp_commit(struct dsa_switch *ds,
+ struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw)
+{
+ return 0;
+}
+
+#define sja1105et_ptp_cmd_packing NULL
+
+#define sja1105pqrs_ptp_cmd_packing NULL
+
+#define sja1105_get_ts_info NULL
+
+#define sja1105_port_rxtstamp NULL
+
+#define sja1105_port_txtstamp NULL
+
+#define sja1105_hwtstamp_get NULL
+
+#define sja1105_hwtstamp_set NULL
+
+#define sja1105_rxtstamp NULL
+#define sja1110_rxtstamp NULL
+#define sja1110_txtstamp NULL
+
+#define sja1110_process_meta_tstamp NULL
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
+
+#endif /* _SJA1105_PTP_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
new file mode 100644
index 000000000..e6b61aef4
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -0,0 +1,977 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright 2016-2018 NXP
+ * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/spi/spi.h>
+#include <linux/packing.h>
+#include "sja1105.h"
+
+struct sja1105_chunk {
+ u8 *buf;
+ size_t len;
+ u64 reg_addr;
+};
+
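+/* SPI message header layout, as packed below: bit 31 is the access type
+ * (read/write), bits 30:25 the number of 32-bit words to read, and
+ * bits 24:4 the register address.
+ */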
+static void
+sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
+{
+ const int size = SJA1105_SIZE_SPI_MSG_HEADER;
+
+ memset(buf, 0, size);
+
+ sja1105_pack(buf, &msg->access, 31, 31, size);
+ sja1105_pack(buf, &msg->read_count, 30, 25, size);
+ sja1105_pack(buf, &msg->address, 24, 4, size);
+}
+
+/* If @rw is:
+ * - SPI_WRITE: creates and sends an SPI write message at absolute
+ * address reg_addr, taking @len bytes from *buf
+ * - SPI_READ: creates and sends an SPI read message from absolute
+ * address reg_addr, writing @len bytes into *buf
+ */
+static int sja1105_xfer(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u8 *buf,
+ size_t len, struct ptp_system_timestamp *ptp_sts)
+{
+ u8 hdr_buf[SJA1105_SIZE_SPI_MSG_HEADER] = {0};
+ struct spi_device *spi = priv->spidev;
+ struct spi_transfer xfers[2] = {0};
+ struct spi_transfer *chunk_xfer;
+ struct spi_transfer *hdr_xfer;
+ struct sja1105_chunk chunk;
+ int num_chunks;
+ int rc, i = 0;
+
+ num_chunks = DIV_ROUND_UP(len, priv->max_xfer_len);
+
+ chunk.reg_addr = reg_addr;
+ chunk.buf = buf;
+ chunk.len = min_t(size_t, len, priv->max_xfer_len);
+
+ hdr_xfer = &xfers[0];
+ chunk_xfer = &xfers[1];
+
+ for (i = 0; i < num_chunks; i++) {
+ struct spi_transfer *ptp_sts_xfer;
+ struct sja1105_spi_message msg;
+
+ /* Populate the transfer's header buffer */
+ msg.address = chunk.reg_addr;
+ msg.access = rw;
+ if (rw == SPI_READ)
+ msg.read_count = chunk.len / 4;
+ else
+ /* Ignored */
+ msg.read_count = 0;
+ sja1105_spi_message_pack(hdr_buf, &msg);
+ hdr_xfer->tx_buf = hdr_buf;
+ hdr_xfer->len = SJA1105_SIZE_SPI_MSG_HEADER;
+
+ /* Populate the transfer's data buffer */
+ if (rw == SPI_READ)
+ chunk_xfer->rx_buf = chunk.buf;
+ else
+ chunk_xfer->tx_buf = chunk.buf;
+ chunk_xfer->len = chunk.len;
+
+ /* Request timestamping for the transfer. Instead of letting
+ * callers specify which byte they want to timestamp, we can
+ * make certain assumptions:
+ * - A read operation will request a software timestamp when
+ * what's being read is the PTP time. That is snapshotted by
+ * the switch hardware at the end of the command portion
+ * (hdr_xfer).
+ * - A write operation will request a software timestamp on
+ * actions that modify the PTP time. Taking clock stepping as
+ * an example, the switch writes the PTP time at the end of
+ * the data portion (chunk_xfer).
+ */
+ if (rw == SPI_READ)
+ ptp_sts_xfer = hdr_xfer;
+ else
+ ptp_sts_xfer = chunk_xfer;
+ ptp_sts_xfer->ptp_sts_word_pre = ptp_sts_xfer->len - 1;
+ ptp_sts_xfer->ptp_sts_word_post = ptp_sts_xfer->len - 1;
+ ptp_sts_xfer->ptp_sts = ptp_sts;
+
+ /* Calculate next chunk */
+ chunk.buf += chunk.len;
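+		/* Register addresses count 32-bit words, hence the
+		 * advance by len / 4
+		 */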
+ chunk.reg_addr += chunk.len / 4;
+ chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf),
+ priv->max_xfer_len);
+
+ rc = spi_sync_transfer(spi, xfers, 2);
+ if (rc < 0) {
+ dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int sja1105_xfer_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u8 *buf, size_t len)
+{
+ return sja1105_xfer(priv, rw, reg_addr, buf, len, NULL);
+}
+
+/* If @rw is:
+ * - SPI_WRITE: creates and sends an SPI write message at absolute
+ * address reg_addr
+ * - SPI_READ: creates and sends an SPI read message from absolute
+ * address reg_addr
+ *
+ * The u64 *value is unpacked, meaning that it's stored in the native
+ * CPU endianness and directly usable by software running on the core.
+ */
+int sja1105_xfer_u64(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ u8 packed_buf[8];
+ int rc;
+
+ if (rw == SPI_WRITE)
+ sja1105_pack(packed_buf, value, 63, 0, 8);
+
+ rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 8, ptp_sts);
+
+ if (rw == SPI_READ)
+ sja1105_unpack(packed_buf, value, 63, 0, 8);
+
+ return rc;
+}
+
+/* Same as above, but transfers only a 4 byte word */
+int sja1105_xfer_u32(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ u8 packed_buf[4];
+ u64 tmp;
+ int rc;
+
+ if (rw == SPI_WRITE) {
+ /* The packing API only supports u64 as CPU word size,
+ * so we need to convert.
+ */
+ tmp = *value;
+ sja1105_pack(packed_buf, &tmp, 31, 0, 4);
+ }
+
+ rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 4, ptp_sts);
+
+ if (rw == SPI_READ) {
+ sja1105_unpack(packed_buf, &tmp, 31, 0, 4);
+ *value = tmp;
+ }
+
+ return rc;
+}
+
+static int sja1105et_reset_cmd(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 cold_reset = BIT(3);
+
+ /* Cold reset */
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &cold_reset, NULL);
+}
+
+static int sja1105pqrs_reset_cmd(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 cold_reset = BIT(2);
+
+ /* Cold reset */
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &cold_reset, NULL);
+}
+
+static int sja1110_reset_cmd(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 switch_reset = BIT(20);
+
+ /* Only reset the switch core.
+ * A full cold reset would re-enable the BASE_MCSS_CLOCK PLL which
+ * would turn on the microcontroller, potentially letting it execute
+ * code which could interfere with our configuration.
+ */
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &switch_reset, NULL);
+}
+
+int sja1105_inhibit_tx(const struct sja1105_private *priv,
+ unsigned long port_bitmap, bool tx_inhibited)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 inhibit_cmd;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->port_control,
+ &inhibit_cmd, NULL);
+ if (rc < 0)
+ return rc;
+
+ if (tx_inhibited)
+ inhibit_cmd |= port_bitmap;
+ else
+ inhibit_cmd &= ~port_bitmap;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->port_control,
+ &inhibit_cmd, NULL);
+}
+
+struct sja1105_status {
+ u64 configs;
+ u64 crcchkl;
+ u64 ids;
+ u64 crcchkg;
+};
+
+/* This is not reading the entire General Status area, which is also
+ * divergent between E/T and P/Q/R/S, but only the relevant bits for
+ * ensuring that the static config upload procedure was successful.
+ */
+static void sja1105_status_unpack(void *buf, struct sja1105_status *status)
+{
+	/* u32 pointer arithmetic, so that "p + 1" advances by 4 bytes */
+ u32 *p = buf;
+
+ /* device_id is missing from the buffer, but we don't
+ * want to diverge from the manual definition of the
+ * register addresses, so we'll back off one step with
+ * the register pointer, and never access p[0].
+ */
+ p--;
+ sja1105_unpack(p + 0x1, &status->configs, 31, 31, 4);
+ sja1105_unpack(p + 0x1, &status->crcchkl, 30, 30, 4);
+ sja1105_unpack(p + 0x1, &status->ids, 29, 29, 4);
+ sja1105_unpack(p + 0x1, &status->crcchkg, 28, 28, 4);
+}
+
+static int sja1105_status_get(struct sja1105_private *priv,
+ struct sja1105_status *status)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[4];
+ int rc;
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->status, packed_buf, 4);
+ if (rc < 0)
+ return rc;
+
+ sja1105_status_unpack(packed_buf, status);
+
+ return 0;
+}
+
+/* Not const because unpacking priv->static_config into buffers and preparing
+ * for upload requires the recalculation of table CRCs and updating the
+ * structures with these.
+ */
+int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
+ void *config_buf, int buf_len)
+{
+ struct sja1105_static_config *config = &priv->static_config;
+ struct sja1105_table_header final_header;
+ sja1105_config_valid_t valid;
+ char *final_header_ptr;
+ int crc_len;
+
+ valid = sja1105_static_config_check_valid(config,
+ priv->info->max_frame_mem);
+ if (valid != SJA1105_CONFIG_OK) {
+ dev_err(&priv->spidev->dev,
+ sja1105_static_config_error_msg[valid]);
+ return -EINVAL;
+ }
+
+ /* Write Device ID and config tables to config_buf */
+ sja1105_static_config_pack(config_buf, config);
+ /* Recalculate CRC of the last header (right now 0xDEADBEEF).
+ * Don't include the CRC field itself.
+ */
+ crc_len = buf_len - 4;
+ /* Read the whole table header */
+ final_header_ptr = config_buf + buf_len - SJA1105_SIZE_TABLE_HEADER;
+ sja1105_table_header_packing(final_header_ptr, &final_header, UNPACK);
+ /* Modify */
+ final_header.crc = sja1105_crc32(config_buf, crc_len);
+ /* Rewrite */
+ sja1105_table_header_packing(final_header_ptr, &final_header, PACK);
+
+ return 0;
+}
+
+#define RETRIES 10
+
+int sja1105_static_config_upload(struct sja1105_private *priv)
+{
+ struct sja1105_static_config *config = &priv->static_config;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = &priv->spidev->dev;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_status status;
+ int rc, retries = RETRIES;
+ u8 *config_buf;
+ int buf_len;
+
+ buf_len = sja1105_static_config_get_length(config);
+ config_buf = kcalloc(buf_len, sizeof(char), GFP_KERNEL);
+ if (!config_buf)
+ return -ENOMEM;
+
+ rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
+ if (rc < 0) {
+ dev_err(dev, "Invalid config, cannot upload\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ /* Prevent PHY jabbering during switch reset by inhibiting
+ * Tx on all ports and waiting for current packet to drain.
+ * Otherwise, the PHY will see an unterminated Ethernet packet.
+ */
+ rc = sja1105_inhibit_tx(priv, GENMASK_ULL(ds->num_ports - 1, 0), true);
+ if (rc < 0) {
+ dev_err(dev, "Failed to inhibit Tx on ports\n");
+ rc = -ENXIO;
+ goto out;
+ }
+	/* Wait for any in-flight egress packet to finish transmission
+	 * (reach the IFG). It is guaranteed that a second one will not
+	 * follow, and that a switch cold reset is thus safe.
+	 */
+ usleep_range(500, 1000);
+ do {
+ /* Put the SJA1105 in programming mode */
+ rc = priv->info->reset_cmd(priv->ds);
+ if (rc < 0) {
+ dev_err(dev, "Failed to reset switch, retrying...\n");
+ continue;
+ }
+ /* Wait for the switch to come out of reset */
+ usleep_range(1000, 5000);
+ /* Upload the static config to the device */
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->config,
+ config_buf, buf_len);
+ if (rc < 0) {
+ dev_err(dev, "Failed to upload config, retrying...\n");
+ continue;
+ }
+ /* Check that SJA1105 responded well to the config upload */
+ rc = sja1105_status_get(priv, &status);
+ if (rc < 0)
+ continue;
+
+ if (status.ids == 1) {
+ dev_err(dev, "Mismatch between hardware and static config "
+ "device id. Wrote 0x%llx, wants 0x%llx\n",
+ config->device_id, priv->info->device_id);
+ continue;
+ }
+ if (status.crcchkl == 1) {
+ dev_err(dev, "Switch reported invalid local CRC on "
+ "the uploaded config, retrying...\n");
+ continue;
+ }
+ if (status.crcchkg == 1) {
+ dev_err(dev, "Switch reported invalid global CRC on "
+ "the uploaded config, retrying...\n");
+ continue;
+ }
+ if (status.configs == 0) {
+ dev_err(dev, "Switch reported that configuration is "
+ "invalid, retrying...\n");
+ continue;
+ }
+ /* Success! */
+ break;
+ } while (--retries);
+
+ if (!retries) {
+ rc = -EIO;
+ dev_err(dev, "Failed to upload config to device, giving up\n");
+ goto out;
+ } else if (retries != RETRIES) {
+ dev_info(dev, "Succeeded after %d tried\n", RETRIES - retries);
+ }
+
+out:
+ kfree(config_buf);
+ return rc;
+}
+
+static const struct sja1105_regs sja1105et_regs = {
+ .device_id = 0x0,
+ .prod_id = 0x100BC3,
+ .status = 0x1,
+ .port_control = 0x11,
+ .vl_status = 0x10000,
+ .config = 0x020000,
+ .rgu = 0x100440,
+ /* UM10944.pdf, Table 86, ACU Register overview */
+ .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
+ .rmii_pll1 = 0x10000A,
+ .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
+ .stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208},
+ .stats[HL1] = {0x400, 0x410, 0x420, 0x430, 0x440},
+ .stats[HL2] = {0x600, 0x610, 0x620, 0x630, 0x640},
+ /* UM10944.pdf, Table 78, CGU Register overview */
+ .mii_tx_clk = {0x100013, 0x10001A, 0x100021, 0x100028, 0x10002F},
+ .mii_rx_clk = {0x100014, 0x10001B, 0x100022, 0x100029, 0x100030},
+ .mii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
+ .mii_ext_rx_clk = {0x100019, 0x100020, 0x100027, 0x10002E, 0x100035},
+ .rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032},
+ .rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
+ .rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
+ .ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
+ .ptpschtm = 0x12, /* Spans 0x12 to 0x13 */
+ .ptppinst = 0x14,
+ .ptppindur = 0x16,
+ .ptp_control = 0x17,
+ .ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
+ .ptpclkrate = 0x1A,
+ .ptpclkcorp = 0x1D,
+ .mdio_100base_tx = SJA1105_RSV_ADDR,
+ .mdio_100base_t1 = SJA1105_RSV_ADDR,
+};
+
+static const struct sja1105_regs sja1105pqrs_regs = {
+ .device_id = 0x0,
+ .prod_id = 0x100BC3,
+ .status = 0x1,
+ .port_control = 0x12,
+ .vl_status = 0x10000,
+ .config = 0x020000,
+ .rgu = 0x100440,
+ /* UM10944.pdf, Table 86, ACU Register overview */
+ .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
+ .pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
+ .rmii_pll1 = 0x10000A,
+ .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
+ .stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208},
+ .stats[HL1] = {0x400, 0x410, 0x420, 0x430, 0x440},
+ .stats[HL2] = {0x600, 0x610, 0x620, 0x630, 0x640},
+ .stats[ETHER] = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460},
+ /* UM11040.pdf, Table 114 */
+ .mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B},
+ .mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C},
+ .mii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
+ .mii_ext_rx_clk = {0x100018, 0x10001E, 0x100024, 0x10002A, 0x100030},
+ .rgmii_tx_clk = {0x100016, 0x10001C, 0x100022, 0x100028, 0x10002E},
+ .rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D},
+ .rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
+ .ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
+ .ptpschtm = 0x13, /* Spans 0x13 to 0x14 */
+ .ptppinst = 0x15,
+ .ptppindur = 0x17,
+ .ptp_control = 0x18,
+ .ptpclkval = 0x19,
+ .ptpclkrate = 0x1B,
+ .ptpclkcorp = 0x1E,
+ .ptpsyncts = 0x1F,
+ .mdio_100base_tx = SJA1105_RSV_ADDR,
+ .mdio_100base_t1 = SJA1105_RSV_ADDR,
+};
+
+static const struct sja1105_regs sja1110_regs = {
+ .device_id = SJA1110_SPI_ADDR(0x0),
+ .prod_id = SJA1110_ACU_ADDR(0xf00),
+ .status = SJA1110_SPI_ADDR(0x4),
+ .port_control = SJA1110_SPI_ADDR(0x50), /* actually INHIB_TX */
+ .vl_status = 0x10000,
+ .config = 0x020000,
+ .rgu = SJA1110_RGU_ADDR(0x100), /* Reset Control Register 0 */
+ /* Ports 2 and 3 are capable of xMII, but there isn't anything to
+ * configure in the CGU/ACU for them.
+ */
+ .pad_mii_tx = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .pad_mii_rx = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .pad_mii_id = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1110_ACU_ADDR(0x18), SJA1110_ACU_ADDR(0x28),
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .rmii_pll1 = SJA1105_RSV_ADDR,
+ .cgu_idiv = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208, 0x20a,
+ 0x20c, 0x20e, 0x210, 0x212, 0x214},
+ .stats[HL1] = {0x400, 0x410, 0x420, 0x430, 0x440, 0x450,
+ 0x460, 0x470, 0x480, 0x490, 0x4a0},
+ .stats[HL2] = {0x600, 0x610, 0x620, 0x630, 0x640, 0x650,
+ 0x660, 0x670, 0x680, 0x690, 0x6a0},
+ .stats[ETHER] = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460, 0x1478,
+ 0x1490, 0x14a8, 0x14c0, 0x14d8, 0x14f0},
+ .mii_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .mii_rx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .mii_ext_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .mii_ext_rx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .rgmii_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .rmii_ref_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .rmii_ext_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .ptpschtm = SJA1110_SPI_ADDR(0x54),
+ .ptppinst = SJA1110_SPI_ADDR(0x5c),
+ .ptppindur = SJA1110_SPI_ADDR(0x64),
+ .ptp_control = SJA1110_SPI_ADDR(0x68),
+ .ptpclkval = SJA1110_SPI_ADDR(0x6c),
+ .ptpclkrate = SJA1110_SPI_ADDR(0x74),
+ .ptpclkcorp = SJA1110_SPI_ADDR(0x80),
+ .ptpsyncts = SJA1110_SPI_ADDR(0x84),
+ .mdio_100base_tx = 0x1c2400,
+ .mdio_100base_t1 = 0x1c1000,
+ .pcs_base = {SJA1105_RSV_ADDR, 0x1c1400, 0x1c1800, 0x1c1c00, 0x1c2000,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+};
+
+const struct sja1105_info sja1105e_info = {
+ .device_id = SJA1105E_DEVICE_ID,
+ .part_no = SJA1105ET_PART_NO,
+ .static_ops = sja1105e_table_ops,
+ .dyn_ops = sja1105et_dyn_ops,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
+ .can_limit_mcast_flood = false,
+ .ptp_ts_bits = 24,
+ .ptpegr_ts_bytes = 4,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
+ .num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
+ .reset_cmd = sja1105et_reset_cmd,
+ .fdb_add_cmd = sja1105et_fdb_add,
+ .fdb_del_cmd = sja1105et_fdb_del,
+ .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .regs = &sja1105et_regs,
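+	/* Mapping from the abstract SJA1105_SPEED_* values to the
+	 * generation-specific encoding of the per-port SPEED field (note
+	 * that the SJA1110 tables below use different values).
+	 */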
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .name = "SJA1105E",
+};
+
+const struct sja1105_info sja1105t_info = {
+ .device_id = SJA1105T_DEVICE_ID,
+ .part_no = SJA1105ET_PART_NO,
+ .static_ops = sja1105t_table_ops,
+ .dyn_ops = sja1105et_dyn_ops,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
+ .can_limit_mcast_flood = false,
+ .ptp_ts_bits = 24,
+ .ptpegr_ts_bytes = 4,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
+ .num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
+ .reset_cmd = sja1105et_reset_cmd,
+ .fdb_add_cmd = sja1105et_fdb_add,
+ .fdb_del_cmd = sja1105et_fdb_del,
+ .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .regs = &sja1105et_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .name = "SJA1105T",
+};
+
+const struct sja1105_info sja1105p_info = {
+ .device_id = SJA1105PR_DEVICE_ID,
+ .part_no = SJA1105P_PART_NO,
+ .static_ops = sja1105p_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
+ .can_limit_mcast_flood = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .regs = &sja1105pqrs_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .name = "SJA1105P",
+};
+
+const struct sja1105_info sja1105q_info = {
+ .device_id = SJA1105QS_DEVICE_ID,
+ .part_no = SJA1105Q_PART_NO,
+ .static_ops = sja1105q_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
+ .can_limit_mcast_flood = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .regs = &sja1105pqrs_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .name = "SJA1105Q",
+};
+
+const struct sja1105_info sja1105r_info = {
+ .device_id = SJA1105PR_DEVICE_ID,
+ .part_no = SJA1105R_PART_NO,
+ .static_ops = sja1105r_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
+ .can_limit_mcast_flood = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .pcs_mdio_read = sja1105_pcs_mdio_read,
+ .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .regs = &sja1105pqrs_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .supports_sgmii = {false, false, false, false, true},
+ .name = "SJA1105R",
+};
+
+const struct sja1105_info sja1105s_info = {
+ .device_id = SJA1105QS_DEVICE_ID,
+ .part_no = SJA1105S_PART_NO,
+ .static_ops = sja1105s_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .regs = &sja1105pqrs_regs,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
+ .can_limit_mcast_flood = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .pcs_mdio_read = sja1105_pcs_mdio_read,
+ .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .supports_sgmii = {false, false, false, false, true},
+ .name = "SJA1105S",
+};
+
+const struct sja1105_info sja1110a_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110A_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .disable_microcontroller = sja1110_disable_microcontroller,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, true, true, true, false,
+ true, true, true, true, true, true},
+ .supports_rmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, true, true, true, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1},
+ .name = "SJA1110A",
+};
+
+const struct sja1105_info sja1110b_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110B_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .disable_microcontroller = sja1110_disable_microcontroller,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, true, true, true, false,
+ true, true, true, true, true, false},
+ .supports_rmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_NO_PHY},
+ .name = "SJA1110B",
+};
+
+const struct sja1105_info sja1110c_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110C_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .disable_microcontroller = sja1110_disable_microcontroller,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, true, true, true, false,
+ true, true, true, false, false, false},
+ .supports_rmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, false, false, false, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, false, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY},
+ .name = "SJA1110C",
+};
+
+const struct sja1105_info sja1110d_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110D_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .disable_microcontroller = sja1110_disable_microcontroller,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, false, true, false, false,
+ true, true, true, false, false, false},
+ .supports_rmii = {false, false, true, false, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, false, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, true, true, true, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY},
+ .name = "SJA1110D",
+};
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
new file mode 100644
index 000000000..baba204ad
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -0,0 +1,1952 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright 2016-2018 NXP
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105_static_config.h"
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+/* Convenience wrappers over the generic packing functions. These take into
+ * account the SJA1105 memory layout quirks and provide some level of
+ * programmer protection against incorrect API use. The errors are not expected
+ * to occur at runtime, therefore printing and swallowing them here is
+ * appropriate instead of cluttering up higher-level code.
+ */
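+/* Example (illustrative only, not used by the driver): storing the 3-bit
+ * value 5 into bits 30:28 of a 4-byte buffer:
+ *
+ *	u64 prio = 5;
+ *
+ *	sja1105_pack(buf, &prio, 30, 28, 4);
+ */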
+void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len)
+{
+ int rc = packing(buf, (u64 *)val, start, end, len,
+ PACK, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL) {
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ } else if (rc == -ERANGE) {
+ if ((start - end + 1) > 64)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ else
+ pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
+ *val, start, end);
+ }
+ dump_stack();
+}
+
+void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len)
+{
+ int rc = packing((void *)buf, val, start, end, len,
+ UNPACK, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL)
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ else if (rc == -ERANGE)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ dump_stack();
+}
+
+void sja1105_packing(void *buf, u64 *val, int start, int end,
+ size_t len, enum packing_op op)
+{
+ int rc = packing(buf, val, start, end, len, op, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL) {
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ } else if (rc == -ERANGE) {
+ if ((start - end + 1) > 64)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ else
+ pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
+ *val, start, end);
+ }
+ dump_stack();
+}
+
+/* Little-endian Ethernet CRC32 of data packed as big-endian u32 words */
+u32 sja1105_crc32(const void *buf, size_t len)
+{
+ unsigned int i;
+ u64 word;
+ u32 crc;
+
+ /* seed */
+ crc = ~0;
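+	/* The configuration stream is a sequence of 32-bit words, so len is
+	 * expected to be a multiple of 4 here.
+	 */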
+ for (i = 0; i < len; i += 4) {
+ sja1105_unpack(buf + i, &word, 31, 0, 4);
+ crc = crc32_le(crc, (u8 *)&word, 4);
+ }
+ return ~crc;
+}
+
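+/* The *_entry_packing() helpers below each describe one configuration table
+ * entry layout as a list of (field, msb, lsb) mappings. The same description
+ * works in both directions: op == PACK serializes the structure into buf,
+ * while op == UNPACK parses buf back into the structure.
+ */
+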
+static size_t sja1105et_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY;
+ struct sja1105_avb_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->destmeta, 95, 48, size, op);
+ sja1105_packing(buf, &entry->srcmeta, 47, 0, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
+ struct sja1105_avb_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->cas_master, 126, 126, size, op);
+ sja1105_packing(buf, &entry->destmeta, 125, 78, size, op);
+ sja1105_packing(buf, &entry->srcmeta, 77, 30, size, op);
+ return size;
+}
+
+static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY;
+ struct sja1105_general_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vllupformat, 319, 319, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 318, 318, size, op);
+ sja1105_packing(buf, &entry->switchid, 317, 315, size, op);
+ sja1105_packing(buf, &entry->hostprio, 314, 312, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 311, 264, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 263, 216, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 215, 168, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 167, 120, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 119, 119, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 118, 118, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 117, 117, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 116, 116, size, op);
+ sja1105_packing(buf, &entry->casc_port, 115, 113, size, op);
+ sja1105_packing(buf, &entry->host_port, 112, 110, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 109, 107, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 106, 75, size, op);
+ sja1105_packing(buf, &entry->vlmask, 74, 43, size, op);
+ sja1105_packing(buf, &entry->tpid, 42, 27, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 26, 26, size, op);
+ sja1105_packing(buf, &entry->tpid2, 25, 10, size, op);
+ return size;
+}
+
+/* TPID and TPID2 are intentionally reversed so that semantic
+ * compatibility with E/T is kept.
+ */
+size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY;
+ struct sja1105_general_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vllupformat, 351, 351, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 350, 350, size, op);
+ sja1105_packing(buf, &entry->switchid, 349, 347, size, op);
+ sja1105_packing(buf, &entry->hostprio, 346, 344, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 343, 296, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 295, 248, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 247, 200, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 199, 152, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 151, 151, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 150, 150, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 149, 149, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 148, 148, size, op);
+ sja1105_packing(buf, &entry->casc_port, 147, 145, size, op);
+ sja1105_packing(buf, &entry->host_port, 144, 142, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op);
+ sja1105_packing(buf, &entry->vlmask, 106, 75, size, op);
+ sja1105_packing(buf, &entry->tpid2, 74, 59, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op);
+ sja1105_packing(buf, &entry->tpid, 57, 42, size, op);
+ sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op);
+ sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op);
+ sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
+ sja1105_packing(buf, &entry->egrmirrdei, 25, 25, size, op);
+ sja1105_packing(buf, &entry->replay_port, 24, 22, size, op);
+ return size;
+}
+
+size_t sja1110_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_general_params_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_GENERAL_PARAMS_ENTRY;
+
+ sja1105_packing(buf, &entry->vllupformat, 447, 447, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 446, 446, size, op);
+ sja1105_packing(buf, &entry->switchid, 445, 442, size, op);
+ sja1105_packing(buf, &entry->hostprio, 441, 439, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 438, 391, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 390, 343, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 342, 295, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 294, 247, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 246, 246, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 245, 245, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 244, 244, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 243, 243, size, op);
+ sja1105_packing(buf, &entry->casc_port, 242, 232, size, op);
+ sja1105_packing(buf, &entry->host_port, 231, 228, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 227, 224, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 223, 192, size, op);
+ sja1105_packing(buf, &entry->vlmask, 191, 160, size, op);
+ sja1105_packing(buf, &entry->tpid2, 159, 144, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 143, 143, size, op);
+ sja1105_packing(buf, &entry->tpid, 142, 127, size, op);
+ sja1105_packing(buf, &entry->queue_ts, 126, 126, size, op);
+ sja1105_packing(buf, &entry->egrmirrvid, 125, 114, size, op);
+ sja1105_packing(buf, &entry->egrmirrpcp, 113, 111, size, op);
+ sja1105_packing(buf, &entry->egrmirrdei, 110, 110, size, op);
+ sja1105_packing(buf, &entry->replay_port, 109, 106, size, op);
+ sja1105_packing(buf, &entry->tdmaconfigidx, 70, 67, size, op);
+ sja1105_packing(buf, &entry->header_type, 64, 49, size, op);
+ sja1105_packing(buf, &entry->tte_en, 16, 16, size, op);
+ return size;
+}
+
+static size_t
+sja1105_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY;
+ struct sja1105_l2_forwarding_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->max_dynp, 95, 93, size, op);
+ for (i = 0, offset = 13; i < 8; i++, offset += 10)
+ sja1105_packing(buf, &entry->part_spc[i],
+ offset + 9, offset + 0, size, op);
+ return size;
+}
+
+size_t sja1110_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_forwarding_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->max_dynp, 95, 93, size, op);
+ for (i = 0, offset = 5; i < 8; i++, offset += 11)
+ sja1105_packing(buf, &entry->part_spc[i],
+ offset + 10, offset + 0, size, op);
+ return size;
+}
+
+size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ struct sja1105_l2_forwarding_entry *entry = entry_ptr;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->bc_domain, 63, 59, size, op);
+ sja1105_packing(buf, &entry->reach_port, 58, 54, size, op);
+ sja1105_packing(buf, &entry->fl_domain, 53, 49, size, op);
+ for (i = 0, offset = 25; i < 8; i++, offset += 3)
+ sja1105_packing(buf, &entry->vlan_pmap[i],
+ offset + 2, offset + 0, size, op);
+ return size;
+}
+
+size_t sja1110_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_forwarding_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ int offset, i;
+
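+	/* One layout, two interpretations: a per-port egress PCP to output
+	 * queue mapping when type_egrpcp2outputq is set, otherwise the usual
+	 * broadcast/reachability/flooding domain masks.
+	 */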
+ if (entry->type_egrpcp2outputq) {
+ for (i = 0, offset = 31; i < SJA1110_NUM_PORTS;
+ i++, offset += 3) {
+ sja1105_packing(buf, &entry->vlan_pmap[i],
+ offset + 2, offset + 0, size, op);
+ }
+ } else {
+ sja1105_packing(buf, &entry->bc_domain, 63, 53, size, op);
+ sja1105_packing(buf, &entry->reach_port, 52, 42, size, op);
+ sja1105_packing(buf, &entry->fl_domain, 41, 31, size, op);
+ }
+ return size;
+}
+
+static size_t
+sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->maxage, 31, 17, size, op);
+ sja1105_packing(buf, &entry->dyn_tbsz, 16, 14, size, op);
+ sja1105_packing(buf, &entry->poly, 13, 6, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 5, 5, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 4, 4, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 3, 3, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 58; i < 5; i++, offset += 11)
+ sja1105_packing(buf, &entry->maxaddrp[i],
+ offset + 10, offset + 0, size, op);
+ sja1105_packing(buf, &entry->maxage, 57, 43, size, op);
+ sja1105_packing(buf, &entry->start_dynspc, 42, 33, size, op);
+ sja1105_packing(buf, &entry->drpnolearn, 32, 28, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 27, 27, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 26, 26, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 25, 25, size, op);
+ sja1105_packing(buf, &entry->use_static, 24, 24, size, op);
+ sja1105_packing(buf, &entry->owr_dyn, 23, 23, size, op);
+ sja1105_packing(buf, &entry->learn_once, 22, 22, size, op);
+ return size;
+}
+
+size_t sja1110_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 70; i < SJA1110_NUM_PORTS; i++, offset += 11)
+ sja1105_packing(buf, &entry->maxaddrp[i],
+ offset + 10, offset + 0, size, op);
+ sja1105_packing(buf, &entry->maxage, 69, 55, size, op);
+ sja1105_packing(buf, &entry->start_dynspc, 54, 45, size, op);
+ sja1105_packing(buf, &entry->drpnolearn, 44, 34, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 33, 33, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 32, 32, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 31, 31, size, op);
+ sja1105_packing(buf, &entry->use_static, 30, 30, size, op);
+ sja1105_packing(buf, &entry->owr_dyn, 29, 29, size, op);
+ sja1105_packing(buf, &entry->learn_once, 28, 28, size, op);
+ return size;
+}
+
+size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vlanid, 95, 84, size, op);
+ sja1105_packing(buf, &entry->macaddr, 83, 36, size, op);
+ sja1105_packing(buf, &entry->destports, 35, 31, size, op);
+ sja1105_packing(buf, &entry->enfport, 30, 30, size, op);
+ sja1105_packing(buf, &entry->index, 29, 20, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ if (entry->lockeds) {
+ sja1105_packing(buf, &entry->tsreg, 159, 159, size, op);
+ sja1105_packing(buf, &entry->mirrvlan, 158, 147, size, op);
+ sja1105_packing(buf, &entry->takets, 146, 146, size, op);
+ sja1105_packing(buf, &entry->mirr, 145, 145, size, op);
+ sja1105_packing(buf, &entry->retag, 144, 144, size, op);
+ } else {
+ sja1105_packing(buf, &entry->touched, 159, 159, size, op);
+ sja1105_packing(buf, &entry->age, 158, 144, size, op);
+ }
+ sja1105_packing(buf, &entry->mask_iotag, 143, 143, size, op);
+ sja1105_packing(buf, &entry->mask_vlanid, 142, 131, size, op);
+ sja1105_packing(buf, &entry->mask_macaddr, 130, 83, size, op);
+ sja1105_packing(buf, &entry->iotag, 82, 82, size, op);
+ sja1105_packing(buf, &entry->vlanid, 81, 70, size, op);
+ sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
+ sja1105_packing(buf, &entry->destports, 21, 17, size, op);
+ sja1105_packing(buf, &entry->enfport, 16, 16, size, op);
+ sja1105_packing(buf, &entry->index, 15, 6, size, op);
+ return size;
+}
+
+size_t sja1110_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ if (entry->lockeds) {
+ sja1105_packing(buf, &entry->trap, 168, 168, size, op);
+ sja1105_packing(buf, &entry->mirrvlan, 167, 156, size, op);
+ sja1105_packing(buf, &entry->takets, 155, 155, size, op);
+ sja1105_packing(buf, &entry->mirr, 154, 154, size, op);
+ sja1105_packing(buf, &entry->retag, 153, 153, size, op);
+ } else {
+ sja1105_packing(buf, &entry->touched, 168, 168, size, op);
+ sja1105_packing(buf, &entry->age, 167, 153, size, op);
+ }
+ sja1105_packing(buf, &entry->mask_iotag, 152, 152, size, op);
+ sja1105_packing(buf, &entry->mask_vlanid, 151, 140, size, op);
+ sja1105_packing(buf, &entry->mask_macaddr, 139, 92, size, op);
+ sja1105_packing(buf, &entry->mask_srcport, 91, 88, size, op);
+ sja1105_packing(buf, &entry->iotag, 87, 87, size, op);
+ sja1105_packing(buf, &entry->vlanid, 86, 75, size, op);
+ sja1105_packing(buf, &entry->macaddr, 74, 27, size, op);
+ sja1105_packing(buf, &entry->srcport, 26, 23, size, op);
+ sja1105_packing(buf, &entry->destports, 22, 12, size, op);
+ sja1105_packing(buf, &entry->enfport, 11, 11, size, op);
+ sja1105_packing(buf, &entry->index, 10, 1, size, op);
+ return size;
+}
+
+static size_t sja1105_l2_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_POLICING_ENTRY;
+ struct sja1105_l2_policing_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->sharindx, 63, 58, size, op);
+ sja1105_packing(buf, &entry->smax, 57, 42, size, op);
+ sja1105_packing(buf, &entry->rate, 41, 26, size, op);
+ sja1105_packing(buf, &entry->maxlen, 25, 15, size, op);
+ sja1105_packing(buf, &entry->partition, 14, 12, size, op);
+ return size;
+}
+
+size_t sja1110_l2_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_policing_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_L2_POLICING_ENTRY;
+
+ sja1105_packing(buf, &entry->sharindx, 63, 57, size, op);
+ sja1105_packing(buf, &entry->smax, 56, 39, size, op);
+ sja1105_packing(buf, &entry->rate, 38, 21, size, op);
+ sja1105_packing(buf, &entry->maxlen, 20, 10, size, op);
+ sja1105_packing(buf, &entry->partition, 9, 7, size, op);
+ return size;
+}
+
+static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 72; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->ifg, 71, 67, size, op);
+ sja1105_packing(buf, &entry->speed, 66, 65, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 64, 49, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 48, 33, size, op);
+ sja1105_packing(buf, &entry->maxage, 32, 25, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 24, 22, size, op);
+ sja1105_packing(buf, &entry->vlanid, 21, 10, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 9, 9, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 8, 8, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 7, 7, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 6, 6, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 5, 5, size, op);
+ sja1105_packing(buf, &entry->retag, 4, 4, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 3, 3, size, op);
+ sja1105_packing(buf, &entry->egress, 2, 2, size, op);
+ sja1105_packing(buf, &entry->ingress, 1, 1, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 104; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->ifg, 103, 99, size, op);
+ sja1105_packing(buf, &entry->speed, 98, 97, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 96, 81, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 80, 65, size, op);
+ sja1105_packing(buf, &entry->maxage, 64, 57, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 56, 54, size, op);
+ sja1105_packing(buf, &entry->vlanid, 53, 42, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 41, 41, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 40, 40, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 39, 39, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 38, 38, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 35, 35, size, op);
+ sja1105_packing(buf, &entry->retag, 34, 34, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 33, 33, size, op);
+ sja1105_packing(buf, &entry->egress, 32, 32, size, op);
+ sja1105_packing(buf, &entry->ingress, 31, 31, size, op);
+ return size;
+}
+
+size_t sja1110_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 104; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->speed, 98, 96, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 95, 80, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 79, 64, size, op);
+ sja1105_packing(buf, &entry->maxage, 63, 56, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 55, 53, size, op);
+ sja1105_packing(buf, &entry->vlanid, 52, 41, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 40, 40, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 39, 39, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 38, 38, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 37, 37, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 34, 34, size, op);
+ sja1105_packing(buf, &entry->retag, 33, 33, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 32, 32, size, op);
+ sja1105_packing(buf, &entry->egress, 31, 31, size, op);
+ sja1105_packing(buf, &entry->ingress, 30, 30, size, op);
+ sja1105_packing(buf, &entry->ifg, 10, 5, size, op);
+ return size;
+}
+
+static size_t
+sja1105_schedule_entry_points_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_entry_points_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY;
+
+ sja1105_packing(buf, &entry->clksrc, 31, 30, size, op);
+ sja1105_packing(buf, &entry->actsubsch, 29, 27, size, op);
+ return size;
+}
+
+static size_t
+sja1105_schedule_entry_points_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_entry_points_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY;
+
+ sja1105_packing(buf, &entry->subschindx, 31, 29, size, op);
+ sja1105_packing(buf, &entry->delta, 28, 11, size, op);
+ sja1105_packing(buf, &entry->address, 10, 1, size, op);
+ return size;
+}
+
+static size_t
+sja1110_schedule_entry_points_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_entry_points_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY;
+
+ sja1105_packing(buf, &entry->subschindx, 63, 61, size, op);
+ sja1105_packing(buf, &entry->delta, 60, 43, size, op);
+ sja1105_packing(buf, &entry->address, 42, 31, size, op);
+ return size;
+}
+
+static size_t sja1105_schedule_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY;
+ struct sja1105_schedule_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 16; i < 8; i++, offset += 10)
+ sja1105_packing(buf, &entry->subscheind[i],
+ offset + 9, offset + 0, size, op);
+ return size;
+}
+
+static size_t sja1110_schedule_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 0; i < 8; i++, offset += 12)
+ sja1105_packing(buf, &entry->subscheind[i],
+ offset + 11, offset + 0, size, op);
+ return size;
+}
+
+static size_t sja1105_schedule_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY;
+ struct sja1105_schedule_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->winstindex, 63, 54, size, op);
+ sja1105_packing(buf, &entry->winend, 53, 53, size, op);
+ sja1105_packing(buf, &entry->winst, 52, 52, size, op);
+ sja1105_packing(buf, &entry->destports, 51, 47, size, op);
+ sja1105_packing(buf, &entry->setvalid, 46, 46, size, op);
+ sja1105_packing(buf, &entry->txen, 45, 45, size, op);
+ sja1105_packing(buf, &entry->resmedia_en, 44, 44, size, op);
+ sja1105_packing(buf, &entry->resmedia, 43, 36, size, op);
+ sja1105_packing(buf, &entry->vlindex, 35, 26, size, op);
+ sja1105_packing(buf, &entry->delta, 25, 8, size, op);
+ return size;
+}
+
+static size_t sja1110_schedule_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1110_SIZE_SCHEDULE_ENTRY;
+ struct sja1105_schedule_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->winstindex, 95, 84, size, op);
+ sja1105_packing(buf, &entry->winend, 83, 83, size, op);
+ sja1105_packing(buf, &entry->winst, 82, 82, size, op);
+ sja1105_packing(buf, &entry->destports, 81, 71, size, op);
+ sja1105_packing(buf, &entry->setvalid, 70, 70, size, op);
+ sja1105_packing(buf, &entry->txen, 69, 69, size, op);
+ sja1105_packing(buf, &entry->resmedia_en, 68, 68, size, op);
+ sja1105_packing(buf, &entry->resmedia, 67, 60, size, op);
+ sja1105_packing(buf, &entry->vlindex, 59, 48, size, op);
+ sja1105_packing(buf, &entry->delta, 47, 30, size, op);
+ return size;
+}
+
+static size_t
+sja1105_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 16; i < 8; i++, offset += 10)
+ sja1105_packing(buf, &entry->partspc[i],
+ offset + 9, offset + 0, size, op);
+ sja1105_packing(buf, &entry->debugen, 15, 15, size, op);
+ return size;
+}
+
+static size_t
+sja1110_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 8; i < 8; i++, offset += 11)
+ sja1105_packing(buf, &entry->partspc[i],
+ offset + 10, offset + 0, size, op);
+ sja1105_packing(buf, &entry->debugen, 7, 7, size, op);
+ return size;
+}
+
+static size_t sja1105_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 31, 31, size, op);
+ sja1105_packing(buf, &entry->priority, 30, 28, size, op);
+ sja1105_packing(buf, &entry->partition, 27, 25, size, op);
+ sja1105_packing(buf, &entry->destports, 24, 20, size, op);
+ return size;
+}
+
+static size_t sja1110_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 31, 31, size, op);
+ sja1105_packing(buf, &entry->priority, 30, 28, size, op);
+ sja1105_packing(buf, &entry->partition, 27, 25, size, op);
+ sja1105_packing(buf, &entry->destports, 24, 14, size, op);
+ return size;
+}
+
+size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_LOOKUP_ENTRY;
+
+ if (entry->format == SJA1105_VL_FORMAT_PSFP) {
+ /* Interpreting vllupformat as 0 */
+ sja1105_packing(buf, &entry->destports,
+ 95, 91, size, op);
+ sja1105_packing(buf, &entry->iscritical,
+ 90, 90, size, op);
+ sja1105_packing(buf, &entry->macaddr,
+ 89, 42, size, op);
+ sja1105_packing(buf, &entry->vlanid,
+ 41, 30, size, op);
+ sja1105_packing(buf, &entry->port,
+ 29, 27, size, op);
+ sja1105_packing(buf, &entry->vlanprior,
+ 26, 24, size, op);
+ } else {
+ /* Interpreting vllupformat as 1 */
+ sja1105_packing(buf, &entry->egrmirr,
+ 95, 91, size, op);
+ sja1105_packing(buf, &entry->ingrmirr,
+ 90, 90, size, op);
+ sja1105_packing(buf, &entry->vlid,
+ 57, 42, size, op);
+ sja1105_packing(buf, &entry->port,
+ 29, 27, size, op);
+ }
+ return size;
+}
+
+size_t sja1110_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_LOOKUP_ENTRY;
+
+ if (entry->format == SJA1105_VL_FORMAT_PSFP) {
+ /* Interpreting vllupformat as 0 */
+ sja1105_packing(buf, &entry->destports,
+ 94, 84, size, op);
+ sja1105_packing(buf, &entry->iscritical,
+ 83, 83, size, op);
+ sja1105_packing(buf, &entry->macaddr,
+ 82, 35, size, op);
+ sja1105_packing(buf, &entry->vlanid,
+ 34, 23, size, op);
+ sja1105_packing(buf, &entry->port,
+ 22, 19, size, op);
+ sja1105_packing(buf, &entry->vlanprior,
+ 18, 16, size, op);
+ } else {
+ /* Interpreting vllupformat as 1 */
+ sja1105_packing(buf, &entry->egrmirr,
+ 94, 84, size, op);
+ sja1105_packing(buf, &entry->ingrmirr,
+ 83, 83, size, op);
+ sja1105_packing(buf, &entry->vlid,
+ 50, 35, size, op);
+ sja1105_packing(buf, &entry->port,
+ 22, 19, size, op);
+ }
+ return size;
+}
+
+static size_t sja1105_vl_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_policing_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_POLICING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 63, 63, size, op);
+ sja1105_packing(buf, &entry->maxlen, 62, 52, size, op);
+ sja1105_packing(buf, &entry->sharindx, 51, 42, size, op);
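+	/* The bandwidth allocation gap (bag) and jitter only apply to
+	 * rate-constrained (type 0) virtual links, so they are skipped for
+	 * time-triggered ones.
+	 */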
+ if (entry->type == 0) {
+ sja1105_packing(buf, &entry->bag, 41, 28, size, op);
+ sja1105_packing(buf, &entry->jitter, 27, 18, size, op);
+ }
+ return size;
+}
+
+size_t sja1110_vl_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_policing_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_POLICING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 63, 63, size, op);
+ sja1105_packing(buf, &entry->maxlen, 62, 52, size, op);
+ sja1105_packing(buf, &entry->sharindx, 51, 40, size, op);
+ if (entry->type == 0) {
+ sja1105_packing(buf, &entry->bag, 41, 28, size, op);
+ sja1105_packing(buf, &entry->jitter, 27, 18, size, op);
+ }
+ return size;
+}
+
+size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY;
+ struct sja1105_vlan_lookup_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->ving_mirr, 63, 59, size, op);
+ sja1105_packing(buf, &entry->vegr_mirr, 58, 54, size, op);
+ sja1105_packing(buf, &entry->vmemb_port, 53, 49, size, op);
+ sja1105_packing(buf, &entry->vlan_bc, 48, 44, size, op);
+ sja1105_packing(buf, &entry->tag_port, 43, 39, size, op);
+ sja1105_packing(buf, &entry->vlanid, 38, 27, size, op);
+ return size;
+}
+
+size_t sja1110_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vlan_lookup_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_VLAN_LOOKUP_ENTRY;
+
+ sja1105_packing(buf, &entry->ving_mirr, 95, 85, size, op);
+ sja1105_packing(buf, &entry->vegr_mirr, 84, 74, size, op);
+ sja1105_packing(buf, &entry->vmemb_port, 73, 63, size, op);
+ sja1105_packing(buf, &entry->vlan_bc, 62, 52, size, op);
+ sja1105_packing(buf, &entry->tag_port, 51, 41, size, op);
+ sja1105_packing(buf, &entry->type_entry, 40, 39, size, op);
+ sja1105_packing(buf, &entry->vlanid, 38, 27, size, op);
+ return size;
+}
+
+static size_t sja1105_xmii_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_XMII_PARAMS_ENTRY;
+ struct sja1105_xmii_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 17; i < 5; i++, offset += 3) {
+ sja1105_packing(buf, &entry->xmii_mode[i],
+ offset + 1, offset + 0, size, op);
+ sja1105_packing(buf, &entry->phy_mac[i],
+ offset + 2, offset + 2, size, op);
+ }
+ return size;
+}
+
+size_t sja1110_xmii_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1110_SIZE_XMII_PARAMS_ENTRY;
+ struct sja1105_xmii_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 20; i < SJA1110_NUM_PORTS; i++, offset += 4) {
+ sja1105_packing(buf, &entry->xmii_mode[i],
+ offset + 1, offset + 0, size, op);
+ sja1105_packing(buf, &entry->phy_mac[i],
+ offset + 2, offset + 2, size, op);
+ sja1105_packing(buf, &entry->special[i],
+ offset + 3, offset + 3, size, op);
+ }
+ return size;
+}
+
+size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_retagging_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_RETAGGING_ENTRY;
+
+ sja1105_packing(buf, &entry->egr_port, 63, 59, size, op);
+ sja1105_packing(buf, &entry->ing_port, 58, 54, size, op);
+ sja1105_packing(buf, &entry->vlan_ing, 53, 42, size, op);
+ sja1105_packing(buf, &entry->vlan_egr, 41, 30, size, op);
+ sja1105_packing(buf, &entry->do_not_learn, 29, 29, size, op);
+ sja1105_packing(buf, &entry->use_dest_ports, 28, 28, size, op);
+ sja1105_packing(buf, &entry->destports, 27, 23, size, op);
+ return size;
+}
+
+size_t sja1110_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_retagging_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_RETAGGING_ENTRY;
+
+ sja1105_packing(buf, &entry->egr_port, 63, 53, size, op);
+ sja1105_packing(buf, &entry->ing_port, 52, 42, size, op);
+ sja1105_packing(buf, &entry->vlan_ing, 41, 30, size, op);
+ sja1105_packing(buf, &entry->vlan_egr, 29, 18, size, op);
+ sja1105_packing(buf, &entry->do_not_learn, 17, 17, size, op);
+ sja1105_packing(buf, &entry->use_dest_ports, 16, 16, size, op);
+ sja1105_packing(buf, &entry->destports, 15, 5, size, op);
+ return size;
+}
+
+static size_t sja1110_pcp_remapping_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1110_pcp_remapping_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_PCP_REMAPPING_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 8; i < SJA1105_NUM_TC; i++, offset += 3)
+ sja1105_packing(buf, &entry->egrpcp[i],
+ offset + 2, offset + 0, size, op);
+
+ return size;
+}
+
+size_t sja1105_table_header_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_TABLE_HEADER;
+ struct sja1105_table_header *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->block_id, 31, 24, size, op);
+ sja1105_packing(buf, &entry->len, 55, 32, size, op);
+ sja1105_packing(buf, &entry->crc, 95, 64, size, op);
+ return size;
+}
+
+/* WARNING: the *hdr pointer cannot be const, because this function updates
+ * the header's CRC as part of a 2-stage packing operation.
+ */
+void
+sja1105_table_header_pack_with_crc(void *buf, struct sja1105_table_header *hdr)
+{
+ /* First pack the table as-is, then calculate the CRC, and
+ * finally put the proper CRC into the packed buffer
+ */
+ memset(buf, 0, SJA1105_SIZE_TABLE_HEADER);
+ sja1105_table_header_packing(buf, hdr, PACK);
+ hdr->crc = sja1105_crc32(buf, SJA1105_SIZE_TABLE_HEADER - 4);
+ sja1105_pack(buf + SJA1105_SIZE_TABLE_HEADER - 4, &hdr->crc, 31, 0, 4);
+}
+
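+/* Compute the CRC over the area between table_start and crc_ptr, and write
+ * it in place at crc_ptr as a packed 32-bit word.
+ */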
+static void sja1105_table_write_crc(u8 *table_start, u8 *crc_ptr)
+{
+ u64 computed_crc;
+ int len_bytes;
+
+ len_bytes = (uintptr_t)(crc_ptr - table_start);
+ computed_crc = sja1105_crc32(table_start, len_bytes);
+ sja1105_pack(crc_ptr, &computed_crc, 31, 0, 4);
+}
+
+/* The block IDs that the switches support are unfortunately sparse, so keep a
+ * mapping table to "block indices" and translate back and forth so that we
 * don't waste memory in struct sja1105_static_config.
+ * Also, since the block id comes from essentially untrusted input (unpacking
+ * the static config from userspace) it has to be sanitized (range-checked)
+ * before blindly indexing kernel memory with the blk_idx.
+ */
+static u64 blk_id_map[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = BLKID_SCHEDULE,
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = BLKID_SCHEDULE_ENTRY_POINTS,
+ [BLK_IDX_VL_LOOKUP] = BLKID_VL_LOOKUP,
+ [BLK_IDX_VL_POLICING] = BLKID_VL_POLICING,
+ [BLK_IDX_VL_FORWARDING] = BLKID_VL_FORWARDING,
+ [BLK_IDX_L2_LOOKUP] = BLKID_L2_LOOKUP,
+ [BLK_IDX_L2_POLICING] = BLKID_L2_POLICING,
+ [BLK_IDX_VLAN_LOOKUP] = BLKID_VLAN_LOOKUP,
+ [BLK_IDX_L2_FORWARDING] = BLKID_L2_FORWARDING,
+ [BLK_IDX_MAC_CONFIG] = BLKID_MAC_CONFIG,
+ [BLK_IDX_SCHEDULE_PARAMS] = BLKID_SCHEDULE_PARAMS,
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = BLKID_SCHEDULE_ENTRY_POINTS_PARAMS,
+ [BLK_IDX_VL_FORWARDING_PARAMS] = BLKID_VL_FORWARDING_PARAMS,
+ [BLK_IDX_L2_LOOKUP_PARAMS] = BLKID_L2_LOOKUP_PARAMS,
+ [BLK_IDX_L2_FORWARDING_PARAMS] = BLKID_L2_FORWARDING_PARAMS,
+ [BLK_IDX_AVB_PARAMS] = BLKID_AVB_PARAMS,
+ [BLK_IDX_GENERAL_PARAMS] = BLKID_GENERAL_PARAMS,
+ [BLK_IDX_RETAGGING] = BLKID_RETAGGING,
+ [BLK_IDX_XMII_PARAMS] = BLKID_XMII_PARAMS,
+ [BLK_IDX_PCP_REMAPPING] = BLKID_PCP_REMAPPING,
+};
+
+const char *sja1105_static_config_error_msg[] = {
+ [SJA1105_CONFIG_OK] = "",
+ [SJA1105_TTETHERNET_NOT_SUPPORTED] =
+ "schedule-table present, but TTEthernet is "
+ "only supported on T and Q/S",
+ [SJA1105_INCORRECT_TTETHERNET_CONFIGURATION] =
+ "schedule-table present, but one of "
+ "schedule-entry-points-table, schedule-parameters-table or "
+ "schedule-entry-points-parameters table is empty",
+ [SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION] =
+ "vl-lookup-table present, but one of vl-policing-table, "
+ "vl-forwarding-table or vl-forwarding-parameters-table is empty",
+ [SJA1105_MISSING_L2_POLICING_TABLE] =
+ "l2-policing-table needs to have at least one entry",
+ [SJA1105_MISSING_L2_FORWARDING_TABLE] =
+ "l2-forwarding-table is either missing or incomplete",
+ [SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE] =
+ "l2-forwarding-parameters-table is missing",
+ [SJA1105_MISSING_GENERAL_PARAMS_TABLE] =
+ "general-parameters-table is missing",
+ [SJA1105_MISSING_VLAN_TABLE] =
+ "vlan-lookup-table needs to have at least the default untagged VLAN",
+ [SJA1105_MISSING_XMII_TABLE] =
+ "xmii-table is missing",
+ [SJA1105_MISSING_MAC_TABLE] =
+ "mac-configuration-table needs to contain an entry for each port",
+ [SJA1105_OVERCOMMITTED_FRAME_MEMORY] =
+ "Not allowed to overcommit frame memory. L2 memory partitions "
+ "and VL memory partitions share the same space. The sum of all "
+ "16 memory partitions is not allowed to be larger than 929 "
+ "128-byte blocks (or 910 with retagging). Please adjust "
+ "l2-forwarding-parameters-table.part_spc and/or "
+ "vl-forwarding-parameters-table.partspc.",
+};
+
+static sja1105_config_valid_t
+static_config_check_memory_size(const struct sja1105_table *tables, int max_mem)
+{
+ const struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
+ const struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
+ int i, mem = 0;
+
+ l2_fwd_params = tables[BLK_IDX_L2_FORWARDING_PARAMS].entries;
+
+ for (i = 0; i < 8; i++)
+ mem += l2_fwd_params->part_spc[i];
+
+ if (tables[BLK_IDX_VL_FORWARDING_PARAMS].entry_count) {
+ vl_fwd_params = tables[BLK_IDX_VL_FORWARDING_PARAMS].entries;
+ for (i = 0; i < 8; i++)
+ mem += vl_fwd_params->partspc[i];
+ }
+
+ if (tables[BLK_IDX_RETAGGING].entry_count)
+ max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;
+
+ if (mem > max_mem)
+ return SJA1105_OVERCOMMITTED_FRAME_MEMORY;
+
+ return SJA1105_CONFIG_OK;
+}
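+
+/* Worked example: with the VL tables empty and no retagging entries, a
+ * config whose eight part_spc values sum to at most 929 blocks passes on
+ * SJA1105 (SJA1105_MAX_FRAME_MEMORY); one retagging entry shrinks the
+ * budget by SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD (19) blocks to 910.
+ */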
+
+sja1105_config_valid_t
+sja1105_static_config_check_valid(const struct sja1105_static_config *config,
+ int max_mem)
+{
+ const struct sja1105_table *tables = config->tables;
+#define IS_FULL(blk_idx) \
+ (tables[blk_idx].entry_count == tables[blk_idx].ops->max_entry_count)
+
+ if (tables[BLK_IDX_SCHEDULE].entry_count) {
+ if (!tables[BLK_IDX_SCHEDULE].ops->max_entry_count)
+ return SJA1105_TTETHERNET_NOT_SUPPORTED;
+
+ if (tables[BLK_IDX_SCHEDULE_ENTRY_POINTS].entry_count == 0)
+ return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
+
+ if (!IS_FULL(BLK_IDX_SCHEDULE_PARAMS))
+ return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
+
+ if (!IS_FULL(BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS))
+ return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
+ }
+ if (tables[BLK_IDX_VL_LOOKUP].entry_count) {
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ bool has_critical_links = false;
+ int i;
+
+ vl_lookup = tables[BLK_IDX_VL_LOOKUP].entries;
+
+ for (i = 0; i < tables[BLK_IDX_VL_LOOKUP].entry_count; i++) {
+ if (vl_lookup[i].iscritical) {
+ has_critical_links = true;
+ break;
+ }
+ }
+
+ if (tables[BLK_IDX_VL_POLICING].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+
+ if (tables[BLK_IDX_VL_FORWARDING].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+
+ if (tables[BLK_IDX_VL_FORWARDING_PARAMS].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+ }
+
+ if (tables[BLK_IDX_L2_POLICING].entry_count == 0)
+ return SJA1105_MISSING_L2_POLICING_TABLE;
+
+ if (tables[BLK_IDX_VLAN_LOOKUP].entry_count == 0)
+ return SJA1105_MISSING_VLAN_TABLE;
+
+ if (!IS_FULL(BLK_IDX_L2_FORWARDING))
+ return SJA1105_MISSING_L2_FORWARDING_TABLE;
+
+ if (!IS_FULL(BLK_IDX_MAC_CONFIG))
+ return SJA1105_MISSING_MAC_TABLE;
+
+ if (!IS_FULL(BLK_IDX_L2_FORWARDING_PARAMS))
+ return SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE;
+
+ if (!IS_FULL(BLK_IDX_GENERAL_PARAMS))
+ return SJA1105_MISSING_GENERAL_PARAMS_TABLE;
+
+ if (!IS_FULL(BLK_IDX_XMII_PARAMS))
+ return SJA1105_MISSING_XMII_TABLE;
+
+ return static_config_check_memory_size(tables, max_mem);
+#undef IS_FULL
+}
+
+void
+sja1105_static_config_pack(void *buf, struct sja1105_static_config *config)
+{
+ struct sja1105_table_header header = {0};
+ enum sja1105_blk_idx i;
+ char *p = buf;
+ int j;
+
+ sja1105_pack(p, &config->device_id, 31, 0, 4);
+ p += SJA1105_SIZE_DEVICE_ID;
+
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ const struct sja1105_table *table;
+ char *table_start;
+
+ table = &config->tables[i];
+ if (!table->entry_count)
+ continue;
+
+ header.block_id = blk_id_map[i];
+ header.len = table->entry_count *
+ table->ops->packed_entry_size / 4;
+ sja1105_table_header_pack_with_crc(p, &header);
+ p += SJA1105_SIZE_TABLE_HEADER;
+ table_start = p;
+ for (j = 0; j < table->entry_count; j++) {
+ u8 *entry_ptr = table->entries;
+
+ entry_ptr += j * table->ops->unpacked_entry_size;
+ memset(p, 0, table->ops->packed_entry_size);
+ table->ops->packing(p, entry_ptr, PACK);
+ p += table->ops->packed_entry_size;
+ }
+ sja1105_table_write_crc(table_start, p);
+ p += 4;
+ }
+ /* Final header:
+ * Block ID does not matter
+ * Length of 0 marks that header is final
+ * CRC will be replaced on-the-fly on "config upload"
+ */
+ header.block_id = 0;
+ header.len = 0;
+ header.crc = 0xDEADBEEF;
+ memset(p, 0, SJA1105_SIZE_TABLE_HEADER);
+ sja1105_table_header_packing(p, &header, PACK);
+}
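+
+/* Hedged usage sketch (abridged from what the SPI upload path does):
+ * validate, size, allocate and pack a static config. The function name,
+ * the choice of SJA1105_MAX_FRAME_MEMORY and the error handling are
+ * illustrative only.
+ */
+static inline int example_pack_static_config(struct sja1105_static_config *config)
+{
+	size_t len = sja1105_static_config_get_length(config);
+	void *buf;
+
+	if (sja1105_static_config_check_valid(config,
+					      SJA1105_MAX_FRAME_MEMORY) !=
+	    SJA1105_CONFIG_OK)
+		return -EINVAL;
+
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	sja1105_static_config_pack(buf, config);
+	/* ... transfer buf to the switch over SPI here ... */
+	kfree(buf);
+
+	return 0;
+}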
+
+size_t
+sja1105_static_config_get_length(const struct sja1105_static_config *config)
+{
+ unsigned int sum;
+ unsigned int header_count;
+ enum sja1105_blk_idx i;
+
+ /* Ending header */
+ header_count = 1;
+ sum = SJA1105_SIZE_DEVICE_ID;
+
+ /* Tables (headers and entries) */
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ const struct sja1105_table *table;
+
+ table = &config->tables[i];
+ if (table->entry_count)
+ header_count++;
+
+ sum += table->ops->packed_entry_size * table->entry_count;
+ }
+ /* Headers have an additional CRC at the end */
+ sum += header_count * (SJA1105_SIZE_TABLE_HEADER + 4);
+ /* Last header does not have an extra CRC because there is no data */
+ sum -= 4;
+
+ return sum;
+}
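+
+/* Worked example of the arithmetic above (illustrative): a config with one
+ * non-empty table holding two 8-byte entries takes up 4 (device ID) +
+ * 16 (entries) + 2 * 16 (two headers, each 12 bytes plus a 4-byte data CRC
+ * slot) - 4 (the final header carries no data CRC) = 48 bytes.
+ */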
+
+/* Compatibility matrices */
+
+/* SJA1105E: First generation, no TTEthernet */
+const struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105et_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105et_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105et_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105et_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105et_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105T: First generation, TTEthernet */
+const struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = {
+ .packing = sja1105_schedule_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
+ .packing = sja1105_schedule_entry_points_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
+ },
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105et_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105et_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_PARAMS] = {
+ .packing = sja1105_schedule_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
+ .packing = sja1105_schedule_entry_points_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105et_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105et_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105et_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105P: Second generation, no TTEthernet, no SGMII */
+const struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105Q: Second generation, TTEthernet, no SGMII */
+const struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = {
+ .packing = sja1105_schedule_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
+ .packing = sja1105_schedule_entry_points_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
+ },
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_PARAMS] = {
+ .packing = sja1105_schedule_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
+ .packing = sja1105_schedule_entry_points_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105R: Second generation, no TTEthernet, SGMII */
+const struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105S: Second generation, TTEthernet, SGMII */
+const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = {
+ .packing = sja1105_schedule_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
+ .packing = sja1105_schedule_entry_points_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
+ },
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_PARAMS] = {
+ .packing = sja1105_schedule_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
+ .packing = sja1105_schedule_entry_points_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1110: Third generation (shared by all SJA1110 variants) */
+const struct sja1105_table_ops sja1110_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = {
+ .packing = sja1110_schedule_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
+ .packed_entry_size = SJA1110_SIZE_SCHEDULE_ENTRY,
+ .max_entry_count = SJA1110_MAX_SCHEDULE_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
+ .packing = sja1110_schedule_entry_points_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
+ .packed_entry_size = SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
+ },
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1110_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1110_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1110_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1110_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1110_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1110_MAX_VL_FORWARDING_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1110_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1110_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1110_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1110_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1110_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1110_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1110_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1110_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1110_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_PARAMS] = {
+ .packing = sja1110_schedule_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
+ .packing = sja1105_schedule_entry_points_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1110_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1110_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1110_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1110_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1110_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1110_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1110_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1110_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+ [BLK_IDX_PCP_REMAPPING] = {
+ .packing = sja1110_pcp_remapping_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1110_pcp_remapping_entry),
+ .packed_entry_size = SJA1110_SIZE_PCP_REMAPPING_ENTRY,
+ .max_entry_count = SJA1110_MAX_PCP_REMAPPING_COUNT,
+ },
+};
+
+int sja1105_static_config_init(struct sja1105_static_config *config,
+ const struct sja1105_table_ops *static_ops,
+ u64 device_id)
+{
+ enum sja1105_blk_idx i;
+
+ *config = (struct sja1105_static_config) {0};
+
+ /* Transfer static_ops array from priv into per-table ops
+ * for handier access
+ */
+ for (i = 0; i < BLK_IDX_MAX; i++)
+ config->tables[i].ops = &static_ops[i];
+
+ config->device_id = device_id;
+ return 0;
+}
+
+void sja1105_static_config_free(struct sja1105_static_config *config)
+{
+ enum sja1105_blk_idx i;
+
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ if (config->tables[i].entry_count) {
+ kfree(config->tables[i].entries);
+ config->tables[i].entry_count = 0;
+ }
+ }
+}
+
+int sja1105_table_delete_entry(struct sja1105_table *table, int i)
+{
+ size_t entry_size = table->ops->unpacked_entry_size;
+ u8 *entries = table->entries;
+
+	if (i >= table->entry_count)
+		return -ERANGE;
+
+	memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
+		(table->entry_count - i - 1) * entry_size);
+
+ table->entry_count--;
+
+ return 0;
+}
+
+/* No pointers to table->entries should be kept when this is called. */
+int sja1105_table_resize(struct sja1105_table *table, size_t new_count)
+{
+ size_t entry_size = table->ops->unpacked_entry_size;
+ void *new_entries, *old_entries = table->entries;
+
+ if (new_count > table->ops->max_entry_count)
+ return -ERANGE;
+
+ new_entries = kcalloc(new_count, entry_size, GFP_KERNEL);
+ if (!new_entries)
+ return -ENOMEM;
+
+ memcpy(new_entries, old_entries, min(new_count, table->entry_count) *
+ entry_size);
+
+ table->entries = new_entries;
+ table->entry_count = new_count;
+ kfree(old_entries);
+ return 0;
+}
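+
+/* Hedged usage sketch: growing a table by one entry, in the style of this
+ * function's callers. Note that no pointer into the old entries array is
+ * kept across the resize. The function name is illustrative.
+ */
+static inline int example_add_vlan_entry(struct sja1105_static_config *config,
+					 u16 vid)
+{
+	struct sja1105_table *table = &config->tables[BLK_IDX_VLAN_LOOKUP];
+	struct sja1105_vlan_lookup_entry *vlan;
+	int rc;
+
+	rc = sja1105_table_resize(table, table->entry_count + 1);
+	if (rc < 0)
+		return rc;
+
+	vlan = table->entries;
+	vlan[table->entry_count - 1].vlanid = vid;
+
+	return 0;
+}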
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
new file mode 100644
index 000000000..6a372d5f2
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -0,0 +1,548 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright 2016-2018 NXP
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_STATIC_CONFIG_H
+#define _SJA1105_STATIC_CONFIG_H
+
+#include <linux/packing.h>
+#include <linux/types.h>
+#include <asm/types.h>
+
+#define SJA1105_NUM_PORTS 5
+#define SJA1110_NUM_PORTS 11
+#define SJA1105_MAX_NUM_PORTS SJA1110_NUM_PORTS
+#define SJA1105_NUM_TC 8
+
+#define SJA1105_SIZE_SPI_MSG_HEADER 4
+#define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4)
+#define SJA1105_SIZE_DEVICE_ID 4
+#define SJA1105_SIZE_TABLE_HEADER 12
+#define SJA1105_SIZE_SCHEDULE_ENTRY 8
+#define SJA1110_SIZE_SCHEDULE_ENTRY 12
+#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY 4
+#define SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY 8
+#define SJA1105_SIZE_VL_LOOKUP_ENTRY 12
+#define SJA1105_SIZE_VL_POLICING_ENTRY 8
+#define SJA1105_SIZE_VL_FORWARDING_ENTRY 4
+#define SJA1105_SIZE_L2_POLICING_ENTRY 8
+#define SJA1105_SIZE_VLAN_LOOKUP_ENTRY 8
+#define SJA1110_SIZE_VLAN_LOOKUP_ENTRY 12
+#define SJA1105_SIZE_L2_FORWARDING_ENTRY 8
+#define SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY 12
+#define SJA1105_SIZE_RETAGGING_ENTRY 8
+#define SJA1105_SIZE_XMII_PARAMS_ENTRY 4
+#define SJA1110_SIZE_XMII_PARAMS_ENTRY 8
+#define SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY 12
+#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY 4
+#define SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY 12
+#define SJA1105ET_SIZE_L2_LOOKUP_ENTRY 12
+#define SJA1105ET_SIZE_MAC_CONFIG_ENTRY 28
+#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY 4
+#define SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY 40
+#define SJA1105ET_SIZE_AVB_PARAMS_ENTRY 12
+#define SJA1105ET_SIZE_CBS_ENTRY 16
+#define SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY 20
+#define SJA1110_SIZE_L2_LOOKUP_ENTRY 24
+#define SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY 32
+#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY 16
+#define SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY 28
+#define SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY 44
+#define SJA1110_SIZE_GENERAL_PARAMS_ENTRY 56
+#define SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY 16
+#define SJA1105PQRS_SIZE_CBS_ENTRY 20
+#define SJA1110_SIZE_PCP_REMAPPING_ENTRY 4
+
+/* UM10944.pdf Page 11, Table 2. Configuration Blocks */
+enum {
+ BLKID_SCHEDULE = 0x00,
+ BLKID_SCHEDULE_ENTRY_POINTS = 0x01,
+ BLKID_VL_LOOKUP = 0x02,
+ BLKID_VL_POLICING = 0x03,
+ BLKID_VL_FORWARDING = 0x04,
+ BLKID_L2_LOOKUP = 0x05,
+ BLKID_L2_POLICING = 0x06,
+ BLKID_VLAN_LOOKUP = 0x07,
+ BLKID_L2_FORWARDING = 0x08,
+ BLKID_MAC_CONFIG = 0x09,
+ BLKID_SCHEDULE_PARAMS = 0x0A,
+ BLKID_SCHEDULE_ENTRY_POINTS_PARAMS = 0x0B,
+ BLKID_VL_FORWARDING_PARAMS = 0x0C,
+ BLKID_L2_LOOKUP_PARAMS = 0x0D,
+ BLKID_L2_FORWARDING_PARAMS = 0x0E,
+ BLKID_AVB_PARAMS = 0x10,
+ BLKID_GENERAL_PARAMS = 0x11,
+ BLKID_RETAGGING = 0x12,
+ BLKID_CBS = 0x13,
+ BLKID_PCP_REMAPPING = 0x1C,
+ BLKID_XMII_PARAMS = 0x4E,
+};
+
+enum sja1105_blk_idx {
+ BLK_IDX_SCHEDULE = 0,
+ BLK_IDX_SCHEDULE_ENTRY_POINTS,
+ BLK_IDX_VL_LOOKUP,
+ BLK_IDX_VL_POLICING,
+ BLK_IDX_VL_FORWARDING,
+ BLK_IDX_L2_LOOKUP,
+ BLK_IDX_L2_POLICING,
+ BLK_IDX_VLAN_LOOKUP,
+ BLK_IDX_L2_FORWARDING,
+ BLK_IDX_MAC_CONFIG,
+ BLK_IDX_SCHEDULE_PARAMS,
+ BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS,
+ BLK_IDX_VL_FORWARDING_PARAMS,
+ BLK_IDX_L2_LOOKUP_PARAMS,
+ BLK_IDX_L2_FORWARDING_PARAMS,
+ BLK_IDX_AVB_PARAMS,
+ BLK_IDX_GENERAL_PARAMS,
+ BLK_IDX_RETAGGING,
+ BLK_IDX_CBS,
+ BLK_IDX_XMII_PARAMS,
+ BLK_IDX_PCP_REMAPPING,
+ BLK_IDX_MAX,
+ /* Fake block indices that are only valid for dynamic access */
+ BLK_IDX_MGMT_ROUTE,
+ BLK_IDX_MAX_DYN,
+ BLK_IDX_INVAL = -1,
+};
+
+#define SJA1105_MAX_SCHEDULE_COUNT 1024
+#define SJA1110_MAX_SCHEDULE_COUNT 4096
+#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT 2048
+#define SJA1105_MAX_VL_LOOKUP_COUNT 1024
+#define SJA1110_MAX_VL_LOOKUP_COUNT 4096
+#define SJA1105_MAX_VL_POLICING_COUNT 1024
+#define SJA1110_MAX_VL_POLICING_COUNT 4096
+#define SJA1105_MAX_VL_FORWARDING_COUNT 1024
+#define SJA1110_MAX_VL_FORWARDING_COUNT 4096
+#define SJA1105_MAX_L2_LOOKUP_COUNT 1024
+#define SJA1105_MAX_L2_POLICING_COUNT 45
+#define SJA1110_MAX_L2_POLICING_COUNT 110
+#define SJA1105_MAX_VLAN_LOOKUP_COUNT 4096
+#define SJA1105_MAX_L2_FORWARDING_COUNT 13
+#define SJA1110_MAX_L2_FORWARDING_COUNT 19
+#define SJA1105_MAX_MAC_CONFIG_COUNT 5
+#define SJA1110_MAX_MAC_CONFIG_COUNT 11
+#define SJA1105_MAX_SCHEDULE_PARAMS_COUNT 1
+#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT 1
+#define SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT 1
+#define SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT 1
+#define SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT 1
+#define SJA1105_MAX_GENERAL_PARAMS_COUNT 1
+#define SJA1105_MAX_RETAGGING_COUNT 32
+#define SJA1105_MAX_XMII_PARAMS_COUNT 1
+#define SJA1105_MAX_AVB_PARAMS_COUNT 1
+#define SJA1105ET_MAX_CBS_COUNT 10
+#define SJA1105PQRS_MAX_CBS_COUNT 16
+#define SJA1110_MAX_CBS_COUNT 80
+#define SJA1110_MAX_PCP_REMAPPING_COUNT 11
+
+#define SJA1105_MAX_FRAME_MEMORY 929
+#define SJA1110_MAX_FRAME_MEMORY 1820
+#define SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD 19
+#define SJA1105_VL_FRAME_MEMORY 100
+
+#define SJA1105E_DEVICE_ID 0x9C00000Cull
+#define SJA1105T_DEVICE_ID 0x9E00030Eull
+#define SJA1105PR_DEVICE_ID 0xAF00030Eull
+#define SJA1105QS_DEVICE_ID 0xAE00030Eull
+#define SJA1110_DEVICE_ID 0xB700030Full
+
+#define SJA1105ET_PART_NO 0x9A83
+#define SJA1105P_PART_NO 0x9A84
+#define SJA1105Q_PART_NO 0x9A85
+#define SJA1105R_PART_NO 0x9A86
+#define SJA1105S_PART_NO 0x9A87
+#define SJA1110A_PART_NO 0x1110
+#define SJA1110B_PART_NO 0x1111
+#define SJA1110C_PART_NO 0x1112
+#define SJA1110D_PART_NO 0x1113
+
+#define SJA1110_ACU 0x1c4400
+#define SJA1110_RGU 0x1c6000
+#define SJA1110_CGU 0x1c6400
+
+#define SJA1110_SPI_ADDR(x) ((x) / 4)
+#define SJA1110_ACU_ADDR(x) (SJA1110_ACU + SJA1110_SPI_ADDR(x))
+#define SJA1110_CGU_ADDR(x) (SJA1110_CGU + SJA1110_SPI_ADDR(x))
+#define SJA1110_RGU_ADDR(x) (SJA1110_RGU + SJA1110_SPI_ADDR(x))
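+
+/* Worked example: SJA1110_CGU_ADDR(0x8) == 0x1c6400 + 2; the macros turn a
+ * byte offset into a 32-bit word offset before adding the block base.
+ */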
+
+#define SJA1105_RSV_ADDR 0xffffffffffffffffull
+
+struct sja1105_schedule_entry {
+ u64 winstindex;
+ u64 winend;
+ u64 winst;
+ u64 destports;
+ u64 setvalid;
+ u64 txen;
+ u64 resmedia_en;
+ u64 resmedia;
+ u64 vlindex;
+ u64 delta;
+};
+
+struct sja1105_schedule_params_entry {
+ u64 subscheind[8];
+};
+
+struct sja1105_general_params_entry {
+ u64 vllupformat;
+ u64 mirr_ptacu;
+ u64 switchid;
+ u64 hostprio;
+ u64 mac_fltres1;
+ u64 mac_fltres0;
+ u64 mac_flt1;
+ u64 mac_flt0;
+ u64 incl_srcpt1;
+ u64 incl_srcpt0;
+ u64 send_meta1;
+ u64 send_meta0;
+ u64 casc_port;
+ u64 host_port;
+ u64 mirr_port;
+ u64 vlmarker;
+ u64 vlmask;
+ u64 tpid;
+ u64 ignore2stf;
+ u64 tpid2;
+ /* P/Q/R/S only */
+ u64 queue_ts;
+ u64 egrmirrvid;
+ u64 egrmirrpcp;
+ u64 egrmirrdei;
+ u64 replay_port;
+ /* SJA1110 only */
+ u64 tte_en;
+ u64 tdmaconfigidx;
+ u64 header_type;
+};
+
+struct sja1105_schedule_entry_points_entry {
+ u64 subschindx;
+ u64 delta;
+ u64 address;
+};
+
+struct sja1105_schedule_entry_points_params_entry {
+ u64 clksrc;
+ u64 actsubsch;
+};
+
+struct sja1105_vlan_lookup_entry {
+ u64 ving_mirr;
+ u64 vegr_mirr;
+ u64 vmemb_port;
+ u64 vlan_bc;
+ u64 tag_port;
+ u64 vlanid;
+ u64 type_entry; /* SJA1110 only */
+};
+
+struct sja1105_l2_lookup_entry {
+ u64 vlanid;
+ u64 macaddr;
+ u64 destports;
+ u64 enfport;
+ u64 index;
+ /* P/Q/R/S only */
+ u64 mask_iotag;
+ u64 mask_vlanid;
+ u64 mask_macaddr;
+ u64 mask_srcport;
+ u64 iotag;
+ u64 srcport;
+ u64 lockeds;
+ union {
+ /* LOCKEDS=1: Static FDB entries */
+ struct {
+			/* TSREG is deprecated in SJA1110; TRAP is supported
+			 * only in SJA1110.
+ */
+ u64 trap;
+ u64 tsreg;
+ u64 mirrvlan;
+ u64 takets;
+ u64 mirr;
+ u64 retag;
+ };
+ /* LOCKEDS=0: Dynamically learned FDB entries */
+ struct {
+ u64 touched;
+ u64 age;
+ };
+ };
+};
+
+struct sja1105_l2_lookup_params_entry {
+ u64 maxaddrp[SJA1105_MAX_NUM_PORTS]; /* P/Q/R/S only */
+ u64 start_dynspc; /* P/Q/R/S only */
+ u64 drpnolearn; /* P/Q/R/S only */
+ u64 use_static; /* P/Q/R/S only */
+ u64 owr_dyn; /* P/Q/R/S only */
+ u64 learn_once; /* P/Q/R/S only */
+ u64 maxage; /* Shared */
+ u64 dyn_tbsz; /* E/T only */
+ u64 poly; /* E/T only */
+ u64 shared_learn; /* Shared */
+ u64 no_enf_hostprt; /* Shared */
+ u64 no_mgmt_learn; /* Shared */
+};
+
+struct sja1105_l2_forwarding_entry {
+ u64 bc_domain;
+ u64 reach_port;
+ u64 fl_domain;
+ /* This is actually max(SJA1105_NUM_TC, SJA1105_MAX_NUM_PORTS) */
+ u64 vlan_pmap[SJA1105_MAX_NUM_PORTS];
+ bool type_egrpcp2outputq;
+};
+
+struct sja1105_l2_forwarding_params_entry {
+ u64 max_dynp;
+ u64 part_spc[8];
+};
+
+struct sja1105_l2_policing_entry {
+ u64 sharindx;
+ u64 smax;
+ u64 rate;
+ u64 maxlen;
+ u64 partition;
+};
+
+struct sja1105_avb_params_entry {
+ u64 cas_master;
+ u64 destmeta;
+ u64 srcmeta;
+};
+
+struct sja1105_mac_config_entry {
+ u64 top[8];
+ u64 base[8];
+ u64 enabled[8];
+ u64 ifg;
+ u64 speed;
+ u64 tp_delin;
+ u64 tp_delout;
+ u64 maxage;
+ u64 vlanprio;
+ u64 vlanid;
+ u64 ing_mirr;
+ u64 egr_mirr;
+ u64 drpnona664;
+ u64 drpdtag;
+ u64 drpuntag;
+ u64 retag;
+ u64 dyn_learn;
+ u64 egress;
+ u64 ingress;
+};
+
+struct sja1105_retagging_entry {
+ u64 egr_port;
+ u64 ing_port;
+ u64 vlan_ing;
+ u64 vlan_egr;
+ u64 do_not_learn;
+ u64 use_dest_ports;
+ u64 destports;
+};
+
+struct sja1105_cbs_entry {
+ u64 port; /* Not used for SJA1110 */
+ u64 prio; /* Not used for SJA1110 */
+ u64 credit_hi;
+ u64 credit_lo;
+ u64 send_slope;
+ u64 idle_slope;
+};
+
+struct sja1105_xmii_params_entry {
+ u64 phy_mac[SJA1105_MAX_NUM_PORTS];
+ u64 xmii_mode[SJA1105_MAX_NUM_PORTS];
+	/* The SJA1110 insists on being a snowflake, and requires SGMII,
+ * 2500base-x and internal MII ports connected to the 100base-TX PHY to
+ * set this bit. We set it unconditionally from the high-level logic,
+ * and only sja1110_xmii_params_entry_packing writes it to the static
+ * config. I have no better name for it than "special".
+ */
+ u64 special[SJA1105_MAX_NUM_PORTS];
+};
+
+struct sja1110_pcp_remapping_entry {
+ u64 egrpcp[SJA1105_NUM_TC];
+};
+
+enum {
+ SJA1105_VL_FORMAT_PSFP = 0,
+ SJA1105_VL_FORMAT_ARINC664 = 1,
+};
+
+struct sja1105_vl_lookup_entry {
+ u64 format;
+ u64 port;
+ union {
+ /* SJA1105_VL_FORMAT_PSFP */
+ struct {
+ u64 destports;
+ u64 iscritical;
+ u64 macaddr;
+ u64 vlanid;
+ u64 vlanprior;
+ };
+ /* SJA1105_VL_FORMAT_ARINC664 */
+ struct {
+ u64 egrmirr;
+ u64 ingrmirr;
+ u64 vlid;
+ };
+ };
+ /* Not part of hardware structure */
+ unsigned long flow_cookie;
+};
+
+struct sja1105_vl_policing_entry {
+ u64 type;
+ u64 maxlen;
+ u64 sharindx;
+ u64 bag;
+ u64 jitter;
+};
+
+struct sja1105_vl_forwarding_entry {
+ u64 type;
+ u64 priority;
+ u64 partition;
+ u64 destports;
+};
+
+struct sja1105_vl_forwarding_params_entry {
+ u64 partspc[8];
+ u64 debugen;
+};
+
+struct sja1105_table_header {
+ u64 block_id;
+ u64 len;
+ u64 crc;
+};
+
+struct sja1105_table_ops {
+ size_t (*packing)(void *buf, void *entry_ptr, enum packing_op op);
+ size_t unpacked_entry_size;
+ size_t packed_entry_size;
+ size_t max_entry_count;
+};
+
+struct sja1105_table {
+ const struct sja1105_table_ops *ops;
+ size_t entry_count;
+ void *entries;
+};
+
+struct sja1105_static_config {
+ u64 device_id;
+ struct sja1105_table tables[BLK_IDX_MAX];
+};
+
+extern const struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1110_table_ops[BLK_IDX_MAX];
+
+size_t sja1105_table_header_packing(void *buf, void *hdr, enum packing_op op);
+void
+sja1105_table_header_pack_with_crc(void *buf, struct sja1105_table_header *hdr);
+size_t
+sja1105_static_config_get_length(const struct sja1105_static_config *config);
+
+typedef enum {
+ SJA1105_CONFIG_OK = 0,
+ SJA1105_TTETHERNET_NOT_SUPPORTED,
+ SJA1105_INCORRECT_TTETHERNET_CONFIGURATION,
+ SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION,
+ SJA1105_MISSING_L2_POLICING_TABLE,
+ SJA1105_MISSING_L2_FORWARDING_TABLE,
+ SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE,
+ SJA1105_MISSING_GENERAL_PARAMS_TABLE,
+ SJA1105_MISSING_VLAN_TABLE,
+ SJA1105_MISSING_XMII_TABLE,
+ SJA1105_MISSING_MAC_TABLE,
+ SJA1105_OVERCOMMITTED_FRAME_MEMORY,
+} sja1105_config_valid_t;
+
+extern const char *sja1105_static_config_error_msg[];
+
+sja1105_config_valid_t
+sja1105_static_config_check_valid(const struct sja1105_static_config *config,
+ int max_mem);
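+
+/* Typical caller pattern for the two declarations above (hedged sketch,
+ * mirroring how the driver reports validation failures):
+ *
+ *	valid = sja1105_static_config_check_valid(config, max_mem);
+ *	if (valid != SJA1105_CONFIG_OK) {
+ *		dev_err(dev, "%s\n", sja1105_static_config_error_msg[valid]);
+ *		return -EINVAL;
+ *	}
+ */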
+void
+sja1105_static_config_pack(void *buf, struct sja1105_static_config *config);
+int sja1105_static_config_init(struct sja1105_static_config *config,
+ const struct sja1105_table_ops *static_ops,
+ u64 device_id);
+void sja1105_static_config_free(struct sja1105_static_config *config);
+
+int sja1105_table_delete_entry(struct sja1105_table *table, int i);
+int sja1105_table_resize(struct sja1105_table *table, size_t new_count);
+
+u32 sja1105_crc32(const void *buf, size_t len);
+
+void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len);
+void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len);
+void sja1105_packing(void *buf, u64 *val, int start, int end,
+ size_t len, enum packing_op op);
+
+/* Common implementations for the static and dynamic configs */
+size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_vl_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_xmii_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
new file mode 100644
index 000000000..e6153848a
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -0,0 +1,897 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+#define SJA1105_TAS_CLKSRC_DISABLED 0
+#define SJA1105_TAS_CLKSRC_STANDALONE 1
+#define SJA1105_TAS_CLKSRC_AS6802 2
+#define SJA1105_TAS_CLKSRC_PTP 3
+#define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0)
+
+#define work_to_sja1105_tas(d) \
+ container_of((d), struct sja1105_tas_data, tas_work)
+#define tas_to_sja1105(d) \
+ container_of((d), struct sja1105_private, tas_data)
+
+static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
+ struct dsa_switch *ds = priv->ds;
+ s64 earliest_base_time = S64_MAX;
+ s64 latest_base_time = 0;
+ s64 its_cycle_time = 0;
+ s64 max_cycle_time = 0;
+ int port;
+
+ tas_data->enabled = false;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ const struct tc_taprio_qopt_offload *offload;
+
+ offload = tas_data->offload[port];
+ if (!offload)
+ continue;
+
+ tas_data->enabled = true;
+
+ if (max_cycle_time < offload->cycle_time)
+ max_cycle_time = offload->cycle_time;
+ if (latest_base_time < offload->base_time)
+ latest_base_time = offload->base_time;
+ if (earliest_base_time > offload->base_time) {
+ earliest_base_time = offload->base_time;
+ its_cycle_time = offload->cycle_time;
+ }
+ }
+
+ if (!list_empty(&gating_cfg->entries)) {
+ tas_data->enabled = true;
+
+ if (max_cycle_time < gating_cfg->cycle_time)
+ max_cycle_time = gating_cfg->cycle_time;
+ if (latest_base_time < gating_cfg->base_time)
+ latest_base_time = gating_cfg->base_time;
+ if (earliest_base_time > gating_cfg->base_time) {
+ earliest_base_time = gating_cfg->base_time;
+ its_cycle_time = gating_cfg->cycle_time;
+ }
+ }
+
+ if (!tas_data->enabled)
+ return 0;
+
+ /* Roll the earliest base time over until it is in a comparable
+ * time base with the latest, then compare their deltas.
+	 * We want to enforce that all ports' base times are within
+	 * SJA1105_TAS_MAX_DELTA increments of 200 ns of one another.
+ */
+ earliest_base_time = future_base_time(earliest_base_time,
+ its_cycle_time,
+ latest_base_time);
+ while (earliest_base_time > latest_base_time)
+ earliest_base_time -= its_cycle_time;
+ if (latest_base_time - earliest_base_time >
+ sja1105_delta_to_ns(SJA1105_TAS_MAX_DELTA)) {
+ dev_err(ds->dev,
+ "Base times too far apart: min %llu max %llu\n",
+ earliest_base_time, latest_base_time);
+ return -ERANGE;
+ }
+
+ tas_data->earliest_base_time = earliest_base_time;
+ tas_data->max_cycle_time = max_cycle_time;
+
+ dev_dbg(ds->dev, "earliest base time %lld ns\n", earliest_base_time);
+ dev_dbg(ds->dev, "latest base time %lld ns\n", latest_base_time);
+ dev_dbg(ds->dev, "longest cycle time %lld ns\n", max_cycle_time);
+
+ return 0;
+}
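+
+/* For reference, the helpers used above are defined in sja1105.h; roughly
+ * (paraphrased, not verbatim):
+ *
+ *	sja1105_delta_to_ns(delta) returns delta * 200, because the hardware
+ *	counts schedule deltas in 200 ns increments;
+ *
+ *	future_base_time(base_time, cycle_time, now) advances base_time by
+ *	whole multiples of cycle_time until the result is >= now.
+ */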
+
+/* Lo and behold: the egress scheduler from hell.
+ *
+ * At the hardware level, the Time-Aware Shaper holds a global linear array of
+ * all schedule entries for all ports. These are the Gate Control List (GCL)
+ * entries, let's call them "timeslots" for short. This linear array of
+ * timeslots is held in BLK_IDX_SCHEDULE.
+ *
+ * Then there are a maximum of 8 "execution threads" inside the switch, which
+ * iterate cyclically through the "schedule". Each "cycle" has an entry point
+ * and an exit point, both being timeslot indices in the schedule table. The
+ * hardware calls each cycle a "subschedule".
+ *
+ * Subschedule (cycle) i starts when
+ * ptpclkval >= ptpschtm + BLK_IDX_SCHEDULE_ENTRY_POINTS[i].delta.
+ *
+ * The hardware scheduler iterates BLK_IDX_SCHEDULE with a k ranging from
+ * k = BLK_IDX_SCHEDULE_ENTRY_POINTS[i].address to
+ * k = BLK_IDX_SCHEDULE_PARAMS.subscheind[i]
+ *
+ * For each schedule entry (timeslot) k, the engine executes the gate control
+ * list entry for the duration of BLK_IDX_SCHEDULE[k].delta.
+ *
+ * +---------+
+ * | | BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS
+ * +---------+
+ * |
+ * +-----------------+
+ * | .actsubsch
+ * BLK_IDX_SCHEDULE_ENTRY_POINTS v
+ * +-------+-------+
+ * |cycle 0|cycle 1|
+ * +-------+-------+
+ * | | | |
+ * +----------------+ | | +-------------------------------------+
+ * | .subschindx | | .subschindx |
+ * | | +---------------+ |
+ * | .address | .address | |
+ * | | | |
+ * | | | |
+ * | BLK_IDX_SCHEDULE v v |
+ * | +-------+-------+-------+-------+-------+------+ |
+ * | |entry 0|entry 1|entry 2|entry 3|entry 4|entry5| |
+ * | +-------+-------+-------+-------+-------+------+ |
+ * | ^ ^ ^ ^ |
+ * | | | | | |
+ * | +-------------------------+ | | | |
+ * | | +-------------------------------+ | | |
+ * | | | +-------------------+ | |
+ * | | | | | |
+ * | +---------------------------------------------------------------+ |
+ * | |subscheind[0]<=subscheind[1]<=subscheind[2]<=...<=subscheind[7]| |
+ * | +---------------------------------------------------------------+ |
+ * | ^ ^ BLK_IDX_SCHEDULE_PARAMS |
+ * | | | |
+ * +--------+ +-------------------------------------------+
+ *
+ * In the above picture there are two subschedules (cycles):
+ *
+ * - cycle 0: iterates the schedule table from 0 to 2 (and back)
+ * - cycle 1: iterates the schedule table from 3 to 5 (and back)
+ *
+ * All other possible execution threads must be marked as unused by making
+ * their "subschedule end index" (subscheind) equal to the last valid
+ * subschedule's end index (in this case 5).
+ */
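+
+/* Worked example (illustrative numbers only): one port with a 3-entry
+ * tc-taprio schedule plus a 2-entry tc-gate subschedule yield
+ * num_entries = 5 and num_cycles = 2 below. Cycle 0 then spans schedule
+ * entries 0..2 (subscheind[0] = 2) and cycle 1 spans entries 3..4
+ * (subscheind[1..7] = 4).
+ */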
+int sja1105_init_scheduling(struct sja1105_private *priv)
+{
+ struct sja1105_schedule_entry_points_entry *schedule_entry_points;
+ struct sja1105_schedule_entry_points_params_entry
+ *schedule_entry_points_params;
+ struct sja1105_schedule_params_entry *schedule_params;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
+ struct sja1105_schedule_entry *schedule;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int schedule_start_idx;
+ s64 entry_point_delta;
+ int schedule_end_idx;
+ int num_entries = 0;
+ int num_cycles = 0;
+ int cycle = 0;
+ int i, k = 0;
+ int port, rc;
+
+ rc = sja1105_tas_set_runtime_params(priv);
+ if (rc < 0)
+ return rc;
+
+ /* Discard previous Schedule Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous Schedule Entry Points Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous Schedule Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous Schedule Entry Points Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Figure out the dimensioning of the problem */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (tas_data->offload[port]) {
+ num_entries += tas_data->offload[port]->num_entries;
+ num_cycles++;
+ }
+ }
+
+ if (!list_empty(&gating_cfg->entries)) {
+ num_entries += gating_cfg->num_entries;
+ num_cycles++;
+ }
+
+ /* Nothing to do */
+ if (!num_cycles)
+ return 0;
+
+ /* Pre-allocate space in the static config tables */
+
+ /* Schedule Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
+ table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = num_entries;
+ schedule = table->entries;
+
+ /* Schedule Points Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
+ table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ /* Previously allocated memory will be freed automatically in
+ * sja1105_static_config_free. This is true for all early
+ * returns below.
+ */
+ return -ENOMEM;
+ table->entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT;
+ schedule_entry_points_params = table->entries;
+
+ /* Schedule Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
+ table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT;
+ schedule_params = table->entries;
+
+ /* Schedule Entry Points Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
+ table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = num_cycles;
+ schedule_entry_points = table->entries;
+
+ /* Finally start populating the static config tables */
+ schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
+ schedule_entry_points_params->actsubsch = num_cycles - 1;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ const struct tc_taprio_qopt_offload *offload;
+ /* Relative base time */
+ s64 rbt;
+
+ offload = tas_data->offload[port];
+ if (!offload)
+ continue;
+
+ schedule_start_idx = k;
+ schedule_end_idx = k + offload->num_entries - 1;
+ /* This is the base time expressed as a number of TAS ticks
+ * relative to PTPSCHTM, which we'll (perhaps improperly) call
+ * the operational base time.
+ */
+ rbt = future_base_time(offload->base_time,
+ offload->cycle_time,
+ tas_data->earliest_base_time);
+ rbt -= tas_data->earliest_base_time;
+		/* UM10944.pdf 4.2.2. Schedule Entry Points table says that
+		 * delta cannot be zero, which is unfortunate. Advance all
+		 * relative base times by 1 TAS delta, so that even the
+		 * earliest base time becomes 1 in relative terms. Then start
+		 * the operational base time (PTPSCHTM) one TAS delta earlier
+		 * than planned.
+		 */
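+		/* e.g. (illustrative): rbt = 0 ns gives entry_point_delta = 1,
+		 * rbt = 400 ns gives entry_point_delta = 3.
+		 */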
+ entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
+
+ schedule_entry_points[cycle].subschindx = cycle;
+ schedule_entry_points[cycle].delta = entry_point_delta;
+ schedule_entry_points[cycle].address = schedule_start_idx;
+
+ /* The subschedule end indices need to be
+ * monotonically increasing.
+ */
+ for (i = cycle; i < 8; i++)
+ schedule_params->subscheind[i] = schedule_end_idx;
+
+ for (i = 0; i < offload->num_entries; i++, k++) {
+ s64 delta_ns = offload->entries[i].interval;
+
+ schedule[k].delta = ns_to_sja1105_delta(delta_ns);
+ schedule[k].destports = BIT(port);
+ schedule[k].resmedia_en = true;
+ schedule[k].resmedia = SJA1105_GATE_MASK &
+ ~offload->entries[i].gate_mask;
+ }
+ cycle++;
+ }
+
+ if (!list_empty(&gating_cfg->entries)) {
+ struct sja1105_gate_entry *e;
+
+ /* Relative base time */
+ s64 rbt;
+
+ schedule_start_idx = k;
+ schedule_end_idx = k + gating_cfg->num_entries - 1;
+ rbt = future_base_time(gating_cfg->base_time,
+ gating_cfg->cycle_time,
+ tas_data->earliest_base_time);
+ rbt -= tas_data->earliest_base_time;
+ entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
+
+ schedule_entry_points[cycle].subschindx = cycle;
+ schedule_entry_points[cycle].delta = entry_point_delta;
+ schedule_entry_points[cycle].address = schedule_start_idx;
+
+ for (i = cycle; i < 8; i++)
+ schedule_params->subscheind[i] = schedule_end_idx;
+
+ list_for_each_entry(e, &gating_cfg->entries, list) {
+ schedule[k].delta = ns_to_sja1105_delta(e->interval);
+ schedule[k].destports = e->rule->vl.destports;
+ schedule[k].setvalid = true;
+ schedule[k].txen = true;
+ schedule[k].vlindex = e->rule->vl.sharindx;
+ schedule[k].winstindex = e->rule->vl.sharindx;
+ if (e->gate_state) /* Gate open */
+ schedule[k].winst = true;
+ else /* Gate closed */
+ schedule[k].winend = true;
+ k++;
+ }
+ }
+
+ return 0;
+}
+
+/* Suppose there are 2 port subschedules, each executing an arbitrary number
+ * of gate open/close events cyclically.
+ * No two of those gate events may ever occur at the exact same time,
+ * otherwise the switch is known to act in exotically strange ways.
+ * However the hardware doesn't bother performing these integrity checks.
+ * So here we are with the task of validating whether the new @admin offload
+ * has any conflict with the already established TAS configuration in
+ * tas_data->offload. We already know the other ports are in harmony with one
+ * another, otherwise we wouldn't have saved them.
+ * Each gate event executes periodically, with a period of @cycle_time and a
+ * phase given by its cycle's @base_time plus its offset within the cycle
+ * (which in turn is given by the length of the events prior to it).
+ * There are two aspects to possible collisions:
+ * - Collisions within one cycle's (actually the longest cycle's) time frame.
+ *   For that, we need to compare every occurrence of each event against
+ *   every occurrence of the other (their Cartesian product) within one
+ *   cycle time.
+ * - Collisions in the future. Events may not collide within one cycle time,
+ *   but if two port schedules don't have the same periodicity (i.e. the
+ *   cycle times aren't multiples of one another), they surely will some time
+ *   in the future (actually they will collide an infinite number of times).
+ */
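+/* Worked example (illustrative numbers only): cycle times of 3 ms and 5 ms
+ * are not multiples of one another (5 % 3 == 2), so the check below fails
+ * immediately. With cycle times of 2 ms and 6 ms, base times of 0 and 1 ms,
+ * and a single zero-offset entry each, the occurrences are
+ * {0, 2, 4, 6, ...} ms vs {1, 7, 13, ...} ms, which never coincide.
+ */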
+static bool
+sja1105_tas_check_conflicts(struct sja1105_private *priv, int port,
+ const struct tc_taprio_qopt_offload *admin)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ const struct tc_taprio_qopt_offload *offload;
+ s64 max_cycle_time, min_cycle_time;
+ s64 delta1, delta2;
+ s64 rbt1, rbt2;
+ s64 stop_time;
+ s64 t1, t2;
+ int i, j;
+ s32 rem;
+
+ offload = tas_data->offload[port];
+ if (!offload)
+ return false;
+
+ /* Check if the two cycle times are multiples of one another.
+ * If they aren't, then they will surely collide.
+ */
+ max_cycle_time = max(offload->cycle_time, admin->cycle_time);
+ min_cycle_time = min(offload->cycle_time, admin->cycle_time);
+ div_s64_rem(max_cycle_time, min_cycle_time, &rem);
+ if (rem)
+ return true;
+
+	/* Calculate the "reduced" base time of each of the two cycles
+	 * (transposed back as close to 0 as possible) by taking the
+	 * remainder of division by the cycle time.
+	 */
+ div_s64_rem(offload->base_time, offload->cycle_time, &rem);
+ rbt1 = rem;
+
+ div_s64_rem(admin->base_time, admin->cycle_time, &rem);
+ rbt2 = rem;
+
+ stop_time = max_cycle_time + max(rbt1, rbt2);
+
+	/* delta1 is the relative base time of each GCL entry within
+	 * the established port's TAS config.
+	 */
+ for (i = 0, delta1 = 0;
+ i < offload->num_entries;
+ delta1 += offload->entries[i].interval, i++) {
+ /* delta2 is the relative base time of each GCL entry
+ * within the newly added TAS config.
+ */
+ for (j = 0, delta2 = 0;
+ j < admin->num_entries;
+ delta2 += admin->entries[j].interval, j++) {
+			/* t1 follows all possible occurrences of the
+			 * established port's GCL entry i within the
+			 * first cycle time.
+			 */
+ for (t1 = rbt1 + delta1;
+ t1 <= stop_time;
+ t1 += offload->cycle_time) {
+ /* t2 follows all possible occurrences
+ * of the newly added GCL entry j
+ * within the first cycle time.
+ */
+ for (t2 = rbt2 + delta2;
+ t2 <= stop_time;
+ t2 += admin->cycle_time) {
+ if (t1 == t2) {
+ dev_warn(priv->ds->dev,
+ "GCL entry %d collides with entry %d of port %d\n",
+ j, i, port);
+ return true;
+ }
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+/* Check the tc-taprio configuration on @port for conflicts with the tc-gate
+ * global subschedule. If @port is -1, check it against all ports.
+ * To reuse the sja1105_tas_check_conflicts logic without refactoring it,
+ * convert the gating configuration to a dummy tc-taprio offload structure.
+ */
+bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+ size_t num_entries = gating_cfg->num_entries;
+ struct tc_taprio_qopt_offload *dummy;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_gate_entry *e;
+ bool conflict;
+ int i = 0;
+
+ if (list_empty(&gating_cfg->entries))
+ return false;
+
+ dummy = kzalloc(struct_size(dummy, entries, num_entries), GFP_KERNEL);
+ if (!dummy) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory");
+ return true;
+ }
+
+ dummy->num_entries = num_entries;
+ dummy->base_time = gating_cfg->base_time;
+ dummy->cycle_time = gating_cfg->cycle_time;
+
+ list_for_each_entry(e, &gating_cfg->entries, list)
+ dummy->entries[i++].interval = e->interval;
+
+ if (port != -1) {
+ conflict = sja1105_tas_check_conflicts(priv, port, dummy);
+ } else {
+ for (port = 0; port < ds->num_ports; port++) {
+ conflict = sja1105_tas_check_conflicts(priv, port,
+ dummy);
+ if (conflict)
+ break;
+ }
+ }
+
+ kfree(dummy);
+
+ return conflict;
+}
+
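+/* For reference, an illustrative tc-taprio command that results in a call to
+ * this function (numbers are examples only; see
+ * Documentation/networking/dsa/sja1105.rst for details):
+ *
+ *   tc qdisc add dev swp5 parent root handle 100 taprio \
+ *           num_tc 8 map 0 1 2 3 4 5 6 7 \
+ *           queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
+ *           base-time 1528743495910289987 \
+ *           sched-entry S 01 300000 \
+ *           sched-entry S 02 300000 \
+ *           sched-entry S 04 400000 \
+ *           flags 2
+ *
+ * flags 2 requests full offload. Each interval is programmed in 200 ns
+ * deltas and must stay below SJA1105_TAS_MAX_DELTA deltas (~52 ms), as
+ * checked below.
+ */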
+int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
+ struct tc_taprio_qopt_offload *admin)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ int other_port, rc, i;
+
+ /* Can't change an already configured port (must delete qdisc first).
+ * Can't delete the qdisc from an unconfigured port.
+ */
+ if (!!tas_data->offload[port] == admin->enable)
+ return -EINVAL;
+
+ if (!admin->enable) {
+ taprio_offload_free(tas_data->offload[port]);
+ tas_data->offload[port] = NULL;
+
+ rc = sja1105_init_scheduling(priv);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
+ }
+
+ /* The cycle time extension is the amount of time the last cycle from
+ * the old OPER needs to be extended in order to phase-align with the
+ * base time of the ADMIN when that becomes the new OPER.
+ * But of course our switch needs to be reset to switch-over between
+ * the ADMIN and the OPER configs - so much for a seamless transition.
+	 * So don't add insult to injury and just say we don't support cycle
+ * time extension.
+ */
+ if (admin->cycle_time_extension)
+ return -ENOTSUPP;
+
+ for (i = 0; i < admin->num_entries; i++) {
+ s64 delta_ns = admin->entries[i].interval;
+ s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
+ bool too_long, too_short;
+
+ too_long = (delta_cycles >= SJA1105_TAS_MAX_DELTA);
+ too_short = (delta_cycles == 0);
+ if (too_long || too_short) {
+			dev_err(priv->ds->dev,
+				"Interval %lld too %s for GCL entry %d\n",
+				delta_ns, too_long ? "long" : "short", i);
+ return -ERANGE;
+ }
+ }
+
+ for (other_port = 0; other_port < ds->num_ports; other_port++) {
+ if (other_port == port)
+ continue;
+
+ if (sja1105_tas_check_conflicts(priv, other_port, admin))
+ return -ERANGE;
+ }
+
+ if (sja1105_gating_check_conflicts(priv, port, NULL)) {
+ dev_err(ds->dev, "Conflict with tc-gate schedule\n");
+ return -ERANGE;
+ }
+
+ tas_data->offload[port] = taprio_offload_get(admin);
+
+ rc = sja1105_init_scheduling(priv);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
+}
+
+static int sja1105_tas_check_running(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_ptp_cmd cmd = {0};
+ int rc;
+
+ rc = sja1105_ptp_commit(ds, &cmd, SPI_READ);
+ if (rc < 0)
+ return rc;
+
+ if (cmd.ptpstrtsch == 1)
+ /* Schedule successfully started */
+ tas_data->state = SJA1105_TAS_STATE_RUNNING;
+ else if (cmd.ptpstopsch == 1)
+ /* Schedule is stopped */
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+ else
+ /* Schedule is probably not configured with PTP clock source */
+ rc = -EINVAL;
+
+ return rc;
+}
+
+/* Write to PTPCLKCORP */
+static int sja1105_tas_adjust_drift(struct sja1105_private *priv,
+ u64 correction)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 ptpclkcorp = ns_to_sja1105_ticks(correction);
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkcorp,
+ &ptpclkcorp, NULL);
+}
+
+/* Write to PTPSCHTM */
+static int sja1105_tas_set_base_time(struct sja1105_private *priv,
+ u64 base_time)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u64 ptpschtm = ns_to_sja1105_ticks(base_time);
+
+ return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpschtm,
+ &ptpschtm, NULL);
+}
+
+static int sja1105_tas_start(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
+ struct dsa_switch *ds = priv->ds;
+ int rc;
+
+ dev_dbg(ds->dev, "Starting the TAS\n");
+
+ if (tas_data->state == SJA1105_TAS_STATE_ENABLED_NOT_RUNNING ||
+ tas_data->state == SJA1105_TAS_STATE_RUNNING) {
+ dev_err(ds->dev, "TAS already started\n");
+ return -EINVAL;
+ }
+
+ cmd->ptpstrtsch = 1;
+ cmd->ptpstopsch = 0;
+
+ rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
+ if (rc < 0)
+ return rc;
+
+ tas_data->state = SJA1105_TAS_STATE_ENABLED_NOT_RUNNING;
+
+ return 0;
+}
+
+static int sja1105_tas_stop(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
+ struct dsa_switch *ds = priv->ds;
+ int rc;
+
+ dev_dbg(ds->dev, "Stopping the TAS\n");
+
+ if (tas_data->state == SJA1105_TAS_STATE_DISABLED) {
+ dev_err(ds->dev, "TAS already disabled\n");
+ return -EINVAL;
+ }
+
+ cmd->ptpstopsch = 1;
+ cmd->ptpstrtsch = 0;
+
+ rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
+ if (rc < 0)
+ return rc;
+
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+
+ return 0;
+}
+
+/* The schedule engine and the PTP clock are driven by the same oscillator, and
+ * they run in parallel. But whilst the PTP clock can keep an absolute
+ * time-of-day, the schedule engine only runs in 'ticks' (25 ticks make up
+ * a delta, which is 200 ns), wrapping around at the end of each cycle.
+ * The schedule engine is started when the PTP clock reaches the PTPSCHTM time
+ * (in PTP domain).
+ * Because the PTP clock can be rate-corrected (accelerated or slowed down) by
+ * a software servo, and the schedule engine clock runs in parallel to the PTP
+ * clock, there is logic internal to the switch that periodically keeps the
+ * schedule engine from drifting away. The frequency with which this internal
+ * syntonization happens is the PTP clock correction period (PTPCLKCORP). It is
+ * a value also in the PTP clock domain, and is also rate-corrected.
+ * To be precise, during a correction period, there is logic to determine by
+ * how many scheduler clock ticks the PTP clock has drifted. At the end of each
+ * correction period/beginning of new one, the length of a delta is shrunk or
+ * expanded with an integer number of ticks, compared with the typical 25.
+ * So a delta lasts for 200 ns (or 25 ticks) only on average.
+ * Sometimes it is longer, sometimes it is shorter. The internal syntonization
+ * logic can adjust for at most 5 ticks each 20 ticks.
+ *
+ * The first implication is that you should choose your schedule correction
+ * period to be an integer multiple of the schedule length. Preferably one.
+ * In case there are schedules of multiple ports active, then the correction
+ * period needs to be a multiple of them all. Given the restriction that the
+ * cycle times have to be multiples of one another anyway, this means the
+ * correction period can simply be the largest cycle time, hence the current
+ * choice. This way, the updates are always synchronous to the transmission
+ * cycle, and therefore predictable.
+ *
+ * The second implication is that at the beginning of a correction period, the
+ * first few deltas will be modulated in time, until the schedule engine is
+ * properly phase-aligned with the PTP clock. For this reason, you should place
+ * your best-effort traffic at the beginning of a cycle, and your
+ * time-triggered traffic afterwards.
+ *
+ * The third implication is that once the schedule engine is started, it can
+ * only adjust for so much drift within a correction period. In the servo you
+ * can only change the PTPCLKRATE, but not step the clock (PTPCLKADD). If you
+ * want to do the latter, you need to stop and restart the schedule engine,
+ * which is what the state machine handles.
+ */
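+/* Worked example (illustrative numbers, assuming the 8 ns PTP tick of this
+ * switch family): with a longest cycle time of 1 ms, sja1105_tas_adjust_drift
+ * programs PTPCLKCORP = 1000000 / 8 = 125000 ticks, so the engine
+ * re-syntonizes exactly once per transmission cycle.
+ */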
+static void sja1105_tas_state_machine(struct work_struct *work)
+{
+ struct sja1105_tas_data *tas_data = work_to_sja1105_tas(work);
+ struct sja1105_private *priv = tas_to_sja1105(tas_data);
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct timespec64 base_time_ts, now_ts;
+ struct dsa_switch *ds = priv->ds;
+ struct timespec64 diff;
+ s64 base_time, now;
+ int rc = 0;
+
+ mutex_lock(&ptp_data->lock);
+
+ switch (tas_data->state) {
+ case SJA1105_TAS_STATE_DISABLED:
+ /* Can't do anything at all if clock is still being stepped */
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ)
+ break;
+
+ rc = sja1105_tas_adjust_drift(priv, tas_data->max_cycle_time);
+ if (rc < 0)
+ break;
+
+ rc = __sja1105_ptp_gettimex(ds, &now, NULL);
+ if (rc < 0)
+ break;
+
+		/* Plan to start the earliest schedule first. The others
+		 * will be started in hardware, by way of their respective
+		 * entry point deltas.
+		 * Try our best to avoid fringe cases (race condition between
+		 * ptpschtm and ptpstrtsch) by pushing the oper_base_time at
+		 * least one second into the future from now. This is not
+		 * ideal, but it only needs to buy us time until the
+		 * sja1105_tas_start command below gets executed.
+		 */
+ base_time = future_base_time(tas_data->earliest_base_time,
+ tas_data->max_cycle_time,
+ now + 1ull * NSEC_PER_SEC);
+ base_time -= sja1105_delta_to_ns(1);
+
+ rc = sja1105_tas_set_base_time(priv, base_time);
+ if (rc < 0)
+ break;
+
+ tas_data->oper_base_time = base_time;
+
+ rc = sja1105_tas_start(priv);
+ if (rc < 0)
+ break;
+
+ base_time_ts = ns_to_timespec64(base_time);
+ now_ts = ns_to_timespec64(now);
+
+ dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
+ base_time_ts.tv_sec, base_time_ts.tv_nsec,
+ now_ts.tv_sec, now_ts.tv_nsec);
+
+ break;
+
+ case SJA1105_TAS_STATE_ENABLED_NOT_RUNNING:
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
+			/* Clock was stepped... bad news for TAS */
+ sja1105_tas_stop(priv);
+ break;
+ }
+
+ /* Check if TAS has actually started, by comparing the
+ * scheduled start time with the SJA1105 PTP clock
+ */
+ rc = __sja1105_ptp_gettimex(ds, &now, NULL);
+ if (rc < 0)
+ break;
+
+ if (now < tas_data->oper_base_time) {
+ /* TAS has not started yet */
+ diff = ns_to_timespec64(tas_data->oper_base_time - now);
+ dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
+ diff.tv_sec, diff.tv_nsec);
+ break;
+ }
+
+ /* Time elapsed, what happened? */
+ rc = sja1105_tas_check_running(priv);
+ if (rc < 0)
+ break;
+
+		if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
+			/* TAS should have started by now, but didn't */
+			dev_err(ds->dev,
+				"TAS not started despite time elapsed\n");
+
+ break;
+
+ case SJA1105_TAS_STATE_RUNNING:
+		/* Clock was stepped... bad news for TAS */
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
+ sja1105_tas_stop(priv);
+ break;
+ }
+
+ rc = sja1105_tas_check_running(priv);
+ if (rc < 0)
+ break;
+
+ if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
+ dev_err(ds->dev, "TAS surprisingly stopped\n");
+
+ break;
+
+ default:
+ if (net_ratelimit())
+ dev_err(ds->dev, "TAS in an invalid state (incorrect use of API)!\n");
+ }
+
+ if (rc && net_ratelimit())
+ dev_err(ds->dev, "An operation returned %d\n", rc);
+
+ mutex_unlock(&ptp_data->lock);
+}
+
+void sja1105_tas_clockstep(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ if (!tas_data->enabled)
+ return;
+
+ tas_data->last_op = SJA1105_PTP_CLOCKSTEP;
+ schedule_work(&tas_data->tas_work);
+}
+
+void sja1105_tas_adjfreq(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ if (!tas_data->enabled)
+ return;
+
+ /* No reason to schedule the workqueue, nothing changed */
+ if (tas_data->state == SJA1105_TAS_STATE_RUNNING)
+ return;
+
+ tas_data->last_op = SJA1105_PTP_ADJUSTFREQ;
+ schedule_work(&tas_data->tas_work);
+}
+
+void sja1105_tas_setup(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine);
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+ tas_data->last_op = SJA1105_PTP_NONE;
+
+ INIT_LIST_HEAD(&tas_data->gating_cfg.entries);
+}
+
+void sja1105_tas_teardown(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct tc_taprio_qopt_offload *offload;
+ int port;
+
+ cancel_work_sync(&priv->tas_data.tas_work);
+
+ for (port = 0; port < ds->num_ports; port++) {
+ offload = priv->tas_data.offload[port];
+ if (!offload)
+ continue;
+
+ taprio_offload_free(offload);
+ }
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h
new file mode 100644
index 000000000..c05bd07e8
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_tas.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_TAS_H
+#define _SJA1105_TAS_H
+
+#include <net/pkt_sched.h>
+
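+/* 2^18 deltas of 200 ns each, i.e. a maximum schedule entry interval of
+ * roughly 52.4 ms.
+ */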
+#define SJA1105_TAS_MAX_DELTA BIT(18)
+
+struct sja1105_private;
+
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS)
+
+enum sja1105_tas_state {
+ SJA1105_TAS_STATE_DISABLED,
+ SJA1105_TAS_STATE_ENABLED_NOT_RUNNING,
+ SJA1105_TAS_STATE_RUNNING,
+};
+
+enum sja1105_ptp_op {
+ SJA1105_PTP_NONE,
+ SJA1105_PTP_CLOCKSTEP,
+ SJA1105_PTP_ADJUSTFREQ,
+};
+
+struct sja1105_gate_entry {
+ struct list_head list;
+ struct sja1105_rule *rule;
+ s64 interval;
+ u8 gate_state;
+};
+
+struct sja1105_gating_config {
+ u64 cycle_time;
+ s64 base_time;
+ int num_entries;
+ struct list_head entries;
+};
+
+struct sja1105_tas_data {
+ struct tc_taprio_qopt_offload *offload[SJA1105_MAX_NUM_PORTS];
+ struct sja1105_gating_config gating_cfg;
+ enum sja1105_tas_state state;
+ enum sja1105_ptp_op last_op;
+ struct work_struct tas_work;
+ s64 earliest_base_time;
+ s64 oper_base_time;
+ u64 max_cycle_time;
+ bool enabled;
+};
+
+int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
+ struct tc_taprio_qopt_offload *admin);
+
+void sja1105_tas_setup(struct dsa_switch *ds);
+
+void sja1105_tas_teardown(struct dsa_switch *ds);
+
+void sja1105_tas_clockstep(struct dsa_switch *ds);
+
+void sja1105_tas_adjfreq(struct dsa_switch *ds);
+
+bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack);
+
+int sja1105_init_scheduling(struct sja1105_private *priv);
+
+#else
+
+/* C doesn't allow empty structures, bah! */
+struct sja1105_tas_data {
+ u8 dummy;
+};
+
+static inline int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
+ struct tc_taprio_qopt_offload *admin)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void sja1105_tas_setup(struct dsa_switch *ds) { }
+
+static inline void sja1105_tas_teardown(struct dsa_switch *ds) { }
+
+static inline void sja1105_tas_clockstep(struct dsa_switch *ds) { }
+
+static inline void sja1105_tas_adjfreq(struct dsa_switch *ds) { }
+
+static inline bool
+sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+			       struct netlink_ext_ack *extack)
+{
+	return true;
+}
+
+static inline int sja1105_init_scheduling(struct sja1105_private *priv)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS) */
+
+#endif /* _SJA1105_TAS_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
new file mode 100644
index 000000000..b7e95d60a
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -0,0 +1,802 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2020 NXP
+ */
+#include <net/tc_act/tc_gate.h>
+#include <linux/dsa/8021q.h>
+#include "sja1105_vl.h"
+
+#define SJA1105_SIZE_VL_STATUS 8
+
+/* Insert into the global gate list, sorted by gate action time. */
+static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
+ struct sja1105_rule *rule,
+ u8 gate_state, s64 entry_time,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gate_entry *e;
+ int rc;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->rule = rule;
+ e->gate_state = gate_state;
+ e->interval = entry_time;
+
+ if (list_empty(&gating_cfg->entries)) {
+ list_add(&e->list, &gating_cfg->entries);
+ } else {
+ struct sja1105_gate_entry *p;
+
+ list_for_each_entry(p, &gating_cfg->entries, list) {
+ if (p->interval == e->interval) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Gate conflict");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if (e->interval < p->interval)
+ break;
+ }
+ list_add(&e->list, p->list.prev);
+ }
+
+ gating_cfg->num_entries++;
+
+ return 0;
+err:
+ kfree(e);
+ return rc;
+}
+
+/* The gate entries contain absolute times in their e->interval field. Convert
+ * that to proper intervals (e.g. absolute times "0, 5, 10, 15" with a cycle
+ * time of 20 become intervals "5, 5, 5, 5").
+ */
+static void
+sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
+ u64 cycle_time)
+{
+ struct sja1105_gate_entry *last_e;
+ struct sja1105_gate_entry *e;
+ struct list_head *prev;
+
+ list_for_each_entry(e, &gating_cfg->entries, list) {
+ struct sja1105_gate_entry *p;
+
+ prev = e->list.prev;
+
+ if (prev == &gating_cfg->entries)
+ continue;
+
+ p = list_entry(prev, struct sja1105_gate_entry, list);
+ p->interval = e->interval - p->interval;
+ }
+ last_e = list_last_entry(&gating_cfg->entries,
+ struct sja1105_gate_entry, list);
+ last_e->interval = cycle_time - last_e->interval;
+}
+
+static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
+{
+ struct sja1105_gate_entry *e, *n;
+
+ list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
+ list_del(&e->list);
+ kfree(e);
+ }
+}
+
+static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+ struct sja1105_rule *rule;
+ s64 max_cycle_time = 0;
+ s64 its_base_time = 0;
+ int i, rc = 0;
+
+ sja1105_free_gating_config(gating_cfg);
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ if (max_cycle_time < rule->vl.cycle_time) {
+ max_cycle_time = rule->vl.cycle_time;
+ its_base_time = rule->vl.base_time;
+ }
+ }
+
+ if (!max_cycle_time)
+ return 0;
+
+ dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
+ max_cycle_time, its_base_time);
+
+ gating_cfg->base_time = its_base_time;
+ gating_cfg->cycle_time = max_cycle_time;
+ gating_cfg->num_entries = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ s64 time;
+ s64 rbt;
+
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ /* Calculate the difference between this gating schedule's
+ * base time, and the base time of the gating schedule with the
+ * longest cycle time. We call it the relative base time (rbt).
+ */
+ rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
+ its_base_time);
+ rbt -= its_base_time;
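+		/* e.g. (illustrative): base_time = 1 us, cycle_time = 5 us,
+		 * its_base_time = 12 us => rbt = 16 us - 12 us = 4 us.
+		 */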
+
+ time = rbt;
+
+ for (i = 0; i < rule->vl.num_entries; i++) {
+ u8 gate_state = rule->vl.entries[i].gate_state;
+ s64 entry_time = time;
+
+ while (entry_time < max_cycle_time) {
+ rc = sja1105_insert_gate_entry(gating_cfg, rule,
+ gate_state,
+ entry_time,
+ extack);
+ if (rc)
+ goto err;
+
+ entry_time += rule->vl.cycle_time;
+ }
+ time += rule->vl.entries[i].interval;
+ }
+ }
+
+ sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+
+ return 0;
+err:
+ sja1105_free_gating_config(gating_cfg);
+ return rc;
+}
+
+/* The switch flow classification core implements TTEthernet, which 'thinks' in
+ * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
+ * However it also has one other operating mode (VLLUPFORMAT=0) where it acts
+ * somewhat closer to a pre-standard implementation of IEEE 802.1Qci
+ * (Per-Stream Filtering and Policing), which is what the driver is going to be
+ * implementing.
+ *
+ * VL Lookup
+ * Key = {DMAC && VLANID +---------+ Key = { (DMAC[47:16] & VLMASK ==
+ * && VLAN PCP | | VLMARKER)
+ * && INGRESS PORT} +---------+ (both fixed)
+ * (exact match, | && DMAC[15:0] == VLID
+ * all specified in rule) | (specified in rule)
+ * v && INGRESS PORT }
+ * ------------
+ * 0 (PSFP) / \ 1 (ARINC664)
+ * +-----------/ VLLUPFORMAT \----------+
+ * | \ (fixed) / |
+ * | \ / |
+ * 0 (forwarding) v ------------ |
+ * ------------ |
+ * / \ 1 (QoS classification) |
+ * +---/ ISCRITICAL \-----------+ |
+ * | \ (per rule) / | |
+ * | \ / VLID taken from VLID taken from
+ * v ------------ index of rule contents of rule
+ * select that matched that matched
+ * DESTPORTS | |
+ * | +---------+--------+
+ * | |
+ * | v
+ * | VL Forwarding
+ * | (indexed by VLID)
+ * | +---------+
+ * | +--------------| |
+ * | | select TYPE +---------+
+ * | v
+ * | 0 (rate ------------ 1 (time
+ * | constrained) / \ triggered)
+ * | +------/ TYPE \------------+
+ * | | \ (per VLID) / |
+ * | v \ / v
+ * | VL Policing ------------ VL Policing
+ * | (indexed by VLID) (indexed by VLID)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | select SHARINDX select SHARINDX to
+ * | to rate-limit re-enter VL Forwarding
+ * | groups of VL's with new VLID for egress
+ * | to same quota |
+ * | | |
+ * | select MAXLEN -> exceed => drop select MAXLEN -> exceed => drop
+ * | | |
+ * | v v
+ * | VL Forwarding VL Forwarding
+ * | (indexed by SHARINDX) (indexed by SHARINDX)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | select PRIORITY, select PRIORITY,
+ * | PARTITION, DESTPORTS PARTITION, DESTPORTS
+ * | | |
+ * | v v
+ * | VL Policing VL Policing
+ * | (indexed by SHARINDX) (indexed by SHARINDX)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | | |
+ * | v |
+ * | select BAG, -> exceed => drop |
+ * | JITTER v
+ * | | ----------------------------------------------
+ * | | / Reception Window is open for this VL \
+ * | | / (the Schedule Table executes an entry i \
+ * | | / M <= i < N, for which these conditions hold): \ no
+ * | | +----/ \-+
+ * | | |yes \ WINST[M] == 1 && WINSTINDEX[M] == VLID / |
+ * | | | \ WINEND[N] == 1 && WINSTINDEX[N] == VLID / |
+ * | | | \ / |
+ * | | | \ (the VL window has opened and not yet closed)/ |
+ * | | | ---------------------------------------------- |
+ * | | v v
+ * | | dispatch to DESTPORTS when the Schedule Table drop
+ * | | executes an entry i with TXEN == 1 && VLINDEX == i
+ * v v
+ * dispatch immediately to DESTPORTS
+ *
+ * The per-port classification key is always composed of {DMAC, VID, PCP} and
+ * is non-maskable. This 'looks like' the NULL stream identification function
+ * from IEEE 802.1CB clause 6, except for the extra VLAN PCP. When the switch
+ * ports operate as VLAN-unaware, we do allow the user to not specify the VLAN
+ * ID and PCP, and then the port-based defaults will be used.
+ *
+ * In TTEthernet, routing is something that needs to be done manually for each
+ * Virtual Link. So the flow action must always include one of:
+ * a. 'redirect', 'trap' or 'drop': select the egress port list
+ * Additionally, the following actions may be applied on a Virtual Link,
+ * turning it into 'critical' traffic:
+ * b. 'police': turn it into a rate-constrained VL, with bandwidth limitation
+ * given by the maximum frame length, bandwidth allocation gap (BAG) and
+ * maximum jitter.
+ * c. 'gate': turn it into a time-triggered VL, which can only be received
+ * and forwarded according to a given schedule.
+ */
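+
+/* For reference, an illustrative tc-gate command that creates a
+ * time-triggered VL (numbers are examples only; see
+ * Documentation/networking/dsa/sja1105.rst for details):
+ *
+ *   tc qdisc add dev swp2 clsact
+ *   tc filter add dev swp2 ingress flower skip_sw \
+ *           dst_mac 42:be:24:9b:76:20 \
+ *           action gate base-time 0 \
+ *           sched-entry OPEN 1200 -1 -1 \
+ *           sched-entry CLOSE 1200 -1 -1 \
+ *           action trap
+ *
+ * Each sched-entry interval must be a nonzero multiple of 200 ns and at
+ * most ~52 ms, as enforced by sja1105_vl_gate().
+ */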
+
+static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
+ struct sja1105_vl_lookup_entry *b)
+{
+ if (a->macaddr < b->macaddr)
+ return true;
+ if (a->macaddr > b->macaddr)
+ return false;
+ if (a->vlanid < b->vlanid)
+ return true;
+ if (a->vlanid > b->vlanid)
+ return false;
+ if (a->port < b->port)
+ return true;
+ if (a->port > b->port)
+ return false;
+ if (a->vlanprior < b->vlanprior)
+ return true;
+ if (a->vlanprior > b->vlanprior)
+ return false;
+ /* Keys are equal */
+ return false;
+}
+
+/* FIXME: this should change when the bridge upper of the port changes. */
+static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp)
+{
+ unsigned long bridge_num;
+
+ if (!dp->bridge)
+ return dsa_tag_8021q_standalone_vid(dp);
+
+ bridge_num = dsa_port_bridge_num_get(dp);
+
+ return dsa_tag_8021q_bridge_vid(bridge_num);
+}
+
+static int sja1105_init_virtual_links(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_vl_policing_entry *vl_policing;
+ struct sja1105_vl_forwarding_entry *vl_fwd;
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ bool have_critical_virtual_links = false;
+ struct sja1105_table *table;
+ struct sja1105_rule *rule;
+ int num_virtual_links = 0;
+ int max_sharindx = 0;
+ int i, j, k;
+
+ /* Figure out the dimensioning of the problem */
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ /* Each VL lookup entry matches on a single ingress port */
+ num_virtual_links += hweight_long(rule->port_mask);
+
+ if (rule->vl.type != SJA1105_VL_NONCRITICAL)
+ have_critical_virtual_links = true;
+ if (max_sharindx < rule->vl.sharindx)
+ max_sharindx = rule->vl.sharindx;
+ }
+
+ if (num_virtual_links > SJA1105_MAX_VL_LOOKUP_COUNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Not enough VL entries available");
+ return -ENOSPC;
+ }
+
+ if (max_sharindx + 1 > SJA1105_MAX_VL_LOOKUP_COUNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer index out of range");
+ return -ENOSPC;
+ }
+
+ max_sharindx = max_t(int, num_virtual_links, max_sharindx) + 1;
+
+ /* Discard previous VL Lookup Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Policing Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Forwarding Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Forwarding Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Nothing to do */
+ if (!num_virtual_links)
+ return 0;
+
+ /* Pre-allocate space in the static config tables */
+
+ /* VL Lookup Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ table->entries = kcalloc(num_virtual_links,
+ table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = num_virtual_links;
+ vl_lookup = table->entries;
+
+ k = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ unsigned long port;
+
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+
+ for_each_set_bit(port, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
+ vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
+ vl_lookup[k].port = port;
+ vl_lookup[k].macaddr = rule->key.vl.dmac;
+ if (rule->key.type == SJA1105_KEY_VLAN_AWARE_VL) {
+ vl_lookup[k].vlanid = rule->key.vl.vid;
+ vl_lookup[k].vlanprior = rule->key.vl.pcp;
+ } else {
+ /* FIXME */
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ u16 vid = sja1105_port_get_tag_8021q_vid(dp);
+
+ vl_lookup[k].vlanid = vid;
+ vl_lookup[k].vlanprior = 0;
+ }
+ /* For critical VLs, the DESTPORTS mask is taken from
+ * the VL Forwarding Table, so no point in putting it
+ * in the VL Lookup Table
+ */
+ if (rule->vl.type == SJA1105_VL_NONCRITICAL)
+ vl_lookup[k].destports = rule->vl.destports;
+ else
+ vl_lookup[k].iscritical = true;
+ vl_lookup[k].flow_cookie = rule->cookie;
+ k++;
+ }
+ }
+
+ /* UM10944.pdf chapter 4.2.3 VL Lookup table:
+ * "the entries in the VL Lookup table must be sorted in ascending
+ * order (i.e. the smallest value must be loaded first) according to
+ * the following sort order: MACADDR, VLANID, PORT, VLANPRIOR."
+ */
+ for (i = 0; i < num_virtual_links; i++) {
+ struct sja1105_vl_lookup_entry *a = &vl_lookup[i];
+
+ for (j = i + 1; j < num_virtual_links; j++) {
+ struct sja1105_vl_lookup_entry *b = &vl_lookup[j];
+
+ if (sja1105_vl_key_lower(b, a)) {
+ struct sja1105_vl_lookup_entry tmp = *a;
+
+ *a = *b;
+ *b = tmp;
+ }
+ }
+ }
+
+ if (!have_critical_virtual_links)
+ return 0;
+
+ /* VL Policing Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
+ table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = max_sharindx;
+ vl_policing = table->entries;
+
+ /* VL Forwarding Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
+ table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = max_sharindx;
+ vl_fwd = table->entries;
+
+ /* VL Forwarding Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = 1;
+
+ for (i = 0; i < num_virtual_links; i++) {
+ unsigned long cookie = vl_lookup[i].flow_cookie;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+
+ if (rule->vl.type == SJA1105_VL_NONCRITICAL)
+ continue;
+ if (rule->vl.type == SJA1105_VL_TIME_TRIGGERED) {
+ int sharindx = rule->vl.sharindx;
+
+ vl_policing[i].type = 1;
+ vl_policing[i].sharindx = sharindx;
+ vl_policing[i].maxlen = rule->vl.maxlen;
+ vl_policing[sharindx].type = 1;
+
+ vl_fwd[i].type = 1;
+ vl_fwd[sharindx].type = 1;
+ vl_fwd[sharindx].priority = rule->vl.ipv;
+ vl_fwd[sharindx].partition = 0;
+ vl_fwd[sharindx].destports = rule->vl.destports;
+ }
+ }
+
+ sja1105_frame_memory_partitioning(priv);
+
+ return 0;
+}
+
+int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, unsigned long destports,
+ bool append)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ bool vlan_aware = dsa_port_is_vlan_filtering(dp);
+ int rc;
+
+ if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only redirect based on DMAC");
+ return -EOPNOTSUPP;
+ } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only redirect based on {DMAC, VID, PCP}");
+ return -EOPNOTSUPP;
+ }
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_VL;
+ rule->key = *key;
+ list_add(&rule->list, &priv->flow_block.rules);
+ }
+
+ rule->port_mask |= BIT(port);
+ if (append)
+ rule->vl.destports |= destports;
+ else
+ rule->vl.destports = destports;
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc) {
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+
+ return rc;
+}
+
+int sja1105_vl_delete(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+
+ rc = sja1105_compose_gating_subschedule(priv, extack);
+ if (rc)
+ return rc;
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc)
+ return rc;
+
+ rc = sja1105_init_scheduling(priv);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
+}
+
+int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time, u64 cycle_time_ext,
+ u32 num_entries, struct action_gate_entry *entries)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ bool vlan_aware = dsa_port_is_vlan_filtering(dp);
+ int ipv = -1;
+ int i, rc;
+ s32 rem;
+
+ if (cycle_time_ext) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cycle time extension not supported");
+ return -EOPNOTSUPP;
+ }
+
+ div_s64_rem(base_time, sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Base time must be multiple of 200 ns");
+ return -ERANGE;
+ }
+
+ div_s64_rem(cycle_time, sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cycle time must be multiple of 200 ns");
+ return -ERANGE;
+ }
+
+ if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only gate based on DMAC");
+ return -EOPNOTSUPP;
+ } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only gate based on {DMAC, VID, PCP}");
+ return -EOPNOTSUPP;
+ }
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ list_add(&rule->list, &priv->flow_block.rules);
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_VL;
+ rule->key = *key;
+ rule->vl.type = SJA1105_VL_TIME_TRIGGERED;
+ rule->vl.sharindx = index;
+ rule->vl.base_time = base_time;
+ rule->vl.cycle_time = cycle_time;
+ rule->vl.num_entries = num_entries;
+ rule->vl.entries = kcalloc(num_entries,
+ sizeof(struct action_gate_entry),
+ GFP_KERNEL);
+ if (!rule->vl.entries) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ div_s64_rem(entries[i].interval,
+ sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interval must be multiple of 200 ns");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (!entries[i].interval) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interval cannot be zero");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (ns_to_sja1105_delta(entries[i].interval) >
+ SJA1105_TAS_MAX_DELTA) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Maximum interval is 52 ms");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (entries[i].maxoctets != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload IntervalOctetMax");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (ipv == -1) {
+ ipv = entries[i].ipv;
+ } else if (ipv != entries[i].ipv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only support a single IPV per VL");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ rule->vl.entries[i] = entries[i];
+ }
+
+ if (ipv == -1) {
+ if (key->type == SJA1105_KEY_VLAN_AWARE_VL)
+ ipv = key->vl.pcp;
+ else
+ ipv = 0;
+ }
+
+ /* TODO: support per-flow MTU */
+ rule->vl.maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+ rule->vl.ipv = ipv;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ rc = sja1105_compose_gating_subschedule(priv, extack);
+ if (rc)
+ goto out;
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc)
+ goto out;
+
+ if (sja1105_gating_check_conflicts(priv, -1, extack)) {
+ NL_SET_ERR_MSG_MOD(extack, "Conflict with tc-taprio schedule");
+ rc = -ERANGE;
+ goto out;
+ }
+
+out:
+ if (rc) {
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule->vl.entries);
+ kfree(rule);
+ }
+ }
+
+ return rc;
+}
+
+static int sja1105_find_vlid(struct sja1105_private *priv, int port,
+ struct sja1105_key *key)
+{
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ struct sja1105_table *table;
+ int i;
+
+ if (WARN_ON(key->type != SJA1105_KEY_VLAN_AWARE_VL &&
+ key->type != SJA1105_KEY_VLAN_UNAWARE_VL))
+ return -1;
+
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ vl_lookup = table->entries;
+
+ for (i = 0; i < table->entry_count; i++) {
+ if (key->type == SJA1105_KEY_VLAN_AWARE_VL) {
+ if (vl_lookup[i].port == port &&
+ vl_lookup[i].macaddr == key->vl.dmac &&
+ vl_lookup[i].vlanid == key->vl.vid &&
+ vl_lookup[i].vlanprior == key->vl.pcp)
+ return i;
+ } else {
+ if (vl_lookup[i].port == port &&
+ vl_lookup[i].macaddr == key->vl.dmac)
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct flow_stats *stats,
+ struct netlink_ext_ack *extack)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 buf[SJA1105_SIZE_VL_STATUS] = {0};
+ u64 unreleased;
+ u64 timingerr;
+ u64 lengtherr;
+ int vlid, rc;
+ u64 pkts;
+
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ return 0;
+
+ vlid = sja1105_find_vlid(priv, port, &rule->key);
+ if (vlid < 0)
+ return 0;
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->vl_status + 2 * vlid, buf,
+ SJA1105_SIZE_VL_STATUS);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "SPI access failed");
+ return rc;
+ }
+
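+	/* The 8-byte VL status area holds three 16-bit counters for this VL:
+	 * UNRELEASED in bits 15:0, TIMINGERR in bits 31:16 and LENGTHERR in
+	 * bits 47:32 (field placement as per the unpack calls below).
+	 */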
+ sja1105_unpack(buf, &timingerr, 31, 16, SJA1105_SIZE_VL_STATUS);
+ sja1105_unpack(buf, &unreleased, 15, 0, SJA1105_SIZE_VL_STATUS);
+ sja1105_unpack(buf, &lengtherr, 47, 32, SJA1105_SIZE_VL_STATUS);
+
+ pkts = timingerr + unreleased + lengtherr;
+
+ flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, 0,
+ jiffies - rule->vl.stats.lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ rule->vl.stats.pkts = pkts;
+ rule->vl.stats.lastused = jiffies;
+
+ return 0;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.h b/drivers/net/dsa/sja1105/sja1105_vl.h
new file mode 100644
index 000000000..51fba0dce
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_vl.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2020 NXP
+ */
+#ifndef _SJA1105_VL_H
+#define _SJA1105_VL_H
+
+#include "sja1105.h"
+
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_VL)
+
+int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, unsigned long destports,
+ bool append);
+
+int sja1105_vl_delete(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule,
+ struct netlink_ext_ack *extack);
+
+int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time, u64 cycle_time_ext,
+ u32 num_entries, struct action_gate_entry *entries);
+
+int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct flow_stats *stats,
+ struct netlink_ext_ack *extack);
+
+#else
+
+static inline int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key,
+ unsigned long destports,
+ bool append)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_delete(struct sja1105_private *priv,
+ int port, struct sja1105_rule *rule,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time,
+ u64 cycle_time_ext, u32 num_entries,
+ struct action_gate_entry *entries)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule,
+ struct flow_stats *stats,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_VL) */
+
+#endif /* _SJA1105_VL_H */