author		Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-27 10:05:51 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-27 10:05:51 +0000
commit		5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree		a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/net/dsa/sja1105
parent		Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/dsa/sja1105')
-rw-r--r--  drivers/net/dsa/sja1105/Kconfig                   |   45
-rw-r--r--  drivers/net/dsa/sja1105/Makefile                  |   24
-rw-r--r--  drivers/net/dsa/sja1105/sja1105.h                 |  352
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_clocking.c        |  732
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_devlink.c         |  264
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_dynamic_config.c  | 1034
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_dynamic_config.h  |   40
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ethtool.c         |  554
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_flower.c          |  499
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c            | 3656
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.c             |  928
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.h             |  189
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_sgmii.h           |   53
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_spi.c             |  613
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.c   | 1470
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.h   |  455
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_tas.c             |  895
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_tas.h             |  104
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_vl.c              |  789
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_vl.h              |   74
20 files changed, 12770 insertions, 0 deletions
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
new file mode 100644
index 000000000..5e83b365f
--- /dev/null
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_SJA1105
+ tristate "NXP SJA1105 Ethernet switch family support"
+ depends on NET_DSA && SPI
+ select NET_DSA_TAG_SJA1105
+ select PACKING
+ select CRC32
+ help
+ This is the driver for the NXP SJA1105 automotive Ethernet switch
+ family. These are 5-port devices and are managed over an SPI
+ interface. Probing is handled based on OF bindings and so is the
+ linkage to PHYLINK. The driver supports the following revisions:
+ - SJA1105E (Gen. 1, No TT-Ethernet)
+ - SJA1105T (Gen. 1, TT-Ethernet)
+ - SJA1105P (Gen. 2, No SGMII, No TT-Ethernet)
+ - SJA1105Q (Gen. 2, No SGMII, TT-Ethernet)
+ - SJA1105R (Gen. 2, SGMII, No TT-Ethernet)
+ - SJA1105S (Gen. 2, SGMII, TT-Ethernet)
+
+config NET_DSA_SJA1105_PTP
+ bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch"
+ depends on NET_DSA_SJA1105
+ depends on PTP_1588_CLOCK
+ help
+ This enables support for timestamping and PTP clock manipulations in
+ the SJA1105 DSA driver.
+
+config NET_DSA_SJA1105_TAS
+ bool "Support for the Time-Aware Scheduler on NXP SJA1105"
+ depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO
+ depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m
+ depends on NET_DSA_SJA1105_PTP
+ help
+ This enables support for the TTEthernet-based egress scheduling
+ engine in the SJA1105 DSA driver, which is controlled using a
+ hardware offload of the tc-taprio qdisc.
+
+config NET_DSA_SJA1105_VL
+ bool "Support for Virtual Links on NXP SJA1105"
+ depends on NET_DSA_SJA1105_TAS
+ help
+ This enables support for flow classification using capable devices
+ (SJA1105T, SJA1105Q, SJA1105S). The following actions are supported:
+ - redirect, trap, drop
+ - time-based ingress policing, via the tc-gate action
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
new file mode 100644
index 000000000..a860e3a91
--- /dev/null
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_SJA1105) += sja1105.o
+
+sja1105-objs := \
+ sja1105_spi.o \
+ sja1105_main.o \
+ sja1105_flower.o \
+ sja1105_ethtool.o \
+ sja1105_devlink.o \
+ sja1105_clocking.o \
+ sja1105_static_config.o \
+ sja1105_dynamic_config.o \
+
+ifdef CONFIG_NET_DSA_SJA1105_PTP
+sja1105-objs += sja1105_ptp.o
+endif
+
+ifdef CONFIG_NET_DSA_SJA1105_TAS
+sja1105-objs += sja1105_tas.o
+endif
+
+ifdef CONFIG_NET_DSA_SJA1105_VL
+sja1105-objs += sja1105_vl.o
+endif
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
new file mode 100644
index 000000000..4ebc4a5a7
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_H
+#define _SJA1105_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <linux/dsa/sja1105.h>
+#include <linux/dsa/8021q.h>
+#include <net/dsa.h>
+#include <linux/mutex.h>
+#include "sja1105_static_config.h"
+
+#define SJA1105_NUM_PORTS 5
+#define SJA1105_NUM_TC 8
+#define SJA1105ET_FDB_BIN_SIZE 4
+/* The hardware value is in multiples of 10 ms.
+ * The passed parameter is in multiples of 1 ms.
+ */
+#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
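A quick worked example of the conversion (editorial sketch, not part of this patch): the DSA core passes ageing time in milliseconds, so the common 300 s bridge ageing time arrives as 300000, and the hardware wants it in 10 ms units:

	u32 l2_age = SJA1105_AGEING_TIME_MS(300000);	/* == 30000 */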
+#define SJA1105_NUM_L2_POLICERS 45
+
+typedef enum {
+ SPI_READ = 0,
+ SPI_WRITE = 1,
+} sja1105_spi_rw_mode_t;
+
+#include "sja1105_tas.h"
+#include "sja1105_ptp.h"
+
+/* Keeps the different addresses between E/T and P/Q/R/S */
+struct sja1105_regs {
+ u64 device_id;
+ u64 prod_id;
+ u64 status;
+ u64 port_control;
+ u64 rgu;
+ u64 vl_status;
+ u64 config;
+ u64 sgmii;
+ u64 rmii_pll1;
+ u64 ptppinst;
+ u64 ptppindur;
+ u64 ptp_control;
+ u64 ptpclkval;
+ u64 ptpclkrate;
+ u64 ptpclkcorp;
+ u64 ptpsyncts;
+ u64 ptpschtm;
+ u64 ptpegr_ts[SJA1105_NUM_PORTS];
+ u64 pad_mii_tx[SJA1105_NUM_PORTS];
+ u64 pad_mii_rx[SJA1105_NUM_PORTS];
+ u64 pad_mii_id[SJA1105_NUM_PORTS];
+ u64 cgu_idiv[SJA1105_NUM_PORTS];
+ u64 mii_tx_clk[SJA1105_NUM_PORTS];
+ u64 mii_rx_clk[SJA1105_NUM_PORTS];
+ u64 mii_ext_tx_clk[SJA1105_NUM_PORTS];
+ u64 mii_ext_rx_clk[SJA1105_NUM_PORTS];
+ u64 rgmii_tx_clk[SJA1105_NUM_PORTS];
+ u64 rmii_ref_clk[SJA1105_NUM_PORTS];
+ u64 rmii_ext_tx_clk[SJA1105_NUM_PORTS];
+ u64 mac[SJA1105_NUM_PORTS];
+ u64 mac_hl1[SJA1105_NUM_PORTS];
+ u64 mac_hl2[SJA1105_NUM_PORTS];
+ u64 ether_stats[SJA1105_NUM_PORTS];
+ u64 qlevel[SJA1105_NUM_PORTS];
+};
+
+struct sja1105_info {
+ u64 device_id;
+ /* Needed for distinction between P and R, and between Q and S
+ * (since the parts with/without SGMII share the same
+ * switch core and device_id)
+ */
+ u64 part_no;
+ /* E/T and P/Q/R/S have partial timestamps of different sizes.
+ * They must be reconstructed on both families anyway to get the full
+ * 64-bit values back (see the reconstruction sketch after this struct).
+ */
+ int ptp_ts_bits;
+ /* Also SPI commands are of different sizes to retrieve
+ * the egress timestamps.
+ */
+ int ptpegr_ts_bytes;
+ int num_cbs_shapers;
+ const struct sja1105_dynamic_table_ops *dyn_ops;
+ const struct sja1105_table_ops *static_ops;
+ const struct sja1105_regs *regs;
+ /* Both E/T and P/Q/R/S have quirks when it comes to popping the S-Tag
+ * from double-tagged frames. E/T will pop it only when it's equal to
+ * TPID from the General Parameters Table, while P/Q/R/S will only
+ * pop it when it's equal to TPID2.
+ */
+ u16 qinq_tpid;
+ int (*reset_cmd)(struct dsa_switch *ds);
+ int (*setup_rgmii_delay)(const void *ctx, int port);
+ /* Prototypes from include/net/dsa.h */
+ int (*fdb_add_cmd)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*fdb_del_cmd)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ void (*ptp_cmd_packing)(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
+ const char *name;
+};
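The reconstruction sketch referenced above, for the ptp_ts_bits field: a minimal version of the logic, mirroring sja1105_tstamp_reconstruct() from sja1105_ptp.c (whose hunk falls outside this excerpt); the standalone signature is an editorial simplification:

	/* Glue a partial hardware timestamp of @ptp_ts_bits width onto the
	 * upper bits of @now, the full 64-bit PTP clock value read shortly
	 * after the timestamped event.
	 */
	static u64 timestamp_reconstruct(u64 now, u64 ts_partial, int ptp_ts_bits)
	{
		u64 partial_tstamp_mask = CYCLECOUNTER_MASK(ptp_ts_bits);
		u64 ts_reconstructed;

		ts_reconstructed = (now & ~partial_tstamp_mask) | ts_partial;

		/* If the low bits of @now are smaller than the partial
		 * timestamp, the counter wrapped in between the event and
		 * the clock readout; go back one wraparound period.
		 */
		if (ts_reconstructed > now)
			ts_reconstructed -= (partial_tstamp_mask + 1);

		return ts_reconstructed;
	}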
+
+enum sja1105_key_type {
+ SJA1105_KEY_BCAST,
+ SJA1105_KEY_TC,
+ SJA1105_KEY_VLAN_UNAWARE_VL,
+ SJA1105_KEY_VLAN_AWARE_VL,
+};
+
+struct sja1105_key {
+ enum sja1105_key_type type;
+
+ union {
+ /* SJA1105_KEY_TC */
+ struct {
+ int pcp;
+ } tc;
+
+ /* SJA1105_KEY_VLAN_UNAWARE_VL */
+ /* SJA1105_KEY_VLAN_AWARE_VL */
+ struct {
+ u64 dmac;
+ u16 vid;
+ u16 pcp;
+ } vl;
+ };
+};
+
+enum sja1105_rule_type {
+ SJA1105_RULE_BCAST_POLICER,
+ SJA1105_RULE_TC_POLICER,
+ SJA1105_RULE_VL,
+};
+
+enum sja1105_vl_type {
+ SJA1105_VL_NONCRITICAL,
+ SJA1105_VL_RATE_CONSTRAINED,
+ SJA1105_VL_TIME_TRIGGERED,
+};
+
+struct sja1105_rule {
+ struct list_head list;
+ unsigned long cookie;
+ unsigned long port_mask;
+ struct sja1105_key key;
+ enum sja1105_rule_type type;
+
+ /* Action */
+ union {
+ /* SJA1105_RULE_BCAST_POLICER */
+ struct {
+ int sharindx;
+ } bcast_pol;
+
+ /* SJA1105_RULE_TC_POLICER */
+ struct {
+ int sharindx;
+ } tc_pol;
+
+ /* SJA1105_RULE_VL */
+ struct {
+ enum sja1105_vl_type type;
+ unsigned long destports;
+ int sharindx;
+ int maxlen;
+ int ipv;
+ u64 base_time;
+ u64 cycle_time;
+ int num_entries;
+ struct action_gate_entry *entries;
+ struct flow_stats stats;
+ } vl;
+ };
+};
+
+struct sja1105_flow_block {
+ struct list_head rules;
+ bool l2_policer_used[SJA1105_NUM_L2_POLICERS];
+ int num_virtual_links;
+};
+
+struct sja1105_bridge_vlan {
+ struct list_head list;
+ int port;
+ u16 vid;
+ bool pvid;
+ bool untagged;
+};
+
+enum sja1105_vlan_state {
+ SJA1105_VLAN_UNAWARE,
+ SJA1105_VLAN_BEST_EFFORT,
+ SJA1105_VLAN_FILTERING_FULL,
+};
+
+struct sja1105_private {
+ struct sja1105_static_config static_config;
+ bool rgmii_rx_delay[SJA1105_NUM_PORTS];
+ bool rgmii_tx_delay[SJA1105_NUM_PORTS];
+ bool best_effort_vlan_filtering;
+ const struct sja1105_info *info;
+ struct gpio_desc *reset_gpio;
+ struct spi_device *spidev;
+ struct dsa_switch *ds;
+ struct list_head dsa_8021q_vlans;
+ struct list_head bridge_vlans;
+ struct sja1105_flow_block flow_block;
+ struct sja1105_port ports[SJA1105_NUM_PORTS];
+ /* Serializes transmission of management frames so that
+ * the switch doesn't confuse them with one another.
+ */
+ struct mutex mgmt_lock;
+ struct dsa_8021q_context *dsa_8021q_ctx;
+ enum sja1105_vlan_state vlan_state;
+ struct devlink_region **regions;
+ struct sja1105_cbs_entry *cbs;
+ struct sja1105_tagger_data tagger_data;
+ struct sja1105_ptp_data ptp_data;
+ struct sja1105_tas_data tas_data;
+};
+
+#include "sja1105_dynamic_config.h"
+
+struct sja1105_spi_message {
+ u64 access;
+ u64 read_count;
+ u64 address;
+};
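On the wire, this header is a single 32-bit word sent ahead of the data payload. A hedged sketch of how it packs, mirroring the helper in sja1105_spi.c (whose hunk falls outside this excerpt):

	static void sja1105_spi_message_pack(void *buf,
					     const struct sja1105_spi_message *msg)
	{
		const int size = 4;	/* one 32-bit header word */

		memset(buf, 0, size);

		sja1105_pack(buf, &msg->access, 31, 31, size);	   /* SPI_READ/SPI_WRITE */
		sja1105_pack(buf, &msg->read_count, 30, 25, size); /* in 32-bit words */
		sja1105_pack(buf, &msg->address, 24, 4, size);
	}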
+
+/* From sja1105_main.c */
+enum sja1105_reset_reason {
+ SJA1105_VLAN_FILTERING = 0,
+ SJA1105_RX_HWTSTAMPING,
+ SJA1105_AGEING_TIME,
+ SJA1105_SCHEDULING,
+ SJA1105_BEST_EFFORT_POLICING,
+ SJA1105_VIRTUAL_LINKS,
+};
+
+int sja1105_static_config_reload(struct sja1105_private *priv,
+ enum sja1105_reset_reason reason);
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ struct switchdev_trans *trans);
+void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
+
+/* From sja1105_devlink.c */
+int sja1105_devlink_setup(struct dsa_switch *ds);
+void sja1105_devlink_teardown(struct dsa_switch *ds);
+int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int sja1105_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+
+/* From sja1105_spi.c */
+int sja1105_xfer_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u8 *buf, size_t len);
+int sja1105_xfer_u32(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
+ struct ptp_system_timestamp *ptp_sts);
+int sja1105_xfer_u64(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
+ struct ptp_system_timestamp *ptp_sts);
+int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
+ void *config_buf, int buf_len);
+int sja1105_static_config_upload(struct sja1105_private *priv);
+int sja1105_inhibit_tx(const struct sja1105_private *priv,
+ unsigned long port_bitmap, bool tx_inhibited);
+
+extern const struct sja1105_info sja1105e_info;
+extern const struct sja1105_info sja1105t_info;
+extern const struct sja1105_info sja1105p_info;
+extern const struct sja1105_info sja1105q_info;
+extern const struct sja1105_info sja1105r_info;
+extern const struct sja1105_info sja1105s_info;
+
+/* From sja1105_clocking.c */
+
+typedef enum {
+ XMII_MAC = 0,
+ XMII_PHY = 1,
+} sja1105_mii_role_t;
+
+typedef enum {
+ XMII_MODE_MII = 0,
+ XMII_MODE_RMII = 1,
+ XMII_MODE_RGMII = 2,
+ XMII_MODE_SGMII = 3,
+} sja1105_phy_interface_t;
+
+typedef enum {
+ SJA1105_SPEED_10MBPS = 3,
+ SJA1105_SPEED_100MBPS = 2,
+ SJA1105_SPEED_1000MBPS = 1,
+ SJA1105_SPEED_AUTO = 0,
+} sja1105_speed_t;
+
+int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port);
+int sja1105_clocking_setup_port(struct sja1105_private *priv, int port);
+int sja1105_clocking_setup(struct sja1105_private *priv);
+
+/* From sja1105_ethtool.c */
+void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data);
+void sja1105_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data);
+int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset);
+
+/* From sja1105_dynamic_config.c */
+int sja1105_dynamic_config_read(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry);
+int sja1105_dynamic_config_write(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry, bool keep);
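For illustration, a minimal (editorial) usage sketch of these accessors, assuming a priv and port already in scope: read back a port's MAC configuration entry, tweak it, and write it back without a switch reset:

	struct sja1105_mac_config_entry mac;
	int rc;

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_MAC_CONFIG, port, &mac);
	if (rc < 0)
		return rc;

	mac.dyn_learn = 0;	/* e.g. turn off address learning */

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					  &mac, true /* keep entry valid */);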
+
+enum sja1105_iotag {
+ SJA1105_C_TAG = 0, /* Inner VLAN header */
+ SJA1105_S_TAG = 1, /* Outer VLAN header */
+};
+
+u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid);
+int sja1105et_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105et_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+
+/* From sja1105_flower.c */
+int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+void sja1105_flower_setup(struct dsa_switch *ds);
+void sja1105_flower_teardown(struct dsa_switch *ds);
+struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
+ unsigned long cookie);
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
new file mode 100644
index 000000000..2a9b8a6a5
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -0,0 +1,732 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/packing.h>
+#include "sja1105.h"
+
+#define SJA1105_SIZE_CGU_CMD 4
+
+/* Common structure for CFG_PAD_MIIx_RX and CFG_PAD_MIIx_TX */
+struct sja1105_cfg_pad_mii {
+ u64 d32_os;
+ u64 d32_ih;
+ u64 d32_ipud;
+ u64 d10_ih;
+ u64 d10_os;
+ u64 d10_ipud;
+ u64 ctrl_os;
+ u64 ctrl_ih;
+ u64 ctrl_ipud;
+ u64 clk_os;
+ u64 clk_ih;
+ u64 clk_ipud;
+};
+
+struct sja1105_cfg_pad_mii_id {
+ u64 rxc_stable_ovr;
+ u64 rxc_delay;
+ u64 rxc_bypass;
+ u64 rxc_pd;
+ u64 txc_stable_ovr;
+ u64 txc_delay;
+ u64 txc_bypass;
+ u64 txc_pd;
+};
+
+/* UM10944 Table 82.
+ * IDIV_0_C to IDIV_4_C control registers
+ * (addr. 10000Bh to 10000Fh)
+ */
+struct sja1105_cgu_idiv {
+ u64 clksrc;
+ u64 autoblock;
+ u64 idiv;
+ u64 pd;
+};
+
+/* PLL_1_C control register
+ *
+ * SJA1105 E/T: UM10944 Table 81 (address 10000Ah)
+ * SJA1105 P/Q/R/S: UM11040 Table 116 (address 10000Ah)
+ */
+struct sja1105_cgu_pll_ctrl {
+ u64 pllclksrc;
+ u64 msel;
+ u64 autoblock;
+ u64 psel;
+ u64 direct;
+ u64 fbsel;
+ u64 bypass;
+ u64 pd;
+};
+
+enum {
+ CLKSRC_MII0_TX_CLK = 0x00,
+ CLKSRC_MII0_RX_CLK = 0x01,
+ CLKSRC_MII1_TX_CLK = 0x02,
+ CLKSRC_MII1_RX_CLK = 0x03,
+ CLKSRC_MII2_TX_CLK = 0x04,
+ CLKSRC_MII2_RX_CLK = 0x05,
+ CLKSRC_MII3_TX_CLK = 0x06,
+ CLKSRC_MII3_RX_CLK = 0x07,
+ CLKSRC_MII4_TX_CLK = 0x08,
+ CLKSRC_MII4_RX_CLK = 0x09,
+ CLKSRC_PLL0 = 0x0B,
+ CLKSRC_PLL1 = 0x0E,
+ CLKSRC_IDIV0 = 0x11,
+ CLKSRC_IDIV1 = 0x12,
+ CLKSRC_IDIV2 = 0x13,
+ CLKSRC_IDIV3 = 0x14,
+ CLKSRC_IDIV4 = 0x15,
+};
+
+/* UM10944 Table 83.
+ * MIIx clock control registers 1 to 30
+ * (addresses 100013h to 100035h)
+ */
+struct sja1105_cgu_mii_ctrl {
+ u64 clksrc;
+ u64 autoblock;
+ u64 pd;
+};
+
+static void sja1105_cgu_idiv_packing(void *buf, struct sja1105_cgu_idiv *idiv,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &idiv->clksrc, 28, 24, size, op);
+ sja1105_packing(buf, &idiv->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &idiv->idiv, 5, 2, size, op);
+ sja1105_packing(buf, &idiv->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
+ bool enabled, int factor)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = priv->ds->dev;
+ struct sja1105_cgu_idiv idiv;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ if (enabled && factor != 1 && factor != 10) {
+ dev_err(dev, "idiv factor must be 1 or 10\n");
+ return -ERANGE;
+ }
+
+ /* Payload for packed_buf */
+ idiv.clksrc = 0x0A; /* 25MHz */
+ idiv.autoblock = 1; /* Block clk automatically */
+ idiv.idiv = factor - 1; /* Divide by 1 or 10 */
+ idiv.pd = enabled ? 0 : 1; /* Power down? */
+ sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->cgu_idiv[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
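As a concrete check of the packing above (editorial arithmetic): with clksrc = 0x0A in bits 28:24, autoblock = 1 in bit 11 and idiv = 0 (divide by 1) in bits 5:2, the enabled word comes out as 0x0A000000 | 0x800 = 0x0A000800; a powered-down divider (pd = 1) adds bit 0, giving 0x0A000801.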
+
+static void
+sja1105_cgu_mii_control_packing(void *buf, struct sja1105_cgu_mii_ctrl *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->clksrc, 28, 24, size, op);
+ sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
+ int port, sja1105_mii_role_t role)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_tx_clk;
+ const int mac_clk_sources[] = {
+ CLKSRC_MII0_TX_CLK,
+ CLKSRC_MII1_TX_CLK,
+ CLKSRC_MII2_TX_CLK,
+ CLKSRC_MII3_TX_CLK,
+ CLKSRC_MII4_TX_CLK,
+ };
+ const int phy_clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int clksrc;
+
+ if (role == XMII_MAC)
+ clksrc = mac_clk_sources[port];
+ else
+ clksrc = phy_clk_sources[port];
+
+ /* Payload for packed_buf */
+ mii_tx_clk.clksrc = clksrc;
+ mii_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_rx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_MII0_RX_CLK,
+ CLKSRC_MII1_RX_CLK,
+ CLKSRC_MII2_RX_CLK,
+ CLKSRC_MII3_RX_CLK,
+ CLKSRC_MII4_RX_CLK,
+ };
+
+ /* Payload for packed_buf */
+ mii_rx_clk.clksrc = clk_sources[port];
+ mii_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_rx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_ext_tx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+
+ /* Payload for packed_buf */
+ mii_ext_tx_clk.clksrc = clk_sources[port];
+ mii_ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_ext_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_ext_rx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+
+ /* Payload for packed_buf */
+ mii_ext_rx_clk.clksrc = clk_sources[port];
+ mii_ext_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_ext_rx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
+{
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ dev_dbg(dev, "Configuring MII-%s clocking\n",
+ (role == XMII_MAC) ? "MAC" : "PHY");
+ /* If role is MAC, disable IDIV
+ * If role is PHY, enable IDIV and configure for 1/1 divider
+ */
+ rc = sja1105_cgu_idiv_config(priv, port, (role == XMII_PHY), 1);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of MII_TX_CLK_n
+ * * If role is MAC, select TX_CLK_n
+ * * If role is PHY, select IDIV_n
+ */
+ rc = sja1105_cgu_mii_tx_clk_config(priv, port, role);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of MII_RX_CLK_n
+ * Select RX_CLK_n
+ */
+ rc = sja1105_cgu_mii_rx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+
+ if (role == XMII_PHY) {
+ /* Per MII spec, the PHY (which is us) drives the TX_CLK pin */
+
+ /* Configure CLKSRC of EXT_TX_CLK_n
+ * Select IDIV_n
+ */
+ rc = sja1105_cgu_mii_ext_tx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of EXT_RX_CLK_n
+ * Select IDIV_n
+ */
+ rc = sja1105_cgu_mii_ext_rx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+static void
+sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->pllclksrc, 28, 24, size, op);
+ sja1105_packing(buf, &cmd->msel, 23, 16, size, op);
+ sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &cmd->psel, 9, 8, size, op);
+ sja1105_packing(buf, &cmd->direct, 7, 7, size, op);
+ sja1105_packing(buf, &cmd->fbsel, 6, 6, size, op);
+ sja1105_packing(buf, &cmd->bypass, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
+ int port, sja1105_speed_t speed)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl txc;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int clksrc;
+
+ if (speed == SJA1105_SPEED_1000MBPS) {
+ clksrc = CLKSRC_PLL0;
+ } else {
+ int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
+ CLKSRC_IDIV3, CLKSRC_IDIV4};
+ clksrc = clk_sources[port];
+ }
+
+ /* RGMII: 125MHz for 1000, 25MHz for 100, 2.5MHz for 10 */
+ txc.clksrc = clksrc;
+ /* Autoblock clk while changing clksrc */
+ txc.autoblock = 1;
+ /* Power Down off => enabled */
+ txc.pd = 0;
+ sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgmii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+/* AGU */
+static void
+sja1105_cfg_pad_mii_packing(void *buf, struct sja1105_cfg_pad_mii *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->d32_os, 28, 27, size, op);
+ sja1105_packing(buf, &cmd->d32_ih, 26, 26, size, op);
+ sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op);
+ sja1105_packing(buf, &cmd->d10_os, 20, 19, size, op);
+ sja1105_packing(buf, &cmd->d10_ih, 18, 18, size, op);
+ sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op);
+ sja1105_packing(buf, &cmd->ctrl_os, 12, 11, size, op);
+ sja1105_packing(buf, &cmd->ctrl_ih, 10, 10, size, op);
+ sja1105_packing(buf, &cmd->ctrl_ipud, 9, 8, size, op);
+ sja1105_packing(buf, &cmd->clk_os, 4, 3, size, op);
+ sja1105_packing(buf, &cmd->clk_ih, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->clk_ipud, 1, 0, size, op);
+}
+
+static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii pad_mii_tx = {0};
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ /* Payload */
+ pad_mii_tx.d32_os = 3; /* TXD[3:2] output stage: */
+ /* high noise/high speed */
+ pad_mii_tx.d10_os = 3; /* TXD[1:0] output stage: */
+ /* high noise/high speed */
+ pad_mii_tx.d32_ipud = 2; /* TXD[3:2] input stage: */
+ /* plain input (default) */
+ pad_mii_tx.d10_ipud = 2; /* TXD[1:0] input stage: */
+ /* plain input (default) */
+ pad_mii_tx.ctrl_os = 3; /* TX_CTL / TX_ER output stage */
+ pad_mii_tx.ctrl_ipud = 2; /* TX_CTL / TX_ER input stage (default) */
+ pad_mii_tx.clk_os = 3; /* TX_CLK output stage */
+ pad_mii_tx.clk_ih = 0; /* TX_CLK input hysteresis (default) */
+ pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */
+ sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_tx, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_cfg_pad_rx_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii pad_mii_rx = {0};
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ /* Payload */
+ pad_mii_rx.d32_ih = 0; /* RXD[3:2] input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.d32_ipud = 2; /* RXD[3:2] input weak pull-up/down */
+ /* plain input (default) */
+ pad_mii_rx.d10_ih = 0; /* RXD[1:0] input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.d10_ipud = 2; /* RXD[1:0] input weak pull-up/down */
+ /* plain input (default) */
+ pad_mii_rx.ctrl_ih = 0; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
+ /* input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.ctrl_ipud = 3; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
+ /* input stage weak pull-up/down: */
+ /* pull-down */
+ pad_mii_rx.clk_os = 2; /* RX_CLK/RXC output stage: */
+ /* medium noise/fast speed (default) */
+ pad_mii_rx.clk_ih = 0; /* RX_CLK/RXC input hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.clk_ipud = 2; /* RX_CLK/RXC input pull-up/down: */
+ /* plain input (default) */
+ sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_rx, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_rx[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static void
+sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_CGU_CMD;
+
+ sja1105_packing(buf, &cmd->rxc_stable_ovr, 15, 15, size, op);
+ sja1105_packing(buf, &cmd->rxc_delay, 14, 10, size, op);
+ sja1105_packing(buf, &cmd->rxc_bypass, 9, 9, size, op);
+ sja1105_packing(buf, &cmd->rxc_pd, 8, 8, size, op);
+ sja1105_packing(buf, &cmd->txc_stable_ovr, 7, 7, size, op);
+ sja1105_packing(buf, &cmd->txc_delay, 6, 2, size, op);
+ sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
+}
+
+/* Valid range in degrees is 73.8 to 101.7, in steps of 0.9 */
+static u64 sja1105_rgmii_delay(u64 phase)
+{
+ /* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9.
+ * To avoid floating point operations we'll multiply by 10
+ * and get 1 decimal point precision.
+ */
+ phase *= 10;
+ return (phase - 738) / 9;
+}
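Worked example (editorial): for the 90 degree delay requested by sja1105pqrs_setup_rgmii_delay() below, phase * 10 = 900 and delay_tune = (900 - 738) / 9 = 18, which maps back to 73.8 + 18 * 0.9 = 90.0 degrees, i.e. about 2 ns of the 8 ns period of a 125 MHz RGMII clock.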
+
+/* The RGMII delay setup procedure is 2-step and gets called upon each
+ * .phylink_mac_config. Both steps are deliberate.
+ * The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues
+ * with recovering from a frequency change of the link partner's RGMII clock.
+ * The easiest way to recover from this is to temporarily power down the TDL,
+ * as it will re-lock at the new frequency afterwards.
+ */
+int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
+{
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int rc;
+
+ if (priv->rgmii_rx_delay[port])
+ pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
+ if (priv->rgmii_tx_delay[port])
+ pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+
+ /* Stage 1: Turn the RGMII delay lines off. */
+ pad_mii_id.rxc_bypass = 1;
+ pad_mii_id.rxc_pd = 1;
+ pad_mii_id.txc_bypass = 1;
+ pad_mii_id.txc_pd = 1;
+ sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
+
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+ if (rc < 0)
+ return rc;
+
+ /* Stage 2: Turn the RGMII delay lines on. */
+ if (priv->rgmii_rx_delay[port]) {
+ pad_mii_id.rxc_bypass = 0;
+ pad_mii_id.rxc_pd = 0;
+ }
+ if (priv->rgmii_tx_delay[port]) {
+ pad_mii_id.txc_bypass = 0;
+ pad_mii_id.txc_pd = 0;
+ }
+ sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
+{
+ struct device *dev = priv->ds->dev;
+ struct sja1105_mac_config_entry *mac;
+ sja1105_speed_t speed;
+ int rc;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+ speed = mac[port].speed;
+
+ dev_dbg(dev, "Configuring port %d RGMII at speed %dMbps\n",
+ port, speed);
+
+ switch (speed) {
+ case SJA1105_SPEED_1000MBPS:
+ /* 1000Mbps, IDIV disabled (125 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, false, 1);
+ break;
+ case SJA1105_SPEED_100MBPS:
+ /* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, true, 1);
+ break;
+ case SJA1105_SPEED_10MBPS:
+ /* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, true, 10);
+ break;
+ case SJA1105_SPEED_AUTO:
+ /* Skip CGU configuration if there is no speed available
+ * (e.g. link is not established yet)
+ */
+ dev_dbg(dev, "Speed not available, skipping CGU config\n");
+ return 0;
+ default:
+ rc = -EINVAL;
+ }
+
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure idiv\n");
+ return rc;
+ }
+ rc = sja1105_cgu_rgmii_tx_clk_config(priv, port, speed);
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure RGMII Tx clock\n");
+ return rc;
+ }
+ rc = sja1105_rgmii_cfg_pad_tx_config(priv, port);
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure Tx pad registers\n");
+ return rc;
+ }
+ if (!priv->info->setup_rgmii_delay)
+ return 0;
+ /* The role has no hardware effect for RGMII. However we use it as
+ * a proxy for this interface being a MAC-to-MAC connection, with
+ * the RGMII internal delays needing to be applied by us.
+ */
+ if (role == XMII_MAC)
+ return 0;
+
+ return priv->info->setup_rgmii_delay(priv, port);
+}
+
+static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl ref_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_MII0_TX_CLK,
+ CLKSRC_MII1_TX_CLK,
+ CLKSRC_MII2_TX_CLK,
+ CLKSRC_MII3_TX_CLK,
+ CLKSRC_MII4_TX_CLK,
+ };
+
+ /* Payload for packed_buf */
+ ref_clk.clksrc = clk_sources[port];
+ ref_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ ref_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ref_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl ext_tx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ /* Payload for packed_buf */
+ ext_tx_clk.clksrc = CLKSRC_PLL1;
+ ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ ext_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ struct sja1105_cgu_pll_ctrl pll = {0};
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ /* PLL1 must be enabled and output 50 MHz.
+ * This is done by first writing 0x0A010941 to
+ * the PLL_1_C register and then deasserting
+ * power down (PD) with 0x0A010940.
+ */
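The quoted constants can be derived from the field layout in sja1105_cgu_pll_control_packing() above (editorial arithmetic): pllclksrc = 0xA in bits 28:24 gives 0x0A000000, msel = 1 in bits 23:16 adds 0x00010000, autoblock = 1 in bit 11 adds 0x800, psel = 1 in bits 9:8 adds 0x100, fbsel = 1 in bit 6 adds 0x40, and pd = 1 in bit 0 adds 1, for a total of 0x0A010941; step 2 clears only PD, leaving 0x0A010940.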
+
+ /* Step 1: PLL1 setup for 50 MHz */
+ pll.pllclksrc = 0xA;
+ pll.msel = 0x1;
+ pll.autoblock = 0x1;
+ pll.psel = 0x1;
+ pll.direct = 0x0;
+ pll.fbsel = 0x1;
+ pll.bypass = 0x0;
+ pll.pd = 0x1;
+
+ sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
+ SJA1105_SIZE_CGU_CMD);
+ if (rc < 0) {
+ dev_err(dev, "failed to configure PLL1 for 50MHz\n");
+ return rc;
+ }
+
+ /* Step 2: Enable PLL1 */
+ pll.pd = 0x0;
+
+ sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
+ SJA1105_SIZE_CGU_CMD);
+ if (rc < 0) {
+ dev_err(dev, "failed to enable PLL1\n");
+ return rc;
+ }
+ return rc;
+}
+
+static int sja1105_rmii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
+{
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ dev_dbg(dev, "Configuring RMII-%s clocking\n",
+ (role == XMII_MAC) ? "MAC" : "PHY");
+ /* AH1601.pdf chapter 2.5.1. Sources */
+ if (role == XMII_MAC) {
+ /* Configure and enable PLL1 for 50 MHz output */
+ rc = sja1105_cgu_rmii_pll_config(priv);
+ if (rc < 0)
+ return rc;
+ }
+ /* Disable IDIV for this port */
+ rc = sja1105_cgu_idiv_config(priv, port, false, 1);
+ if (rc < 0)
+ return rc;
+ /* Source to sink mappings */
+ rc = sja1105_cgu_rmii_ref_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ if (role == XMII_MAC) {
+ rc = sja1105_cgu_rmii_ext_tx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
+{
+ struct sja1105_xmii_params_entry *mii;
+ struct device *dev = priv->ds->dev;
+ sja1105_phy_interface_t phy_mode;
+ sja1105_mii_role_t role;
+ int rc;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+
+ /* RGMII etc */
+ phy_mode = mii->xmii_mode[port];
+ /* MAC or PHY, for applicable types (not RGMII) */
+ role = mii->phy_mac[port];
+
+ switch (phy_mode) {
+ case XMII_MODE_MII:
+ rc = sja1105_mii_clocking_setup(priv, port, role);
+ break;
+ case XMII_MODE_RMII:
+ rc = sja1105_rmii_clocking_setup(priv, port, role);
+ break;
+ case XMII_MODE_RGMII:
+ rc = sja1105_rgmii_clocking_setup(priv, port, role);
+ break;
+ case XMII_MODE_SGMII:
+ /* Nothing to do in the CGU for SGMII */
+ rc = 0;
+ break;
+ default:
+ dev_err(dev, "Invalid interface mode specified: %d\n",
+ phy_mode);
+ return -EINVAL;
+ }
+ if (rc) {
+ dev_err(dev, "Clocking setup for port %d failed: %d\n",
+ port, rc);
+ return rc;
+ }
+
+ /* Internally pull down the RX_DV/CRS_DV/RX_CTL and RX_ER inputs */
+ return sja1105_cfg_pad_rx_config(priv, port);
+}
+
+int sja1105_clocking_setup(struct sja1105_private *priv)
+{
+ int port, rc;
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ rc = sja1105_clocking_setup_port(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
new file mode 100644
index 000000000..8e3d185c8
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ * Copyright 2020 NXP Semiconductors
+ */
+#include "sja1105.h"
+
+/* Since devlink regions have a fixed size and the static config has a variable
+ * size, we need to calculate the maximum possible static config size by
+ * creating a dummy config with all table entries populated to the max, and get
+ * its packed length. This is done dynamically as opposed to simply hardcoding
+ * a number, since currently not all static config tables are implemented, so
+ * we are avoiding a possible code desynchronization.
+ */
+static size_t sja1105_static_config_get_max_size(struct sja1105_private *priv)
+{
+ struct sja1105_static_config config;
+ enum sja1105_blk_idx blk_idx;
+ int rc;
+
+ rc = sja1105_static_config_init(&config,
+ priv->info->static_ops,
+ priv->info->device_id);
+ if (rc)
+ return 0;
+
+ for (blk_idx = 0; blk_idx < BLK_IDX_MAX; blk_idx++) {
+ struct sja1105_table *table = &config.tables[blk_idx];
+
+ table->entry_count = table->ops->max_entry_count;
+ }
+
+ return sja1105_static_config_get_length(&config);
+}
+
+static int
+sja1105_region_static_config_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct sja1105_private *priv = ds->priv;
+ size_t max_len, len;
+
+ len = sja1105_static_config_get_length(&priv->static_config);
+ max_len = sja1105_static_config_get_max_size(priv);
+
+ *data = kcalloc(max_len, sizeof(u8), GFP_KERNEL);
+ if (!*data)
+ return -ENOMEM;
+
+ return static_config_buf_prepare_for_upload(priv, *data, len);
+}
+
+static struct devlink_region_ops sja1105_region_static_config_ops = {
+ .name = "static-config",
+ .snapshot = sja1105_region_static_config_snapshot,
+ .destructor = kfree,
+};
+
+enum sja1105_region_id {
+ SJA1105_REGION_STATIC_CONFIG = 0,
+};
+
+struct sja1105_region {
+ const struct devlink_region_ops *ops;
+ size_t (*get_size)(struct sja1105_private *priv);
+};
+
+static struct sja1105_region sja1105_regions[] = {
+ [SJA1105_REGION_STATIC_CONFIG] = {
+ .ops = &sja1105_region_static_config_ops,
+ .get_size = sja1105_static_config_get_max_size,
+ },
+};
+
+static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
+{
+ int i, num_regions = ARRAY_SIZE(sja1105_regions);
+ struct sja1105_private *priv = ds->priv;
+ const struct devlink_region_ops *ops;
+ struct devlink_region *region;
+ u64 size;
+
+ priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *),
+ GFP_KERNEL);
+ if (!priv->regions)
+ return -ENOMEM;
+
+ for (i = 0; i < num_regions; i++) {
+ size = sja1105_regions[i].get_size(priv);
+ ops = sja1105_regions[i].ops;
+
+ region = dsa_devlink_region_create(ds, ops, 1, size);
+ if (IS_ERR(region)) {
+ while (--i >= 0)
+ dsa_devlink_region_destroy(priv->regions[i]);
+
+ kfree(priv->regions);
+ return PTR_ERR(region);
+ }
+
+ priv->regions[i] = region;
+ }
+
+ return 0;
+}
+
+static void sja1105_teardown_devlink_regions(struct dsa_switch *ds)
+{
+ int i, num_regions = ARRAY_SIZE(sja1105_regions);
+ struct sja1105_private *priv = ds->priv;
+
+ for (i = 0; i < num_regions; i++)
+ dsa_devlink_region_destroy(priv->regions[i]);
+
+ kfree(priv->regions);
+}
+
+static int sja1105_best_effort_vlan_filtering_get(struct sja1105_private *priv,
+ bool *be_vlan)
+{
+ *be_vlan = priv->best_effort_vlan_filtering;
+
+ return 0;
+}
+
+static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
+ bool be_vlan)
+{
+ struct dsa_switch *ds = priv->ds;
+ bool vlan_filtering;
+ int port;
+ int rc = 0;
+
+ priv->best_effort_vlan_filtering = be_vlan;
+
+ rtnl_lock();
+ for (port = 0; port < ds->num_ports; port++) {
+ struct switchdev_trans trans;
+ struct dsa_port *dp;
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ dp = dsa_to_port(ds, port);
+ vlan_filtering = dsa_port_is_vlan_filtering(dp);
+
+ trans.ph_prepare = true;
+ rc = sja1105_vlan_filtering(ds, port, vlan_filtering, &trans);
+ if (rc)
+ break;
+
+ trans.ph_prepare = false;
+ rc = sja1105_vlan_filtering(ds, port, vlan_filtering, &trans);
+ if (rc)
+ break;
+ }
+ rtnl_unlock();
+
+ return rc;
+}
+
+enum sja1105_devlink_param_id {
+ SJA1105_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
+};
+
+int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct sja1105_private *priv = ds->priv;
+ int err;
+
+ switch (id) {
+ case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
+ err = sja1105_best_effort_vlan_filtering_get(priv,
+ &ctx->val.vbool);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct sja1105_private *priv = ds->priv;
+ int err;
+
+ switch (id) {
+ case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
+ err = sja1105_best_effort_vlan_filtering_set(priv,
+ ctx->val.vbool);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static const struct devlink_param sja1105_devlink_params[] = {
+ DSA_DEVLINK_PARAM_DRIVER(SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
+ "best_effort_vlan_filtering",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+};
+
+static int sja1105_setup_devlink_params(struct dsa_switch *ds)
+{
+ return dsa_devlink_params_register(ds, sja1105_devlink_params,
+ ARRAY_SIZE(sja1105_devlink_params));
+}
+
+static void sja1105_teardown_devlink_params(struct dsa_switch *ds)
+{
+ dsa_devlink_params_unregister(ds, sja1105_devlink_params,
+ ARRAY_SIZE(sja1105_devlink_params));
+}
+
+int sja1105_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ rc = devlink_info_driver_name_put(req, "sja1105");
+ if (rc)
+ return rc;
+
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ priv->info->name);
+ return rc;
+}
+
+int sja1105_devlink_setup(struct dsa_switch *ds)
+{
+ int rc;
+
+ rc = sja1105_setup_devlink_params(ds);
+ if (rc)
+ return rc;
+
+ rc = sja1105_setup_devlink_regions(ds);
+ if (rc < 0) {
+ sja1105_teardown_devlink_params(ds);
+ return rc;
+ }
+
+ return 0;
+}
+
+void sja1105_devlink_teardown(struct dsa_switch *ds)
+{
+ sja1105_teardown_devlink_params(ds);
+ sja1105_teardown_devlink_regions(ds);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
new file mode 100644
index 000000000..12cd04b56
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -0,0 +1,1034 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+/* In the dynamic configuration interface, the switch exposes a register-like
+ * view of some of the static configuration tables.
+ * Many times the field organization of the dynamic tables is abbreviated (not
+ * all fields are dynamically reconfigurable) and different from the static
+ * ones, but the key reason for having it is that we can spare a switch reset
+ * for settings that can be changed dynamically.
+ *
+ * This file creates a per-switch-family abstraction called
+ * struct sja1105_dynamic_table_ops and two operations that work with it:
+ * - sja1105_dynamic_config_write
+ * - sja1105_dynamic_config_read
+ *
+ * Compared to the struct sja1105_table_ops from sja1105_static_config.c,
+ * the dynamic accessors work with a compound buffer:
+ *
+ * packed_buf
+ *
+ * |
+ * V
+ * +-----------------------------------------+------------------+
+ * | ENTRY BUFFER | COMMAND BUFFER |
+ * +-----------------------------------------+------------------+
+ *
+ * <----------------------- packed_size ------------------------>
+ *
+ * The ENTRY BUFFER may or may not have the same layout, or size, as its static
+ * configuration table entry counterpart. When it does, the same packing
+ * function is reused (bar exceptional cases - see
+ * sja1105pqrs_dyn_l2_lookup_entry_packing).
+ *
+ * The reason for the COMMAND BUFFER being at the end is to be able to send
+ * a dynamic write command through a single SPI burst. By the time the switch
+ * reacts to the command, the ENTRY BUFFER is already populated with the data
+ * sent by the core.
+ *
+ * The COMMAND BUFFER is always SJA1105_SIZE_DYN_CMD bytes (one 32-bit word) in
+ * size.
+ *
+ * Sometimes the ENTRY BUFFER does not really exist (when the number of fields
+ * that can be reconfigured is small); in that case, the switch repurposes some
+ * of the unused 32 bits of the COMMAND BUFFER to hold ENTRY data.
+ *
+ * The key members of struct sja1105_dynamic_table_ops are:
+ * - .entry_packing: A function that deals with packing an ENTRY structure
+ * into an SPI buffer, or retrieving an ENTRY structure
+ * from one.
+ * The @packed_buf pointer it is given always points to
+ * the ENTRY portion of the buffer.
+ * - .cmd_packing: A function that deals with packing/unpacking the COMMAND
+ * structure to/from the SPI buffer.
+ * It is given the same @packed_buf pointer as .entry_packing,
+ * so most of the time, the @packed_buf points *behind* the
+ * COMMAND offset inside the buffer.
+ * To access the COMMAND portion of the buffer, the function
+ * knows its correct offset.
+ * Giving both functions the same pointer is handy because in
+ * extreme cases (see sja1105pqrs_dyn_l2_lookup_entry_packing)
+ * the .entry_packing is able to jump to the COMMAND portion,
+ * or vice-versa (sja1105pqrs_l2_lookup_cmd_packing).
+ * - .access: A bitmap of:
+ * OP_READ: Set if the hardware manual marks the ENTRY portion of the
+ * dynamic configuration table buffer as R (readable) after
+ * an SPI read command (the switch will populate the buffer).
+ * OP_WRITE: Set if the manual marks the ENTRY portion of the dynamic
+ * table buffer as W (writable) after an SPI write command
+ * (the switch will read the fields provided in the buffer).
+ * OP_DEL: Set if the manual says the VALIDENT bit is supported in the
+ * COMMAND portion of this dynamic config buffer (i.e. the
+ * specified entry can be invalidated through a SPI write
+ * command).
+ * OP_SEARCH: Set if the manual says that the index of an entry can
+ * be retrieved in the COMMAND portion of the buffer based
+ * on its ENTRY portion, as a result of a SPI write command.
+ * Only the TCAM-based FDB table on SJA1105 P/Q/R/S supports
+ * this.
+ * - .max_entry_count: The number of entries, counting from zero, that can be
+ * reconfigured through the dynamic interface. If a static
+ * table can be reconfigured at all dynamically, this
+ * number always matches the maximum number of supported
+ * static entries.
+ * - .packed_size: The length in bytes of the compound ENTRY + COMMAND BUFFER.
+ * Note that sometimes the compound buffer may contain holes in
+ * it (see sja1105_vlan_lookup_cmd_packing). The @packed_buf is
+ * contiguous however, so @packed_size includes any unused
+ * bytes.
+ * - .addr: The base SPI address at which the buffer must be written to the
+ * switch's memory. When looking at the hardware manual, this must
+ * always match the lowest documented address for the ENTRY, and not
+ * that of the COMMAND, since the other 32-bit words will follow along
+ * at the correct addresses.
+ */
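To make the layout concrete, take the P/Q/R/S L2 lookup table as an example (editorial sketch using the sizes defined just below): the compound buffer is SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD bytes, with the ENTRY portion at offset 0 and the 4-byte COMMAND word right after it, which is why sja1105pqrs_l2_lookup_cmd_packing() further down starts its command pointer at buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY:

	u8 packed_buf[SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD] = {0};
	u8 *entry_buf = packed_buf;	/* ENTRY, packed by .entry_packing */
	u8 *cmd_buf = packed_buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;	/* COMMAND */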
+
+#define SJA1105_SIZE_DYN_CMD 4
+
+#define SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_LOOKUP_ENTRY)
+
+#define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_L2_LOOKUP_ENTRY)
+
+#define SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY)
+
+#define SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + 4 + SJA1105_SIZE_VLAN_LOOKUP_ENTRY)
+
+#define SJA1105_SIZE_L2_FORWARDING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_FORWARDING_ENTRY)
+
+#define SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY)
+
+#define SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY)
+
+#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY)
+
+#define SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY)
+
+#define SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY)
+
+#define SJA1105_SIZE_RETAGGING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_RETAGGING_ENTRY)
+
+#define SJA1105ET_SIZE_CBS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_CBS_ENTRY)
+
+#define SJA1105PQRS_SIZE_CBS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_CBS_ENTRY)
+
+#define SJA1105_MAX_DYN_CMD_SIZE \
+ SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD
+
+struct sja1105_dyn_cmd {
+ bool search;
+ u64 valid;
+ u64 rdwrset;
+ u64 errors;
+ u64 valident;
+ u64 index;
+};
+
+enum sja1105_hostcmd {
+ SJA1105_HOSTCMD_SEARCH = 1,
+ SJA1105_HOSTCMD_READ = 2,
+ SJA1105_HOSTCMD_WRITE = 3,
+ SJA1105_HOSTCMD_INVALIDATE = 4,
+};
+
+/* Command and entry overlap */
+static void
+sja1105et_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(buf, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->index, 9, 0, size, op);
+}
+
+/* Command and entry are separate */
+static void
+sja1105pqrs_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 9, 0, size, op);
+}
+
+static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const int size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD;
+
+ sja1105_packing(buf, &entry->egrmirr, 21, 17, size, op);
+ sja1105_packing(buf, &entry->ingrmirr, 16, 16, size, op);
+ return size;
+}
+
+static void
+sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+ u64 hostcmd;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+
+ /* VALIDENT is supposed to indicate "keep or not", but in SJA1105 E/T,
+ * using it to delete a management route was unsupported. UM10944
+ * said about it:
+ *
+ * In case of a write access with the MGMTROUTE flag set,
+ * the flag will be ignored. It will always be found cleared
+ * for read accesses with the MGMTROUTE flag set.
+ *
+ * SJA1105 P/Q/R/S keeps the same behavior w.r.t. VALIDENT, but there
+ * is now another flag called HOSTCMD which does more stuff (quoting
+ * from UM11040):
+ *
+ * A write request is accepted only when HOSTCMD is set to write host
+ * or invalid. A read request is accepted only when HOSTCMD is set to
+ * search host or read host.
+ *
+ * So it is possible to translate a RDWRSET/VALIDENT combination into
+ * HOSTCMD so that we keep the dynamic command API in place, and
+ * at the same time achieve compatibility with the management route
+ * command structure.
+ */
+ if (cmd->rdwrset == SPI_READ) {
+ if (cmd->search)
+ hostcmd = SJA1105_HOSTCMD_SEARCH;
+ else
+ hostcmd = SJA1105_HOSTCMD_READ;
+ } else {
+ /* SPI_WRITE */
+ if (cmd->valident)
+ hostcmd = SJA1105_HOSTCMD_WRITE;
+ else
+ hostcmd = SJA1105_HOSTCMD_INVALIDATE;
+ }
+ sja1105_packing(p, &hostcmd, 25, 23, size, op);
+
+ /* Hack - The hardware takes the 'index' field within
+ * struct sja1105_l2_lookup_entry as the index on which this command
+ * will operate. However it will ignore everything else, so 'index'
+ * is logically part of command but physically part of entry.
+ * Populate the 'index' entry field from within the command callback,
+ * such that our API doesn't need to ask for a full-blown entry
+ * structure when e.g. a delete is requested.
+ */
+ sja1105_packing(buf, &cmd->index, 15, 6,
+ SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op);
+}
+
+/* The switch's behavior here makes our command/entry abstraction
+ * crumble apart.
+ *
+ * On P/Q/R/S, the switch tries to say whether a FDB entry
+ * is statically programmed or dynamically learned via a flag called LOCKEDS.
+ * The hardware manual says about this field:
+ *
+ * On write will specify the format of ENTRY.
+ * On read the flag will be found cleared at times the VALID flag is found
+ * set. The flag will also be found cleared in response to a read having the
+ * MGMTROUTE flag set. In response to a read with the MGMTROUTE flag
+ * cleared, the flag be set if the most recent access operated on an entry
+ * that was either loaded by configuration or through dynamic reconfiguration
+ * (as opposed to automatically learned entries).
+ *
+ * The trouble with this flag is that it's part of the *command* to access the
+ * dynamic interface, and not part of the *entry* retrieved from it.
+ * Otherwise said, for a sja1105_dynamic_config_read, LOCKEDS is supposed to be
+ * an output from the switch into the command buffer, and for a
+ * sja1105_dynamic_config_write, the switch treats LOCKEDS as an input
+ * (hence we can write either static, or automatically learned entries, from
+ * the core).
+ * But the manual contradicts itself in the last phrase where it says that on
+ * read, LOCKEDS will be set to 1 for all FDB entries written through the
+ * dynamic interface (therefore, the value of LOCKEDS from the
+ * sja1105_dynamic_config_write is not really used for anything, it'll store a
+ * 1 anyway).
+ * This means you can't really write a FDB entry with LOCKEDS=0 (automatically
+ * learned) into the switch, which kind of makes sense.
+ * As for reading through the dynamic interface, it doesn't make too much sense
+ * to put LOCKEDS into the command, since the switch will inevitably have to
+ * ignore it (otherwise a command would be like "read the FDB entry 123, but
+ * only if it's dynamically learned" <- well how am I supposed to know?) and
+ * just use it as an output buffer for its findings. But guess what... that's
+ * what the entry buffer is for!
+ * Unfortunately, what really breaks this abstraction is the fact that it
+ * wasn't designed with the possibility in mind that the switch can output
+ * entry-related data as writeback through the command buffer.
+ * However, whether a FDB entry is statically or dynamically learned *is* part
+ * of the entry and not the command data, no matter what the switch thinks.
+ * In order to do that, we'll need to wrap around the
+ * sja1105pqrs_l2_lookup_entry_packing from sja1105_static_config.c, and take
+ * a peek outside of the caller-supplied @buf (the entry buffer), to reach the
+ * command buffer.
+ */
+static size_t
+sja1105pqrs_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+ u8 *cmd = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+
+ return sja1105pqrs_l2_lookup_entry_packing(buf, entry_ptr, op);
+}
+
+static void
+sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+ /* Hack - see comments above. */
+ sja1105_packing(buf, &cmd->index, 29, 20,
+ SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op);
+}
+
+static size_t sja1105et_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+ u8 *cmd = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+
+ return sja1105et_l2_lookup_entry_packing(buf, entry_ptr, op);
+}
+
+static void
+sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ u64 mgmtroute = 1;
+
+ sja1105et_l2_lookup_cmd_packing(buf, cmd, op);
+ if (op == PACK)
+ sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
+}
+
+static size_t sja1105et_mgmt_route_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_mgmt_entry *entry = entry_ptr;
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+
+ /* UM10944: To specify if a PTP egress timestamp shall be captured on
+ * each port upon transmission of the frame, the LSB of VLANID in the
+ * ENTRY field provided by the host must be set.
+ * Bit 1 of VLANID then specifies the register where the timestamp for
+ * this port is stored in.
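+ * For example (illustrative): takets=1 with tsreg=0 requests a timestamp
+ * in register 0, which the hardware sees as the two least significant
+ * bits of VLANID being 0b01.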
+ */
+ sja1105_packing(buf, &entry->tsreg, 85, 85, size, op);
+ sja1105_packing(buf, &entry->takets, 84, 84, size, op);
+ sja1105_packing(buf, &entry->macaddr, 83, 36, size, op);
+ sja1105_packing(buf, &entry->destports, 35, 31, size, op);
+ sja1105_packing(buf, &entry->enfport, 30, 30, size, op);
+ return size;
+}
+
+static void
+sja1105pqrs_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ u64 mgmtroute = 1;
+
+ sja1105pqrs_l2_lookup_cmd_packing(buf, cmd, op);
+ if (op == PACK)
+ sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
+}
+
+static size_t sja1105pqrs_mgmt_route_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_mgmt_entry *entry = entry_ptr;
+
+ /* In P/Q/R/S, enfport got renamed to mgmtvalid, but its purpose
+ * is the same (driver uses it to confirm that frame was sent).
+ * So just keep the name from E/T.
+ */
+ sja1105_packing(buf, &entry->tsreg, 71, 71, size, op);
+ sja1105_packing(buf, &entry->takets, 70, 70, size, op);
+ sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
+ sja1105_packing(buf, &entry->destports, 21, 17, size, op);
+ sja1105_packing(buf, &entry->enfport, 16, 16, size, op);
+ return size;
+}
+
+/* In E/T, entry is at addresses 0x27-0x28. There is a 4 byte gap at 0x29,
+ * and command is at 0x2a. Similarly in P/Q/R/S there is a 1 register gap
+ * between entry (0x2d, 0x2e) and command (0x30).
+ */
+static void
+sja1105_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VLAN_LOOKUP_ENTRY + 4;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+ /* Hack - see comments above, applied for 'vlanid' field of
+ * struct sja1105_vlan_lookup_entry.
+ */
+ sja1105_packing(buf, &cmd->index, 38, 27,
+ SJA1105_SIZE_VLAN_LOOKUP_ENTRY, op);
+}
+
+static void
+sja1105_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 4, 0, size, op);
+}
+
+static void
+sja1105et_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_DYN_CMD;
+ /* Yup, user manual definitions are reversed */
+ u8 *reg1 = buf + 4;
+
+ sja1105_packing(reg1, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(reg1, &cmd->index, 26, 24, size, op);
+}
+
+static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ /* Yup, user manual definitions are reversed */
+ u8 *reg1 = buf + 4;
+ u8 *reg2 = buf;
+
+ sja1105_packing(reg1, &entry->speed, 30, 29, size, op);
+ sja1105_packing(reg1, &entry->drpdtag, 23, 23, size, op);
+ sja1105_packing(reg1, &entry->drpuntag, 22, 22, size, op);
+ sja1105_packing(reg1, &entry->retag, 21, 21, size, op);
+ sja1105_packing(reg1, &entry->dyn_learn, 20, 20, size, op);
+ sja1105_packing(reg1, &entry->egress, 19, 19, size, op);
+ sja1105_packing(reg1, &entry->ingress, 18, 18, size, op);
+ sja1105_packing(reg1, &entry->ing_mirr, 17, 17, size, op);
+ sja1105_packing(reg1, &entry->egr_mirr, 16, 16, size, op);
+ sja1105_packing(reg1, &entry->vlanprio, 14, 12, size, op);
+ sja1105_packing(reg1, &entry->vlanid, 11, 0, size, op);
+ sja1105_packing(reg2, &entry->tp_delin, 31, 16, size, op);
+ sja1105_packing(reg2, &entry->tp_delout, 15, 0, size, op);
+ /* MAC configuration table entries which can't be reconfigured:
+ * top, base, enabled, ifg, maxage, drpnona664
+ */
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+static void
+sja1105pqrs_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY;
+ u8 *p = buf + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 2, 0, size, op);
+}
+
+static void
+sja1105et_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ sja1105_packing(buf, &cmd->valid, 31, 31,
+ SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op);
+}
+
+static size_t
+sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->poly, 7, 0,
+ SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op);
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+static void
+sja1105pqrs_l2_lookup_params_cmd_packing(void *buf,
+ struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+}
+
+static void
+sja1105et_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD;
+
+ sja1105_packing(buf, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->errors, 30, 30, size, op);
+}
+
+static size_t
+sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_general_params_entry *entry = entry_ptr;
+ const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD;
+
+ sja1105_packing(buf, &entry->mirr_port, 2, 0, size, op);
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+static void
+sja1105pqrs_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 28, 28, size, op);
+}
+
+static void
+sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+}
+
+static void
+sja1105_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_RETAGGING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->valident, 29, 29, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 28, 28, size, op);
+ sja1105_packing(p, &cmd->index, 5, 0, size, op);
+}
+
+static void sja1105et_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_CBS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->index, 19, 16, size, op);
+}
+
+static size_t sja1105et_cbs_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_CBS_ENTRY;
+ struct sja1105_cbs_entry *entry = entry_ptr;
+ u8 *cmd = buf + size;
+ u32 *p = buf;
+
+ sja1105_packing(cmd, &entry->port, 5, 3, SJA1105_SIZE_DYN_CMD, op);
+ sja1105_packing(cmd, &entry->prio, 2, 0, SJA1105_SIZE_DYN_CMD, op);
+ sja1105_packing(p + 3, &entry->credit_lo, 31, 0, size, op);
+ sja1105_packing(p + 2, &entry->credit_hi, 31, 0, size, op);
+ sja1105_packing(p + 1, &entry->send_slope, 31, 0, size, op);
+ sja1105_packing(p + 0, &entry->idle_slope, 31, 0, size, op);
+ return size;
+}
+
+static void sja1105pqrs_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_CBS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 3, 0, size, op);
+}
+
+static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_CBS_ENTRY;
+ struct sja1105_cbs_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->port, 159, 157, size, op);
+ sja1105_packing(buf, &entry->prio, 156, 154, size, op);
+ sja1105_packing(buf, &entry->credit_lo, 153, 122, size, op);
+ sja1105_packing(buf, &entry->credit_hi, 121, 90, size, op);
+ sja1105_packing(buf, &entry->send_slope, 89, 58, size, op);
+ sja1105_packing(buf, &entry->idle_slope, 57, 26, size, op);
+ return size;
+}
+
+#define OP_READ BIT(0)
+#define OP_WRITE BIT(1)
+#define OP_DEL BIT(2)
+#define OP_SEARCH BIT(3)
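+
+/* The .access mask of each table below is an OR of the flags above, e.g.
+ * (OP_READ | OP_WRITE | OP_DEL) means the block can be read, written and
+ * deleted through the dynamic interface, but not searched by entry key.
+ */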
+
+/* SJA1105E/T: First generation */
+const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1105et_vl_lookup_entry_packing,
+ .cmd_packing = sja1105et_vl_lookup_cmd_packing,
+ .access = OP_WRITE,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = 0x35,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1105et_dyn_l2_lookup_entry_packing,
+ .cmd_packing = sja1105et_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x20,
+ },
+ [BLK_IDX_MGMT_ROUTE] = {
+ .entry_packing = sja1105et_mgmt_route_entry_packing,
+ .cmd_packing = sja1105et_mgmt_route_cmd_packing,
+ .access = (OP_READ | OP_WRITE),
+ .max_entry_count = SJA1105_NUM_PORTS,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x20,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1105_vlan_lookup_entry_packing,
+ .cmd_packing = sja1105_vlan_lookup_cmd_packing,
+ .access = (OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = 0x27,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1105_l2_forwarding_entry_packing,
+ .cmd_packing = sja1105_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1105et_mac_config_entry_packing,
+ .cmd_packing = sja1105et_mac_config_cmd_packing,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = 0x36,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1105et_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1105et_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = 0x38,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1105et_general_params_entry_packing,
+ .cmd_packing = sja1105et_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = 0x34,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1105_retagging_entry_packing,
+ .cmd_packing = sja1105_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = 0x31,
+ },
+ [BLK_IDX_CBS] = {
+ .entry_packing = sja1105et_cbs_entry_packing,
+ .cmd_packing = sja1105et_cbs_cmd_packing,
+ .max_entry_count = SJA1105ET_MAX_CBS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_CBS_DYN_CMD,
+ .addr = 0x2c,
+ },
+};
+
+/* SJA1105P/Q/R/S: Second generation */
+const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1105_vl_lookup_entry_packing,
+ .cmd_packing = sja1105pqrs_vl_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE),
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = 0x47,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1105pqrs_dyn_l2_lookup_entry_packing,
+ .cmd_packing = sja1105pqrs_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_MGMT_ROUTE] = {
+ .entry_packing = sja1105pqrs_mgmt_route_entry_packing,
+ .cmd_packing = sja1105pqrs_mgmt_route_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
+ .max_entry_count = SJA1105_NUM_PORTS,
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1105_vlan_lookup_entry_packing,
+ .cmd_packing = sja1105_vlan_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = 0x2D,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1105_l2_forwarding_entry_packing,
+ .cmd_packing = sja1105_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = 0x2A,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1105pqrs_mac_config_entry_packing,
+ .cmd_packing = sja1105pqrs_mac_config_cmd_packing,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = 0x4B,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1105pqrs_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = 0x54,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .entry_packing = sja1105pqrs_avb_params_entry_packing,
+ .cmd_packing = sja1105pqrs_avb_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD,
+ .addr = 0x8003,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1105pqrs_general_params_entry_packing,
+ .cmd_packing = sja1105pqrs_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = 0x3B,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1105_retagging_entry_packing,
+ .cmd_packing = sja1105_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = 0x38,
+ },
+ [BLK_IDX_CBS] = {
+ .entry_packing = sja1105pqrs_cbs_entry_packing,
+ .cmd_packing = sja1105pqrs_cbs_cmd_packing,
+ .max_entry_count = SJA1105PQRS_MAX_CBS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105PQRS_SIZE_CBS_DYN_CMD,
+ .addr = 0x32,
+ },
+};
+
+/* Provides read access to the settings through the dynamic interface
+ * of the switch.
+ * @blk_idx is used as key to select from the sja1105_dynamic_table_ops.
+ * The selection is limited by the hardware with respect to which
+ * configuration blocks can be read through the dynamic interface.
+ * @index is used to retrieve a particular table entry. If negative
+ * (and if the @blk_idx supports the search operation), a search
+ * is performed, using the @entry parameter as the key.
+ * @entry is type-cast to an unpacked structure that holds a table entry
+ * of the type specified in @blk_idx.
+ * Usually an output argument. If @index is negative, then this
+ * argument is used as input/output: it should be pre-populated
+ * with the element to search for. Entries which support the
+ * search operation will have an "index" field (not the @index
+ * argument to this function) and that is where the found index
+ * will be returned (or left unmodified - thus negative - if not
+ * found).
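+ *
+ * A minimal usage sketch (illustrative; @addr and @vid stand for
+ * caller-provided variables), searching the FDB by key through the
+ * negative-index form on P/Q/R/S, whose L2 lookup table supports
+ * OP_SEARCH:
+ *
+ *	struct sja1105_l2_lookup_entry l2_lookup = {0};
+ *
+ *	l2_lookup.macaddr = ether_addr_to_u64(addr);
+ *	l2_lookup.vlanid = vid;
+ *	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ *					 SJA1105_SEARCH, &l2_lookup);
+ *
+ * On success, l2_lookup.index holds the index at which the entry was
+ * found.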
+ */
+int sja1105_dynamic_config_read(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry)
+{
+ const struct sja1105_dynamic_table_ops *ops;
+ struct sja1105_dyn_cmd cmd = {0};
+ /* SPI payload buffer */
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
+ int retries = 3;
+ int rc;
+
+ if (blk_idx >= BLK_IDX_MAX_DYN)
+ return -ERANGE;
+
+ ops = &priv->info->dyn_ops[blk_idx];
+
+ if (index >= 0 && index >= ops->max_entry_count)
+ return -ERANGE;
+ if (index < 0 && !(ops->access & OP_SEARCH))
+ return -EOPNOTSUPP;
+ if (!(ops->access & OP_READ))
+ return -EOPNOTSUPP;
+ if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
+ return -ERANGE;
+ if (!ops->cmd_packing)
+ return -EOPNOTSUPP;
+ if (!ops->entry_packing)
+ return -EOPNOTSUPP;
+
+ cmd.valid = true; /* Trigger action on table entry */
+ cmd.rdwrset = SPI_READ; /* Action is read */
+ if (index < 0) {
+ /* Avoid copying a signed negative number to a u64 */
+ cmd.index = 0;
+ cmd.search = true;
+ } else {
+ cmd.index = index;
+ cmd.search = false;
+ }
+ cmd.valident = true;
+ ops->cmd_packing(packed_buf, &cmd, PACK);
+
+ if (cmd.search)
+ ops->entry_packing(packed_buf, entry, PACK);
+
+ /* Send SPI write operation: read config table entry */
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ ops->packed_size);
+ if (rc < 0)
+ return rc;
+
+ /* Loop until we have confirmation that hardware has finished
+ * processing the command and has cleared the VALID field
+ */
+ do {
+ memset(packed_buf, 0, ops->packed_size);
+
+ /* Retrieve the read operation's result */
+ rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
+ ops->packed_size);
+ if (rc < 0)
+ return rc;
+
+ cmd = (struct sja1105_dyn_cmd) {0};
+ ops->cmd_packing(packed_buf, &cmd, UNPACK);
+ /* UM10944: [valident] will always be found cleared
+ * during a read access with MGMTROUTE set.
+ * So don't error out in that case.
+ */
+ if (!cmd.valident && blk_idx != BLK_IDX_MGMT_ROUTE)
+ return -ENOENT;
+ cpu_relax();
+ } while (cmd.valid && --retries);
+
+ if (cmd.valid)
+ return -ETIMEDOUT;
+
+ /* Don't dereference possibly NULL pointer - maybe caller
+ * only wanted to see whether the entry existed or not.
+ */
+ if (entry)
+ ops->entry_packing(packed_buf, entry, UNPACK);
+ return 0;
+}
+
+int sja1105_dynamic_config_write(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry, bool keep)
+{
+ const struct sja1105_dynamic_table_ops *ops;
+ struct sja1105_dyn_cmd cmd = {0};
+ /* SPI payload buffer */
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
+ int rc;
+
+ if (blk_idx >= BLK_IDX_MAX_DYN)
+ return -ERANGE;
+
+ ops = &priv->info->dyn_ops[blk_idx];
+
+ if (index >= ops->max_entry_count)
+ return -ERANGE;
+ if (index < 0)
+ return -ERANGE;
+ if (!(ops->access & OP_WRITE))
+ return -EOPNOTSUPP;
+ if (!keep && !(ops->access & OP_DEL))
+ return -EOPNOTSUPP;
+ if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
+ return -ERANGE;
+
+ cmd.valident = keep; /* If false, deletes entry */
+ cmd.valid = true; /* Trigger action on table entry */
+ cmd.rdwrset = SPI_WRITE; /* Action is write */
+ cmd.index = index;
+
+ if (!ops->cmd_packing)
+ return -EOPNOTSUPP;
+ ops->cmd_packing(packed_buf, &cmd, PACK);
+
+ if (!ops->entry_packing)
+ return -EOPNOTSUPP;
+ /* Don't dereference a potentially NULL pointer if just
+ * deleting a table entry is what was requested. For cases
+ * where the 'index' field is physically part of the entry
+ * structure and is needed here, we deal with that in the
+ * cmd_packing callback.
+ */
+ if (keep)
+ ops->entry_packing(packed_buf, entry, PACK);
+
+ /* Send SPI write operation: write config table entry */
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ ops->packed_size);
+ if (rc < 0)
+ return rc;
+
+ cmd = (struct sja1105_dyn_cmd) {0};
+ ops->cmd_packing(packed_buf, &cmd, UNPACK);
+ if (cmd.errors)
+ return -EINVAL;
+
+ return 0;
+}
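+
+/* A minimal usage sketch (illustrative): installing a static FDB entry at
+ * a known index, then deleting it by writing again with @keep false:
+ *
+ *	l2_lookup.lockeds = true;
+ *	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, index,
+ *					  &l2_lookup, true);
+ *	...
+ *	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, index,
+ *					  &l2_lookup, false);
+ */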
+
+static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if ((crc ^ byte) & (1 << 7)) {
+ crc <<= 1;
+ crc ^= poly;
+ } else {
+ crc <<= 1;
+ }
+ byte <<= 1;
+ }
+ return crc;
+}
+
+/* CRC8 algorithm with non-reversed input, non-reversed output,
+ * no input xor and no output xor. Code customized for receiving
+ * the SJA1105 E/T FDB keys (vlanid, macaddr) as input. CRC polynomial
+ * is also received as argument in the Koopman notation that the switch
+ * hardware stores it in.
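+ *
+ * As a worked example of the conversion done below, a Koopman value of
+ * 0x97 maps to poly = (u8)(1 + (0x97 << 1)) = (u8)0x12F = 0x2F, i.e. the
+ * polynomial x^8 + x^5 + x^3 + x^2 + x + 1 with the x^8 term left
+ * implicit in the 8-bit representation.
+ *
+ * Callers (such as the E/T FDB manipulation paths) are expected to use
+ * the return value as the hash bin of the (vlanid, macaddr) key, e.g.:
+ *
+ *	bin = sja1105et_fdb_hash(priv, addr, vid);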
+ */
+u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_params_entry *l2_lookup_params =
+ priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS].entries;
+ u64 poly_koopman = l2_lookup_params->poly;
+ /* Convert polynomial from Koopman to 'normal' notation */
+ u8 poly = (u8)(1 + (poly_koopman << 1));
+ u64 vlanid = l2_lookup_params->shared_learn ? 0 : vid;
+ u64 input = (vlanid << 48) | ether_addr_to_u64(addr);
+ u8 crc = 0; /* seed */
+ int i;
+
+ /* Feed the eight input bytes into the CRC one at a time, MSB first */
+ for (i = 56; i >= 0; i -= 8) {
+ u8 byte = (input & (0xffull << i)) >> i;
+
+ crc = sja1105_crc8_add(crc, byte, poly);
+ }
+ return crc;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
new file mode 100644
index 000000000..28d4eb5ef
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_DYNAMIC_CONFIG_H
+#define _SJA1105_DYNAMIC_CONFIG_H
+
+#include "sja1105.h"
+#include <linux/packing.h>
+
+/* Special index that can be used for sja1105_dynamic_config_read */
+#define SJA1105_SEARCH -1
+
+struct sja1105_dyn_cmd;
+
+struct sja1105_dynamic_table_ops {
+ /* This returns size_t just to keep the same prototype as the
+ * static config ops, some of whose functions we are reusing.
+ */
+ size_t (*entry_packing)(void *buf, void *entry_ptr, enum packing_op op);
+ void (*cmd_packing)(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op);
+ size_t max_entry_count;
+ size_t packed_size;
+ u64 addr;
+ u8 access;
+};
+
+struct sja1105_mgmt_entry {
+ u64 tsreg;
+ u64 takets;
+ u64 macaddr;
+ u64 destports;
+ u64 enfport;
+ u64 index;
+};
+
+extern const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN];
+extern const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN];
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
new file mode 100644
index 000000000..9133a831e
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+#define SJA1105_SIZE_MAC_AREA (0x02 * 4)
+#define SJA1105_SIZE_HL1_AREA (0x10 * 4)
+#define SJA1105_SIZE_HL2_AREA (0x4 * 4)
+#define SJA1105_SIZE_QLEVEL_AREA (0x8 * 4) /* 0x4 to 0xB */
+#define SJA1105_SIZE_ETHER_AREA (0x17 * 4)
+
+struct sja1105_port_status_mac {
+ u64 n_runt;
+ u64 n_soferr;
+ u64 n_alignerr;
+ u64 n_miierr;
+ u64 typeerr;
+ u64 sizeerr;
+ u64 tctimeout;
+ u64 priorerr;
+ u64 nomaster;
+ u64 memov;
+ u64 memerr;
+ u64 invtyp;
+ u64 intcyov;
+ u64 domerr;
+ u64 pcfbagdrop;
+ u64 spcprior;
+ u64 ageprior;
+ u64 portdrop;
+ u64 lendrop;
+ u64 bagdrop;
+ u64 policeerr;
+ u64 drpnona664err;
+ u64 spcerr;
+ u64 agedrp;
+};
+
+struct sja1105_port_status_hl1 {
+ u64 n_n664err;
+ u64 n_vlanerr;
+ u64 n_unreleased;
+ u64 n_sizeerr;
+ u64 n_crcerr;
+ u64 n_vlnotfound;
+ u64 n_ctpolerr;
+ u64 n_polerr;
+ u64 n_rxfrmsh;
+ u64 n_rxfrm;
+ u64 n_rxbytesh;
+ u64 n_rxbyte;
+ u64 n_txfrmsh;
+ u64 n_txfrm;
+ u64 n_txbytesh;
+ u64 n_txbyte;
+};
+
+struct sja1105_port_status_hl2 {
+ u64 n_qfull;
+ u64 n_part_drop;
+ u64 n_egr_disabled;
+ u64 n_not_reach;
+ u64 qlevel_hwm[8]; /* Only for P/Q/R/S */
+ u64 qlevel[8]; /* Only for P/Q/R/S */
+};
+
+struct sja1105_port_status_ether {
+ u64 n_drops_nolearn;
+ u64 n_drops_noroute;
+ u64 n_drops_ill_dtag;
+ u64 n_drops_dtag;
+ u64 n_drops_sotag;
+ u64 n_drops_sitag;
+ u64 n_drops_utag;
+ u64 n_tx_bytes_1024_2047;
+ u64 n_tx_bytes_512_1023;
+ u64 n_tx_bytes_256_511;
+ u64 n_tx_bytes_128_255;
+ u64 n_tx_bytes_65_127;
+ u64 n_tx_bytes_64;
+ u64 n_tx_mcast;
+ u64 n_tx_bcast;
+ u64 n_rx_bytes_1024_2047;
+ u64 n_rx_bytes_512_1023;
+ u64 n_rx_bytes_256_511;
+ u64 n_rx_bytes_128_255;
+ u64 n_rx_bytes_65_127;
+ u64 n_rx_bytes_64;
+ u64 n_rx_mcast;
+ u64 n_rx_bcast;
+};
+
+struct sja1105_port_status {
+ struct sja1105_port_status_mac mac;
+ struct sja1105_port_status_hl1 hl1;
+ struct sja1105_port_status_hl2 hl2;
+ struct sja1105_port_status_ether ether;
+};
+
+static void
+sja1105_port_status_mac_unpack(void *buf,
+ struct sja1105_port_status_mac *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+
+ sja1105_unpack(p + 0x0, &status->n_runt, 31, 24, 4);
+ sja1105_unpack(p + 0x0, &status->n_soferr, 23, 16, 4);
+ sja1105_unpack(p + 0x0, &status->n_alignerr, 15, 8, 4);
+ sja1105_unpack(p + 0x0, &status->n_miierr, 7, 0, 4);
+ sja1105_unpack(p + 0x1, &status->typeerr, 27, 27, 4);
+ sja1105_unpack(p + 0x1, &status->sizeerr, 26, 26, 4);
+ sja1105_unpack(p + 0x1, &status->tctimeout, 25, 25, 4);
+ sja1105_unpack(p + 0x1, &status->priorerr, 24, 24, 4);
+ sja1105_unpack(p + 0x1, &status->nomaster, 23, 23, 4);
+ sja1105_unpack(p + 0x1, &status->memov, 22, 22, 4);
+ sja1105_unpack(p + 0x1, &status->memerr, 21, 21, 4);
+ sja1105_unpack(p + 0x1, &status->invtyp, 19, 19, 4);
+ sja1105_unpack(p + 0x1, &status->intcyov, 18, 18, 4);
+ sja1105_unpack(p + 0x1, &status->domerr, 17, 17, 4);
+ sja1105_unpack(p + 0x1, &status->pcfbagdrop, 16, 16, 4);
+ sja1105_unpack(p + 0x1, &status->spcprior, 15, 12, 4);
+ sja1105_unpack(p + 0x1, &status->ageprior, 11, 8, 4);
+ sja1105_unpack(p + 0x1, &status->portdrop, 6, 6, 4);
+ sja1105_unpack(p + 0x1, &status->lendrop, 5, 5, 4);
+ sja1105_unpack(p + 0x1, &status->bagdrop, 4, 4, 4);
+ sja1105_unpack(p + 0x1, &status->policeerr, 3, 3, 4);
+ sja1105_unpack(p + 0x1, &status->drpnona664err, 2, 2, 4);
+ sja1105_unpack(p + 0x1, &status->spcerr, 1, 1, 4);
+ sja1105_unpack(p + 0x1, &status->agedrp, 0, 0, 4);
+}
+
+static void
+sja1105_port_status_hl1_unpack(void *buf,
+ struct sja1105_port_status_hl1 *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+
+ sja1105_unpack(p + 0xF, &status->n_n664err, 31, 0, 4);
+ sja1105_unpack(p + 0xE, &status->n_vlanerr, 31, 0, 4);
+ sja1105_unpack(p + 0xD, &status->n_unreleased, 31, 0, 4);
+ sja1105_unpack(p + 0xC, &status->n_sizeerr, 31, 0, 4);
+ sja1105_unpack(p + 0xB, &status->n_crcerr, 31, 0, 4);
+ sja1105_unpack(p + 0xA, &status->n_vlnotfound, 31, 0, 4);
+ sja1105_unpack(p + 0x9, &status->n_ctpolerr, 31, 0, 4);
+ sja1105_unpack(p + 0x8, &status->n_polerr, 31, 0, 4);
+ sja1105_unpack(p + 0x7, &status->n_rxfrmsh, 31, 0, 4);
+ sja1105_unpack(p + 0x6, &status->n_rxfrm, 31, 0, 4);
+ sja1105_unpack(p + 0x5, &status->n_rxbytesh, 31, 0, 4);
+ sja1105_unpack(p + 0x4, &status->n_rxbyte, 31, 0, 4);
+ sja1105_unpack(p + 0x3, &status->n_txfrmsh, 31, 0, 4);
+ sja1105_unpack(p + 0x2, &status->n_txfrm, 31, 0, 4);
+ sja1105_unpack(p + 0x1, &status->n_txbytesh, 31, 0, 4);
+ sja1105_unpack(p + 0x0, &status->n_txbyte, 31, 0, 4);
+ status->n_rxfrm += status->n_rxfrmsh << 32;
+ status->n_rxbyte += status->n_rxbytesh << 32;
+ status->n_txfrm += status->n_txfrmsh << 32;
+ status->n_txbyte += status->n_txbytesh << 32;
+}
+
+static void
+sja1105_port_status_hl2_unpack(void *buf,
+ struct sja1105_port_status_hl2 *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+
+ sja1105_unpack(p + 0x3, &status->n_qfull, 31, 0, 4);
+ sja1105_unpack(p + 0x2, &status->n_part_drop, 31, 0, 4);
+ sja1105_unpack(p + 0x1, &status->n_egr_disabled, 31, 0, 4);
+ sja1105_unpack(p + 0x0, &status->n_not_reach, 31, 0, 4);
+}
+
+static void
+sja1105pqrs_port_status_qlevel_unpack(void *buf,
+ struct sja1105_port_status_hl2 *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ sja1105_unpack(p + i, &status->qlevel_hwm[i], 24, 16, 4);
+ sja1105_unpack(p + i, &status->qlevel[i], 8, 0, 4);
+ }
+}
+
+static void
+sja1105pqrs_port_status_ether_unpack(void *buf,
+ struct sja1105_port_status_ether *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+
+ sja1105_unpack(p + 0x16, &status->n_drops_nolearn, 31, 0, 4);
+ sja1105_unpack(p + 0x15, &status->n_drops_noroute, 31, 0, 4);
+ sja1105_unpack(p + 0x14, &status->n_drops_ill_dtag, 31, 0, 4);
+ sja1105_unpack(p + 0x13, &status->n_drops_dtag, 31, 0, 4);
+ sja1105_unpack(p + 0x12, &status->n_drops_sotag, 31, 0, 4);
+ sja1105_unpack(p + 0x11, &status->n_drops_sitag, 31, 0, 4);
+ sja1105_unpack(p + 0x10, &status->n_drops_utag, 31, 0, 4);
+ sja1105_unpack(p + 0x0F, &status->n_tx_bytes_1024_2047, 31, 0, 4);
+ sja1105_unpack(p + 0x0E, &status->n_tx_bytes_512_1023, 31, 0, 4);
+ sja1105_unpack(p + 0x0D, &status->n_tx_bytes_256_511, 31, 0, 4);
+ sja1105_unpack(p + 0x0C, &status->n_tx_bytes_128_255, 31, 0, 4);
+ sja1105_unpack(p + 0x0B, &status->n_tx_bytes_65_127, 31, 0, 4);
+ sja1105_unpack(p + 0x0A, &status->n_tx_bytes_64, 31, 0, 4);
+ sja1105_unpack(p + 0x09, &status->n_tx_mcast, 31, 0, 4);
+ sja1105_unpack(p + 0x08, &status->n_tx_bcast, 31, 0, 4);
+ sja1105_unpack(p + 0x07, &status->n_rx_bytes_1024_2047, 31, 0, 4);
+ sja1105_unpack(p + 0x06, &status->n_rx_bytes_512_1023, 31, 0, 4);
+ sja1105_unpack(p + 0x05, &status->n_rx_bytes_256_511, 31, 0, 4);
+ sja1105_unpack(p + 0x04, &status->n_rx_bytes_128_255, 31, 0, 4);
+ sja1105_unpack(p + 0x03, &status->n_rx_bytes_65_127, 31, 0, 4);
+ sja1105_unpack(p + 0x02, &status->n_rx_bytes_64, 31, 0, 4);
+ sja1105_unpack(p + 0x01, &status->n_rx_mcast, 31, 0, 4);
+ sja1105_unpack(p + 0x00, &status->n_rx_bcast, 31, 0, 4);
+}
+
+static int
+sja1105pqrs_port_status_get_ether(struct sja1105_private *priv,
+ struct sja1105_port_status_ether *ether,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_ETHER_AREA] = {0};
+ int rc;
+
+ /* Ethernet statistics area */
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->ether_stats[port],
+ packed_buf, SJA1105_SIZE_ETHER_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105pqrs_port_status_ether_unpack(packed_buf, ether);
+
+ return 0;
+}
+
+static int sja1105_port_status_get_mac(struct sja1105_private *priv,
+ struct sja1105_port_status_mac *status,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_MAC_AREA] = {0};
+ int rc;
+
+ /* MAC area */
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac[port], packed_buf,
+ SJA1105_SIZE_MAC_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105_port_status_mac_unpack(packed_buf, status);
+
+ return 0;
+}
+
+static int sja1105_port_status_get_hl1(struct sja1105_private *priv,
+ struct sja1105_port_status_hl1 *status,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_HL1_AREA] = {0};
+ int rc;
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac_hl1[port], packed_buf,
+ SJA1105_SIZE_HL1_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105_port_status_hl1_unpack(packed_buf, status);
+
+ return 0;
+}
+
+static int sja1105_port_status_get_hl2(struct sja1105_private *priv,
+ struct sja1105_port_status_hl2 *status,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_QLEVEL_AREA] = {0};
+ int rc;
+
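+ /* Note that packed_buf is sized for the larger qlevel area read
+ * further down; the high-level area 2 read below only uses its first
+ * SJA1105_SIZE_HL2_AREA bytes.
+ */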
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac_hl2[port], packed_buf,
+ SJA1105_SIZE_HL2_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105_port_status_hl2_unpack(packed_buf, status);
+
+ /* Code below is strictly P/Q/R/S specific. */
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return 0;
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->qlevel[port], packed_buf,
+ SJA1105_SIZE_QLEVEL_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105pqrs_port_status_qlevel_unpack(packed_buf, status);
+
+ return 0;
+}
+
+static int sja1105_port_status_get(struct sja1105_private *priv,
+ struct sja1105_port_status *status,
+ int port)
+{
+ int rc;
+
+ rc = sja1105_port_status_get_mac(priv, &status->mac, port);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_port_status_get_hl1(priv, &status->hl1, port);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_port_status_get_hl2(priv, &status->hl2, port);
+ if (rc < 0)
+ return rc;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return 0;
+
+ return sja1105pqrs_port_status_get_ether(priv, &status->ether, port);
+}
+
+static char sja1105_port_stats[][ETH_GSTRING_LEN] = {
+ /* MAC-Level Diagnostic Counters */
+ "n_runt",
+ "n_soferr",
+ "n_alignerr",
+ "n_miierr",
+ /* MAC-Level Diagnostic Flags */
+ "typeerr",
+ "sizeerr",
+ "tctimeout",
+ "priorerr",
+ "nomaster",
+ "memov",
+ "memerr",
+ "invtyp",
+ "intcyov",
+ "domerr",
+ "pcfbagdrop",
+ "spcprior",
+ "ageprior",
+ "portdrop",
+ "lendrop",
+ "bagdrop",
+ "policeerr",
+ "drpnona664err",
+ "spcerr",
+ "agedrp",
+ /* High-Level Diagnostic Counters */
+ "n_n664err",
+ "n_vlanerr",
+ "n_unreleased",
+ "n_sizeerr",
+ "n_crcerr",
+ "n_vlnotfound",
+ "n_ctpolerr",
+ "n_polerr",
+ "n_rxfrm",
+ "n_rxbyte",
+ "n_txfrm",
+ "n_txbyte",
+ "n_qfull",
+ "n_part_drop",
+ "n_egr_disabled",
+ "n_not_reach",
+};
+
+static char sja1105pqrs_extra_port_stats[][ETH_GSTRING_LEN] = {
+ /* Queue Levels */
+ "qlevel_hwm_0",
+ "qlevel_hwm_1",
+ "qlevel_hwm_2",
+ "qlevel_hwm_3",
+ "qlevel_hwm_4",
+ "qlevel_hwm_5",
+ "qlevel_hwm_6",
+ "qlevel_hwm_7",
+ "qlevel_0",
+ "qlevel_1",
+ "qlevel_2",
+ "qlevel_3",
+ "qlevel_4",
+ "qlevel_5",
+ "qlevel_6",
+ "qlevel_7",
+ /* Ether Stats */
+ "n_drops_nolearn",
+ "n_drops_noroute",
+ "n_drops_ill_dtag",
+ "n_drops_dtag",
+ "n_drops_sotag",
+ "n_drops_sitag",
+ "n_drops_utag",
+ "n_tx_bytes_1024_2047",
+ "n_tx_bytes_512_1023",
+ "n_tx_bytes_256_511",
+ "n_tx_bytes_128_255",
+ "n_tx_bytes_65_127",
+ "n_tx_bytes_64",
+ "n_tx_mcast",
+ "n_tx_bcast",
+ "n_rx_bytes_1024_2047",
+ "n_rx_bytes_512_1023",
+ "n_rx_bytes_256_511",
+ "n_rx_bytes_128_255",
+ "n_rx_bytes_65_127",
+ "n_rx_bytes_64",
+ "n_rx_mcast",
+ "n_rx_bcast",
+};
+
+void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port_status *status;
+ int rc, i, k = 0;
+
+ status = kzalloc(sizeof(*status), GFP_KERNEL);
+ if (!status)
+ goto out;
+
+ rc = sja1105_port_status_get(priv, status, port);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read port %d counters: %d\n",
+ port, rc);
+ goto out;
+ }
+ memset(data, 0, ARRAY_SIZE(sja1105_port_stats) * sizeof(u64));
+ data[k++] = status->mac.n_runt;
+ data[k++] = status->mac.n_soferr;
+ data[k++] = status->mac.n_alignerr;
+ data[k++] = status->mac.n_miierr;
+ data[k++] = status->mac.typeerr;
+ data[k++] = status->mac.sizeerr;
+ data[k++] = status->mac.tctimeout;
+ data[k++] = status->mac.priorerr;
+ data[k++] = status->mac.nomaster;
+ data[k++] = status->mac.memov;
+ data[k++] = status->mac.memerr;
+ data[k++] = status->mac.invtyp;
+ data[k++] = status->mac.intcyov;
+ data[k++] = status->mac.domerr;
+ data[k++] = status->mac.pcfbagdrop;
+ data[k++] = status->mac.spcprior;
+ data[k++] = status->mac.ageprior;
+ data[k++] = status->mac.portdrop;
+ data[k++] = status->mac.lendrop;
+ data[k++] = status->mac.bagdrop;
+ data[k++] = status->mac.policeerr;
+ data[k++] = status->mac.drpnona664err;
+ data[k++] = status->mac.spcerr;
+ data[k++] = status->mac.agedrp;
+ data[k++] = status->hl1.n_n664err;
+ data[k++] = status->hl1.n_vlanerr;
+ data[k++] = status->hl1.n_unreleased;
+ data[k++] = status->hl1.n_sizeerr;
+ data[k++] = status->hl1.n_crcerr;
+ data[k++] = status->hl1.n_vlnotfound;
+ data[k++] = status->hl1.n_ctpolerr;
+ data[k++] = status->hl1.n_polerr;
+ data[k++] = status->hl1.n_rxfrm;
+ data[k++] = status->hl1.n_rxbyte;
+ data[k++] = status->hl1.n_txfrm;
+ data[k++] = status->hl1.n_txbyte;
+ data[k++] = status->hl2.n_qfull;
+ data[k++] = status->hl2.n_part_drop;
+ data[k++] = status->hl2.n_egr_disabled;
+ data[k++] = status->hl2.n_not_reach;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ goto out;
+
+ memset(data + k, 0, ARRAY_SIZE(sja1105pqrs_extra_port_stats) *
+ sizeof(u64));
+ for (i = 0; i < 8; i++) {
+ data[k++] = status->hl2.qlevel_hwm[i];
+ data[k++] = status->hl2.qlevel[i];
+ }
+ data[k++] = status->ether.n_drops_nolearn;
+ data[k++] = status->ether.n_drops_noroute;
+ data[k++] = status->ether.n_drops_ill_dtag;
+ data[k++] = status->ether.n_drops_dtag;
+ data[k++] = status->ether.n_drops_sotag;
+ data[k++] = status->ether.n_drops_sitag;
+ data[k++] = status->ether.n_drops_utag;
+ data[k++] = status->ether.n_tx_bytes_1024_2047;
+ data[k++] = status->ether.n_tx_bytes_512_1023;
+ data[k++] = status->ether.n_tx_bytes_256_511;
+ data[k++] = status->ether.n_tx_bytes_128_255;
+ data[k++] = status->ether.n_tx_bytes_65_127;
+ data[k++] = status->ether.n_tx_bytes_64;
+ data[k++] = status->ether.n_tx_mcast;
+ data[k++] = status->ether.n_tx_bcast;
+ data[k++] = status->ether.n_rx_bytes_1024_2047;
+ data[k++] = status->ether.n_rx_bytes_512_1023;
+ data[k++] = status->ether.n_rx_bytes_256_511;
+ data[k++] = status->ether.n_rx_bytes_128_255;
+ data[k++] = status->ether.n_rx_bytes_65_127;
+ data[k++] = status->ether.n_rx_bytes_64;
+ data[k++] = status->ether.n_rx_mcast;
+ data[k++] = status->ether.n_rx_bcast;
+out:
+ kfree(status);
+}
+
+void sja1105_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(sja1105_port_stats); i++) {
+ strlcpy(p, sja1105_port_stats[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return;
+ for (i = 0; i < ARRAY_SIZE(sja1105pqrs_extra_port_stats); i++) {
+ strlcpy(p, sja1105pqrs_extra_port_stats[i],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ int count = ARRAY_SIZE(sja1105_port_stats);
+ struct sja1105_private *priv = ds->priv;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ if (priv->info->device_id == SJA1105PR_DEVICE_ID ||
+ priv->info->device_id == SJA1105QS_DEVICE_ID)
+ count += ARRAY_SIZE(sja1105pqrs_extra_port_stats);
+
+ return count;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
new file mode 100644
index 000000000..12e76020b
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2020, NXP Semiconductors
+ */
+#include "sja1105.h"
+#include "sja1105_vl.h"
+
+struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
+ unsigned long cookie)
+{
+ struct sja1105_rule *rule;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list)
+ if (rule->cookie == cookie)
+ return rule;
+
+ return NULL;
+}
+
+static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
+{
+ int i;
+
+ for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
+ if (!priv->flow_block.l2_policer_used[i])
+ return i;
+
+ return -1;
+}
+
+static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie, int port,
+ u64 rate_bytes_per_sec,
+ u32 burst)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct sja1105_l2_policing_entry *policing;
+ bool new_rule = false;
+ unsigned long p;
+ int rc;
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_BCAST_POLICER;
+ rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
+ rule->key.type = SJA1105_KEY_BCAST;
+ new_rule = true;
+ }
+
+ if (rule->bcast_pol.sharindx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[(SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port].sharindx != port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port already has a broadcast policer");
+ rc = -EEXIST;
+ goto out;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ /* Make the broadcast policers of all ports attached to this block
+ * point to the newly allocated policer
+ */
+ for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
+ int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + p;
+
+ policing[bcast].sharindx = rule->bcast_pol.sharindx;
+ }
+
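+ /* The 512/10^6 factor below converts a rate in bytes/s into the
+ * hardware RATE field, which (judging by the conversion alone) is
+ * expressed in units of 15625 bits/s:
+ * bytes/s * 512 / 10^6 == (bits/s) / 15625.
+ */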
+ policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
+ 512, 1000000);
+ policing[rule->bcast_pol.sharindx].smax = burst;
+
+ /* TODO: support per-flow MTU */
+ policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
+ ETH_FCS_LEN;
+
+ rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+
+out:
+ if (rc == 0 && new_rule) {
+ priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
+ list_add(&rule->list, &priv->flow_block.rules);
+ } else if (new_rule) {
+ kfree(rule);
+ }
+
+ return rc;
+}
+
+static int sja1105_setup_tc_policer(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie, int port, int tc,
+ u64 rate_bytes_per_sec,
+ u32 burst)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct sja1105_l2_policing_entry *policing;
+ bool new_rule = false;
+ unsigned long p;
+ int rc;
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_TC_POLICER;
+ rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
+ rule->key.type = SJA1105_KEY_TC;
+ rule->key.tc.pcp = tc;
+ new_rule = true;
+ }
+
+ if (rule->tc_pol.sharindx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port-TC pair already has an L2 policer");
+ rc = -EEXIST;
+ goto out;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ /* Make the policers for traffic class @tc of all ports attached to
+ * this block point to the newly allocated policer
+ */
+ for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
+ int index = (p * SJA1105_NUM_TC) + tc;
+
+ policing[index].sharindx = rule->tc_pol.sharindx;
+ }
+
+ policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
+ 512, 1000000);
+ policing[rule->tc_pol.sharindx].smax = burst;
+
+ /* TODO: support per-flow MTU */
+ policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
+ ETH_FCS_LEN;
+
+ rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+
+out:
+ if (rc == 0 && new_rule) {
+ priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
+ list_add(&rule->list, &priv->flow_block.rules);
+ } else if (new_rule) {
+ kfree(rule);
+ }
+
+ return rc;
+}
+
+static int sja1105_flower_policer(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key,
+ u64 rate_bytes_per_sec,
+ u32 burst)
+{
+ switch (key->type) {
+ case SJA1105_KEY_BCAST:
+ return sja1105_setup_bcast_policer(priv, extack, cookie, port,
+ rate_bytes_per_sec, burst);
+ case SJA1105_KEY_TC:
+ return sja1105_setup_tc_policer(priv, extack, cookie, port,
+ key->tc.pcp, rate_bytes_per_sec,
+ burst);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int sja1105_flower_parse_key(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ struct sja1105_key *key)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ bool is_bcast_dmac = false;
+ u64 dmac = U64_MAX;
+ u16 vid = U16_MAX;
+ u16 pcp = U16_MAX;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ if (match.key->n_proto) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on protocol not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!ether_addr_equal_masked(match.key->src, null,
+ match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ether_addr_equal(match.mask->dst, bcast)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ dmac = ether_addr_to_u64(match.key->dst);
+ is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.mask->vlan_id &&
+ match.mask->vlan_id != VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on VID is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->vlan_priority &&
+ match.mask->vlan_priority != 0x7) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on PCP is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->vlan_id)
+ vid = match.key->vlan_id;
+ if (match.mask->vlan_priority)
+ pcp = match.key->vlan_priority;
+ }
+
+ if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
+ key->type = SJA1105_KEY_BCAST;
+ return 0;
+ }
+ if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
+ key->type = SJA1105_KEY_TC;
+ key->tc.pcp = pcp;
+ return 0;
+ }
+ if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
+ key->type = SJA1105_KEY_VLAN_AWARE_VL;
+ key->vl.dmac = dmac;
+ key->vl.vid = vid;
+ key->vl.pcp = pcp;
+ return 0;
+ }
+ if (dmac != U64_MAX) {
+ key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
+ key->vl.dmac = dmac;
+ return 0;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
+ return -EOPNOTSUPP;
+}
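+
+/* Illustrative examples (assuming standard tc-flower syntax) of filters
+ * accepted by the parser above:
+ *
+ *	tc filter add dev swp2 ingress flower skip_sw \
+ *		dst_mac ff:ff:ff:ff:ff:ff \
+ *		action police rate 10mbit burst 64k
+ *
+ * parses as SJA1105_KEY_BCAST, while:
+ *
+ *	tc filter add dev swp2 ingress protocol 802.1Q flower skip_sw \
+ *		vlan_prio 7 \
+ *		action police rate 100mbit burst 64k
+ *
+ * parses as SJA1105_KEY_TC with pcp 7.
+ */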
+
+int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct sja1105_private *priv = ds->priv;
+ const struct flow_action_entry *act;
+ unsigned long cookie = cls->cookie;
+ bool routing_rule = false;
+ struct sja1105_key key;
+ bool gate_rule = false;
+ bool vl_rule = false;
+ int rc, i;
+
+ rc = sja1105_flower_parse_key(priv, extack, cls, &key);
+ if (rc)
+ return rc;
+
+ rc = -EOPNOTSUPP;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ rc = sja1105_flower_policer(priv, port, extack, cookie,
+ &key,
+ act->police.rate_bytes_ps,
+ act->police.burst);
+ if (rc)
+ goto out;
+ break;
+ case FLOW_ACTION_TRAP: {
+ int cpu = dsa_upstream_port(ds, port);
+
+ routing_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, BIT(cpu), true);
+ if (rc)
+ goto out;
+ break;
+ }
+ case FLOW_ACTION_REDIRECT: {
+ struct dsa_port *to_dp;
+
+ to_dp = dsa_port_from_netdev(act->dev);
+ if (IS_ERR(to_dp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a switch port");
+ return -EOPNOTSUPP;
+ }
+
+ routing_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, BIT(to_dp->index), true);
+ if (rc)
+ goto out;
+ break;
+ }
+ case FLOW_ACTION_DROP:
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, 0, false);
+ if (rc)
+ goto out;
+ break;
+ case FLOW_ACTION_GATE:
+ gate_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_gate(priv, port, extack, cookie,
+ &key, act->gate.index,
+ act->gate.prio,
+ act->gate.basetime,
+ act->gate.cycletime,
+ act->gate.cycletimeext,
+ act->gate.num_entries,
+ act->gate.entries);
+ if (rc)
+ goto out;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ if (vl_rule && !rc) {
+ /* Delay scheduling configuration until DESTPORTS has been
+ * populated by all other actions.
+ */
+ if (gate_rule) {
+ if (!routing_rule) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload gate action together with redirect or trap");
+ return -EOPNOTSUPP;
+ }
+ rc = sja1105_init_scheduling(priv);
+ if (rc)
+ goto out;
+ }
+
+ rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
+ }
+
+out:
+ return rc;
+}
+
+int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
+ struct sja1105_l2_policing_entry *policing;
+ int old_sharindx;
+
+ if (!rule)
+ return 0;
+
+ if (rule->type == SJA1105_RULE_VL)
+ return sja1105_vl_delete(priv, port, rule, cls->common.extack);
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (rule->type == SJA1105_RULE_BCAST_POLICER) {
+ int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
+
+ old_sharindx = policing[bcast].sharindx;
+ policing[bcast].sharindx = port;
+ } else if (rule->type == SJA1105_RULE_TC_POLICER) {
+ int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;
+
+ old_sharindx = policing[index].sharindx;
+ policing[index].sharindx = port;
+ } else {
+ return -EINVAL;
+ }
+
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ priv->flow_block.l2_policer_used[old_sharindx] = false;
+ list_del(&rule->list);
+ kfree(rule);
+ }
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
+int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
+ int rc;
+
+ if (!rule)
+ return 0;
+
+ if (rule->type != SJA1105_RULE_VL)
+ return 0;
+
+ rc = sja1105_vl_stats(priv, port, rule, &cls->stats,
+ cls->common.extack);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+void sja1105_flower_setup(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ int port;
+
+ INIT_LIST_HEAD(&priv->flow_block.rules);
+
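+ /* Policers 0..SJA1105_NUM_PORTS-1 act as the default per-port
+ * policers (the deletion path above restores sharindx to the port
+ * number), so mark them as in use from the start.
+ */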
+ for (port = 0; port < SJA1105_NUM_PORTS; port++)
+ priv->flow_block.l2_policer_used[port] = true;
+}
+
+void sja1105_flower_teardown(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule;
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &priv->flow_block.rules) {
+ rule = list_entry(pos, struct sja1105_rule, list);
+ list_del(&rule->list);
+ kfree(rule);
+ }
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
new file mode 100644
index 000000000..4362fe0f3
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -0,0 +1,3656 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/spi/spi.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/phylink.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_device.h>
+#include <linux/netdev_features.h>
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/dsa/8021q.h>
+#include "sja1105.h"
+#include "sja1105_sgmii.h"
+#include "sja1105_tas.h"
+
+#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
+
+static const struct dsa_switch_ops sja1105_switch_ops;
+
+static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
+ unsigned int startup_delay)
+{
+ gpiod_set_value_cansleep(gpio, 1);
+ /* Wait for minimum reset pulse length */
+ msleep(pulse_len);
+ gpiod_set_value_cansleep(gpio, 0);
+ /* Wait until chip is ready after reset */
+ msleep(startup_delay);
+}
+
+static void
+sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
+ int from, int to, bool allow)
+{
+ if (allow) {
+ l2_fwd[from].bc_domain |= BIT(to);
+ l2_fwd[from].reach_port |= BIT(to);
+ l2_fwd[from].fl_domain |= BIT(to);
+ } else {
+ l2_fwd[from].bc_domain &= ~BIT(to);
+ l2_fwd[from].reach_port &= ~BIT(to);
+ l2_fwd[from].fl_domain &= ~BIT(to);
+ }
+}
+
+/* Structure used to temporarily transport device tree
+ * settings into sja1105_setup
+ */
+struct sja1105_dt_port {
+ phy_interface_t phy_mode;
+ sja1105_mii_role_t role;
+};
+
+static int sja1105_init_mac_settings(struct sja1105_private *priv)
+{
+ struct sja1105_mac_config_entry default_mac = {
+ /* Enable all 8 priority queues on egress.
+ * Every queue i holds top[i] - base[i] frames.
+ * Sum of top[i] - base[i] is 511 (max hardware limit).
+ */
+ .top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
+ .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
+ .enabled = {true, true, true, true, true, true, true, true},
+ /* Keep standard IFG of 12 bytes on egress. */
+ .ifg = 0,
+ /* Always put the MAC speed in automatic mode, where it can be
+ * adjusted at runtime by PHYLINK.
+ */
+ .speed = SJA1105_SPEED_AUTO,
+ /* No static correction for 1-step 1588 events */
+ .tp_delin = 0,
+ .tp_delout = 0,
+ /* Disable aging for critical TTEthernet traffic */
+ .maxage = 0xFF,
+ /* Internal VLAN (pvid) to apply to untagged ingress */
+ .vlanprio = 0,
+ .vlanid = 1,
+ .ing_mirr = false,
+ .egr_mirr = false,
+ /* Don't drop traffic with other EtherType than ETH_P_IP */
+ .drpnona664 = false,
+ /* Don't drop double-tagged traffic */
+ .drpdtag = false,
+ /* Don't drop untagged traffic */
+ .drpuntag = false,
+ /* Don't retag 802.1p (VID 0) traffic with the pvid */
+ .retag = false,
+ /* Disable learning and I/O on user ports by default -
+ * STP will enable it.
+ */
+ .dyn_learn = false,
+ .egress = false,
+ .ingress = false,
+ };
+ struct sja1105_mac_config_entry *mac;
+ struct sja1105_table *table;
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
+
+ /* Discard previous MAC Configuration Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_NUM_PORTS,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_NUM_PORTS;
+
+ mac = table->entries;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ mac[i] = default_mac;
+ if (i == dsa_upstream_port(priv->ds, i)) {
+ /* STP doesn't get called for CPU port, so we need to
+ * set the I/O parameters statically.
+ */
+ mac[i].dyn_learn = true;
+ mac[i].ingress = true;
+ mac[i].egress = true;
+ }
+ }
+
+ return 0;
+}
+
+static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port)
+{
+ if (priv->info->part_no != SJA1105R_PART_NO &&
+ priv->info->part_no != SJA1105S_PART_NO)
+ return false;
+
+ if (port != SJA1105_SGMII_PORT)
+ return false;
+
+ if (dsa_is_unused_port(priv->ds, port))
+ return false;
+
+ return true;
+}
+
+static int sja1105_init_mii_settings(struct sja1105_private *priv,
+ struct sja1105_dt_port *ports)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct sja1105_xmii_params_entry *mii;
+ struct sja1105_table *table;
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
+
+ /* Discard previous xMII Mode Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ /* Override table based on PHYLINK DT bindings */
+ table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
+
+ mii = table->entries;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ if (dsa_is_unused_port(priv->ds, i))
+ continue;
+
+ switch (ports[i].phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ mii->xmii_mode[i] = XMII_MODE_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ mii->xmii_mode[i] = XMII_MODE_RMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mii->xmii_mode[i] = XMII_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (!sja1105_supports_sgmii(priv, i))
+ return -EINVAL;
+ mii->xmii_mode[i] = XMII_MODE_SGMII;
+ break;
+ default:
+ dev_err(dev, "Unsupported PHY mode %s!\n",
+ phy_modes(ports[i].phy_mode));
+ return -EINVAL;
+ }
+
+ /* Even though the SerDes port is able to drive SGMII autoneg
+ * like a PHY would, from the perspective of the XMII tables,
+ * the SGMII port should always be put in MAC mode.
+ */
+ if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII)
+ mii->phy_mac[i] = XMII_MAC;
+ else
+ mii->phy_mac[i] = ports[i].role;
+ }
+ return 0;
+}
+
+static int sja1105_init_static_fdb(struct sja1105_private *priv)
+{
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+
+ /* We only populate the FDB table through dynamic
+ * L2 Address Lookup entries
+ */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+ return 0;
+}
+
+static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
+{
+ struct sja1105_table *table;
+ u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS;
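+ /* 1024 FDB entries shared across 5 ports works out to a per-port
+ * quota of 204 dynamically learned addresses (maxaddrp below).
+ */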
+ struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
+ /* Learned FDB entries are forgotten after 300 seconds */
+ .maxage = SJA1105_AGEING_TIME_MS(300000),
+ /* All entries within a FDB bin are available for learning */
+ .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
+ /* And the P/Q/R/S equivalent setting: */
+ .start_dynspc = 0,
+ .maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries,
+ max_fdb_entries, max_fdb_entries, },
+ /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
+ .poly = 0x97,
+ /* This selects between Independent VLAN Learning (IVL) and
+ * Shared VLAN Learning (SVL)
+ */
+ .shared_learn = true,
+ /* Don't discard management traffic based on ENFPORT -
+ * we don't perform SMAC port enforcement anyway, so
+ * what we are setting here doesn't matter.
+ */
+ .no_enf_hostprt = false,
+ /* Don't learn SMAC for mac_fltres1 and mac_fltres0.
+ * Maybe correlate with no_linklocal_learn from bridge driver?
+ */
+ .no_mgmt_learn = true,
+ /* P/Q/R/S only */
+ .use_static = true,
+ /* Dynamically learned FDB entries can overwrite other (older)
+ * dynamic FDB entries
+ */
+ .owr_dyn = true,
+ .drpnolearn = true,
+ };
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;
+
+ /* This table only has a single entry */
+ ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
+ default_l2_lookup_params;
+
+ return 0;
+}
+
+/* Set up a default VLAN for untagged traffic injected from the CPU
+ * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
+ * All DT-defined ports are members of this VLAN, and there are no
+ * restrictions on forwarding (since the CPU selects the destination).
+ * Frames from this VLAN will always be transmitted as untagged, and
+ * neither the bridge nor the 8021q module can create this VLAN ID.
+ */
+static int sja1105_init_static_vlan(struct sja1105_private *priv)
+{
+ struct sja1105_table *table;
+ struct sja1105_vlan_lookup_entry pvid = {
+ .ving_mirr = 0,
+ .vegr_mirr = 0,
+ .vmemb_port = 0,
+ .vlan_bc = 0,
+ .tag_port = 0,
+ .vlanid = SJA1105_DEFAULT_VLAN,
+ };
+ struct dsa_switch *ds = priv->ds;
+ int port;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = 1;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ struct sja1105_bridge_vlan *v;
+
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ pvid.vmemb_port |= BIT(port);
+ pvid.vlan_bc |= BIT(port);
+ pvid.tag_port &= ~BIT(port);
+
+ v = kzalloc(sizeof(*v), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ v->port = port;
+ v->vid = SJA1105_DEFAULT_VLAN;
+ v->untagged = true;
+ if (dsa_is_cpu_port(ds, port))
+ v->pvid = true;
+ list_add(&v->list, &priv->dsa_8021q_vlans);
+
+ v = kmemdup(v, sizeof(*v), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ list_add(&v->list, &priv->bridge_vlans);
+ }
+
+ ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
+ return 0;
+}
+
+static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_entry *l2fwd;
+ struct sja1105_table *table;
+ int i, j;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;
+
+ l2fwd = table->entries;
+
+ /* First 5 entries define the forwarding rules */
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ unsigned int upstream = dsa_upstream_port(priv->ds, i);
+
+ for (j = 0; j < SJA1105_NUM_TC; j++)
+ l2fwd[i].vlan_pmap[j] = j;
+
+ if (i == upstream)
+ continue;
+
+ sja1105_port_allow_traffic(l2fwd, i, upstream, true);
+ sja1105_port_allow_traffic(l2fwd, upstream, i, true);
+ }
+ /* Next 8 entries define VLAN PCP mapping from ingress to egress.
+ * Create a one-to-one mapping.
+ */
+ for (i = 0; i < SJA1105_NUM_TC; i++)
+ for (j = 0; j < SJA1105_NUM_PORTS; j++)
+ l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;
+
+ return 0;
+}
+
+static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
+ /* Disallow dynamic reconfiguration of vlan_pmap */
+ .max_dynp = 0,
+ /* Use a single memory partition for all ingress queues */
+ .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
+ };
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
+
+ /* This table only has a single entry */
+ ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
+ default_l2fwd_params;
+
+ return 0;
+}
+
+void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
+ struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
+ struct sja1105_table *table;
+ int max_mem;
+
+ /* VLAN retagging is implemented using a loopback port that consumes
+ * frame buffers. That leaves less for us.
+ */
+ if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
+ max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
+ else
+ max_mem = SJA1105_MAX_FRAME_MEMORY;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
+ l2_fwd_params = table->entries;
+ l2_fwd_params->part_spc[0] = max_mem;
+
+ /* If we have any critical-traffic virtual links, we need to reserve
+ * some frame buffer memory for them. At the moment, hardcode the value
+ * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
+ * remaining for best-effort traffic. TODO: figure out a more flexible
+ * way to perform the frame buffer partitioning.
+ */
+ if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
+ return;
+
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ vl_fwd_params = table->entries;
+
+ l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
+ vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
+}
+
+static int sja1105_init_general_params(struct sja1105_private *priv)
+{
+ struct sja1105_general_params_entry default_general_params = {
+ /* Allow dynamic changing of the mirror port */
+ .mirr_ptacu = true,
+ .switchid = priv->ds->index,
+ /* Priority queue for link-local management frames
+ * (both ingress to and egress from CPU - PTP, STP etc)
+ */
+ .hostprio = 7,
+ .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
+ .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
+ .incl_srcpt1 = false,
+ .send_meta1 = false,
+ .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
+ .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
+ .incl_srcpt0 = false,
+ .send_meta0 = false,
+ /* The destination for traffic matching mac_fltres1 and
+ * mac_fltres0 on all ports except host_port. Such traffic
+ * received on host_port itself would be dropped, except
+ * by installing a temporary 'management route'
+ */
+ .host_port = dsa_upstream_port(priv->ds, 0),
+ /* Default to an invalid value */
+ .mirr_port = SJA1105_NUM_PORTS,
+ /* Link-local traffic received on casc_port will be forwarded
+ * to host_port without embedding the source port and device ID
+ * info in the destination MAC address (presumably because it
+ * is a cascaded port and a downstream SJA switch already did
+ * that). Default to an invalid port (to disable the feature)
+ * and overwrite this if we find any DSA (cascaded) ports.
+ */
+ .casc_port = SJA1105_NUM_PORTS,
+ /* No TTEthernet */
+ .vllupformat = SJA1105_VL_FORMAT_PSFP,
+ .vlmarker = 0,
+ .vlmask = 0,
+ /* Only update correctionField for 1-step PTP (L2 transport) */
+ .ignore2stf = 0,
+ /* Forcefully disable VLAN filtering by telling
+ * the switch that VLAN has a different EtherType.
+ */
+ .tpid = ETH_P_SJA1105,
+ .tpid2 = ETH_P_SJA1105,
+ };
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;
+
+ /* This table only has a single entry */
+ ((struct sja1105_general_params_entry *)table->entries)[0] =
+ default_general_params;
+
+ return 0;
+}
+
+static int sja1105_init_avb_params(struct sja1105_private *priv)
+{
+ struct sja1105_avb_params_entry *avb;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
+
+ /* Discard previous AVB Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
+
+ avb = table->entries;
+
+ /* Configure the MAC addresses for meta frames */
+ avb->destmeta = SJA1105_META_DMAC;
+ avb->srcmeta = SJA1105_META_SMAC;
+ /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
+ * default. This is because there might be boards with a hardware
+ * layout where enabling the pin as output might cause an electrical
+ * clash. On E/T the pin is always an output, which the board designers
+ * probably already knew, so even if there are going to be electrical
+ * issues, there's nothing we can do.
+ */
+ avb->cas_master = false;
+
+ return 0;
+}
+
+/* The L2 policing table is 2-stage. The table is looked up for each frame
+ * according to the ingress port, whether it was broadcast or not, and the
+ * classified traffic class (given by VLAN PCP). This portion of the lookup is
+ * fixed, and gives access to the SHARINDX, an indirection register pointing
+ * within the policing table itself, which is used to resolve the policer that
+ * will be used for this frame.
+ *
+ * Stage 1 Stage 2
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 0 |SHARINDX| | Policer 0: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 1 |SHARINDX| | Policer 1: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 2: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 7 |SHARINDX| | Policer 3: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 1 TC 0 |SHARINDX| | Policer 4: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 5: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 1 TC 7 |SHARINDX| | Policer 6: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 7: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 4 TC 7 |SHARINDX| ...
+ * +------------+--------+
+ * |Port 0 BCAST|SHARINDX| ...
+ * +------------+--------+
+ * |Port 1 BCAST|SHARINDX| ...
+ * +------------+--------+
+ * ... ...
+ * +------------+--------+ +---------------------------------+
+ * |Port 4 BCAST|SHARINDX| | Policer 44: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ *
+ * In this driver, we shall use policers 0-4 as statically allocated port
+ * (matchall) policers. So we need to make the SHARINDX for all lookups
+ * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
+ * lookup) equal.
+ * The remaining policers (40) shall be dynamically allocated for flower
+ * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
+ */
+#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
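+/* For example, SJA1105_RATE_MBPS(1000) = 64000, the line-rate value
+ * programmed below, which implies that one unit of the hardware RATE field
+ * is 1000000 / 64000 = 15.625 kbps.
+ */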
+
+static int sja1105_init_l2_policing(struct sja1105_private *priv)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_table *table;
+ int port, tc;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
+
+ /* Discard previous L2 Policing Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;
+
+ policing = table->entries;
+
+ /* Setup shared indices for the matchall policers */
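+ /* With 5 ports and 8 traffic classes, stage-1 entries 0-39 are the
+ * per-port per-TC lookups and entries 40-44 the per-port broadcast
+ * lookups; all 9 entries of port p share policer p (SHARINDX = p).
+ */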
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
+
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+ policing[port * SJA1105_NUM_TC + tc].sharindx = port;
+
+ policing[bcast].sharindx = port;
+ }
+
+ /* Setup the matchall policer parameters */
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(priv->ds, port))
+ mtu += VLAN_HLEN;
+
+ policing[port].smax = 65535; /* Burst size in bytes */
+ policing[port].rate = SJA1105_RATE_MBPS(1000);
+ policing[port].maxlen = mtu;
+ policing[port].partition = 0;
+ }
+
+ return 0;
+}
+
+static int sja1105_static_config_load(struct sja1105_private *priv,
+ struct sja1105_dt_port *ports)
+{
+ int rc;
+
+ sja1105_static_config_free(&priv->static_config);
+ rc = sja1105_static_config_init(&priv->static_config,
+ priv->info->static_ops,
+ priv->info->device_id);
+ if (rc)
+ return rc;
+
+ /* Build static configuration */
+ rc = sja1105_init_mac_settings(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_mii_settings(priv, ports);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_static_fdb(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_static_vlan(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_lookup_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_forwarding(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_forwarding_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_policing(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_general_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_avb_params(priv);
+ if (rc < 0)
+ return rc;
+
+ /* Send initial configuration to hardware via SPI */
+ return sja1105_static_config_upload(priv);
+}
+
+static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
+ const struct sja1105_dt_port *ports)
+{
+ int i;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ if (ports[i].role == XMII_MAC)
+ continue;
+
+ if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ priv->rgmii_rx_delay[i] = true;
+
+ if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ priv->rgmii_tx_delay[i] = true;
+
+ if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
+ !priv->info->setup_rgmii_delay)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int sja1105_parse_ports_node(struct sja1105_private *priv,
+ struct sja1105_dt_port *ports,
+ struct device_node *ports_node)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct device_node *child;
+
+ for_each_available_child_of_node(ports_node, child) {
+ struct device_node *phy_node;
+ phy_interface_t phy_mode;
+ u32 index;
+ int err;
+
+ /* Get switch port number from DT */
+ if (of_property_read_u32(child, "reg", &index) < 0) {
+ dev_err(dev, "Port number not defined in device tree "
+ "(property \"reg\")\n");
+ of_node_put(child);
+ return -ENODEV;
+ }
+
+ /* Get PHY mode from DT */
+ err = of_get_phy_mode(child, &phy_mode);
+ if (err) {
+ dev_err(dev, "Failed to read phy-mode or "
+ "phy-interface-type property for port %d\n",
+ index);
+ of_node_put(child);
+ return -ENODEV;
+ }
+ ports[index].phy_mode = phy_mode;
+
+ phy_node = of_parse_phandle(child, "phy-handle", 0);
+ if (!phy_node) {
+ if (!of_phy_is_fixed_link(child)) {
+ dev_err(dev, "phy-handle or fixed-link "
+ "properties missing!\n");
+ of_node_put(child);
+ return -ENODEV;
+ }
+ /* phy-handle is missing, but fixed-link isn't.
+ * So it's a fixed link. Default to PHY role.
+ */
+ ports[index].role = XMII_PHY;
+ } else {
+ /* phy-handle present => put port in MAC role */
+ ports[index].role = XMII_MAC;
+ of_node_put(phy_node);
+ }
+
+ /* The MAC/PHY role can be overridden with explicit bindings */
+ if (of_property_read_bool(child, "sja1105,role-mac"))
+ ports[index].role = XMII_MAC;
+ else if (of_property_read_bool(child, "sja1105,role-phy"))
+ ports[index].role = XMII_PHY;
+ }
+
+ return 0;
+}
+
+static int sja1105_parse_dt(struct sja1105_private *priv,
+ struct sja1105_dt_port *ports)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct device_node *switch_node = dev->of_node;
+ struct device_node *ports_node;
+ int rc;
+
+ ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node) {
+ dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
+ return -ENODEV;
+ }
+
+ rc = sja1105_parse_ports_node(priv, ports, ports_node);
+ of_node_put(ports_node);
+
+ return rc;
+}
+
+static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 val;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val,
+ NULL);
+ if (rc < 0)
+ return rc;
+
+ return val;
+}
+
+static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg,
+ u16 pcs_val)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 val = pcs_val;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val,
+ NULL);
+ if (rc < 0)
+ return rc;
+
+ return val;
+}
+
+static void sja1105_sgmii_pcs_config(struct sja1105_private *priv,
+ bool an_enabled, bool an_master)
+{
+ u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;
+
+ /* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to
+ * stop the clock during LPI mode, make the MAC reconfigure
+ * autonomously after PCS autoneg is done, flush the internal FIFOs.
+ */
+ sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 |
+ SJA1105_DC1_CLOCK_STOP_EN |
+ SJA1105_DC1_MAC_AUTO_SW |
+ SJA1105_DC1_INIT);
+ /* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */
+ sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE);
+ /* AUTONEG_CONTROL: Use SGMII autoneg */
+ if (an_master)
+ ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
+ sja1105_sgmii_write(priv, SJA1105_AC, ac);
+ /* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise,
+ * sja1105_sgmii_pcs_force_speed must be called later for the link
+ * to become operational.
+ */
+ if (an_enabled)
+ sja1105_sgmii_write(priv, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+}
+
+static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
+ int speed)
+{
+ int pcs_speed;
+
+ switch (speed) {
+ case SPEED_1000:
+ pcs_speed = BMCR_SPEED1000;
+ break;
+ case SPEED_100:
+ pcs_speed = BMCR_SPEED100;
+ break;
+ case SPEED_10:
+ pcs_speed = BMCR_SPEED10;
+ break;
+ default:
+ dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
+ return;
+ }
+ sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX);
+}
+
+/* Convert link speed from SJA1105 to ethtool encoding */
+static int sja1105_speed[] = {
+ [SJA1105_SPEED_AUTO] = SPEED_UNKNOWN,
+ [SJA1105_SPEED_10MBPS] = SPEED_10,
+ [SJA1105_SPEED_100MBPS] = SPEED_100,
+ [SJA1105_SPEED_1000MBPS] = SPEED_1000,
+};
+
+/* Set link speed in the MAC configuration for a specific port. */
+static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
+ int speed_mbps)
+{
+ struct sja1105_xmii_params_entry *mii;
+ struct sja1105_mac_config_entry *mac;
+ struct device *dev = priv->ds->dev;
+ sja1105_phy_interface_t phy_mode;
+ sja1105_speed_t speed;
+ int rc;
+
+ /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
+ * tables. On E/T, MAC reconfig tables are not readable, only writable.
+ * We have to *know* what the MAC looks like. For the sake of keeping
+ * the code common, we'll use the static configuration tables as a
+ * reasonable approximation for both E/T and P/Q/R/S.
+ */
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+
+ switch (speed_mbps) {
+ case SPEED_UNKNOWN:
+ /* PHYLINK called sja1105_mac_config() to inform us about
+ * the state->interface, but AN has not completed and the
+ * speed is not yet valid. UM10944.pdf says that setting
+ * SJA1105_SPEED_AUTO at runtime disables the port, so that is
+ * ok for power consumption in case AN will never complete -
+ * otherwise PHYLINK should come back with a new update.
+ */
+ speed = SJA1105_SPEED_AUTO;
+ break;
+ case SPEED_10:
+ speed = SJA1105_SPEED_10MBPS;
+ break;
+ case SPEED_100:
+ speed = SJA1105_SPEED_100MBPS;
+ break;
+ case SPEED_1000:
+ speed = SJA1105_SPEED_1000MBPS;
+ break;
+ default:
+ dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
+ return -EINVAL;
+ }
+
+ /* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
+ * table, since this will be used for the clocking setup, and we no
+ * longer need to store it in the static config (already told hardware
+ * we want auto during upload phase).
+ * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
+ * we need to configure the PCS only (if even that).
+ */
+ if (sja1105_supports_sgmii(priv, port))
+ mac[port].speed = SJA1105_SPEED_1000MBPS;
+ else
+ mac[port].speed = speed;
+
+ /* Write to the dynamic reconfiguration tables */
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+ if (rc < 0) {
+ dev_err(dev, "Failed to write MAC config: %d\n", rc);
+ return rc;
+ }
+
+ /* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
+ * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
+ * RMII no change of the clock setup is required. Actually, changing
+ * the clock setup does interrupt the clock signal for a certain time
+ * which causes trouble for all PHYs relying on this signal.
+ */
+ phy_mode = mii->xmii_mode[port];
+ if (phy_mode != XMII_MODE_RGMII)
+ return 0;
+
+ return sja1105_clocking_setup_port(priv, port);
+}
+
+/* The SJA1105 MAC programming model is through the static config (the xMII
+ * Mode table cannot be dynamically reconfigured), and we have to program
+ * that early (earlier than PHYLINK calls us, anyway).
+ * So just error out in case the connected PHY attempts to change the initial
+ * system interface MII protocol from what is defined in the DT, at least for
+ * now.
+ */
+static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
+ phy_interface_t interface)
+{
+ struct sja1105_xmii_params_entry *mii;
+ sja1105_phy_interface_t phy_mode;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+ phy_mode = mii->xmii_mode[port];
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ return (phy_mode != XMII_MODE_MII);
+ case PHY_INTERFACE_MODE_RMII:
+ return (phy_mode != XMII_MODE_RMII);
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return (phy_mode != XMII_MODE_RGMII);
+ case PHY_INTERFACE_MODE_SGMII:
+ return (phy_mode != XMII_MODE_SGMII);
+ default:
+ return true;
+ }
+}
+
+static void sja1105_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct sja1105_private *priv = ds->priv;
+ bool is_sgmii = sja1105_supports_sgmii(priv, port);
+
+ if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
+ dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
+ phy_modes(state->interface));
+ return;
+ }
+
+ if (phylink_autoneg_inband(mode) && !is_sgmii) {
+ dev_err(ds->dev, "In-band AN not supported!\n");
+ return;
+ }
+
+ if (is_sgmii)
+ sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
+ false);
+}
+
+static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ sja1105_inhibit_tx(ds->priv, BIT(port), true);
+}
+
+static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ sja1105_adjust_port_config(priv, port, speed);
+
+ if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
+ sja1105_sgmii_pcs_force_speed(priv, speed);
+
+ sja1105_inhibit_tx(priv, BIT(port), false);
+}
+
+static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ /* Construct a new mask which exhaustively contains all link features
+ * supported by the MAC, and then apply that (logical AND) to what will
+ * be sent to the PHY for "marketing".
+ */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_xmii_params_entry *mii;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+
+ /* include/linux/phylink.h says:
+ * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
+ * expects the MAC driver to return all supported link modes.
+ */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ sja1105_phy_mode_mismatch(priv, port, state->interface)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ /* The MAC does not support pause frames, and also doesn't
+ * support half-duplex traffic modes.
+ */
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, MII);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 100baseT1_Full);
+ if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
+ mii->xmii_mode[port] == XMII_MODE_SGMII)
+ phylink_set(mask, 1000baseT_Full);
+
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct sja1105_private *priv = ds->priv;
+ int ais;
+
+ /* Read the vendor-specific AUTONEG_INTR_STATUS register */
+ ais = sja1105_sgmii_read(priv, SJA1105_AIS);
+ if (ais < 0)
+ return ais;
+
+ switch (SJA1105_AIS_SPEED(ais)) {
+ case 0:
+ state->speed = SPEED_10;
+ break;
+ case 1:
+ state->speed = SPEED_100;
+ break;
+ case 2:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
+ SJA1105_AIS_SPEED(ais));
+ }
+ state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
+ state->an_complete = SJA1105_AIS_COMPLETE(ais);
+ state->link = SJA1105_AIS_LINK_STATUS(ais);
+
+ return 0;
+}
+
+static int
+sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
+ const struct sja1105_l2_lookup_entry *requested)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+ l2_lookup = table->entries;
+
+ for (i = 0; i < table->entry_count; i++)
+ if (l2_lookup[i].macaddr == requested->macaddr &&
+ l2_lookup[i].vlanid == requested->vlanid &&
+ l2_lookup[i].destports & BIT(port))
+ return i;
+
+ return -1;
+}
+
+/* We want FDB entries added statically through the bridge command to persist
+ * across switch resets, which are a common thing during normal SJA1105
+ * operation. So we have to back them up in the static configuration tables
+ * and hence apply them on next static config upload... yay!
+ */
+static int
+sja1105_static_fdb_change(struct sja1105_private *priv, int port,
+ const struct sja1105_l2_lookup_entry *requested,
+ bool keep)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int rc, match;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+
+ match = sja1105_find_static_fdb_entry(priv, port, requested);
+ if (match < 0) {
+ /* Can't delete a missing entry. */
+ if (!keep)
+ return 0;
+
+ /* No match => new entry */
+ rc = sja1105_table_resize(table, table->entry_count + 1);
+ if (rc)
+ return rc;
+
+ match = table->entry_count - 1;
+ }
+
+ /* Assign pointer after the resize (it may be new memory) */
+ l2_lookup = table->entries;
+
+ /* We have a match.
+ * If the job was to add this FDB entry, it's already done (mostly
+ * anyway, since the port forwarding mask may have changed, in which
+ * case we update it).
+ * Otherwise we have to delete it.
+ */
+ if (keep) {
+ l2_lookup[match] = *requested;
+ return 0;
+ }
+
+ /* To remove, the strategy is to overwrite the element with
+ * the last one, and then reduce the array size by 1
+ */
+ l2_lookup[match] = l2_lookup[table->entry_count - 1];
+ return sja1105_table_resize(table, table->entry_count - 1);
+}
+
+/* First-generation switches have a 4-way set associative TCAM that
+ * holds the FDB entries. An FDB index spans from 0 to 1023 and is composed of
+ * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
+ * For the placement of a newly learnt FDB entry, the switch selects the bin
+ * based on a hash function, and the way within that bin incrementally.
+ */
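+/* For example, with 4 ways per bin, bin 5 way 2 maps to FDB index
+ * 5 * 4 + 2 = 22.
+ */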
+static int sja1105et_fdb_index(int bin, int way)
+{
+ return bin * SJA1105ET_FDB_BIN_SIZE + way;
+}
+
+static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
+ const u8 *addr, u16 vid,
+ struct sja1105_l2_lookup_entry *match,
+ int *last_unused)
+{
+ int way;
+
+ for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ int index = sja1105et_fdb_index(bin, way);
+
+ /* Skip unused entries, optionally marking them
+ * into the return value
+ */
+ if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ index, &l2_lookup)) {
+ if (last_unused)
+ *last_unused = way;
+ continue;
+ }
+
+ if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
+ l2_lookup.vlanid == vid) {
+ if (match)
+ *match = l2_lookup;
+ return way;
+ }
+ }
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+int sja1105et_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
+ struct sja1105_private *priv = ds->priv;
+ struct device *dev = ds->dev;
+ int last_unused = -1;
+ int start, end, i;
+ int bin, way, rc;
+
+ bin = sja1105et_fdb_hash(priv, addr, vid);
+
+ way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, &last_unused);
+ if (way >= 0) {
+ /* We have an FDB entry. Is our port in the destination
+ * mask? If yes, we need to do nothing. If not, we need
+ * to rewrite the entry by adding this port to it.
+ */
+ if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds)
+ return 0;
+ l2_lookup.destports |= BIT(port);
+ } else {
+ /* We don't have an FDB entry. We construct a new one and
+ * try to find a place for it within the FDB table.
+ */
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.destports = BIT(port);
+ l2_lookup.vlanid = vid;
+
+ if (last_unused >= 0) {
+ way = last_unused;
+ } else {
+ /* Bin is full, need to evict somebody.
+ * Choose victim at random. If you get these messages
+ * often, you may need to consider changing the
+ * distribution function:
+ * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
+ */
+ get_random_bytes(&way, sizeof(u8));
+ way %= SJA1105ET_FDB_BIN_SIZE;
+ dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
+ bin, addr, way);
+ /* Evict entry */
+ sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ sja1105et_fdb_index(bin, way),
+ NULL, false);
+ }
+ }
+ l2_lookup.lockeds = true;
+ l2_lookup.index = sja1105et_fdb_index(bin, way);
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup,
+ true);
+ if (rc < 0)
+ return rc;
+
+ /* Invalidate a dynamically learned entry if that exists */
+ start = sja1105et_fdb_index(bin, 0);
+ end = sja1105et_fdb_index(bin, way);
+
+ for (i = start; i < end; i++) {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, &tmp);
+ if (rc == -ENOENT)
+ continue;
+ if (rc)
+ return rc;
+
+ if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid)
+ continue;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ i, NULL, false);
+ if (rc)
+ return rc;
+
+ break;
+ }
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
+}
+
+int sja1105et_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ int index, bin, way, rc;
+ bool keep;
+
+ bin = sja1105et_fdb_hash(priv, addr, vid);
+ way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, NULL);
+ if (way < 0)
+ return 0;
+ index = sja1105et_fdb_index(bin, way);
+
+ /* We have an FDB entry. Is our port in the destination mask? If yes,
+ * we need to remove it. If the resulting port mask becomes empty, we
+ * need to completely evict the FDB entry.
+ * Otherwise we just write it back.
+ */
+ l2_lookup.destports &= ~BIT(port);
+
+ if (l2_lookup.destports)
+ keep = true;
+ else
+ keep = false;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ index, &l2_lookup, keep);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
+}
+
+int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
+ struct sja1105_private *priv = ds->priv;
+ int rc, i;
+
+ /* Search for an existing entry in the FDB table */
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.vlanid = vid;
+ l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.destports = BIT(port);
+
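+ /* A full mask on both fields requests an exact match on MAC address
+ * and VID when reading back at the SJA1105_SEARCH index.
+ */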
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &l2_lookup);
+ if (rc == 0) {
+ /* Found a static entry and this port is already in the entry's
+ * port mask => job done
+ */
+ if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds)
+ return 0;
+ /* l2_lookup.index is populated by the switch in case it
+ * found something.
+ */
+ l2_lookup.destports |= BIT(port);
+ goto skip_finding_an_index;
+ }
+
+ /* Not found, so try to find an unused spot in the FDB.
+ * This is slightly inefficient because the strategy is knock-knock at
+ * every possible position from 0 to 1023.
+ */
+ for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, NULL);
+ if (rc < 0)
+ break;
+ }
+ if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
+ dev_err(ds->dev, "FDB is full, cannot add entry.\n");
+ return -EINVAL;
+ }
+ l2_lookup.index = i;
+
+skip_finding_an_index:
+ l2_lookup.lockeds = true;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup,
+ true);
+ if (rc < 0)
+ return rc;
+
+ /* The switch learns dynamic entries and looks up the FDB left to
+ * right. It is possible that our addition was concurrent with the
+ * dynamic learning of the same address, so now that the static entry
+ * has been installed, we are certain that address learning for this
+ * particular address has been turned off, so the dynamic entry either
+ * is in the FDB at an index smaller than the static one, or isn't (it
+ * can also be at a larger index, but in that case it is inactive
+ * because the static FDB entry will match first, and the dynamic one
+ * will eventually age out). Search for a dynamically learned address
+ * prior to our static one and invalidate it.
+ */
+ tmp = l2_lookup;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &tmp);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "port %d failed to read back entry for %pM vid %d: %pe\n",
+ port, addr, vid, ERR_PTR(rc));
+ return rc;
+ }
+
+ if (tmp.index < l2_lookup.index) {
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ tmp.index, NULL, false);
+ if (rc < 0)
+ return rc;
+ }
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
+}
+
+int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ bool keep;
+ int rc;
+
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.vlanid = vid;
+ l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.destports = BIT(port);
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &l2_lookup);
+ if (rc < 0)
+ return 0;
+
+ l2_lookup.destports &= ~BIT(port);
+
+ /* Decide whether we remove just this port from the FDB entry,
+ * or if we remove it completely.
+ */
+ if (l2_lookup.destports)
+ keep = true;
+ else
+ keep = false;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup, keep);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
+}
+
+static int sja1105_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
+ * so the switch still does some VLAN processing internally.
+ * But Shared VLAN Learning (SVL) is also active, and it will take
+ * care of autonomous forwarding between the unique pvid's of each
+ * port. Here we just make sure that users can't add duplicate FDB
+ * entries when in this mode - the actual VID doesn't matter except
+ * for what gets printed in 'bridge fdb show'. In the case of zero,
+ * no VID gets printed at all.
+ */
+ if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
+ vid = 0;
+
+ return priv->info->fdb_add_cmd(ds, port, addr, vid);
+}
+
+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
+ vid = 0;
+
+ return priv->info->fdb_del_cmd(ds, port, addr, vid);
+}
+
+static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct device *dev = ds->dev;
+ int i;
+
+ for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ u8 macaddr[ETH_ALEN];
+ int rc;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, &l2_lookup);
+ /* No fdb entry at i, not an issue */
+ if (rc == -ENOENT)
+ continue;
+ if (rc) {
+ dev_err(dev, "Failed to dump FDB: %d\n", rc);
+ return rc;
+ }
+
+ /* FDB dump callback is per port. This means we have to
+ * disregard a valid entry if it's not for this port, even if
+ * only to revisit it later. This is inefficient because the
+ * 1024-sized FDB table needs to be traversed 4 times through
+ * SPI during a 'bridge fdb show' command.
+ */
+ if (!(l2_lookup.destports & BIT(port)))
+ continue;
+ u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+
+ /* We need to hide the dsa_8021q VLANs from the user. */
+ if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
+ l2_lookup.vlanid = 0;
+ rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+/* This callback needs to be present */
+static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ return 0;
+}
+
+static void sja1105_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
+}
+
+static int sja1105_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
+}
+
+static int sja1105_bridge_member(struct dsa_switch *ds, int port,
+ struct net_device *br, bool member)
+{
+ struct sja1105_l2_forwarding_entry *l2_fwd;
+ struct sja1105_private *priv = ds->priv;
+ int i, rc;
+
+ l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ /* Add this port to the forwarding matrix of the
+ * other ports in the same bridge, and vice versa.
+ */
+ if (!dsa_is_user_port(ds, i))
+ continue;
+ /* For the ports already under the bridge, only one thing needs
+ * to be done, and that is to add this port to their
+ * reachability domain. So we can perform the SPI write for
+ * them immediately. However, for this port itself (the one
+ * that is new to the bridge), we need to add all other ports
+ * to its reachability domain. So we do that incrementally in
+ * this loop, and perform the SPI write only at the end, once
+ * the domain contains all other bridge ports.
+ */
+ if (i == port)
+ continue;
+ if (dsa_to_port(ds, i)->bridge_dev != br)
+ continue;
+ sja1105_port_allow_traffic(l2_fwd, i, port, member);
+ sja1105_port_allow_traffic(l2_fwd, port, i, member);
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ i, &l2_fwd[i], true);
+ if (rc < 0)
+ return rc;
+ }
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ port, &l2_fwd[port], true);
+}
+
+static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ /* From UM10944 description of DRPDTAG (why put this there?):
+ * "Management traffic flows to the port regardless of the state
+ * of the INGRESS flag". So BPDUs are still allowed to pass.
+ * At the moment no difference between DISABLED and BLOCKING.
+ */
+ mac[port].ingress = false;
+ mac[port].egress = false;
+ mac[port].dyn_learn = false;
+ break;
+ case BR_STATE_LISTENING:
+ mac[port].ingress = true;
+ mac[port].egress = false;
+ mac[port].dyn_learn = false;
+ break;
+ case BR_STATE_LEARNING:
+ mac[port].ingress = true;
+ mac[port].egress = false;
+ mac[port].dyn_learn = true;
+ break;
+ case BR_STATE_FORWARDING:
+ mac[port].ingress = true;
+ mac[port].egress = true;
+ mac[port].dyn_learn = true;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ return sja1105_bridge_member(ds, port, br, true);
+}
+
+static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ sja1105_bridge_member(ds, port, br, false);
+}
+
+#define BYTES_PER_KBIT (1000LL / 8)
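+/* I.e. 1 kbit/s equals 125 bytes/s; this is the factor used by
+ * sja1105_setup_tc_cbs() below to convert the tc-cbs slopes from kbit/s
+ * into the hardware's bytes/s based units.
+ */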
+
+static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
+ int port, int prio)
+{
+ int i;
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
+ return i;
+
+ return -1;
+}
+
+static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
+ return i;
+
+ return -1;
+}
+
+static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
+ int prio)
+{
+ int i;
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++) {
+ struct sja1105_cbs_entry *cbs = &priv->cbs[i];
+
+ if (cbs->port == port && cbs->prio == prio) {
+ memset(cbs, 0, sizeof(*cbs));
+ return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
+ i, cbs, true);
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
+ struct tc_cbs_qopt_offload *offload)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_cbs_entry *cbs;
+ s64 port_transmit_rate_kbps;
+ int index;
+
+ if (!offload->enable)
+ return sja1105_delete_cbs_shaper(priv, port, offload->queue);
+
+ /* The user may be replacing an existing shaper */
+ index = sja1105_find_cbs_shaper(priv, port, offload->queue);
+ if (index < 0) {
+ /* That isn't the case - see if we can allocate a new one */
+ index = sja1105_find_unused_cbs_shaper(priv);
+ if (index < 0)
+ return -ENOSPC;
+ }
+
+ cbs = &priv->cbs[index];
+ cbs->port = port;
+ cbs->prio = offload->queue;
+ /* locredit and sendslope are negative by definition. In hardware,
+ * positive values must be provided, and the negative sign is implicit.
+ */
+ cbs->credit_hi = offload->hicredit;
+ cbs->credit_lo = abs(offload->locredit);
+ /* User space is in kbits/sec, while the hardware works in bytes/sec times
+ * link speed. Since the given offload->sendslope is good only for the
+ * current link speed anyway, and user space is likely to reprogram it
+ * when that changes, don't even bother to track the port's link speed,
+ * but deduce the port transmit rate from idleslope - sendslope.
+ */
+ port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
+ cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
+ port_transmit_rate_kbps);
+ cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
+ port_transmit_rate_kbps);
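+ /* Worked example with hypothetical numbers: idleslope = 20000 and
+ * sendslope = -980000 (a 2% reservation on a 1Gbps link) yield a port
+ * transmit rate of 1000000 kbps, so idle_slope = 20000 * 125 / 1000000
+ * = 2 and send_slope = 980000 * 125 / 1000000 = 122 (both truncated).
+ */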
+ /* Convert the negative values from 64-bit 2's complement
+ * to 32-bit 2's complement (for the case of 0x80000000 whose
+ * negative is still negative).
+ */
+ cbs->credit_lo &= GENMASK_ULL(31, 0);
+ cbs->send_slope &= GENMASK_ULL(31, 0);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
+ true);
+}
+
+static int sja1105_reload_cbs(struct sja1105_private *priv)
+{
+ int rc = 0, i;
+
+ /* The credit based shapers are only allocated if
+ * CONFIG_NET_SCH_CBS is enabled.
+ */
+ if (!priv->cbs)
+ return 0;
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++) {
+ struct sja1105_cbs_entry *cbs = &priv->cbs[i];
+
+ if (!cbs->idle_slope && !cbs->send_slope)
+ continue;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
+ true);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static const char * const sja1105_reset_reasons[] = {
+ [SJA1105_VLAN_FILTERING] = "VLAN filtering",
+ [SJA1105_RX_HWTSTAMPING] = "RX timestamping",
+ [SJA1105_AGEING_TIME] = "Ageing time",
+ [SJA1105_SCHEDULING] = "Time-aware scheduling",
+ [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
+ [SJA1105_VIRTUAL_LINKS] = "Virtual links",
+};
+
+/* For situations where we need to change a setting at runtime that is only
+ * available through the static configuration, resetting the switch in order
+ * to upload the new static config is unavoidable. Back up the settings we
+ * modify at runtime (currently only MAC) and restore them after uploading,
+ * such that this operation is relatively seamless.
+ */
+int sja1105_static_config_reload(struct sja1105_private *priv,
+ enum sja1105_reset_reason reason)
+{
+ struct ptp_system_timestamp ptp_sts_before;
+ struct ptp_system_timestamp ptp_sts_after;
+ struct sja1105_mac_config_entry *mac;
+ int speed_mbps[SJA1105_NUM_PORTS];
+ struct dsa_switch *ds = priv->ds;
+ s64 t1, t2, t3, t4;
+ s64 t12, t34;
+ u16 bmcr = 0;
+ int rc, i;
+ s64 now;
+
+ mutex_lock(&priv->mgmt_lock);
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ /* Back up the dynamic link speed changed by sja1105_adjust_port_config
+ * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
+ * switch wants to see in the static config in order to allow us to
+ * change it through the dynamic interface later.
+ */
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ speed_mbps[i] = sja1105_speed[mac[i].speed];
+ mac[i].speed = SJA1105_SPEED_AUTO;
+ }
+
+ if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
+ bmcr = sja1105_sgmii_read(priv, MII_BMCR);
+
+ /* No PTP operations can run right now */
+ mutex_lock(&priv->ptp_data.lock);
+
+ rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
+ if (rc < 0)
+ goto out_unlock_ptp;
+
+ /* Reset switch and send updated static configuration */
+ rc = sja1105_static_config_upload(priv);
+ if (rc < 0)
+ goto out_unlock_ptp;
+
+ rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
+ if (rc < 0)
+ goto out_unlock_ptp;
+
+ t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
+ t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
+ t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
+ t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
+ /* Mid point, corresponds to pre-reset PTPCLKVAL */
+ t12 = t1 + (t2 - t1) / 2;
+ /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
+ t34 = t3 + (t4 - t3) / 2;
+ /* Advance PTPCLKVAL by the time it took since its readout */
+ now += (t34 - t12);
+
+ __sja1105_ptp_adjtime(ds, now);
+
+out_unlock_ptp:
+ mutex_unlock(&priv->ptp_data.lock);
+
+ dev_info(priv->ds->dev,
+ "Reset switch and programmed static config. Reason: %s\n",
+ sja1105_reset_reasons[reason]);
+
+ /* Configure the CGU (PLLs) for MII and RMII PHYs.
+ * For these interfaces there is no dynamic configuration
+ * needed, since PLLs have same settings at all speeds.
+ */
+ rc = sja1105_clocking_setup(priv);
+ if (rc < 0)
+ goto out;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
+ if (rc < 0)
+ goto out;
+ }
+
+ if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
+ bool an_enabled = !!(bmcr & BMCR_ANENABLE);
+
+ sja1105_sgmii_pcs_config(priv, an_enabled, false);
+
+ if (!an_enabled) {
+ int speed = SPEED_UNKNOWN;
+
+ if (bmcr & BMCR_SPEED1000)
+ speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ sja1105_sgmii_pcs_force_speed(priv, speed);
+ }
+ }
+
+ rc = sja1105_reload_cbs(priv);
+ if (rc < 0)
+ goto out;
+out:
+ mutex_unlock(&priv->mgmt_lock);
+
+ return rc;
+}
+
+static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
+{
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ mac[port].vlanid = pvid;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
+ int tree_index, int sw_index,
+ int other_port, struct net_device *br)
+{
+ struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
+ struct sja1105_private *other_priv = other_ds->priv;
+ struct sja1105_private *priv = ds->priv;
+ int port, rc;
+
+ if (other_ds->ops != &sja1105_switch_ops)
+ return 0;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!dsa_is_user_port(ds, port))
+ continue;
+ if (dsa_to_port(ds, port)->bridge_dev != br)
+ continue;
+
+ rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
+ port,
+ other_priv->dsa_8021q_ctx,
+ other_port);
+ if (rc)
+ return rc;
+
+ rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
+ other_port,
+ priv->dsa_8021q_ctx,
+ port);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
+ int tree_index, int sw_index,
+ int other_port,
+ struct net_device *br)
+{
+ struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
+ struct sja1105_private *other_priv = other_ds->priv;
+ struct sja1105_private *priv = ds->priv;
+ int port;
+
+ if (other_ds->ops != &sja1105_switch_ops)
+ return;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!dsa_is_user_port(ds, port))
+ continue;
+ if (dsa_to_port(ds, port)->bridge_dev != br)
+ continue;
+
+ dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
+ other_priv->dsa_8021q_ctx,
+ other_port);
+
+ dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
+ other_port,
+ priv->dsa_8021q_ctx, port);
+ }
+}
+
+static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
+ if (rc)
+ return rc;
+
+ dev_info(ds->dev, "%s switch tagging\n",
+ enabled ? "Enabled" : "Disabled");
+ return 0;
+}
+
+static enum dsa_tag_protocol
+sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_SJA1105;
+}
+
+static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
+{
+ int subvlan;
+
+ if (pvid)
+ return 0;
+
+ for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ if (subvlan_map[subvlan] == VLAN_N_VID)
+ return subvlan;
+
+ return -1;
+}
+
+static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
+{
+ int subvlan;
+
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ if (subvlan_map[subvlan] == vid)
+ return subvlan;
+
+ return -1;
+}
+
+static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
+ int port, u16 vid)
+{
+ struct sja1105_port *sp = &priv->ports[port];
+
+ return sja1105_find_subvlan(sp->subvlan_map, vid);
+}
+
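+/* VLAN_N_VID (4096) is outside the valid VID range, so it doubles as the
+ * "free slot" marker in the per-port subvlan maps below.
+ */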
+static void sja1105_init_subvlan_map(u16 *subvlan_map)
+{
+ int subvlan;
+
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ subvlan_map[subvlan] = VLAN_N_VID;
+}
+
+static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
+ u16 *subvlan_map)
+{
+ struct sja1105_port *sp = &priv->ports[port];
+ int subvlan;
+
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ sp->subvlan_map[subvlan] = subvlan_map[subvlan];
+}
+
+static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ int count, i;
+
+ vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
+ count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
+
+ for (i = 0; i < count; i++)
+ if (vlan[i].vlanid == vid)
+ return i;
+
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+static int
+sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
+ int count, int from_port, u16 from_vid,
+ u16 to_vid)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (retagging[i].ing_port == BIT(from_port) &&
+ retagging[i].vlan_ing == from_vid &&
+ retagging[i].vlan_egr == to_vid)
+ return i;
+
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+static int sja1105_commit_vlans(struct sja1105_private *priv,
+ struct sja1105_vlan_lookup_entry *new_vlan,
+ struct sja1105_retagging_entry *new_retagging,
+ int num_retagging)
+{
+ struct sja1105_retagging_entry *retagging;
+ struct sja1105_vlan_lookup_entry *vlan;
+ struct sja1105_table *table;
+ int num_vlans = 0;
+ int rc, i, k = 0;
+
+ /* VLAN table */
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+ vlan = table->entries;
+
+ for (i = 0; i < VLAN_N_VID; i++) {
+ int match = sja1105_is_vlan_configured(priv, i);
+
+ if (new_vlan[i].vlanid != VLAN_N_VID)
+ num_vlans++;
+
+ if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
+ /* Was there before, no longer is. Delete */
+ dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
+ rc = sja1105_dynamic_config_write(priv,
+ BLK_IDX_VLAN_LOOKUP,
+ i, &vlan[match], false);
+ if (rc < 0)
+ return rc;
+ } else if (new_vlan[i].vlanid != VLAN_N_VID) {
+ /* Nothing changed, don't do anything */
+ if (match >= 0 &&
+ vlan[match].vlanid == new_vlan[i].vlanid &&
+ vlan[match].tag_port == new_vlan[i].tag_port &&
+ vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
+ vlan[match].vmemb_port == new_vlan[i].vmemb_port)
+ continue;
+ /* Update entry */
+ dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
+ rc = sja1105_dynamic_config_write(priv,
+ BLK_IDX_VLAN_LOOKUP,
+ i, &new_vlan[i],
+ true);
+ if (rc < 0)
+ return rc;
+ }
+ }
+
+ if (table->entry_count)
+ kfree(table->entries);
+
+ table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = num_vlans;
+ vlan = table->entries;
+
+ for (i = 0; i < VLAN_N_VID; i++) {
+ if (new_vlan[i].vlanid == VLAN_N_VID)
+ continue;
+ vlan[k++] = new_vlan[i];
+ }
+
+ /* VLAN Retagging Table */
+ table = &priv->static_config.tables[BLK_IDX_RETAGGING];
+ retagging = table->entries;
+
+ for (i = 0; i < table->entry_count; i++) {
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
+ i, &retagging[i], false);
+ if (rc)
+ return rc;
+ }
+
+ if (table->entry_count)
+ kfree(table->entries);
+
+ table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = num_retagging;
+ retagging = table->entries;
+
+ for (i = 0; i < num_retagging; i++) {
+ retagging[i] = new_retagging[i];
+
+ /* Update entry */
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
+ i, &retagging[i], true);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+struct sja1105_crosschip_vlan {
+ struct list_head list;
+ u16 vid;
+ bool untagged;
+ int port;
+ int other_port;
+ struct dsa_8021q_context *other_ctx;
+};
+
+struct sja1105_crosschip_switch {
+ struct list_head list;
+ struct dsa_8021q_context *other_ctx;
+};
+
+static int sja1105_commit_pvid(struct sja1105_private *priv)
+{
+ struct sja1105_bridge_vlan *v;
+ struct list_head *vlan_list;
+ int rc = 0;
+
+ if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
+ vlan_list = &priv->bridge_vlans;
+ else
+ vlan_list = &priv->dsa_8021q_vlans;
+
+ list_for_each_entry(v, vlan_list, list) {
+ if (v->pvid) {
+ rc = sja1105_pvid_apply(priv, v->port, v->vid);
+ if (rc)
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int
+sja1105_build_bridge_vlans(struct sja1105_private *priv,
+ struct sja1105_vlan_lookup_entry *new_vlan)
+{
+ struct sja1105_bridge_vlan *v;
+
+ if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
+ return 0;
+
+ list_for_each_entry(v, &priv->bridge_vlans, list) {
+ int match = v->vid;
+
+ new_vlan[match].vlanid = v->vid;
+ new_vlan[match].vmemb_port |= BIT(v->port);
+ new_vlan[match].vlan_bc |= BIT(v->port);
+ if (!v->untagged)
+ new_vlan[match].tag_port |= BIT(v->port);
+ }
+
+ return 0;
+}
+
+static int
+sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
+ struct sja1105_vlan_lookup_entry *new_vlan)
+{
+ struct sja1105_bridge_vlan *v;
+
+ if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
+ return 0;
+
+ list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
+ int match = v->vid;
+
+ new_vlan[match].vlanid = v->vid;
+ new_vlan[match].vmemb_port |= BIT(v->port);
+ new_vlan[match].vlan_bc |= BIT(v->port);
+ if (!v->untagged)
+ new_vlan[match].tag_port |= BIT(v->port);
+ }
+
+ return 0;
+}
+
+static int sja1105_build_subvlans(struct sja1105_private *priv,
+ u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
+ struct sja1105_vlan_lookup_entry *new_vlan,
+ struct sja1105_retagging_entry *new_retagging,
+ int *num_retagging)
+{
+ struct sja1105_bridge_vlan *v;
+ int k = *num_retagging;
+
+ if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
+ return 0;
+
+ list_for_each_entry(v, &priv->bridge_vlans, list) {
+ int upstream = dsa_upstream_port(priv->ds, v->port);
+ int match, subvlan;
+ u16 rx_vid;
+
+ /* Only sub-VLANs on user ports need to be applied.
+ * Bridge VLANs also include VLANs added automatically
+ * by DSA on the CPU port.
+ */
+ if (!dsa_is_user_port(priv->ds, v->port))
+ continue;
+
+ subvlan = sja1105_find_subvlan(subvlan_map[v->port],
+ v->vid);
+ if (subvlan < 0) {
+ subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
+ v->pvid);
+ if (subvlan < 0) {
+ dev_err(priv->ds->dev, "No more free subvlans\n");
+ return -ENOSPC;
+ }
+ }
+
+ rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);
+
+ /* @v->vid on @v->port needs to be retagged to @rx_vid
+ * on @upstream. Assume @v->vid on @v->port and on
+ * @upstream was already configured by the previous
+ * iteration over bridge_vlans.
+ */
+ match = rx_vid;
+ new_vlan[match].vlanid = rx_vid;
+ new_vlan[match].vmemb_port |= BIT(v->port);
+ new_vlan[match].vmemb_port |= BIT(upstream);
+ new_vlan[match].vlan_bc |= BIT(v->port);
+ new_vlan[match].vlan_bc |= BIT(upstream);
+ /* The "untagged" flag is set the same as for the
+ * original VLAN
+ */
+ if (!v->untagged)
+ new_vlan[match].tag_port |= BIT(v->port);
+ /* But it's always tagged towards the CPU */
+ new_vlan[match].tag_port |= BIT(upstream);
+
+ /* The Retagging Table generates packet *clones* with
+ * the new VLAN. This is a very odd hardware quirk
+ * which we need to suppress by dropping the original
+ * packet.
+ * Deny egress of the original VLAN towards the CPU
+ * port. This will force the switch to drop it, and
+ * we'll see only the retagged packets.
+ */
+ match = v->vid;
+ new_vlan[match].vlan_bc &= ~BIT(upstream);
+
+		/* And the retagging itself. Check for free space before
+		 * writing: new_retagging holds at most
+		 * SJA1105_MAX_RETAGGING_COUNT entries, and writing first
+		 * would overflow the array by one entry.
+		 */
+		if (k == SJA1105_MAX_RETAGGING_COUNT) {
+			dev_err(priv->ds->dev, "No more retagging rules\n");
+			return -ENOSPC;
+		}
+		new_retagging[k].vlan_ing = v->vid;
+		new_retagging[k].vlan_egr = rx_vid;
+		new_retagging[k].ing_port = BIT(v->port);
+		new_retagging[k].egr_port = BIT(upstream);
+		k++;
+
+ subvlan_map[v->port][subvlan] = v->vid;
+ }
+
+ *num_retagging = k;
+
+ return 0;
+}
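+
+/* Worked example (hypothetical numbers): say VLAN 100 is installed on user
+ * port 2, whose upstream (CPU) port is 4. A free sub-VLAN slot, e.g. slot 1,
+ * is mapped to VID 100, and dsa_8021q_rx_vid_subvlan() yields a unique
+ * rx_vid for the {port 2, slot 1} pair. The retagging rule then becomes
+ * {vlan_ing=100, ing_port=BIT(2), vlan_egr=rx_vid, egr_port=BIT(4)}, while
+ * VLAN 100 itself is denied egress towards port 4, so the CPU only ever
+ * sees the retagged clone.
+ */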
+
+/* Sadly, in crosschip scenarios where the CPU port is also the link to another
+ * switch, we should retag backwards (the dsa_8021q vid to the original vid) on
+ * the CPU port of neighbour switches.
+ */
+static int
+sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
+ struct sja1105_vlan_lookup_entry *new_vlan,
+ struct sja1105_retagging_entry *new_retagging,
+ int *num_retagging)
+{
+ struct sja1105_crosschip_vlan *tmp, *pos;
+ struct dsa_8021q_crosschip_link *c;
+ struct sja1105_bridge_vlan *v, *w;
+ struct list_head crosschip_vlans;
+ int k = *num_retagging;
+ int rc = 0;
+
+ if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
+ return 0;
+
+ INIT_LIST_HEAD(&crosschip_vlans);
+
+ list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
+ struct sja1105_private *other_priv = c->other_ctx->ds->priv;
+
+ if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
+ continue;
+
+ /* Crosschip links are also added to the CPU ports.
+ * Ignore those.
+ */
+ if (!dsa_is_user_port(priv->ds, c->port))
+ continue;
+ if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
+ continue;
+
+ /* Search for VLANs on the remote port */
+ list_for_each_entry(v, &other_priv->bridge_vlans, list) {
+ bool already_added = false;
+ bool we_have_it = false;
+
+ if (v->port != c->other_port)
+ continue;
+
+ /* If @v is a pvid on @other_ds, it does not need
+ * re-retagging, because its SVL field is 0 and we
+ * already allow that, via the dsa_8021q crosschip
+ * links.
+ */
+ if (v->pvid)
+ continue;
+
+ /* Search for the VLAN on our local port */
+ list_for_each_entry(w, &priv->bridge_vlans, list) {
+ if (w->port == c->port && w->vid == v->vid) {
+ we_have_it = true;
+ break;
+ }
+ }
+
+ if (!we_have_it)
+ continue;
+
+ list_for_each_entry(tmp, &crosschip_vlans, list) {
+ if (tmp->vid == v->vid &&
+ tmp->untagged == v->untagged &&
+ tmp->port == c->port &&
+ tmp->other_port == v->port &&
+ tmp->other_ctx == c->other_ctx) {
+ already_added = true;
+ break;
+ }
+ }
+
+ if (already_added)
+ continue;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ dev_err(priv->ds->dev, "Failed to allocate memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ tmp->vid = v->vid;
+ tmp->port = c->port;
+ tmp->other_port = v->port;
+ tmp->other_ctx = c->other_ctx;
+ tmp->untagged = v->untagged;
+ list_add(&tmp->list, &crosschip_vlans);
+ }
+ }
+
+ list_for_each_entry(tmp, &crosschip_vlans, list) {
+ struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
+ int upstream = dsa_upstream_port(priv->ds, tmp->port);
+ int match, subvlan;
+ u16 rx_vid;
+
+ subvlan = sja1105_find_committed_subvlan(other_priv,
+ tmp->other_port,
+ tmp->vid);
+ /* If this happens, it's a bug. The neighbour switch does not
+ * have a subvlan for tmp->vid on tmp->other_port, but it
+ * should, since we already checked for its vlan_state.
+ */
+ if (WARN_ON(subvlan < 0)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
+ tmp->other_port,
+ subvlan);
+
+ /* The @rx_vid retagged from @tmp->vid on
+ * {@tmp->other_ds, @tmp->other_port} needs to be
+ * re-retagged to @tmp->vid on the way back to us.
+ *
+ * Assume the original @tmp->vid is already configured
+ * on this local switch, otherwise we wouldn't be
+ * retagging its subvlan on the other switch in the
+ * first place. We just need to add a reverse retagging
+ * rule for @rx_vid and install @rx_vid on our ports.
+ */
+ match = rx_vid;
+ new_vlan[match].vlanid = rx_vid;
+ new_vlan[match].vmemb_port |= BIT(tmp->port);
+ new_vlan[match].vmemb_port |= BIT(upstream);
+ /* The "untagged" flag is set the same as for the
+ * original VLAN. And towards the CPU, it doesn't
+ * really matter, because @rx_vid will only receive
+ * traffic on that port. For consistency with other dsa_8021q
+ * VLANs, we'll keep the CPU port tagged.
+ */
+ if (!tmp->untagged)
+ new_vlan[match].tag_port |= BIT(tmp->port);
+ new_vlan[match].tag_port |= BIT(upstream);
+ /* Deny egress of @rx_vid towards our front-panel port.
+ * This will force the switch to drop it, and we'll see
+ * only the re-retagged packets (having the original,
+ * pre-initial-retagging, VLAN @tmp->vid).
+ */
+ new_vlan[match].vlan_bc &= ~BIT(tmp->port);
+
+ /* On reverse retagging, the same ingress VLAN goes to multiple
+ * ports. So we have an opportunity to create composite rules
+ * to not waste the limited space in the retagging table.
+ */
+ k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
+ upstream, rx_vid, tmp->vid);
+ if (k < 0) {
+ if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
+ dev_err(priv->ds->dev, "No more retagging rules\n");
+ rc = -ENOSPC;
+ goto out;
+ }
+ k = (*num_retagging)++;
+ }
+ /* And the retagging itself */
+ new_retagging[k].vlan_ing = rx_vid;
+ new_retagging[k].vlan_egr = tmp->vid;
+ new_retagging[k].ing_port = BIT(upstream);
+ new_retagging[k].egr_port |= BIT(tmp->port);
+ }
+
+out:
+ list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+
+ return rc;
+}
+
+static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);
+
+static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
+{
+ struct sja1105_crosschip_switch *s, *pos;
+ struct list_head crosschip_switches;
+ struct dsa_8021q_crosschip_link *c;
+ int rc = 0;
+
+ INIT_LIST_HEAD(&crosschip_switches);
+
+ list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
+ bool already_added = false;
+
+ list_for_each_entry(s, &crosschip_switches, list) {
+ if (s->other_ctx == c->other_ctx) {
+ already_added = true;
+ break;
+ }
+ }
+
+ if (already_added)
+ continue;
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ dev_err(priv->ds->dev, "Failed to allocate memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ s->other_ctx = c->other_ctx;
+ list_add(&s->list, &crosschip_switches);
+ }
+
+ list_for_each_entry(s, &crosschip_switches, list) {
+ struct sja1105_private *other_priv = s->other_ctx->ds->priv;
+
+ rc = sja1105_build_vlan_table(other_priv, false);
+ if (rc)
+ goto out;
+ }
+
+out:
+ list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
+ list_del(&s->list);
+ kfree(s);
+ }
+
+ return rc;
+}
+
+static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
+{
+ u16 subvlan_map[SJA1105_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
+ struct sja1105_retagging_entry *new_retagging;
+ struct sja1105_vlan_lookup_entry *new_vlan;
+ struct sja1105_table *table;
+ int i, num_retagging = 0;
+ int rc;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+ new_vlan = kcalloc(VLAN_N_VID,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!new_vlan)
+ return -ENOMEM;
+
+	table = &priv->static_config.tables[BLK_IDX_RETAGGING];
+ new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!new_retagging) {
+ kfree(new_vlan);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < VLAN_N_VID; i++)
+ new_vlan[i].vlanid = VLAN_N_VID;
+
+ for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
+ new_retagging[i].vlan_ing = VLAN_N_VID;
+
+ for (i = 0; i < priv->ds->num_ports; i++)
+ sja1105_init_subvlan_map(subvlan_map[i]);
+
+ /* Bridge VLANs */
+ rc = sja1105_build_bridge_vlans(priv, new_vlan);
+ if (rc)
+ goto out;
+
+ /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c:
+ * - RX VLANs
+ * - TX VLANs
+ * - Crosschip links
+ */
+ rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
+ if (rc)
+ goto out;
+
+ /* Private VLANs necessary for dsa_8021q operation, which we need to
+ * determine on our own:
+ * - Sub-VLANs
+ * - Sub-VLANs of crosschip switches
+ */
+ rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
+ &num_retagging);
+ if (rc)
+ goto out;
+
+ rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
+ &num_retagging);
+ if (rc)
+ goto out;
+
+ rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
+ if (rc)
+ goto out;
+
+ rc = sja1105_commit_pvid(priv);
+ if (rc)
+ goto out;
+
+ for (i = 0; i < priv->ds->num_ports; i++)
+ sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);
+
+ if (notify) {
+ rc = sja1105_notify_crosschip_switches(priv);
+ if (rc)
+ goto out;
+ }
+
+out:
+ kfree(new_vlan);
+ kfree(new_retagging);
+
+ return rc;
+}
+
+static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct sja1105_private *priv = ds->priv;
+ u16 vid;
+
+ if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
+ return 0;
+
+ /* If the user wants best-effort VLAN filtering (aka vlan_filtering
+ * bridge plus tagging), be sure to at least deny alterations to the
+ * configuration done by dsa_8021q.
+ */
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ if (vid_is_dsa_8021q(vid)) {
+ dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+/* The TPID setting belongs to the General Parameters table,
+ * which can only be partially reconfigured at runtime (and not the TPID).
+ * So a switch reset is required.
+ */
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ struct switchdev_trans *trans)
+{
+ struct sja1105_l2_lookup_params_entry *l2_lookup_params;
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_private *priv = ds->priv;
+ enum sja1105_vlan_state state;
+ struct sja1105_table *table;
+ struct sja1105_rule *rule;
+ bool want_tagging;
+ u16 tpid, tpid2;
+ int rc;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type == SJA1105_RULE_VL) {
+ dev_err(ds->dev,
+ "Cannot change VLAN filtering with active VL rules\n");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+ }
+
+ if (enabled) {
+ /* Enable VLAN filtering. */
+ tpid = ETH_P_8021Q;
+ tpid2 = ETH_P_8021AD;
+ } else {
+ /* Disable VLAN filtering. */
+ tpid = ETH_P_SJA1105;
+ tpid2 = ETH_P_SJA1105;
+ }
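+
+	/* For reference: with filtering enabled the switch matches the
+	 * standard C-tag and S-tag EtherTypes (0x8100 and 0x88A8); with it
+	 * disabled, both TPIDs are set to the private ETH_P_SJA1105 value,
+	 * which no real traffic uses, so nothing is ever treated as tagged.
+	 */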
+
+ for (port = 0; port < ds->num_ports; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (enabled)
+ sp->xmit_tpid = priv->info->qinq_tpid;
+ else
+ sp->xmit_tpid = ETH_P_SJA1105;
+ }
+
+ if (!enabled)
+ state = SJA1105_VLAN_UNAWARE;
+ else if (priv->best_effort_vlan_filtering)
+ state = SJA1105_VLAN_BEST_EFFORT;
+ else
+ state = SJA1105_VLAN_FILTERING_FULL;
+
+ if (priv->vlan_state == state)
+ return 0;
+
+ priv->vlan_state = state;
+ want_tagging = (state == SJA1105_VLAN_UNAWARE ||
+ state == SJA1105_VLAN_BEST_EFFORT);
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+ /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
+ general_params->tpid = tpid;
+ /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
+ general_params->tpid2 = tpid2;
+ /* When VLAN filtering is on, we need to at least be able to
+ * decode management traffic through the "backup plan".
+ */
+ general_params->incl_srcpt1 = enabled;
+ general_params->incl_srcpt0 = enabled;
+
+ /* VLAN filtering => independent VLAN learning.
+ * No VLAN filtering (or best effort) => shared VLAN learning.
+ *
+ * In shared VLAN learning mode, untagged traffic still gets
+ * pvid-tagged, and the FDB table gets populated with entries
+ * containing the "real" (pvid or from VLAN tag) VLAN ID.
+ * However the switch performs a masked L2 lookup in the FDB,
+ * effectively only looking up a frame's DMAC (and not VID) for the
+ * forwarding decision.
+ *
+ * This is extremely convenient for us, because in modes with
+ * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
+ * each front panel port. This is good for identification but breaks
+ * learning badly - the VID of the learnt FDB entry is unique, aka
+ * no frames coming from any other port are going to have it. So
+ * for forwarding purposes, this is as though learning was broken
+ * (all frames get flooded).
+ */
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+ l2_lookup_params = table->entries;
+ l2_lookup_params->shared_learn = want_tagging;
+
+ sja1105_frame_memory_partitioning(priv);
+
+ rc = sja1105_build_vlan_table(priv, false);
+ if (rc)
+ return rc;
+
+ rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
+ if (rc)
+ dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
+
+	/* Switch port identification based on 802.1Q is only possible
+	 * if we are not under a vlan_filtering bridge. So make sure
+	 * the two configurations are mutually exclusive (of course, the
+	 * user may know better, i.e. best_effort_vlan_filtering).
+	 */
+ return sja1105_setup_8021q_tagging(ds, want_tagging);
+}
+
+/* Returns number of VLANs added (0 or 1) on success,
+ * or a negative error code.
+ */
+static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags, struct list_head *vlan_list)
+{
+ bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
+ struct sja1105_bridge_vlan *v;
+
+ list_for_each_entry(v, vlan_list, list) {
+ if (v->port == port && v->vid == vid) {
+ /* Already added */
+ if (v->untagged == untagged && v->pvid == pvid)
+ /* Nothing changed */
+ return 0;
+
+ /* It's the same VLAN, but some of the flags changed
+ * and the user did not bother to delete it first.
+ * Update it and trigger sja1105_build_vlan_table.
+ */
+ v->untagged = untagged;
+ v->pvid = pvid;
+ return 1;
+ }
+ }
+
+ v = kzalloc(sizeof(*v), GFP_KERNEL);
+ if (!v) {
+ dev_err(ds->dev, "Out of memory while storing VLAN\n");
+ return -ENOMEM;
+ }
+
+ v->port = port;
+ v->vid = vid;
+ v->untagged = untagged;
+ v->pvid = pvid;
+ list_add(&v->list, vlan_list);
+
+ return 1;
+}
+
+/* Returns number of VLANs deleted (0 or 1) */
+static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
+ struct list_head *vlan_list)
+{
+ struct sja1105_bridge_vlan *v, *n;
+
+ list_for_each_entry_safe(v, n, vlan_list, list) {
+ if (v->port == port && v->vid == vid) {
+ list_del(&v->list);
+ kfree(v);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void sja1105_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct sja1105_private *priv = ds->priv;
+ bool vlan_table_changed = false;
+ u16 vid;
+ int rc;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ rc = sja1105_vlan_add_one(ds, port, vid, vlan->flags,
+ &priv->bridge_vlans);
+ if (rc < 0)
+ return;
+ if (rc > 0)
+ vlan_table_changed = true;
+ }
+
+ if (!vlan_table_changed)
+ return;
+
+ rc = sja1105_build_vlan_table(priv, true);
+ if (rc)
+ dev_err(ds->dev, "Failed to build VLAN table: %d\n", rc);
+}
+
+static int sja1105_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct sja1105_private *priv = ds->priv;
+ bool vlan_table_changed = false;
+ u16 vid;
+ int rc;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ rc = sja1105_vlan_del_one(ds, port, vid, &priv->bridge_vlans);
+ if (rc > 0)
+ vlan_table_changed = true;
+ }
+
+ if (!vlan_table_changed)
+ return 0;
+
+ return sja1105_build_vlan_table(priv, true);
+}
+
+static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
+ if (rc <= 0)
+ return rc;
+
+ return sja1105_build_vlan_table(priv, true);
+}
+
+static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
+ if (!rc)
+ return 0;
+
+ return sja1105_build_vlan_table(priv, true);
+}
+
+static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
+ .vlan_add = sja1105_dsa_8021q_vlan_add,
+ .vlan_del = sja1105_dsa_8021q_vlan_del,
+};
+
+/* The programming model for the SJA1105 switch is "all-at-once" via static
+ * configuration tables. Some of these can be dynamically modified at runtime,
+ * but not the xMII mode parameters table.
+ * Furthermore, some PHYs may not have crystals for generating their clocks
+ * (e.g. RMII). Instead, their 50 MHz clock is supplied via the SJA1105 port's
+ * ref_clk pin. So port clocking needs to be initialized early, before
+ * connecting to the PHYs is attempted, otherwise they won't respond through
+ * MDIO. Setting the correct PHY link speed does not matter at this point.
+ * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
+ * bindings are not yet parsed by the DSA core. We need to parse them early so
+ * that we can populate the xMII mode parameters table.
+ */
+static int sja1105_setup(struct dsa_switch *ds)
+{
+ struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ rc = sja1105_parse_dt(priv, ports);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
+ return rc;
+ }
+
+ /* Error out early if internal delays are required through DT
+ * and we can't apply them.
+ */
+ rc = sja1105_parse_rgmii_delays(priv, ports);
+ if (rc < 0) {
+ dev_err(ds->dev, "RGMII delay not supported\n");
+ return rc;
+ }
+
+ rc = sja1105_ptp_clock_register(ds);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
+ return rc;
+ }
+ /* Create and send configuration down to device */
+ rc = sja1105_static_config_load(priv, ports);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to load static config: %d\n", rc);
+ goto out_ptp_clock_unregister;
+ }
+ /* Configure the CGU (PHY link modes and speeds) */
+ rc = sja1105_clocking_setup(priv);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
+ goto out_static_config_free;
+ }
+ /* On SJA1105, VLAN filtering per se is always enabled in hardware.
+ * The only thing we can do to disable it is lie about what the 802.1Q
+ * EtherType is.
+ * So it will still try to apply VLAN filtering, but all ingress
+ * traffic (except frames received with EtherType of ETH_P_SJA1105)
+ * will be internally tagged with a distorted VLAN header where the
+ * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
+ */
+ ds->vlan_filtering_is_global = true;
+
+ /* Advertise the 8 egress queues */
+ ds->num_tx_queues = SJA1105_NUM_TC;
+
+ ds->mtu_enforcement_ingress = true;
+
+ ds->configure_vlan_while_not_filtering = true;
+
+ rc = sja1105_devlink_setup(ds);
+ if (rc < 0)
+ goto out_static_config_free;
+
+ /* The DSA/switchdev model brings up switch ports in standalone mode by
+ * default, and that means vlan_filtering is 0 since they're not under
+ * a bridge, so it's safe to set up switch tagging at this time.
+ */
+ rtnl_lock();
+ rc = sja1105_setup_8021q_tagging(ds, true);
+ rtnl_unlock();
+ if (rc)
+ goto out_devlink_teardown;
+
+ return 0;
+
+out_devlink_teardown:
+ sja1105_devlink_teardown(ds);
+out_ptp_clock_unregister:
+ sja1105_ptp_clock_unregister(ds);
+out_static_config_free:
+ sja1105_static_config_free(&priv->static_config);
+
+ return rc;
+}
+
+static void sja1105_teardown(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_bridge_vlan *v, *n;
+ int port;
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ if (sp->xmit_worker)
+ kthread_destroy_worker(sp->xmit_worker);
+ }
+
+ sja1105_devlink_teardown(ds);
+ sja1105_flower_teardown(ds);
+ sja1105_tas_teardown(ds);
+ sja1105_ptp_clock_unregister(ds);
+ sja1105_static_config_free(&priv->static_config);
+
+ list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
+ list_del(&v->list);
+ kfree(v);
+ }
+
+ list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
+ list_del(&v->list);
+ kfree(v);
+ }
+}
+
+static int sja1105_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct net_device *slave;
+
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ slave = dsa_to_port(ds, port)->slave;
+
+ slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return 0;
+}
+
+static void sja1105_port_disable(struct dsa_switch *ds, int port)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!dsa_is_user_port(ds, port))
+ return;
+
+ kthread_cancel_work_sync(&sp->xmit_work);
+ skb_queue_purge(&sp->xmit_queue);
+}
+
+static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
+ struct sk_buff *skb, bool takets)
+{
+ struct sja1105_mgmt_entry mgmt_route = {0};
+ struct sja1105_private *priv = ds->priv;
+ struct ethhdr *hdr;
+ int timeout = 10;
+ int rc;
+
+ hdr = eth_hdr(skb);
+
+ mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
+ mgmt_route.destports = BIT(port);
+ mgmt_route.enfport = 1;
+ mgmt_route.tsreg = 0;
+ mgmt_route.takets = takets;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route, true);
+ if (rc < 0) {
+ kfree_skb(skb);
+ return rc;
+ }
+
+ /* Transfer skb to the host port. */
+ dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
+
+ /* Wait until the switch has processed the frame */
+ do {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route);
+ if (rc < 0) {
+ dev_err_ratelimited(priv->ds->dev,
+ "failed to poll for mgmt route\n");
+ continue;
+ }
+
+ /* UM10944: The ENFPORT flag of the respective entry is
+ * cleared when a match is found. The host can use this
+ * flag as an acknowledgment.
+ */
+ cpu_relax();
+ } while (mgmt_route.enfport && --timeout);
+
+ if (!timeout) {
+ /* Clean up the management route so that a follow-up
+ * frame may not match on it by mistake.
+ * This is only hardware supported on P/Q/R/S - on E/T it is
+ * a no-op and we are silently discarding the -EOPNOTSUPP.
+ */
+ sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route, false);
+ dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
+ }
+
+ return NETDEV_TX_OK;
+}
+
+#define work_to_port(work) \
+ container_of((work), struct sja1105_port, xmit_work)
+#define tagger_to_sja1105(t) \
+ container_of((t), struct sja1105_private, tagger_data)
+
+/* Deferred work is unfortunately necessary because setting up the management
+ * route cannot be done from atomic context (the SPI transfer takes a
+ * sleepable lock on the bus).
+ */
+static void sja1105_port_deferred_xmit(struct kthread_work *work)
+{
+ struct sja1105_port *sp = work_to_port(work);
+ struct sja1105_tagger_data *tagger_data = sp->data;
+ struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
+ int port = sp - priv->ports;
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
+ struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
+
+ mutex_lock(&priv->mgmt_lock);
+
+ sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
+
+ /* The clone, if there, was made by dsa_skb_tx_timestamp */
+ if (clone)
+ sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
+
+ mutex_unlock(&priv->mgmt_lock);
+ }
+}
+
+/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
+ * which cannot be reconfigured at runtime. So a switch reset is required.
+ */
+static int sja1105_set_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ struct sja1105_l2_lookup_params_entry *l2_lookup_params;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_table *table;
+ unsigned int maxage;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+ l2_lookup_params = table->entries;
+
+ maxage = SJA1105_AGEING_TIME_MS(ageing_time);
+
+ if (l2_lookup_params->maxage == maxage)
+ return 0;
+
+ l2_lookup_params->maxage = maxage;
+
+ return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
+}
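+
+/* Worked example (assuming SJA1105_AGEING_TIME_MS() converts milliseconds
+ * into the hardware's 10 ms units): the bridge default ageing time of
+ * 300 seconds (300000 ms) maps to maxage = 30000.
+ */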
+
+static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(ds, port))
+ new_mtu += VLAN_HLEN;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[port].maxlen == new_mtu)
+ return 0;
+
+ policing[port].maxlen = new_mtu;
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
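+
+/* Worked example: a user MTU of 1500 becomes a MAXLEN of 1500 +
+ * VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4) = 1522 bytes on a user port, and
+ * another VLAN_HLEN (4) on top of that, i.e. 1526 bytes, on the CPU port,
+ * to make room for the DSA tag.
+ */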
+
+static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
+static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return sja1105_setup_tc_taprio(ds, port, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return sja1105_setup_tc_cbs(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
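+
+/* Usage sketch (illustrative only; the interface name is hypothetical):
+ * a time-aware schedule reaches this hook via something like
+ *   tc qdisc replace dev swp0 parent root taprio num_tc 8 \
+ *      map 0 1 2 3 4 5 6 7 0 0 0 0 0 0 0 0 \
+ *      queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
+ *      base-time 0 sched-entry S 01 100000 sched-entry S fe 100000 flags 2
+ * where "flags 2" requests full hardware offload.
+ */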
+
+/* We have a single mirror (@to) port, but can configure ingress and egress
+ * mirroring on all other (@from) ports.
+ * We need to allow mirroring rules only as long as the @to port is always the
+ * same, and we need to unset the @to port from mirr_port only when there is no
+ * mirroring rule that references it.
+ */
+static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
+ bool ingress, bool enabled)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_mac_config_entry *mac;
+ struct sja1105_table *table;
+ bool already_enabled;
+ u64 new_mirr_port;
+ int rc;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
+ if (already_enabled && enabled && general_params->mirr_port != to) {
+ dev_err(priv->ds->dev,
+ "Delete mirroring rules towards port %llu first\n",
+ general_params->mirr_port);
+ return -EBUSY;
+ }
+
+ new_mirr_port = to;
+ if (!enabled) {
+ bool keep = false;
+ int port;
+
+ /* Anybody still referencing mirr_port? */
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ if (mac[port].ing_mirr || mac[port].egr_mirr) {
+ keep = true;
+ break;
+ }
+ }
+ /* Unset already_enabled for next time */
+ if (!keep)
+ new_mirr_port = SJA1105_NUM_PORTS;
+ }
+ if (new_mirr_port != general_params->mirr_port) {
+ general_params->mirr_port = new_mirr_port;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
+ 0, general_params, true);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (ingress)
+ mac[from].ing_mirr = enabled;
+ else
+ mac[from].egr_mirr = enabled;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
+ &mac[from], true);
+}
+
+static int sja1105_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
+ ingress, true);
+}
+
+static void sja1105_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
+ mirror->ingress, false);
+}
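+
+/* Usage sketch (illustrative only; port names are hypothetical): mirroring
+ * is typically driven from tc-matchall, e.g.
+ *   tc qdisc add dev swp1 clsact
+ *   tc filter add dev swp1 ingress matchall skip-sw \
+ *      action mirred egress mirror dev swp2
+ * Per the constraint above, all rules must target the same mirror port
+ * until the last rule referencing it is deleted.
+ */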
+
+static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_policer_tc_entry *policer)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ /* In hardware, every 8 microseconds the credit level is incremented by
+ * the value of RATE bytes divided by 64, up to a maximum of SMAX
+ * bytes.
+ */
+ policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
+ 1000000);
+ policing[port].smax = policer->burst;
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
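+
+/* Worked example: policing to 100 Mbps means rate_bytes_per_sec = 12500000,
+ * so RATE = 512 * 12500000 / 1000000 = 6400. Cross-checking against the
+ * comment above: 6400 / 64 = 100 bytes credited every 8 us, which is
+ * 12.5 MB/s, i.e. 100 Mbps.
+ */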
+
+static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ policing[port].rate = SJA1105_RATE_MBPS(1000);
+ policing[port].smax = 65535;
+
+ sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
+static const struct dsa_switch_ops sja1105_switch_ops = {
+ .get_tag_protocol = sja1105_get_tag_protocol,
+ .setup = sja1105_setup,
+ .teardown = sja1105_teardown,
+ .set_ageing_time = sja1105_set_ageing_time,
+ .port_change_mtu = sja1105_change_mtu,
+ .port_max_mtu = sja1105_get_max_mtu,
+ .phylink_validate = sja1105_phylink_validate,
+ .phylink_mac_link_state = sja1105_mac_pcs_get_state,
+ .phylink_mac_config = sja1105_mac_config,
+ .phylink_mac_link_up = sja1105_mac_link_up,
+ .phylink_mac_link_down = sja1105_mac_link_down,
+ .get_strings = sja1105_get_strings,
+ .get_ethtool_stats = sja1105_get_ethtool_stats,
+ .get_sset_count = sja1105_get_sset_count,
+ .get_ts_info = sja1105_get_ts_info,
+ .port_enable = sja1105_port_enable,
+ .port_disable = sja1105_port_disable,
+ .port_fdb_dump = sja1105_fdb_dump,
+ .port_fdb_add = sja1105_fdb_add,
+ .port_fdb_del = sja1105_fdb_del,
+ .port_bridge_join = sja1105_bridge_join,
+ .port_bridge_leave = sja1105_bridge_leave,
+ .port_stp_state_set = sja1105_bridge_stp_state_set,
+ .port_vlan_prepare = sja1105_vlan_prepare,
+ .port_vlan_filtering = sja1105_vlan_filtering,
+ .port_vlan_add = sja1105_vlan_add,
+ .port_vlan_del = sja1105_vlan_del,
+ .port_mdb_prepare = sja1105_mdb_prepare,
+ .port_mdb_add = sja1105_mdb_add,
+ .port_mdb_del = sja1105_mdb_del,
+ .port_hwtstamp_get = sja1105_hwtstamp_get,
+ .port_hwtstamp_set = sja1105_hwtstamp_set,
+ .port_rxtstamp = sja1105_port_rxtstamp,
+ .port_txtstamp = sja1105_port_txtstamp,
+ .port_setup_tc = sja1105_port_setup_tc,
+ .port_mirror_add = sja1105_mirror_add,
+ .port_mirror_del = sja1105_mirror_del,
+ .port_policer_add = sja1105_port_policer_add,
+ .port_policer_del = sja1105_port_policer_del,
+ .cls_flower_add = sja1105_cls_flower_add,
+ .cls_flower_del = sja1105_cls_flower_del,
+ .cls_flower_stats = sja1105_cls_flower_stats,
+ .crosschip_bridge_join = sja1105_crosschip_bridge_join,
+ .crosschip_bridge_leave = sja1105_crosschip_bridge_leave,
+ .devlink_param_get = sja1105_devlink_param_get,
+ .devlink_param_set = sja1105_devlink_param_set,
+ .devlink_info_get = sja1105_devlink_info_get,
+};
+
+static const struct of_device_id sja1105_dt_ids[];
+
+static int sja1105_check_device_id(struct sja1105_private *priv)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
+ struct device *dev = &priv->spidev->dev;
+ const struct of_device_id *match;
+ u32 device_id;
+ u64 part_no;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
+ NULL);
+ if (rc < 0)
+ return rc;
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
+ SJA1105_SIZE_DEVICE_ID);
+ if (rc < 0)
+ return rc;
+
+ sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
+
+ for (match = sja1105_dt_ids; match->compatible[0]; match++) {
+ const struct sja1105_info *info = match->data;
+
+		/* Is the device that was probed in our match table at all? */
+ if (info->device_id != device_id || info->part_no != part_no)
+ continue;
+
+ /* But is it what's in the device tree? */
+ if (priv->info->device_id != device_id ||
+ priv->info->part_no != part_no) {
+ dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
+ priv->info->name, info->name);
+ /* It isn't. No problem, pick that up. */
+ priv->info = info;
+ }
+
+ return 0;
+ }
+
+ dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
+ device_id, part_no);
+
+ return -ENODEV;
+}
+
+static int sja1105_probe(struct spi_device *spi)
+{
+ struct sja1105_tagger_data *tagger_data;
+ struct device *dev = &spi->dev;
+ struct sja1105_private *priv;
+ struct dsa_switch *ds;
+ int rc, port;
+
+ if (!dev->of_node) {
+ dev_err(dev, "No DTS bindings for SJA1105 driver\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Configure the optional reset pin and bring up switch */
+ priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ dev_dbg(dev, "reset-gpios not defined, ignoring\n");
+ else
+ sja1105_hw_reset(priv->reset_gpio, 1, 1);
+
+ /* Populate our driver private structure (priv) based on
+ * the device tree node that was probed (spi)
+ */
+ priv->spidev = spi;
+ spi_set_drvdata(spi, priv);
+
+ /* Configure the SPI bus */
+ spi->bits_per_word = 8;
+ rc = spi_setup(spi);
+ if (rc < 0) {
+ dev_err(dev, "Could not init SPI\n");
+ return rc;
+ }
+
+ priv->info = of_device_get_match_data(dev);
+
+ /* Detect hardware device */
+ rc = sja1105_check_device_id(priv);
+ if (rc < 0) {
+ dev_err(dev, "Device ID check failed: %d\n", rc);
+ return rc;
+ }
+
+ dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
+
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ ds->dev = dev;
+ ds->num_ports = SJA1105_NUM_PORTS;
+ ds->ops = &sja1105_switch_ops;
+ ds->priv = priv;
+ priv->ds = ds;
+
+ tagger_data = &priv->tagger_data;
+
+ mutex_init(&priv->ptp_data.lock);
+ mutex_init(&priv->mgmt_lock);
+
+ priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
+ GFP_KERNEL);
+ if (!priv->dsa_8021q_ctx)
+ return -ENOMEM;
+
+ priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
+ priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
+ priv->dsa_8021q_ctx->ds = ds;
+
+ INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
+ INIT_LIST_HEAD(&priv->bridge_vlans);
+ INIT_LIST_HEAD(&priv->dsa_8021q_vlans);
+
+ sja1105_tas_setup(ds);
+ sja1105_flower_setup(ds);
+
+ rc = dsa_register_switch(priv->ds);
+ if (rc)
+ return rc;
+
+ if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
+ priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
+ sizeof(struct sja1105_cbs_entry),
+ GFP_KERNEL);
+ if (!priv->cbs) {
+ rc = -ENOMEM;
+ goto out_unregister_switch;
+ }
+ }
+
+ /* Connections between dsa_port and sja1105_port */
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *slave;
+ int subvlan;
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ dp->priv = sp;
+ sp->dp = dp;
+ sp->data = tagger_data;
+ slave = dp->slave;
+ kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
+ sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
+ slave->name);
+ if (IS_ERR(sp->xmit_worker)) {
+ rc = PTR_ERR(sp->xmit_worker);
+ dev_err(ds->dev,
+ "failed to create deferred xmit thread: %d\n",
+ rc);
+ goto out_destroy_workers;
+ }
+ skb_queue_head_init(&sp->xmit_queue);
+ sp->xmit_tpid = ETH_P_SJA1105;
+
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ sp->subvlan_map[subvlan] = VLAN_N_VID;
+ }
+
+ return 0;
+
+out_destroy_workers:
+ while (port-- > 0) {
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ kthread_destroy_worker(sp->xmit_worker);
+ }
+
+out_unregister_switch:
+ dsa_unregister_switch(ds);
+
+ return rc;
+}
+
+static int sja1105_remove(struct spi_device *spi)
+{
+ struct sja1105_private *priv = spi_get_drvdata(spi);
+
+ dsa_unregister_switch(priv->ds);
+ return 0;
+}
+
+static const struct of_device_id sja1105_dt_ids[] = {
+ { .compatible = "nxp,sja1105e", .data = &sja1105e_info },
+ { .compatible = "nxp,sja1105t", .data = &sja1105t_info },
+ { .compatible = "nxp,sja1105p", .data = &sja1105p_info },
+ { .compatible = "nxp,sja1105q", .data = &sja1105q_info },
+ { .compatible = "nxp,sja1105r", .data = &sja1105r_info },
+ { .compatible = "nxp,sja1105s", .data = &sja1105s_info },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
+
+static struct spi_driver sja1105_driver = {
+ .driver = {
+ .name = "sja1105",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(sja1105_dt_ids),
+ },
+ .probe = sja1105_probe,
+ .remove = sja1105_remove,
+};
+
+module_spi_driver(sja1105_driver);
+
+MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
+MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
+MODULE_DESCRIPTION("SJA1105 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
new file mode 100644
index 000000000..1b90570b2
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -0,0 +1,928 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/spi/spi.h>
+#include "sja1105.h"
+
+/* The adjfine API clamps ppb between [-32,768,000, 32,768,000], and
+ * therefore scaled_ppm between [-2,147,483,648, 2,147,483,647].
+ * Set the maximum supported ppb to a round value smaller than the maximum.
+ *
+ * In relative terms, this is a +/- 3.2% (0.968x to 1.032x) adjustment of
+ * the free-running counter.
+ */
+#define SJA1105_MAX_ADJ_PPB 32000000
+#define SJA1105_SIZE_PTP_CMD 4
+
+/* PTPSYNCTS has no interrupt or update mechanism, because the intended
+ * hardware use case is for the timestamp to be collected synchronously,
+ * immediately after the CAS_MASTER SJA1105 switch has performed a CASSYNC
+ * one-shot toggle (no return to level) on the PTP_CLK pin. When used as a
+ * generic extts source, the PTPSYNCTS register needs polling and a comparison
+ * with the old value. The polling interval is configured as the Nyquist rate
+ * of a signal with 50% duty cycle and 1 Hz frequency, which is sadly all that
+ * this hardware can do (but may be enough for some setups). Any signal
+ * toggling faster than 1 Hz will be missed, since there is no timestamp FIFO.
+ */
+#define SJA1105_EXTTS_INTERVAL (HZ / 6)
+
+/* This range is actually +/- SJA1105_MAX_ADJ_PPB
+ * divided by 1000 (ppb -> ppm) and with a 16-bit
+ * "fractional" part (actually fixed point).
+ * |
+ * v
+ * Convert scaled_ppm from the +/- ((10^6) << 16) range
+ * into the +/- (1 << 31) range.
+ *
+ * This forgoes a "ppb" numeric representation (up to NSEC_PER_SEC)
+ * and defines the scaling factor between scaled_ppm and the actual
+ * frequency adjustments of the PHC.
+ *
+ * ptpclkrate = scaled_ppm * 2^31 / (10^6 * 2^16)
+ * simplifies to
+ * ptpclkrate = scaled_ppm * 2^9 / 5^6
+ */
+#define SJA1105_CC_MULT_NUM (1 << 9)
+#define SJA1105_CC_MULT_DEM 15625
+#define SJA1105_CC_MULT 0x80000000
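+
+/* Worked example: a +1 ppm adjustment arrives as scaled_ppm = 1 << 16 =
+ * 65536, which maps to a ptpclkrate offset of 65536 * 512 / 15625 = 2147,
+ * i.e. roughly 2^31 / 10^6 (one millionth of the nominal rate), applied
+ * around the SJA1105_CC_MULT center value of 2^31.
+ */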
+
+enum sja1105_ptp_clk_mode {
+ PTP_ADD_MODE = 1,
+ PTP_SET_MODE = 0,
+};
+
+#define extts_to_data(t) \
+ container_of((t), struct sja1105_ptp_data, extts_timer)
+#define ptp_caps_to_data(d) \
+ container_of((d), struct sja1105_ptp_data, caps)
+#define ptp_data_to_sja1105(d) \
+ container_of((d), struct sja1105_private, ptp_data)
+
+/* Must be called only with priv->tagger_data.state bit
+ * SJA1105_HWTS_RX_EN cleared
+ */
+static int sja1105_change_rxtstamping(struct sja1105_private *priv,
+ bool on)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+ general_params->send_meta1 = on;
+ general_params->send_meta0 = on;
+
+ /* Initialize the meta state machine to a known state */
+ if (priv->tagger_data.stampable_skb) {
+ kfree_skb(priv->tagger_data.stampable_skb);
+ priv->tagger_data.stampable_skb = NULL;
+ }
+ ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
+
+ return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
+}
+
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+ bool rx_on;
+ int rc;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->ports[port].hwts_tx_en = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->ports[port].hwts_tx_en = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ rx_on = false;
+ break;
+ default:
+ rx_on = true;
+ break;
+ }
+
+ if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
+ clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+
+ rc = sja1105_change_rxtstamping(priv, rx_on);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "Failed to change RX timestamping: %d\n", rc);
+ return rc;
+ }
+ if (rx_on)
+ set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+ return 0;
+}
+
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ if (priv->ports[port].hwts_tx_en)
+ config.tx_type = HWTSTAMP_TX_ON;
+ else
+ config.tx_type = HWTSTAMP_TX_OFF;
+ if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+int sja1105_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ /* Called during cleanup */
+ if (!ptp_data->clock)
+ return -ENODEV;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
+ info->phc_index = ptp_clock_index(ptp_data->clock);
+ return 0;
+}
+
+void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_PTP_CMD;
+ /* No need to keep this as part of the structure */
+ u64 valid = 1;
+
+ sja1105_packing(buf, &valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->startptpcp, 28, 28, size, op);
+ sja1105_packing(buf, &cmd->stopptpcp, 27, 27, size, op);
+ sja1105_packing(buf, &cmd->resptp, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->corrclk4ts, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
+}
+
+void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_PTP_CMD;
+ /* No need to keep this as part of the structure */
+ u64 valid = 1;
+
+ sja1105_packing(buf, &valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->startptpcp, 28, 28, size, op);
+ sja1105_packing(buf, &cmd->stopptpcp, 27, 27, size, op);
+ sja1105_packing(buf, &cmd->resptp, 3, 3, size, op);
+ sja1105_packing(buf, &cmd->corrclk4ts, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
+}
+
+int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw)
+{
+ const struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
+ int rc;
+
+ if (rw == SPI_WRITE)
+ priv->info->ptp_cmd_packing(buf, cmd, PACK);
+
+ rc = sja1105_xfer_buf(priv, rw, regs->ptp_control, buf,
+ SJA1105_SIZE_PTP_CMD);
+
+ if (rw == SPI_READ)
+ priv->info->ptp_cmd_packing(buf, cmd, UNPACK);
+
+ return rc;
+}
+
+/* The switch returns partial timestamps (24 bits for SJA1105 E/T, which wrap
+ * around in ~0.134 seconds, and 32 bits for P/Q/R/S, wrapping around in
+ * ~34.36 seconds), counted in 8 ns ticks.
+ *
+ * This receives the RX or TX MAC timestamps, provided by hardware as
+ * the lower bits of the cycle counter, sampled at the time the timestamp was
+ * collected.
+ *
+ * To reconstruct into a full 64-bit-wide timestamp, the cycle counter is
+ * read and the high-order bits are filled in.
+ *
+ * Must be called within one wraparound period of the partial timestamp since
+ * it was generated by the MAC.
+ */
+static u64 sja1105_tstamp_reconstruct(struct dsa_switch *ds, u64 now,
+ u64 ts_partial)
+{
+ struct sja1105_private *priv = ds->priv;
+ u64 partial_tstamp_mask = CYCLECOUNTER_MASK(priv->info->ptp_ts_bits);
+ u64 ts_reconstructed;
+
+ ts_reconstructed = (now & ~partial_tstamp_mask) | ts_partial;
+
+ /* Check lower bits of current cycle counter against the timestamp.
+ * If the current cycle counter is lower than the partial timestamp,
+ * then wraparound surely occurred and must be accounted for.
+ */
+ if ((now & partial_tstamp_mask) <= ts_partial)
+ ts_reconstructed -= (partial_tstamp_mask + 1);
+
+ return ts_reconstructed;
+}
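+
+/* Worked example with an imaginary 8-bit partial timestamp (mask 0xff):
+ * if now = 0x1234 and ts_partial = 0x56, the direct splice gives 0x1256.
+ * But since 0x34 <= 0x56, the counter must have wrapped after the stamp
+ * was taken, so 0x100 is subtracted, yielding 0x1156.
+ */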
+
+/* Reads the SPI interface for an egress timestamp generated by the switch
+ * for frames sent using management routes.
+ *
+ * SJA1105 E/T layout of the 4-byte SPI payload:
+ *
+ * 31 23 15 7 0
+ * | | | | |
+ * +-----+-----+-----+ ^
+ * ^ |
+ * | |
+ * 24-bit timestamp Update bit
+ *
+ *
+ * SJA1105 P/Q/R/S layout of the 8-byte SPI payload:
+ *
+ * 31 23 15 7 0 63 55 47 39 32
+ * | | | | | | | | | |
+ * ^ +-----+-----+-----+-----+
+ * | ^
+ * | |
+ * Update bit 32-bit timestamp
+ *
+ * Notice that the update bit is in the same place.
+ * To have common code for E/T and P/Q/R/S for reading the timestamp,
+ * we need to juggle with the offset and the bit indices.
+ */
+static int sja1105_ptpegr_ts_poll(struct dsa_switch *ds, int port, u64 *ts)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ int tstamp_bit_start, tstamp_bit_end;
+ int timeout = 10;
+ u8 packed_buf[8];
+ u64 update;
+ int rc;
+
+ do {
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->ptpegr_ts[port],
+ packed_buf, priv->info->ptpegr_ts_bytes);
+ if (rc < 0)
+ return rc;
+
+ sja1105_unpack(packed_buf, &update, 0, 0,
+ priv->info->ptpegr_ts_bytes);
+ if (update)
+ break;
+
+ usleep_range(10, 50);
+ } while (--timeout);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ /* Point the end bit to the second 32-bit word on P/Q/R/S,
+ * no-op on E/T.
+ */
+ tstamp_bit_end = (priv->info->ptpegr_ts_bytes - 4) * 8;
+ /* Shift the 24-bit timestamp on E/T to be collected from 31:8.
+ * No-op on P/Q/R/S.
+ */
+ tstamp_bit_end += 32 - priv->info->ptp_ts_bits;
+ tstamp_bit_start = tstamp_bit_end + priv->info->ptp_ts_bits - 1;
+
+ *ts = 0;
+
+ sja1105_unpack(packed_buf, ts, tstamp_bit_start, tstamp_bit_end,
+ priv->info->ptpegr_ts_bytes);
+
+ return 0;
+}
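+
+/* Worked example of the index juggling above: on E/T, ptpegr_ts_bytes = 4
+ * and ptp_ts_bits = 24, so tstamp_bit_end = 0 + (32 - 24) = 8 and
+ * tstamp_bit_start = 31, i.e. bit field 31:8 of the first word. On
+ * P/Q/R/S, ptpegr_ts_bytes = 8 and ptp_ts_bits = 32, giving field 63:32,
+ * matching the layout diagrams above.
+ */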
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptpclkval_read(struct sja1105_private *priv, u64 *ticks,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return sja1105_xfer_u64(priv, SPI_READ, regs->ptpclkval, ticks,
+ ptp_sts);
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpclkval, &ticks,
+ ptp_sts);
+}
+
+static void sja1105_extts_poll(struct sja1105_private *priv)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct ptp_clock_event event;
+ u64 ptpsyncts = 0;
+ int rc;
+
+ rc = sja1105_xfer_u64(priv, SPI_READ, regs->ptpsyncts, &ptpsyncts,
+ NULL);
+ if (rc < 0)
+ dev_err_ratelimited(priv->ds->dev,
+ "Failed to read PTPSYNCTS: %d\n", rc);
+
+ if (ptpsyncts && ptp_data->ptpsyncts != ptpsyncts) {
+ event.index = 0;
+ event.type = PTP_CLOCK_EXTTS;
+ event.timestamp = ns_to_ktime(sja1105_ticks_to_ns(ptpsyncts));
+ ptp_clock_event(ptp_data->clock, &event);
+
+ ptp_data->ptpsyncts = ptpsyncts;
+ }
+}
+
+static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ struct dsa_switch *ds = priv->ds;
+ struct sk_buff *skb;
+
+ mutex_lock(&ptp_data->lock);
+
+ while ((skb = skb_dequeue(&ptp_data->skb_rxtstamp_queue)) != NULL) {
+ struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
+ u64 ticks, ts;
+ int rc;
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ kfree_skb(skb);
+ continue;
+ }
+
+ *shwt = (struct skb_shared_hwtstamps) {0};
+
+ ts = SJA1105_SKB_CB(skb)->meta_tstamp;
+ ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
+
+ shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+ netif_rx_ni(skb);
+ }
+
+ if (ptp_data->extts_enabled)
+ sja1105_extts_poll(priv);
+
+ mutex_unlock(&ptp_data->lock);
+
+ /* Don't restart */
+ return -1;
+}
+
+/* Called from dsa_skb_defer_rx_timestamp */
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (!test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
+ return false;
+
+ /* We need to read the full PTP clock to reconstruct the Rx
+ * timestamp. For that we need a sleepable context.
+ */
+ skb_queue_tail(&ptp_data->skb_rxtstamp_queue, skb);
+ ptp_schedule_worker(ptp_data->clock, 0);
+ return true;
+}
+
+/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
+ * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
+ * callback, where we will timestamp it synchronously.
+ */
+bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!sp->hwts_tx_en)
+ return false;
+
+ return true;
+}
+
+static int sja1105_ptp_reset(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sja1105_ptp_cmd cmd = ptp_data->cmd;
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ cmd.resptp = 1;
+
+ dev_dbg(ds->dev, "Resetting PTP clock\n");
+ rc = sja1105_ptp_commit(ds, &cmd, SPI_WRITE);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+/* Caller must hold ptp_data->lock */
+int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_private *priv = ds->priv;
+ u64 ticks;
+ int rc;
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, ptp_sts);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ return rc;
+ }
+
+ *ns = sja1105_ticks_to_ns(ticks);
+
+ return 0;
+}
+
+static int sja1105_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ u64 now = 0;
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = __sja1105_ptp_gettimex(priv->ds, &now, ptp_sts);
+ *ts = ns_to_timespec64(now);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptp_mode_set(struct sja1105_private *priv,
+ enum sja1105_ptp_clk_mode mode)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (ptp_data->cmd.ptpclkadd == mode)
+ return 0;
+
+ ptp_data->cmd.ptpclkadd = mode;
+
+ return sja1105_ptp_commit(priv->ds, &ptp_data->cmd, SPI_WRITE);
+}
+
+/* Write to PTPCLKVAL while PTPCLKADD is 0 */
+int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_private *priv = ds->priv;
+ u64 ticks = ns_to_sja1105_ticks(ns);
+ int rc;
+
+ rc = sja1105_ptp_mode_set(priv, PTP_SET_MODE);
+ if (rc < 0) {
+ dev_err(priv->ds->dev, "Failed to put PTPCLK in set mode\n");
+ return rc;
+ }
+
+ rc = sja1105_ptpclkval_write(priv, ticks, ptp_sts);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ return rc;
+}
+
+static int sja1105_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ u64 ns = timespec64_to_ns(ts);
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = __sja1105_ptp_settime(priv->ds, ns, NULL);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+static int sja1105_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 clkrate32;
+ s64 clkrate;
+ int rc;
+
+ clkrate = (s64)scaled_ppm * SJA1105_CC_MULT_NUM;
+ clkrate = div_s64(clkrate, SJA1105_CC_MULT_DEM);
+
+ /* Take a +/- value and re-center it around 2^31. */
+ clkrate = SJA1105_CC_MULT + clkrate;
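+ /* Illustrative numbers (assuming SJA1105_CC_MULT = 2^31 and
+ * SJA1105_CC_MULT_NUM / SJA1105_CC_MULT_DEM = 512 / 15625, i.e.
+ * 2^31 / (10^6 * 2^16)): scaled_ppm = 65536 (+1 ppm) maps to
+ * clkrate32 ~= 2^31 + 2147, and the PHC then runs at
+ * clkrate32 / 2^31 times the nominal frequency.
+ */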
+ WARN_ON(abs(clkrate) >= GENMASK_ULL(31, 0));
+ clkrate32 = clkrate;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkrate, &clkrate32,
+ NULL);
+
+ sja1105_tas_adjfreq(priv->ds);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+/* Write to PTPCLKVAL while PTPCLKADD is 1 */
+int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta)
+{
+ struct sja1105_private *priv = ds->priv;
+ s64 ticks = ns_to_sja1105_ticks(delta);
+ int rc;
+
+ rc = sja1105_ptp_mode_set(priv, PTP_ADD_MODE);
+ if (rc < 0) {
+ dev_err(priv->ds->dev, "Failed to put PTPCLK in add mode\n");
+ return rc;
+ }
+
+ rc = sja1105_ptpclkval_write(priv, ticks, NULL);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ return rc;
+}
+
+static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = __sja1105_ptp_adjtime(priv->ds, delta);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+static void sja1105_ptp_extts_setup_timer(struct sja1105_ptp_data *ptp_data)
+{
+ unsigned long expires = ((jiffies / SJA1105_EXTTS_INTERVAL) + 1) *
+ SJA1105_EXTTS_INTERVAL;
+
+ mod_timer(&ptp_data->extts_timer, expires);
+}
+
+static void sja1105_ptp_extts_timer(struct timer_list *t)
+{
+ struct sja1105_ptp_data *ptp_data = extts_to_data(t);
+
+ ptp_schedule_worker(ptp_data->clock, 0);
+
+ sja1105_ptp_extts_setup_timer(ptp_data);
+}
+
+static int sja1105_change_ptp_clk_pin_func(struct sja1105_private *priv,
+ enum ptp_pin_function func)
+{
+ struct sja1105_avb_params_entry *avb;
+ enum ptp_pin_function old_func;
+
+ avb = priv->static_config.tables[BLK_IDX_AVB_PARAMS].entries;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID ||
+ avb->cas_master)
+ old_func = PTP_PF_PEROUT;
+ else
+ old_func = PTP_PF_EXTTS;
+
+ if (func == old_func)
+ return 0;
+
+ avb->cas_master = (func == PTP_PF_PEROUT);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_AVB_PARAMS, 0, avb,
+ true);
+}
+
+/* The PTP_CLK pin may be configured to toggle with a 50% duty cycle and a
+ * frequency f:
+ *
+ * NSEC_PER_SEC
+ * f = ----------------------
+ * (PTPPINDUR * 8 ns) * 2
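+ *
+ * For instance (illustrative): PTPPINDUR = 62500000 gives a half-period
+ * of 62500000 * 8 ns = 0.5 s, hence f = 1 Hz.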
+ */
+static int sja1105_per_out_enable(struct sja1105_private *priv,
+ struct ptp_perout_request *perout,
+ bool on)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_ptp_cmd cmd = ptp_data->cmd;
+ int rc;
+
+ /* We only support one channel */
+ if (perout->index != 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (perout->flags)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_PEROUT);
+ if (rc)
+ goto out;
+
+ if (on) {
+ struct timespec64 pin_duration_ts = {
+ .tv_sec = perout->period.sec,
+ .tv_nsec = perout->period.nsec,
+ };
+ struct timespec64 pin_start_ts = {
+ .tv_sec = perout->start.sec,
+ .tv_nsec = perout->start.nsec,
+ };
+ u64 pin_duration = timespec64_to_ns(&pin_duration_ts);
+ u64 pin_start = timespec64_to_ns(&pin_start_ts);
+ u32 pin_duration32;
+ u64 now;
+
+ /* ptppindur: 32-bit register which holds the interval between
+ * 2 edges on PTP_CLK, so check for truncation, which happens
+ * at periods larger than around 68.7 seconds.
+ */
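+ /* (Illustrative arithmetic: 2 * U32_MAX ticks * 8 ns/tick is
+ * roughly 68.7 s, hence the limit above.)
+ */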
+ pin_duration = ns_to_sja1105_ticks(pin_duration / 2);
+ if (pin_duration > U32_MAX) {
+ rc = -ERANGE;
+ goto out;
+ }
+ pin_duration32 = pin_duration;
+
+ /* ptppinst: 64-bit register which needs to hold a PTP time
+ * larger than the current time, otherwise the startptpcp
+ * command won't do anything. So advance the current time
+ * by a number of periods in a way that won't alter the
+ * phase offset.
+ */
+ rc = __sja1105_ptp_gettimex(priv->ds, &now, NULL);
+ if (rc < 0)
+ goto out;
+
+ pin_start = future_base_time(pin_start, pin_duration,
+ now + 1ull * NSEC_PER_SEC);
+ pin_start = ns_to_sja1105_ticks(pin_start);
+
+ rc = sja1105_xfer_u64(priv, SPI_WRITE, regs->ptppinst,
+ &pin_start, NULL);
+ if (rc < 0)
+ goto out;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptppindur,
+ &pin_duration32, NULL);
+ if (rc < 0)
+ goto out;
+ }
+
+ if (on)
+ cmd.startptpcp = true;
+ else
+ cmd.stopptpcp = true;
+
+ rc = sja1105_ptp_commit(priv->ds, &cmd, SPI_WRITE);
+
+out:
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+static int sja1105_extts_enable(struct sja1105_private *priv,
+ struct ptp_extts_request *extts,
+ bool on)
+{
+ int rc;
+
+ /* We only support one channel */
+ if (extts->index != 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (extts->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* We can only enable time stamping on both edges, sadly. */
+ if ((extts->flags & PTP_STRICT_FLAGS) &&
+ (extts->flags & PTP_ENABLE_FEATURE) &&
+ (extts->flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
+ rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_EXTTS);
+ if (rc)
+ return rc;
+
+ priv->ptp_data.extts_enabled = on;
+
+ if (on)
+ sja1105_ptp_extts_setup_timer(&priv->ptp_data);
+ else
+ del_timer_sync(&priv->ptp_data.extts_timer);
+
+ return 0;
+}
+
+static int sja1105_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ int rc = -EOPNOTSUPP;
+
+ if (req->type == PTP_CLK_REQ_PEROUT)
+ rc = sja1105_per_out_enable(priv, &req->perout, on);
+ else if (req->type == PTP_CLK_REQ_EXTTS)
+ rc = sja1105_extts_enable(priv, &req->extts, on);
+
+ return rc;
+}
+
+static int sja1105_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+
+ if (chan != 0 || pin != 0)
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_EXTTS:
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return -1;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+static struct ptp_pin_desc sja1105_ptp_pin = {
+ .name = "ptp_clk",
+ .index = 0,
+ .func = PTP_PF_NONE,
+};
+
+int sja1105_ptp_clock_register(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ ptp_data->caps = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "SJA1105 PHC",
+ .adjfine = sja1105_ptp_adjfine,
+ .adjtime = sja1105_ptp_adjtime,
+ .gettimex64 = sja1105_ptp_gettimex,
+ .settime64 = sja1105_ptp_settime,
+ .enable = sja1105_ptp_enable,
+ .verify = sja1105_ptp_verify_pin,
+ .do_aux_work = sja1105_rxtstamp_work,
+ .max_adj = SJA1105_MAX_ADJ_PPB,
+ .pin_config = &sja1105_ptp_pin,
+ .n_pins = 1,
+ .n_ext_ts = 1,
+ .n_per_out = 1,
+ };
+
+ skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
+ spin_lock_init(&tagger_data->meta_lock);
+
+ ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return PTR_ERR(ptp_data->clock);
+
+ ptp_data->cmd.corrclk4ts = true;
+ ptp_data->cmd.ptpclkadd = PTP_SET_MODE;
+
+ timer_setup(&ptp_data->extts_timer, sja1105_ptp_extts_timer, 0);
+
+ return sja1105_ptp_reset(ds);
+}
+
+void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return;
+
+ del_timer_sync(&ptp_data->extts_timer);
+ ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
+ ptp_clock_unregister(ptp_data->clock);
+ ptp_data->clock = NULL;
+}
+
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port,
+ struct sk_buff *skb)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct skb_shared_hwtstamps shwt = {0};
+ u64 ticks, ts;
+ int rc;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_ptpegr_ts_poll(ds, port, &ts);
+ if (rc < 0) {
+ dev_err(ds->dev, "timed out polling for tstamp\n");
+ kfree_skb(skb);
+ goto out;
+ }
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ kfree_skb(skb);
+ goto out;
+ }
+
+ ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+ skb_complete_tx_timestamp(skb, &shwt);
+
+out:
+ mutex_unlock(&ptp_data->lock);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
new file mode 100644
index 000000000..3daa33e98
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_PTP_H
+#define _SJA1105_PTP_H
+
+#include <linux/timer.h>
+
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
+
+/* Timestamps are in units of 8 ns clock ticks (equivalent to
+ * a fixed 125 MHz clock).
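+ * For example, one second (1000000000 ns) converts to 125000000 ticks.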
+ */
+#define SJA1105_TICK_NS 8
+
+static inline s64 ns_to_sja1105_ticks(s64 ns)
+{
+ return ns / SJA1105_TICK_NS;
+}
+
+static inline s64 sja1105_ticks_to_ns(s64 ticks)
+{
+ return ticks * SJA1105_TICK_NS;
+}
+
+/* Calculate the first base_time in the future that satisfies this
+ * relationship:
+ *
+ * future_base_time = base_time + N x cycle_time >= now, or
+ *
+ * now - base_time
+ * N >= ---------------
+ * cycle_time
+ *
+ * Because N is an integer, the ceiling value of the above "a / b" ratio
+ * is in fact precisely the floor value of "(a + b - 1) / b", which is
+ * easier to calculate using only integer division.
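+ *
+ * Worked example (illustrative): base_time = 0, cycle_time = 3, now = 10
+ * gives N = (10 + 3 - 1) / 3 = 4, so the first cycle start in the future
+ * is 0 + 4 * 3 = 12 >= 10.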
+ */
+static inline s64 future_base_time(s64 base_time, s64 cycle_time, s64 now)
+{
+ s64 a, b, n;
+
+ if (base_time >= now)
+ return base_time;
+
+ a = now - base_time;
+ b = cycle_time;
+ n = div_s64(a + b - 1, b);
+
+ return base_time + n * cycle_time;
+}
+
+/* This is not a preprocessor macro because the "ns" argument may or may not be
+ * s64 at caller side. This ensures it is properly type-cast before div_s64.
+ */
+static inline s64 ns_to_sja1105_delta(s64 ns)
+{
+ return div_s64(ns, 200);
+}
+
+static inline s64 sja1105_delta_to_ns(s64 delta)
+{
+ return delta * 200;
+}
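+
+/* For example (illustrative), a 1 ms interval corresponds to a delta of
+ * 5000, since deltas are expressed in units of 200 ns.
+ */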
+
+struct sja1105_ptp_cmd {
+ u64 startptpcp; /* start toggling PTP_CLK pin */
+ u64 stopptpcp; /* stop toggling PTP_CLK pin */
+ u64 ptpstrtsch; /* start schedule */
+ u64 ptpstopsch; /* stop schedule */
+ u64 resptp; /* reset */
+ u64 corrclk4ts; /* use the corrected clock for timestamps */
+ u64 ptpclkadd; /* enum sja1105_ptp_clk_mode */
+};
+
+struct sja1105_ptp_data {
+ struct timer_list extts_timer;
+ struct sk_buff_head skb_rxtstamp_queue;
+ struct ptp_clock_info caps;
+ struct ptp_clock *clock;
+ struct sja1105_ptp_cmd cmd;
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+ bool extts_enabled;
+ u64 ptpsyncts;
+};
+
+int sja1105_ptp_clock_register(struct dsa_switch *ds);
+
+void sja1105_ptp_clock_unregister(struct dsa_switch *ds);
+
+void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
+
+void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
+
+int sja1105_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *ts);
+
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *clone);
+
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type);
+
+bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type);
+
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
+
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+
+int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *sts);
+
+int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts);
+
+int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta);
+
+int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw);
+
+#else
+
+struct sja1105_ptp_cmd;
+
+/* Structures cannot be empty in C. Bah!
+ * Keep the mutex as the only element, which is a bit more difficult to
+ * refactor out of sja1105_main.c anyway.
+ */
+struct sja1105_ptp_data {
+ struct mutex lock;
+};
+
+static inline int sja1105_ptp_clock_register(struct dsa_switch *ds)
+{
+ return 0;
+}
+
+static inline void sja1105_ptp_clock_unregister(struct dsa_switch *ds) { }
+
+static inline void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *clone)
+{
+}
+
+static inline int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *sts)
+{
+ return 0;
+}
+
+static inline int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ return 0;
+}
+
+static inline int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta)
+{
+ return 0;
+}
+
+static inline int sja1105_ptp_commit(struct dsa_switch *ds,
+ struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw)
+{
+ return 0;
+}
+
+#define sja1105et_ptp_cmd_packing NULL
+
+#define sja1105pqrs_ptp_cmd_packing NULL
+
+#define sja1105_get_ts_info NULL
+
+#define sja1105_port_rxtstamp NULL
+
+#define sja1105_port_txtstamp NULL
+
+#define sja1105_hwtstamp_get NULL
+
+#define sja1105_hwtstamp_set NULL
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
+
+#endif /* _SJA1105_PTP_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_sgmii.h b/drivers/net/dsa/sja1105/sja1105_sgmii.h
new file mode 100644
index 000000000..24d9bc046
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_sgmii.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright 2020, NXP Semiconductors
+ */
+#ifndef _SJA1105_SGMII_H
+#define _SJA1105_SGMII_H
+
+#define SJA1105_SGMII_PORT 4
+
+/* DIGITAL_CONTROL_1 (address 1f8000h) */
+#define SJA1105_DC1 0x8000
+#define SJA1105_DC1_VS_RESET BIT(15)
+#define SJA1105_DC1_REMOTE_LOOPBACK BIT(14)
+#define SJA1105_DC1_EN_VSMMD1 BIT(13)
+#define SJA1105_DC1_POWER_SAVE BIT(11)
+#define SJA1105_DC1_CLOCK_STOP_EN BIT(10)
+#define SJA1105_DC1_MAC_AUTO_SW BIT(9)
+#define SJA1105_DC1_INIT BIT(8)
+#define SJA1105_DC1_TX_DISABLE BIT(4)
+#define SJA1105_DC1_AUTONEG_TIMER_OVRR BIT(3)
+#define SJA1105_DC1_BYP_POWERUP BIT(1)
+#define SJA1105_DC1_PHY_MODE_CONTROL BIT(0)
+
+/* DIGITAL_CONTROL_2 register (address 1f80E1h) */
+#define SJA1105_DC2 0x80e1
+#define SJA1105_DC2_TX_POL_INV_DISABLE BIT(4)
+#define SJA1105_DC2_RX_POL_INV BIT(0)
+
+/* DIGITAL_ERROR_CNT register (address 1f80E2h) */
+#define SJA1105_DEC 0x80e2
+#define SJA1105_DEC_ICG_EC_ENA BIT(4)
+#define SJA1105_DEC_CLEAR_ON_READ BIT(0)
+
+/* AUTONEG_CONTROL register (address 1f8001h) */
+#define SJA1105_AC 0x8001
+#define SJA1105_AC_MII_CONTROL BIT(8)
+#define SJA1105_AC_SGMII_LINK BIT(4)
+#define SJA1105_AC_PHY_MODE BIT(3)
+#define SJA1105_AC_AUTONEG_MODE(x) (((x) << 1) & GENMASK(2, 1))
+#define SJA1105_AC_AUTONEG_MODE_SGMII SJA1105_AC_AUTONEG_MODE(2)
+
+/* AUTONEG_INTR_STATUS register (address 1f8002h) */
+#define SJA1105_AIS 0x8002
+#define SJA1105_AIS_LINK_STATUS(x) (!!((x) & BIT(4)))
+#define SJA1105_AIS_SPEED(x) (((x) & GENMASK(3, 2)) >> 2)
+#define SJA1105_AIS_DUPLEX_MODE(x) (!!((x) & BIT(1)))
+#define SJA1105_AIS_COMPLETE(x) (!!((x) & BIT(0)))
+
+/* DEBUG_CONTROL register (address 1f8005h) */
+#define SJA1105_DC 0x8005
+#define SJA1105_DC_SUPPRESS_LOS BIT(4)
+#define SJA1105_DC_RESTART_SYNC BIT(0)
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
new file mode 100644
index 000000000..591c57347
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/spi/spi.h>
+#include <linux/packing.h>
+#include "sja1105.h"
+
+#define SJA1105_SIZE_RESET_CMD 4
+#define SJA1105_SIZE_SPI_MSG_HEADER 4
+#define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4)
+
+struct sja1105_chunk {
+ u8 *buf;
+ size_t len;
+ u64 reg_addr;
+};
+
+static void
+sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
+{
+ const int size = SJA1105_SIZE_SPI_MSG_HEADER;
+
+ memset(buf, 0, size);
+
+ sja1105_pack(buf, &msg->access, 31, 31, size);
+ sja1105_pack(buf, &msg->read_count, 30, 25, size);
+ sja1105_pack(buf, &msg->address, 24, 4, size);
+}
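+
+/* Example encoding (illustrative, assuming SPI_READ packs as 0): a read
+ * of 2 words from register address 0x18 packs as
+ * (0 << 31) | (2 << 25) | (0x18 << 4) = 0x04000180.
+ */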
+
+#define sja1105_hdr_xfer(xfers, chunk) \
+ ((xfers) + 2 * (chunk))
+#define sja1105_chunk_xfer(xfers, chunk) \
+ ((xfers) + 2 * (chunk) + 1)
+#define sja1105_hdr_buf(hdr_bufs, chunk) \
+ ((hdr_bufs) + (chunk) * SJA1105_SIZE_SPI_MSG_HEADER)
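+
+/* The xfers array therefore interleaves headers and payloads:
+ * xfers[0] = header 0, xfers[1] = chunk 0, xfers[2] = header 1,
+ * xfers[3] = chunk 1, and so on.
+ */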
+
+/* If @rw is:
+ * - SPI_WRITE: creates and sends an SPI write message at absolute
+ * address reg_addr, taking @len bytes from *buf
+ * - SPI_READ: creates and sends an SPI read message from absolute
+ * address reg_addr, writing @len bytes into *buf
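+ *
+ * Transfers larger than SJA1105_SIZE_SPI_MSG_MAXLEN (256 bytes) are split
+ * into chunks: e.g. (illustrative) len = 300 yields a 256-byte chunk at
+ * reg_addr plus a 44-byte chunk at reg_addr + 64, since register addresses
+ * advance in units of 4-byte words.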
+ */
+static int sja1105_xfer(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u8 *buf,
+ size_t len, struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_chunk chunk = {
+ .len = min_t(size_t, len, SJA1105_SIZE_SPI_MSG_MAXLEN),
+ .reg_addr = reg_addr,
+ .buf = buf,
+ };
+ struct spi_device *spi = priv->spidev;
+ struct spi_transfer *xfers;
+ int num_chunks;
+ int rc, i = 0;
+ u8 *hdr_bufs;
+
+ num_chunks = DIV_ROUND_UP(len, SJA1105_SIZE_SPI_MSG_MAXLEN);
+
+ /* One transfer for each message header, one for each message
+ * payload (chunk).
+ */
+ xfers = kcalloc(2 * num_chunks, sizeof(struct spi_transfer),
+ GFP_KERNEL);
+ if (!xfers)
+ return -ENOMEM;
+
+ /* Packed buffers for the num_chunks SPI message headers,
+ * stored as a contiguous array
+ */
+ hdr_bufs = kcalloc(num_chunks, SJA1105_SIZE_SPI_MSG_HEADER,
+ GFP_KERNEL);
+ if (!hdr_bufs) {
+ kfree(xfers);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_chunks; i++) {
+ struct spi_transfer *chunk_xfer = sja1105_chunk_xfer(xfers, i);
+ struct spi_transfer *hdr_xfer = sja1105_hdr_xfer(xfers, i);
+ u8 *hdr_buf = sja1105_hdr_buf(hdr_bufs, i);
+ struct spi_transfer *ptp_sts_xfer;
+ struct sja1105_spi_message msg;
+
+ /* Populate the transfer's header buffer */
+ msg.address = chunk.reg_addr;
+ msg.access = rw;
+ if (rw == SPI_READ)
+ msg.read_count = chunk.len / 4;
+ else
+ /* Ignored */
+ msg.read_count = 0;
+ sja1105_spi_message_pack(hdr_buf, &msg);
+ hdr_xfer->tx_buf = hdr_buf;
+ hdr_xfer->len = SJA1105_SIZE_SPI_MSG_HEADER;
+
+ /* Populate the transfer's data buffer */
+ if (rw == SPI_READ)
+ chunk_xfer->rx_buf = chunk.buf;
+ else
+ chunk_xfer->tx_buf = chunk.buf;
+ chunk_xfer->len = chunk.len;
+
+ /* Request timestamping for the transfer. Instead of letting
+ * callers specify which byte they want to timestamp, we can
+ * make certain assumptions:
+ * - A read operation will request a software timestamp when
+ * what's being read is the PTP time. That is snapshotted by
+ * the switch hardware at the end of the command portion
+ * (hdr_xfer).
+ * - A write operation will request a software timestamp on
+ * actions that modify the PTP time. Taking clock stepping as
+ * an example, the switch writes the PTP time at the end of
+ * the data portion (chunk_xfer).
+ */
+ if (rw == SPI_READ)
+ ptp_sts_xfer = hdr_xfer;
+ else
+ ptp_sts_xfer = chunk_xfer;
+ ptp_sts_xfer->ptp_sts_word_pre = ptp_sts_xfer->len - 1;
+ ptp_sts_xfer->ptp_sts_word_post = ptp_sts_xfer->len - 1;
+ ptp_sts_xfer->ptp_sts = ptp_sts;
+
+ /* Calculate next chunk */
+ chunk.buf += chunk.len;
+ chunk.reg_addr += chunk.len / 4;
+ chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf),
+ SJA1105_SIZE_SPI_MSG_MAXLEN);
+
+ /* De-assert the chip select after each chunk. */
+ if (chunk.len)
+ chunk_xfer->cs_change = 1;
+ }
+
+ rc = spi_sync_transfer(spi, xfers, 2 * num_chunks);
+ if (rc < 0)
+ dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
+
+ kfree(hdr_bufs);
+ kfree(xfers);
+
+ return rc;
+}
+
+int sja1105_xfer_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u8 *buf, size_t len)
+{
+ return sja1105_xfer(priv, rw, reg_addr, buf, len, NULL);
+}
+
+/* If @rw is:
+ * - SPI_WRITE: creates and sends an SPI write message at absolute
+ * address reg_addr
+ * - SPI_READ: creates and sends an SPI read message from absolute
+ * address reg_addr
+ *
+ * The u64 *value is unpacked, meaning that it's stored in the native
+ * CPU endianness and directly usable by software running on the core.
+ */
+int sja1105_xfer_u64(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ u8 packed_buf[8];
+ int rc;
+
+ if (rw == SPI_WRITE)
+ sja1105_pack(packed_buf, value, 63, 0, 8);
+
+ rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 8, ptp_sts);
+
+ if (rw == SPI_READ)
+ sja1105_unpack(packed_buf, value, 63, 0, 8);
+
+ return rc;
+}
+
+/* Same as above, but transfers only a 4 byte word */
+int sja1105_xfer_u32(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ u8 packed_buf[4];
+ u64 tmp;
+ int rc;
+
+ if (rw == SPI_WRITE) {
+ /* The packing API only supports u64 as CPU word size,
+ * so we need to convert.
+ */
+ tmp = *value;
+ sja1105_pack(packed_buf, &tmp, 31, 0, 4);
+ }
+
+ rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 4, ptp_sts);
+
+ if (rw == SPI_READ) {
+ sja1105_unpack(packed_buf, &tmp, 31, 0, 4);
+ *value = tmp;
+ }
+
+ return rc;
+}
+
+static int sja1105et_reset_cmd(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
+ const int size = SJA1105_SIZE_RESET_CMD;
+ u64 cold_rst = 1;
+
+ sja1105_pack(packed_buf, &cold_rst, 3, 3, size);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
+ SJA1105_SIZE_RESET_CMD);
+}
+
+static int sja1105pqrs_reset_cmd(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
+ const int size = SJA1105_SIZE_RESET_CMD;
+ u64 cold_rst = 1;
+
+ sja1105_pack(packed_buf, &cold_rst, 2, 2, size);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
+ SJA1105_SIZE_RESET_CMD);
+}
+
+int sja1105_inhibit_tx(const struct sja1105_private *priv,
+ unsigned long port_bitmap, bool tx_inhibited)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 inhibit_cmd;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->port_control,
+ &inhibit_cmd, NULL);
+ if (rc < 0)
+ return rc;
+
+ if (tx_inhibited)
+ inhibit_cmd |= port_bitmap;
+ else
+ inhibit_cmd &= ~port_bitmap;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->port_control,
+ &inhibit_cmd, NULL);
+}
+
+struct sja1105_status {
+ u64 configs;
+ u64 crcchkl;
+ u64 ids;
+ u64 crcchkg;
+};
+
+/* This does not read the entire General Status area, which also differs
+ * between E/T and P/Q/R/S, but only the bits relevant for checking that
+ * the static config upload procedure was successful.
+ */
+static void sja1105_status_unpack(void *buf, struct sja1105_status *status)
+{
+ /* So that pointer addition advances in steps of 4 bytes */
+ u32 *p = buf;
+
+ /* device_id is missing from the buffer, but we don't
+ * want to diverge from the manual definition of the
+ * register addresses, so we'll back off one step with
+ * the register pointer, and never access p[0].
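+ * In other words, p[0x1] aliases buf[0], the word that was read from
+ * regs->status.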
+ */
+ p--;
+ sja1105_unpack(p + 0x1, &status->configs, 31, 31, 4);
+ sja1105_unpack(p + 0x1, &status->crcchkl, 30, 30, 4);
+ sja1105_unpack(p + 0x1, &status->ids, 29, 29, 4);
+ sja1105_unpack(p + 0x1, &status->crcchkg, 28, 28, 4);
+}
+
+static int sja1105_status_get(struct sja1105_private *priv,
+ struct sja1105_status *status)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[4];
+ int rc;
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->status, packed_buf, 4);
+ if (rc < 0)
+ return rc;
+
+ sja1105_status_unpack(packed_buf, status);
+
+ return 0;
+}
+
+/* Not const because unpacking priv->static_config into buffers and preparing
+ * for upload requires the recalculation of table CRCs and updating the
+ * structures with these.
+ */
+int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
+ void *config_buf, int buf_len)
+{
+ struct sja1105_static_config *config = &priv->static_config;
+ struct sja1105_table_header final_header;
+ sja1105_config_valid_t valid;
+ char *final_header_ptr;
+ int crc_len;
+
+ valid = sja1105_static_config_check_valid(config);
+ if (valid != SJA1105_CONFIG_OK) {
+ dev_err(&priv->spidev->dev,
+ sja1105_static_config_error_msg[valid]);
+ return -EINVAL;
+ }
+
+ /* Write Device ID and config tables to config_buf */
+ sja1105_static_config_pack(config_buf, config);
+ /* Recalculate CRC of the last header (right now 0xDEADBEEF).
+ * Don't include the CRC field itself.
+ */
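+ /* Illustrative layout of config_buf:
+ * [ device id | table 0 | ... | final header (block id, len, crc) ]
+ * where the global CRC covers everything except its own trailing
+ * 4 bytes.
+ */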
+ crc_len = buf_len - 4;
+ /* Read the whole table header */
+ final_header_ptr = config_buf + buf_len - SJA1105_SIZE_TABLE_HEADER;
+ sja1105_table_header_packing(final_header_ptr, &final_header, UNPACK);
+ /* Modify */
+ final_header.crc = sja1105_crc32(config_buf, crc_len);
+ /* Rewrite */
+ sja1105_table_header_packing(final_header_ptr, &final_header, PACK);
+
+ return 0;
+}
+
+#define RETRIES 10
+
+int sja1105_static_config_upload(struct sja1105_private *priv)
+{
+ unsigned long port_bitmap = GENMASK_ULL(SJA1105_NUM_PORTS - 1, 0);
+ struct sja1105_static_config *config = &priv->static_config;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = &priv->spidev->dev;
+ struct sja1105_status status;
+ int rc, retries = RETRIES;
+ u8 *config_buf;
+ int buf_len;
+
+ buf_len = sja1105_static_config_get_length(config);
+ config_buf = kcalloc(buf_len, sizeof(char), GFP_KERNEL);
+ if (!config_buf)
+ return -ENOMEM;
+
+ rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
+ if (rc < 0) {
+ dev_err(dev, "Invalid config, cannot upload\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ /* Prevent PHY jabbering during switch reset by inhibiting
+ * Tx on all ports and waiting for the current packet to drain.
+ * Otherwise, the PHY will see an unterminated Ethernet packet.
+ */
+ rc = sja1105_inhibit_tx(priv, port_bitmap, true);
+ if (rc < 0) {
+ dev_err(dev, "Failed to inhibit Tx on ports\n");
+ rc = -ENXIO;
+ goto out;
+ }
+ /* Wait for any in-flight egress packet to finish transmission
+ * (reach IFG). It is guaranteed that a second one will not
+ * follow, and that a switch cold reset is thus safe.
+ */
+ usleep_range(500, 1000);
+ do {
+ /* Put the SJA1105 in programming mode */
+ rc = priv->info->reset_cmd(priv->ds);
+ if (rc < 0) {
+ dev_err(dev, "Failed to reset switch, retrying...\n");
+ continue;
+ }
+ /* Wait for the switch to come out of reset */
+ usleep_range(1000, 5000);
+ /* Upload the static config to the device */
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->config,
+ config_buf, buf_len);
+ if (rc < 0) {
+ dev_err(dev, "Failed to upload config, retrying...\n");
+ continue;
+ }
+ /* Check that SJA1105 responded well to the config upload */
+ rc = sja1105_status_get(priv, &status);
+ if (rc < 0)
+ continue;
+
+ if (status.ids == 1) {
+ dev_err(dev, "Mismatch between hardware and static config "
+ "device id. Wrote 0x%llx, wants 0x%llx\n",
+ config->device_id, priv->info->device_id);
+ continue;
+ }
+ if (status.crcchkl == 1) {
+ dev_err(dev, "Switch reported invalid local CRC on "
+ "the uploaded config, retrying...\n");
+ continue;
+ }
+ if (status.crcchkg == 1) {
+ dev_err(dev, "Switch reported invalid global CRC on "
+ "the uploaded config, retrying...\n");
+ continue;
+ }
+ if (status.configs == 0) {
+ dev_err(dev, "Switch reported that configuration is "
+ "invalid, retrying...\n");
+ continue;
+ }
+ /* Success! */
+ break;
+ } while (--retries);
+
+ if (!retries) {
+ rc = -EIO;
+ dev_err(dev, "Failed to upload config to device, giving up\n");
+ goto out;
+ } else if (retries != RETRIES) {
+ dev_info(dev, "Succeeded after %d tried\n", RETRIES - retries);
+ }
+
+out:
+ kfree(config_buf);
+ return rc;
+}
+
+static struct sja1105_regs sja1105et_regs = {
+ .device_id = 0x0,
+ .prod_id = 0x100BC3,
+ .status = 0x1,
+ .port_control = 0x11,
+ .vl_status = 0x10000,
+ .config = 0x020000,
+ .rgu = 0x100440,
+ /* UM10944.pdf, Table 86, ACU Register overview */
+ .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
+ .rmii_pll1 = 0x10000A,
+ .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
+ .mac = {0x200, 0x202, 0x204, 0x206, 0x208},
+ .mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
+ .mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
+ /* UM10944.pdf, Table 78, CGU Register overview */
+ .mii_tx_clk = {0x100013, 0x10001A, 0x100021, 0x100028, 0x10002F},
+ .mii_rx_clk = {0x100014, 0x10001B, 0x100022, 0x100029, 0x100030},
+ .mii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
+ .mii_ext_rx_clk = {0x100019, 0x100020, 0x100027, 0x10002E, 0x100035},
+ .rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032},
+ .rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
+ .rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
+ .ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
+ .ptpschtm = 0x12, /* Spans 0x12 to 0x13 */
+ .ptppinst = 0x14,
+ .ptppindur = 0x16,
+ .ptp_control = 0x17,
+ .ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
+ .ptpclkrate = 0x1A,
+ .ptpclkcorp = 0x1D,
+};
+
+static struct sja1105_regs sja1105pqrs_regs = {
+ .device_id = 0x0,
+ .prod_id = 0x100BC3,
+ .status = 0x1,
+ .port_control = 0x12,
+ .vl_status = 0x10000,
+ .config = 0x020000,
+ .rgu = 0x100440,
+ /* UM10944.pdf, Table 86, ACU Register overview */
+ .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
+ .pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
+ .sgmii = 0x1F0000,
+ .rmii_pll1 = 0x10000A,
+ .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
+ .mac = {0x200, 0x202, 0x204, 0x206, 0x208},
+ .mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
+ .mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
+ .ether_stats = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460},
+ /* UM11040.pdf, Table 114 */
+ .mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B},
+ .mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C},
+ .mii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
+ .mii_ext_rx_clk = {0x100018, 0x10001E, 0x100024, 0x10002A, 0x100030},
+ .rgmii_tx_clk = {0x100016, 0x10001C, 0x100022, 0x100028, 0x10002E},
+ .rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D},
+ .rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
+ .qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
+ .ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
+ .ptpschtm = 0x13, /* Spans 0x13 to 0x14 */
+ .ptppinst = 0x15,
+ .ptppindur = 0x17,
+ .ptp_control = 0x18,
+ .ptpclkval = 0x19,
+ .ptpclkrate = 0x1B,
+ .ptpclkcorp = 0x1E,
+ .ptpsyncts = 0x1F,
+};
+
+const struct sja1105_info sja1105e_info = {
+ .device_id = SJA1105E_DEVICE_ID,
+ .part_no = SJA1105ET_PART_NO,
+ .static_ops = sja1105e_table_ops,
+ .dyn_ops = sja1105et_dyn_ops,
+ .qinq_tpid = ETH_P_8021Q,
+ .ptp_ts_bits = 24,
+ .ptpegr_ts_bytes = 4,
+ .num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
+ .reset_cmd = sja1105et_reset_cmd,
+ .fdb_add_cmd = sja1105et_fdb_add,
+ .fdb_del_cmd = sja1105et_fdb_del,
+ .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
+ .regs = &sja1105et_regs,
+ .name = "SJA1105E",
+};
+
+const struct sja1105_info sja1105t_info = {
+ .device_id = SJA1105T_DEVICE_ID,
+ .part_no = SJA1105ET_PART_NO,
+ .static_ops = sja1105t_table_ops,
+ .dyn_ops = sja1105et_dyn_ops,
+ .qinq_tpid = ETH_P_8021Q,
+ .ptp_ts_bits = 24,
+ .ptpegr_ts_bytes = 4,
+ .num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
+ .reset_cmd = sja1105et_reset_cmd,
+ .fdb_add_cmd = sja1105et_fdb_add,
+ .fdb_del_cmd = sja1105et_fdb_del,
+ .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
+ .regs = &sja1105et_regs,
+ .name = "SJA1105T",
+};
+
+const struct sja1105_info sja1105p_info = {
+ .device_id = SJA1105PR_DEVICE_ID,
+ .part_no = SJA1105P_PART_NO,
+ .static_ops = sja1105p_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .qinq_tpid = ETH_P_8021AD,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .regs = &sja1105pqrs_regs,
+ .name = "SJA1105P",
+};
+
+const struct sja1105_info sja1105q_info = {
+ .device_id = SJA1105QS_DEVICE_ID,
+ .part_no = SJA1105Q_PART_NO,
+ .static_ops = sja1105q_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .qinq_tpid = ETH_P_8021AD,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .regs = &sja1105pqrs_regs,
+ .name = "SJA1105Q",
+};
+
+const struct sja1105_info sja1105r_info = {
+ .device_id = SJA1105PR_DEVICE_ID,
+ .part_no = SJA1105R_PART_NO,
+ .static_ops = sja1105r_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .qinq_tpid = ETH_P_8021AD,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .regs = &sja1105pqrs_regs,
+ .name = "SJA1105R",
+};
+
+const struct sja1105_info sja1105s_info = {
+ .device_id = SJA1105QS_DEVICE_ID,
+ .part_no = SJA1105S_PART_NO,
+ .static_ops = sja1105s_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .regs = &sja1105pqrs_regs,
+ .qinq_tpid = ETH_P_8021AD,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .name = "SJA1105S",
+};
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
new file mode 100644
index 000000000..139b7b4fb
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -0,0 +1,1470 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105_static_config.h"
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+/* Convenience wrappers over the generic packing functions. These take into
+ * account the SJA1105 memory layout quirks and provide some level of
+ * programmer protection against incorrect API use. The errors are not expected
+ * to occur at runtime, so printing and swallowing them here is
+ * appropriate instead of cluttering up higher-level code.
+ */
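+
+/* Layout illustration (an assumption about the generic packing API, not
+ * taken from this file): with QUIRK_LSW32_IS_FIRST, the least significant
+ * 32-bit word of a multi-word field is stored first, each word in
+ * big-endian byte order, so 0x1122334455667788 packs as
+ * 55 66 77 88 11 22 33 44.
+ */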
+void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len)
+{
+ int rc = packing(buf, (u64 *)val, start, end, len,
+ PACK, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL) {
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ } else if (rc == -ERANGE) {
+ if ((start - end + 1) > 64)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ else
+ pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
+ *val, start, end);
+ }
+ dump_stack();
+}
+
+void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len)
+{
+ int rc = packing((void *)buf, val, start, end, len,
+ UNPACK, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL)
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ else if (rc == -ERANGE)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ dump_stack();
+}
+
+void sja1105_packing(void *buf, u64 *val, int start, int end,
+ size_t len, enum packing_op op)
+{
+ int rc = packing(buf, val, start, end, len, op, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL) {
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ } else if (rc == -ERANGE) {
+ if ((start - end + 1) > 64)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ else
+ pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
+ *val, start, end);
+ }
+ dump_stack();
+}
+
+/* Little-endian Ethernet CRC32 of data packed as big-endian u32 words */
+u32 sja1105_crc32(const void *buf, size_t len)
+{
+ unsigned int i;
+ u64 word;
+ u32 crc;
+
+ /* seed */
+ crc = ~0;
+ for (i = 0; i < len; i += 4) {
+ sja1105_unpack((void *)buf + i, &word, 31, 0, 4);
+ crc = crc32_le(crc, (u8 *)&word, 4);
+ }
+ return ~crc;
+}
+
+static size_t sja1105et_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY;
+ struct sja1105_avb_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->destmeta, 95, 48, size, op);
+ sja1105_packing(buf, &entry->srcmeta, 47, 0, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
+ struct sja1105_avb_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->cas_master, 126, 126, size, op);
+ sja1105_packing(buf, &entry->destmeta, 125, 78, size, op);
+ sja1105_packing(buf, &entry->srcmeta, 77, 30, size, op);
+ return size;
+}
+
+static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY;
+ struct sja1105_general_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vllupformat, 319, 319, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 318, 318, size, op);
+ sja1105_packing(buf, &entry->switchid, 317, 315, size, op);
+ sja1105_packing(buf, &entry->hostprio, 314, 312, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 311, 264, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 263, 216, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 215, 168, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 167, 120, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 119, 119, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 118, 118, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 117, 117, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 116, 116, size, op);
+ sja1105_packing(buf, &entry->casc_port, 115, 113, size, op);
+ sja1105_packing(buf, &entry->host_port, 112, 110, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 109, 107, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 106, 75, size, op);
+ sja1105_packing(buf, &entry->vlmask, 74, 43, size, op);
+ sja1105_packing(buf, &entry->tpid, 42, 27, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 26, 26, size, op);
+ sja1105_packing(buf, &entry->tpid2, 25, 10, size, op);
+ return size;
+}
+
+/* TPID and TPID2 are intentionally reversed so that semantic
+ * compatibility with E/T is kept.
+ */
+size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY;
+ struct sja1105_general_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vllupformat, 351, 351, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 350, 350, size, op);
+ sja1105_packing(buf, &entry->switchid, 349, 347, size, op);
+ sja1105_packing(buf, &entry->hostprio, 346, 344, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 343, 296, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 295, 248, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 247, 200, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 199, 152, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 151, 151, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 150, 150, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 149, 149, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 148, 148, size, op);
+ sja1105_packing(buf, &entry->casc_port, 147, 145, size, op);
+ sja1105_packing(buf, &entry->host_port, 144, 142, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op);
+ sja1105_packing(buf, &entry->vlmask, 106, 75, size, op);
+ sja1105_packing(buf, &entry->tpid2, 74, 59, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op);
+ sja1105_packing(buf, &entry->tpid, 57, 42, size, op);
+ sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op);
+ sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op);
+ sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
+ sja1105_packing(buf, &entry->egrmirrdei, 25, 25, size, op);
+ sja1105_packing(buf, &entry->replay_port, 24, 22, size, op);
+ return size;
+}
+
+static size_t
+sja1105_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY;
+ struct sja1105_l2_forwarding_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->max_dynp, 95, 93, size, op);
+ for (i = 0, offset = 13; i < 8; i++, offset += 10)
+ sja1105_packing(buf, &entry->part_spc[i],
+ offset + 9, offset + 0, size, op);
+ return size;
+}
+
+size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ struct sja1105_l2_forwarding_entry *entry = entry_ptr;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->bc_domain, 63, 59, size, op);
+ sja1105_packing(buf, &entry->reach_port, 58, 54, size, op);
+ sja1105_packing(buf, &entry->fl_domain, 53, 49, size, op);
+ for (i = 0, offset = 25; i < 8; i++, offset += 3)
+ sja1105_packing(buf, &entry->vlan_pmap[i],
+ offset + 2, offset + 0, size, op);
+ return size;
+}
+
+static size_t
+sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->maxage, 31, 17, size, op);
+ sja1105_packing(buf, &entry->dyn_tbsz, 16, 14, size, op);
+ sja1105_packing(buf, &entry->poly, 13, 6, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 5, 5, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 4, 4, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 3, 3, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 58; i < 5; i++, offset += 11)
+ sja1105_packing(buf, &entry->maxaddrp[i],
+ offset + 10, offset + 0, size, op);
+ sja1105_packing(buf, &entry->maxage, 57, 43, size, op);
+ sja1105_packing(buf, &entry->start_dynspc, 42, 33, size, op);
+ sja1105_packing(buf, &entry->drpnolearn, 32, 28, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 27, 27, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 26, 26, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 25, 25, size, op);
+ sja1105_packing(buf, &entry->use_static, 24, 24, size, op);
+ sja1105_packing(buf, &entry->owr_dyn, 23, 23, size, op);
+ sja1105_packing(buf, &entry->learn_once, 22, 22, size, op);
+ return size;
+}
+
+size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vlanid, 95, 84, size, op);
+ sja1105_packing(buf, &entry->macaddr, 83, 36, size, op);
+ sja1105_packing(buf, &entry->destports, 35, 31, size, op);
+ sja1105_packing(buf, &entry->enfport, 30, 30, size, op);
+ sja1105_packing(buf, &entry->index, 29, 20, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ if (entry->lockeds) {
+ sja1105_packing(buf, &entry->tsreg, 159, 159, size, op);
+ sja1105_packing(buf, &entry->mirrvlan, 158, 147, size, op);
+ sja1105_packing(buf, &entry->takets, 146, 146, size, op);
+ sja1105_packing(buf, &entry->mirr, 145, 145, size, op);
+ sja1105_packing(buf, &entry->retag, 144, 144, size, op);
+ } else {
+ sja1105_packing(buf, &entry->touched, 159, 159, size, op);
+ sja1105_packing(buf, &entry->age, 158, 144, size, op);
+ }
+ sja1105_packing(buf, &entry->mask_iotag, 143, 143, size, op);
+ sja1105_packing(buf, &entry->mask_vlanid, 142, 131, size, op);
+ sja1105_packing(buf, &entry->mask_macaddr, 130, 83, size, op);
+ sja1105_packing(buf, &entry->iotag, 82, 82, size, op);
+ sja1105_packing(buf, &entry->vlanid, 81, 70, size, op);
+ sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
+ sja1105_packing(buf, &entry->destports, 21, 17, size, op);
+ sja1105_packing(buf, &entry->enfport, 16, 16, size, op);
+ sja1105_packing(buf, &entry->index, 15, 6, size, op);
+ return size;
+}
+
+static size_t sja1105_l2_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_POLICING_ENTRY;
+ struct sja1105_l2_policing_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->sharindx, 63, 58, size, op);
+ sja1105_packing(buf, &entry->smax, 57, 42, size, op);
+ sja1105_packing(buf, &entry->rate, 41, 26, size, op);
+ sja1105_packing(buf, &entry->maxlen, 25, 15, size, op);
+ sja1105_packing(buf, &entry->partition, 14, 12, size, op);
+ return size;
+}
+
+static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 72; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->ifg, 71, 67, size, op);
+ sja1105_packing(buf, &entry->speed, 66, 65, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 64, 49, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 48, 33, size, op);
+ sja1105_packing(buf, &entry->maxage, 32, 25, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 24, 22, size, op);
+ sja1105_packing(buf, &entry->vlanid, 21, 10, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 9, 9, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 8, 8, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 7, 7, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 6, 6, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 5, 5, size, op);
+ sja1105_packing(buf, &entry->retag, 4, 4, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 3, 3, size, op);
+ sja1105_packing(buf, &entry->egress, 2, 2, size, op);
+ sja1105_packing(buf, &entry->ingress, 1, 1, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 104; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->ifg, 103, 99, size, op);
+ sja1105_packing(buf, &entry->speed, 98, 97, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 96, 81, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 80, 65, size, op);
+ sja1105_packing(buf, &entry->maxage, 64, 57, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 56, 54, size, op);
+ sja1105_packing(buf, &entry->vlanid, 53, 42, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 41, 41, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 40, 40, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 39, 39, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 38, 38, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 35, 35, size, op);
+ sja1105_packing(buf, &entry->retag, 34, 34, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 33, 33, size, op);
+ sja1105_packing(buf, &entry->egress, 32, 32, size, op);
+ sja1105_packing(buf, &entry->ingress, 31, 31, size, op);
+ return size;
+}
+
+static size_t
+sja1105_schedule_entry_points_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_entry_points_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY;
+
+ sja1105_packing(buf, &entry->clksrc, 31, 30, size, op);
+ sja1105_packing(buf, &entry->actsubsch, 29, 27, size, op);
+ return size;
+}
+
+static size_t
+sja1105_schedule_entry_points_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_entry_points_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY;
+
+ sja1105_packing(buf, &entry->subschindx, 31, 29, size, op);
+ sja1105_packing(buf, &entry->delta, 28, 11, size, op);
+ sja1105_packing(buf, &entry->address, 10, 1, size, op);
+ return size;
+}
+
+static size_t sja1105_schedule_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY;
+ struct sja1105_schedule_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 16; i < 8; i++, offset += 10)
+ sja1105_packing(buf, &entry->subscheind[i],
+ offset + 9, offset + 0, size, op);
+ return size;
+}
+
+static size_t sja1105_schedule_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY;
+ struct sja1105_schedule_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->winstindex, 63, 54, size, op);
+ sja1105_packing(buf, &entry->winend, 53, 53, size, op);
+ sja1105_packing(buf, &entry->winst, 52, 52, size, op);
+ sja1105_packing(buf, &entry->destports, 51, 47, size, op);
+ sja1105_packing(buf, &entry->setvalid, 46, 46, size, op);
+ sja1105_packing(buf, &entry->txen, 45, 45, size, op);
+ sja1105_packing(buf, &entry->resmedia_en, 44, 44, size, op);
+ sja1105_packing(buf, &entry->resmedia, 43, 36, size, op);
+ sja1105_packing(buf, &entry->vlindex, 35, 26, size, op);
+ sja1105_packing(buf, &entry->delta, 25, 8, size, op);
+ return size;
+}
+
+static size_t
+sja1105_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 16; i < 8; i++, offset += 10)
+ sja1105_packing(buf, &entry->partspc[i],
+ offset + 9, offset + 0, size, op);
+ sja1105_packing(buf, &entry->debugen, 15, 15, size, op);
+ return size;
+}
+
+static size_t sja1105_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 31, 31, size, op);
+ sja1105_packing(buf, &entry->priority, 30, 28, size, op);
+ sja1105_packing(buf, &entry->partition, 27, 25, size, op);
+ sja1105_packing(buf, &entry->destports, 24, 20, size, op);
+ return size;
+}
+
+size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_LOOKUP_ENTRY;
+
+ if (entry->format == SJA1105_VL_FORMAT_PSFP) {
+ /* Interpreting vllupformat as 0 */
+ sja1105_packing(buf, &entry->destports,
+ 95, 91, size, op);
+ sja1105_packing(buf, &entry->iscritical,
+ 90, 90, size, op);
+ sja1105_packing(buf, &entry->macaddr,
+ 89, 42, size, op);
+ sja1105_packing(buf, &entry->vlanid,
+ 41, 30, size, op);
+ sja1105_packing(buf, &entry->port,
+ 29, 27, size, op);
+ sja1105_packing(buf, &entry->vlanprior,
+ 26, 24, size, op);
+ } else {
+ /* Interpreting vllupformat as 1 */
+ sja1105_packing(buf, &entry->egrmirr,
+ 95, 91, size, op);
+ sja1105_packing(buf, &entry->ingrmirr,
+ 90, 90, size, op);
+ sja1105_packing(buf, &entry->vlid,
+ 57, 42, size, op);
+ sja1105_packing(buf, &entry->port,
+ 29, 27, size, op);
+ }
+ return size;
+}
+
+static size_t sja1105_vl_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_policing_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_POLICING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 63, 63, size, op);
+ sja1105_packing(buf, &entry->maxlen, 62, 52, size, op);
+ sja1105_packing(buf, &entry->sharindx, 51, 42, size, op);
+ if (entry->type == 0) {
+ sja1105_packing(buf, &entry->bag, 41, 28, size, op);
+ sja1105_packing(buf, &entry->jitter, 27, 18, size, op);
+ }
+ return size;
+}
+
+size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY;
+ struct sja1105_vlan_lookup_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->ving_mirr, 63, 59, size, op);
+ sja1105_packing(buf, &entry->vegr_mirr, 58, 54, size, op);
+ sja1105_packing(buf, &entry->vmemb_port, 53, 49, size, op);
+ sja1105_packing(buf, &entry->vlan_bc, 48, 44, size, op);
+ sja1105_packing(buf, &entry->tag_port, 43, 39, size, op);
+ sja1105_packing(buf, &entry->vlanid, 38, 27, size, op);
+ return size;
+}
+
+static size_t sja1105_xmii_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_XMII_PARAMS_ENTRY;
+ struct sja1105_xmii_params_entry *entry = entry_ptr;
+ int offset, i;
+
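+	/* Each port occupies 3 consecutive bits starting at bit 17: a 2-bit
+	 * xMII mode selector plus the 1-bit phy_mac role flag.
+	 */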
+ for (i = 0, offset = 17; i < 5; i++, offset += 3) {
+ sja1105_packing(buf, &entry->xmii_mode[i],
+ offset + 1, offset + 0, size, op);
+ sja1105_packing(buf, &entry->phy_mac[i],
+ offset + 2, offset + 2, size, op);
+ }
+ return size;
+}
+
+size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_retagging_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_RETAGGING_ENTRY;
+
+ sja1105_packing(buf, &entry->egr_port, 63, 59, size, op);
+ sja1105_packing(buf, &entry->ing_port, 58, 54, size, op);
+ sja1105_packing(buf, &entry->vlan_ing, 53, 42, size, op);
+ sja1105_packing(buf, &entry->vlan_egr, 41, 30, size, op);
+ sja1105_packing(buf, &entry->do_not_learn, 29, 29, size, op);
+ sja1105_packing(buf, &entry->use_dest_ports, 28, 28, size, op);
+ sja1105_packing(buf, &entry->destports, 27, 23, size, op);
+ return size;
+}
+
+size_t sja1105_table_header_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_TABLE_HEADER;
+ struct sja1105_table_header *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->block_id, 31, 24, size, op);
+ sja1105_packing(buf, &entry->len, 55, 32, size, op);
+ sja1105_packing(buf, &entry->crc, 95, 64, size, op);
+ return size;
+}
+
+/* WARNING: the *hdr pointer is really non-const, because this function
+ * modifies the CRC field of the header as part of a 2-stage packing
+ * operation
+ */
+void
+sja1105_table_header_pack_with_crc(void *buf, struct sja1105_table_header *hdr)
+{
+	/* First pack the header as-is, then calculate the CRC over
+	 * everything except the CRC field itself, and finally put the
+	 * proper CRC into the packed buffer
+	 */
+ memset(buf, 0, SJA1105_SIZE_TABLE_HEADER);
+ sja1105_table_header_packing(buf, hdr, PACK);
+ hdr->crc = sja1105_crc32(buf, SJA1105_SIZE_TABLE_HEADER - 4);
+ sja1105_pack(buf + SJA1105_SIZE_TABLE_HEADER - 4, &hdr->crc, 31, 0, 4);
+}
+
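+/* Computes the CRC32 over the packed table entries between @table_start and
+ * @crc_ptr, then packs the result into the 4 bytes at @crc_ptr.
+ */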
+static void sja1105_table_write_crc(u8 *table_start, u8 *crc_ptr)
+{
+ u64 computed_crc;
+ int len_bytes;
+
+ len_bytes = (uintptr_t)(crc_ptr - table_start);
+ computed_crc = sja1105_crc32(table_start, len_bytes);
+ sja1105_pack(crc_ptr, &computed_crc, 31, 0, 4);
+}
+
+/* The block IDs that the switches support are unfortunately sparse, so keep a
+ * mapping table to "block indices" and translate back and forth so that we
+ * don't waste memory in struct sja1105_static_config.
+ * Also, since the block ID comes from essentially untrusted input (unpacking
+ * the static config from userspace), it has to be sanitized (range-checked)
+ * before blindly indexing kernel memory with the blk_idx.
+ */
+static u64 blk_id_map[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = BLKID_SCHEDULE,
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = BLKID_SCHEDULE_ENTRY_POINTS,
+ [BLK_IDX_VL_LOOKUP] = BLKID_VL_LOOKUP,
+ [BLK_IDX_VL_POLICING] = BLKID_VL_POLICING,
+ [BLK_IDX_VL_FORWARDING] = BLKID_VL_FORWARDING,
+ [BLK_IDX_L2_LOOKUP] = BLKID_L2_LOOKUP,
+ [BLK_IDX_L2_POLICING] = BLKID_L2_POLICING,
+ [BLK_IDX_VLAN_LOOKUP] = BLKID_VLAN_LOOKUP,
+ [BLK_IDX_L2_FORWARDING] = BLKID_L2_FORWARDING,
+ [BLK_IDX_MAC_CONFIG] = BLKID_MAC_CONFIG,
+ [BLK_IDX_SCHEDULE_PARAMS] = BLKID_SCHEDULE_PARAMS,
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = BLKID_SCHEDULE_ENTRY_POINTS_PARAMS,
+ [BLK_IDX_VL_FORWARDING_PARAMS] = BLKID_VL_FORWARDING_PARAMS,
+ [BLK_IDX_L2_LOOKUP_PARAMS] = BLKID_L2_LOOKUP_PARAMS,
+ [BLK_IDX_L2_FORWARDING_PARAMS] = BLKID_L2_FORWARDING_PARAMS,
+ [BLK_IDX_AVB_PARAMS] = BLKID_AVB_PARAMS,
+ [BLK_IDX_GENERAL_PARAMS] = BLKID_GENERAL_PARAMS,
+ [BLK_IDX_RETAGGING] = BLKID_RETAGGING,
+ [BLK_IDX_XMII_PARAMS] = BLKID_XMII_PARAMS,
+};
+
+const char *sja1105_static_config_error_msg[] = {
+ [SJA1105_CONFIG_OK] = "",
+ [SJA1105_TTETHERNET_NOT_SUPPORTED] =
+ "schedule-table present, but TTEthernet is "
+ "only supported on T and Q/S",
+ [SJA1105_INCORRECT_TTETHERNET_CONFIGURATION] =
+ "schedule-table present, but one of "
+ "schedule-entry-points-table, schedule-parameters-table or "
+		"schedule-entry-points-parameters-table is empty",
+ [SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION] =
+ "vl-lookup-table present, but one of vl-policing-table, "
+ "vl-forwarding-table or vl-forwarding-parameters-table is empty",
+ [SJA1105_MISSING_L2_POLICING_TABLE] =
+ "l2-policing-table needs to have at least one entry",
+ [SJA1105_MISSING_L2_FORWARDING_TABLE] =
+ "l2-forwarding-table is either missing or incomplete",
+ [SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE] =
+ "l2-forwarding-parameters-table is missing",
+ [SJA1105_MISSING_GENERAL_PARAMS_TABLE] =
+ "general-parameters-table is missing",
+ [SJA1105_MISSING_VLAN_TABLE] =
+ "vlan-lookup-table needs to have at least the default untagged VLAN",
+ [SJA1105_MISSING_XMII_TABLE] =
+ "xmii-table is missing",
+ [SJA1105_MISSING_MAC_TABLE] =
+ "mac-configuration-table needs to contain an entry for each port",
+ [SJA1105_OVERCOMMITTED_FRAME_MEMORY] =
+ "Not allowed to overcommit frame memory. L2 memory partitions "
+ "and VL memory partitions share the same space. The sum of all "
+ "16 memory partitions is not allowed to be larger than 929 "
+ "128-byte blocks (or 910 with retagging). Please adjust "
+ "l2-forwarding-parameters-table.part_spc and/or "
+ "vl-forwarding-parameters-table.partspc.",
+};
+
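+/* L2 and VL memory partitions share the switch's frame memory, so their
+ * sizes are summed and checked against a single ceiling, which is lower
+ * when the retagging table is in use. As a hypothetical example, eight L2
+ * partitions of 110 blocks each (880 total) plus a 40-block VL partition
+ * sum to 920 blocks: that fits the 929-block limit, but not the 910-block
+ * limit that applies with retagging enabled.
+ */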
+static sja1105_config_valid_t
+static_config_check_memory_size(const struct sja1105_table *tables)
+{
+ const struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
+ const struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
+ int i, max_mem, mem = 0;
+
+ l2_fwd_params = tables[BLK_IDX_L2_FORWARDING_PARAMS].entries;
+
+ for (i = 0; i < 8; i++)
+ mem += l2_fwd_params->part_spc[i];
+
+ if (tables[BLK_IDX_VL_FORWARDING_PARAMS].entry_count) {
+ vl_fwd_params = tables[BLK_IDX_VL_FORWARDING_PARAMS].entries;
+ for (i = 0; i < 8; i++)
+ mem += vl_fwd_params->partspc[i];
+ }
+
+ if (tables[BLK_IDX_RETAGGING].entry_count)
+ max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
+ else
+ max_mem = SJA1105_MAX_FRAME_MEMORY;
+
+ if (mem > max_mem)
+ return SJA1105_OVERCOMMITTED_FRAME_MEMORY;
+
+ return SJA1105_CONFIG_OK;
+}
+
+sja1105_config_valid_t
+sja1105_static_config_check_valid(const struct sja1105_static_config *config)
+{
+ const struct sja1105_table *tables = config->tables;
+#define IS_FULL(blk_idx) \
+ (tables[blk_idx].entry_count == tables[blk_idx].ops->max_entry_count)
+
+ if (tables[BLK_IDX_SCHEDULE].entry_count) {
+ if (config->device_id != SJA1105T_DEVICE_ID &&
+ config->device_id != SJA1105QS_DEVICE_ID)
+ return SJA1105_TTETHERNET_NOT_SUPPORTED;
+
+ if (tables[BLK_IDX_SCHEDULE_ENTRY_POINTS].entry_count == 0)
+ return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
+
+ if (!IS_FULL(BLK_IDX_SCHEDULE_PARAMS))
+ return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
+
+ if (!IS_FULL(BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS))
+ return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
+ }
+ if (tables[BLK_IDX_VL_LOOKUP].entry_count) {
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ bool has_critical_links = false;
+ int i;
+
+ vl_lookup = tables[BLK_IDX_VL_LOOKUP].entries;
+
+ for (i = 0; i < tables[BLK_IDX_VL_LOOKUP].entry_count; i++) {
+ if (vl_lookup[i].iscritical) {
+ has_critical_links = true;
+ break;
+ }
+ }
+
+ if (tables[BLK_IDX_VL_POLICING].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+
+ if (tables[BLK_IDX_VL_FORWARDING].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+
+ if (tables[BLK_IDX_VL_FORWARDING_PARAMS].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+ }
+
+ if (tables[BLK_IDX_L2_POLICING].entry_count == 0)
+ return SJA1105_MISSING_L2_POLICING_TABLE;
+
+ if (tables[BLK_IDX_VLAN_LOOKUP].entry_count == 0)
+ return SJA1105_MISSING_VLAN_TABLE;
+
+ if (!IS_FULL(BLK_IDX_L2_FORWARDING))
+ return SJA1105_MISSING_L2_FORWARDING_TABLE;
+
+ if (!IS_FULL(BLK_IDX_MAC_CONFIG))
+ return SJA1105_MISSING_MAC_TABLE;
+
+ if (!IS_FULL(BLK_IDX_L2_FORWARDING_PARAMS))
+ return SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE;
+
+ if (!IS_FULL(BLK_IDX_GENERAL_PARAMS))
+ return SJA1105_MISSING_GENERAL_PARAMS_TABLE;
+
+ if (!IS_FULL(BLK_IDX_XMII_PARAMS))
+ return SJA1105_MISSING_XMII_TABLE;
+
+ return static_config_check_memory_size(tables);
+#undef IS_FULL
+}
+
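+/* Emits the byte stream the device expects: the 4-byte device ID, then, for
+ * each non-empty table, a 12-byte header carrying its own CRC, the packed
+ * entries, and a 4-byte CRC over those entries. The stream is terminated by
+ * a header with a length of 0.
+ */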
+void
+sja1105_static_config_pack(void *buf, struct sja1105_static_config *config)
+{
+ struct sja1105_table_header header = {0};
+ enum sja1105_blk_idx i;
+ char *p = buf;
+ int j;
+
+ sja1105_pack(p, &config->device_id, 31, 0, 4);
+ p += SJA1105_SIZE_DEVICE_ID;
+
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ const struct sja1105_table *table;
+ char *table_start;
+
+ table = &config->tables[i];
+ if (!table->entry_count)
+ continue;
+
+ header.block_id = blk_id_map[i];
+ header.len = table->entry_count *
+ table->ops->packed_entry_size / 4;
+ sja1105_table_header_pack_with_crc(p, &header);
+ p += SJA1105_SIZE_TABLE_HEADER;
+ table_start = p;
+ for (j = 0; j < table->entry_count; j++) {
+ u8 *entry_ptr = table->entries;
+
+ entry_ptr += j * table->ops->unpacked_entry_size;
+ memset(p, 0, table->ops->packed_entry_size);
+ table->ops->packing(p, entry_ptr, PACK);
+ p += table->ops->packed_entry_size;
+ }
+ sja1105_table_write_crc(table_start, p);
+ p += 4;
+ }
+	/* Final header:
+	 * Block ID does not matter
+	 * A length of 0 marks the header as final
+	 * CRC will be replaced on-the-fly on "config upload"
+	 */
+ header.block_id = 0;
+ header.len = 0;
+ header.crc = 0xDEADBEEF;
+ memset(p, 0, SJA1105_SIZE_TABLE_HEADER);
+ sja1105_table_header_packing(p, &header, PACK);
+}
+
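+/* The length follows directly from the packing above: 4 bytes of device ID,
+ * plus (12-byte header + packed entries + 4-byte data CRC) for each
+ * non-empty table, plus a final 12-byte header with no data CRC.
+ */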
+size_t
+sja1105_static_config_get_length(const struct sja1105_static_config *config)
+{
+ unsigned int sum;
+ unsigned int header_count;
+ enum sja1105_blk_idx i;
+
+ /* Ending header */
+ header_count = 1;
+ sum = SJA1105_SIZE_DEVICE_ID;
+
+ /* Tables (headers and entries) */
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ const struct sja1105_table *table;
+
+ table = &config->tables[i];
+ if (table->entry_count)
+ header_count++;
+
+ sum += table->ops->packed_entry_size * table->entry_count;
+ }
+ /* Headers have an additional CRC at the end */
+ sum += header_count * (SJA1105_SIZE_TABLE_HEADER + 4);
+ /* Last header does not have an extra CRC because there is no data */
+ sum -= 4;
+
+ return sum;
+}
+
+/* Compatibility matrices */
+
+/* SJA1105E: First generation, no TTEthernet */
+const struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105et_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105et_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105et_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105et_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105et_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105T: First generation, TTEthernet */
+const struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = {
+ .packing = sja1105_schedule_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
+ .packing = sja1105_schedule_entry_points_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
+ },
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105et_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105et_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_PARAMS] = {
+ .packing = sja1105_schedule_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
+ .packing = sja1105_schedule_entry_points_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105et_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105et_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105et_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105P: Second generation, no TTEthernet, no SGMII */
+const struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105Q: Second generation, TTEthernet, no SGMII */
+const struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = {
+ .packing = sja1105_schedule_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
+ .packing = sja1105_schedule_entry_points_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
+ },
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_PARAMS] = {
+ .packing = sja1105_schedule_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
+ .packing = sja1105_schedule_entry_points_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105R: Second generation, no TTEthernet, SGMII */
+const struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105S: Second generation, TTEthernet, SGMII */
+const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = {
+ .packing = sja1105_schedule_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
+ .packing = sja1105_schedule_entry_points_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
+ },
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_PARAMS] = {
+ .packing = sja1105_schedule_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
+ .packing = sja1105_schedule_entry_points_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+int sja1105_static_config_init(struct sja1105_static_config *config,
+ const struct sja1105_table_ops *static_ops,
+ u64 device_id)
+{
+ enum sja1105_blk_idx i;
+
+ *config = (struct sja1105_static_config) {0};
+
+ /* Transfer static_ops array from priv into per-table ops
+ * for handier access
+ */
+ for (i = 0; i < BLK_IDX_MAX; i++)
+ config->tables[i].ops = &static_ops[i];
+
+ config->device_id = device_id;
+ return 0;
+}
+
+void sja1105_static_config_free(struct sja1105_static_config *config)
+{
+ enum sja1105_blk_idx i;
+
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ if (config->tables[i].entry_count) {
+ kfree(config->tables[i].entries);
+ config->tables[i].entry_count = 0;
+ }
+ }
+}
+
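+/* Removes entry @i by shifting the remaining (entry_count - i - 1) entries
+ * down over it.
+ */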
+int sja1105_table_delete_entry(struct sja1105_table *table, int i)
+{
+ size_t entry_size = table->ops->unpacked_entry_size;
+ u8 *entries = table->entries;
+
+	if (i >= table->entry_count)
+		return -ERANGE;
+
+	memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
+		(table->entry_count - i - 1) * entry_size);
+
+ table->entry_count--;
+
+ return 0;
+}
+
+/* No pointers to table->entries should be kept when this is called. */
+int sja1105_table_resize(struct sja1105_table *table, size_t new_count)
+{
+ size_t entry_size = table->ops->unpacked_entry_size;
+ void *new_entries, *old_entries = table->entries;
+
+ if (new_count > table->ops->max_entry_count)
+ return -ERANGE;
+
+ new_entries = kcalloc(new_count, entry_size, GFP_KERNEL);
+ if (!new_entries)
+ return -ENOMEM;
+
+ memcpy(new_entries, old_entries, min(new_count, table->entry_count) *
+ entry_size);
+
+ table->entries = new_entries;
+ table->entry_count = new_count;
+ kfree(old_entries);
+ return 0;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
new file mode 100644
index 000000000..bc7606899
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -0,0 +1,455 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_STATIC_CONFIG_H
+#define _SJA1105_STATIC_CONFIG_H
+
+#include <linux/packing.h>
+#include <linux/types.h>
+#include <asm/types.h>
+
+#define SJA1105_SIZE_DEVICE_ID 4
+#define SJA1105_SIZE_TABLE_HEADER 12
+#define SJA1105_SIZE_SCHEDULE_ENTRY 8
+#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY 4
+#define SJA1105_SIZE_VL_LOOKUP_ENTRY 12
+#define SJA1105_SIZE_VL_POLICING_ENTRY 8
+#define SJA1105_SIZE_VL_FORWARDING_ENTRY 4
+#define SJA1105_SIZE_L2_POLICING_ENTRY 8
+#define SJA1105_SIZE_VLAN_LOOKUP_ENTRY 8
+#define SJA1105_SIZE_L2_FORWARDING_ENTRY 8
+#define SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY 12
+#define SJA1105_SIZE_RETAGGING_ENTRY 8
+#define SJA1105_SIZE_XMII_PARAMS_ENTRY 4
+#define SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY 12
+#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY 4
+#define SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY 12
+#define SJA1105ET_SIZE_L2_LOOKUP_ENTRY 12
+#define SJA1105ET_SIZE_MAC_CONFIG_ENTRY 28
+#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY 4
+#define SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY 40
+#define SJA1105ET_SIZE_AVB_PARAMS_ENTRY 12
+#define SJA1105ET_SIZE_CBS_ENTRY 16
+#define SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY 20
+#define SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY 32
+#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY 16
+#define SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY 44
+#define SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY 16
+#define SJA1105PQRS_SIZE_CBS_ENTRY 20
+
+/* UM10944.pdf Page 11, Table 2. Configuration Blocks */
+enum {
+ BLKID_SCHEDULE = 0x00,
+ BLKID_SCHEDULE_ENTRY_POINTS = 0x01,
+ BLKID_VL_LOOKUP = 0x02,
+ BLKID_VL_POLICING = 0x03,
+ BLKID_VL_FORWARDING = 0x04,
+ BLKID_L2_LOOKUP = 0x05,
+ BLKID_L2_POLICING = 0x06,
+ BLKID_VLAN_LOOKUP = 0x07,
+ BLKID_L2_FORWARDING = 0x08,
+ BLKID_MAC_CONFIG = 0x09,
+ BLKID_SCHEDULE_PARAMS = 0x0A,
+ BLKID_SCHEDULE_ENTRY_POINTS_PARAMS = 0x0B,
+ BLKID_VL_FORWARDING_PARAMS = 0x0C,
+ BLKID_L2_LOOKUP_PARAMS = 0x0D,
+ BLKID_L2_FORWARDING_PARAMS = 0x0E,
+ BLKID_AVB_PARAMS = 0x10,
+ BLKID_GENERAL_PARAMS = 0x11,
+ BLKID_RETAGGING = 0x12,
+ BLKID_CBS = 0x13,
+ BLKID_XMII_PARAMS = 0x4E,
+};
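+/* Note the sparseness: 0x0F is unused and the xMII parameters jump to 0x4E.
+ * blk_id_map in sja1105_static_config.c exists to paper over this.
+ */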
+
+enum sja1105_blk_idx {
+ BLK_IDX_SCHEDULE = 0,
+ BLK_IDX_SCHEDULE_ENTRY_POINTS,
+ BLK_IDX_VL_LOOKUP,
+ BLK_IDX_VL_POLICING,
+ BLK_IDX_VL_FORWARDING,
+ BLK_IDX_L2_LOOKUP,
+ BLK_IDX_L2_POLICING,
+ BLK_IDX_VLAN_LOOKUP,
+ BLK_IDX_L2_FORWARDING,
+ BLK_IDX_MAC_CONFIG,
+ BLK_IDX_SCHEDULE_PARAMS,
+ BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS,
+ BLK_IDX_VL_FORWARDING_PARAMS,
+ BLK_IDX_L2_LOOKUP_PARAMS,
+ BLK_IDX_L2_FORWARDING_PARAMS,
+ BLK_IDX_AVB_PARAMS,
+ BLK_IDX_GENERAL_PARAMS,
+ BLK_IDX_RETAGGING,
+ BLK_IDX_CBS,
+ BLK_IDX_XMII_PARAMS,
+ BLK_IDX_MAX,
+ /* Fake block indices that are only valid for dynamic access */
+ BLK_IDX_MGMT_ROUTE,
+ BLK_IDX_MAX_DYN,
+ BLK_IDX_INVAL = -1,
+};
+
+#define SJA1105_MAX_SCHEDULE_COUNT 1024
+#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT 2048
+#define SJA1105_MAX_VL_LOOKUP_COUNT 1024
+#define SJA1105_MAX_VL_POLICING_COUNT 1024
+#define SJA1105_MAX_VL_FORWARDING_COUNT 1024
+#define SJA1105_MAX_L2_LOOKUP_COUNT 1024
+#define SJA1105_MAX_L2_POLICING_COUNT 45
+#define SJA1105_MAX_VLAN_LOOKUP_COUNT 4096
+#define SJA1105_MAX_L2_FORWARDING_COUNT 13
+#define SJA1105_MAX_MAC_CONFIG_COUNT 5
+#define SJA1105_MAX_SCHEDULE_PARAMS_COUNT 1
+#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT 1
+#define SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT 1
+#define SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT 1
+#define SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT 1
+#define SJA1105_MAX_GENERAL_PARAMS_COUNT 1
+#define SJA1105_MAX_RETAGGING_COUNT 32
+#define SJA1105_MAX_XMII_PARAMS_COUNT 1
+#define SJA1105_MAX_AVB_PARAMS_COUNT 1
+#define SJA1105ET_MAX_CBS_COUNT 10
+#define SJA1105PQRS_MAX_CBS_COUNT 16
+
+#define SJA1105_MAX_FRAME_MEMORY 929
+#define SJA1105_MAX_FRAME_MEMORY_RETAGGING 910
+#define SJA1105_VL_FRAME_MEMORY 100
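+/* Frame memory is counted in 128-byte blocks; the usable total drops by
+ * 19 blocks (929 - 910) when the retagging table is in use.
+ */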
+
+#define SJA1105E_DEVICE_ID 0x9C00000Cull
+#define SJA1105T_DEVICE_ID 0x9E00030Eull
+#define SJA1105PR_DEVICE_ID 0xAF00030Eull
+#define SJA1105QS_DEVICE_ID 0xAE00030Eull
+
+#define SJA1105ET_PART_NO 0x9A83
+#define SJA1105P_PART_NO 0x9A84
+#define SJA1105Q_PART_NO 0x9A85
+#define SJA1105R_PART_NO 0x9A86
+#define SJA1105S_PART_NO 0x9A87
+
+struct sja1105_schedule_entry {
+ u64 winstindex;
+ u64 winend;
+ u64 winst;
+ u64 destports;
+ u64 setvalid;
+ u64 txen;
+ u64 resmedia_en;
+ u64 resmedia;
+ u64 vlindex;
+ u64 delta;
+};
+
+struct sja1105_schedule_params_entry {
+ u64 subscheind[8];
+};
+
+struct sja1105_general_params_entry {
+ u64 vllupformat;
+ u64 mirr_ptacu;
+ u64 switchid;
+ u64 hostprio;
+ u64 mac_fltres1;
+ u64 mac_fltres0;
+ u64 mac_flt1;
+ u64 mac_flt0;
+ u64 incl_srcpt1;
+ u64 incl_srcpt0;
+ u64 send_meta1;
+ u64 send_meta0;
+ u64 casc_port;
+ u64 host_port;
+ u64 mirr_port;
+ u64 vlmarker;
+ u64 vlmask;
+ u64 tpid;
+ u64 ignore2stf;
+ u64 tpid2;
+ /* P/Q/R/S only */
+ u64 queue_ts;
+ u64 egrmirrvid;
+ u64 egrmirrpcp;
+ u64 egrmirrdei;
+ u64 replay_port;
+};
+
+struct sja1105_schedule_entry_points_entry {
+ u64 subschindx;
+ u64 delta;
+ u64 address;
+};
+
+struct sja1105_schedule_entry_points_params_entry {
+ u64 clksrc;
+ u64 actsubsch;
+};
+
+struct sja1105_vlan_lookup_entry {
+ u64 ving_mirr;
+ u64 vegr_mirr;
+ u64 vmemb_port;
+ u64 vlan_bc;
+ u64 tag_port;
+ u64 vlanid;
+};
+
+struct sja1105_l2_lookup_entry {
+ u64 vlanid;
+ u64 macaddr;
+ u64 destports;
+ u64 enfport;
+ u64 index;
+ /* P/Q/R/S only */
+ u64 mask_iotag;
+ u64 mask_vlanid;
+ u64 mask_macaddr;
+ u64 iotag;
+ u64 lockeds;
+ union {
+ /* LOCKEDS=1: Static FDB entries */
+ struct {
+ u64 tsreg;
+ u64 mirrvlan;
+ u64 takets;
+ u64 mirr;
+ u64 retag;
+ };
+ /* LOCKEDS=0: Dynamically learned FDB entries */
+ struct {
+ u64 touched;
+ u64 age;
+ };
+ };
+};
+
+struct sja1105_l2_lookup_params_entry {
+ u64 maxaddrp[5]; /* P/Q/R/S only */
+ u64 start_dynspc; /* P/Q/R/S only */
+ u64 drpnolearn; /* P/Q/R/S only */
+ u64 use_static; /* P/Q/R/S only */
+ u64 owr_dyn; /* P/Q/R/S only */
+ u64 learn_once; /* P/Q/R/S only */
+ u64 maxage; /* Shared */
+ u64 dyn_tbsz; /* E/T only */
+ u64 poly; /* E/T only */
+ u64 shared_learn; /* Shared */
+ u64 no_enf_hostprt; /* Shared */
+ u64 no_mgmt_learn; /* Shared */
+};
+
+struct sja1105_l2_forwarding_entry {
+ u64 bc_domain;
+ u64 reach_port;
+ u64 fl_domain;
+ u64 vlan_pmap[8];
+};
+
+struct sja1105_l2_forwarding_params_entry {
+ u64 max_dynp;
+ u64 part_spc[8];
+};
+
+struct sja1105_l2_policing_entry {
+ u64 sharindx;
+ u64 smax;
+ u64 rate;
+ u64 maxlen;
+ u64 partition;
+};
+
+struct sja1105_avb_params_entry {
+ u64 cas_master;
+ u64 destmeta;
+ u64 srcmeta;
+};
+
+struct sja1105_mac_config_entry {
+ u64 top[8];
+ u64 base[8];
+ u64 enabled[8];
+ u64 ifg;
+ u64 speed;
+ u64 tp_delin;
+ u64 tp_delout;
+ u64 maxage;
+ u64 vlanprio;
+ u64 vlanid;
+ u64 ing_mirr;
+ u64 egr_mirr;
+ u64 drpnona664;
+ u64 drpdtag;
+ u64 drpuntag;
+ u64 retag;
+ u64 dyn_learn;
+ u64 egress;
+ u64 ingress;
+};
+
+struct sja1105_retagging_entry {
+ u64 egr_port;
+ u64 ing_port;
+ u64 vlan_ing;
+ u64 vlan_egr;
+ u64 do_not_learn;
+ u64 use_dest_ports;
+ u64 destports;
+};
+
+struct sja1105_cbs_entry {
+ u64 port;
+ u64 prio;
+ u64 credit_hi;
+ u64 credit_lo;
+ u64 send_slope;
+ u64 idle_slope;
+};
+
+struct sja1105_xmii_params_entry {
+ u64 phy_mac[5];
+ u64 xmii_mode[5];
+};
+
+enum {
+ SJA1105_VL_FORMAT_PSFP = 0,
+ SJA1105_VL_FORMAT_ARINC664 = 1,
+};
+
+struct sja1105_vl_lookup_entry {
+ u64 format;
+ u64 port;
+ union {
+ /* SJA1105_VL_FORMAT_PSFP */
+ struct {
+ u64 destports;
+ u64 iscritical;
+ u64 macaddr;
+ u64 vlanid;
+ u64 vlanprior;
+ };
+ /* SJA1105_VL_FORMAT_ARINC664 */
+ struct {
+ u64 egrmirr;
+ u64 ingrmirr;
+ u64 vlid;
+ };
+ };
+ /* Not part of hardware structure */
+ unsigned long flow_cookie;
+};
+
+struct sja1105_vl_policing_entry {
+ u64 type;
+ u64 maxlen;
+ u64 sharindx;
+ u64 bag;
+ u64 jitter;
+};
+
+struct sja1105_vl_forwarding_entry {
+ u64 type;
+ u64 priority;
+ u64 partition;
+ u64 destports;
+};
+
+struct sja1105_vl_forwarding_params_entry {
+ u64 partspc[8];
+ u64 debugen;
+};
+
+struct sja1105_table_header {
+ u64 block_id;
+ u64 len;
+ u64 crc;
+};
+
+struct sja1105_table_ops {
+ size_t (*packing)(void *buf, void *entry_ptr, enum packing_op op);
+ size_t unpacked_entry_size;
+ size_t packed_entry_size;
+ size_t max_entry_count;
+};
+
+struct sja1105_table {
+ const struct sja1105_table_ops *ops;
+ size_t entry_count;
+ void *entries;
+};
+
+struct sja1105_static_config {
+ u64 device_id;
+ struct sja1105_table tables[BLK_IDX_MAX];
+};
+
+extern const struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX];
+
+size_t sja1105_table_header_packing(void *buf, void *hdr, enum packing_op op);
+void
+sja1105_table_header_pack_with_crc(void *buf, struct sja1105_table_header *hdr);
+size_t
+sja1105_static_config_get_length(const struct sja1105_static_config *config);
+
+typedef enum {
+ SJA1105_CONFIG_OK = 0,
+ SJA1105_TTETHERNET_NOT_SUPPORTED,
+ SJA1105_INCORRECT_TTETHERNET_CONFIGURATION,
+ SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION,
+ SJA1105_MISSING_L2_POLICING_TABLE,
+ SJA1105_MISSING_L2_FORWARDING_TABLE,
+ SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE,
+ SJA1105_MISSING_GENERAL_PARAMS_TABLE,
+ SJA1105_MISSING_VLAN_TABLE,
+ SJA1105_MISSING_XMII_TABLE,
+ SJA1105_MISSING_MAC_TABLE,
+ SJA1105_OVERCOMMITTED_FRAME_MEMORY,
+} sja1105_config_valid_t;
+
+extern const char *sja1105_static_config_error_msg[];
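+/* A minimal usage sketch (hypothetical caller), pairing the validity code
+ * with its message:
+ *
+ *	sja1105_config_valid_t valid;
+ *
+ *	valid = sja1105_static_config_check_valid(&priv->static_config);
+ *	if (valid != SJA1105_CONFIG_OK)
+ *		dev_err(dev, "%s\n", sja1105_static_config_error_msg[valid]);
+ */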
+
+sja1105_config_valid_t
+sja1105_static_config_check_valid(const struct sja1105_static_config *config);
+void
+sja1105_static_config_pack(void *buf, struct sja1105_static_config *config);
+int sja1105_static_config_init(struct sja1105_static_config *config,
+ const struct sja1105_table_ops *static_ops,
+ u64 device_id);
+void sja1105_static_config_free(struct sja1105_static_config *config);
+
+int sja1105_table_delete_entry(struct sja1105_table *table, int i);
+int sja1105_table_resize(struct sja1105_table *table, size_t new_count);
+
+u32 sja1105_crc32(const void *buf, size_t len);
+
+void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len);
+void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len);
+void sja1105_packing(void *buf, u64 *val, int start, int end,
+ size_t len, enum packing_op op);
+
+/* Common implementations for the static and dynamic configs */
+size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
new file mode 100644
index 000000000..31d8acff1
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -0,0 +1,895 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+#define SJA1105_TAS_CLKSRC_DISABLED 0
+#define SJA1105_TAS_CLKSRC_STANDALONE 1
+#define SJA1105_TAS_CLKSRC_AS6802 2
+#define SJA1105_TAS_CLKSRC_PTP 3
+#define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0)
+
+#define work_to_sja1105_tas(d) \
+ container_of((d), struct sja1105_tas_data, tas_work)
+#define tas_to_sja1105(d) \
+ container_of((d), struct sja1105_private, tas_data)
+
+static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
+ struct dsa_switch *ds = priv->ds;
+ s64 earliest_base_time = S64_MAX;
+ s64 latest_base_time = 0;
+ s64 its_cycle_time = 0;
+ s64 max_cycle_time = 0;
+ int port;
+
+ tas_data->enabled = false;
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ const struct tc_taprio_qopt_offload *offload;
+
+ offload = tas_data->offload[port];
+ if (!offload)
+ continue;
+
+ tas_data->enabled = true;
+
+ if (max_cycle_time < offload->cycle_time)
+ max_cycle_time = offload->cycle_time;
+ if (latest_base_time < offload->base_time)
+ latest_base_time = offload->base_time;
+ if (earliest_base_time > offload->base_time) {
+ earliest_base_time = offload->base_time;
+ its_cycle_time = offload->cycle_time;
+ }
+ }
+
+ if (!list_empty(&gating_cfg->entries)) {
+ tas_data->enabled = true;
+
+ if (max_cycle_time < gating_cfg->cycle_time)
+ max_cycle_time = gating_cfg->cycle_time;
+ if (latest_base_time < gating_cfg->base_time)
+ latest_base_time = gating_cfg->base_time;
+ if (earliest_base_time > gating_cfg->base_time) {
+ earliest_base_time = gating_cfg->base_time;
+ its_cycle_time = gating_cfg->cycle_time;
+ }
+ }
+
+ if (!tas_data->enabled)
+ return 0;
+
+	/* Roll the earliest base time over until it is in a comparable
+	 * time base with the latest, then compare their deltas.
+	 * We want to enforce that all ports' base times are within
+	 * SJA1105_TAS_MAX_DELTA ticks (of 200 ns each) of one another.
+	 */
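+	/* Hypothetical numeric example: with an earliest base time of 0 ns,
+	 * a 300 ns cycle time and a latest base time of 1000 ns, the
+	 * rollover below yields 900 ns, so the compared delta is 100 ns.
+	 */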
+ earliest_base_time = future_base_time(earliest_base_time,
+ its_cycle_time,
+ latest_base_time);
+ while (earliest_base_time > latest_base_time)
+ earliest_base_time -= its_cycle_time;
+ if (latest_base_time - earliest_base_time >
+ sja1105_delta_to_ns(SJA1105_TAS_MAX_DELTA)) {
+ dev_err(ds->dev,
+ "Base times too far apart: min %llu max %llu\n",
+ earliest_base_time, latest_base_time);
+ return -ERANGE;
+ }
+
+ tas_data->earliest_base_time = earliest_base_time;
+ tas_data->max_cycle_time = max_cycle_time;
+
+ dev_dbg(ds->dev, "earliest base time %lld ns\n", earliest_base_time);
+ dev_dbg(ds->dev, "latest base time %lld ns\n", latest_base_time);
+ dev_dbg(ds->dev, "longest cycle time %lld ns\n", max_cycle_time);
+
+ return 0;
+}
+
+/* Lo and behold: the egress scheduler from hell.
+ *
+ * At the hardware level, the Time-Aware Shaper holds a global linear array of
+ * all schedule entries for all ports. These are the Gate Control List (GCL)
+ * entries, let's call them "timeslots" for short. This linear array of
+ * timeslots is held in BLK_IDX_SCHEDULE.
+ *
+ * Then there are a maximum of 8 "execution threads" inside the switch, which
+ * iterate cyclically through the "schedule". Each "cycle" has an entry point
+ * and an exit point, both being timeslot indices in the schedule table. The
+ * hardware calls each cycle a "subschedule".
+ *
+ * Subschedule (cycle) i starts when
+ * ptpclkval >= ptpschtm + BLK_IDX_SCHEDULE_ENTRY_POINTS[i].delta.
+ *
+ * The hardware scheduler iterates BLK_IDX_SCHEDULE with a k ranging from
+ * k = BLK_IDX_SCHEDULE_ENTRY_POINTS[i].address to
+ * k = BLK_IDX_SCHEDULE_PARAMS.subscheind[i]
+ *
+ * For each schedule entry (timeslot) k, the engine executes the gate control
+ * list entry for the duration of BLK_IDX_SCHEDULE[k].delta.
+ *
+ * +---------+
+ * | | BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS
+ * +---------+
+ * |
+ * +-----------------+
+ * | .actsubsch
+ * BLK_IDX_SCHEDULE_ENTRY_POINTS v
+ * +-------+-------+
+ * |cycle 0|cycle 1|
+ * +-------+-------+
+ * | | | |
+ * +----------------+ | | +-------------------------------------+
+ * | .subschindx | | .subschindx |
+ * | | +---------------+ |
+ * | .address | .address | |
+ * | | | |
+ * | | | |
+ * | BLK_IDX_SCHEDULE v v |
+ * | +-------+-------+-------+-------+-------+------+ |
+ * | |entry 0|entry 1|entry 2|entry 3|entry 4|entry5| |
+ * | +-------+-------+-------+-------+-------+------+ |
+ * | ^ ^ ^ ^ |
+ * | | | | | |
+ * | +-------------------------+ | | | |
+ * | | +-------------------------------+ | | |
+ * | | | +-------------------+ | |
+ * | | | | | |
+ * | +---------------------------------------------------------------+ |
+ * | |subscheind[0]<=subscheind[1]<=subscheind[2]<=...<=subscheind[7]| |
+ * | +---------------------------------------------------------------+ |
+ * | ^ ^ BLK_IDX_SCHEDULE_PARAMS |
+ * | | | |
+ * +--------+ +-------------------------------------------+
+ *
+ * In the above picture there are two subschedules (cycles):
+ *
+ * - cycle 0: iterates the schedule table from 0 to 2 (and back)
+ * - cycle 1: iterates the schedule table from 3 to 5 (and back)
+ *
+ * All other possible execution threads must be marked as unused by making
+ * their "subschedule end index" (subscheind) equal to the last valid
+ * subschedule's end index (in this case 5).
+ */
+int sja1105_init_scheduling(struct sja1105_private *priv)
+{
+ struct sja1105_schedule_entry_points_entry *schedule_entry_points;
+ struct sja1105_schedule_entry_points_params_entry
+ *schedule_entry_points_params;
+ struct sja1105_schedule_params_entry *schedule_params;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
+ struct sja1105_schedule_entry *schedule;
+ struct sja1105_table *table;
+ int schedule_start_idx;
+ s64 entry_point_delta;
+ int schedule_end_idx;
+ int num_entries = 0;
+ int num_cycles = 0;
+ int cycle = 0;
+ int i, k = 0;
+ int port, rc;
+
+ rc = sja1105_tas_set_runtime_params(priv);
+ if (rc < 0)
+ return rc;
+
+ /* Discard previous Schedule Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous Schedule Entry Points Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous Schedule Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous Schedule Entry Points Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Figure out the dimensioning of the problem */
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ if (tas_data->offload[port]) {
+ num_entries += tas_data->offload[port]->num_entries;
+ num_cycles++;
+ }
+ }
+
+ if (!list_empty(&gating_cfg->entries)) {
+ num_entries += gating_cfg->num_entries;
+ num_cycles++;
+ }
+
+ /* Nothing to do */
+ if (!num_cycles)
+ return 0;
+
+ /* Pre-allocate space in the static config tables */
+
+ /* Schedule Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
+ table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = num_entries;
+ schedule = table->entries;
+
+ /* Schedule Entry Points Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
+ table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ /* Previously allocated memory will be freed automatically in
+ * sja1105_static_config_free. This is true for all early
+ * returns below.
+ */
+ return -ENOMEM;
+ table->entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT;
+ schedule_entry_points_params = table->entries;
+
+ /* Schedule Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
+ table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT;
+ schedule_params = table->entries;
+
+ /* Schedule Entry Points Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
+ table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = num_cycles;
+ schedule_entry_points = table->entries;
+
+ /* Finally start populating the static config tables */
+ schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
+ schedule_entry_points_params->actsubsch = num_cycles - 1;
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ const struct tc_taprio_qopt_offload *offload;
+ /* Relative base time */
+ s64 rbt;
+
+ offload = tas_data->offload[port];
+ if (!offload)
+ continue;
+
+ schedule_start_idx = k;
+ schedule_end_idx = k + offload->num_entries - 1;
+ /* This is the base time expressed as a number of TAS ticks
+ * relative to PTPSCHTM, which we'll (perhaps improperly) call
+ * the operational base time.
+ */
+ rbt = future_base_time(offload->base_time,
+ offload->cycle_time,
+ tas_data->earliest_base_time);
+ rbt -= tas_data->earliest_base_time;
+ /* UM10944.pdf 4.2.2. Schedule Entry Points table says that
+	 * delta cannot be zero, which is unfortunate. Advance all relative
+ * base times by 1 TAS delta, so that even the earliest base
+ * time becomes 1 in relative terms. Then start the operational
+ * base time (PTPSCHTM) one TAS delta earlier than planned.
+ */
+ entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
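+		/* Worked example, with made-up numbers: if
+		 * tas_data->earliest_base_time = 1000000 ns and this port has
+		 * base_time = 400000 ns, cycle_time = 500000 ns, then
+		 * future_base_time() returns 1400000 ns, rbt = 400000 ns, and
+		 * entry_point_delta = 400000 / 200 + 1 = 2001 deltas.
+		 */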
+
+ schedule_entry_points[cycle].subschindx = cycle;
+ schedule_entry_points[cycle].delta = entry_point_delta;
+ schedule_entry_points[cycle].address = schedule_start_idx;
+
+ /* The subschedule end indices need to be
+ * monotonically increasing.
+ */
+ for (i = cycle; i < 8; i++)
+ schedule_params->subscheind[i] = schedule_end_idx;
+
+ for (i = 0; i < offload->num_entries; i++, k++) {
+ s64 delta_ns = offload->entries[i].interval;
+
+ schedule[k].delta = ns_to_sja1105_delta(delta_ns);
+ schedule[k].destports = BIT(port);
+ schedule[k].resmedia_en = true;
+ schedule[k].resmedia = SJA1105_GATE_MASK &
+ ~offload->entries[i].gate_mask;
+ }
+ cycle++;
+ }
+
+ if (!list_empty(&gating_cfg->entries)) {
+ struct sja1105_gate_entry *e;
+
+ /* Relative base time */
+ s64 rbt;
+
+ schedule_start_idx = k;
+ schedule_end_idx = k + gating_cfg->num_entries - 1;
+ rbt = future_base_time(gating_cfg->base_time,
+ gating_cfg->cycle_time,
+ tas_data->earliest_base_time);
+ rbt -= tas_data->earliest_base_time;
+ entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
+
+ schedule_entry_points[cycle].subschindx = cycle;
+ schedule_entry_points[cycle].delta = entry_point_delta;
+ schedule_entry_points[cycle].address = schedule_start_idx;
+
+ for (i = cycle; i < 8; i++)
+ schedule_params->subscheind[i] = schedule_end_idx;
+
+ list_for_each_entry(e, &gating_cfg->entries, list) {
+ schedule[k].delta = ns_to_sja1105_delta(e->interval);
+ schedule[k].destports = e->rule->vl.destports;
+ schedule[k].setvalid = true;
+ schedule[k].txen = true;
+ schedule[k].vlindex = e->rule->vl.sharindx;
+ schedule[k].winstindex = e->rule->vl.sharindx;
+ if (e->gate_state) /* Gate open */
+ schedule[k].winst = true;
+ else /* Gate closed */
+ schedule[k].winend = true;
+ k++;
+ }
+ }
+
+ return 0;
+}
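+
+/* A sketch of what the future_base_time() helper used above computes (the
+ * actual definition lives elsewhere in the driver; shown here only for
+ * reference):
+ *
+ *	static inline s64 future_base_time(s64 base_time, s64 cycle_time,
+ *					   s64 now)
+ *	{
+ *		s64 n;
+ *
+ *		if (base_time >= now)
+ *			return base_time;
+ *
+ *		n = div_s64(now - base_time, cycle_time);
+ *		return base_time + (n + 1) * cycle_time;
+ *	}
+ *
+ * i.e. the first occurrence of base_time plus an integer number of cycles
+ * that does not lie in the past relative to @now.
+ */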
+
+/* Suppose there are 2 port subschedules, each executing an arbitrary number
+ * of gate open/close events cyclically.
+ * No two of those gate events may ever occur at the exact same time,
+ * otherwise the switch is known to act in exotically strange ways.
+ * However the hardware doesn't bother performing these integrity checks.
+ * So here we are with the task of validating whether the new @admin offload
+ * has any conflict with the already established TAS configuration in
+ * tas_data->offload. We already know the other ports are in harmony with one
+ * another, otherwise we wouldn't have saved them.
+ * Each gate event executes periodically, with a period of @cycle_time and a
+ * phase given by its cycle's @base_time plus its offset within the cycle
+ * (which in turn is given by the length of the events prior to it).
+ * There are two aspects to possible collisions:
+ * - Collisions within one cycle's (actually the longest cycle's) time frame.
+ *   For that, we need to compare every pair in the Cartesian product of
+ *   the possible occurrences of each event within one cycle time.
+ * - Collisions in the future. Events may not collide within one cycle time,
+ * but if two port schedules don't have the same periodicity (aka the cycle
+ * times aren't multiples of one another), they surely will some time in the
+ *   future (actually they will collide an infinite number of times).
+ */
+static bool
+sja1105_tas_check_conflicts(struct sja1105_private *priv, int port,
+ const struct tc_taprio_qopt_offload *admin)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ const struct tc_taprio_qopt_offload *offload;
+ s64 max_cycle_time, min_cycle_time;
+ s64 delta1, delta2;
+ s64 rbt1, rbt2;
+ s64 stop_time;
+ s64 t1, t2;
+ int i, j;
+ s32 rem;
+
+ offload = tas_data->offload[port];
+ if (!offload)
+ return false;
+
+ /* Check if the two cycle times are multiples of one another.
+ * If they aren't, then they will surely collide.
+ */
+ max_cycle_time = max(offload->cycle_time, admin->cycle_time);
+ min_cycle_time = min(offload->cycle_time, admin->cycle_time);
+ div_s64_rem(max_cycle_time, min_cycle_time, &rem);
+ if (rem)
+ return true;
+
+ /* Calculate the "reduced" base time of each of the two cycles
+	 * (transposed back as close to 0 as possible) by taking the
+	 * remainder modulo the cycle time.
+ */
+ div_s64_rem(offload->base_time, offload->cycle_time, &rem);
+ rbt1 = rem;
+
+ div_s64_rem(admin->base_time, admin->cycle_time, &rem);
+ rbt2 = rem;
+
+ stop_time = max_cycle_time + max(rbt1, rbt2);
+
+ /* delta1 is the relative base time of each GCL entry within
+ * the established ports' TAS config.
+ */
+ for (i = 0, delta1 = 0;
+ i < offload->num_entries;
+ delta1 += offload->entries[i].interval, i++) {
+ /* delta2 is the relative base time of each GCL entry
+ * within the newly added TAS config.
+ */
+ for (j = 0, delta2 = 0;
+ j < admin->num_entries;
+ delta2 += admin->entries[j].interval, j++) {
+ /* t1 follows all possible occurrences of the
+ * established ports' GCL entry i within the
+ * first cycle time.
+ */
+ for (t1 = rbt1 + delta1;
+ t1 <= stop_time;
+ t1 += offload->cycle_time) {
+ /* t2 follows all possible occurrences
+ * of the newly added GCL entry j
+ * within the first cycle time.
+ */
+ for (t2 = rbt2 + delta2;
+ t2 <= stop_time;
+ t2 += admin->cycle_time) {
+ if (t1 == t2) {
+ dev_warn(priv->ds->dev,
+ "GCL entry %d collides with entry %d of port %d\n",
+ j, i, port);
+ return true;
+ }
+ }
+ }
+ }
+ }
+
+ return false;
+}
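+
+/* An illustration of the checks above, reduced to two events with periods
+ * p1, p2 and phases ph1, ph2 (names hypothetical, not driver code):
+ *
+ *	static bool events_collide(s64 ph1, s64 p1, s64 ph2, s64 p2)
+ *	{
+ *		s64 stop = max(p1, p2) + max(ph1, ph2);
+ *		s64 t1, t2;
+ *
+ *		for (t1 = ph1; t1 <= stop; t1 += p1)
+ *			for (t2 = ph2; t2 <= stop; t2 += p2)
+ *				if (t1 == t2)
+ *					return true;
+ *		return false;
+ *	}
+ *
+ * E.g. p1 = 3000, p2 = 5000 is rejected up front (5000 is not a multiple
+ * of 3000). With p1 = 2000, p2 = 6000, ph1 = 0, ph2 = 1000, no t1
+ * (0, 2000, 4000, ...) ever equals a t2 (1000, 7000), so the two
+ * schedules can coexist.
+ */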
+
+/* Check the tc-taprio configuration on @port for conflicts with the tc-gate
+ * global subschedule. If @port is -1, check it against all ports.
+ * To reuse the sja1105_tas_check_conflicts logic without refactoring it,
+ * convert the gating configuration to a dummy tc-taprio offload structure.
+ */
+bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+ size_t num_entries = gating_cfg->num_entries;
+ struct tc_taprio_qopt_offload *dummy;
+ struct sja1105_gate_entry *e;
+ bool conflict;
+ int i = 0;
+
+ if (list_empty(&gating_cfg->entries))
+ return false;
+
+ dummy = kzalloc(struct_size(dummy, entries, num_entries), GFP_KERNEL);
+ if (!dummy) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory");
+ return true;
+ }
+
+ dummy->num_entries = num_entries;
+ dummy->base_time = gating_cfg->base_time;
+ dummy->cycle_time = gating_cfg->cycle_time;
+
+ list_for_each_entry(e, &gating_cfg->entries, list)
+ dummy->entries[i++].interval = e->interval;
+
+ if (port != -1) {
+ conflict = sja1105_tas_check_conflicts(priv, port, dummy);
+ } else {
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ conflict = sja1105_tas_check_conflicts(priv, port,
+ dummy);
+ if (conflict)
+ break;
+ }
+ }
+
+ kfree(dummy);
+
+ return conflict;
+}
+
+int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
+ struct tc_taprio_qopt_offload *admin)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ int other_port, rc, i;
+
+ /* Can't change an already configured port (must delete qdisc first).
+ * Can't delete the qdisc from an unconfigured port.
+ */
+ if (!!tas_data->offload[port] == admin->enable)
+ return -EINVAL;
+
+ if (!admin->enable) {
+ taprio_offload_free(tas_data->offload[port]);
+ tas_data->offload[port] = NULL;
+
+ rc = sja1105_init_scheduling(priv);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
+ }
+
+ /* The cycle time extension is the amount of time the last cycle from
+ * the old OPER needs to be extended in order to phase-align with the
+ * base time of the ADMIN when that becomes the new OPER.
+ * But of course our switch needs to be reset to switch-over between
+ * the ADMIN and the OPER configs - so much for a seamless transition.
+	 * So don't add insult to injury and just say we don't support cycle
+ * time extension.
+ */
+ if (admin->cycle_time_extension)
+ return -ENOTSUPP;
+
+ for (i = 0; i < admin->num_entries; i++) {
+ s64 delta_ns = admin->entries[i].interval;
+ s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
+ bool too_long, too_short;
+
+ too_long = (delta_cycles >= SJA1105_TAS_MAX_DELTA);
+ too_short = (delta_cycles == 0);
+ if (too_long || too_short) {
+ dev_err(priv->ds->dev,
+ "Interval %llu too %s for GCL entry %d\n",
+ delta_ns, too_long ? "long" : "short", i);
+ return -ERANGE;
+ }
+ }
+
+ for (other_port = 0; other_port < SJA1105_NUM_PORTS; other_port++) {
+ if (other_port == port)
+ continue;
+
+ if (sja1105_tas_check_conflicts(priv, other_port, admin))
+ return -ERANGE;
+ }
+
+ if (sja1105_gating_check_conflicts(priv, port, NULL)) {
+ dev_err(ds->dev, "Conflict with tc-gate schedule\n");
+ return -ERANGE;
+ }
+
+ tas_data->offload[port] = taprio_offload_get(admin);
+
+ rc = sja1105_init_scheduling(priv);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
+}
+
+static int sja1105_tas_check_running(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_ptp_cmd cmd = {0};
+ int rc;
+
+ rc = sja1105_ptp_commit(ds, &cmd, SPI_READ);
+ if (rc < 0)
+ return rc;
+
+ if (cmd.ptpstrtsch == 1)
+ /* Schedule successfully started */
+ tas_data->state = SJA1105_TAS_STATE_RUNNING;
+ else if (cmd.ptpstopsch == 1)
+ /* Schedule is stopped */
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+ else
+ /* Schedule is probably not configured with PTP clock source */
+ rc = -EINVAL;
+
+ return rc;
+}
+
+/* Write to PTPCLKCORP */
+static int sja1105_tas_adjust_drift(struct sja1105_private *priv,
+ u64 correction)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 ptpclkcorp = ns_to_sja1105_ticks(correction);
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkcorp,
+ &ptpclkcorp, NULL);
+}
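+
+/* Note on units: ns_to_sja1105_ticks() converts at 8 ns per tick (25 ticks
+ * make up one 200 ns delta), so e.g. a correction period equal to a
+ * max_cycle_time of 4 ms is programmed as 4000000 / 8 = 500000 ticks.
+ */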
+
+/* Write to PTPSCHTM */
+static int sja1105_tas_set_base_time(struct sja1105_private *priv,
+ u64 base_time)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u64 ptpschtm = ns_to_sja1105_ticks(base_time);
+
+ return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpschtm,
+ &ptpschtm, NULL);
+}
+
+static int sja1105_tas_start(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
+ struct dsa_switch *ds = priv->ds;
+ int rc;
+
+ dev_dbg(ds->dev, "Starting the TAS\n");
+
+ if (tas_data->state == SJA1105_TAS_STATE_ENABLED_NOT_RUNNING ||
+ tas_data->state == SJA1105_TAS_STATE_RUNNING) {
+ dev_err(ds->dev, "TAS already started\n");
+ return -EINVAL;
+ }
+
+ cmd->ptpstrtsch = 1;
+ cmd->ptpstopsch = 0;
+
+ rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
+ if (rc < 0)
+ return rc;
+
+ tas_data->state = SJA1105_TAS_STATE_ENABLED_NOT_RUNNING;
+
+ return 0;
+}
+
+static int sja1105_tas_stop(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
+ struct dsa_switch *ds = priv->ds;
+ int rc;
+
+ dev_dbg(ds->dev, "Stopping the TAS\n");
+
+ if (tas_data->state == SJA1105_TAS_STATE_DISABLED) {
+ dev_err(ds->dev, "TAS already disabled\n");
+ return -EINVAL;
+ }
+
+ cmd->ptpstopsch = 1;
+ cmd->ptpstrtsch = 0;
+
+ rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
+ if (rc < 0)
+ return rc;
+
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+
+ return 0;
+}
+
+/* The schedule engine and the PTP clock are driven by the same oscillator, and
+ * they run in parallel. But whilst the PTP clock can keep an absolute
+ * time-of-day, the schedule engine only runs in 'ticks' (25 ticks make
+ * up a delta, which is 200 ns), wrapping around at the end of each cycle.
+ * The schedule engine is started when the PTP clock reaches the PTPSCHTM time
+ * (in PTP domain).
+ * Because the PTP clock can be rate-corrected (accelerated or slowed down) by
+ * a software servo, and the schedule engine clock runs in parallel to the PTP
+ * clock, there is logic internal to the switch that periodically keeps the
+ * schedule engine from drifting away. The frequency with which this internal
+ * syntonization happens is the PTP clock correction period (PTPCLKCORP). It is
+ * a value also in the PTP clock domain, and is also rate-corrected.
+ * To be precise, during a correction period, there is logic to determine by
+ * how many scheduler clock ticks the PTP clock has drifted. At the end of each
+ * correction period/beginning of new one, the length of a delta is shrunk or
+ * expanded with an integer number of ticks, compared with the typical 25.
+ * So a delta lasts for 200ns (or 25 ticks) only on average.
+ * Sometimes it is longer, sometimes it is shorter. The internal syntonization
+ * logic can adjust by at most 5 ticks every 20 ticks.
+ *
+ * The first implication is that you should choose your schedule correction
+ * period to be an integer multiple of the schedule length. Preferably one.
+ * In case there are schedules of multiple ports active, then the correction
+ * period needs to be a multiple of them all. Given the restriction that the
+ * cycle times have to be multiples of one another anyway, this means the
+ * correction period can simply be the largest cycle time, hence the current
+ * choice. This way, the updates are always synchronous to the transmission
+ * cycle, and therefore predictable.
+ *
+ * The second implication is that at the beginning of a correction period, the
+ * first few deltas will be modulated in time, until the schedule engine is
+ * properly phase-aligned with the PTP clock. For this reason, you should place
+ * your best-effort traffic at the beginning of a cycle, and your
+ * time-triggered traffic afterwards.
+ *
+ * The third implication is that once the schedule engine is started, it can
+ * only adjust for so much drift within a correction period. In the servo you
+ * can only change the PTPCLKRATE, but not step the clock (PTPCLKADD). If you
+ * want to do the latter, you need to stop and restart the schedule engine,
+ * which is what the state machine handles.
+ */
+static void sja1105_tas_state_machine(struct work_struct *work)
+{
+ struct sja1105_tas_data *tas_data = work_to_sja1105_tas(work);
+ struct sja1105_private *priv = tas_to_sja1105(tas_data);
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct timespec64 base_time_ts, now_ts;
+ struct dsa_switch *ds = priv->ds;
+ struct timespec64 diff;
+ s64 base_time, now;
+ int rc = 0;
+
+ mutex_lock(&ptp_data->lock);
+
+ switch (tas_data->state) {
+ case SJA1105_TAS_STATE_DISABLED:
+ /* Can't do anything at all if clock is still being stepped */
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ)
+ break;
+
+ rc = sja1105_tas_adjust_drift(priv, tas_data->max_cycle_time);
+ if (rc < 0)
+ break;
+
+ rc = __sja1105_ptp_gettimex(ds, &now, NULL);
+ if (rc < 0)
+ break;
+
+ /* Plan to start the earliest schedule first. The others
+ * will be started in hardware, by way of their respective
+ * entry points delta.
+ * Try our best to avoid fringe cases (race condition between
+ * ptpschtm and ptpstrtsch) by pushing the oper_base_time at
+ * least one second in the future from now. This is not ideal,
+ * but this only needs to buy us time until the
+ * sja1105_tas_start command below gets executed.
+ */
+ base_time = future_base_time(tas_data->earliest_base_time,
+ tas_data->max_cycle_time,
+ now + 1ull * NSEC_PER_SEC);
+ base_time -= sja1105_delta_to_ns(1);
+
+ rc = sja1105_tas_set_base_time(priv, base_time);
+ if (rc < 0)
+ break;
+
+ tas_data->oper_base_time = base_time;
+
+ rc = sja1105_tas_start(priv);
+ if (rc < 0)
+ break;
+
+ base_time_ts = ns_to_timespec64(base_time);
+ now_ts = ns_to_timespec64(now);
+
+ dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
+ base_time_ts.tv_sec, base_time_ts.tv_nsec,
+ now_ts.tv_sec, now_ts.tv_nsec);
+
+ break;
+
+ case SJA1105_TAS_STATE_ENABLED_NOT_RUNNING:
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
+			/* Clock was stepped... bad news for TAS */
+ sja1105_tas_stop(priv);
+ break;
+ }
+
+ /* Check if TAS has actually started, by comparing the
+ * scheduled start time with the SJA1105 PTP clock
+ */
+ rc = __sja1105_ptp_gettimex(ds, &now, NULL);
+ if (rc < 0)
+ break;
+
+ if (now < tas_data->oper_base_time) {
+ /* TAS has not started yet */
+ diff = ns_to_timespec64(tas_data->oper_base_time - now);
+ dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
+ diff.tv_sec, diff.tv_nsec);
+ break;
+ }
+
+ /* Time elapsed, what happened? */
+ rc = sja1105_tas_check_running(priv);
+ if (rc < 0)
+ break;
+
+ if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
+			/* TAS should have started by now */
+ dev_err(ds->dev,
+ "TAS not started despite time elapsed\n");
+
+ break;
+
+ case SJA1105_TAS_STATE_RUNNING:
+		/* Clock was stepped... bad news for TAS */
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
+ sja1105_tas_stop(priv);
+ break;
+ }
+
+ rc = sja1105_tas_check_running(priv);
+ if (rc < 0)
+ break;
+
+ if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
+ dev_err(ds->dev, "TAS surprisingly stopped\n");
+
+ break;
+
+ default:
+ if (net_ratelimit())
+ dev_err(ds->dev, "TAS in an invalid state (incorrect use of API)!\n");
+ }
+
+ if (rc && net_ratelimit())
+ dev_err(ds->dev, "An operation returned %d\n", rc);
+
+ mutex_unlock(&ptp_data->lock);
+}
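+
+/* Summary of the transitions implemented above:
+ *
+ *   DISABLED            --(freq adjusted)---> program PTPCLKCORP + PTPSCHTM,
+ *                                             issue PTPSTRTSCH
+ *                                             --> ENABLED_NOT_RUNNING
+ *   ENABLED_NOT_RUNNING --(ptpstrtsch == 1)-> RUNNING
+ *   ENABLED_NOT_RUNNING --(clock stepped)---> stop --> DISABLED
+ *   RUNNING             --(clock stepped)---> stop --> DISABLED
+ */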
+
+void sja1105_tas_clockstep(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ if (!tas_data->enabled)
+ return;
+
+ tas_data->last_op = SJA1105_PTP_CLOCKSTEP;
+ schedule_work(&tas_data->tas_work);
+}
+
+void sja1105_tas_adjfreq(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ if (!tas_data->enabled)
+ return;
+
+ /* No reason to schedule the workqueue, nothing changed */
+ if (tas_data->state == SJA1105_TAS_STATE_RUNNING)
+ return;
+
+ tas_data->last_op = SJA1105_PTP_ADJUSTFREQ;
+ schedule_work(&tas_data->tas_work);
+}
+
+void sja1105_tas_setup(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine);
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+ tas_data->last_op = SJA1105_PTP_NONE;
+
+ INIT_LIST_HEAD(&tas_data->gating_cfg.entries);
+}
+
+void sja1105_tas_teardown(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct tc_taprio_qopt_offload *offload;
+ int port;
+
+ cancel_work_sync(&priv->tas_data.tas_work);
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ offload = priv->tas_data.offload[port];
+ if (!offload)
+ continue;
+
+ taprio_offload_free(offload);
+ }
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h
new file mode 100644
index 000000000..0c173ff51
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_tas.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_TAS_H
+#define _SJA1105_TAS_H
+
+#include <net/pkt_sched.h>
+
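+/* One delta is 200 ns, so BIT(18) = 262144 deltas caps a single gate
+ * interval at roughly 52.4 ms; this is where the "Maximum interval is
+ * 52 ms" check in sja1105_vl.c comes from.
+ */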
+#define SJA1105_TAS_MAX_DELTA BIT(18)
+
+struct sja1105_private;
+
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS)
+
+enum sja1105_tas_state {
+ SJA1105_TAS_STATE_DISABLED,
+ SJA1105_TAS_STATE_ENABLED_NOT_RUNNING,
+ SJA1105_TAS_STATE_RUNNING,
+};
+
+enum sja1105_ptp_op {
+ SJA1105_PTP_NONE,
+ SJA1105_PTP_CLOCKSTEP,
+ SJA1105_PTP_ADJUSTFREQ,
+};
+
+struct sja1105_gate_entry {
+ struct list_head list;
+ struct sja1105_rule *rule;
+ s64 interval;
+ u8 gate_state;
+};
+
+struct sja1105_gating_config {
+ u64 cycle_time;
+ s64 base_time;
+ int num_entries;
+ struct list_head entries;
+};
+
+struct sja1105_tas_data {
+ struct tc_taprio_qopt_offload *offload[SJA1105_NUM_PORTS];
+ struct sja1105_gating_config gating_cfg;
+ enum sja1105_tas_state state;
+ enum sja1105_ptp_op last_op;
+ struct work_struct tas_work;
+ s64 earliest_base_time;
+ s64 oper_base_time;
+ u64 max_cycle_time;
+ bool enabled;
+};
+
+int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
+ struct tc_taprio_qopt_offload *admin);
+
+void sja1105_tas_setup(struct dsa_switch *ds);
+
+void sja1105_tas_teardown(struct dsa_switch *ds);
+
+void sja1105_tas_clockstep(struct dsa_switch *ds);
+
+void sja1105_tas_adjfreq(struct dsa_switch *ds);
+
+bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack);
+
+int sja1105_init_scheduling(struct sja1105_private *priv);
+
+#else
+
+/* C doesn't allow empty structures, bah! */
+struct sja1105_tas_data {
+ u8 dummy;
+};
+
+static inline int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
+ struct tc_taprio_qopt_offload *admin)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void sja1105_tas_setup(struct dsa_switch *ds) { }
+
+static inline void sja1105_tas_teardown(struct dsa_switch *ds) { }
+
+static inline void sja1105_tas_clockstep(struct dsa_switch *ds) { }
+
+static inline void sja1105_tas_adjfreq(struct dsa_switch *ds) { }
+
+static inline bool
+sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack)
+{
+ return true;
+}
+
+static inline int sja1105_init_scheduling(struct sja1105_private *priv)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS) */
+
+#endif /* _SJA1105_TAS_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
new file mode 100644
index 000000000..ffc4042b4
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -0,0 +1,789 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2020, NXP Semiconductors
+ */
+#include <net/tc_act/tc_gate.h>
+#include <linux/dsa/8021q.h>
+#include "sja1105_vl.h"
+
+#define SJA1105_SIZE_VL_STATUS 8
+
+/* Insert into the global gate list, sorted by gate action time. */
+static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
+ struct sja1105_rule *rule,
+ u8 gate_state, s64 entry_time,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gate_entry *e;
+ int rc;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->rule = rule;
+ e->gate_state = gate_state;
+ e->interval = entry_time;
+
+ if (list_empty(&gating_cfg->entries)) {
+ list_add(&e->list, &gating_cfg->entries);
+ } else {
+ struct sja1105_gate_entry *p;
+
+ list_for_each_entry(p, &gating_cfg->entries, list) {
+ if (p->interval == e->interval) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Gate conflict");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if (e->interval < p->interval)
+ break;
+ }
+ list_add(&e->list, p->list.prev);
+ }
+
+ gating_cfg->num_entries++;
+
+ return 0;
+err:
+ kfree(e);
+ return rc;
+}
+
+/* The gate entries contain absolute times in their e->interval field. Convert
+ * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
+ */
+static void
+sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
+ u64 cycle_time)
+{
+ struct sja1105_gate_entry *last_e;
+ struct sja1105_gate_entry *e;
+ struct list_head *prev;
+
+ list_for_each_entry(e, &gating_cfg->entries, list) {
+ struct sja1105_gate_entry *p;
+
+ prev = e->list.prev;
+
+ if (prev == &gating_cfg->entries)
+ continue;
+
+ p = list_entry(prev, struct sja1105_gate_entry, list);
+ p->interval = e->interval - p->interval;
+ }
+ last_e = list_last_entry(&gating_cfg->entries,
+ struct sja1105_gate_entry, list);
+ last_e->interval = cycle_time - last_e->interval;
+}
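+
+/* Worked example: absolute times { 0, 300000, 1000000 } with a cycle_time
+ * of 2000000 ns become intervals { 300000, 700000, 1000000 }: each entry's
+ * interval is the gap to the next entry, and the last entry stretches to
+ * the end of the cycle.
+ */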
+
+static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
+{
+ struct sja1105_gate_entry *e, *n;
+
+ list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
+ list_del(&e->list);
+ kfree(e);
+ }
+}
+
+static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+ struct sja1105_rule *rule;
+ s64 max_cycle_time = 0;
+ s64 its_base_time = 0;
+ int i, rc = 0;
+
+ sja1105_free_gating_config(gating_cfg);
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ if (max_cycle_time < rule->vl.cycle_time) {
+ max_cycle_time = rule->vl.cycle_time;
+ its_base_time = rule->vl.base_time;
+ }
+ }
+
+ if (!max_cycle_time)
+ return 0;
+
+ dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
+ max_cycle_time, its_base_time);
+
+ gating_cfg->base_time = its_base_time;
+ gating_cfg->cycle_time = max_cycle_time;
+ gating_cfg->num_entries = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ s64 time;
+ s64 rbt;
+
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ /* Calculate the difference between this gating schedule's
+ * base time, and the base time of the gating schedule with the
+ * longest cycle time. We call it the relative base time (rbt).
+ */
+ rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
+ its_base_time);
+ rbt -= its_base_time;
+
+ time = rbt;
+
+ for (i = 0; i < rule->vl.num_entries; i++) {
+ u8 gate_state = rule->vl.entries[i].gate_state;
+ s64 entry_time = time;
+
+ while (entry_time < max_cycle_time) {
+ rc = sja1105_insert_gate_entry(gating_cfg, rule,
+ gate_state,
+ entry_time,
+ extack);
+ if (rc)
+ goto err;
+
+ entry_time += rule->vl.cycle_time;
+ }
+ time += rule->vl.entries[i].interval;
+ }
+ }
+
+ sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+
+ return 0;
+err:
+ sja1105_free_gating_config(gating_cfg);
+ return rc;
+}
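+
+/* Worked example for the composition above, with made-up numbers: rule A
+ * has cycle_time = 1000000 ns, rule B has cycle_time = 2000000 ns, both
+ * with base_time = 0. B's cycle is the longest, so the resulting
+ * subschedule has cycle_time = 2000000 ns, each of A's gate events is
+ * inserted twice (at rbt and at rbt + 1000000 ns), and B's events are
+ * inserted once.
+ */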
+
+/* The switch flow classification core implements TTEthernet, which 'thinks' in
+ * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
+ * However it also has one other operating mode (VLLUPFORMAT=0) where it acts
+ * somewhat closer to a pre-standard implementation of IEEE 802.1Qci
+ * (Per-Stream Filtering and Policing), which is what the driver is going to be
+ * implementing.
+ *
+ * VL Lookup
+ * Key = {DMAC && VLANID +---------+ Key = { (DMAC[47:16] & VLMASK ==
+ * && VLAN PCP | | VLMARKER)
+ * && INGRESS PORT} +---------+ (both fixed)
+ * (exact match, | && DMAC[15:0] == VLID
+ * all specified in rule) | (specified in rule)
+ * v && INGRESS PORT }
+ * ------------
+ * 0 (PSFP) / \ 1 (ARINC664)
+ * +-----------/ VLLUPFORMAT \----------+
+ * | \ (fixed) / |
+ * | \ / |
+ * 0 (forwarding) v ------------ |
+ * ------------ |
+ * / \ 1 (QoS classification) |
+ * +---/ ISCRITICAL \-----------+ |
+ * | \ (per rule) / | |
+ * | \ / VLID taken from VLID taken from
+ * v ------------ index of rule contents of rule
+ * select that matched that matched
+ * DESTPORTS | |
+ * | +---------+--------+
+ * | |
+ * | v
+ * | VL Forwarding
+ * | (indexed by VLID)
+ * | +---------+
+ * | +--------------| |
+ * | | select TYPE +---------+
+ * | v
+ * | 0 (rate ------------ 1 (time
+ * | constrained) / \ triggered)
+ * | +------/ TYPE \------------+
+ * | | \ (per VLID) / |
+ * | v \ / v
+ * | VL Policing ------------ VL Policing
+ * | (indexed by VLID) (indexed by VLID)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | select SHARINDX select SHARINDX to
+ * | to rate-limit re-enter VL Forwarding
+ * | groups of VL's with new VLID for egress
+ * | to same quota |
+ * | | |
+ * | select MAXLEN -> exceed => drop select MAXLEN -> exceed => drop
+ * | | |
+ * | v v
+ * | VL Forwarding VL Forwarding
+ * | (indexed by SHARINDX) (indexed by SHARINDX)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | select PRIORITY, select PRIORITY,
+ * | PARTITION, DESTPORTS PARTITION, DESTPORTS
+ * | | |
+ * | v v
+ * | VL Policing VL Policing
+ * | (indexed by SHARINDX) (indexed by SHARINDX)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | | |
+ * | v |
+ * | select BAG, -> exceed => drop |
+ * | JITTER v
+ * | | ----------------------------------------------
+ * | | / Reception Window is open for this VL \
+ * | | / (the Schedule Table executes an entry i \
+ * | | / M <= i < N, for which these conditions hold): \ no
+ * | | +----/ \-+
+ * | | |yes \ WINST[M] == 1 && WINSTINDEX[M] == VLID / |
+ * | | | \ WINEND[N] == 1 && WINSTINDEX[N] == VLID / |
+ * | | | \ / |
+ * | | | \ (the VL window has opened and not yet closed)/ |
+ * | | | ---------------------------------------------- |
+ * | | v v
+ * | | dispatch to DESTPORTS when the Schedule Table drop
+ * | | executes an entry i with TXEN == 1 && VLINDEX == i
+ * v v
+ * dispatch immediately to DESTPORTS
+ *
+ * The per-port classification key is always composed of {DMAC, VID, PCP} and
+ * is non-maskable. This 'looks like' the NULL stream identification function
+ * from IEEE 802.1CB clause 6, except for the extra VLAN PCP. When the switch
+ * ports operate as VLAN-unaware, we do allow the user to not specify the VLAN
+ * ID and PCP, and then the port-based defaults will be used.
+ *
+ * In TTEthernet, routing is something that needs to be done manually for each
+ * Virtual Link. So the flow action must always include one of:
+ * a. 'redirect', 'trap' or 'drop': select the egress port list
+ * Additionally, the following actions may be applied on a Virtual Link,
+ * turning it into 'critical' traffic:
+ * b. 'police': turn it into a rate-constrained VL, with bandwidth limitation
+ * given by the maximum frame length, bandwidth allocation gap (BAG) and
+ * maximum jitter.
+ * c. 'gate': turn it into a time-triggered VL, which can only be received
+ * and forwarded according to a given schedule.
+ */
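+
+/* For illustration, a time-triggered VL as configured from user space
+ * might look like this (interface and MAC address hypothetical; see
+ * tc-flower(8) and tc-gate(8) for the exact syntax):
+ *
+ *	tc qdisc add dev swp2 clsact
+ *	tc filter add dev swp2 ingress flower skip_sw \
+ *		dst_mac 42:be:24:9b:76:20 \
+ *		action gate base-time 0 \
+ *		sched-entry OPEN  60000 -1 -1 \
+ *		sched-entry CLOSE 40000 -1 -1
+ *
+ * which lands in sja1105_vl_gate() below. Note that the driver requires
+ * all intervals to be multiples of 200 ns and maxoctets to be -1.
+ */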
+
+static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
+ struct sja1105_vl_lookup_entry *b)
+{
+ if (a->macaddr < b->macaddr)
+ return true;
+ if (a->macaddr > b->macaddr)
+ return false;
+ if (a->vlanid < b->vlanid)
+ return true;
+ if (a->vlanid > b->vlanid)
+ return false;
+ if (a->port < b->port)
+ return true;
+ if (a->port > b->port)
+ return false;
+ if (a->vlanprior < b->vlanprior)
+ return true;
+ if (a->vlanprior > b->vlanprior)
+ return false;
+ /* Keys are equal */
+ return false;
+}
+
+static int sja1105_init_virtual_links(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_vl_policing_entry *vl_policing;
+ struct sja1105_vl_forwarding_entry *vl_fwd;
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ bool have_critical_virtual_links = false;
+ struct sja1105_table *table;
+ struct sja1105_rule *rule;
+ int num_virtual_links = 0;
+ int max_sharindx = 0;
+ int i, j, k;
+
+ /* Figure out the dimensioning of the problem */
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ /* Each VL lookup entry matches on a single ingress port */
+ num_virtual_links += hweight_long(rule->port_mask);
+
+ if (rule->vl.type != SJA1105_VL_NONCRITICAL)
+ have_critical_virtual_links = true;
+ if (max_sharindx < rule->vl.sharindx)
+ max_sharindx = rule->vl.sharindx;
+ }
+
+ if (num_virtual_links > SJA1105_MAX_VL_LOOKUP_COUNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Not enough VL entries available");
+ return -ENOSPC;
+ }
+
+ if (max_sharindx + 1 > SJA1105_MAX_VL_LOOKUP_COUNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer index out of range");
+ return -ENOSPC;
+ }
+
+ max_sharindx = max_t(int, num_virtual_links, max_sharindx) + 1;
+
+ /* Discard previous VL Lookup Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Policing Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Forwarding Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Forwarding Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Nothing to do */
+ if (!num_virtual_links)
+ return 0;
+
+ /* Pre-allocate space in the static config tables */
+
+ /* VL Lookup Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ table->entries = kcalloc(num_virtual_links,
+ table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = num_virtual_links;
+ vl_lookup = table->entries;
+
+ k = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ unsigned long port;
+
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+
+ for_each_set_bit(port, &rule->port_mask, SJA1105_NUM_PORTS) {
+ vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
+ vl_lookup[k].port = port;
+ vl_lookup[k].macaddr = rule->key.vl.dmac;
+ if (rule->key.type == SJA1105_KEY_VLAN_AWARE_VL) {
+ vl_lookup[k].vlanid = rule->key.vl.vid;
+ vl_lookup[k].vlanprior = rule->key.vl.pcp;
+ } else {
+ u16 vid = dsa_8021q_rx_vid(priv->ds, port);
+
+ vl_lookup[k].vlanid = vid;
+ vl_lookup[k].vlanprior = 0;
+ }
+ /* For critical VLs, the DESTPORTS mask is taken from
+ * the VL Forwarding Table, so no point in putting it
+ * in the VL Lookup Table
+ */
+ if (rule->vl.type == SJA1105_VL_NONCRITICAL)
+ vl_lookup[k].destports = rule->vl.destports;
+ else
+ vl_lookup[k].iscritical = true;
+ vl_lookup[k].flow_cookie = rule->cookie;
+ k++;
+ }
+ }
+
+ /* UM10944.pdf chapter 4.2.3 VL Lookup table:
+ * "the entries in the VL Lookup table must be sorted in ascending
+ * order (i.e. the smallest value must be loaded first) according to
+ * the following sort order: MACADDR, VLANID, PORT, VLANPRIOR."
+ */
+ for (i = 0; i < num_virtual_links; i++) {
+ struct sja1105_vl_lookup_entry *a = &vl_lookup[i];
+
+ for (j = i + 1; j < num_virtual_links; j++) {
+ struct sja1105_vl_lookup_entry *b = &vl_lookup[j];
+
+ if (sja1105_vl_key_lower(b, a)) {
+ struct sja1105_vl_lookup_entry tmp = *a;
+
+ *a = *b;
+ *b = tmp;
+ }
+ }
+ }
+
+ if (!have_critical_virtual_links)
+ return 0;
+
+ /* VL Policing Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
+ table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = max_sharindx;
+ vl_policing = table->entries;
+
+ /* VL Forwarding Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
+ table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = max_sharindx;
+ vl_fwd = table->entries;
+
+ /* VL Forwarding Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = 1;
+
+ for (i = 0; i < num_virtual_links; i++) {
+ unsigned long cookie = vl_lookup[i].flow_cookie;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+
+ if (rule->vl.type == SJA1105_VL_NONCRITICAL)
+ continue;
+ if (rule->vl.type == SJA1105_VL_TIME_TRIGGERED) {
+ int sharindx = rule->vl.sharindx;
+
+ vl_policing[i].type = 1;
+ vl_policing[i].sharindx = sharindx;
+ vl_policing[i].maxlen = rule->vl.maxlen;
+ vl_policing[sharindx].type = 1;
+
+ vl_fwd[i].type = 1;
+ vl_fwd[sharindx].type = 1;
+ vl_fwd[sharindx].priority = rule->vl.ipv;
+ vl_fwd[sharindx].partition = 0;
+ vl_fwd[sharindx].destports = rule->vl.destports;
+ }
+ }
+
+ sja1105_frame_memory_partitioning(priv);
+
+ return 0;
+}
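+
+/* Note on the indirection above: for a time-triggered rule with
+ * sharindx = 5 matched at VL Lookup position i, vl_policing[i] points at
+ * shared index 5, and vl_fwd[5] is what actually carries the priority,
+ * partition and destination ports. Per the diagram above, the same
+ * mechanism is how groups of rate-constrained VLs would share one
+ * rate-limiting quota, by being given the same sharindx.
+ */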
+
+int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, unsigned long destports,
+ bool append)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ int rc;
+
+ if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
+ key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only redirect based on DMAC");
+ return -EOPNOTSUPP;
+ } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
+ priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
+ key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only redirect based on {DMAC, VID, PCP}");
+ return -EOPNOTSUPP;
+ }
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_VL;
+ rule->key = *key;
+ list_add(&rule->list, &priv->flow_block.rules);
+ }
+
+ rule->port_mask |= BIT(port);
+ if (append)
+ rule->vl.destports |= destports;
+ else
+ rule->vl.destports = destports;
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc) {
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+
+ return rc;
+}
+
+int sja1105_vl_delete(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+
+ rc = sja1105_compose_gating_subschedule(priv, extack);
+ if (rc)
+ return rc;
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc)
+ return rc;
+
+ rc = sja1105_init_scheduling(priv);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
+}
+
+int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time, u64 cycle_time_ext,
+ u32 num_entries, struct action_gate_entry *entries)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ int ipv = -1;
+ int i, rc;
+ s32 rem;
+
+ if (cycle_time_ext) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cycle time extension not supported");
+ return -EOPNOTSUPP;
+ }
+
+ div_s64_rem(base_time, sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Base time must be multiple of 200 ns");
+ return -ERANGE;
+ }
+
+ div_s64_rem(cycle_time, sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cycle time must be multiple of 200 ns");
+ return -ERANGE;
+ }
+
+ if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
+ key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only gate based on DMAC");
+ return -EOPNOTSUPP;
+ } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
+ priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
+ key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only gate based on {DMAC, VID, PCP}");
+ return -EOPNOTSUPP;
+ }
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ list_add(&rule->list, &priv->flow_block.rules);
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_VL;
+ rule->key = *key;
+ rule->vl.type = SJA1105_VL_TIME_TRIGGERED;
+ rule->vl.sharindx = index;
+ rule->vl.base_time = base_time;
+ rule->vl.cycle_time = cycle_time;
+ rule->vl.num_entries = num_entries;
+ rule->vl.entries = kcalloc(num_entries,
+ sizeof(struct action_gate_entry),
+ GFP_KERNEL);
+ if (!rule->vl.entries) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ div_s64_rem(entries[i].interval,
+ sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interval must be multiple of 200 ns");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (!entries[i].interval) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interval cannot be zero");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (ns_to_sja1105_delta(entries[i].interval) >
+ SJA1105_TAS_MAX_DELTA) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Maximum interval is 52 ms");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (entries[i].maxoctets != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload IntervalOctetMax");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (ipv == -1) {
+ ipv = entries[i].ipv;
+ } else if (ipv != entries[i].ipv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only support a single IPV per VL");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ rule->vl.entries[i] = entries[i];
+ }
+
+ if (ipv == -1) {
+ if (key->type == SJA1105_KEY_VLAN_AWARE_VL)
+ ipv = key->vl.pcp;
+ else
+ ipv = 0;
+ }
+
+ /* TODO: support per-flow MTU */
+ rule->vl.maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+ rule->vl.ipv = ipv;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ rc = sja1105_compose_gating_subschedule(priv, extack);
+ if (rc)
+ goto out;
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc)
+ goto out;
+
+ if (sja1105_gating_check_conflicts(priv, -1, extack)) {
+ NL_SET_ERR_MSG_MOD(extack, "Conflict with tc-taprio schedule");
+ rc = -ERANGE;
+ goto out;
+ }
+
+out:
+ if (rc) {
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule->vl.entries);
+ kfree(rule);
+ }
+ }
+
+ return rc;
+}
+
+static int sja1105_find_vlid(struct sja1105_private *priv, int port,
+ struct sja1105_key *key)
+{
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ struct sja1105_table *table;
+ int i;
+
+ if (WARN_ON(key->type != SJA1105_KEY_VLAN_AWARE_VL &&
+ key->type != SJA1105_KEY_VLAN_UNAWARE_VL))
+ return -1;
+
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ vl_lookup = table->entries;
+
+ for (i = 0; i < table->entry_count; i++) {
+ if (key->type == SJA1105_KEY_VLAN_AWARE_VL) {
+ if (vl_lookup[i].port == port &&
+ vl_lookup[i].macaddr == key->vl.dmac &&
+ vl_lookup[i].vlanid == key->vl.vid &&
+ vl_lookup[i].vlanprior == key->vl.pcp)
+ return i;
+ } else {
+ if (vl_lookup[i].port == port &&
+ vl_lookup[i].macaddr == key->vl.dmac)
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct flow_stats *stats,
+ struct netlink_ext_ack *extack)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 buf[SJA1105_SIZE_VL_STATUS] = {0};
+ u64 unreleased;
+ u64 timingerr;
+ u64 lengtherr;
+ int vlid, rc;
+ u64 pkts;
+
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ return 0;
+
+ vlid = sja1105_find_vlid(priv, port, &rule->key);
+ if (vlid < 0)
+ return 0;
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->vl_status + 2 * vlid, buf,
+ SJA1105_SIZE_VL_STATUS);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "SPI access failed");
+ return rc;
+ }
+
+ sja1105_unpack(buf, &timingerr, 31, 16, SJA1105_SIZE_VL_STATUS);
+ sja1105_unpack(buf, &unreleased, 15, 0, SJA1105_SIZE_VL_STATUS);
+ sja1105_unpack(buf, &lengtherr, 47, 32, SJA1105_SIZE_VL_STATUS);
+
+ pkts = timingerr + unreleased + lengtherr;
+
+ flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, 0,
+ jiffies - rule->vl.stats.lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ rule->vl.stats.pkts = pkts;
+ rule->vl.stats.lastused = jiffies;
+
+ return 0;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.h b/drivers/net/dsa/sja1105/sja1105_vl.h
new file mode 100644
index 000000000..173d78963
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_vl.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2020, NXP Semiconductors
+ */
+#ifndef _SJA1105_VL_H
+#define _SJA1105_VL_H
+
+#include "sja1105.h"
+
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_VL)
+
+int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, unsigned long destports,
+ bool append);
+
+int sja1105_vl_delete(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule,
+ struct netlink_ext_ack *extack);
+
+int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time, u64 cycle_time_ext,
+ u32 num_entries, struct action_gate_entry *entries);
+
+int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct flow_stats *stats,
+ struct netlink_ext_ack *extack);
+
+#else
+
+static inline int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key,
+ unsigned long destports,
+ bool append)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_delete(struct sja1105_private *priv,
+ int port, struct sja1105_rule *rule,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time,
+ u64 cycle_time_ext, u32 num_entries,
+ struct action_gate_entry *entries)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule,
+ struct flow_stats *stats,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_VL) */
+
+#endif /* _SJA1105_VL_H */