author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/ethernet/microchip/lan966x
parent    Initial commit.
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/microchip/lan966x')
26 files changed, 10742 insertions, 0 deletions
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
new file mode 100644
index 000000000..49e1464a4
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -0,0 +1,11 @@
+config LAN966X_SWITCH
+	tristate "Lan966x switch driver"
+	depends on PTP_1588_CLOCK_OPTIONAL
+	depends on HAS_IOMEM
+	depends on OF
+	depends on NET_SWITCHDEV
+	depends on BRIDGE || BRIDGE=n
+	select PHYLINK
+	select PACKING
+	help
+	  This driver supports the Lan966x network switch device.
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
new file mode 100644
index 000000000..962f7c5f9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip Lan966x network device drivers.
+#
+
+obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
+
+lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
+		       lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
+		       lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \
+		       lan966x_ptp.o lan966x_fdma.o lan966x_lag.o \
+		       lan966x_tc.o lan966x_mqprio.o lan966x_taprio.o \
+		       lan966x_tbf.o lan966x_cbs.o lan966x_ets.o \
+		       lan966x_tc_matchall.o lan966x_police.o lan966x_mirror.o
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c
new file mode 100644
index 000000000..70cbbf8d2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_cbs_add(struct lan966x_port *port,
+		    struct tc_cbs_qopt_offload *qopt)
+{
+	struct lan966x *lan966x = port->lan966x;
+	u32 cir, cbs;
+	u8 se_idx;
+
+	/* Check for invalid values */
+	if (qopt->idleslope <= 0 ||
+	    qopt->sendslope >= 0 ||
+	    qopt->locredit >= qopt->hicredit)
+		return -EINVAL;
+
+	se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue;
+	cir = qopt->idleslope;
+	cbs = (qopt->idleslope - qopt->sendslope) *
+		(qopt->hicredit - qopt->locredit) /
+		-qopt->sendslope;
+
+	/* Rate unit is 100 kbps */
+	cir = DIV_ROUND_UP(cir, 100);
+	/* Avoid using zero rate */
+	cir = cir ?: 1;
+	/* Burst unit is 4kB */
+	cbs = DIV_ROUND_UP(cbs, 4096);
+	/* Avoid using zero burst */
+	cbs = cbs ?: 1;
+
+	/* Check that actually the result can be written */
+	if (cir > GENMASK(15, 0) ||
+	    cbs > GENMASK(6, 0))
+		return -EINVAL;
+
+	lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) |
+		QSYS_SE_CFG_SE_FRM_MODE_SET(1),
+		QSYS_SE_CFG_SE_AVB_ENA |
+		QSYS_SE_CFG_SE_FRM_MODE,
+		lan966x, QSYS_SE_CFG(se_idx));
+
+	lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) |
+	       QSYS_CIR_CFG_CIR_BURST_SET(cbs),
+	       lan966x, QSYS_CIR_CFG(se_idx));
+
+	return 0;
+}
+
+int lan966x_cbs_del(struct lan966x_port *port,
+		    struct tc_cbs_qopt_offload *qopt)
+{
+	struct lan966x *lan966x = port->lan966x;
+	u8 se_idx;
+
+	se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue;
+
+	lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) |
+		QSYS_SE_CFG_SE_FRM_MODE_SET(0),
+		QSYS_SE_CFG_SE_AVB_ENA |
+		QSYS_SE_CFG_SE_FRM_MODE,
+		lan966x, QSYS_SE_CFG(se_idx));
+
+	lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) |
+	       QSYS_CIR_CFG_CIR_BURST_SET(0),
+	       lan966x, QSYS_CIR_CFG(se_idx));
+
+	return 0;
+}
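A quick worked example of the credit-based shaper conversion in lan966x_cbs_add() above, with illustrative tc-cbs numbers (not part of the patch):

	/* idleslope = 20000 kbps, sendslope = -980000 kbps,
	 * hicredit = 153, locredit = -1389
	 *
	 * cir = 20000 -> DIV_ROUND_UP(20000, 100) = 200 (in 100 kbps units)
	 * cbs = (20000 + 980000) * (153 + 1389) / 980000 = 1573 bytes
	 *     -> DIV_ROUND_UP(1573, 4096) = 1 (in 4 kB units)
	 *
	 * 200 fits the 16-bit CIR_RATE field and 1 the 7-bit CIR_BURST
	 * field, so both pass the GENMASK checks and are programmed
	 * into QSYS_CIR_CFG for the queue's scheduler element.
	 */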
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
new file mode 100644
index 000000000..06811c60d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/netdevice.h>
+
+#include "lan966x_main.h"
+
+/* Number of traffic classes */
+#define LAN966X_NUM_TC			8
+#define LAN966X_STATS_CHECK_DELAY	(2 * HZ)
+
+static const struct lan966x_stat_layout lan966x_stats_layout[] = {
+	{ .name = "rx_octets", .offset = 0x00, },
+	{ .name = "rx_unicast", .offset = 0x01, },
+	{ .name = "rx_multicast", .offset = 0x02 },
+	{ .name = "rx_broadcast", .offset = 0x03 },
+	{ .name = "rx_short", .offset = 0x04 },
+	{ .name = "rx_frag", .offset = 0x05 },
+	{ .name = "rx_jabber", .offset = 0x06 },
+	{ .name = "rx_crc", .offset = 0x07 },
+	{ .name = "rx_symbol_err", .offset = 0x08 },
+	{ .name = "rx_sz_64", .offset = 0x09 },
+	{ .name = "rx_sz_65_127", .offset = 0x0a },
+	{ .name = "rx_sz_128_255", .offset = 0x0b },
+	{ .name = "rx_sz_256_511", .offset = 0x0c },
+	{ .name = "rx_sz_512_1023", .offset = 0x0d },
+	{ .name = "rx_sz_1024_1526", .offset = 0x0e },
+	{ .name = "rx_sz_jumbo", .offset = 0x0f },
+	{ .name = "rx_pause", .offset = 0x10 },
+	{ .name = "rx_control", .offset = 0x11 },
+	{ .name = "rx_long", .offset = 0x12 },
+	{ .name = "rx_cat_drop", .offset = 0x13 },
+	{ .name = "rx_red_prio_0", .offset = 0x14 },
+	{ .name = "rx_red_prio_1", .offset = 0x15 },
+	{ .name = "rx_red_prio_2", .offset = 0x16 },
+	{ .name = "rx_red_prio_3", .offset = 0x17 },
+	{ .name = "rx_red_prio_4", .offset = 0x18 },
+	{ .name = "rx_red_prio_5", .offset = 0x19 },
+	{ .name = "rx_red_prio_6", .offset = 0x1a },
+	{ .name = "rx_red_prio_7", .offset = 0x1b },
+	{ .name = "rx_yellow_prio_0", .offset = 0x1c },
+	{ .name = "rx_yellow_prio_1", .offset = 0x1d },
+	{ .name = "rx_yellow_prio_2", .offset = 0x1e },
+	{ .name = "rx_yellow_prio_3", .offset = 0x1f },
+	{ .name = "rx_yellow_prio_4", .offset = 0x20 },
+	{ .name = "rx_yellow_prio_5", .offset = 0x21 },
+	{ .name = "rx_yellow_prio_6", .offset = 0x22 },
+	{ .name = "rx_yellow_prio_7", .offset = 0x23 },
+	{ .name = "rx_green_prio_0", .offset = 0x24 },
+	{ .name = "rx_green_prio_1", .offset = 0x25 },
+	{ .name = "rx_green_prio_2", .offset = 0x26 },
+	{ .name = "rx_green_prio_3", .offset = 0x27 },
+	{ .name = "rx_green_prio_4", .offset = 0x28 },
+	{ .name = "rx_green_prio_5", .offset = 0x29 },
+	{ .name = "rx_green_prio_6", .offset = 0x2a },
+	{ .name = "rx_green_prio_7", .offset = 0x2b },
+	{ .name = "rx_assembly_err", .offset = 0x2c },
+	{ .name = "rx_smd_err", .offset = 0x2d },
+	{ .name = "rx_assembly_ok", .offset = 0x2e },
+	{ .name = "rx_merge_frag", .offset = 0x2f },
+	{ .name = "rx_pmac_octets", .offset = 0x30, },
+	{ .name = "rx_pmac_unicast", .offset = 0x31, },
+	{ .name = "rx_pmac_multicast", .offset = 0x32 },
+	{ .name = "rx_pmac_broadcast", .offset = 0x33 },
+	{ .name = "rx_pmac_short", .offset = 0x34 },
+	{ .name = "rx_pmac_frag", .offset = 0x35 },
+	{ .name = "rx_pmac_jabber", .offset = 0x36 },
+	{ .name = "rx_pmac_crc", .offset = 0x37 },
+	{ .name = "rx_pmac_symbol_err", .offset = 0x38 },
+	{ .name = "rx_pmac_sz_64", .offset = 0x39 },
+	{ .name = "rx_pmac_sz_65_127", .offset = 0x3a },
+	{ .name = "rx_pmac_sz_128_255", .offset = 0x3b },
+	{ .name = "rx_pmac_sz_256_511", .offset = 0x3c },
+	{ .name = "rx_pmac_sz_512_1023", .offset = 0x3d },
+	{ .name = "rx_pmac_sz_1024_1526", .offset = 0x3e },
+	{ .name = "rx_pmac_sz_jumbo", .offset = 0x3f },
+	{ .name = "rx_pmac_pause", .offset = 0x40 },
+	{ .name = "rx_pmac_control", .offset = 0x41 },
+	{ .name = "rx_pmac_long", .offset = 0x42 },
+
+	{ .name = "tx_octets", .offset = 0x80, },
+	{ .name = "tx_unicast", .offset = 0x81, },
+	{ .name = "tx_multicast", .offset = 0x82 },
+	{ .name = "tx_broadcast", .offset = 0x83 },
+	{ .name = "tx_col", .offset = 0x84 },
+	{ .name = "tx_drop", .offset = 0x85 },
+	{ .name = "tx_pause", .offset = 0x86 },
+	{ .name = "tx_sz_64", .offset = 0x87 },
+	{ .name = "tx_sz_65_127", .offset = 0x88 },
+	{ .name = "tx_sz_128_255", .offset = 0x89 },
+	{ .name = "tx_sz_256_511", .offset = 0x8a },
+	{ .name = "tx_sz_512_1023", .offset = 0x8b },
+	{ .name = "tx_sz_1024_1526", .offset = 0x8c },
+	{ .name = "tx_sz_jumbo", .offset = 0x8d },
+	{ .name = "tx_yellow_prio_0", .offset = 0x8e },
+	{ .name = "tx_yellow_prio_1", .offset = 0x8f },
+	{ .name = "tx_yellow_prio_2", .offset = 0x90 },
+	{ .name = "tx_yellow_prio_3", .offset = 0x91 },
+	{ .name = "tx_yellow_prio_4", .offset = 0x92 },
+	{ .name = "tx_yellow_prio_5", .offset = 0x93 },
+	{ .name = "tx_yellow_prio_6", .offset = 0x94 },
+	{ .name = "tx_yellow_prio_7", .offset = 0x95 },
+	{ .name = "tx_green_prio_0", .offset = 0x96 },
+	{ .name = "tx_green_prio_1", .offset = 0x97 },
+	{ .name = "tx_green_prio_2", .offset = 0x98 },
+	{ .name = "tx_green_prio_3", .offset = 0x99 },
+	{ .name = "tx_green_prio_4", .offset = 0x9a },
+	{ .name = "tx_green_prio_5", .offset = 0x9b },
+	{ .name = "tx_green_prio_6", .offset = 0x9c },
+	{ .name = "tx_green_prio_7", .offset = 0x9d },
+	{ .name = "tx_aged", .offset = 0x9e },
+	{ .name = "tx_llct", .offset = 0x9f },
+	{ .name = "tx_ct", .offset = 0xa0 },
+	{ .name = "tx_mm_hold", .offset = 0xa1 },
+	{ .name = "tx_merge_frag", .offset = 0xa2 },
+	{ .name = "tx_pmac_octets", .offset = 0xa3, },
+	{ .name = "tx_pmac_unicast", .offset = 0xa4, },
+	{ .name = "tx_pmac_multicast", .offset = 0xa5 },
+	{ .name = "tx_pmac_broadcast", .offset = 0xa6 },
+	{ .name = "tx_pmac_pause", .offset = 0xa7 },
+	{ .name = "tx_pmac_sz_64", .offset = 0xa8 },
+	{ .name = "tx_pmac_sz_65_127", .offset = 0xa9 },
+	{ .name = "tx_pmac_sz_128_255", .offset = 0xaa },
+	{ .name = "tx_pmac_sz_256_511", .offset = 0xab },
+	{ .name = "tx_pmac_sz_512_1023", .offset = 0xac },
+	{ .name = "tx_pmac_sz_1024_1526", .offset = 0xad },
+	{ .name = "tx_pmac_sz_jumbo", .offset = 0xae },
+
+	{ .name = "dr_local", .offset = 0x100 },
+	{ .name = "dr_tail", .offset = 0x101 },
+	{ .name = "dr_yellow_prio_0", .offset = 0x102 },
+	{ .name = "dr_yellow_prio_1", .offset = 0x103 },
+	{ .name = "dr_yellow_prio_2", .offset = 0x104 },
+	{ .name = "dr_yellow_prio_3", .offset = 0x105 },
+	{ .name = "dr_yellow_prio_4", .offset = 0x106 },
+	{ .name = "dr_yellow_prio_5", .offset = 0x107 },
+	{ .name = "dr_yellow_prio_6", .offset = 0x108 },
+	{ .name = "dr_yellow_prio_7", .offset = 0x109 },
+	{ .name = "dr_green_prio_0", .offset = 0x10a },
+	{ .name = "dr_green_prio_1", .offset = 0x10b },
+	{ .name = "dr_green_prio_2", .offset = 0x10c },
+	{ .name = "dr_green_prio_3", .offset = 0x10d },
+	{ .name = "dr_green_prio_4", .offset = 0x10e },
+	{ .name = "dr_green_prio_5", .offset = 0x10f },
+	{ .name = "dr_green_prio_6", .offset = 0x110 },
+	{ .name = "dr_green_prio_7", .offset = 0x111 },
+};
+
+/* The following numbers are indexes into lan966x_stats_layout[] */
+#define SYS_COUNT_RX_OCT		0
+#define SYS_COUNT_RX_UC			1
+#define SYS_COUNT_RX_MC			2
+#define SYS_COUNT_RX_BC			3
+#define SYS_COUNT_RX_SHORT		4
+#define SYS_COUNT_RX_FRAG		5
+#define SYS_COUNT_RX_JABBER		6
+#define SYS_COUNT_RX_CRC		7
+#define SYS_COUNT_RX_SYMBOL_ERR		8
+#define SYS_COUNT_RX_SZ_64		9
+#define SYS_COUNT_RX_SZ_65_127		10
+#define SYS_COUNT_RX_SZ_128_255		11
+#define SYS_COUNT_RX_SZ_256_511		12
+#define SYS_COUNT_RX_SZ_512_1023	13
+#define SYS_COUNT_RX_SZ_1024_1526	14
+#define SYS_COUNT_RX_SZ_JUMBO		15
+#define SYS_COUNT_RX_PAUSE		16
+#define SYS_COUNT_RX_CONTROL		17
+#define SYS_COUNT_RX_LONG		18
+#define SYS_COUNT_RX_CAT_DROP		19
+#define SYS_COUNT_RX_RED_PRIO_0		20
+#define SYS_COUNT_RX_RED_PRIO_1		21
+#define SYS_COUNT_RX_RED_PRIO_2		22
+#define SYS_COUNT_RX_RED_PRIO_3		23
+#define SYS_COUNT_RX_RED_PRIO_4		24
+#define SYS_COUNT_RX_RED_PRIO_5		25
+#define SYS_COUNT_RX_RED_PRIO_6		26
+#define SYS_COUNT_RX_RED_PRIO_7		27
+#define SYS_COUNT_RX_YELLOW_PRIO_0	28
+#define SYS_COUNT_RX_YELLOW_PRIO_1	29
+#define SYS_COUNT_RX_YELLOW_PRIO_2	30
+#define SYS_COUNT_RX_YELLOW_PRIO_3	31
+#define SYS_COUNT_RX_YELLOW_PRIO_4	32
+#define SYS_COUNT_RX_YELLOW_PRIO_5	33
+#define SYS_COUNT_RX_YELLOW_PRIO_6	34
+#define SYS_COUNT_RX_YELLOW_PRIO_7	35
+#define SYS_COUNT_RX_GREEN_PRIO_0	36
+#define SYS_COUNT_RX_GREEN_PRIO_1	37
+#define SYS_COUNT_RX_GREEN_PRIO_2	38
+#define SYS_COUNT_RX_GREEN_PRIO_3	39
+#define SYS_COUNT_RX_GREEN_PRIO_4	40
+#define SYS_COUNT_RX_GREEN_PRIO_5	41
+#define SYS_COUNT_RX_GREEN_PRIO_6	42
+#define SYS_COUNT_RX_GREEN_PRIO_7	43
+#define SYS_COUNT_RX_ASSEMBLY_ERR	44
+#define SYS_COUNT_RX_SMD_ERR		45
+#define SYS_COUNT_RX_ASSEMBLY_OK	46
+#define SYS_COUNT_RX_MERGE_FRAG		47
+#define SYS_COUNT_RX_PMAC_OCT		48
+#define SYS_COUNT_RX_PMAC_UC		49
+#define SYS_COUNT_RX_PMAC_MC		50
+#define SYS_COUNT_RX_PMAC_BC		51
+#define SYS_COUNT_RX_PMAC_SHORT		52
+#define SYS_COUNT_RX_PMAC_FRAG		53
+#define SYS_COUNT_RX_PMAC_JABBER	54
+#define SYS_COUNT_RX_PMAC_CRC		55
+#define SYS_COUNT_RX_PMAC_SYMBOL_ERR	56
+#define SYS_COUNT_RX_PMAC_SZ_64		57
+#define SYS_COUNT_RX_PMAC_SZ_65_127	58
+#define SYS_COUNT_RX_PMAC_SZ_128_255	59
+#define SYS_COUNT_RX_PMAC_SZ_256_511	60
+#define SYS_COUNT_RX_PMAC_SZ_512_1023	61
+#define SYS_COUNT_RX_PMAC_SZ_1024_1526	62
+#define SYS_COUNT_RX_PMAC_SZ_JUMBO	63
+#define SYS_COUNT_RX_PMAC_PAUSE		64
+#define SYS_COUNT_RX_PMAC_CONTROL	65
+#define SYS_COUNT_RX_PMAC_LONG		66
+
+#define SYS_COUNT_TX_OCT		67
+#define SYS_COUNT_TX_UC			68
+#define SYS_COUNT_TX_MC			69
+#define SYS_COUNT_TX_BC			70
+#define SYS_COUNT_TX_COL		71
+#define SYS_COUNT_TX_DROP		72
+#define SYS_COUNT_TX_PAUSE		73
+#define SYS_COUNT_TX_SZ_64		74
+#define SYS_COUNT_TX_SZ_65_127		75
+#define SYS_COUNT_TX_SZ_128_255		76
+#define SYS_COUNT_TX_SZ_256_511		77
+#define SYS_COUNT_TX_SZ_512_1023	78
+#define SYS_COUNT_TX_SZ_1024_1526	79
+#define SYS_COUNT_TX_SZ_JUMBO		80
+#define SYS_COUNT_TX_YELLOW_PRIO_0	81
+#define SYS_COUNT_TX_YELLOW_PRIO_1	82
+#define SYS_COUNT_TX_YELLOW_PRIO_2	83
+#define SYS_COUNT_TX_YELLOW_PRIO_3	84
+#define SYS_COUNT_TX_YELLOW_PRIO_4	85
+#define SYS_COUNT_TX_YELLOW_PRIO_5	86
+#define SYS_COUNT_TX_YELLOW_PRIO_6	87
+#define SYS_COUNT_TX_YELLOW_PRIO_7	88
+#define SYS_COUNT_TX_GREEN_PRIO_0	89
+#define SYS_COUNT_TX_GREEN_PRIO_1	90
+#define SYS_COUNT_TX_GREEN_PRIO_2	91
+#define SYS_COUNT_TX_GREEN_PRIO_3	92
+#define SYS_COUNT_TX_GREEN_PRIO_4	93
+#define SYS_COUNT_TX_GREEN_PRIO_5	94
+#define SYS_COUNT_TX_GREEN_PRIO_6	95
+#define SYS_COUNT_TX_GREEN_PRIO_7	96
+#define SYS_COUNT_TX_AGED		97
+#define SYS_COUNT_TX_LLCT		98
+#define SYS_COUNT_TX_CT			99
+#define SYS_COUNT_TX_MM_HOLD		100
+#define SYS_COUNT_TX_MERGE_FRAG		101
+#define SYS_COUNT_TX_PMAC_OCT		102
+#define SYS_COUNT_TX_PMAC_UC		103
+#define SYS_COUNT_TX_PMAC_MC		104
+#define SYS_COUNT_TX_PMAC_BC		105
+#define SYS_COUNT_TX_PMAC_PAUSE		106
+#define SYS_COUNT_TX_PMAC_SZ_64		107
+#define SYS_COUNT_TX_PMAC_SZ_65_127	108
+#define SYS_COUNT_TX_PMAC_SZ_128_255	109
+#define SYS_COUNT_TX_PMAC_SZ_256_511	110
+#define SYS_COUNT_TX_PMAC_SZ_512_1023	111
+#define SYS_COUNT_TX_PMAC_SZ_1024_1526	112
+#define SYS_COUNT_TX_PMAC_SZ_JUMBO	113
+
+#define SYS_COUNT_DR_LOCAL		114
+#define SYS_COUNT_DR_TAIL		115
+#define SYS_COUNT_DR_YELLOW_PRIO_0	116
+#define SYS_COUNT_DR_YELLOW_PRIO_1	117
+#define SYS_COUNT_DR_YELLOW_PRIO_2	118
+#define SYS_COUNT_DR_YELLOW_PRIO_3	119
+#define SYS_COUNT_DR_YELLOW_PRIO_4	120
+#define SYS_COUNT_DR_YELLOW_PRIO_5	121
+#define SYS_COUNT_DR_YELLOW_PRIO_6	122
+#define SYS_COUNT_DR_YELLOW_PRIO_7	123
+#define SYS_COUNT_DR_GREEN_PRIO_0	124
+#define SYS_COUNT_DR_GREEN_PRIO_1	125
+#define SYS_COUNT_DR_GREEN_PRIO_2	126
+#define SYS_COUNT_DR_GREEN_PRIO_3	127
+#define SYS_COUNT_DR_GREEN_PRIO_4	128
+#define SYS_COUNT_DR_GREEN_PRIO_5	129
+#define SYS_COUNT_DR_GREEN_PRIO_6	130
+#define SYS_COUNT_DR_GREEN_PRIO_7	131
+
+/* Add a possibly wrapping 32 bit value to a 64 bit counter */
+static void lan966x_add_cnt(u64 *cnt, u32 val)
+{
+	if (val < (*cnt & U32_MAX))
+		*cnt += (u64)1 << 32; /* value has wrapped */
+
+	*cnt = (*cnt & ~(u64)U32_MAX) + val;
+}
+
+static void lan966x_stats_update(struct lan966x *lan966x)
+{
+	int i, j;
+
+	mutex_lock(&lan966x->stats_lock);
+
+	for (i = 0; i < lan966x->num_phys_ports; i++) {
+		uint idx = i * lan966x->num_stats;
+
+		lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(i),
+		       lan966x, SYS_STAT_CFG);
+
+		for (j = 0; j < lan966x->num_stats; j++) {
+			u32 offset = lan966x->stats_layout[j].offset;
+
+			lan966x_add_cnt(&lan966x->stats[idx++],
+					lan_rd(lan966x, SYS_CNT(offset)));
+		}
+	}
+
+	mutex_unlock(&lan966x->stats_lock);
+}
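How the wrap handling in lan966x_add_cnt() above plays out, with illustrative numbers (not part of the patch):

	/* The 64-bit soft counter holds 0x1_FFFF_FFF0 (low word
	 * 0xFFFFFFF0). The 32-bit hardware register is now read back
	 * as 0x00000010, i.e. it has wrapped. Because
	 * 0x10 < 0xFFFFFFF0, the function first adds 1 << 32, giving
	 * 0x2_FFFF_FFF0, then replaces the low word with the fresh
	 * reading: (0x2_FFFF_FFF0 & ~0xFFFFFFFF) + 0x10 = 0x2_0000_0010.
	 */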
+static int lan966x_get_sset_count(struct net_device *dev, int sset)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+	struct lan966x *lan966x = port->lan966x;
+
+	if (sset != ETH_SS_STATS)
+		return -EOPNOTSUPP;
+
+	return lan966x->num_stats;
+}
+
+static void lan966x_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+	struct lan966x_port *port = netdev_priv(netdev);
+	struct lan966x *lan966x = port->lan966x;
+	int i;
+
+	if (sset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < lan966x->num_stats; i++)
+		memcpy(data + i * ETH_GSTRING_LEN,
+		       lan966x->stats_layout[i].name, ETH_GSTRING_LEN);
+}
+
+static void lan966x_get_ethtool_stats(struct net_device *dev,
+				      struct ethtool_stats *stats, u64 *data)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+	struct lan966x *lan966x = port->lan966x;
+	int i;
+
+	/* check and update now */
+	lan966x_stats_update(lan966x);
+
+	/* Copy all counters */
+	for (i = 0; i < lan966x->num_stats; i++)
+		*data++ = lan966x->stats[port->chip_port *
+					 lan966x->num_stats + i];
+}
+
+static void lan966x_get_eth_mac_stats(struct net_device *dev,
+				      struct ethtool_eth_mac_stats *mac_stats)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+	struct lan966x *lan966x = port->lan966x;
+	u32 idx;
+
+	lan966x_stats_update(lan966x);
+
+	idx = port->chip_port * lan966x->num_stats;
+
+	mutex_lock(&lan966x->stats_lock);
+
+	mac_stats->FramesTransmittedOK =
+		lan966x->stats[idx + SYS_COUNT_TX_UC] +
+		lan966x->stats[idx + SYS_COUNT_TX_MC] +
+		lan966x->stats[idx + SYS_COUNT_TX_BC] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_UC] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
+	mac_stats->SingleCollisionFrames =
+		lan966x->stats[idx + SYS_COUNT_TX_COL];
+	mac_stats->MultipleCollisionFrames = 0;
+	mac_stats->FramesReceivedOK =
+		lan966x->stats[idx + SYS_COUNT_RX_UC] +
+		lan966x->stats[idx + SYS_COUNT_RX_MC] +
+		lan966x->stats[idx + SYS_COUNT_RX_BC];
+	mac_stats->FrameCheckSequenceErrors =
+		lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+		lan966x->stats[idx + SYS_COUNT_RX_CRC];
+	mac_stats->AlignmentErrors = 0;
+	mac_stats->OctetsTransmittedOK =
+		lan966x->stats[idx + SYS_COUNT_TX_OCT] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
+	mac_stats->FramesWithDeferredXmissions =
+		lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD];
+	mac_stats->LateCollisions = 0;
+	mac_stats->FramesAbortedDueToXSColls = 0;
+	mac_stats->FramesLostDueToIntMACXmitError = 0;
+	mac_stats->CarrierSenseErrors = 0;
+	mac_stats->OctetsReceivedOK =
+		lan966x->stats[idx + SYS_COUNT_RX_OCT];
+	mac_stats->FramesLostDueToIntMACRcvError = 0;
+	mac_stats->MulticastFramesXmittedOK =
+		lan966x->stats[idx + SYS_COUNT_TX_MC] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC];
+	mac_stats->BroadcastFramesXmittedOK =
+		lan966x->stats[idx + SYS_COUNT_TX_BC] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
+	mac_stats->FramesWithExcessiveDeferral = 0;
+	mac_stats->MulticastFramesReceivedOK =
+		lan966x->stats[idx + SYS_COUNT_RX_MC];
+	mac_stats->BroadcastFramesReceivedOK =
+		lan966x->stats[idx + SYS_COUNT_RX_BC];
+	mac_stats->InRangeLengthErrors =
+		lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+		lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+		lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC];
+	mac_stats->OutOfRangeLengthField =
+		lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
+		lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+	mac_stats->FrameTooLongErrors =
+		lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+
+	mutex_unlock(&lan966x->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = {
+	{    0,    64 },
+	{   65,   127 },
+	{  128,   255 },
+	{  256,   511 },
+	{  512,  1023 },
+	{ 1024,  1518 },
+	{ 1519, 10239 },
+	{}
+};
+
+static void lan966x_get_eth_rmon_stats(struct net_device *dev,
+				       struct ethtool_rmon_stats *rmon_stats,
+				       const struct ethtool_rmon_hist_range **ranges)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+	struct lan966x *lan966x = port->lan966x;
+	u32 idx;
+
+	lan966x_stats_update(lan966x);
+
+	idx = port->chip_port * lan966x->num_stats;
+
+	mutex_lock(&lan966x->stats_lock);
+
+	rmon_stats->undersize_pkts =
+		lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT];
+	rmon_stats->oversize_pkts =
+		lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+	rmon_stats->fragments =
+		lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG];
+	rmon_stats->jabbers =
+		lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER];
+	rmon_stats->hist[0] =
+		lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64];
+	rmon_stats->hist[1] =
+		lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127];
+	rmon_stats->hist[2] =
+		lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255];
+	rmon_stats->hist[3] =
+		lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511];
+	rmon_stats->hist[4] =
+		lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023];
+	rmon_stats->hist[5] =
+		lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526];
+	rmon_stats->hist[6] =
+		lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
+		lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526];
+
+	rmon_stats->hist_tx[0] =
+		lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64];
+	rmon_stats->hist_tx[1] =
+		lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127];
+	rmon_stats->hist_tx[2] =
+		lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255];
+	rmon_stats->hist_tx[3] =
+		lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511];
+	rmon_stats->hist_tx[4] =
+		lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023];
+	rmon_stats->hist_tx[5] =
+		lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
+	rmon_stats->hist_tx[6] =
+		lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
+		lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
+
+	mutex_unlock(&lan966x->stats_lock);
+
+	*ranges = lan966x_rmon_ranges;
+}
+
+static int lan966x_get_link_ksettings(struct net_device *ndev,
+				      struct ethtool_link_ksettings *cmd)
+{
+	struct lan966x_port *port = netdev_priv(ndev);
+
+	return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int lan966x_set_link_ksettings(struct net_device *ndev,
+				      const struct ethtool_link_ksettings *cmd)
+{
+	struct lan966x_port *port = netdev_priv(ndev);
+
+	return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+static void lan966x_get_pauseparam(struct net_device *dev,
+				   struct ethtool_pauseparam *pause)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+
+	phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int lan966x_set_pauseparam(struct net_device *dev,
+				  struct ethtool_pauseparam *pause)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+
+	return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
+static int lan966x_get_ts_info(struct net_device *dev,
+			       struct ethtool_ts_info *info)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+	struct lan966x *lan966x = port->lan966x;
+	struct lan966x_phc *phc;
+
+	if (!lan966x->ptp)
+		return ethtool_op_get_ts_info(dev, info);
+
+	phc = &lan966x->phc[LAN966X_PHC_PORT];
+
+	info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
+	if (info->phc_index == -1) {
+		info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+					 SOF_TIMESTAMPING_RX_SOFTWARE |
+					 SOF_TIMESTAMPING_SOFTWARE;
+		return 0;
+	}
+	info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+				 SOF_TIMESTAMPING_RX_SOFTWARE |
+				 SOF_TIMESTAMPING_SOFTWARE |
+				 SOF_TIMESTAMPING_TX_HARDWARE |
+				 SOF_TIMESTAMPING_RX_HARDWARE |
+				 SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+			 BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+			   BIT(HWTSTAMP_FILTER_ALL);
+
+	return 0;
+}
+
+const struct ethtool_ops lan966x_ethtool_ops = {
+	.get_link_ksettings	= lan966x_get_link_ksettings,
+	.set_link_ksettings	= lan966x_set_link_ksettings,
+	.get_pauseparam		= lan966x_get_pauseparam,
+	.set_pauseparam		= lan966x_set_pauseparam,
+	.get_sset_count		= lan966x_get_sset_count,
+	.get_strings		= lan966x_get_strings,
+	.get_ethtool_stats	= lan966x_get_ethtool_stats,
+	.get_eth_mac_stats	= lan966x_get_eth_mac_stats,
+	.get_rmon_stats		= lan966x_get_eth_rmon_stats,
+	.get_link		= ethtool_op_get_link,
+	.get_ts_info		= lan966x_get_ts_info,
+};
+
+static void lan966x_check_stats_work(struct work_struct *work)
+{
+	struct delayed_work *del_work = to_delayed_work(work);
+	struct lan966x *lan966x = container_of(del_work, struct lan966x,
+					       stats_work);
+
+	lan966x_stats_update(lan966x);
+
+	queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
+			   LAN966X_STATS_CHECK_DELAY);
+}
+
+void lan966x_stats_get(struct net_device *dev,
+		       struct rtnl_link_stats64 *stats)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+	struct lan966x *lan966x = port->lan966x;
+	u32 idx;
+	int i;
+
+	idx = port->chip_port * lan966x->num_stats;
+
+	mutex_lock(&lan966x->stats_lock);
+
+	stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] +
+			  lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT];
+
+	stats->rx_packets = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+			    lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+			    lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+			    lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+			    lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
+			    lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
+			    lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
+			    lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
+			    lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
+			    lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
+			    lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
+			    lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] +
+			    lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+			    lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
+			    lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
+			    lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
+			    lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64] +
+			    lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127] +
+			    lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255] +
+			    lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511] +
+			    lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023] +
+			    lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526] +
+			    lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO];
+
+	stats->multicast = lan966x->stats[idx + SYS_COUNT_RX_MC] +
+			   lan966x->stats[idx + SYS_COUNT_RX_PMAC_MC];
+
+	stats->rx_errors = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+			   lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+			   lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+			   lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+			   lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
+			   lan966x->stats[idx + SYS_COUNT_RX_LONG];
+
+	stats->rx_dropped = dev->stats.rx_dropped +
+			    lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+			    lan966x->stats[idx + SYS_COUNT_DR_LOCAL] +
+			    lan966x->stats[idx + SYS_COUNT_DR_TAIL] +
+			    lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_0] +
+			    lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_1] +
+			    lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_2] +
+			    lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_3] +
+			    lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_4] +
+			    lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_5] +
+			    lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_6] +
+			    lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_7];
+
+	for (i = 0; i < LAN966X_NUM_TC; i++) {
+		stats->rx_dropped +=
+			(lan966x->stats[idx + SYS_COUNT_DR_YELLOW_PRIO_0 + i] +
+			 lan966x->stats[idx + SYS_COUNT_DR_GREEN_PRIO_0 + i]);
+	}
+
+	/* Get Tx stats */
+	stats->tx_bytes = lan966x->stats[idx + SYS_COUNT_TX_OCT] +
+			  lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
+
+	stats->tx_packets = lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
+			    lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
+			    lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
+			    lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
+			    lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
+			    lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
+			    lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] +
+			    lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64] +
+			    lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127] +
+			    lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255] +
+			    lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511] +
+			    lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023] +
+			    lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526] +
+			    lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO];
+
+	stats->tx_dropped = lan966x->stats[idx + SYS_COUNT_TX_DROP] +
+			    lan966x->stats[idx + SYS_COUNT_TX_AGED];
+
+	stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL];
+
+	mutex_unlock(&lan966x->stats_lock);
+}
+
+int lan966x_stats_init(struct lan966x *lan966x)
+{
+	char queue_name[32];
+
+	lan966x->stats_layout = lan966x_stats_layout;
+	lan966x->num_stats = ARRAY_SIZE(lan966x_stats_layout);
+	lan966x->stats = devm_kcalloc(lan966x->dev, lan966x->num_phys_ports *
+				      lan966x->num_stats,
+				      sizeof(u64), GFP_KERNEL);
+	if (!lan966x->stats)
+		return -ENOMEM;
+
+	/* Init stats worker */
+	mutex_init(&lan966x->stats_lock);
+	snprintf(queue_name, sizeof(queue_name), "%s-stats",
+		 dev_name(lan966x->dev));
+	lan966x->stats_queue = create_singlethread_workqueue(queue_name);
+	if (!lan966x->stats_queue)
+		return -ENOMEM;
+
+	INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work);
+	queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
+			   LAN966X_STATS_CHECK_DELAY);
+
+	return 0;
+}
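Worth noting about the 2-second LAN966X_STATS_CHECK_DELAY used by the worker above: the hardware counters are only 32 bits wide, so lan966x_add_cnt() stays correct only if every counter is read at least once per wrap. A rough bound (my own estimate, not from the patch):

	/* Worst-case wrap of a 32-bit octet counter at 1 Gbit/s:
	 *   2^32 bytes / (10^9 / 8 bytes per second) ~= 34 s
	 * Even at 2.5 Gbit/s (~13 s per wrap), polling every 2 * HZ
	 * leaves ample margin, so no wrap can be missed.
	 */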
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c
new file mode 100644
index 000000000..8310d3f35
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define DWRR_COST_BIT_WIDTH	BIT(5)
+
+static u32 lan966x_ets_hw_cost(u32 w_min, u32 weight)
+{
+	u32 res;
+
+	/* Round half up: Multiply with 16 before division,
+	 * add 8 and divide result with 16 again
+	 */
+	res = (((DWRR_COST_BIT_WIDTH << 4) * w_min / weight) + 8) >> 4;
+	return max_t(u32, 1, res) - 1;
+}
+
+int lan966x_ets_add(struct lan966x_port *port,
+		    struct tc_ets_qopt_offload *qopt)
+{
+	struct tc_ets_qopt_offload_replace_params *params;
+	struct lan966x *lan966x = port->lan966x;
+	u32 w_min = 100;
+	u8 count = 0;
+	u32 se_idx;
+	u8 i;
+
+	/* Check the input */
+	if (qopt->parent != TC_H_ROOT)
+		return -EINVAL;
+
+	params = &qopt->replace_params;
+	if (params->bands != NUM_PRIO_QUEUES)
+		return -EINVAL;
+
+	for (i = 0; i < params->bands; ++i) {
+		/* In the switch the DWRR is always on the lowest consecutive
+		 * priorities. Due to this, the first priority must map to the
+		 * first DWRR band.
+		 */
+		if (params->priomap[i] != (7 - i))
+			return -EINVAL;
+
+		if (params->quanta[i] && params->weights[i] == 0)
+			return -EINVAL;
+	}
+
+	se_idx = SE_IDX_PORT + port->chip_port;
+
+	/* Find minimum weight */
+	for (i = 0; i < params->bands; ++i) {
+		if (params->quanta[i] == 0)
+			continue;
+
+		w_min = min(w_min, params->weights[i]);
+	}
+
+	for (i = 0; i < params->bands; ++i) {
+		if (params->quanta[i] == 0)
+			continue;
+
+		++count;
+
+		lan_wr(lan966x_ets_hw_cost(w_min, params->weights[i]),
+		       lan966x, QSYS_SE_DWRR_CFG(se_idx, 7 - i));
+	}
+
+	lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(count) |
+		QSYS_SE_CFG_SE_RR_ENA_SET(0),
+		QSYS_SE_CFG_SE_DWRR_CNT |
+		QSYS_SE_CFG_SE_RR_ENA,
+		lan966x, QSYS_SE_CFG(se_idx));
+
+	return 0;
+}
+
+int lan966x_ets_del(struct lan966x_port *port,
+		    struct tc_ets_qopt_offload *qopt)
+{
+	struct lan966x *lan966x = port->lan966x;
+	u32 se_idx;
+	int i;
+
+	se_idx = SE_IDX_PORT + port->chip_port;
+
+	for (i = 0; i < NUM_PRIO_QUEUES; ++i)
+		lan_wr(0, lan966x, QSYS_SE_DWRR_CFG(se_idx, i));
+
+	lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(0) |
+		QSYS_SE_CFG_SE_RR_ENA_SET(0),
+		QSYS_SE_CFG_SE_DWRR_CNT |
+		QSYS_SE_CFG_SE_RR_ENA,
+		lan966x, QSYS_SE_CFG(se_idx));
+
+	return 0;
+}
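A worked example of the DWRR cost rounding in lan966x_ets_hw_cost() above, with illustrative weights (not part of the patch):

	/* Two active bands with weights {50, 100}, so w_min = 50, and
	 * DWRR_COST_BIT_WIDTH = BIT(5) = 32:
	 *   weight 50 : ((32 << 4) * 50 / 50  + 8) >> 4 = 32 -> cost 31
	 *   weight 100: ((32 << 4) * 50 / 100 + 8) >> 4 = 16 -> cost 15
	 * The programmed cost is inverse to the weight: the band with
	 * twice the weight gets half the cost, i.e. twice the bandwidth
	 * share.
	 */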
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
new file mode 100644
index 000000000..2ea263e89
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+struct lan966x_fdb_event_work {
+	struct work_struct work;
+	struct switchdev_notifier_fdb_info fdb_info;
+	struct net_device *dev;
+	struct net_device *orig_dev;
+	struct lan966x *lan966x;
+	unsigned long event;
+};
+
+struct lan966x_fdb_entry {
+	struct list_head list;
+	unsigned char mac[ETH_ALEN] __aligned(2);
+	u16 vid;
+	u32 references;
+};
+
+static struct lan966x_fdb_entry *
+lan966x_fdb_find_entry(struct lan966x *lan966x,
+		       struct switchdev_notifier_fdb_info *fdb_info)
+{
+	struct lan966x_fdb_entry *fdb_entry;
+
+	list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+		if (fdb_entry->vid == fdb_info->vid &&
+		    ether_addr_equal(fdb_entry->mac, fdb_info->addr))
+			return fdb_entry;
+	}
+
+	return NULL;
+}
+
+static void lan966x_fdb_add_entry(struct lan966x *lan966x,
+				  struct switchdev_notifier_fdb_info *fdb_info)
+{
+	struct lan966x_fdb_entry *fdb_entry;
+
+	fdb_entry = lan966x_fdb_find_entry(lan966x, fdb_info);
+	if (fdb_entry) {
+		fdb_entry->references++;
+		return;
+	}
+
+	fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
+	if (!fdb_entry)
+		return;
+
+	ether_addr_copy(fdb_entry->mac, fdb_info->addr);
+	fdb_entry->vid = fdb_info->vid;
+	fdb_entry->references = 1;
+	list_add_tail(&fdb_entry->list, &lan966x->fdb_entries);
+}
+
+static bool lan966x_fdb_del_entry(struct lan966x *lan966x,
+				  struct switchdev_notifier_fdb_info *fdb_info)
+{
+	struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+	list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries,
+				 list) {
+		if (fdb_entry->vid == fdb_info->vid &&
+		    ether_addr_equal(fdb_entry->mac, fdb_info->addr)) {
+			fdb_entry->references--;
+			if (!fdb_entry->references) {
+				list_del(&fdb_entry->list);
+				kfree(fdb_entry);
+				return true;
+			}
+			break;
+		}
+	}
+
+	return false;
+}
+
+void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid)
+{
+	struct lan966x_fdb_entry *fdb_entry;
+
+	list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+		if (fdb_entry->vid != vid)
+			continue;
+
+		lan966x_mac_cpu_learn(lan966x, fdb_entry->mac, fdb_entry->vid);
+	}
+}
+
+void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid)
+{
+	struct lan966x_fdb_entry *fdb_entry;
+
+	list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+		if (fdb_entry->vid != vid)
+			continue;
+
+		lan966x_mac_cpu_forget(lan966x, fdb_entry->mac, fdb_entry->vid);
+	}
+}
+
+static void lan966x_fdb_purge_entries(struct lan966x *lan966x)
+{
+	struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+	list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries, list) {
+		list_del(&fdb_entry->list);
+		kfree(fdb_entry);
+	}
+}
+
+int lan966x_fdb_init(struct lan966x *lan966x)
+{
+	INIT_LIST_HEAD(&lan966x->fdb_entries);
+	lan966x->fdb_work = alloc_ordered_workqueue("lan966x_order", 0);
+	if (!lan966x->fdb_work)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void lan966x_fdb_deinit(struct lan966x *lan966x)
+{
+	destroy_workqueue(lan966x->fdb_work);
+	lan966x_fdb_purge_entries(lan966x);
+}
+
+void lan966x_fdb_flush_workqueue(struct lan966x *lan966x)
+{
+	flush_workqueue(lan966x->fdb_work);
+}
+
+static void lan966x_fdb_port_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+	struct switchdev_notifier_fdb_info *fdb_info;
+	struct lan966x_port *port;
+	struct lan966x *lan966x;
+
+	lan966x = fdb_work->lan966x;
+	port = netdev_priv(fdb_work->orig_dev);
+	fdb_info = &fdb_work->fdb_info;
+
+	switch (fdb_work->event) {
+	case SWITCHDEV_FDB_ADD_TO_DEVICE:
+		if (!fdb_info->added_by_user)
+			break;
+		lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+				      fdb_info->vid);
+		break;
+	case SWITCHDEV_FDB_DEL_TO_DEVICE:
+		if (!fdb_info->added_by_user)
+			break;
+		lan966x_mac_del_entry(lan966x, fdb_info->addr,
+				      fdb_info->vid);
+		break;
+	}
+}
+
+static void lan966x_fdb_bridge_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+	struct switchdev_notifier_fdb_info *fdb_info;
+	struct lan966x *lan966x;
+	int ret;
+
+	lan966x = fdb_work->lan966x;
+	fdb_info = &fdb_work->fdb_info;
+
+	/* In case the bridge is called */
+	switch (fdb_work->event) {
+	case SWITCHDEV_FDB_ADD_TO_DEVICE:
+		/* If there is no front port in this vlan, there is no
+		 * point to copy the frame to CPU because it would be
+		 * just dropped at later point. So add it only if
+		 * there is a port but it is required to store the fdb
+		 * entry for later point when a port actually gets in
+		 * the vlan.
+		 */
+		lan966x_fdb_add_entry(lan966x, fdb_info);
+		if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+							   fdb_info->vid))
+			break;
+
+		lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
+				      fdb_info->vid);
+		break;
+	case SWITCHDEV_FDB_DEL_TO_DEVICE:
+		ret = lan966x_fdb_del_entry(lan966x, fdb_info);
+		if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+							   fdb_info->vid))
+			break;
+
+		if (ret)
+			lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
+					       fdb_info->vid);
+		break;
+	}
+}
+
+static void lan966x_fdb_lag_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+	struct switchdev_notifier_fdb_info *fdb_info;
+	struct lan966x_port *port;
+	struct lan966x *lan966x;
+
+	if (!lan966x_lag_first_port(fdb_work->orig_dev, fdb_work->dev))
+		return;
+
+	lan966x = fdb_work->lan966x;
+	port = netdev_priv(fdb_work->dev);
+	fdb_info = &fdb_work->fdb_info;
+
+	switch (fdb_work->event) {
+	case SWITCHDEV_FDB_ADD_TO_DEVICE:
+		if (!fdb_info->added_by_user)
+			break;
+		lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+				      fdb_info->vid);
+		break;
+	case SWITCHDEV_FDB_DEL_TO_DEVICE:
+		if (!fdb_info->added_by_user)
+			break;
+		lan966x_mac_del_entry(lan966x, fdb_info->addr, fdb_info->vid);
+		break;
+	}
+}
+
+static void lan966x_fdb_event_work(struct work_struct *work)
+{
+	struct lan966x_fdb_event_work *fdb_work =
+		container_of(work, struct lan966x_fdb_event_work, work);
+
+	if (lan966x_netdevice_check(fdb_work->orig_dev))
+		lan966x_fdb_port_event_work(fdb_work);
+	else if (netif_is_bridge_master(fdb_work->orig_dev))
+		lan966x_fdb_bridge_event_work(fdb_work);
+	else if (netif_is_lag_master(fdb_work->orig_dev))
+		lan966x_fdb_lag_event_work(fdb_work);
+
+	kfree(fdb_work->fdb_info.addr);
+	kfree(fdb_work);
+}
+
+int lan966x_handle_fdb(struct net_device *dev,
+		       struct net_device *orig_dev,
+		       unsigned long event, const void *ctx,
+		       const struct switchdev_notifier_fdb_info *fdb_info)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+	struct lan966x *lan966x = port->lan966x;
+	struct lan966x_fdb_event_work *fdb_work;
+
+	if (ctx && ctx != port)
+		return 0;
+
+	switch (event) {
+	case SWITCHDEV_FDB_ADD_TO_DEVICE:
+	case SWITCHDEV_FDB_DEL_TO_DEVICE:
+		if (lan966x_netdevice_check(orig_dev) &&
+		    !fdb_info->added_by_user)
+			break;
+
+		fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
+		if (!fdb_work)
+			return -ENOMEM;
+
+		fdb_work->dev = dev;
+		fdb_work->orig_dev = orig_dev;
+		fdb_work->lan966x = lan966x;
+		fdb_work->event = event;
+		INIT_WORK(&fdb_work->work, lan966x_fdb_event_work);
+		memcpy(&fdb_work->fdb_info, fdb_info, sizeof(fdb_work->fdb_info));
+		fdb_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+		if (!fdb_work->fdb_info.addr)
+			goto err_addr_alloc;
+
+		ether_addr_copy((u8 *)fdb_work->fdb_info.addr, fdb_info->addr);
+
+		queue_work(lan966x->fdb_work, &fdb_work->work);
+		break;
+	}
+
+	return 0;
+err_addr_alloc:
+	kfree(fdb_work);
+	return -ENOMEM;
+}
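A short walk-through of the reference counting implemented above (an illustrative scenario, not part of the patch):

	/* The bridge notifies SWITCHDEV_FDB_ADD_TO_DEVICE twice for the
	 * same {MAC, VID} pair. The first call allocates the entry with
	 * references = 1; the second only bumps it to 2. After two
	 * matching DEL events, references reaches 0, the entry is freed
	 * and lan966x_fdb_del_entry() returns true - only then does the
	 * bridge handler actually call lan966x_mac_cpu_forget().
	 */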
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
new file mode 100644
index 000000000..e6948939c
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+static int lan966x_fdma_channel_active(struct lan966x *lan966x)
+{
+	return lan_rd(lan966x, FDMA_CH_ACTIVE);
+}
+
+static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
+					       struct lan966x_db *db)
+{
+	struct lan966x *lan966x = rx->lan966x;
+	dma_addr_t dma_addr;
+	struct page *page;
+
+	page = dev_alloc_pages(rx->page_order);
+	if (unlikely(!page))
+		return NULL;
+
+	dma_addr = dma_map_page(lan966x->dev, page, 0,
+				PAGE_SIZE << rx->page_order,
+				DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(lan966x->dev, dma_addr)))
+		goto free_page;
+
+	db->dataptr = dma_addr;
+
+	return page;
+
+free_page:
+	__free_pages(page, rx->page_order);
+	return NULL;
+}
+
+static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
+{
+	struct lan966x *lan966x = rx->lan966x;
+	struct lan966x_rx_dcb *dcb;
+	struct lan966x_db *db;
+	int i, j;
+
+	for (i = 0; i < FDMA_DCB_MAX; ++i) {
+		dcb = &rx->dcbs[i];
+
+		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
+			db = &dcb->db[j];
+			dma_unmap_single(lan966x->dev,
+					 (dma_addr_t)db->dataptr,
+					 PAGE_SIZE << rx->page_order,
+					 DMA_FROM_DEVICE);
+			__free_pages(rx->page[i][j], rx->page_order);
+		}
+	}
+}
+
+static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
+				    struct lan966x_rx_dcb *dcb,
+				    u64 nextptr)
+{
+	struct lan966x_db *db;
+	int i;
+
+	for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
+		db = &dcb->db[i];
+		db->status = FDMA_DCB_STATUS_INTR;
+	}
+
+	dcb->nextptr = FDMA_DCB_INVALID_DATA;
+	dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
+
+	rx->last_entry->nextptr = nextptr;
+	rx->last_entry = dcb;
+}
+
+static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
+{
+	struct lan966x *lan966x = rx->lan966x;
+	struct lan966x_rx_dcb *dcb;
+	struct lan966x_db *db;
+	struct page *page;
+	int i, j;
+	int size;
+
+	/* calculate how many pages are needed to allocate the dcbs */
+	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+	size = ALIGN(size, PAGE_SIZE);
+
+	rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
+	if (!rx->dcbs)
+		return -ENOMEM;
+
+	rx->last_entry = rx->dcbs;
+	rx->db_index = 0;
+	rx->dcb_index = 0;
+
+	/* Now for each dcb allocate the dbs */
+	for (i = 0; i < FDMA_DCB_MAX; ++i) {
+		dcb = &rx->dcbs[i];
+		dcb->info = 0;
+
+		/* For each db allocate a page and map it to the DB dataptr. */
+		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
+			db = &dcb->db[j];
+			page = lan966x_fdma_rx_alloc_page(rx, db);
+			if (!page)
+				return -ENOMEM;
+
+			db->status = 0;
+			rx->page[i][j] = page;
+		}
+
+		lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
+	}
+
+	return 0;
+}
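A note on the DCB ring just built: FDMA_DCB_MAX is a power of two (defined in lan966x_main.h, which is not part of this excerpt), and that is what lets the index arithmetic later in this file wrap with a mask instead of a modulo. Sketch, assuming FDMA_DCB_MAX = 512:

	/* As in the NAPI poll loop further down:
	 *   rx->dcb_index++;
	 *   rx->dcb_index &= FDMA_DCB_MAX - 1;   // 511 + 1 wraps to 0
	 * The mask is equivalent to "% FDMA_DCB_MAX" only because the
	 * ring size is a power of two.
	 */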
+static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
+{
+	struct lan966x *lan966x = rx->lan966x;
+	u32 size;
+
+	/* Now it is possible to do the cleanup of dcb */
+	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+	size = ALIGN(size, PAGE_SIZE);
+	dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
+}
+
+static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
+{
+	struct lan966x *lan966x = rx->lan966x;
+	u32 mask;
+
+	/* When activating a channel, first is required to write the first DCB
+	 * address and then to activate it
+	 */
+	lan_wr(lower_32_bits((u64)rx->dma), lan966x,
+	       FDMA_DCB_LLP(rx->channel_id));
+	lan_wr(upper_32_bits((u64)rx->dma), lan966x,
+	       FDMA_DCB_LLP1(rx->channel_id));
+
+	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+	       FDMA_CH_CFG_CH_MEM_SET(1),
+	       lan966x, FDMA_CH_CFG(rx->channel_id));
+
+	/* Start fdma */
+	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
+		FDMA_PORT_CTRL_XTR_STOP,
+		lan966x, FDMA_PORT_CTRL(0));
+
+	/* Enable interrupts */
+	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+	mask |= BIT(rx->channel_id);
+	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+		FDMA_INTR_DB_ENA_INTR_DB_ENA,
+		lan966x, FDMA_INTR_DB_ENA);
+
+	/* Activate the channel */
+	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
+		FDMA_CH_ACTIVATE_CH_ACTIVATE,
+		lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
+{
+	struct lan966x *lan966x = rx->lan966x;
+	u32 val;
+
+	/* Disable the channel */
+	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
+		FDMA_CH_DISABLE_CH_DISABLE,
+		lan966x, FDMA_CH_DISABLE);
+
+	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+				  val, !(val & BIT(rx->channel_id)),
+				  READL_SLEEP_US, READL_TIMEOUT_US);
+
+	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
+		FDMA_CH_DB_DISCARD_DB_DISCARD,
+		lan966x, FDMA_CH_DB_DISCARD);
+}
+
+static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
+{
+	struct lan966x *lan966x = rx->lan966x;
+
+	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
+		FDMA_CH_RELOAD_CH_RELOAD,
+		lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
+				    struct lan966x_tx_dcb *dcb)
+{
+	dcb->nextptr = FDMA_DCB_INVALID_DATA;
+	dcb->info = 0;
+}
+
+static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
+{
+	struct lan966x *lan966x = tx->lan966x;
+	struct lan966x_tx_dcb *dcb;
+	struct lan966x_db *db;
+	int size;
+	int i, j;
+
+	tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
+			       GFP_KERNEL);
+	if (!tx->dcbs_buf)
+		return -ENOMEM;
+
+	/* calculate how many pages are needed to allocate the dcbs */
+	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+	size = ALIGN(size, PAGE_SIZE);
+	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
+	if (!tx->dcbs)
+		goto out;
+
+	/* Now for each dcb allocate the db */
+	for (i = 0; i < FDMA_DCB_MAX; ++i) {
+		dcb = &tx->dcbs[i];
+
+		for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
+			db = &dcb->db[j];
+			db->dataptr = 0;
+			db->status = 0;
+		}
+
+		lan966x_fdma_tx_add_dcb(tx, dcb);
+	}
+
+	return 0;
+
+out:
+	kfree(tx->dcbs_buf);
+	return -ENOMEM;
+}
+
+static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
+{
+	struct lan966x *lan966x = tx->lan966x;
+	int size;
+
+	kfree(tx->dcbs_buf);
+
+	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+	size = ALIGN(size, PAGE_SIZE);
+	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
+}
+
+static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
+{
+	struct lan966x *lan966x = tx->lan966x;
+	u32 mask;
+
+	/* When activating a channel, first is required to write the first DCB
+	 * address and then to activate it
+	 */
+	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
+	       FDMA_DCB_LLP(tx->channel_id));
+	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
+	       FDMA_DCB_LLP1(tx->channel_id));
+
+	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+	       FDMA_CH_CFG_CH_MEM_SET(1),
+	       lan966x, FDMA_CH_CFG(tx->channel_id));
+
+	/* Start fdma */
+	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
+		FDMA_PORT_CTRL_INJ_STOP,
+		lan966x, FDMA_PORT_CTRL(0));
+
+	/* Enable interrupts */
+	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+	mask |= BIT(tx->channel_id);
+	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+		FDMA_INTR_DB_ENA_INTR_DB_ENA,
+		lan966x, FDMA_INTR_DB_ENA);
+
+	/* Activate the channel */
+	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
+		FDMA_CH_ACTIVATE_CH_ACTIVATE,
+		lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
+{
+	struct lan966x *lan966x = tx->lan966x;
+	u32 val;
+
+	/* Disable the channel */
+	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
+		FDMA_CH_DISABLE_CH_DISABLE,
+		lan966x, FDMA_CH_DISABLE);
+
+	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+				  val, !(val & BIT(tx->channel_id)),
+				  READL_SLEEP_US, READL_TIMEOUT_US);
+
+	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
+		FDMA_CH_DB_DISCARD_DB_DISCARD,
+		lan966x, FDMA_CH_DB_DISCARD);
+
+	tx->activated = false;
+	tx->last_in_use = -1;
+}
+
+static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
+{
+	struct lan966x *lan966x = tx->lan966x;
+
+	/* Write the registers to reload the channel */
+	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
+		FDMA_CH_RELOAD_CH_RELOAD,
+		lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
+{
+	struct lan966x_port *port;
+	int i;
+
+	for (i = 0; i < lan966x->num_phys_ports; ++i) {
+		port = lan966x->ports[i];
+		if (!port)
+			continue;
+
+		if (netif_queue_stopped(port->dev))
+			netif_wake_queue(port->dev);
+	}
+}
+
+static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
+{
+	struct lan966x_port *port;
+	int i;
+
+	for (i = 0; i < lan966x->num_phys_ports; ++i) {
+		port = lan966x->ports[i];
+		if (!port)
+			continue;
+
+		netif_stop_queue(port->dev);
+	}
+}
+
+static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
+{
+	struct lan966x_tx *tx = &lan966x->tx;
+	struct lan966x_tx_dcb_buf *dcb_buf;
+	struct lan966x_db *db;
+	unsigned long flags;
+	bool clear = false;
+	int i;
+
+	spin_lock_irqsave(&lan966x->tx_lock, flags);
+	for (i = 0; i < FDMA_DCB_MAX; ++i) {
+		dcb_buf = &tx->dcbs_buf[i];
+
+		if (!dcb_buf->used)
+			continue;
+
+		db = &tx->dcbs[i].db[0];
+		if (!(db->status & FDMA_DCB_STATUS_DONE))
+			continue;
+
+		dcb_buf->dev->stats.tx_packets++;
+		dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;
+
+		dcb_buf->used = false;
+		dma_unmap_single(lan966x->dev,
+				 dcb_buf->dma_addr,
+				 dcb_buf->skb->len,
+				 DMA_TO_DEVICE);
+		if (!dcb_buf->ptp)
+			dev_kfree_skb_any(dcb_buf->skb);
+
+		clear = true;
+	}
+
+	if (clear)
+		lan966x_fdma_wakeup_netdev(lan966x);
+
+	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
+}
+
+static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
+{
+	struct lan966x_db *db;
+
+	/* Check if there is any data */
+	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
+		return false;
+
+	return true;
+}
+
+static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
+{
+	struct lan966x *lan966x = rx->lan966x;
+	u64 src_port, timestamp;
+	struct lan966x_db *db;
+	struct sk_buff *skb;
+	struct page *page;
+
+	/* Get the received frame and unmap it */
+	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+	page = rx->page[rx->dcb_index][rx->db_index];
+
+	dma_sync_single_for_cpu(lan966x->dev, (dma_addr_t)db->dataptr,
+				FDMA_DCB_STATUS_BLOCKL(db->status),
+				DMA_FROM_DEVICE);
+
+	skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
+	if (unlikely(!skb))
+		goto unmap_page;
+
+	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
+
+	lan966x_ifh_get_src_port(skb->data, &src_port);
+	lan966x_ifh_get_timestamp(skb->data, &timestamp);
+
+	if (WARN_ON(src_port >= lan966x->num_phys_ports))
+		goto free_skb;
+
+	dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr,
+			       PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE,
+			       DMA_ATTR_SKIP_CPU_SYNC);
+
+	skb->dev = lan966x->ports[src_port]->dev;
+	skb_pull(skb, IFH_LEN * sizeof(u32));
+
+	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+		skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+	lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
+	skb->protocol = eth_type_trans(skb, skb->dev);
+
+	if (lan966x->bridge_mask & BIT(src_port)) {
+		skb->offload_fwd_mark = 1;
+
+		skb_reset_network_header(skb);
+		if (!lan966x_hw_offload(lan966x, src_port, skb))
+			skb->offload_fwd_mark = 0;
+	}
+
+	skb->dev->stats.rx_bytes += skb->len;
+	skb->dev->stats.rx_packets++;
+
+	return skb;
+
+free_skb:
+	kfree_skb(skb);
+unmap_page:
+	dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr,
+			       PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE,
+			       DMA_ATTR_SKIP_CPU_SYNC);
+	__free_pages(page, rx->page_order);
+
+	return NULL;
+}
+static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
+{
+	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
+	struct lan966x_rx *rx = &lan966x->rx;
+	int dcb_reload = rx->dcb_index;
+	struct lan966x_rx_dcb *old_dcb;
+	struct lan966x_db *db;
+	struct sk_buff *skb;
+	struct page *page;
+	int counter = 0;
+	u64 nextptr;
+
+	lan966x_fdma_tx_clear_buf(lan966x, weight);
+
+	/* Get all received skb */
+	while (counter < weight) {
+		if (!lan966x_fdma_rx_more_frames(rx))
+			break;
+
+		skb = lan966x_fdma_rx_get_frame(rx);
+
+		rx->page[rx->dcb_index][rx->db_index] = NULL;
+		rx->dcb_index++;
+		rx->dcb_index &= FDMA_DCB_MAX - 1;
+
+		if (!skb)
+			break;
+
+		napi_gro_receive(&lan966x->napi, skb);
+		counter++;
+	}
+
+	/* Allocate new pages and map them */
+	while (dcb_reload != rx->dcb_index) {
+		db = &rx->dcbs[dcb_reload].db[rx->db_index];
+		page = lan966x_fdma_rx_alloc_page(rx, db);
+		if (unlikely(!page))
+			break;
+		rx->page[dcb_reload][rx->db_index] = page;
+
+		old_dcb = &rx->dcbs[dcb_reload];
+		dcb_reload++;
+		dcb_reload &= FDMA_DCB_MAX - 1;
+
+		nextptr = rx->dma + ((unsigned long)old_dcb -
+				     (unsigned long)rx->dcbs);
+		lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
+		lan966x_fdma_rx_reload(rx);
+	}
+
+	if (counter < weight && napi_complete_done(napi, counter))
+		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+
+	return counter;
+}
+
+irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
+{
+	struct lan966x *lan966x = args;
+	u32 db, err, err_type;
+
+	db = lan_rd(lan966x, FDMA_INTR_DB);
+	err = lan_rd(lan966x, FDMA_INTR_ERR);
+
+	if (db) {
+		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
+		lan_wr(db, lan966x, FDMA_INTR_DB);
+
+		napi_schedule(&lan966x->napi);
+	}
+
+	if (err) {
+		err_type = lan_rd(lan966x, FDMA_ERRORS);
+
+		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
+
+		lan_wr(err, lan966x, FDMA_INTR_ERR);
+		lan_wr(err_type, lan966x, FDMA_ERRORS);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
+{
+	struct lan966x_tx_dcb_buf *dcb_buf;
+	int i;
+
+	for (i = 0; i < FDMA_DCB_MAX; ++i) {
+		dcb_buf = &tx->dcbs_buf[i];
+		if (!dcb_buf->used && i != tx->last_in_use)
+			return i;
+	}
+
+	return -1;
+}
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+	struct lan966x *lan966x = port->lan966x;
+	struct lan966x_tx_dcb_buf *next_dcb_buf;
+	struct lan966x_tx_dcb *next_dcb, *dcb;
+	struct lan966x_tx *tx = &lan966x->tx;
+	struct lan966x_db *next_db;
+	int needed_headroom;
+	int needed_tailroom;
+	dma_addr_t dma_addr;
+	int next_to_use;
+	int err;
+
+	/* Get next index */
+	next_to_use = lan966x_fdma_get_next_dcb(tx);
+	if (next_to_use < 0) {
+		netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (skb_put_padto(skb, ETH_ZLEN)) {
+		dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	/* skb processing */
+	needed_headroom = max_t(int, IFH_LEN * sizeof(u32) - skb_headroom(skb), 0);
+	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+				       GFP_ATOMIC);
+		if (unlikely(err)) {
+			dev->stats.tx_dropped++;
+			err = NETDEV_TX_OK;
+			goto release;
+		}
+	}
+
+	skb_tx_timestamp(skb);
+	skb_push(skb, IFH_LEN * sizeof(u32));
+	memcpy(skb->data, ifh, IFH_LEN * sizeof(u32));
+	skb_put(skb, 4);
+
+	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
+				  DMA_TO_DEVICE);
+	if (dma_mapping_error(lan966x->dev, dma_addr)) {
+		dev->stats.tx_dropped++;
+		err = NETDEV_TX_OK;
+		goto release;
+	}
+
+	/* Setup next dcb */
+	next_dcb = &tx->dcbs[next_to_use];
+	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
+
+	next_db = &next_dcb->db[0];
+	next_db->dataptr = dma_addr;
+	next_db->status = FDMA_DCB_STATUS_SOF |
+			  FDMA_DCB_STATUS_EOF |
+			  FDMA_DCB_STATUS_INTR |
+			  FDMA_DCB_STATUS_BLOCKO(0) |
+			  FDMA_DCB_STATUS_BLOCKL(skb->len);
+
+	/* Fill up the buffer */
+	next_dcb_buf = &tx->dcbs_buf[next_to_use];
+	next_dcb_buf->skb = skb;
+	next_dcb_buf->dma_addr = dma_addr;
+	next_dcb_buf->used = true;
+	next_dcb_buf->ptp = false;
+	next_dcb_buf->dev = dev;
+
+	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+		next_dcb_buf->ptp = true;
+
+	if (likely(lan966x->tx.activated)) {
+		/* Connect current dcb to the next db */
+		dcb = &tx->dcbs[tx->last_in_use];
+		dcb->nextptr = tx->dma + (next_to_use *
+					  sizeof(struct lan966x_tx_dcb));
+
+		lan966x_fdma_tx_reload(tx);
+	} else {
+		/* Because it is first time, then just activate */
+		lan966x->tx.activated = true;
+		lan966x_fdma_tx_activate(tx);
+	}
+
+	/* Move to next dcb because this last in use */
+	tx->last_in_use = next_to_use;
+
+	return NETDEV_TX_OK;
+
&& + LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + lan966x_ptp_txtstamp_release(port, skb); + + dev_kfree_skb_any(skb); + return err; +} + +static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x) +{ + int max_mtu = 0; + int i; + + for (i = 0; i < lan966x->num_phys_ports; ++i) { + struct lan966x_port *port; + int mtu; + + port = lan966x->ports[i]; + if (!port) + continue; + + mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); + if (mtu > max_mtu) + max_mtu = mtu; + } + + return max_mtu; +} + +static int lan966x_qsys_sw_status(struct lan966x *lan966x) +{ + return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT)); +} + +static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) +{ + dma_addr_t rx_dma; + void *rx_dcbs; + u32 size; + int err; + + /* Store these for later to free them */ + rx_dma = lan966x->rx.dma; + rx_dcbs = lan966x->rx.dcbs; + + napi_synchronize(&lan966x->napi); + napi_disable(&lan966x->napi); + lan966x_fdma_stop_netdev(lan966x); + + lan966x_fdma_rx_disable(&lan966x->rx); + lan966x_fdma_rx_free_pages(&lan966x->rx); + lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1; + err = lan966x_fdma_rx_alloc(&lan966x->rx); + if (err) + goto restore; + lan966x_fdma_rx_start(&lan966x->rx); + + size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma); + + lan966x_fdma_wakeup_netdev(lan966x); + napi_enable(&lan966x->napi); + + return err; +restore: + lan966x->rx.dma = rx_dma; + lan966x->rx.dcbs = rx_dcbs; + lan966x_fdma_rx_start(&lan966x->rx); + + return err; +} + +int lan966x_fdma_change_mtu(struct lan966x *lan966x) +{ + int max_mtu; + int err; + u32 val; + + max_mtu = lan966x_fdma_get_max_mtu(lan966x); + max_mtu += IFH_LEN * sizeof(u32); + max_mtu += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + max_mtu += VLAN_HLEN * 2; + + if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 == + lan966x->rx.page_order) + return 0; + + /* Disable the CPU port */ + lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0), + QSYS_SW_PORT_MODE_PORT_ENA, + lan966x, QSYS_SW_PORT_MODE(CPU_PORT)); + + /* Flush the CPU queues */ + readx_poll_timeout(lan966x_qsys_sw_status, lan966x, + val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)), + READL_SLEEP_US, READL_TIMEOUT_US); + + /* Add a sleep in case there are frames between the queues and the CPU + * port + */ + usleep_range(1000, 2000); + + err = lan966x_fdma_reload(lan966x, max_mtu); + + /* Enable back the CPU port */ + lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1), + QSYS_SW_PORT_MODE_PORT_ENA, + lan966x, QSYS_SW_PORT_MODE(CPU_PORT)); + + return err; +} + +void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev) +{ + if (lan966x->fdma_ndev) + return; + + lan966x->fdma_ndev = dev; + netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll); + napi_enable(&lan966x->napi); +} + +void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev) +{ + if (lan966x->fdma_ndev == dev) { + netif_napi_del(&lan966x->napi); + lan966x->fdma_ndev = NULL; + } +} + +int lan966x_fdma_init(struct lan966x *lan966x) +{ + int err; + + if (!lan966x->fdma) + return 0; + + lan966x->rx.lan966x = lan966x; + lan966x->rx.channel_id = FDMA_XTR_CHANNEL; + lan966x->tx.lan966x = lan966x; + lan966x->tx.channel_id = FDMA_INJ_CHANNEL; + lan966x->tx.last_in_use = -1; + + err = lan966x_fdma_rx_alloc(&lan966x->rx); + if (err) + return err; + + err = lan966x_fdma_tx_alloc(&lan966x->tx); + if (err) { + lan966x_fdma_rx_free(&lan966x->rx); + return err; + } + + 
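+ /* RX can be started now; the TX channel is only activated lazily by the + * first call to lan966x_fdma_xmit(). + */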
lan966x_fdma_rx_start(&lan966x->rx); + + return 0; +} + +void lan966x_fdma_deinit(struct lan966x *lan966x) +{ + if (!lan966x->fdma) + return; + + lan966x_fdma_rx_disable(&lan966x->rx); + lan966x_fdma_tx_disable(&lan966x->tx); + + napi_synchronize(&lan966x->napi); + napi_disable(&lan966x->napi); + + lan966x_fdma_rx_free_pages(&lan966x->rx); + lan966x_fdma_rx_free(&lan966x->rx); + lan966x_fdma_tx_free(&lan966x->tx); +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h new file mode 100644 index 000000000..ca3314789 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __LAN966X_IFH_H__ +#define __LAN966X_IFH_H__ + +/* Fields with description (*) should just be cleared upon injection + * IFH is transmitted MSByte first (Highest bit pos sent as MSB of first byte) + */ + +#define IFH_LEN 7 + +/* Timestamp for frame */ +#define IFH_POS_TIMESTAMP 192 + +/* Bypass analyzer with a prefilled IFH */ +#define IFH_POS_BYPASS 191 + +/* Masqueraded injection with masq_port defining logical source port */ +#define IFH_POS_MASQ 190 + +/* Masqueraded port number for injection */ +#define IFH_POS_MASQ_PORT 186 + +/* Frame length (*) */ +#define IFH_POS_LEN 178 + +/* Cell filling mode. Full(0),Etype(1), LlctOpt(2), Llct(3) */ +#define IFH_POS_WRDMODE 176 + +/* Frame has 16 bits rtag removed compared to line data */ +#define IFH_POS_RTAG48 175 + +/* Frame has a redundancy tag */ +#define IFH_POS_HAS_RED_TAG 174 + +/* Frame has been cut through forwarded (*) */ +#define IFH_POS_CUTTHRU 173 + +/* Rewriter command */ +#define IFH_POS_REW_CMD 163 + +/* Enable OAM-related rewriting. PDU_TYPE encodes OAM type. */ +#define IFH_POS_REW_OAM 162 + +/* PDU type. Encoding: (0-NONE, 1-Y1731_CCM, 2-MRP_TST, 3-MRP_ITST, 4-DLR_BCN, + * 5-DLR_ADV, 6-RTE_NULL_INJ, 7-IPV4, 8-IPV6, 9-Y1731_NON_CCM). + */ +#define IFH_POS_PDU_TYPE 158 + +/* Update FCS before transmission */ +#define IFH_POS_FCS_UPD 157 + +/* Classified DSCP value of frame */ +#define IFH_POS_DSCP 151 + +/* Yellow indication */ +#define IFH_POS_DP 150 + +/* Process in RTE/inbound */ +#define IFH_POS_RTE_INB_UPDATE 149 + +/* Number of tags to pop from frame */ +#define IFH_POS_POP_CNT 147 + +/* Number of tags in front of the ethertype */ +#define IFH_POS_ETYPE_OFS 145 + +/* Logical source port of frame (*) */ +#define IFH_POS_SRCPORT 141 + +/* Sequence number in redundancy tag */ +#define IFH_POS_SEQ_NUM 120 + +/* Stagd flag and classified TCI of frame (PCP/DEI/VID) */ +#define IFH_POS_TCI 103 + +/* Classified internal priority for queuing */ +#define IFH_POS_QOS_CLASS 100 + +/* Bit mask with eight cpu copy classes */ +#define IFH_POS_CPUQ 92 + +/* Relearn + learn flags (*) */ +#define IFH_POS_LEARN_FLAGS 90 + +/* SFLOW identifier for frame (0-8: Tx port, 9: Rx sampling, 15: No sampling) */ +#define IFH_POS_SFLOW_ID 86 + +/* Set if an ACL/S2 rule was hit (*). + * Super priority: acl_hit=0 and acl_hit(4)=1.
+ */ +#define IFH_POS_ACL_HIT 85 + +/* S2 rule index hit (*) */ +#define IFH_POS_ACL_IDX 79 + +/* ISDX as classified by S1 */ +#define IFH_POS_ISDX 71 + +/* Destination ports for frame */ +#define IFH_POS_DSTS 62 + +/* Storm policer to be applied: None/Uni/Multi/Broad (*) */ +#define IFH_POS_FLOOD 60 + +/* Redundancy tag operation */ +#define IFH_POS_SEQ_OP 58 + +/* Classified internal priority for resourcemgt, tagging etc */ +#define IFH_POS_IPV 55 + +/* Frame is for AFI use */ +#define IFH_POS_AFI 54 + +/* Internal aging value (*) */ +#define IFH_POS_AGED 52 + +/* RTP Identifier */ +#define IFH_POS_RTP_ID 42 + +/* RTP MRPD flow */ +#define IFH_POS_RTP_SUBID 41 + +/* Profinet DataStatus or opcua GroupVersion MSB */ +#define IFH_POS_PN_DATA_STATUS 33 + +/* Profinet transfer status (1 iff the status is 0) */ +#define IFH_POS_PN_TRANSF_STATUS_ZERO 32 + +/* Profinet cycle counter or opcua NetworkMessageNumber */ +#define IFH_POS_PN_CC 16 + +#define IFH_WID_TIMESTAMP 32 +#define IFH_WID_BYPASS 1 +#define IFH_WID_MASQ 1 +#define IFH_WID_MASQ_PORT 4 +#define IFH_WID_LEN 14 +#define IFH_WID_WRDMODE 2 +#define IFH_WID_RTAG48 1 +#define IFH_WID_HAS_RED_TAG 1 +#define IFH_WID_CUTTHRU 1 +#define IFH_WID_REW_CMD 10 +#define IFH_WID_REW_OAM 1 +#define IFH_WID_PDU_TYPE 4 +#define IFH_WID_FCS_UPD 1 +#define IFH_WID_DSCP 6 +#define IFH_WID_DP 1 +#define IFH_WID_RTE_INB_UPDATE 1 +#define IFH_WID_POP_CNT 2 +#define IFH_WID_ETYPE_OFS 2 +#define IFH_WID_SRCPORT 4 +#define IFH_WID_SEQ_NUM 16 +#define IFH_WID_TCI 17 +#define IFH_WID_QOS_CLASS 3 +#define IFH_WID_CPUQ 8 +#define IFH_WID_LEARN_FLAGS 2 +#define IFH_WID_SFLOW_ID 4 +#define IFH_WID_ACL_HIT 1 +#define IFH_WID_ACL_IDX 6 +#define IFH_WID_ISDX 8 +#define IFH_WID_DSTS 9 +#define IFH_WID_FLOOD 2 +#define IFH_WID_SEQ_OP 2 +#define IFH_WID_IPV 3 +#define IFH_WID_AFI 1 +#define IFH_WID_AGED 2 +#define IFH_WID_RTP_ID 10 +#define IFH_WID_RTP_SUBID 1 +#define IFH_WID_PN_DATA_STATUS 8 +#define IFH_WID_PN_TRANSF_STATUS_ZERO 1 +#define IFH_WID_PN_CC 16 + +#endif /* __LAN966X_IFH_H__ */ diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c new file mode 100644 index 000000000..41fa2523d --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/if_bridge.h> + +#include "lan966x_main.h" + +static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x) +{ + u32 visited = GENMASK(lan966x->num_phys_ports - 1, 0); + int p, lag, i; + + /* Reset destination and aggregation PGIDS */ + for (p = 0; p < lan966x->num_phys_ports; ++p) + lan_wr(ANA_PGID_PGID_SET(BIT(p)), + lan966x, ANA_PGID(p)); + + for (p = PGID_AGGR; p < PGID_SRC; ++p) + lan_wr(ANA_PGID_PGID_SET(visited), + lan966x, ANA_PGID(p)); + + /* The visited ports bitmask holds the list of ports offloading any + * bonding interface. Initially we mark all these ports as unvisited, + * then every time we visit a port in this bitmask, we know that it is + * the lowest numbered port, i.e. the one whose logical ID == physical + * port ID == LAG ID. So we mark as visited all further ports in the + * bitmask that are offloading the same bonding interface. This way, + * we set up the aggregation PGIDs only once per bonding interface. 
+ */ + for (p = 0; p < lan966x->num_phys_ports; ++p) { + struct lan966x_port *port = lan966x->ports[p]; + + if (!port || !port->bond) + continue; + + visited &= ~BIT(p); + } + + /* Now, set PGIDs for each active LAG */ + for (lag = 0; lag < lan966x->num_phys_ports; ++lag) { + struct net_device *bond = lan966x->ports[lag]->bond; + int num_active_ports = 0; + unsigned long bond_mask; + u8 aggr_idx[16]; + + if (!bond || (visited & BIT(lag))) + continue; + + bond_mask = lan966x_lag_get_mask(lan966x, bond); + + for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) { + struct lan966x_port *port = lan966x->ports[p]; + + lan_wr(ANA_PGID_PGID_SET(bond_mask), + lan966x, ANA_PGID(p)); + if (port->lag_tx_active) + aggr_idx[num_active_ports++] = p; + } + + for (i = PGID_AGGR; i < PGID_SRC; ++i) { + u32 ac; + + ac = lan_rd(lan966x, ANA_PGID(i)); + ac &= ~bond_mask; + /* Don't do division by zero if there was no active + * port. Just make all aggregation codes zero. + */ + if (num_active_ports) + ac |= BIT(aggr_idx[i % num_active_ports]); + lan_wr(ANA_PGID_PGID_SET(ac), + lan966x, ANA_PGID(i)); + } + + /* Mark all ports in the same LAG as visited to avoid applying + * the same config again. + */ + for (p = lag; p < lan966x->num_phys_ports; p++) { + struct lan966x_port *port = lan966x->ports[p]; + + if (!port) + continue; + + if (port->bond == bond) + visited |= BIT(p); + } + } +} + +static void lan966x_lag_set_port_ids(struct lan966x *lan966x) +{ + struct lan966x_port *port; + u32 bond_mask; + u32 lag_id; + int p; + + for (p = 0; p < lan966x->num_phys_ports; ++p) { + port = lan966x->ports[p]; + if (!port) + continue; + + lag_id = port->chip_port; + + bond_mask = lan966x_lag_get_mask(lan966x, port->bond); + if (bond_mask) + lag_id = __ffs(bond_mask); + + lan_rmw(ANA_PORT_CFG_PORTID_VAL_SET(lag_id), + ANA_PORT_CFG_PORTID_VAL, + lan966x, ANA_PORT_CFG(port->chip_port)); + } +} + +static void lan966x_lag_update_ids(struct lan966x *lan966x) +{ + lan966x_lag_set_port_ids(lan966x); + lan966x_update_fwd_mask(lan966x); + lan966x_lag_set_aggr_pgids(lan966x); +} + +int lan966x_lag_port_join(struct lan966x_port *port, + struct net_device *brport_dev, + struct net_device *bond, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + struct net_device *dev = port->dev; + u32 lag_id = -1; + u32 bond_mask; + int err; + + bond_mask = lan966x_lag_get_mask(lan966x, bond); + if (bond_mask) + lag_id = __ffs(bond_mask); + + port->bond = bond; + lan966x_lag_update_ids(lan966x); + + err = switchdev_bridge_port_offload(brport_dev, dev, port, + &lan966x_switchdev_nb, + &lan966x_switchdev_blocking_nb, + false, extack); + if (err) + goto out; + + lan966x_port_stp_state_set(port, br_port_get_stp_state(brport_dev)); + + if (lan966x_lag_first_port(port->bond, port->dev) && + lag_id != -1) + lan966x_mac_lag_replace_port_entry(lan966x, + lan966x->ports[lag_id], + port); + + return 0; + +out: + port->bond = NULL; + lan966x_lag_update_ids(lan966x); + + return err; +} + +void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond) +{ + struct lan966x *lan966x = port->lan966x; + u32 bond_mask; + u32 lag_id; + + if (lan966x_lag_first_port(port->bond, port->dev)) { + bond_mask = lan966x_lag_get_mask(lan966x, port->bond); + bond_mask &= ~BIT(port->chip_port); + if (bond_mask) { + lag_id = __ffs(bond_mask); + lan966x_mac_lag_replace_port_entry(lan966x, port, + lan966x->ports[lag_id]); + } else { + lan966x_mac_lag_remove_port_entry(lan966x, port); + } + } + + port->bond = NULL; + 
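+ /* Recompute the logical port IDs, the forwarding mask and the + * aggregation PGIDs now that this port is standalone again. + */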
lan966x_lag_update_ids(lan966x); + lan966x_port_stp_state_set(port, BR_STATE_FORWARDING); +} + +static bool lan966x_lag_port_check_hash_types(struct lan966x *lan966x, + enum netdev_lag_hash hash_type) +{ + int p; + + for (p = 0; p < lan966x->num_phys_ports; ++p) { + struct lan966x_port *port = lan966x->ports[p]; + + if (!port || !port->bond) + continue; + + if (port->hash_type != hash_type) + return false; + } + + return true; +} + +int lan966x_lag_port_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + struct netdev_lag_upper_info *lui; + struct netlink_ext_ack *extack; + + extack = netdev_notifier_info_to_extack(&info->info); + lui = info->upper_info; + if (!lui) { + port->hash_type = NETDEV_LAG_HASH_NONE; + return NOTIFY_DONE; + } + + if (lui->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + NL_SET_ERR_MSG_MOD(extack, + "LAG device using unsupported Tx type"); + return -EINVAL; + } + + if (!lan966x_lag_port_check_hash_types(lan966x, lui->hash_type)) { + NL_SET_ERR_MSG_MOD(extack, + "LAG devices can have only the same hash_type"); + return -EINVAL; + } + + switch (lui->hash_type) { + case NETDEV_LAG_HASH_L2: + lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) | + ANA_AGGR_CFG_AC_SMAC_ENA_SET(1), + lan966x, ANA_AGGR_CFG); + break; + case NETDEV_LAG_HASH_L34: + lan_wr(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) | + ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1) | + ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(1), + lan966x, ANA_AGGR_CFG); + break; + case NETDEV_LAG_HASH_L23: + lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) | + ANA_AGGR_CFG_AC_SMAC_ENA_SET(1) | + ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) | + ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1), + lan966x, ANA_AGGR_CFG); + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "LAG device using unsupported hash type"); + return -EINVAL; + } + + port->hash_type = lui->hash_type; + + return NOTIFY_OK; +} + +int lan966x_lag_port_changelowerstate(struct net_device *dev, + struct netdev_notifier_changelowerstate_info *info) +{ + struct netdev_lag_lower_state_info *lag = info->lower_state_info; + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + bool is_active; + + if (!port->bond) + return NOTIFY_DONE; + + is_active = lag->link_up && lag->tx_enabled; + if (port->lag_tx_active == is_active) + return NOTIFY_DONE; + + port->lag_tx_active = is_active; + lan966x_lag_set_aggr_pgids(lan966x); + + return NOTIFY_OK; +} + +int lan966x_lag_netdev_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct lan966x_port *port; + struct net_device *lower; + struct list_head *iter; + int err; + + netdev_for_each_lower_dev(dev, lower, iter) { + if (!lan966x_netdevice_check(lower)) + continue; + + port = netdev_priv(lower); + if (port->bond != dev) + continue; + + err = lan966x_port_prechangeupper(lower, dev, info); + if (err) + return err; + } + + return NOTIFY_DONE; +} + +int lan966x_lag_netdev_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct lan966x_port *port; + struct net_device *lower; + struct list_head *iter; + int err; + + netdev_for_each_lower_dev(dev, lower, iter) { + if (!lan966x_netdevice_check(lower)) + continue; + + port = netdev_priv(lower); + if (port->bond != dev) + continue; + + err = lan966x_port_changeupper(lower, dev, info); + if (err) + return err; + } + + return NOTIFY_DONE; +} + +bool lan966x_lag_first_port(struct net_device *lag, struct 
net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + unsigned long bond_mask; + + if (port->bond != lag) + return false; + + bond_mask = lan966x_lag_get_mask(lan966x, lag); + if (bond_mask && port->chip_port == __ffs(bond_mask)) + return true; + + return false; +} + +u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond) +{ + struct lan966x_port *port; + u32 mask = 0; + int p; + + if (!bond) + return mask; + + for (p = 0; p < lan966x->num_phys_ports; p++) { + port = lan966x->ports[p]; + if (!port) + continue; + + if (port->bond == bond) + mask |= BIT(p); + } + + return mask; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c new file mode 100644 index 000000000..baa3a30c0 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <net/switchdev.h> +#include "lan966x_main.h" + +#define LAN966X_MAC_COLUMNS 4 +#define MACACCESS_CMD_IDLE 0 +#define MACACCESS_CMD_LEARN 1 +#define MACACCESS_CMD_FORGET 2 +#define MACACCESS_CMD_AGE 3 +#define MACACCESS_CMD_GET_NEXT 4 +#define MACACCESS_CMD_INIT 5 +#define MACACCESS_CMD_READ 6 +#define MACACCESS_CMD_WRITE 7 +#define MACACCESS_CMD_SYNC_GET_NEXT 8 + +#define LAN966X_MAC_INVALID_ROW -1 + +struct lan966x_mac_entry { + struct list_head list; + unsigned char mac[ETH_ALEN] __aligned(2); + u16 vid; + u16 port_index; + int row; + bool lag; +}; + +struct lan966x_mac_raw_entry { + u32 mach; + u32 macl; + u32 maca; + bool processed; +}; + +static int lan966x_mac_get_status(struct lan966x *lan966x) +{ + return lan_rd(lan966x, ANA_MACACCESS); +} + +static int lan966x_mac_wait_for_completion(struct lan966x *lan966x) +{ + u32 val; + + return readx_poll_timeout_atomic(lan966x_mac_get_status, + lan966x, val, + (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) == + MACACCESS_CMD_IDLE, + TABLE_UPDATE_SLEEP_US, + TABLE_UPDATE_TIMEOUT_US); +} + +static void lan966x_mac_select(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid) +{ + u32 macl = 0, mach = 0; + + /* Set the MAC address to handle and the vlan associated in a format + * understood by the hardware. 
+ */ + mach |= vid << 16; + mach |= mac[0] << 8; + mach |= mac[1] << 0; + macl |= mac[2] << 24; + macl |= mac[3] << 16; + macl |= mac[4] << 8; + macl |= mac[5] << 0; + + lan_wr(macl, lan966x, ANA_MACLDATA); + lan_wr(mach, lan966x, ANA_MACHDATA); +} + +static int __lan966x_mac_learn_locked(struct lan966x *lan966x, int pgid, + bool cpu_copy, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + lockdep_assert_held(&lan966x->mac_lock); + + lan966x_mac_select(lan966x, mac, vid); + + /* Issue a write command */ + lan_wr(ANA_MACACCESS_VALID_SET(1) | + ANA_MACACCESS_CHANGE2SW_SET(0) | + ANA_MACACCESS_MAC_CPU_COPY_SET(cpu_copy) | + ANA_MACACCESS_DEST_IDX_SET(pgid) | + ANA_MACACCESS_ENTRYTYPE_SET(type) | + ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN), + lan966x, ANA_MACACCESS); + + return lan966x_mac_wait_for_completion(lan966x); +} + +static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid, + bool cpu_copy, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + int ret; + + spin_lock(&lan966x->mac_lock); + ret = __lan966x_mac_learn_locked(lan966x, pgid, cpu_copy, mac, vid, type); + spin_unlock(&lan966x->mac_lock); + + return ret; +} + +/* The mask of the front ports is encoded inside the mac parameter via a call + * to lan966x_mdb_encode_mac(). + */ +int lan966x_mac_ip_learn(struct lan966x *lan966x, + bool cpu_copy, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + WARN_ON(type != ENTRYTYPE_MACV4 && type != ENTRYTYPE_MACV6); + + return __lan966x_mac_learn(lan966x, 0, cpu_copy, mac, vid, type); +} + +int lan966x_mac_learn(struct lan966x *lan966x, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED); + + return __lan966x_mac_learn(lan966x, port, false, mac, vid, type); +} + +static int lan966x_mac_learn_locked(struct lan966x *lan966x, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED); + + return __lan966x_mac_learn_locked(lan966x, port, false, mac, vid, type); +} + +static int lan966x_mac_forget_locked(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + lockdep_assert_held(&lan966x->mac_lock); + + lan966x_mac_select(lan966x, mac, vid); + + /* Issue a forget command */ + lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) | + ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_FORGET), + lan966x, ANA_MACACCESS); + + return lan966x_mac_wait_for_completion(lan966x); +} + +int lan966x_mac_forget(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + int ret; + + spin_lock(&lan966x->mac_lock); + ret = lan966x_mac_forget_locked(lan966x, mac, vid, type); + spin_unlock(&lan966x->mac_lock); + + return ret; +} + +int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid) +{ + return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED); +} + +int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid) +{ + return lan966x_mac_forget(lan966x, addr, vid, ENTRYTYPE_LOCKED); +} + +void lan966x_mac_set_ageing(struct lan966x *lan966x, + u32 ageing) +{ + lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(ageing / 2), + ANA_AUTOAGE_AGE_PERIOD, + lan966x, ANA_AUTOAGE); +} + +void 
lan966x_mac_init(struct lan966x *lan966x) +{ + /* Clear the MAC table */ + lan_wr(MACACCESS_CMD_INIT, lan966x, ANA_MACACCESS); + lan966x_mac_wait_for_completion(lan966x); + + spin_lock_init(&lan966x->mac_lock); + INIT_LIST_HEAD(&lan966x->mac_entries); +} + +static struct lan966x_mac_entry *lan966x_mac_alloc_entry(struct lan966x_port *port, + const unsigned char *mac, + u16 vid) +{ + struct lan966x_mac_entry *mac_entry; + + mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC); + if (!mac_entry) + return NULL; + + memcpy(mac_entry->mac, mac, ETH_ALEN); + mac_entry->vid = vid; + mac_entry->port_index = port->chip_port; + mac_entry->row = LAN966X_MAC_INVALID_ROW; + mac_entry->lag = port->bond ? true : false; + return mac_entry; +} + +static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x, + const unsigned char *mac, + u16 vid, u16 port_index) +{ + struct lan966x_mac_entry *res = NULL; + struct lan966x_mac_entry *mac_entry; + + list_for_each_entry(mac_entry, &lan966x->mac_entries, list) { + if (mac_entry->vid == vid && + ether_addr_equal(mac, mac_entry->mac) && + mac_entry->port_index == port_index) { + res = mac_entry; + break; + } + } + + return res; +} + +static int lan966x_mac_lookup(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, enum macaccess_entry_type type) +{ + int ret; + + lan966x_mac_select(lan966x, mac, vid); + + /* Issue a read command */ + lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) | + ANA_MACACCESS_VALID_SET(1) | + ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_READ), + lan966x, ANA_MACACCESS); + + ret = lan966x_mac_wait_for_completion(lan966x); + if (ret) + return ret; + + return ANA_MACACCESS_VALID_GET(lan_rd(lan966x, ANA_MACACCESS)); +} + +static void lan966x_fdb_call_notifiers(enum switchdev_notifier_type type, + const char *mac, u16 vid, + struct net_device *dev) +{ + struct switchdev_notifier_fdb_info info = { 0 }; + + info.addr = mac; + info.vid = vid; + info.offloaded = true; + call_switchdev_notifiers(type, dev, &info.info, NULL); +} + +int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port, + const unsigned char *addr, u16 vid) +{ + struct lan966x_mac_entry *mac_entry; + + spin_lock(&lan966x->mac_lock); + if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) { + spin_unlock(&lan966x->mac_lock); + return 0; + } + + /* In case the entry already exists, don't add it again to SW, + * just update HW, but we need to look in the actual HW because + * it is possible for an entry to be learn by HW and before we + * get the interrupt the frame will reach CPU and the CPU will + * add the entry but without the extern_learn flag. 
+ */ + mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port); + if (mac_entry) { + spin_unlock(&lan966x->mac_lock); + goto mac_learn; + } + + mac_entry = lan966x_mac_alloc_entry(port, addr, vid); + if (!mac_entry) { + spin_unlock(&lan966x->mac_lock); + return -ENOMEM; + } + + list_add_tail(&mac_entry->list, &lan966x->mac_entries); + spin_unlock(&lan966x->mac_lock); + + lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, + port->bond ?: port->dev); + +mac_learn: + lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED); + + return 0; +} + +int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr, + u16 vid) +{ + struct lan966x_mac_entry *mac_entry, *tmp; + + spin_lock(&lan966x->mac_lock); + list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, + list) { + if (mac_entry->vid == vid && + ether_addr_equal(addr, mac_entry->mac)) { + lan966x_mac_forget_locked(lan966x, mac_entry->mac, + mac_entry->vid, + ENTRYTYPE_LOCKED); + + list_del(&mac_entry->list); + kfree(mac_entry); + } + } + spin_unlock(&lan966x->mac_lock); + + return 0; +} + +void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x, + struct lan966x_port *src, + struct lan966x_port *dst) +{ + struct lan966x_mac_entry *mac_entry; + + spin_lock(&lan966x->mac_lock); + list_for_each_entry(mac_entry, &lan966x->mac_entries, list) { + if (mac_entry->port_index == src->chip_port && + mac_entry->lag) { + lan966x_mac_forget_locked(lan966x, mac_entry->mac, + mac_entry->vid, + ENTRYTYPE_LOCKED); + + lan966x_mac_learn_locked(lan966x, dst->chip_port, + mac_entry->mac, mac_entry->vid, + ENTRYTYPE_LOCKED); + mac_entry->port_index = dst->chip_port; + } + } + spin_unlock(&lan966x->mac_lock); +} + +void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x, + struct lan966x_port *src) +{ + struct lan966x_mac_entry *mac_entry, *tmp; + + spin_lock(&lan966x->mac_lock); + list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, + list) { + if (mac_entry->port_index == src->chip_port && + mac_entry->lag) { + lan966x_mac_forget_locked(lan966x, mac_entry->mac, + mac_entry->vid, + ENTRYTYPE_LOCKED); + + list_del(&mac_entry->list); + kfree(mac_entry); + } + } + spin_unlock(&lan966x->mac_lock); +} + +void lan966x_mac_purge_entries(struct lan966x *lan966x) +{ + struct lan966x_mac_entry *mac_entry, *tmp; + + spin_lock(&lan966x->mac_lock); + list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, + list) { + lan966x_mac_forget_locked(lan966x, mac_entry->mac, + mac_entry->vid, ENTRYTYPE_LOCKED); + + list_del(&mac_entry->list); + kfree(mac_entry); + } + spin_unlock(&lan966x->mac_lock); +} + +static void lan966x_mac_notifiers(enum switchdev_notifier_type type, + unsigned char *mac, u32 vid, + struct net_device *dev) +{ + rtnl_lock(); + lan966x_fdb_call_notifiers(type, mac, vid, dev); + rtnl_unlock(); +} + +static void lan966x_mac_process_raw_entry(struct lan966x_mac_raw_entry *raw_entry, + u8 *mac, u16 *vid, u32 *dest_idx) +{ + mac[0] = (raw_entry->mach >> 8) & 0xff; + mac[1] = (raw_entry->mach >> 0) & 0xff; + mac[2] = (raw_entry->macl >> 24) & 0xff; + mac[3] = (raw_entry->macl >> 16) & 0xff; + mac[4] = (raw_entry->macl >> 8) & 0xff; + mac[5] = (raw_entry->macl >> 0) & 0xff; + + *vid = (raw_entry->mach >> 16) & 0xfff; + *dest_idx = ANA_MACACCESS_DEST_IDX_GET(raw_entry->maca); +} + +static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, + struct lan966x_mac_raw_entry *raw_entries) +{ + struct lan966x_mac_entry *mac_entry, *tmp; + unsigned char mac[ETH_ALEN] 
__aligned(2); + struct list_head mac_deleted_entries; + struct lan966x_port *port; + u32 dest_idx; + u32 column; + u16 vid; + + INIT_LIST_HEAD(&mac_deleted_entries); + + spin_lock(&lan966x->mac_lock); + list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) { + bool found = false; + + if (mac_entry->row != row) + continue; + + for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) { + /* All the valid entries are at the start of the row, + * so when get one invalid entry it can just skip the + * rest of the columns + */ + if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca)) + break; + + lan966x_mac_process_raw_entry(&raw_entries[column], + mac, &vid, &dest_idx); + if (WARN_ON(dest_idx >= lan966x->num_phys_ports)) + continue; + + /* If the entry in SW is found, then there is nothing + * to do + */ + if (mac_entry->vid == vid && + ether_addr_equal(mac_entry->mac, mac) && + mac_entry->port_index == dest_idx) { + raw_entries[column].processed = true; + found = true; + break; + } + } + + if (!found) { + list_del(&mac_entry->list); + /* Move the entry from SW list to a tmp list such that + * it would be deleted later + */ + list_add_tail(&mac_entry->list, &mac_deleted_entries); + } + } + spin_unlock(&lan966x->mac_lock); + + list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) { + /* Notify the bridge that the entry doesn't exist + * anymore in the HW + */ + port = lan966x->ports[mac_entry->port_index]; + lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, + mac_entry->mac, mac_entry->vid, + port->bond ?: port->dev); + list_del(&mac_entry->list); + kfree(mac_entry); + } + + /* Now go to the list of columns and see if any entry was not in the SW + * list, then that means that the entry is new so it needs to notify the + * bridge. 
+ */ + for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) { + /* All the valid entries are at the start of the row, so once an + * invalid entry is found the rest of the columns can be skipped + */ + if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca)) + break; + + /* If the entry already exists then don't do anything */ + if (raw_entries[column].processed) + continue; + + lan966x_mac_process_raw_entry(&raw_entries[column], + mac, &vid, &dest_idx); + if (WARN_ON(dest_idx >= lan966x->num_phys_ports)) + continue; + + spin_lock(&lan966x->mac_lock); + mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx); + if (mac_entry) { + spin_unlock(&lan966x->mac_lock); + continue; + } + + port = lan966x->ports[dest_idx]; + mac_entry = lan966x_mac_alloc_entry(port, mac, vid); + if (!mac_entry) { + spin_unlock(&lan966x->mac_lock); + return; + } + + mac_entry->row = row; + list_add_tail(&mac_entry->list, &lan966x->mac_entries); + spin_unlock(&lan966x->mac_lock); + + lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, + mac, vid, port->bond ?: port->dev); + } +} + +irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x) +{ + struct lan966x_mac_raw_entry entry[LAN966X_MAC_COLUMNS] = { 0 }; + u32 index, column; + bool stop = true; + u32 val; + + /* Start the scan from 0, 0 */ + lan_wr(ANA_MACTINDX_M_INDEX_SET(0) | + ANA_MACTINDX_BUCKET_SET(0), + lan966x, ANA_MACTINDX); + + while (1) { + spin_lock(&lan966x->mac_lock); + lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT), + ANA_MACACCESS_MAC_TABLE_CMD, + lan966x, ANA_MACACCESS); + lan966x_mac_wait_for_completion(lan966x); + + val = lan_rd(lan966x, ANA_MACTINDX); + index = ANA_MACTINDX_M_INDEX_GET(val); + column = ANA_MACTINDX_BUCKET_GET(val); + + /* SYNC-GET-NEXT returns all the entries (4) of a row that has + * seen a change, i.e. a row in which a new entry was added or + * an entry was removed because of ageing. It returns all the + * columns of that row and after that moves on to the next + * changed row. The scan stops when SYNC-GET-NEXT arrives + * 'directly' at row 0 column 3. So if SYNC-GET-NEXT first + * returns row 0 and column 0, it is required to continue + * reading even when row 0 and column 3 is reached.
+ */ + if (index == 0 && column == 0) + stop = false; + + if (column == LAN966X_MAC_COLUMNS - 1 && + index == 0 && stop) { + spin_unlock(&lan966x->mac_lock); + break; + } + + entry[column].mach = lan_rd(lan966x, ANA_MACHDATA); + entry[column].macl = lan_rd(lan966x, ANA_MACLDATA); + entry[column].maca = lan_rd(lan966x, ANA_MACACCESS); + spin_unlock(&lan966x->mac_lock); + + /* Once all the columns are read process them */ + if (column == LAN966X_MAC_COLUMNS - 1) { + lan966x_mac_irq_process(lan966x, index, entry); + /* A row was processed so it is safe to assume that the + * next row/column can be the stop condition + */ + stop = true; + } + } + + lan_rmw(ANA_ANAINTR_INTR_SET(0), + ANA_ANAINTR_INTR, + lan966x, ANA_ANAINTR); + + return IRQ_HANDLED; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c new file mode 100644 index 000000000..9ce46588a --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -0,0 +1,1244 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/module.h> +#include <linux/if_bridge.h> +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <linux/ip.h> +#include <linux/of_platform.h> +#include <linux/of_net.h> +#include <linux/packing.h> +#include <linux/phy/phy.h> +#include <linux/reset.h> +#include <net/addrconf.h> + +#include "lan966x_main.h" + +#define XTR_EOF_0 0x00000080U +#define XTR_EOF_1 0x01000080U +#define XTR_EOF_2 0x02000080U +#define XTR_EOF_3 0x03000080U +#define XTR_PRUNED 0x04000080U +#define XTR_ABORT 0x05000080U +#define XTR_ESCAPE 0x06000080U +#define XTR_NOT_READY 0x07000080U +#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3)) + +#define IO_RANGES 2 + +static const struct of_device_id lan966x_match[] = { + { .compatible = "microchip,lan966x-switch" }, + { } +}; +MODULE_DEVICE_TABLE(of, lan966x_match); + +struct lan966x_main_io_resource { + enum lan966x_target id; + phys_addr_t offset; + int range; +}; + +static const struct lan966x_main_io_resource lan966x_main_iomap[] = { + { TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */ + { TARGET_FDMA, 0xc0400, 0 }, /* 0xe00c0400 */ + { TARGET_ORG, 0, 1 }, /* 0xe2000000 */ + { TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */ + { TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */ + { TARGET_PTP, 0xc000, 1 }, /* 0xe200c000 */ + { TARGET_CHIP_TOP, 0x10000, 1 }, /* 0xe2010000 */ + { TARGET_REW, 0x14000, 1 }, /* 0xe2014000 */ + { TARGET_SYS, 0x28000, 1 }, /* 0xe2028000 */ + { TARGET_DEV, 0x34000, 1 }, /* 0xe2034000 */ + { TARGET_DEV + 1, 0x38000, 1 }, /* 0xe2038000 */ + { TARGET_DEV + 2, 0x3c000, 1 }, /* 0xe203c000 */ + { TARGET_DEV + 3, 0x40000, 1 }, /* 0xe2040000 */ + { TARGET_DEV + 4, 0x44000, 1 }, /* 0xe2044000 */ + { TARGET_DEV + 5, 0x48000, 1 }, /* 0xe2048000 */ + { TARGET_DEV + 6, 0x4c000, 1 }, /* 0xe204c000 */ + { TARGET_DEV + 7, 0x50000, 1 }, /* 0xe2050000 */ + { TARGET_QSYS, 0x100000, 1 }, /* 0xe2100000 */ + { TARGET_AFI, 0x120000, 1 }, /* 0xe2120000 */ + { TARGET_ANA, 0x140000, 1 }, /* 0xe2140000 */ +}; + +static int lan966x_create_targets(struct platform_device *pdev, + struct lan966x *lan966x) +{ + struct resource *iores[IO_RANGES]; + void __iomem *begin[IO_RANGES]; + int idx; + + /* Initially map the entire range and after that update each target to + * point inside the region at the correct offset. It is possible that + * other devices access the same region so don't add any checks about + * this. 
+ */ + for (idx = 0; idx < IO_RANGES; idx++) { + iores[idx] = platform_get_resource(pdev, IORESOURCE_MEM, + idx); + if (!iores[idx]) { + dev_err(&pdev->dev, "Invalid resource\n"); + return -EINVAL; + } + + begin[idx] = devm_ioremap(&pdev->dev, + iores[idx]->start, + resource_size(iores[idx])); + if (!begin[idx]) { + dev_err(&pdev->dev, "Unable to get registers: %s\n", + iores[idx]->name); + return -ENOMEM; + } + } + + for (idx = 0; idx < ARRAY_SIZE(lan966x_main_iomap); idx++) { + const struct lan966x_main_io_resource *iomap = + &lan966x_main_iomap[idx]; + + lan966x->regs[iomap->id] = begin[iomap->range] + iomap->offset; + } + + return 0; +} + +static bool lan966x_port_unique_address(struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + int p; + + for (p = 0; p < lan966x->num_phys_ports; ++p) { + port = lan966x->ports[p]; + if (!port || port->dev == dev) + continue; + + if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr)) + return false; + } + + return true; +} + +static int lan966x_port_set_mac_address(struct net_device *dev, void *p) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + const struct sockaddr *addr = p; + int ret; + + if (ether_addr_equal(addr->sa_data, dev->dev_addr)) + return 0; + + /* Learn the new net device MAC address in the mac table. */ + ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID); + if (ret) + return ret; + + /* If there is another port with the same address as the dev, then don't + * delete it from the MAC table + */ + if (!lan966x_port_unique_address(dev)) + goto out; + + /* Then forget the previous one. */ + ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID); + if (ret) + return ret; + +out: + eth_hw_addr_set(dev, addr->sa_data); + return ret; +} + +static int lan966x_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct lan966x_port *port = netdev_priv(dev); + int ret; + + ret = snprintf(buf, len, "p%d", port->chip_port); + if (ret >= len) + return -EINVAL; + + return 0; +} + +static int lan966x_port_open(struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + int err; + + /* Enable receiving frames on the port, and activate auto-learning of + * MAC addresses. 
+ */ + lan_rmw(ANA_PORT_CFG_LEARNAUTO_SET(1) | + ANA_PORT_CFG_RECV_ENA_SET(1) | + ANA_PORT_CFG_PORTID_VAL_SET(port->chip_port), + ANA_PORT_CFG_LEARNAUTO | + ANA_PORT_CFG_RECV_ENA | + ANA_PORT_CFG_PORTID_VAL, + lan966x, ANA_PORT_CFG(port->chip_port)); + + err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0); + if (err) { + netdev_err(dev, "Could not attach to PHY\n"); + return err; + } + + phylink_start(port->phylink); + + return 0; +} + +static int lan966x_port_stop(struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + + lan966x_port_config_down(port); + phylink_stop(port->phylink); + phylink_disconnect_phy(port->phylink); + + return 0; +} + +static int lan966x_port_inj_status(struct lan966x *lan966x) +{ + return lan_rd(lan966x, QS_INJ_STATUS); +} + +static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp) +{ + u32 val; + + if (lan_rd(lan966x, QS_INJ_STATUS) & QS_INJ_STATUS_FIFO_RDY_SET(BIT(grp))) + return 0; + + return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val, + QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp), + READL_SLEEP_US, READL_TIMEOUT_US); +} + +static int lan966x_port_ifh_xmit(struct sk_buff *skb, + __be32 *ifh, + struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + u32 i, count, last; + u8 grp = 0; + u32 val; + int err; + + val = lan_rd(lan966x, QS_INJ_STATUS); + if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) || + (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp))) + goto err; + + /* Write start of frame */ + lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) | + QS_INJ_CTRL_SOF_SET(1), + lan966x, QS_INJ_CTRL(grp)); + + /* Write IFH header */ + for (i = 0; i < IFH_LEN; ++i) { + /* Wait until the fifo is ready */ + err = lan966x_port_inj_ready(lan966x, grp); + if (err) + goto err; + + lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp)); + } + + /* Write frame */ + count = DIV_ROUND_UP(skb->len, 4); + last = skb->len % 4; + for (i = 0; i < count; ++i) { + /* Wait until the fifo is ready */ + err = lan966x_port_inj_ready(lan966x, grp); + if (err) + goto err; + + lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp)); + } + + /* Add padding */ + while (i < (LAN966X_BUFFER_MIN_SZ / 4)) { + /* Wait until the fifo is ready */ + err = lan966x_port_inj_ready(lan966x, grp); + if (err) + goto err; + + lan_wr(0, lan966x, QS_INJ_WR(grp)); + ++i; + } + + /* Indicate EOF and valid bytes in the last word */ + lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) | + QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ? 0 : last) | + QS_INJ_CTRL_EOF_SET(1), + lan966x, QS_INJ_CTRL(grp)); + + /* Add dummy CRC */ + lan_wr(0, lan966x, QS_INJ_WR(grp)); + skb_tx_timestamp(skb); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + return NETDEV_TX_OK; + + dev_consume_skb_any(skb); + return NETDEV_TX_OK; + +err: + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + lan966x_ptp_txtstamp_release(port, skb); + + return NETDEV_TX_BUSY; +}
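For reference, the word framing used by lan966x_port_ifh_xmit() above can be modelled in plain C. The sketch below only reproduces the arithmetic (words written, payload bytes in the last word, the VLD_BYTES value); the LAN966X_BUFFER_MIN_SZ value of 60 is an assumption, since that constant is defined in lan966x_main.h outside this hunk:

#include <stdio.h>

#define LAN966X_BUFFER_MIN_SZ 60 /* assumed; defined in lan966x_main.h */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int len = 77; /* hypothetical frame length, IFH already pushed */
	unsigned int count = DIV_ROUND_UP(len, 4); /* words fed to QS_INJ_WR */
	unsigned int last = len % 4;               /* payload bytes in last word */
	/* Short frames are padded to the minimum size, in which case
	 * VLD_BYTES stays 0, meaning all four bytes of the last word count.
	 */
	unsigned int vld = len < LAN966X_BUFFER_MIN_SZ ? 0 : last;

	printf("words=%u last=%u vld_bytes=%u\n", count, last, vld);
	return 0;
}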
+static void lan966x_ifh_set_bypass(void *ifh, u64 bypass) +{ + packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1, + IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0); +} + +static void lan966x_ifh_set_port(void *ifh, u64 bypass) +{ + packing(ifh, &bypass, IFH_POS_DSTS + IFH_WID_DSTS - 1, + IFH_POS_DSTS, IFH_LEN * 4, PACK, 0); +} + +static void lan966x_ifh_set_qos_class(void *ifh, u64 bypass) +{ + packing(ifh, &bypass, IFH_POS_QOS_CLASS + IFH_WID_QOS_CLASS - 1, + IFH_POS_QOS_CLASS, IFH_LEN * 4, PACK, 0); +} + +static void lan966x_ifh_set_ipv(void *ifh, u64 bypass) +{ + packing(ifh, &bypass, IFH_POS_IPV + IFH_WID_IPV - 1, + IFH_POS_IPV, IFH_LEN * 4, PACK, 0); +} + +static void lan966x_ifh_set_vid(void *ifh, u64 vid) +{ + packing(ifh, &vid, IFH_POS_TCI + IFH_WID_TCI - 1, + IFH_POS_TCI, IFH_LEN * 4, PACK, 0); +} + +static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op) +{ + packing(ifh, &rew_op, IFH_POS_REW_CMD + IFH_WID_REW_CMD - 1, + IFH_POS_REW_CMD, IFH_LEN * 4, PACK, 0); +} + +static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp) +{ + packing(ifh, &timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1, + IFH_POS_TIMESTAMP, IFH_LEN * 4, PACK, 0); +}
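The setters above all go through the generic packing() helper: the IFH is treated as a 224-bit big-endian word in which a field occupies bits [pos + wid - 1 : pos], with bit 223 sent first on the wire (see lan966x_ifh.h). A stand-alone sketch of that bit placement, assuming the no-quirk PACK behaviour and using arbitrary example values:

#include <stdio.h>
#include <string.h>

#define IFH_BYTES 28 /* IFH_LEN (7) * 4 */

/* Minimal stand-in for packing() in PACK mode with no quirks: logical
 * bit 223 is the MSB of the first byte on the wire.
 */
static void ifh_pack(unsigned char *ifh, unsigned long long val,
		     int pos, int wid)
{
	for (int i = 0; i < wid; i++) {
		int b = pos + i; /* absolute bit position of value bit i */
		int byte = IFH_BYTES - 1 - b / 8;

		if (val & (1ULL << i))
			ifh[byte] |= 1u << (b % 8);
	}
}

int main(void)
{
	unsigned char ifh[IFH_BYTES];

	memset(ifh, 0, sizeof(ifh));
	ifh_pack(ifh, 1, 191, 1);     /* IFH_POS_BYPASS, IFH_WID_BYPASS */
	ifh_pack(ifh, 1 << 2, 62, 9); /* IFH_POS_DSTS: destination port 2 */

	for (int i = 0; i < IFH_BYTES; i++)
		printf("%02x%c", ifh[i], i % 4 == 3 ? '\n' : ' ');
	return 0;
}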
+ +static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + __be32 ifh[IFH_LEN]; + int err; + + memset(ifh, 0x0, sizeof(__be32) * IFH_LEN); + + lan966x_ifh_set_bypass(ifh, 1); + lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port)); + lan966x_ifh_set_qos_class(ifh, skb->priority >= 7 ? 0x7 : skb->priority); + lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority); + lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb)); + + if (port->lan966x->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + err = lan966x_ptp_txtstamp_request(port, skb); + if (err) + return err; + + lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op); + lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id); + } + + spin_lock(&lan966x->tx_lock); + if (port->lan966x->fdma) + err = lan966x_fdma_xmit(skb, ifh, dev); + else + err = lan966x_port_ifh_xmit(skb, ifh, dev); + spin_unlock(&lan966x->tx_lock); + + return err; +} + +static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + int old_mtu = dev->mtu; + int err; + + lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)), + lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); + dev->mtu = new_mtu; + + if (!lan966x->fdma) + return 0; + + err = lan966x_fdma_change_mtu(lan966x); + if (err) { + lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(old_mtu)), + lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); + dev->mtu = old_mtu; + } + + return err; +} + +static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + + return lan966x_mac_forget(lan966x, addr, HOST_PVID, ENTRYTYPE_LOCKED); +} + +static int lan966x_mc_sync(struct net_device *dev, const unsigned char *addr) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + + return lan966x_mac_cpu_learn(lan966x, addr, HOST_PVID); +} + +static void lan966x_port_set_rx_mode(struct net_device *dev) +{ + __dev_mc_sync(dev, lan966x_mc_sync, lan966x_mc_unsync); +} + +static int lan966x_port_get_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + + ppid->id_len = sizeof(lan966x->base_mac); + memcpy(&ppid->id, &lan966x->base_mac, ppid->id_len); + + return 0; +} + +static int lan966x_port_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + struct lan966x_port *port = netdev_priv(dev); + + if (!phy_has_hwtstamp(dev->phydev) && port->lan966x->ptp) { + switch (cmd) { + case SIOCSHWTSTAMP: + return lan966x_ptp_hwtstamp_set(port, ifr); + case SIOCGHWTSTAMP: + return lan966x_ptp_hwtstamp_get(port, ifr); + } + } + + if (!dev->phydev) + return -ENODEV; + + return phy_mii_ioctl(dev->phydev, ifr, cmd); +} + +static const struct net_device_ops lan966x_port_netdev_ops = { + .ndo_open = lan966x_port_open, + .ndo_stop = lan966x_port_stop, + .ndo_start_xmit = lan966x_port_xmit, + .ndo_change_mtu = lan966x_port_change_mtu, + .ndo_set_rx_mode = lan966x_port_set_rx_mode, + .ndo_get_phys_port_name = lan966x_port_get_phys_port_name, + .ndo_get_stats64 = lan966x_stats_get, + .ndo_set_mac_address = lan966x_port_set_mac_address, + .ndo_get_port_parent_id = lan966x_port_get_parent_id, + .ndo_eth_ioctl = lan966x_port_ioctl, + .ndo_setup_tc = lan966x_tc_setup, +}; + +bool lan966x_netdevice_check(const struct net_device *dev) +{ + return dev->netdev_ops == &lan966x_port_netdev_ops; +}
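lan966x_port_change_mtu() above only reprograms the per-port MAXLEN; on FDMA-capable systems, lan966x_fdma_change_mtu() (earlier in this series) then re-derives the RX page order from the largest MAXLEN plus the IFH, skb_shared_info and double-VLAN overhead. A back-of-the-envelope model of that calculation, assuming 4 KiB pages and a 320-byte aligned skb_shared_info (both values are assumptions):

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096 /* assumed 4 KiB pages */
#define IFH_LEN 7
#define VLAN_HLEN 4
#define SHINFO_SZ 320 /* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

static unsigned int round_up_to(unsigned int x, unsigned int align)
{
	return (x + align - 1) / align * align;
}

int main(void)
{
	unsigned int max_mtu = 9000; /* hypothetical largest per-port MAXLEN */

	max_mtu += IFH_LEN * 4;   /* room for the injection header */
	max_mtu += SHINFO_SZ;     /* tail room for skb_shared_info */
	max_mtu += VLAN_HLEN * 2; /* up to two VLAN tags */

	printf("rx page order: %u\n",
	       round_up_to(max_mtu, EXAMPLE_PAGE_SIZE) / EXAMPLE_PAGE_SIZE - 1);
	return 0;
}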
+ +bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb) +{ + u32 val; + + /* The IGMP and MLD frames are not forwarded by the HW if + * multicast snooping is enabled, therefore don't mark them as + * offloaded, to allow the SW to forward the frames accordingly. + */ + val = lan_rd(lan966x, ANA_CPU_FWD_CFG(port)); + if (!(val & (ANA_CPU_FWD_CFG_IGMP_REDIR_ENA | + ANA_CPU_FWD_CFG_MLD_REDIR_ENA))) + return true; + + if (eth_type_vlan(skb->protocol)) { + skb = skb_vlan_untag(skb); + if (unlikely(!skb)) + return false; + } + + if (skb->protocol == htons(ETH_P_IP) && + ip_hdr(skb)->protocol == IPPROTO_IGMP) + return false; + + if (IS_ENABLED(CONFIG_IPV6) && + skb->protocol == htons(ETH_P_IPV6) && + ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) && + !ipv6_mc_check_mld(skb)) + return false; + + return true; +} + +static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp) +{ + return lan_rd(lan966x, QS_XTR_RD(grp)); +} + +static int lan966x_port_xtr_ready(struct lan966x *lan966x, u8 grp) +{ + u32 val; + + return read_poll_timeout(lan966x_port_xtr_status, val, + val != XTR_NOT_READY, + READL_SLEEP_US, READL_TIMEOUT_US, false, + lan966x, grp); +} + +static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval) +{ + u32 bytes_valid; + u32 val; + int err; + + val = lan_rd(lan966x, QS_XTR_RD(grp)); + if (val == XTR_NOT_READY) { + err = lan966x_port_xtr_ready(lan966x, grp); + if (err) + return -EIO; + } + + switch (val) { + case XTR_ABORT: + return -EIO; + case XTR_EOF_0: + case XTR_EOF_1: + case XTR_EOF_2: + case XTR_EOF_3: + case XTR_PRUNED: + bytes_valid = XTR_VALID_BYTES(val); + val = lan_rd(lan966x, QS_XTR_RD(grp)); + if (val == XTR_ESCAPE) + *rval = lan_rd(lan966x, QS_XTR_RD(grp)); + else + *rval = val; + + return bytes_valid; + case XTR_ESCAPE: + *rval = lan_rd(lan966x, QS_XTR_RD(grp)); + + return 4; + default: + *rval = val; + + return 4; + } +} + +void lan966x_ifh_get_src_port(void *ifh, u64 *src_port) +{ + packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1, + IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0); +} + +static void lan966x_ifh_get_len(void *ifh, u64 *len) +{ + packing(ifh, len, IFH_POS_LEN + IFH_WID_LEN - 1, + IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0); +} + +void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp) +{ + packing(ifh, timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1, + IFH_POS_TIMESTAMP, IFH_LEN * 4, UNPACK, 0); +} + +static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args) +{ + struct lan966x *lan966x = args; + int i, grp = 0, err = 0; + + if (!(lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp))) + return IRQ_NONE; + + do { + u64 src_port, len, timestamp; + struct net_device *dev; + struct sk_buff *skb; + int sz = 0, buf_len; + u32 ifh[IFH_LEN]; + u32 *buf; + u32 val; + + for (i = 0; i < IFH_LEN; i++) { + err = lan966x_rx_frame_word(lan966x, grp, &ifh[i]); + if (err != 4) + goto recover; + } + + err = 0; + + lan966x_ifh_get_src_port(ifh, &src_port); + lan966x_ifh_get_len(ifh, &len); + lan966x_ifh_get_timestamp(ifh, &timestamp); + + WARN_ON(src_port >= lan966x->num_phys_ports); + + dev = lan966x->ports[src_port]->dev; + skb = netdev_alloc_skb(dev, len); + if (unlikely(!skb)) { + netdev_err(dev, "Unable to allocate sk_buff\n"); + err = -ENOMEM; + break; + } + buf_len = len - ETH_FCS_LEN; + buf = (u32 *)skb_put(skb, buf_len); + + len = 0; + do { + sz = lan966x_rx_frame_word(lan966x, grp, &val); + if (sz < 0) { + kfree_skb(skb); + goto recover; + } + + *buf++ = val; + len += sz; + } while (len < buf_len); + + /* Read the FCS */ + sz = lan966x_rx_frame_word(lan966x, grp, &val); + if (sz < 0) { + kfree_skb(skb); + goto recover; + } + + /* Update the statistics if part of the FCS was read before */ + len -= ETH_FCS_LEN - sz; + + if (unlikely(dev->features & NETIF_F_RXFCS)) { + buf = (u32 *)skb_put(skb,
ETH_FCS_LEN); + *buf = val; + } + + lan966x_ptp_rxtstamp(lan966x, skb, timestamp); + skb->protocol = eth_type_trans(skb, dev); + + if (lan966x->bridge_mask & BIT(src_port)) { + skb->offload_fwd_mark = 1; + + skb_reset_network_header(skb); + if (!lan966x_hw_offload(lan966x, src_port, skb)) + skb->offload_fwd_mark = 0; + } + + if (!skb_defer_rx_timestamp(skb)) + netif_rx(skb); + + dev->stats.rx_bytes += len; + dev->stats.rx_packets++; + +recover: + if (sz < 0 || err) + lan_rd(lan966x, QS_XTR_RD(grp)); + + } while (lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp)); + + return IRQ_HANDLED; +} + +static irqreturn_t lan966x_ana_irq_handler(int irq, void *args) +{ + struct lan966x *lan966x = args; + + return lan966x_mac_irq_handler(lan966x); +} + +static void lan966x_cleanup_ports(struct lan966x *lan966x) +{ + struct lan966x_port *port; + int p; + + for (p = 0; p < lan966x->num_phys_ports; p++) { + port = lan966x->ports[p]; + if (!port) + continue; + + if (port->dev) + unregister_netdev(port->dev); + + if (lan966x->fdma && lan966x->fdma_ndev == port->dev) + lan966x_fdma_netdev_deinit(lan966x, port->dev); + + if (port->phylink) { + rtnl_lock(); + lan966x_port_stop(port->dev); + rtnl_unlock(); + phylink_destroy(port->phylink); + port->phylink = NULL; + } + + if (port->fwnode) + fwnode_handle_put(port->fwnode); + } + + disable_irq(lan966x->xtr_irq); + lan966x->xtr_irq = -ENXIO; + + if (lan966x->ana_irq > 0) { + disable_irq(lan966x->ana_irq); + lan966x->ana_irq = -ENXIO; + } + + if (lan966x->fdma) + devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x); + + if (lan966x->ptp_irq > 0) + devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x); + + if (lan966x->ptp_ext_irq > 0) + devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x); +} + +static int lan966x_probe_port(struct lan966x *lan966x, u32 p, + phy_interface_t phy_mode, + struct fwnode_handle *portnp) +{ + struct lan966x_port *port; + struct phylink *phylink; + struct net_device *dev; + int err; + + if (p >= lan966x->num_phys_ports) + return -EINVAL; + + dev = devm_alloc_etherdev_mqs(lan966x->dev, + sizeof(struct lan966x_port), + NUM_PRIO_QUEUES, 1); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, lan966x->dev); + port = netdev_priv(dev); + port->dev = dev; + port->lan966x = lan966x; + port->chip_port = p; + lan966x->ports[p] = port; + + dev->max_mtu = ETH_MAX_MTU; + + dev->netdev_ops = &lan966x_port_netdev_ops; + dev->ethtool_ops = &lan966x_ethtool_ops; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_TC; + dev->hw_features |= NETIF_F_HW_TC; + dev->needed_headroom = IFH_LEN * sizeof(u32); + + eth_hw_addr_gen(dev, lan966x->base_mac, p + 1); + + lan966x_mac_learn(lan966x, PGID_CPU, dev->dev_addr, HOST_PVID, + ENTRYTYPE_LOCKED); + + port->phylink_config.dev = &port->dev->dev; + port->phylink_config.type = PHYLINK_NETDEV; + port->phylink_pcs.poll = true; + port->phylink_pcs.ops = &lan966x_phylink_pcs_ops; + + port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD; + + phy_interface_set_rgmii(port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_MII, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_QSGMII, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_QUSGMII, + 
port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + port->phylink_config.supported_interfaces); + + phylink = phylink_create(&port->phylink_config, + portnp, + phy_mode, + &lan966x_phylink_mac_ops); + if (IS_ERR(phylink)) { + port->dev = NULL; + return PTR_ERR(phylink); + } + + port->phylink = phylink; + + err = register_netdev(dev); + if (err) { + dev_err(lan966x->dev, "register_netdev failed\n"); + return err; + } + + lan966x_vlan_port_set_vlan_aware(port, 0); + lan966x_vlan_port_set_vid(port, HOST_PVID, false, false); + lan966x_vlan_port_apply(port); + + return 0; +} + +static void lan966x_init(struct lan966x *lan966x) +{ + u32 p, i; + + /* MAC table initialization */ + lan966x_mac_init(lan966x); + + lan966x_vlan_init(lan966x); + + /* Flush queues */ + lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) | + GENMASK(1, 0), + lan966x, QS_XTR_FLUSH); + + /* Allow to drain */ + mdelay(1); + + /* All Queues normal */ + lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) & + ~(GENMASK(1, 0)), + lan966x, QS_XTR_FLUSH); + + /* Set MAC age time to default value, the entry is aged after + * 2 * AGE_PERIOD + */ + lan_wr(ANA_AUTOAGE_AGE_PERIOD_SET(BR_DEFAULT_AGEING_TIME / 2 / HZ), + lan966x, ANA_AUTOAGE); + + /* Disable learning for frames discarded by VLAN ingress filtering */ + lan_rmw(ANA_ADVLEARN_VLAN_CHK_SET(1), + ANA_ADVLEARN_VLAN_CHK, + lan966x, ANA_ADVLEARN); + + /* Setup frame ageing - "2 sec" - The unit is 6.5 us on lan966x */ + lan_wr(SYS_FRM_AGING_AGE_TX_ENA_SET(1) | + (20000000 / 65), + lan966x, SYS_FRM_AGING); + + /* Map the 8 CPU extraction queues to CPU port */ + lan_wr(0, lan966x, QSYS_CPU_GROUP_MAP); + + /* Do byte-swap and expect status after last data word + * Extraction: Mode: manual extraction) | Byte_swap + */ + lan_wr(QS_XTR_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) | + QS_XTR_GRP_CFG_BYTE_SWAP_SET(1), + lan966x, QS_XTR_GRP_CFG(0)); + + /* Injection: Mode: manual injection | Byte_swap */ + lan_wr(QS_INJ_GRP_CFG_MODE_SET(lan966x->fdma ? 
2 : 1) | + QS_INJ_GRP_CFG_BYTE_SWAP_SET(1), + lan966x, QS_INJ_GRP_CFG(0)); + + lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0), + QS_INJ_CTRL_GAP_SIZE, + lan966x, QS_INJ_CTRL(0)); + + /* Enable IFH insertion/parsing on CPU ports */ + lan_wr(SYS_PORT_MODE_INCL_INJ_HDR_SET(1) | + SYS_PORT_MODE_INCL_XTR_HDR_SET(1), + lan966x, SYS_PORT_MODE(CPU_PORT)); + + /* Setup flooding PGIDs */ + lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) | + ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) | + ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MCIPV6) | + ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC), + lan966x, ANA_FLOODING_IPMC); + + /* There are 8 priorities */ + for (i = 0; i < 8; ++i) + lan_rmw(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) | + ANA_FLOODING_FLD_UNICAST_SET(PGID_UC) | + ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC), + ANA_FLOODING_FLD_MULTICAST | + ANA_FLOODING_FLD_UNICAST | + ANA_FLOODING_FLD_BROADCAST, + lan966x, ANA_FLOODING(i)); + + for (i = 0; i < PGID_ENTRIES; ++i) + /* Set all the entries to obey VLAN_VLAN */ + lan_rmw(ANA_PGID_CFG_OBEY_VLAN_SET(1), + ANA_PGID_CFG_OBEY_VLAN, + lan966x, ANA_PGID_CFG(i)); + + for (p = 0; p < lan966x->num_phys_ports; p++) { + /* Disable bridging by default */ + lan_rmw(ANA_PGID_PGID_SET(0x0), + ANA_PGID_PGID, + lan966x, ANA_PGID(p + PGID_SRC)); + + /* Do not forward BPDU frames to the front ports and copy them + * to CPU + */ + lan_wr(0xffff, lan966x, ANA_CPU_FWD_BPDU_CFG(p)); + } + + /* Set source buffer size for each priority and each port to 1500 bytes */ + for (i = 0; i <= QSYS_Q_RSRV; ++i) { + lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(i)); + lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(512 + i)); + } + + /* Enable switching to/from cpu port */ + lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) | + QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) | + QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1), + lan966x, QSYS_SW_PORT_MODE(CPU_PORT)); + + /* Configure and enable the CPU port */ + lan_rmw(ANA_PGID_PGID_SET(0), + ANA_PGID_PGID, + lan966x, ANA_PGID(CPU_PORT)); + lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT)), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_CPU)); + + /* Multicast to all other ports */ + lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_MC)); + + /* This will be controlled by mrouter ports */ + lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_MCIPV4)); + + lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_MCIPV6)); + + /* Unicast to all other ports */ + lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_UC)); + + /* Broadcast to the CPU port and to other ports */ + lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT) | GENMASK(lan966x->num_phys_ports - 1, 0)), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_BC)); + + lan_wr(REW_PORT_CFG_NO_REWRITE_SET(1), + lan966x, REW_PORT_CFG(CPU_PORT)); + + lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1), + ANA_ANAINTR_INTR_ENA, + lan966x, ANA_ANAINTR); + + spin_lock_init(&lan966x->tx_lock); + + lan966x_taprio_init(lan966x); +} + +static int lan966x_ram_init(struct lan966x *lan966x) +{ + return lan_rd(lan966x, SYS_RAM_INIT); +} + +static int lan966x_reset_switch(struct lan966x *lan966x) +{ + struct reset_control *switch_reset; + int val = 0; + int ret; + + switch_reset = devm_reset_control_get_optional_shared(lan966x->dev, + "switch"); + if (IS_ERR(switch_reset)) + return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset), + "Could not obtain switch reset"); + + reset_control_reset(switch_reset); + + /* Don't reinitialize the 
switch core if it is already initialized. In + * case it is initialized twice, some pointers inside the queue system + * in HW will get corrupted and then after a while the queue system gets + * full and no traffic is passing through the switch. The issue is seen + * when loading and unloading the driver and sending traffic through the + * switch. + */ + if (lan_rd(lan966x, SYS_RESET_CFG) & SYS_RESET_CFG_CORE_ENA) + return 0; + + lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG); + lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT); + ret = readx_poll_timeout(lan966x_ram_init, lan966x, + val, (val & BIT(1)) == 0, READL_SLEEP_US, + READL_TIMEOUT_US); + if (ret) + return ret; + + lan_wr(SYS_RESET_CFG_CORE_ENA_SET(1), lan966x, SYS_RESET_CFG); + + return 0; +} + +static int lan966x_probe(struct platform_device *pdev) +{ + struct fwnode_handle *ports, *portnp; + struct lan966x *lan966x; + u8 mac_addr[ETH_ALEN]; + int err; + + lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL); + if (!lan966x) + return -ENOMEM; + + platform_set_drvdata(pdev, lan966x); + lan966x->dev = &pdev->dev; + + if (!device_get_mac_address(&pdev->dev, mac_addr)) { + ether_addr_copy(lan966x->base_mac, mac_addr); + } else { + pr_info("MAC addr was not set, using a random MAC\n"); + eth_random_addr(lan966x->base_mac); + /* Clear the low nibble so per-port addresses can be derived + * from the base MAC + */ + lan966x->base_mac[5] &= 0xf0; + } + + err = lan966x_create_targets(pdev, lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, + "Failed to create targets"); + + err = lan966x_reset_switch(lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, "Reset failed"); + + lan966x->num_phys_ports = NUM_PHYS_PORTS; + lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports, + sizeof(struct lan966x_port *), + GFP_KERNEL); + if (!lan966x->ports) + return -ENOMEM; + + /* The QS system has LAN966X_BUFFER_MEMORY (160KB) of memory */ + lan966x->shared_queue_sz = LAN966X_BUFFER_MEMORY; + + /* set up the interrupts */ + lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr"); + if (lan966x->xtr_irq <= 0) + return -EINVAL; + + err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL, + lan966x_xtr_irq_handler, IRQF_ONESHOT, + "frame extraction", lan966x); + if (err) { + pr_err("Unable to use xtr irq"); + return -ENODEV; + } + + lan966x->ana_irq = platform_get_irq_byname(pdev, "ana"); + if (lan966x->ana_irq > 0) { + err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL, + lan966x_ana_irq_handler, IRQF_ONESHOT, + "ana irq", lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, "Unable to use ana irq"); + } + + lan966x->ptp_irq = platform_get_irq_byname(pdev, "ptp"); + if (lan966x->ptp_irq > 0) { + err = devm_request_threaded_irq(&pdev->dev, lan966x->ptp_irq, NULL, + lan966x_ptp_irq_handler, IRQF_ONESHOT, + "ptp irq", lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, "Unable to use ptp irq"); + + lan966x->ptp = 1; + } + + lan966x->fdma_irq = platform_get_irq_byname(pdev, "fdma"); + if (lan966x->fdma_irq > 0) { + err = devm_request_irq(&pdev->dev, lan966x->fdma_irq, + lan966x_fdma_irq_handler, 0, + "fdma irq", lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, "Unable to use fdma irq"); + + lan966x->fdma = true; + } + + if (lan966x->ptp) { + lan966x->ptp_ext_irq = platform_get_irq_byname(pdev, "ptp-ext"); + if (lan966x->ptp_ext_irq > 0) { + err = devm_request_threaded_irq(&pdev->dev, + lan966x->ptp_ext_irq, NULL, + lan966x_ptp_ext_irq_handler, + IRQF_ONESHOT, + "ptp-ext irq", lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, + "Unable to use ptp-ext irq");
+ } + } + + ports = device_get_named_child_node(&pdev->dev, "ethernet-ports"); + if (!ports) + return dev_err_probe(&pdev->dev, -ENODEV, + "no ethernet-ports child found\n"); + + /* init switch */ + lan966x_init(lan966x); + lan966x_stats_init(lan966x); + + /* go over the child nodes */ + fwnode_for_each_available_child_node(ports, portnp) { + phy_interface_t phy_mode; + struct phy *serdes; + u32 p; + + if (fwnode_property_read_u32(portnp, "reg", &p)) + continue; + + phy_mode = fwnode_get_phy_mode(portnp); + err = lan966x_probe_port(lan966x, p, phy_mode, portnp); + if (err) + goto cleanup_ports; + + /* Read needed configuration */ + lan966x->ports[p]->config.portmode = phy_mode; + lan966x->ports[p]->fwnode = fwnode_handle_get(portnp); + + serdes = devm_of_phy_get(lan966x->dev, to_of_node(portnp), NULL); + if (PTR_ERR(serdes) == -ENODEV) + serdes = NULL; + if (IS_ERR(serdes)) { + err = PTR_ERR(serdes); + goto cleanup_ports; + } + lan966x->ports[p]->serdes = serdes; + + lan966x_port_init(lan966x->ports[p]); + } + + fwnode_handle_put(ports); + + lan966x_mdb_init(lan966x); + err = lan966x_fdb_init(lan966x); + if (err) + goto cleanup_ports; + + err = lan966x_ptp_init(lan966x); + if (err) + goto cleanup_fdb; + + err = lan966x_fdma_init(lan966x); + if (err) + goto cleanup_ptp; + + return 0; + +cleanup_ptp: + lan966x_ptp_deinit(lan966x); + +cleanup_fdb: + lan966x_fdb_deinit(lan966x); + +cleanup_ports: + fwnode_handle_put(ports); + fwnode_handle_put(portnp); + + lan966x_cleanup_ports(lan966x); + + cancel_delayed_work_sync(&lan966x->stats_work); + destroy_workqueue(lan966x->stats_queue); + mutex_destroy(&lan966x->stats_lock); + + return err; +} + +static int lan966x_remove(struct platform_device *pdev) +{ + struct lan966x *lan966x = platform_get_drvdata(pdev); + + lan966x_taprio_deinit(lan966x); + lan966x_fdma_deinit(lan966x); + lan966x_cleanup_ports(lan966x); + + cancel_delayed_work_sync(&lan966x->stats_work); + destroy_workqueue(lan966x->stats_queue); + mutex_destroy(&lan966x->stats_lock); + + lan966x_mac_purge_entries(lan966x); + lan966x_mdb_deinit(lan966x); + lan966x_fdb_deinit(lan966x); + lan966x_ptp_deinit(lan966x); + + return 0; +} + +static struct platform_driver lan966x_driver = { + .probe = lan966x_probe, + .remove = lan966x_remove, + .driver = { + .name = "lan966x-switch", + .of_match_table = lan966x_match, + }, +}; + +static int __init lan966x_switch_driver_init(void) +{ + int ret; + + lan966x_register_notifier_blocks(); + + ret = platform_driver_register(&lan966x_driver); + if (ret) + goto err; + + return 0; + +err: + lan966x_unregister_notifier_blocks(); + return ret; +} + +static void __exit lan966x_switch_driver_exit(void) +{ + platform_driver_unregister(&lan966x_driver); + lan966x_unregister_notifier_blocks(); +} + +module_init(lan966x_switch_driver_init); +module_exit(lan966x_switch_driver_exit); + +MODULE_DESCRIPTION("Microchip LAN966X switch driver"); +MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h new file mode 100644 index 000000000..4ec33999e --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -0,0 +1,577 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __LAN966X_MAIN_H__ +#define __LAN966X_MAIN_H__ + +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/jiffies.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <linux/ptp_clock_kernel.h> 
+#include <net/pkt_cls.h> +#include <net/pkt_sched.h> +#include <net/switchdev.h> + +#include "lan966x_regs.h" +#include "lan966x_ifh.h" + +#define TABLE_UPDATE_SLEEP_US 10 +#define TABLE_UPDATE_TIMEOUT_US 100000 + +#define READL_SLEEP_US 10 +#define READL_TIMEOUT_US 100000000 + +#define LAN966X_BUFFER_CELL_SZ 64 +#define LAN966X_BUFFER_MEMORY (160 * 1024) +#define LAN966X_BUFFER_MIN_SZ 60 + +#define LAN966X_HW_MTU(mtu) ((mtu) + ETH_HLEN + ETH_FCS_LEN) + +#define PGID_AGGR 64 +#define PGID_SRC 80 +#define PGID_ENTRIES 89 + +#define UNAWARE_PVID 0 +#define HOST_PVID 4095 + +/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */ +#define QSYS_Q_RSRV 95 + +#define NUM_PHYS_PORTS 8 +#define CPU_PORT 8 +#define NUM_PRIO_QUEUES 8 + +/* Reserved PGIDs */ +#define PGID_CPU (PGID_AGGR - 6) +#define PGID_UC (PGID_AGGR - 5) +#define PGID_BC (PGID_AGGR - 4) +#define PGID_MC (PGID_AGGR - 3) +#define PGID_MCIPV4 (PGID_AGGR - 2) +#define PGID_MCIPV6 (PGID_AGGR - 1) + +/* Non-reserved PGIDs, used for general purpose */ +#define PGID_GP_START (CPU_PORT + 1) +#define PGID_GP_END PGID_CPU + +#define LAN966X_SPEED_NONE 0 +#define LAN966X_SPEED_2500 1 +#define LAN966X_SPEED_1000 1 +#define LAN966X_SPEED_100 2 +#define LAN966X_SPEED_10 3 + +#define LAN966X_PHC_COUNT 3 +#define LAN966X_PHC_PORT 0 +#define LAN966X_PHC_PINS_NUM 7 + +#define IFH_REW_OP_NOOP 0x0 +#define IFH_REW_OP_ONE_STEP_PTP 0x3 +#define IFH_REW_OP_TWO_STEP_PTP 0x4 + +#define FDMA_RX_DCB_MAX_DBS 1 +#define FDMA_TX_DCB_MAX_DBS 1 +#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0)) + +#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_STATUS_SOF BIT(16) +#define FDMA_DCB_STATUS_EOF BIT(17) +#define FDMA_DCB_STATUS_INTR BIT(18) +#define FDMA_DCB_STATUS_DONE BIT(19) +#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) +#define FDMA_DCB_INVALID_DATA 0x1 + +#define FDMA_XTR_CHANNEL 6 +#define FDMA_INJ_CHANNEL 0 +#define FDMA_DCB_MAX 512 + +#define SE_IDX_QUEUE 0 /* 0-79 : Queue scheduler elements */ +#define SE_IDX_PORT 80 /* 80-89 : Port scheduler elements */ + +/* MAC table entry types. + * ENTRYTYPE_NORMAL is subject to aging. + * ENTRYTYPE_LOCKED is not subject to aging. + * ENTRYTYPE_MACV4 is not subject to aging. For IPv4 multicast. + * ENTRYTYPE_MACV6 is not subject to aging. For IPv6 multicast. + */ +enum macaccess_entry_type { + ENTRYTYPE_NORMAL = 0, + ENTRYTYPE_LOCKED, + ENTRYTYPE_MACV4, + ENTRYTYPE_MACV6, +}; + +struct lan966x_port; + +struct lan966x_db { + u64 dataptr; + u64 status; +}; + +struct lan966x_rx_dcb { + u64 nextptr; + u64 info; + struct lan966x_db db[FDMA_RX_DCB_MAX_DBS]; +}; + +struct lan966x_tx_dcb { + u64 nextptr; + u64 info; + struct lan966x_db db[FDMA_TX_DCB_MAX_DBS]; +}; + +struct lan966x_rx { + struct lan966x *lan966x; + + /* Pointer to the array of hardware dcbs. */ + struct lan966x_rx_dcb *dcbs; + + /* Pointer to the last address in the dcbs. */ + struct lan966x_rx_dcb *last_entry; + + /* For each DB, there is a page */ + struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; + + /* Represents the db_index; it can have a value between 0 and + * FDMA_RX_DCB_MAX_DBS. Once it reaches FDMA_RX_DCB_MAX_DBS, + * the DCB can be reused. + */ + int db_index; + + /* Represents the index in the dcbs. It has a value between 0 and + * FDMA_DCB_MAX. + */ + int dcb_index; + + /* Represents the DMA address of the dcbs array */ + dma_addr_t dma; + + /* Represents the page order that is used to allocate the pages for the + * RX buffers.
This value is calculated based on the maximum MTU of the devices. + */ + u8 page_order; + + u8 channel_id; +}; + +struct lan966x_tx_dcb_buf { + struct net_device *dev; + struct sk_buff *skb; + dma_addr_t dma_addr; + bool used; + bool ptp; +}; + +struct lan966x_tx { + struct lan966x *lan966x; + + /* Pointer to the dcb list */ + struct lan966x_tx_dcb *dcbs; + u16 last_in_use; + + /* Represents the DMA address of the first entry in the dcb list. */ + dma_addr_t dma; + + /* Array of dcbs that are given to the HW */ + struct lan966x_tx_dcb_buf *dcbs_buf; + + u8 channel_id; + + bool activated; +}; + +struct lan966x_stat_layout { + u32 offset; + char name[ETH_GSTRING_LEN]; +}; + +struct lan966x_phc { + struct ptp_clock *clock; + struct ptp_clock_info info; + struct ptp_pin_desc pins[LAN966X_PHC_PINS_NUM]; + struct hwtstamp_config hwtstamp_config; + struct lan966x *lan966x; + u8 index; +}; + +struct lan966x_skb_cb { + u8 rew_op; + u16 ts_id; + unsigned long jiffies; +}; + +#define LAN966X_PTP_TIMEOUT msecs_to_jiffies(10) +#define LAN966X_SKB_CB(skb) \ + ((struct lan966x_skb_cb *)((skb)->cb)) + +struct lan966x { + struct device *dev; + + u8 num_phys_ports; + struct lan966x_port **ports; + + void __iomem *regs[NUM_TARGETS]; + + int shared_queue_sz; + + u8 base_mac[ETH_ALEN]; + + spinlock_t tx_lock; /* lock for frame transmission */ + + struct net_device *bridge; + u16 bridge_mask; + u16 bridge_fwd_mask; + + struct list_head mac_entries; + spinlock_t mac_lock; /* lock for mac_entries list */ + + u16 vlan_mask[VLAN_N_VID]; + DECLARE_BITMAP(cpu_vlan_mask, VLAN_N_VID); + + /* stats */ + const struct lan966x_stat_layout *stats_layout; + u32 num_stats; + + /* workqueue for reading stats */ + struct mutex stats_lock; + u64 *stats; + struct delayed_work stats_work; + struct workqueue_struct *stats_queue; + + /* interrupts */ + int xtr_irq; + int ana_irq; + int ptp_irq; + int fdma_irq; + int ptp_ext_irq; + + /* workqueue for fdb */ + struct workqueue_struct *fdb_work; + struct list_head fdb_entries; + + /* mdb */ + struct list_head mdb_entries; + struct list_head pgid_entries; + + /* ptp */ + bool ptp; + struct lan966x_phc phc[LAN966X_PHC_COUNT]; + spinlock_t ptp_clock_lock; /* lock for phc */ + spinlock_t ptp_ts_id_lock; /* lock for ts_id */ + struct mutex ptp_lock; /* lock for ptp interface state */ + u16 ptp_skbs; + + /* fdma */ + bool fdma; + struct net_device *fdma_ndev; + struct lan966x_rx rx; + struct lan966x_tx tx; + struct napi_struct napi; + + /* Mirror */ + struct lan966x_port *mirror_monitor; + u32 mirror_mask[2]; + u32 mirror_count; +}; + +struct lan966x_port_config { + phy_interface_t portmode; + const unsigned long *advertising; + int speed; + int duplex; + u32 pause; + bool inband; + bool autoneg; +}; + +struct lan966x_port_tc { + bool ingress_shared_block; + unsigned long police_id; + unsigned long ingress_mirror_id; + unsigned long egress_mirror_id; + struct flow_stats police_stat; + struct flow_stats mirror_stat; +}; + +struct lan966x_port { + struct net_device *dev; + struct lan966x *lan966x; + + u8 chip_port; + u16 pvid; + u16 vid; + bool vlan_aware; + + bool learn_ena; + bool mcast_ena; + + struct phylink_config phylink_config; + struct phylink_pcs phylink_pcs; + struct lan966x_port_config config; + struct phylink *phylink; + struct phy *serdes; + struct fwnode_handle *fwnode; + + u8 ptp_cmd; + u16 ts_id; + struct sk_buff_head tx_skbs; + + struct net_device *bond; + bool lag_tx_active; + enum netdev_lag_hash hash_type; + + struct lan966x_port_tc tc; +}; + +extern const struct phylink_mac_ops
lan966x_phylink_mac_ops; +extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops; +extern const struct ethtool_ops lan966x_ethtool_ops; +extern struct notifier_block lan966x_switchdev_nb __read_mostly; +extern struct notifier_block lan966x_switchdev_blocking_nb __read_mostly; + +bool lan966x_netdevice_check(const struct net_device *dev); + +void lan966x_register_notifier_blocks(void); +void lan966x_unregister_notifier_blocks(void); + +bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb); + +void lan966x_ifh_get_src_port(void *ifh, u64 *src_port); +void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp); + +void lan966x_stats_get(struct net_device *dev, + struct rtnl_link_stats64 *stats); +int lan966x_stats_init(struct lan966x *lan966x); + +void lan966x_port_config_down(struct lan966x_port *port); +void lan966x_port_config_up(struct lan966x_port *port); +void lan966x_port_status_get(struct lan966x_port *port, + struct phylink_link_state *state); +int lan966x_port_pcs_set(struct lan966x_port *port, + struct lan966x_port_config *config); +void lan966x_port_init(struct lan966x_port *port); + +int lan966x_mac_ip_learn(struct lan966x *lan966x, + bool cpu_copy, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type); +int lan966x_mac_learn(struct lan966x *lan966x, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type); +int lan966x_mac_forget(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type); +int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid); +int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid); +void lan966x_mac_init(struct lan966x *lan966x); +void lan966x_mac_set_ageing(struct lan966x *lan966x, + u32 ageing); +int lan966x_mac_del_entry(struct lan966x *lan966x, + const unsigned char *addr, + u16 vid); +int lan966x_mac_add_entry(struct lan966x *lan966x, + struct lan966x_port *port, + const unsigned char *addr, + u16 vid); +void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x, + struct lan966x_port *src, + struct lan966x_port *dst); +void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x, + struct lan966x_port *src); +void lan966x_mac_purge_entries(struct lan966x *lan966x); +irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x); + +void lan966x_vlan_init(struct lan966x *lan966x); +void lan966x_vlan_port_apply(struct lan966x_port *port); +bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid); +void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port, + bool vlan_aware); +int lan966x_vlan_port_set_vid(struct lan966x_port *port, + u16 vid, + bool pvid, + bool untagged); +void lan966x_vlan_port_add_vlan(struct lan966x_port *port, + u16 vid, + bool pvid, + bool untagged); +void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid); +void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid); +void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid); + +void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid); +void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid); +int lan966x_fdb_init(struct lan966x *lan966x); +void lan966x_fdb_deinit(struct lan966x *lan966x); +void lan966x_fdb_flush_workqueue(struct lan966x *lan966x); +int lan966x_handle_fdb(struct net_device *dev, + struct net_device *orig_dev, + unsigned long event, const void *ctx, + const struct 
switchdev_notifier_fdb_info *fdb_info); + +void lan966x_mdb_init(struct lan966x *lan966x); +void lan966x_mdb_deinit(struct lan966x *lan966x); +int lan966x_handle_port_mdb_add(struct lan966x_port *port, + const struct switchdev_obj *obj); +int lan966x_handle_port_mdb_del(struct lan966x_port *port, + const struct switchdev_obj *obj); +void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid); +void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid); +void lan966x_mdb_clear_entries(struct lan966x *lan966x); +void lan966x_mdb_restore_entries(struct lan966x *lan966x); + +int lan966x_ptp_init(struct lan966x *lan966x); +void lan966x_ptp_deinit(struct lan966x *lan966x); +int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr); +int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr); +void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb, + u64 timestamp); +int lan966x_ptp_txtstamp_request(struct lan966x_port *port, + struct sk_buff *skb); +void lan966x_ptp_txtstamp_release(struct lan966x_port *port, + struct sk_buff *skb); +irqreturn_t lan966x_ptp_irq_handler(int irq, void *args); +irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args); +u32 lan966x_ptp_get_period_ps(void); +int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts); + +int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev); +int lan966x_fdma_change_mtu(struct lan966x *lan966x); +void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev); +void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev); +int lan966x_fdma_init(struct lan966x *lan966x); +void lan966x_fdma_deinit(struct lan966x *lan966x); +irqreturn_t lan966x_fdma_irq_handler(int irq, void *args); + +int lan966x_lag_port_join(struct lan966x_port *port, + struct net_device *brport_dev, + struct net_device *bond, + struct netlink_ext_ack *extack); +void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond); +int lan966x_lag_port_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info); +int lan966x_lag_port_changelowerstate(struct net_device *dev, + struct netdev_notifier_changelowerstate_info *info); +int lan966x_lag_netdev_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info); +int lan966x_lag_netdev_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info); +bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev); +u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond); + +int lan966x_port_changeupper(struct net_device *dev, + struct net_device *brport_dev, + struct netdev_notifier_changeupper_info *info); +int lan966x_port_prechangeupper(struct net_device *dev, + struct net_device *brport_dev, + struct netdev_notifier_changeupper_info *info); +void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state); +void lan966x_port_ageing_set(struct lan966x_port *port, + unsigned long ageing_clock_t); +void lan966x_update_fwd_mask(struct lan966x *lan966x); + +int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type, + void *type_data); + +int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc); +int lan966x_mqprio_del(struct lan966x_port *port); + +void lan966x_taprio_init(struct lan966x *lan966x); +void lan966x_taprio_deinit(struct lan966x *lan966x); +int lan966x_taprio_add(struct lan966x_port *port, + struct tc_taprio_qopt_offload *qopt); 
+int lan966x_taprio_del(struct lan966x_port *port); +int lan966x_taprio_speed_set(struct lan966x_port *port, int speed); + +int lan966x_tbf_add(struct lan966x_port *port, + struct tc_tbf_qopt_offload *qopt); +int lan966x_tbf_del(struct lan966x_port *port, + struct tc_tbf_qopt_offload *qopt); + +int lan966x_cbs_add(struct lan966x_port *port, + struct tc_cbs_qopt_offload *qopt); +int lan966x_cbs_del(struct lan966x_port *port, + struct tc_cbs_qopt_offload *qopt); + +int lan966x_ets_add(struct lan966x_port *port, + struct tc_ets_qopt_offload *qopt); +int lan966x_ets_del(struct lan966x_port *port, + struct tc_ets_qopt_offload *qopt); + +int lan966x_tc_matchall(struct lan966x_port *port, + struct tc_cls_matchall_offload *f, + bool ingress); + +int lan966x_police_port_add(struct lan966x_port *port, + struct flow_action *action, + struct flow_action_entry *act, + unsigned long police_id, + bool ingress, + struct netlink_ext_ack *extack); +int lan966x_police_port_del(struct lan966x_port *port, + unsigned long police_id, + struct netlink_ext_ack *extack); +void lan966x_police_port_stats(struct lan966x_port *port, + struct flow_stats *stats); + +int lan966x_mirror_port_add(struct lan966x_port *port, + struct flow_action_entry *action, + unsigned long mirror_id, + bool ingress, + struct netlink_ext_ack *extack); +int lan966x_mirror_port_del(struct lan966x_port *port, + bool ingress, + struct netlink_ext_ack *extack); +void lan966x_mirror_port_stats(struct lan966x_port *port, + struct flow_stats *stats, + bool ingress); + +static inline void __iomem *lan_addr(void __iomem *base[], + int id, int tinst, int tcnt, + int gbase, int ginst, + int gcnt, int gwidth, + int raddr, int rinst, + int rcnt, int rwidth) +{ + WARN_ON((tinst) >= tcnt); + WARN_ON((ginst) >= gcnt); + WARN_ON((rinst) >= rcnt); + return base[id + (tinst)] + + gbase + ((ginst) * gwidth) + + raddr + ((rinst) * rwidth); +} + +static inline u32 lan_rd(struct lan966x *lan966x, int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + return readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); +} + +static inline void lan_wr(u32 val, struct lan966x *lan966x, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + writel(val, lan_addr(lan966x->regs, id, tinst, tcnt, + gbase, ginst, gcnt, gwidth, + raddr, rinst, rcnt, rwidth)); +} + +static inline void lan_rmw(u32 val, u32 mask, struct lan966x *lan966x, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + u32 nval; + + nval = readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); + nval = (nval & ~mask) | (val & mask); + writel(nval, lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); +} + +#endif /* __LAN966X_MAIN_H__ */ diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c new file mode 100644 index 000000000..2af55268b --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <net/switchdev.h> + +#include "lan966x_main.h" + +struct lan966x_pgid_entry { + struct list_head list; + int index; + refcount_t refcount; + u16 ports; +}; + +struct lan966x_mdb_entry { + struct list_head list; + 
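/* (mac, vid) below is the lookup key in the mdb_entries list */ +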
unsigned char mac[ETH_ALEN]; + u16 vid; + u16 ports; + struct lan966x_pgid_entry *pgid; + u8 cpu_copy; +}; + +void lan966x_mdb_init(struct lan966x *lan966x) +{ + INIT_LIST_HEAD(&lan966x->mdb_entries); + INIT_LIST_HEAD(&lan966x->pgid_entries); +} + +static void lan966x_mdb_purge_mdb_entries(struct lan966x *lan966x) +{ + struct lan966x_mdb_entry *mdb_entry, *tmp; + + list_for_each_entry_safe(mdb_entry, tmp, &lan966x->mdb_entries, list) { + list_del(&mdb_entry->list); + kfree(mdb_entry); + } +} + +static void lan966x_mdb_purge_pgid_entries(struct lan966x *lan966x) +{ + struct lan966x_pgid_entry *pgid_entry, *tmp; + + list_for_each_entry_safe(pgid_entry, tmp, &lan966x->pgid_entries, list) { + list_del(&pgid_entry->list); + kfree(pgid_entry); + } +} + +void lan966x_mdb_deinit(struct lan966x *lan966x) +{ + lan966x_mdb_purge_mdb_entries(lan966x); + lan966x_mdb_purge_pgid_entries(lan966x); +} + +static struct lan966x_mdb_entry * +lan966x_mdb_entry_get(struct lan966x *lan966x, + const unsigned char *mac, + u16 vid) +{ + struct lan966x_mdb_entry *mdb_entry; + + list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) { + if (ether_addr_equal(mdb_entry->mac, mac) && + mdb_entry->vid == vid) + return mdb_entry; + } + + return NULL; +} + +static struct lan966x_mdb_entry * +lan966x_mdb_entry_add(struct lan966x *lan966x, + const struct switchdev_obj_port_mdb *mdb) +{ + struct lan966x_mdb_entry *mdb_entry; + + mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL); + if (!mdb_entry) + return ERR_PTR(-ENOMEM); + + ether_addr_copy(mdb_entry->mac, mdb->addr); + mdb_entry->vid = mdb->vid; + + list_add_tail(&mdb_entry->list, &lan966x->mdb_entries); + + return mdb_entry; +} + +static void lan966x_mdb_encode_mac(unsigned char *mac, + struct lan966x_mdb_entry *mdb_entry, + enum macaccess_entry_type type) +{ + ether_addr_copy(mac, mdb_entry->mac); + + if (type == ENTRYTYPE_MACV4) { + mac[0] = 0; + mac[1] = mdb_entry->ports >> 8; + mac[2] = mdb_entry->ports & 0xff; + } else if (type == ENTRYTYPE_MACV6) { + mac[0] = mdb_entry->ports >> 8; + mac[1] = mdb_entry->ports & 0xff; + } +} + +static int lan966x_mdb_ip_add(struct lan966x_port *port, + const struct switchdev_obj_port_mdb *mdb, + enum macaccess_entry_type type) +{ + bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_mdb_entry *mdb_entry; + unsigned char mac[ETH_ALEN]; + bool cpu_copy = false; + + mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid); + if (!mdb_entry) { + mdb_entry = lan966x_mdb_entry_add(lan966x, mdb); + if (IS_ERR(mdb_entry)) + return PTR_ERR(mdb_entry); + } else { + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + } + + if (cpu_port) + mdb_entry->cpu_copy++; + else + mdb_entry->ports |= BIT(port->chip_port); + + /* Copy the frame to CPU only if the CPU is in the VLAN */ + if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) && + mdb_entry->cpu_copy) + cpu_copy = true; + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + return lan966x_mac_ip_learn(lan966x, cpu_copy, + mac, mdb_entry->vid, type); +} + +static int lan966x_mdb_ip_del(struct lan966x_port *port, + const struct switchdev_obj_port_mdb *mdb, + enum macaccess_entry_type type) +{ + bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_mdb_entry *mdb_entry; + unsigned char mac[ETH_ALEN]; + u16 ports; + + mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid); + if 
(!mdb_entry) + return -ENOENT; + + ports = mdb_entry->ports; + if (cpu_port) { + /* If there are still other references to the CPU port then + * there is no point in deleting and re-adding the same entry + */ + mdb_entry->cpu_copy--; + if (mdb_entry->cpu_copy) + return 0; + } else { + ports &= ~BIT(port->chip_port); + } + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + + mdb_entry->ports = ports; + + if (!mdb_entry->ports && !mdb_entry->cpu_copy) { + list_del(&mdb_entry->list); + kfree(mdb_entry); + return 0; + } + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + return lan966x_mac_ip_learn(lan966x, mdb_entry->cpu_copy, + mac, mdb_entry->vid, type); +} + +static struct lan966x_pgid_entry * +lan966x_pgid_entry_add(struct lan966x *lan966x, int index, u16 ports) +{ + struct lan966x_pgid_entry *pgid_entry; + + pgid_entry = kzalloc(sizeof(*pgid_entry), GFP_KERNEL); + if (!pgid_entry) + return ERR_PTR(-ENOMEM); + + pgid_entry->ports = ports; + pgid_entry->index = index; + refcount_set(&pgid_entry->refcount, 1); + + list_add_tail(&pgid_entry->list, &lan966x->pgid_entries); + + return pgid_entry; +} + +static struct lan966x_pgid_entry * +lan966x_pgid_entry_get(struct lan966x *lan966x, + struct lan966x_mdb_entry *mdb_entry) +{ + struct lan966x_pgid_entry *pgid_entry; + int index; + + /* Try to find an existing pgid that uses the same ports as the + * mdb_entry + */ + list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) { + if (pgid_entry->ports == mdb_entry->ports) { + refcount_inc(&pgid_entry->refcount); + return pgid_entry; + } + } + + /* Try to find an unused pgid index and allocate an entry for it; + * otherwise there are no more resources + */ + for (index = PGID_GP_START; index < PGID_GP_END; index++) { + bool used = false; + + list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) { + if (pgid_entry->index == index) { + used = true; + break; + } + } + + if (!used) + return lan966x_pgid_entry_add(lan966x, index, + mdb_entry->ports); + } + + return ERR_PTR(-ENOSPC); +} + +static void lan966x_pgid_entry_del(struct lan966x *lan966x, + struct lan966x_pgid_entry *pgid_entry) +{ + if (!refcount_dec_and_test(&pgid_entry->refcount)) + return; + + list_del(&pgid_entry->list); + kfree(pgid_entry); +} + +static int lan966x_mdb_l2_add(struct lan966x_port *port, + const struct switchdev_obj_port_mdb *mdb, + enum macaccess_entry_type type) +{ + bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_pgid_entry *pgid_entry; + struct lan966x_mdb_entry *mdb_entry; + unsigned char mac[ETH_ALEN]; + + mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid); + if (!mdb_entry) { + mdb_entry = lan966x_mdb_entry_add(lan966x, mdb); + if (IS_ERR(mdb_entry)) + return PTR_ERR(mdb_entry); + } else { + lan966x_pgid_entry_del(lan966x, mdb_entry->pgid); + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + } + + if (cpu_port) { + mdb_entry->ports |= BIT(CPU_PORT); + mdb_entry->cpu_copy++; + } else { + mdb_entry->ports |= BIT(port->chip_port); + } + + pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry); + if (IS_ERR(pgid_entry)) { + list_del(&mdb_entry->list); + kfree(mdb_entry); + return PTR_ERR(pgid_entry); + } + mdb_entry->pgid = pgid_entry; + + /* Copy the frame to CPU only if the CPU is in the VLAN */ + if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) && + mdb_entry->cpu_copy) +
mdb_entry->ports &= ~BIT(CPU_PORT); + + lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports), + ANA_PGID_PGID, + lan966x, ANA_PGID(pgid_entry->index)); + + return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac, + mdb_entry->vid, type); +} + +static int lan966x_mdb_l2_del(struct lan966x_port *port, + const struct switchdev_obj_port_mdb *mdb, + enum macaccess_entry_type type) +{ + bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_pgid_entry *pgid_entry; + struct lan966x_mdb_entry *mdb_entry; + unsigned char mac[ETH_ALEN]; + u16 ports; + + mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid); + if (!mdb_entry) + return -ENOENT; + + ports = mdb_entry->ports; + if (cpu_port) { + /* If there are still other references to the CPU port then + * there is no point in deleting and re-adding the same entry + */ + mdb_entry->cpu_copy--; + if (mdb_entry->cpu_copy) + return 0; + + ports &= ~BIT(CPU_PORT); + } else { + ports &= ~BIT(port->chip_port); + } + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + lan966x_pgid_entry_del(lan966x, mdb_entry->pgid); + + mdb_entry->ports = ports; + + if (!mdb_entry->ports) { + list_del(&mdb_entry->list); + kfree(mdb_entry); + return 0; + } + + pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry); + if (IS_ERR(pgid_entry)) { + list_del(&mdb_entry->list); + kfree(mdb_entry); + return PTR_ERR(pgid_entry); + } + mdb_entry->pgid = pgid_entry; + + lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports), + ANA_PGID_PGID, + lan966x, ANA_PGID(pgid_entry->index)); + + return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac, + mdb_entry->vid, type); +} + +static enum macaccess_entry_type +lan966x_mdb_classify(const unsigned char *mac) +{ + if (mac[0] == 0x01 && mac[1] == 0x00 && mac[2] == 0x5e) + return ENTRYTYPE_MACV4; + if (mac[0] == 0x33 && mac[1] == 0x33) + return ENTRYTYPE_MACV6; + return ENTRYTYPE_LOCKED; +} + +int lan966x_handle_port_mdb_add(struct lan966x_port *port, + const struct switchdev_obj *obj) +{ + const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + enum macaccess_entry_type type; + + /* Split the way the entries are added for ipv4/ipv6 and for l2. The + * reason is that ipv4/ipv6 entries don't require a pgid entry, while + * l2 entries do + */ + type = lan966x_mdb_classify(mdb->addr); + if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) + return lan966x_mdb_ip_add(port, mdb, type); + + return lan966x_mdb_l2_add(port, mdb, type); +} + +int lan966x_handle_port_mdb_del(struct lan966x_port *port, + const struct switchdev_obj *obj) +{ + const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + enum macaccess_entry_type type; + + /* Split the way the entries are removed for ipv4/ipv6 and for l2.
The + * reason is that ipv4/ipv6 entries don't require a pgid entry, while + * l2 entries do + */ + type = lan966x_mdb_classify(mdb->addr); + if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) + return lan966x_mdb_ip_del(port, mdb, type); + + return lan966x_mdb_l2_del(port, mdb, type); +} + +static void lan966x_mdb_ip_cpu_copy(struct lan966x *lan966x, + struct lan966x_mdb_entry *mdb_entry, + enum macaccess_entry_type type) +{ + unsigned char mac[ETH_ALEN]; + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + lan966x_mac_ip_learn(lan966x, true, mac, mdb_entry->vid, type); +} + +static void lan966x_mdb_l2_cpu_copy(struct lan966x *lan966x, + struct lan966x_mdb_entry *mdb_entry, + enum macaccess_entry_type type) +{ + struct lan966x_pgid_entry *pgid_entry; + unsigned char mac[ETH_ALEN]; + + lan966x_pgid_entry_del(lan966x, mdb_entry->pgid); + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + + mdb_entry->ports |= BIT(CPU_PORT); + + pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry); + if (IS_ERR(pgid_entry)) + return; + + mdb_entry->pgid = pgid_entry; + + lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports), + ANA_PGID_PGID, + lan966x, ANA_PGID(pgid_entry->index)); + + lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac, + mdb_entry->vid, type); +} + +void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid) +{ + struct lan966x_mdb_entry *mdb_entry; + enum macaccess_entry_type type; + + list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) { + if (mdb_entry->vid != vid || !mdb_entry->cpu_copy) + continue; + + type = lan966x_mdb_classify(mdb_entry->mac); + if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) + lan966x_mdb_ip_cpu_copy(lan966x, mdb_entry, type); + else + lan966x_mdb_l2_cpu_copy(lan966x, mdb_entry, type); + } +} + +static void lan966x_mdb_ip_cpu_remove(struct lan966x *lan966x, + struct lan966x_mdb_entry *mdb_entry, + enum macaccess_entry_type type) +{ + unsigned char mac[ETH_ALEN]; + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + lan966x_mac_ip_learn(lan966x, false, mac, mdb_entry->vid, type); +} + +static void lan966x_mdb_l2_cpu_remove(struct lan966x *lan966x, + struct lan966x_mdb_entry *mdb_entry, + enum macaccess_entry_type type) +{ + struct lan966x_pgid_entry *pgid_entry; + unsigned char mac[ETH_ALEN]; + + lan966x_pgid_entry_del(lan966x, mdb_entry->pgid); + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + + mdb_entry->ports &= ~BIT(CPU_PORT); + + pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry); + if (IS_ERR(pgid_entry)) + return; + + mdb_entry->pgid = pgid_entry; + + lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports), + ANA_PGID_PGID, + lan966x, ANA_PGID(pgid_entry->index)); + + lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac, + mdb_entry->vid, type); +} + +void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid) +{ + struct lan966x_mdb_entry *mdb_entry; + enum macaccess_entry_type type; + + list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) { + if (mdb_entry->vid != vid || !mdb_entry->cpu_copy) + continue; + + type = lan966x_mdb_classify(mdb_entry->mac); + if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) + lan966x_mdb_ip_cpu_remove(lan966x, mdb_entry, type); + else + lan966x_mdb_l2_cpu_remove(lan966x, mdb_entry, type); + } +} + +void
lan966x_mdb_clear_entries(struct lan966x *lan966x) +{ + struct lan966x_mdb_entry *mdb_entry; + enum macaccess_entry_type type; + unsigned char mac[ETH_ALEN]; + + list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) { + type = lan966x_mdb_classify(mdb_entry->mac); + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + /* Remove just the MAC entry, still keep the PGID in case of L2 + * entries because this can be restored at a later point + */ + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + } +} + +void lan966x_mdb_restore_entries(struct lan966x *lan966x) +{ + struct lan966x_mdb_entry *mdb_entry; + enum macaccess_entry_type type; + unsigned char mac[ETH_ALEN]; + bool cpu_copy; + + list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) { + type = lan966x_mdb_classify(mdb_entry->mac); + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) { + /* Copy the frame to CPU only if the CPU is in the + * VLAN; evaluate this per entry so one entry's + * decision does not leak into the next + */ + cpu_copy = lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, + mdb_entry->vid) && + mdb_entry->cpu_copy; + + lan966x_mac_ip_learn(lan966x, cpu_copy, mac, + mdb_entry->vid, type); + } else { + lan966x_mac_learn(lan966x, mdb_entry->pgid->index, + mdb_entry->mac, + mdb_entry->vid, type); + } + } +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c new file mode 100644 index 000000000..7e1ba3f40 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +int lan966x_mirror_port_add(struct lan966x_port *port, + struct flow_action_entry *action, + unsigned long mirror_id, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + struct lan966x_port *monitor_port; + + if (!lan966x_netdevice_check(action->dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Destination is not a lan966x port"); + return -EOPNOTSUPP; + } + + monitor_port = netdev_priv(action->dev); + + if (lan966x->mirror_mask[ingress] & BIT(port->chip_port)) { + NL_SET_ERR_MSG_MOD(extack, + "Mirror already exists"); + return -EEXIST; + } + + if (lan966x->mirror_monitor && + lan966x->mirror_monitor != monitor_port) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot change mirror port while in use"); + return -EBUSY; + } + + if (port == monitor_port) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot mirror the monitor port"); + return -EINVAL; + } + + lan966x->mirror_mask[ingress] |= BIT(port->chip_port); + + lan966x->mirror_monitor = monitor_port; + lan_wr(BIT(monitor_port->chip_port), lan966x, ANA_MIRRORPORTS); + + if (ingress) { + lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(1), + ANA_PORT_CFG_SRC_MIRROR_ENA, + lan966x, ANA_PORT_CFG(port->chip_port)); + } else { + lan_wr(lan966x->mirror_mask[0], lan966x, + ANA_EMIRRORPORTS); + } + + lan966x->mirror_count++; + + if (ingress) + port->tc.ingress_mirror_id = mirror_id; + else + port->tc.egress_mirror_id = mirror_id; + + return 0; +} + +int lan966x_mirror_port_del(struct lan966x_port *port, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + + if (!(lan966x->mirror_mask[ingress] & BIT(port->chip_port))) { + NL_SET_ERR_MSG_MOD(extack, + "There is no mirroring for this port"); + return -ENOENT; + } + + lan966x->mirror_mask[ingress] &= ~BIT(port->chip_port); + + if (ingress) { + lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(0), + ANA_PORT_CFG_SRC_MIRROR_ENA, + lan966x,
ANA_PORT_CFG(port->chip_port)); + } else { + lan_wr(lan966x->mirror_mask[0], lan966x, + ANA_EMIRRORPORTS); + } + + lan966x->mirror_count--; + + if (lan966x->mirror_count == 0) { + lan966x->mirror_monitor = NULL; + lan_wr(0, lan966x, ANA_MIRRORPORTS); + } + + if (ingress) + port->tc.ingress_mirror_id = 0; + else + port->tc.egress_mirror_id = 0; + + return 0; +} + +void lan966x_mirror_port_stats(struct lan966x_port *port, + struct flow_stats *stats, + bool ingress) +{ + struct rtnl_link_stats64 new_stats; + struct flow_stats *old_stats; + + old_stats = &port->tc.mirror_stat; + lan966x_stats_get(port->dev, &new_stats); + + if (ingress) { + flow_stats_update(stats, + new_stats.rx_bytes - old_stats->bytes, + new_stats.rx_packets - old_stats->pkts, + new_stats.rx_dropped - old_stats->drops, + old_stats->lastused, + FLOW_ACTION_HW_STATS_IMMEDIATE); + + old_stats->bytes = new_stats.rx_bytes; + old_stats->pkts = new_stats.rx_packets; + old_stats->drops = new_stats.rx_dropped; + old_stats->lastused = jiffies; + } else { + flow_stats_update(stats, + new_stats.tx_bytes - old_stats->bytes, + new_stats.tx_packets - old_stats->pkts, + new_stats.tx_dropped - old_stats->drops, + old_stats->lastused, + FLOW_ACTION_HW_STATS_IMMEDIATE); + + old_stats->bytes = new_stats.tx_bytes; + old_stats->pkts = new_stats.tx_packets; + old_stats->drops = new_stats.tx_dropped; + old_stats->lastused = jiffies; + } +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c new file mode 100644 index 000000000..7fa76e74f --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc) +{ + u8 i; + + if (num_tc != NUM_PRIO_QUEUES) { + netdev_err(port->dev, "Only %d traffic classes supported\n", + NUM_PRIO_QUEUES); + return -EINVAL; + } + + netdev_set_num_tc(port->dev, num_tc); + + for (i = 0; i < num_tc; ++i) + netdev_set_tc_queue(port->dev, i, 1, i); + + return 0; +} + +int lan966x_mqprio_del(struct lan966x_port *port) +{ + netdev_reset_tc(port->dev); + + return 0; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c new file mode 100644 index 000000000..e4ac59480 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/module.h> +#include <linux/phylink.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/phy/phy.h> +#include <linux/sfp.h> + +#include "lan966x_main.h" + +static struct phylink_pcs *lan966x_phylink_mac_select(struct phylink_config *config, + phy_interface_t interface) +{ + struct lan966x_port *port = netdev_priv(to_net_dev(config->dev)); + + return &port->phylink_pcs; +} + +static void lan966x_phylink_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static int lan966x_phylink_mac_prepare(struct phylink_config *config, + unsigned int mode, + phy_interface_t iface) +{ + struct lan966x_port *port = netdev_priv(to_net_dev(config->dev)); + phy_interface_t serdes_mode = iface; + int err; + + if (port->serdes) { + err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, + serdes_mode); + if (err) { + netdev_err(to_net_dev(config->dev), + "Could not set mode of SerDes\n"); + return err; + } + } + + return 0; +} + +static void 
lan966x_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct lan966x_port *port = netdev_priv(to_net_dev(config->dev)); + struct lan966x_port_config *port_config = &port->config; + + port_config->duplex = duplex; + port_config->speed = speed; + port_config->pause = 0; + port_config->pause |= tx_pause ? MLO_PAUSE_TX : 0; + port_config->pause |= rx_pause ? MLO_PAUSE_RX : 0; + + if (phy_interface_mode_is_rgmii(interface)) + phy_set_speed(port->serdes, speed); + + lan966x_port_config_up(port); +} + +static void lan966x_phylink_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct lan966x_port *port = netdev_priv(to_net_dev(config->dev)); + struct lan966x *lan966x = port->lan966x; + + lan966x_port_config_down(port); + + /* Take PCS out of reset */ + lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) | + DEV_CLOCK_CFG_PCS_TX_RST_SET(0), + DEV_CLOCK_CFG_PCS_RX_RST | + DEV_CLOCK_CFG_PCS_TX_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); +} + +static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct lan966x_port, phylink_pcs); +} + +static void lan966x_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct lan966x_port *port = lan966x_pcs_to_port(pcs); + + lan966x_port_status_get(port, state); +} + +static int lan966x_pcs_config(struct phylink_pcs *pcs, + unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct lan966x_port *port = lan966x_pcs_to_port(pcs); + struct lan966x_port_config config; + int ret; + + config = port->config; + config.portmode = interface; + config.inband = phylink_autoneg_inband(mode); + config.autoneg = phylink_test(advertising, Autoneg); + config.advertising = advertising; + + ret = lan966x_port_pcs_set(port, &config); + if (ret) + netdev_err(port->dev, "port PCS config failed: %d\n", ret); + + return ret; +} + +static void lan966x_pcs_aneg_restart(struct phylink_pcs *pcs) +{ + /* Currently not used */ +} + +const struct phylink_mac_ops lan966x_phylink_mac_ops = { + .validate = phylink_generic_validate, + .mac_select_pcs = lan966x_phylink_mac_select, + .mac_config = lan966x_phylink_mac_config, + .mac_prepare = lan966x_phylink_mac_prepare, + .mac_link_down = lan966x_phylink_mac_link_down, + .mac_link_up = lan966x_phylink_mac_link_up, +}; + +const struct phylink_pcs_ops lan966x_phylink_pcs_ops = { + .pcs_get_state = lan966x_pcs_get_state, + .pcs_config = lan966x_pcs_config, + .pcs_an_restart = lan966x_pcs_aneg_restart, +}; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c new file mode 100644 index 000000000..7d66fe75c --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +/* 0-8 : 9 port policers */ +#define POL_IDX_PORT 0 + +/* Policer order: Serial (QoS -> Port -> VCAP) */ +#define POL_ORDER 0x1d3 + +struct lan966x_tc_policer { + /* kilobit per second */ + u32 rate; + /* bytes */ + u32 burst; +}; + +static int lan966x_police_add(struct lan966x_port *port, + struct lan966x_tc_policer *pol, + u16 pol_idx) +{ + struct lan966x *lan966x = port->lan966x; + + /* Rate unit is 33 1/3 kbps */ + pol->rate = DIV_ROUND_UP(pol->rate * 3, 100); + /* Avoid zero burst size */
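+ /* (x ?: y is the GNU shorthand for x ? x : y) */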
+ pol->burst = pol->burst ?: 1; + /* Unit is 4kB */ + pol->burst = DIV_ROUND_UP(pol->burst, 4096); + + if (pol->rate > GENMASK(15, 0) || + pol->burst > GENMASK(6, 0)) + return -EINVAL; + + lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) | + ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) | + ANA_POL_MODE_IPG_SIZE_SET(20) | + ANA_POL_MODE_FRM_MODE_SET(1) | + ANA_POL_MODE_OVERSHOOT_ENA_SET(1), + lan966x, ANA_POL_MODE(pol_idx)); + + lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0), + lan966x, ANA_POL_PIR_STATE(pol_idx)); + + lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(pol->rate) | + ANA_POL_PIR_CFG_PIR_BURST_SET(pol->burst), + lan966x, ANA_POL_PIR_CFG(pol_idx)); + + return 0; +} + +static int lan966x_police_del(struct lan966x_port *port, + u16 pol_idx) +{ + struct lan966x *lan966x = port->lan966x; + + lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) | + ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) | + ANA_POL_MODE_IPG_SIZE_SET(20) | + ANA_POL_MODE_FRM_MODE_SET(2) | + ANA_POL_MODE_OVERSHOOT_ENA_SET(1), + lan966x, ANA_POL_MODE(pol_idx)); + + lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0), + lan966x, ANA_POL_PIR_STATE(pol_idx)); + + lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(GENMASK(14, 0)) | + ANA_POL_PIR_CFG_PIR_BURST_SET(0), + lan966x, ANA_POL_PIR_CFG(pol_idx)); + + return 0; +} + +static int lan966x_police_validate(struct lan966x_port *port, + const struct flow_action *action, + const struct flow_action_entry *act, + unsigned long police_id, + bool ingress, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + if (act->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload does not support packets per second"); + return -EOPNOTSUPP; + } + + if (!ingress) { + NL_SET_ERR_MSG_MOD(extack, + "Policer is not supported on egress"); + return -EOPNOTSUPP; + } + + if (port->tc.ingress_shared_block) { + NL_SET_ERR_MSG_MOD(extack, + "Policer is not supported on shared ingress blocks"); + return -EOPNOTSUPP; + } + + if (port->tc.police_id && port->tc.police_id != police_id) { + NL_SET_ERR_MSG_MOD(extack, + "Only one policer per port is supported"); + return -EEXIST; + } + + return 0; +} + +int lan966x_police_port_add(struct lan966x_port *port, + struct flow_action *action, + struct flow_action_entry *act, + unsigned long police_id, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + struct rtnl_link_stats64 new_stats; + struct lan966x_tc_policer pol; + struct flow_stats *old_stats; + int err; + + err = lan966x_police_validate(port, action, act, police_id, ingress, + extack); + if (err) + return err; + + memset(&pol, 0, sizeof(pol)); + + pol.rate = div_u64(act->police.rate_bytes_ps, 1000) * 8; + pol.burst = act->police.burst; + + err = lan966x_police_add(port, &pol,
POL_IDX_PORT + port->chip_port); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to add policer to port"); + return err; + } + + lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(1) | + ANA_POL_CFG_POL_ORDER_SET(POL_ORDER), + ANA_POL_CFG_PORT_POL_ENA | + ANA_POL_CFG_POL_ORDER, + lan966x, ANA_POL_CFG(port->chip_port)); + + port->tc.police_id = police_id; + + /* Setup initial stats */ + old_stats = &port->tc.police_stat; + lan966x_stats_get(port->dev, &new_stats); + old_stats->bytes = new_stats.rx_bytes; + old_stats->pkts = new_stats.rx_packets; + old_stats->drops = new_stats.rx_dropped; + old_stats->lastused = jiffies; + + return 0; +} + +int lan966x_police_port_del(struct lan966x_port *port, + unsigned long police_id, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = port->lan966x; + int err; + + if (port->tc.police_id != police_id) { + NL_SET_ERR_MSG_MOD(extack, + "Invalid policer id"); + return -EINVAL; + } + + err = lan966x_police_del(port, POL_IDX_PORT + port->chip_port); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to delete policer from port"); + return err; + } + + lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(0) | + ANA_POL_CFG_POL_ORDER_SET(POL_ORDER), + ANA_POL_CFG_PORT_POL_ENA | + ANA_POL_CFG_POL_ORDER, + lan966x, ANA_POL_CFG(port->chip_port)); + + port->tc.police_id = 0; + + return 0; +} + +void lan966x_police_port_stats(struct lan966x_port *port, + struct flow_stats *stats) +{ + struct rtnl_link_stats64 new_stats; + struct flow_stats *old_stats; + + old_stats = &port->tc.police_stat; + lan966x_stats_get(port->dev, &new_stats); + + flow_stats_update(stats, + new_stats.rx_bytes - old_stats->bytes, + new_stats.rx_packets - old_stats->pkts, + new_stats.rx_dropped - old_stats->drops, + old_stats->lastused, + FLOW_ACTION_HW_STATS_IMMEDIATE); + + old_stats->bytes = new_stats.rx_bytes; + old_stats->pkts = new_stats.rx_packets; + old_stats->drops = new_stats.rx_dropped; + old_stats->lastused = jiffies; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c new file mode 100644 index 000000000..0050fcb98 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/netdevice.h> +#include <linux/phy/phy.h> + +#include "lan966x_main.h" + +/* Watermark encode */ +#define MULTIPLIER_BIT BIT(8) +static u32 lan966x_wm_enc(u32 value) +{ + value /= LAN966X_BUFFER_CELL_SZ; + + if (value >= MULTIPLIER_BIT) { + value /= 16; + if (value >= MULTIPLIER_BIT) + value = (MULTIPLIER_BIT - 1); + + value |= MULTIPLIER_BIT; + } + + return value; +} + +static void lan966x_port_link_down(struct lan966x_port *port) +{ + struct lan966x *lan966x = port->lan966x; + u32 val, delay = 0; + + /* 0.5: Disable any AFI */ + lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(1) | + AFI_PORT_CFG_FRM_OUT_MAX_SET(0), + AFI_PORT_CFG_FC_SKIP_TTI_INJ | + AFI_PORT_CFG_FRM_OUT_MAX, + lan966x, AFI_PORT_CFG(port->chip_port)); + + /* wait for reg afi_port_frm_out to become 0 for the port */ + while (true) { + val = lan_rd(lan966x, AFI_PORT_FRM_OUT(port->chip_port)); + if (!AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(val)) + break; + + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + delay++; + if (delay == 2000) { + pr_err("AFI timeout chip port %u", port->chip_port); + break; + } + } + + delay = 0; + + /* 1: Reset the PCS Rx clock domain */ + lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(1), + DEV_CLOCK_CFG_PCS_RX_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); + + /* 2: Disable MAC frame reception */ +
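/* (frames already received are drained by the flush in steps 8-10) */ +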
lan_rmw(DEV_MAC_ENA_CFG_RX_ENA_SET(0), + DEV_MAC_ENA_CFG_RX_ENA, + lan966x, DEV_MAC_ENA_CFG(port->chip_port)); + + /* 3: Disable traffic being sent to or from switch port */ + lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0), + QSYS_SW_PORT_MODE_PORT_ENA, + lan966x, QSYS_SW_PORT_MODE(port->chip_port)); + + /* 4: Disable dequeuing from the egress queues */ + lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(1), + QSYS_PORT_MODE_DEQUEUE_DIS, + lan966x, QSYS_PORT_MODE(port->chip_port)); + + /* 5: Disable Flowcontrol */ + lan_rmw(SYS_PAUSE_CFG_PAUSE_ENA_SET(0), + SYS_PAUSE_CFG_PAUSE_ENA, + lan966x, SYS_PAUSE_CFG(port->chip_port)); + + /* 5.1: Disable PFC */ + lan_rmw(QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(0), + QSYS_SW_PORT_MODE_TX_PFC_ENA, + lan966x, QSYS_SW_PORT_MODE(port->chip_port)); + + /* 6: Wait a worst case time 8ms (jumbo/10Mbit) */ + usleep_range(8 * USEC_PER_MSEC, 9 * USEC_PER_MSEC); + + /* 7: Disable HDX backpressure */ + lan_rmw(SYS_FRONT_PORT_MODE_HDX_MODE_SET(0), + SYS_FRONT_PORT_MODE_HDX_MODE, + lan966x, SYS_FRONT_PORT_MODE(port->chip_port)); + + /* 8: Flush the queues associated with the port */ + lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3), + QSYS_SW_PORT_MODE_AGING_MODE, + lan966x, QSYS_SW_PORT_MODE(port->chip_port)); + + /* 9: Enable dequeuing from the egress queues */ + lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(0), + QSYS_PORT_MODE_DEQUEUE_DIS, + lan966x, QSYS_PORT_MODE(port->chip_port)); + + /* 10: Wait until flushing is complete */ + while (true) { + val = lan_rd(lan966x, QSYS_SW_STATUS(port->chip_port)); + if (!QSYS_SW_STATUS_EQ_AVAIL_GET(val)) + break; + + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + delay++; + if (delay == 2000) { + pr_err("Flush timeout chip port %u", port->chip_port); + break; + } + } + + /* 11: Reset the Port and MAC clock domains */ + lan_rmw(DEV_MAC_ENA_CFG_TX_ENA_SET(0), + DEV_MAC_ENA_CFG_TX_ENA, + lan966x, DEV_MAC_ENA_CFG(port->chip_port)); + + lan_rmw(DEV_CLOCK_CFG_PORT_RST_SET(1), + DEV_CLOCK_CFG_PORT_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); + + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + + lan_rmw(DEV_CLOCK_CFG_MAC_TX_RST_SET(1) | + DEV_CLOCK_CFG_MAC_RX_RST_SET(1) | + DEV_CLOCK_CFG_PORT_RST_SET(1), + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_RX_RST | + DEV_CLOCK_CFG_PORT_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); + + /* 12: Clear flushing */ + lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(2), + QSYS_SW_PORT_MODE_AGING_MODE, + lan966x, QSYS_SW_PORT_MODE(port->chip_port)); + + /* The port is disabled and flushed; now set up the port in the + * new operating mode + */ +} + +static void lan966x_port_link_up(struct lan966x_port *port) +{ + struct lan966x_port_config *config = &port->config; + struct lan966x *lan966x = port->lan966x; + int speed = 0, mode = 0; + int atop_wm = 0; + + switch (config->speed) { + case SPEED_10: + speed = LAN966X_SPEED_10; + break; + case SPEED_100: + speed = LAN966X_SPEED_100; + break; + case SPEED_1000: + speed = LAN966X_SPEED_1000; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); + break; + case SPEED_2500: + speed = LAN966X_SPEED_2500; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); + break; + } + + lan966x_taprio_speed_set(port, config->speed); + + /* Also the GIGA_MODE_ENA(1) needs to be set regardless of the + * port speed for QSGMII ports. + */ + if (phy_interface_num_ports(config->portmode) == 4) + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); + + lan_wr(config->duplex | mode, + lan966x, DEV_MAC_MODE_CFG(port->chip_port)); + + lan_rmw(DEV_MAC_IFG_CFG_TX_IFG_SET(config->duplex ?
6 : 5) | + DEV_MAC_IFG_CFG_RX_IFG1_SET(config->speed == SPEED_10 ? 2 : 1) | + DEV_MAC_IFG_CFG_RX_IFG2_SET(2), + DEV_MAC_IFG_CFG_TX_IFG | + DEV_MAC_IFG_CFG_RX_IFG1 | + DEV_MAC_IFG_CFG_RX_IFG2, + lan966x, DEV_MAC_IFG_CFG(port->chip_port)); + + lan_rmw(DEV_MAC_HDX_CFG_SEED_SET(4) | + DEV_MAC_HDX_CFG_SEED_LOAD_SET(1), + DEV_MAC_HDX_CFG_SEED | + DEV_MAC_HDX_CFG_SEED_LOAD, + lan966x, DEV_MAC_HDX_CFG(port->chip_port)); + + if (config->portmode == PHY_INTERFACE_MODE_GMII) { + if (config->speed == SPEED_1000) + lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(1), + CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, + lan966x, + CHIP_TOP_CUPHY_PORT_CFG(port->chip_port)); + else + lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(0), + CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, + lan966x, + CHIP_TOP_CUPHY_PORT_CFG(port->chip_port)); + } + + /* No PFC */ + lan_wr(ANA_PFC_CFG_FC_LINK_SPEED_SET(speed), + lan966x, ANA_PFC_CFG(port->chip_port)); + + lan_rmw(DEV_PCS1G_CFG_PCS_ENA_SET(1), + DEV_PCS1G_CFG_PCS_ENA, + lan966x, DEV_PCS1G_CFG(port->chip_port)); + + lan_rmw(DEV_PCS1G_SD_CFG_SD_ENA_SET(0), + DEV_PCS1G_SD_CFG_SD_ENA, + lan966x, DEV_PCS1G_SD_CFG(port->chip_port)); + + /* Set Pause WM hysteresis, start/stop are in 1518 byte units */ + lan_wr(SYS_PAUSE_CFG_PAUSE_ENA_SET(1) | + SYS_PAUSE_CFG_PAUSE_STOP_SET(lan966x_wm_enc(4 * 1518)) | + SYS_PAUSE_CFG_PAUSE_START_SET(lan966x_wm_enc(6 * 1518)), + lan966x, SYS_PAUSE_CFG(port->chip_port)); + + /* Set SMAC of Pause frame (00:00:00:00:00:00) */ + lan_wr(0, lan966x, DEV_FC_MAC_LOW_CFG(port->chip_port)); + lan_wr(0, lan966x, DEV_FC_MAC_HIGH_CFG(port->chip_port)); + + /* Flow control */ + lan_rmw(SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(speed) | + SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(7) | + SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(1) | + SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(0xffff) | + SYS_MAC_FC_CFG_RX_FC_ENA_SET(config->pause & MLO_PAUSE_RX ? 1 : 0) | + SYS_MAC_FC_CFG_TX_FC_ENA_SET(config->pause & MLO_PAUSE_TX ? 
1 : 0), + SYS_MAC_FC_CFG_FC_LINK_SPEED | + SYS_MAC_FC_CFG_FC_LATENCY_CFG | + SYS_MAC_FC_CFG_ZERO_PAUSE_ENA | + SYS_MAC_FC_CFG_PAUSE_VAL_CFG | + SYS_MAC_FC_CFG_RX_FC_ENA | + SYS_MAC_FC_CFG_TX_FC_ENA, + lan966x, SYS_MAC_FC_CFG(port->chip_port)); + + /* Tail dropping watermark */ + atop_wm = lan966x->shared_queue_sz; + + /* The total memory size is divided by the number of front ports plus + * the CPU port + */ + lan_wr(lan966x_wm_enc(atop_wm / (lan966x->num_phys_ports + 1)), lan966x, + SYS_ATOP(port->chip_port)); + lan_wr(lan966x_wm_enc(atop_wm), lan966x, SYS_ATOP_TOT_CFG); + + /* Enable the MAC module; this needs to be done at the end */ + lan_wr(DEV_MAC_ENA_CFG_RX_ENA_SET(1) | + DEV_MAC_ENA_CFG_TX_ENA_SET(1), + lan966x, DEV_MAC_ENA_CFG(port->chip_port)); + + /* Take the clock out of reset */ + lan_wr(DEV_CLOCK_CFG_LINK_SPEED_SET(speed), + lan966x, DEV_CLOCK_CFG(port->chip_port)); + + /* Core: Enable port for frame transfer */ + lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) | + QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) | + QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1), + lan966x, QSYS_SW_PORT_MODE(port->chip_port)); + + lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(0) | + AFI_PORT_CFG_FRM_OUT_MAX_SET(16), + AFI_PORT_CFG_FC_SKIP_TTI_INJ | + AFI_PORT_CFG_FRM_OUT_MAX, + lan966x, AFI_PORT_CFG(port->chip_port)); +} + +void lan966x_port_config_down(struct lan966x_port *port) +{ + lan966x_port_link_down(port); +} + +void lan966x_port_config_up(struct lan966x_port *port) +{ + lan966x_port_link_up(port); +} + +void lan966x_port_status_get(struct lan966x_port *port, + struct phylink_link_state *state) +{ + struct lan966x *lan966x = port->lan966x; + bool link_down; + u16 bmsr = 0; + u16 lp_adv; + u32 val; + + val = lan_rd(lan966x, DEV_PCS1G_STICKY(port->chip_port)); + link_down = DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(val); + if (link_down) + lan_wr(val, lan966x, DEV_PCS1G_STICKY(port->chip_port)); + + /* Get both current Link and Sync status */ + val = lan_rd(lan966x, DEV_PCS1G_LINK_STATUS(port->chip_port)); + state->link = DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(val) && + DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(val); + state->link &= !link_down; + + /* Get PCS ANEG status register */ + val = lan_rd(lan966x, DEV_PCS1G_ANEG_STATUS(port->chip_port)); + /* Aneg complete provides more information */ + if (DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(val)) { + state->an_complete = true; + + bmsr |= state->link ? BMSR_LSTATUS : 0; + bmsr |= BMSR_ANEGCOMPLETE; + + lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val); + phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv); + } else { + if (!state->link) + return; + + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) + state->speed = SPEED_1000; + else if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + state->speed = SPEED_2500; + + state->duplex = DUPLEX_FULL; + } +} + +int lan966x_port_pcs_set(struct lan966x_port *port, + struct lan966x_port_config *config) +{ + struct lan966x *lan966x = port->lan966x; + bool inband_aneg = false; + bool outband; + bool full_preamble = false; + + if (config->portmode == PHY_INTERFACE_MODE_QUSGMII) + full_preamble = true; + + if (config->inband) { + if (config->portmode == PHY_INTERFACE_MODE_SGMII || + phy_interface_num_ports(config->portmode) == 4) + inband_aneg = true; /* Cisco-SGMII in-band-aneg */ + else if (config->portmode == PHY_INTERFACE_MODE_1000BASEX && + config->autoneg) + inband_aneg = true; /* Clause-37 in-band-aneg */ + + outband = false; + } else { + outband = true; + } + + /* Enable or disable in-band operation. 
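+ * Here SGMII_MODE_ENA is driven by the outband flag computed above: it is + * set for out-of-band operation and cleared when in-band signalling is + * used.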
+ * For QUSGMII, we rely on the preamble to transmit data such as + * timestamps, therefore force full preamble transmission, and prevent + * preamble shortening + */ + lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband) | + DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(full_preamble), + DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA | + DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, + lan966x, DEV_PCS1G_MODE_CFG(port->chip_port)); + + /* Enable PCS */ + lan_wr(DEV_PCS1G_CFG_PCS_ENA_SET(1), + lan966x, DEV_PCS1G_CFG(port->chip_port)); + + if (inband_aneg) { + int adv = phylink_mii_c22_pcs_encode_advertisement(config->portmode, + config->advertising); + if (adv >= 0) + /* Enable in-band aneg */ + lan_wr(DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(adv) | + DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) | + DEV_PCS1G_ANEG_CFG_ENA_SET(1) | + DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(1), + lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port)); + } else { + lan_wr(0, lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port)); + } + + /* Take PCS out of reset */ + lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000) | + DEV_CLOCK_CFG_PCS_RX_RST_SET(0) | + DEV_CLOCK_CFG_PCS_TX_RST_SET(0), + DEV_CLOCK_CFG_LINK_SPEED | + DEV_CLOCK_CFG_PCS_RX_RST | + DEV_CLOCK_CFG_PCS_TX_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); + + port->config = *config; + + return 0; +} + +void lan966x_port_init(struct lan966x_port *port) +{ + struct lan966x_port_config *config = &port->config; + struct lan966x *lan966x = port->lan966x; + + lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(0), + ANA_PORT_CFG_LEARN_ENA, + lan966x, ANA_PORT_CFG(port->chip_port)); + + lan966x_port_config_down(port); + + if (lan966x->fdma) + lan966x_fdma_netdev_init(lan966x, port->dev); + + if (phy_interface_num_ports(config->portmode) != 4) + return; + + lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) | + DEV_CLOCK_CFG_PCS_TX_RST_SET(0) | + DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000), + DEV_CLOCK_CFG_PCS_RX_RST | + DEV_CLOCK_CFG_PCS_TX_RST | + DEV_CLOCK_CFG_LINK_SPEED, + lan966x, DEV_CLOCK_CFG(port->chip_port)); +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c new file mode 100644 index 000000000..0a0e233f3 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c @@ -0,0 +1,900 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/ptp_classify.h> + +#include "lan966x_main.h" + +#define LAN966X_MAX_PTP_ID 512 + +/* Represents 1ppm adjustment in 2^59 format with 6.037735849ns as reference + * The value is calculated as following: (1/1000000)/((2^-59)/6.037735849) + */ +#define LAN966X_1PPM_FORMAT 3480517749723LL + +/* Represents 1ppb adjustment in 2^59 format with 6.037735849ns as reference + * The value is calculated as following: (1/1000000000)/((2^-59)/6.037735849) + */ +#define LAN966X_1PPB_FORMAT 3480517749LL + +#define TOD_ACC_PIN 0x7 + +enum { + PTP_PIN_ACTION_IDLE = 0, + PTP_PIN_ACTION_LOAD, + PTP_PIN_ACTION_SAVE, + PTP_PIN_ACTION_CLOCK, + PTP_PIN_ACTION_DELTA, + PTP_PIN_ACTION_TOD +}; + +static u64 lan966x_ptp_get_nominal_value(void) +{ + /* This is the default value by which the time of day is increased on + * each system clock cycle. It has the 5.59 nanosecond format. 
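+ * That is, the upper 5 bits hold the integer nanoseconds (6 here) and the + * lower 59 bits the fraction: 6.037735849 * 2^59 is roughly + * 0x304d4873ecade305, which corresponds to a ~165.625 MHz system clock.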
+ */ + return 0x304d4873ecade305; +} + +int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr) +{ + struct lan966x *lan966x = port->lan966x; + struct hwtstamp_config cfg; + struct lan966x_phc *phc; + + /* For now, don't allow PTP to run on ports that are part of a bridge: + * in the transparent clock case the HW would still forward the frames, + * so there would be duplicate frames + */ + if (lan966x->bridge_mask & BIT(port->chip_port)) + return -EINVAL; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + switch (cfg.tx_type) { + case HWTSTAMP_TX_ON: + port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; + break; + case HWTSTAMP_TX_ONESTEP_SYNC: + port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP; + break; + case HWTSTAMP_TX_OFF: + port->ptp_cmd = IFH_REW_OP_NOOP; + break; + default: + return -ERANGE; + } + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + cfg.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + /* Commit back the result & save it */ + mutex_lock(&lan966x->ptp_lock); + phc = &lan966x->phc[LAN966X_PHC_PORT]; + memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg)); + mutex_unlock(&lan966x->ptp_lock); + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr) +{ + struct lan966x *lan966x = port->lan966x; + struct lan966x_phc *phc; + + phc = &lan966x->phc[LAN966X_PHC_PORT]; + return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config, + sizeof(phc->hwtstamp_config)) ? 
-EFAULT : 0; +} + +static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb) +{ + struct ptp_header *header; + u8 msgtype; + int type; + + if (port->ptp_cmd == IFH_REW_OP_NOOP) + return IFH_REW_OP_NOOP; + + type = ptp_classify_raw(skb); + if (type == PTP_CLASS_NONE) + return IFH_REW_OP_NOOP; + + header = ptp_parse_header(skb, type); + if (!header) + return IFH_REW_OP_NOOP; + + if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) + return IFH_REW_OP_TWO_STEP_PTP; + + /* If it is a sync frame and one-step mode is configured then use the + * one-step operation, otherwise run as two-step + */ + msgtype = ptp_get_msgtype(header, type); + if ((msgtype & 0xf) == 0) + return IFH_REW_OP_ONE_STEP_PTP; + + return IFH_REW_OP_TWO_STEP_PTP; +} + +static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port) +{ + struct sk_buff *skb, *skb_tmp; + unsigned long flags; + + spin_lock_irqsave(&port->tx_skbs.lock, flags); + skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { + if (time_after(LAN966X_SKB_CB(skb)->jiffies + LAN966X_PTP_TIMEOUT, + jiffies)) + break; + + __skb_unlink(skb, &port->tx_skbs); + dev_kfree_skb_any(skb); + } + spin_unlock_irqrestore(&port->tx_skbs.lock, flags); +} + +int lan966x_ptp_txtstamp_request(struct lan966x_port *port, + struct sk_buff *skb) +{ + struct lan966x *lan966x = port->lan966x; + unsigned long flags; + u8 rew_op; + + rew_op = lan966x_ptp_classify(port, skb); + LAN966X_SKB_CB(skb)->rew_op = rew_op; + + if (rew_op != IFH_REW_OP_TWO_STEP_PTP) + return 0; + + lan966x_ptp_txtstamp_old_release(port); + + spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags); + if (lan966x->ptp_skbs == LAN966X_MAX_PTP_ID) { + spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags); + return -EBUSY; + } + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + skb_queue_tail(&port->tx_skbs, skb); + LAN966X_SKB_CB(skb)->ts_id = port->ts_id; + LAN966X_SKB_CB(skb)->jiffies = jiffies; + + lan966x->ptp_skbs++; + port->ts_id++; + if (port->ts_id == LAN966X_MAX_PTP_ID) + port->ts_id = 0; + + spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags); + + return 0; +} + +void lan966x_ptp_txtstamp_release(struct lan966x_port *port, + struct sk_buff *skb) +{ + struct lan966x *lan966x = port->lan966x; + unsigned long flags; + + spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags); + port->ts_id--; + lan966x->ptp_skbs--; + skb_unlink(skb, &port->tx_skbs); + spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags); +} + +static void lan966x_get_hwtimestamp(struct lan966x *lan966x, + struct timespec64 *ts, + u32 nsec) +{ + /* Read current PTP time to get seconds */ + unsigned long flags; + u32 curr_nsec; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | + PTP_PIN_CFG_PIN_DOM_SET(LAN966X_PHC_PORT) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(TOD_ACC_PIN)); + + ts->tv_sec = lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + curr_nsec = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN)); + + ts->tv_nsec = nsec; + + /* The seconds counter has incremented since the ts was registered */ + if (curr_nsec < nsec) + ts->tv_sec--; + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); +} + +irqreturn_t lan966x_ptp_irq_handler(int irq, void *args) +{ + int budget = LAN966X_MAX_PTP_ID; + struct lan966x *lan966x = args; + + while (budget--) { + struct sk_buff *skb, *skb_tmp, *skb_match = NULL; + struct skb_shared_hwtstamps shhwtstamps; + struct lan966x_port *port; + struct timespec64 ts; + unsigned long 
flags; + u32 val, id, txport; + u32 delay; + + val = lan_rd(lan966x, PTP_TWOSTEP_CTRL); + + /* Check if a timestamp can be retrieved */ + if (!(val & PTP_TWOSTEP_CTRL_VLD)) + break; + + WARN_ON(val & PTP_TWOSTEP_CTRL_OVFL); + + if (!(val & PTP_TWOSTEP_CTRL_STAMP_TX)) + continue; + + /* Retrieve the ts Tx port */ + txport = PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val); + + /* Retrieve its associated skb */ + port = lan966x->ports[txport]; + + /* Retrieve the delay */ + delay = lan_rd(lan966x, PTP_TWOSTEP_STAMP); + delay = PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay); + + /* Get the next timestamp from the fifo; it needs to be the rx + * timestamp, which carries the id of the frame + */ + lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1), + PTP_TWOSTEP_CTRL_NXT, + lan966x, PTP_TWOSTEP_CTRL); + + val = lan_rd(lan966x, PTP_TWOSTEP_CTRL); + + /* Check if a timestamp can be retrieved */ + if (!(val & PTP_TWOSTEP_CTRL_VLD)) + break; + + /* Read RX timestamping to get the ID */ + id = lan_rd(lan966x, PTP_TWOSTEP_STAMP); + + spin_lock_irqsave(&port->tx_skbs.lock, flags); + skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { + if (LAN966X_SKB_CB(skb)->ts_id != id) + continue; + + __skb_unlink(skb, &port->tx_skbs); + skb_match = skb; + break; + } + spin_unlock_irqrestore(&port->tx_skbs.lock, flags); + + /* Next ts */ + lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1), + PTP_TWOSTEP_CTRL_NXT, + lan966x, PTP_TWOSTEP_CTRL); + + if (WARN_ON(!skb_match)) + continue; + + spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags); + lan966x->ptp_skbs--; + spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags); + + /* Get the h/w timestamp */ + lan966x_get_hwtimestamp(lan966x, &ts, delay); + + /* Set the timestamp into the skb */ + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + skb_tstamp_tx(skb_match, &shhwtstamps); + + dev_kfree_skb_any(skb_match); + } + + return IRQ_HANDLED; +} + +irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args) +{ + struct lan966x *lan966x = args; + struct lan966x_phc *phc; + unsigned long flags; + u64 time = 0; + time64_t s; + int pin, i; + s64 ns; + + if (!(lan_rd(lan966x, PTP_PIN_INTR))) + return IRQ_NONE; + + /* Go through all domains and see which pin generated the interrupt */ + for (i = 0; i < LAN966X_PHC_COUNT; ++i) { + struct ptp_clock_event ptp_event = {0}; + + phc = &lan966x->phc[i]; + pin = ptp_find_pin_unlocked(phc->clock, PTP_PF_EXTTS, 0); + if (pin == -1) + continue; + + if (!(lan_rd(lan966x, PTP_PIN_INTR) & BIT(pin))) + continue; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + /* Acknowledge the interrupt so that a new event can be latched. 
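+ * Only BIT(pin) is written below, so pending events on other pins + * are left untouched.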
+ * Writing 1 to a bit clears it. + */ + lan_wr(BIT(pin), lan966x, PTP_PIN_INTR); + + /* Get current time */ + s = lan_rd(lan966x, PTP_TOD_SEC_MSB(pin)); + s <<= 32; + s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(pin)); + ns = lan_rd(lan966x, PTP_TOD_NSEC(pin)); + ns &= PTP_TOD_NSEC_TOD_NSEC; + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + /* Deal with negative values (see lan966x_ptp_gettime64) */ + if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) { + s--; + ns &= 0xf; + ns += 999999984; + } + time = ktime_set(s, ns); + + ptp_event.index = pin; + ptp_event.timestamp = time; + ptp_event.type = PTP_CLOCK_EXTTS; + ptp_clock_event(phc->clock, &ptp_event); + } + + return IRQ_HANDLED; +} + +static int lan966x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + unsigned long flags; + bool neg_adj = false; + u64 tod_inc; + u64 ref; + + if (!scaled_ppm) + return 0; + + if (scaled_ppm < 0) { + neg_adj = true; + scaled_ppm = -scaled_ppm; + } + + tod_inc = lan966x_ptp_get_nominal_value(); + + /* The multiplication is split into two separate additions to avoid + * overflow: if scaled_ppm with its 16bit fractional part were + * multiplied in one go, adjustments above roughly 20ppm would + * overflow. + */ + ref = LAN966X_1PPM_FORMAT * (scaled_ppm >> 16); + ref += (LAN966X_1PPM_FORMAT * (0xffff & scaled_ppm)) >> 16; + tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(BIT(phc->index)), + PTP_DOM_CFG_CLKCFG_DIS, + lan966x, PTP_DOM_CFG); + + lan_wr((u32)tod_inc & 0xFFFFFFFF, lan966x, + PTP_CLK_PER_CFG(phc->index, 0)); + lan_wr((u32)(tod_inc >> 32), lan966x, + PTP_CLK_PER_CFG(phc->index, 1)); + + lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0), + PTP_DOM_CFG_CLKCFG_DIS, + lan966x, PTP_DOM_CFG); + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + return 0; +} + +static int lan966x_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + unsigned long flags; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + /* Must be in IDLE mode before the time can be loaded */ + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(TOD_ACC_PIN)); + + /* Set new value */ + lan_wr(PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)), + lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN)); + lan_wr(lower_32_bits(ts->tv_sec), + lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + lan_wr(ts->tv_nsec, lan966x, PTP_TOD_NSEC(TOD_ACC_PIN)); + + /* Apply new values */ + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(TOD_ACC_PIN)); + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + return 0; +} + +int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + unsigned long flags; + time64_t s; + s64 ns; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + 
PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(TOD_ACC_PIN)); + + s = lan_rd(lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN)); + s <<= 32; + s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + ns = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN)); + ns &= PTP_TOD_NSEC_TOD_NSEC; + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + /* Deal with negative values: nanoseconds in the -16..-1 range are + * reported as 0x3FFFFFF0..0x3FFFFFFF, so fold them back into the + * previous second + */ + if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) { + s--; + ns &= 0xf; + ns += 999999984; + } + + set_normalized_timespec64(ts, s, ns); + return 0; +} + +static int lan966x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + + if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) { + unsigned long flags; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + /* Must be in IDLE mode before the time can be loaded */ + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(TOD_ACC_PIN)); + + lan_wr(PTP_TOD_NSEC_TOD_NSEC_SET(delta), + lan966x, PTP_TOD_NSEC(TOD_ACC_PIN)); + + /* Adjust time with the value of PTP_TOD_NSEC */ + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(TOD_ACC_PIN)); + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + } else { + /* Fall back to lan966x_ptp_settime64, which is not exact */ + struct timespec64 ts; + u64 now; + + lan966x_ptp_gettime64(ptp, &ts); + + now = ktime_to_ns(timespec64_to_ktime(ts)); + ts = ns_to_timespec64(now + delta); + + lan966x_ptp_settime64(ptp, &ts); + } + + return 0; +} + +static int lan966x_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + struct ptp_clock_info *info; + int i; + + /* Currently only 1 channel is supported */ + if (chan != 0) + return -1; + + switch (func) { + case PTP_PF_NONE: + case PTP_PF_PEROUT: + case PTP_PF_EXTTS: + break; + default: + return -1; + } + + /* The PTP pins are shared by all the PHCs, so it is required to check + * whether the pin is connected to another PHC. The pin is connected to + * another PHC if that pin already has a function assigned on that PHC. 
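+ * For example, if pin 2 is already assigned as PTP_PF_PEROUT on one PHC, + * a request to use pin 2 (for PEROUT or EXTTS) on any other PHC is + * rejected by the loop below.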
+ */ + for (i = 0; i < LAN966X_PHC_COUNT; ++i) { + info = &lan966x->phc[i].info; + + /* Skip the check against ourselves */ + if (ptp == info) + continue; + + if (info->pin_config[pin].func == PTP_PF_PEROUT || + info->pin_config[pin].func == PTP_PF_EXTTS) + return -1; + } + + return 0; +} + +static int lan966x_ptp_perout(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + struct timespec64 ts_phase, ts_period; + unsigned long flags; + s64 wf_high, wf_low; + bool pps = false; + int pin; + + if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE | + PTP_PEROUT_PHASE)) + return -EOPNOTSUPP; + + pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index); + if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM) + return -EINVAL; + + if (!on) { + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(pin)); + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + return 0; + } + + if (rq->perout.period.sec == 1 && + rq->perout.period.nsec == 0) + pps = true; + + if (rq->perout.flags & PTP_PEROUT_PHASE) { + ts_phase.tv_sec = rq->perout.phase.sec; + ts_phase.tv_nsec = rq->perout.phase.nsec; + } else { + ts_phase.tv_sec = rq->perout.start.sec; + ts_phase.tv_nsec = rq->perout.start.nsec; + } + + if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) { + dev_warn(lan966x->dev, + "Absolute time not supported!\n"); + return -EINVAL; + } + + if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) { + struct timespec64 ts_on; + + ts_on.tv_sec = rq->perout.on.sec; + ts_on.tv_nsec = rq->perout.on.nsec; + + wf_high = timespec64_to_ns(&ts_on); + } else { + wf_high = 5000; + } + + if (pps) { + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(ts_phase.tv_nsec), + lan966x, PTP_WF_LOW_PERIOD(pin)); + lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high), + lan966x, PTP_WF_HIGH_PERIOD(pin)); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(3), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(pin)); + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + return 0; + } + + ts_period.tv_sec = rq->perout.period.sec; + ts_period.tv_nsec = rq->perout.period.nsec; + + wf_low = timespec64_to_ns(&ts_period); + wf_low -= wf_high; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(wf_low), + lan966x, PTP_WF_LOW_PERIOD(pin)); + lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high), + lan966x, PTP_WF_HIGH_PERIOD(pin)); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(pin)); + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + return 0; +} + +static int lan966x_ptp_extts(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + unsigned long flags; + int pin; + u32 val; + + if (lan966x->ptp_ext_irq <= 0) + return -EOPNOTSUPP; + + /* Reject requests with unsupported flags */ + if (rq->extts.flags 
& ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index); + if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM) + return -EINVAL; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | + PTP_PIN_CFG_PIN_SYNC_SET(on ? 3 : 0) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SELECT_SET(pin), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_SYNC | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SELECT, + lan966x, PTP_PIN_CFG(pin)); + + val = lan_rd(lan966x, PTP_PIN_INTR_ENA); + if (on) + val |= BIT(pin); + else + val &= ~BIT(pin); + lan_wr(val, lan966x, PTP_PIN_INTR_ENA); + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + return 0; +} + +static int lan966x_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + return lan966x_ptp_perout(ptp, rq, on); + case PTP_CLK_REQ_EXTTS: + return lan966x_ptp_extts(ptp, rq, on); + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static struct ptp_clock_info lan966x_ptp_clock_info = { + .owner = THIS_MODULE, + .name = "lan966x ptp", + .max_adj = 200000, + .gettime64 = lan966x_ptp_gettime64, + .settime64 = lan966x_ptp_settime64, + .adjtime = lan966x_ptp_adjtime, + .adjfine = lan966x_ptp_adjfine, + .verify = lan966x_ptp_verify, + .enable = lan966x_ptp_enable, + .n_per_out = LAN966X_PHC_PINS_NUM, + .n_ext_ts = LAN966X_PHC_PINS_NUM, + .n_pins = LAN966X_PHC_PINS_NUM, +}; + +static int lan966x_ptp_phc_init(struct lan966x *lan966x, + int index, + struct ptp_clock_info *clock_info) +{ + struct lan966x_phc *phc = &lan966x->phc[index]; + struct ptp_pin_desc *p; + int i; + + for (i = 0; i < LAN966X_PHC_PINS_NUM; i++) { + p = &phc->pins[i]; + + snprintf(p->name, sizeof(p->name), "pin%d", i); + p->index = i; + p->func = PTP_PF_NONE; + } + + phc->info = *clock_info; + phc->info.pin_config = &phc->pins[0]; + phc->clock = ptp_clock_register(&phc->info, lan966x->dev); + if (IS_ERR(phc->clock)) + return PTR_ERR(phc->clock); + + phc->index = index; + phc->lan966x = lan966x; + + /* PTP Rx stamping is always enabled. 
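+ * The default filter reported to user space is therefore + * HWTSTAMP_FILTER_PTP_V2_EVENT, as set just below.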
*/ + phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + + return 0; +} + +int lan966x_ptp_init(struct lan966x *lan966x) +{ + u64 tod_adj = lan966x_ptp_get_nominal_value(); + struct lan966x_port *port; + int err, i; + + if (!lan966x->ptp) + return 0; + + for (i = 0; i < LAN966X_PHC_COUNT; ++i) { + err = lan966x_ptp_phc_init(lan966x, i, &lan966x_ptp_clock_info); + if (err) + return err; + } + + spin_lock_init(&lan966x->ptp_clock_lock); + spin_lock_init(&lan966x->ptp_ts_id_lock); + mutex_init(&lan966x->ptp_lock); + + /* Disable master counters */ + lan_wr(PTP_DOM_CFG_ENA_SET(0), lan966x, PTP_DOM_CFG); + + /* Configure the nominal TOD increment per clock cycle */ + lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0x7), + PTP_DOM_CFG_CLKCFG_DIS, + lan966x, PTP_DOM_CFG); + + for (i = 0; i < LAN966X_PHC_COUNT; ++i) { + lan_wr((u32)tod_adj & 0xFFFFFFFF, lan966x, + PTP_CLK_PER_CFG(i, 0)); + lan_wr((u32)(tod_adj >> 32), lan966x, + PTP_CLK_PER_CFG(i, 1)); + } + + lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0), + PTP_DOM_CFG_CLKCFG_DIS, + lan966x, PTP_DOM_CFG); + + /* Enable master counters */ + lan_wr(PTP_DOM_CFG_ENA_SET(0x7), lan966x, PTP_DOM_CFG); + + for (i = 0; i < lan966x->num_phys_ports; i++) { + port = lan966x->ports[i]; + if (!port) + continue; + + skb_queue_head_init(&port->tx_skbs); + } + + return 0; +} + +void lan966x_ptp_deinit(struct lan966x *lan966x) +{ + struct lan966x_port *port; + int i; + + if (!lan966x->ptp) + return; + + for (i = 0; i < lan966x->num_phys_ports; i++) { + port = lan966x->ports[i]; + if (!port) + continue; + + skb_queue_purge(&port->tx_skbs); + } + + for (i = 0; i < LAN966X_PHC_COUNT; ++i) + ptp_clock_unregister(lan966x->phc[i].clock); +} + +void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb, + u64 timestamp) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct lan966x_phc *phc; + struct timespec64 ts; + u64 full_ts_in_ns; + + if (!lan966x->ptp) + return; + + phc = &lan966x->phc[LAN966X_PHC_PORT]; + lan966x_ptp_gettime64(&phc->info, &ts); + + /* Drop the sub-ns precision */ + timestamp = timestamp >> 2; + if (ts.tv_nsec < timestamp) + ts.tv_sec--; + ts.tv_nsec = timestamp; + full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = full_ts_in_ns; +} + +u32 lan966x_ptp_get_period_ps(void) +{ + /* This represents the system clock period in picoseconds */ + return 15125; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h new file mode 100644 index 000000000..fb5087fef --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h @@ -0,0 +1,1509 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ + +/* This file is autogenerated by cml-utils 2021-10-10 13:25:08 +0200. + * Commit ID: 26db2002924973d36a30b369c94f025a678fe9ea (dirty) + */ + +#ifndef _LAN966X_REGS_H_ +#define _LAN966X_REGS_H_ + +#include <linux/bitfield.h> +#include <linux/types.h> +#include <linux/bug.h> + +enum lan966x_target { + TARGET_AFI = 2, + TARGET_ANA = 3, + TARGET_CHIP_TOP = 5, + TARGET_CPU = 6, + TARGET_DEV = 13, + TARGET_FDMA = 21, + TARGET_GCB = 27, + TARGET_ORG = 36, + TARGET_PTP = 41, + TARGET_QS = 42, + TARGET_QSYS = 46, + TARGET_REW = 47, + TARGET_SYS = 52, + NUM_TARGETS = 66 +}; + +#define __REG(...) 
__VA_ARGS__ + +/* AFI:PORT_TBL:PORT_FRM_OUT */ +#define AFI_PORT_FRM_OUT(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 0, 0, 1, 4) + +#define AFI_PORT_FRM_OUT_FRM_OUT_CNT GENMASK(26, 16) +#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_SET(x)\ + FIELD_PREP(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x) +#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(x)\ + FIELD_GET(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x) + +/* AFI:PORT_TBL:PORT_CFG */ +#define AFI_PORT_CFG(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 4, 0, 1, 4) + +#define AFI_PORT_CFG_FC_SKIP_TTI_INJ BIT(16) +#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(x)\ + FIELD_PREP(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x) +#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_GET(x)\ + FIELD_GET(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x) + +#define AFI_PORT_CFG_FRM_OUT_MAX GENMASK(9, 0) +#define AFI_PORT_CFG_FRM_OUT_MAX_SET(x)\ + FIELD_PREP(AFI_PORT_CFG_FRM_OUT_MAX, x) +#define AFI_PORT_CFG_FRM_OUT_MAX_GET(x)\ + FIELD_GET(AFI_PORT_CFG_FRM_OUT_MAX, x) + +/* ANA:ANA:ADVLEARN */ +#define ANA_ADVLEARN __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 0, 0, 1, 4) + +#define ANA_ADVLEARN_VLAN_CHK BIT(0) +#define ANA_ADVLEARN_VLAN_CHK_SET(x)\ + FIELD_PREP(ANA_ADVLEARN_VLAN_CHK, x) +#define ANA_ADVLEARN_VLAN_CHK_GET(x)\ + FIELD_GET(ANA_ADVLEARN_VLAN_CHK, x) + +/* ANA:ANA:VLANMASK */ +#define ANA_VLANMASK __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 8, 0, 1, 4) + +/* ANA:ANA:ANAINTR */ +#define ANA_ANAINTR __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 16, 0, 1, 4) + +#define ANA_ANAINTR_INTR BIT(1) +#define ANA_ANAINTR_INTR_SET(x)\ + FIELD_PREP(ANA_ANAINTR_INTR, x) +#define ANA_ANAINTR_INTR_GET(x)\ + FIELD_GET(ANA_ANAINTR_INTR, x) + +#define ANA_ANAINTR_INTR_ENA BIT(0) +#define ANA_ANAINTR_INTR_ENA_SET(x)\ + FIELD_PREP(ANA_ANAINTR_INTR_ENA, x) +#define ANA_ANAINTR_INTR_ENA_GET(x)\ + FIELD_GET(ANA_ANAINTR_INTR_ENA, x) + +/* ANA:ANA:AUTOAGE */ +#define ANA_AUTOAGE __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 44, 0, 1, 4) + +#define ANA_AUTOAGE_AGE_PERIOD GENMASK(20, 1) +#define ANA_AUTOAGE_AGE_PERIOD_SET(x)\ + FIELD_PREP(ANA_AUTOAGE_AGE_PERIOD, x) +#define ANA_AUTOAGE_AGE_PERIOD_GET(x)\ + FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x) + +/* ANA:ANA:MIRRORPORTS */ +#define ANA_MIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 60, 0, 1, 4) + +#define ANA_MIRRORPORTS_MIRRORPORTS GENMASK(8, 0) +#define ANA_MIRRORPORTS_MIRRORPORTS_SET(x)\ + FIELD_PREP(ANA_MIRRORPORTS_MIRRORPORTS, x) +#define ANA_MIRRORPORTS_MIRRORPORTS_GET(x)\ + FIELD_GET(ANA_MIRRORPORTS_MIRRORPORTS, x) + +/* ANA:ANA:EMIRRORPORTS */ +#define ANA_EMIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 64, 0, 1, 4) + +#define ANA_EMIRRORPORTS_EMIRRORPORTS GENMASK(8, 0) +#define ANA_EMIRRORPORTS_EMIRRORPORTS_SET(x)\ + FIELD_PREP(ANA_EMIRRORPORTS_EMIRRORPORTS, x) +#define ANA_EMIRRORPORTS_EMIRRORPORTS_GET(x)\ + FIELD_GET(ANA_EMIRRORPORTS_EMIRRORPORTS, x) + +/* ANA:ANA:FLOODING */ +#define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4) + +#define ANA_FLOODING_FLD_UNICAST GENMASK(17, 12) +#define ANA_FLOODING_FLD_UNICAST_SET(x)\ + FIELD_PREP(ANA_FLOODING_FLD_UNICAST, x) +#define ANA_FLOODING_FLD_UNICAST_GET(x)\ + FIELD_GET(ANA_FLOODING_FLD_UNICAST, x) + +#define ANA_FLOODING_FLD_BROADCAST GENMASK(11, 6) +#define ANA_FLOODING_FLD_BROADCAST_SET(x)\ + FIELD_PREP(ANA_FLOODING_FLD_BROADCAST, x) +#define ANA_FLOODING_FLD_BROADCAST_GET(x)\ + FIELD_GET(ANA_FLOODING_FLD_BROADCAST, x) + +#define ANA_FLOODING_FLD_MULTICAST GENMASK(5, 0) +#define ANA_FLOODING_FLD_MULTICAST_SET(x)\ + FIELD_PREP(ANA_FLOODING_FLD_MULTICAST, x) +#define ANA_FLOODING_FLD_MULTICAST_GET(x)\ + 
FIELD_GET(ANA_FLOODING_FLD_MULTICAST, x) + +/* ANA:ANA:FLOODING_IPMC */ +#define ANA_FLOODING_IPMC __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 100, 0, 1, 4) + +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL GENMASK(23, 18) +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x) +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x) + +#define ANA_FLOODING_IPMC_FLD_MC4_DATA GENMASK(17, 12) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_DATA, x) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_DATA, x) + +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL GENMASK(11, 6) +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x) +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x) + +#define ANA_FLOODING_IPMC_FLD_MC6_DATA GENMASK(5, 0) +#define ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_DATA, x) +#define ANA_FLOODING_IPMC_FLD_MC6_DATA_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_DATA, x) + +/* ANA:PGID:PGID */ +#define ANA_PGID(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 0, 0, 1, 4) + +#define ANA_PGID_PGID GENMASK(8, 0) +#define ANA_PGID_PGID_SET(x)\ + FIELD_PREP(ANA_PGID_PGID, x) +#define ANA_PGID_PGID_GET(x)\ + FIELD_GET(ANA_PGID_PGID, x) + +/* ANA:PGID:PGID_CFG */ +#define ANA_PGID_CFG(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 4, 0, 1, 4) + +#define ANA_PGID_CFG_OBEY_VLAN BIT(0) +#define ANA_PGID_CFG_OBEY_VLAN_SET(x)\ + FIELD_PREP(ANA_PGID_CFG_OBEY_VLAN, x) +#define ANA_PGID_CFG_OBEY_VLAN_GET(x)\ + FIELD_GET(ANA_PGID_CFG_OBEY_VLAN, x) + +/* ANA:ANA_TABLES:MACHDATA */ +#define ANA_MACHDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 40, 0, 1, 4) + +/* ANA:ANA_TABLES:MACLDATA */ +#define ANA_MACLDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 44, 0, 1, 4) + +/* ANA:ANA_TABLES:MACACCESS */ +#define ANA_MACACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 48, 0, 1, 4) + +#define ANA_MACACCESS_CHANGE2SW BIT(17) +#define ANA_MACACCESS_CHANGE2SW_SET(x)\ + FIELD_PREP(ANA_MACACCESS_CHANGE2SW, x) +#define ANA_MACACCESS_CHANGE2SW_GET(x)\ + FIELD_GET(ANA_MACACCESS_CHANGE2SW, x) + +#define ANA_MACACCESS_MAC_CPU_COPY BIT(16) +#define ANA_MACACCESS_MAC_CPU_COPY_SET(x)\ + FIELD_PREP(ANA_MACACCESS_MAC_CPU_COPY, x) +#define ANA_MACACCESS_MAC_CPU_COPY_GET(x)\ + FIELD_GET(ANA_MACACCESS_MAC_CPU_COPY, x) + +#define ANA_MACACCESS_VALID BIT(12) +#define ANA_MACACCESS_VALID_SET(x)\ + FIELD_PREP(ANA_MACACCESS_VALID, x) +#define ANA_MACACCESS_VALID_GET(x)\ + FIELD_GET(ANA_MACACCESS_VALID, x) + +#define ANA_MACACCESS_ENTRYTYPE GENMASK(11, 10) +#define ANA_MACACCESS_ENTRYTYPE_SET(x)\ + FIELD_PREP(ANA_MACACCESS_ENTRYTYPE, x) +#define ANA_MACACCESS_ENTRYTYPE_GET(x)\ + FIELD_GET(ANA_MACACCESS_ENTRYTYPE, x) + +#define ANA_MACACCESS_DEST_IDX GENMASK(9, 4) +#define ANA_MACACCESS_DEST_IDX_SET(x)\ + FIELD_PREP(ANA_MACACCESS_DEST_IDX, x) +#define ANA_MACACCESS_DEST_IDX_GET(x)\ + FIELD_GET(ANA_MACACCESS_DEST_IDX, x) + +#define ANA_MACACCESS_MAC_TABLE_CMD GENMASK(3, 0) +#define ANA_MACACCESS_MAC_TABLE_CMD_SET(x)\ + FIELD_PREP(ANA_MACACCESS_MAC_TABLE_CMD, x) +#define ANA_MACACCESS_MAC_TABLE_CMD_GET(x)\ + FIELD_GET(ANA_MACACCESS_MAC_TABLE_CMD, x) + +/* ANA:ANA_TABLES:MACTINDX */ +#define ANA_MACTINDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 52, 0, 1, 4) + +#define ANA_MACTINDX_BUCKET GENMASK(12, 11) +#define ANA_MACTINDX_BUCKET_SET(x)\ + FIELD_PREP(ANA_MACTINDX_BUCKET, x) +#define 
ANA_MACTINDX_BUCKET_GET(x)\ + FIELD_GET(ANA_MACTINDX_BUCKET, x) + +#define ANA_MACTINDX_M_INDEX GENMASK(10, 0) +#define ANA_MACTINDX_M_INDEX_SET(x)\ + FIELD_PREP(ANA_MACTINDX_M_INDEX, x) +#define ANA_MACTINDX_M_INDEX_GET(x)\ + FIELD_GET(ANA_MACTINDX_M_INDEX, x) + +/* ANA:ANA_TABLES:VLAN_PORT_MASK */ +#define ANA_VLAN_PORT_MASK __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 56, 0, 1, 4) + +#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK GENMASK(8, 0) +#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(x)\ + FIELD_PREP(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x) +#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_GET(x)\ + FIELD_GET(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x) + +/* ANA:ANA_TABLES:VLANACCESS */ +#define ANA_VLANACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 60, 0, 1, 4) + +#define ANA_VLANACCESS_VLAN_TBL_CMD GENMASK(1, 0) +#define ANA_VLANACCESS_VLAN_TBL_CMD_SET(x)\ + FIELD_PREP(ANA_VLANACCESS_VLAN_TBL_CMD, x) +#define ANA_VLANACCESS_VLAN_TBL_CMD_GET(x)\ + FIELD_GET(ANA_VLANACCESS_VLAN_TBL_CMD, x) + +/* ANA:ANA_TABLES:VLANTIDX */ +#define ANA_VLANTIDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 64, 0, 1, 4) + +#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS BIT(18) +#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x) +#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_GET(x)\ + FIELD_GET(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x) + +#define ANA_VLANTIDX_V_INDEX GENMASK(11, 0) +#define ANA_VLANTIDX_V_INDEX_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_V_INDEX, x) +#define ANA_VLANTIDX_V_INDEX_GET(x)\ + FIELD_GET(ANA_VLANTIDX_V_INDEX, x) + +/* ANA:PORT:VLAN_CFG */ +#define ANA_VLAN_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 0, 0, 1, 4) + +#define ANA_VLAN_CFG_VLAN_AWARE_ENA BIT(20) +#define ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_AWARE_ENA, x) +#define ANA_VLAN_CFG_VLAN_AWARE_ENA_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_AWARE_ENA, x) + +#define ANA_VLAN_CFG_VLAN_POP_CNT GENMASK(19, 18) +#define ANA_VLAN_CFG_VLAN_POP_CNT_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_POP_CNT, x) +#define ANA_VLAN_CFG_VLAN_POP_CNT_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_POP_CNT, x) + +#define ANA_VLAN_CFG_VLAN_VID GENMASK(11, 0) +#define ANA_VLAN_CFG_VLAN_VID_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_VID, x) +#define ANA_VLAN_CFG_VLAN_VID_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_VID, x) + +/* ANA:PORT:DROP_CFG */ +#define ANA_DROP_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 4, 0, 1, 4) + +#define ANA_DROP_CFG_DROP_UNTAGGED_ENA BIT(6) +#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3) +#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2) +#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_MC_SMAC_ENA BIT(0) +#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x) +#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x) + +/* ANA:PORT:CPU_FWD_CFG */ +#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4) + +#define 
ANA_CPU_FWD_CFG_MLD_REDIR_ENA BIT(6) +#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x) +#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x) + +#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA BIT(5) +#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x) +#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x) + +#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA BIT(4) +#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x) +#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x) + +#define ANA_CPU_FWD_CFG_SRC_COPY_ENA BIT(3) +#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x) +#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x) + +/* ANA:PORT:CPU_FWD_BPDU_CFG */ +#define ANA_CPU_FWD_BPDU_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 100, 0, 1, 4) + +/* ANA:PORT:PORT_CFG */ +#define ANA_PORT_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 112, 0, 1, 4) + +#define ANA_PORT_CFG_SRC_MIRROR_ENA BIT(13) +#define ANA_PORT_CFG_SRC_MIRROR_ENA_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_SRC_MIRROR_ENA, x) +#define ANA_PORT_CFG_SRC_MIRROR_ENA_GET(x)\ + FIELD_GET(ANA_PORT_CFG_SRC_MIRROR_ENA, x) + +#define ANA_PORT_CFG_LEARNAUTO BIT(6) +#define ANA_PORT_CFG_LEARNAUTO_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x) +#define ANA_PORT_CFG_LEARNAUTO_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LEARNAUTO, x) + +#define ANA_PORT_CFG_LEARN_ENA BIT(5) +#define ANA_PORT_CFG_LEARN_ENA_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_LEARN_ENA, x) +#define ANA_PORT_CFG_LEARN_ENA_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LEARN_ENA, x) + +#define ANA_PORT_CFG_RECV_ENA BIT(4) +#define ANA_PORT_CFG_RECV_ENA_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_RECV_ENA, x) +#define ANA_PORT_CFG_RECV_ENA_GET(x)\ + FIELD_GET(ANA_PORT_CFG_RECV_ENA, x) + +#define ANA_PORT_CFG_PORTID_VAL GENMASK(3, 0) +#define ANA_PORT_CFG_PORTID_VAL_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_PORTID_VAL, x) +#define ANA_PORT_CFG_PORTID_VAL_GET(x)\ + FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x) + +/* ANA:PORT:POL_CFG */ +#define ANA_POL_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 116, 0, 1, 4) + +#define ANA_POL_CFG_PORT_POL_ENA BIT(17) +#define ANA_POL_CFG_PORT_POL_ENA_SET(x)\ + FIELD_PREP(ANA_POL_CFG_PORT_POL_ENA, x) +#define ANA_POL_CFG_PORT_POL_ENA_GET(x)\ + FIELD_GET(ANA_POL_CFG_PORT_POL_ENA, x) + +#define ANA_POL_CFG_POL_ORDER GENMASK(8, 0) +#define ANA_POL_CFG_POL_ORDER_SET(x)\ + FIELD_PREP(ANA_POL_CFG_POL_ORDER, x) +#define ANA_POL_CFG_POL_ORDER_GET(x)\ + FIELD_GET(ANA_POL_CFG_POL_ORDER, x) + +/* ANA:PFC:PFC_CFG */ +#define ANA_PFC_CFG(g) __REG(TARGET_ANA, 0, 1, 30720, g, 8, 64, 0, 0, 1, 4) + +#define ANA_PFC_CFG_FC_LINK_SPEED GENMASK(1, 0) +#define ANA_PFC_CFG_FC_LINK_SPEED_SET(x)\ + FIELD_PREP(ANA_PFC_CFG_FC_LINK_SPEED, x) +#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\ + FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x) + +/* ANA:COMMON:AGGR_CFG */ +#define ANA_AGGR_CFG __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 0, 0, 1, 4) + +#define ANA_AGGR_CFG_AC_RND_ENA BIT(6) +#define ANA_AGGR_CFG_AC_RND_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_RND_ENA, x) +#define ANA_AGGR_CFG_AC_RND_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_RND_ENA, x) + +#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(5) +#define ANA_AGGR_CFG_AC_DMAC_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_DMAC_ENA, x) +#define ANA_AGGR_CFG_AC_DMAC_ENA_GET(x)\ + 
FIELD_GET(ANA_AGGR_CFG_AC_DMAC_ENA, x) + +#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(4) +#define ANA_AGGR_CFG_AC_SMAC_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_SMAC_ENA, x) +#define ANA_AGGR_CFG_AC_SMAC_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_SMAC_ENA, x) + +#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(3) +#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x) +#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x) + +#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(2) +#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x) +#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x) + +#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(1) +#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x) +#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x) + +#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(0) +#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x) +#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x) + +/* ANA:POL:POL_PIR_CFG */ +#define ANA_POL_PIR_CFG(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 0, 0, 1, 4) + +#define ANA_POL_PIR_CFG_PIR_RATE GENMASK(20, 6) +#define ANA_POL_PIR_CFG_PIR_RATE_SET(x)\ + FIELD_PREP(ANA_POL_PIR_CFG_PIR_RATE, x) +#define ANA_POL_PIR_CFG_PIR_RATE_GET(x)\ + FIELD_GET(ANA_POL_PIR_CFG_PIR_RATE, x) + +#define ANA_POL_PIR_CFG_PIR_BURST GENMASK(5, 0) +#define ANA_POL_PIR_CFG_PIR_BURST_SET(x)\ + FIELD_PREP(ANA_POL_PIR_CFG_PIR_BURST, x) +#define ANA_POL_PIR_CFG_PIR_BURST_GET(x)\ + FIELD_GET(ANA_POL_PIR_CFG_PIR_BURST, x) + +/* ANA:POL:POL_MODE_CFG */ +#define ANA_POL_MODE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 8, 0, 1, 4) + +#define ANA_POL_MODE_DROP_ON_YELLOW_ENA BIT(11) +#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(x)\ + FIELD_PREP(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x) +#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_GET(x)\ + FIELD_GET(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x) + +#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA BIT(10) +#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(x)\ + FIELD_PREP(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x) +#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_GET(x)\ + FIELD_GET(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x) + +#define ANA_POL_MODE_IPG_SIZE GENMASK(9, 5) +#define ANA_POL_MODE_IPG_SIZE_SET(x)\ + FIELD_PREP(ANA_POL_MODE_IPG_SIZE, x) +#define ANA_POL_MODE_IPG_SIZE_GET(x)\ + FIELD_GET(ANA_POL_MODE_IPG_SIZE, x) + +#define ANA_POL_MODE_FRM_MODE GENMASK(4, 3) +#define ANA_POL_MODE_FRM_MODE_SET(x)\ + FIELD_PREP(ANA_POL_MODE_FRM_MODE, x) +#define ANA_POL_MODE_FRM_MODE_GET(x)\ + FIELD_GET(ANA_POL_MODE_FRM_MODE, x) + +#define ANA_POL_MODE_OVERSHOOT_ENA BIT(0) +#define ANA_POL_MODE_OVERSHOOT_ENA_SET(x)\ + FIELD_PREP(ANA_POL_MODE_OVERSHOOT_ENA, x) +#define ANA_POL_MODE_OVERSHOOT_ENA_GET(x)\ + FIELD_GET(ANA_POL_MODE_OVERSHOOT_ENA, x) + +/* ANA:POL:POL_PIR_STATE */ +#define ANA_POL_PIR_STATE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 12, 0, 1, 4) + +#define ANA_POL_PIR_STATE_PIR_LVL GENMASK(21, 0) +#define ANA_POL_PIR_STATE_PIR_LVL_SET(x)\ + FIELD_PREP(ANA_POL_PIR_STATE_PIR_LVL, x) +#define ANA_POL_PIR_STATE_PIR_LVL_GET(x)\ + FIELD_GET(ANA_POL_PIR_STATE_PIR_LVL, x) + +/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */ +#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4) + +#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA BIT(0) +#define 
CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(x)\ + FIELD_PREP(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x) +#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x) + +/* DEV:PORT_MODE:CLOCK_CFG */ +#define DEV_CLOCK_CFG(t) __REG(TARGET_DEV, t, 8, 0, 0, 1, 28, 0, 0, 1, 4) + +#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7) +#define DEV_CLOCK_CFG_MAC_TX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_MAC_TX_RST, x) +#define DEV_CLOCK_CFG_MAC_TX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_MAC_TX_RST, x) + +#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6) +#define DEV_CLOCK_CFG_MAC_RX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_MAC_RX_RST, x) +#define DEV_CLOCK_CFG_MAC_RX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_MAC_RX_RST, x) + +#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5) +#define DEV_CLOCK_CFG_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_PCS_TX_RST, x) +#define DEV_CLOCK_CFG_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_PCS_TX_RST, x) + +#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4) +#define DEV_CLOCK_CFG_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_PCS_RX_RST, x) +#define DEV_CLOCK_CFG_PCS_RX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_PCS_RX_RST, x) + +#define DEV_CLOCK_CFG_PORT_RST BIT(3) +#define DEV_CLOCK_CFG_PORT_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_PORT_RST, x) +#define DEV_CLOCK_CFG_PORT_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_PORT_RST, x) + +#define DEV_CLOCK_CFG_LINK_SPEED GENMASK(1, 0) +#define DEV_CLOCK_CFG_LINK_SPEED_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_LINK_SPEED, x) +#define DEV_CLOCK_CFG_LINK_SPEED_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_LINK_SPEED, x) + +/* DEV:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV_MAC_ENA_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 0, 0, 1, 4) + +#define DEV_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV_MAC_ENA_CFG_RX_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_ENA_CFG_RX_ENA, x) +#define DEV_MAC_ENA_CFG_RX_ENA_GET(x)\ + FIELD_GET(DEV_MAC_ENA_CFG_RX_ENA, x) + +#define DEV_MAC_ENA_CFG_TX_ENA BIT(0) +#define DEV_MAC_ENA_CFG_TX_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_ENA_CFG_TX_ENA, x) +#define DEV_MAC_ENA_CFG_TX_ENA_GET(x)\ + FIELD_GET(DEV_MAC_ENA_CFG_TX_ENA, x) + +/* DEV:MAC_CFG_STATUS:MAC_MODE_CFG */ +#define DEV_MAC_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 4, 0, 1, 4) + +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x) +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\ + FIELD_GET(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x) + +/* DEV:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 8, 0, 1, 4) + +#define DEV_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0) +#define DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\ + FIELD_PREP(DEV_MAC_MAXLEN_CFG_MAX_LEN, x) +#define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ + FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x) + +/* DEV:MAC_CFG_STATUS:MAC_TAGS_CFG */ +#define DEV_MAC_TAGS_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 12, 0, 1, 4) + +#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1) +#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x) +#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_GET(x)\ + FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x) + +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x) +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\ + FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x) + +/* DEV:MAC_CFG_STATUS:MAC_IFG_CFG */ +#define DEV_MAC_IFG_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 20, 0, 1, 4) + +#define 
DEV_MAC_IFG_CFG_TX_IFG GENMASK(12, 8) +#define DEV_MAC_IFG_CFG_TX_IFG_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_TX_IFG, x) +#define DEV_MAC_IFG_CFG_TX_IFG_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_TX_IFG, x) + +#define DEV_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4) +#define DEV_MAC_IFG_CFG_RX_IFG2_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG2, x) +#define DEV_MAC_IFG_CFG_RX_IFG2_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG2, x) + +#define DEV_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0) +#define DEV_MAC_IFG_CFG_RX_IFG1_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG1, x) +#define DEV_MAC_IFG_CFG_RX_IFG1_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG1, x) + +/* DEV:MAC_CFG_STATUS:MAC_HDX_CFG */ +#define DEV_MAC_HDX_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 24, 0, 1, 4) + +#define DEV_MAC_HDX_CFG_SEED GENMASK(23, 16) +#define DEV_MAC_HDX_CFG_SEED_SET(x)\ + FIELD_PREP(DEV_MAC_HDX_CFG_SEED, x) +#define DEV_MAC_HDX_CFG_SEED_GET(x)\ + FIELD_GET(DEV_MAC_HDX_CFG_SEED, x) + +#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12) +#define DEV_MAC_HDX_CFG_SEED_LOAD_SET(x)\ + FIELD_PREP(DEV_MAC_HDX_CFG_SEED_LOAD, x) +#define DEV_MAC_HDX_CFG_SEED_LOAD_GET(x)\ + FIELD_GET(DEV_MAC_HDX_CFG_SEED_LOAD, x) + +/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_LOW_CFG */ +#define DEV_FC_MAC_LOW_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 32, 0, 1, 4) + +/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_HIGH_CFG */ +#define DEV_FC_MAC_HIGH_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 36, 0, 1, 4) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_CFG */ +#define DEV_PCS1G_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 0, 0, 1, 4) + +#define DEV_PCS1G_CFG_PCS_ENA BIT(0) +#define DEV_PCS1G_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_CFG_PCS_ENA, x) +#define DEV_PCS1G_CFG_PCS_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_CFG_PCS_ENA, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */ +#define DEV_PCS1G_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 4, 0, 1, 4) + +#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0) +#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x) +#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x) + +#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA BIT(1) +#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x) +#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_SD_CFG */ +#define DEV_PCS1G_SD_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 8, 0, 1, 4) + +#define DEV_PCS1G_SD_CFG_SD_ENA BIT(0) +#define DEV_PCS1G_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_SD_CFG_SD_ENA, x) +#define DEV_PCS1G_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_SD_CFG_SD_ENA, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */ +#define DEV_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 12, 0, 1, 4) + +#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16) +#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x) +#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x) + +#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8) +#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x) +#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x) + +#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT BIT(1) +#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x) +#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_GET(x)\ + 
FIELD_GET(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x) + +#define DEV_PCS1G_ANEG_CFG_ENA BIT(0) +#define DEV_PCS1G_ANEG_CFG_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_CFG_ENA, x) +#define DEV_PCS1G_ANEG_CFG_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_CFG_ENA, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */ +#define DEV_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 32, 0, 1, 4) + +#define DEV_PCS1G_ANEG_STATUS_LP_ADV GENMASK(31, 16) +#define DEV_PCS1G_ANEG_STATUS_LP_ADV_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_STATUS_LP_ADV, x) +#define DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_STATUS_LP_ADV, x) + +#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0) +#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x) +#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */ +#define DEV_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 40, 0, 1, 4) + +#define DEV_PCS1G_LINK_STATUS_LINK_STATUS BIT(4) +#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\ + FIELD_PREP(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x) +#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\ + FIELD_GET(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x) + +#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS BIT(0) +#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\ + FIELD_PREP(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x) +#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\ + FIELD_GET(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_STICKY */ +#define DEV_PCS1G_STICKY(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 48, 0, 1, 4) + +#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4) +#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\ + FIELD_PREP(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x) +#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\ + FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x) + +/* FDMA:FDMA:FDMA_CH_ACTIVATE */ +#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4) + +#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0) +#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\ + FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x) +#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\ + FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x) + +/* FDMA:FDMA:FDMA_CH_RELOAD */ +#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4) + +#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0) +#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\ + FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x) +#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\ + FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x) + +/* FDMA:FDMA:FDMA_CH_DISABLE */ +#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4) + +#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0) +#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\ + FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x) +#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\ + FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x) + +/* FDMA:FDMA:FDMA_CH_DB_DISCARD */ +#define FDMA_CH_DB_DISCARD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 16, 0, 1, 4) + +#define FDMA_CH_DB_DISCARD_DB_DISCARD GENMASK(7, 0) +#define FDMA_CH_DB_DISCARD_DB_DISCARD_SET(x)\ + FIELD_PREP(FDMA_CH_DB_DISCARD_DB_DISCARD, x) +#define FDMA_CH_DB_DISCARD_DB_DISCARD_GET(x)\ + FIELD_GET(FDMA_CH_DB_DISCARD_DB_DISCARD, x) + +/* FDMA:FDMA:FDMA_DCB_LLP */ +#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4) + +/* FDMA:FDMA:FDMA_DCB_LLP1 */ +#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4) + +/* FDMA:FDMA:FDMA_CH_ACTIVE */ +#define 
FDMA_CH_ACTIVE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 180, 0, 1, 4) + +/* FDMA:FDMA:FDMA_CH_CFG */ +#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4) + +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(4) +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x) +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x) + +#define FDMA_CH_CFG_CH_INJ_PORT BIT(3) +#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x) +#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x) + +#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(2, 1) +#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x) +#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x) + +#define FDMA_CH_CFG_CH_MEM BIT(0) +#define FDMA_CH_CFG_CH_MEM_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_MEM, x) +#define FDMA_CH_CFG_CH_MEM_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_MEM, x) + +/* FDMA:FDMA:FDMA_PORT_CTRL */ +#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4) + +#define FDMA_PORT_CTRL_INJ_STOP BIT(4) +#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x) +#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x) + +#define FDMA_PORT_CTRL_XTR_STOP BIT(2) +#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x) +#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x) + +/* FDMA:FDMA:FDMA_INTR_DB */ +#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4) + +/* FDMA:FDMA:FDMA_INTR_DB_ENA */ +#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4) + +#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0) +#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\ + FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x) +#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\ + FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x) + +/* FDMA:FDMA:FDMA_INTR_ERR */ +#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4) + +/* FDMA:FDMA:FDMA_ERRORS */ +#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4) + +/* PTP:PTP_CFG:PTP_PIN_INTR */ +#define PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 0, 0, 1, 4) + +#define PTP_PIN_INTR_INTR_PTP GENMASK(7, 0) +#define PTP_PIN_INTR_INTR_PTP_SET(x)\ + FIELD_PREP(PTP_PIN_INTR_INTR_PTP, x) +#define PTP_PIN_INTR_INTR_PTP_GET(x)\ + FIELD_GET(PTP_PIN_INTR_INTR_PTP, x) + +/* PTP:PTP_CFG:PTP_PIN_INTR_ENA */ +#define PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 4, 0, 1, 4) + +#define PTP_PIN_INTR_ENA_INTR_ENA GENMASK(7, 0) +#define PTP_PIN_INTR_ENA_INTR_ENA_SET(x)\ + FIELD_PREP(PTP_PIN_INTR_ENA_INTR_ENA, x) +#define PTP_PIN_INTR_ENA_INTR_ENA_GET(x)\ + FIELD_GET(PTP_PIN_INTR_ENA_INTR_ENA, x) + +/* PTP:PTP_CFG:PTP_DOM_CFG */ +#define PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 12, 0, 1, 4) + +#define PTP_DOM_CFG_ENA GENMASK(11, 9) +#define PTP_DOM_CFG_ENA_SET(x)\ + FIELD_PREP(PTP_DOM_CFG_ENA, x) +#define PTP_DOM_CFG_ENA_GET(x)\ + FIELD_GET(PTP_DOM_CFG_ENA, x) + +#define PTP_DOM_CFG_CLKCFG_DIS GENMASK(2, 0) +#define PTP_DOM_CFG_CLKCFG_DIS_SET(x)\ + FIELD_PREP(PTP_DOM_CFG_CLKCFG_DIS, x) +#define PTP_DOM_CFG_CLKCFG_DIS_GET(x)\ + FIELD_GET(PTP_DOM_CFG_CLKCFG_DIS, x) + +/* PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */ +#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP, 0, 1, 528, g, 3, 28, 0, r, 2, 4) + +/* PTP:PTP_PINS:PTP_PIN_CFG */ +#define PTP_PIN_CFG(g) 
__REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 0, 0, 1, 4) + +#define PTP_PIN_CFG_PIN_ACTION GENMASK(29, 27) +#define PTP_PIN_CFG_PIN_ACTION_SET(x)\ + FIELD_PREP(PTP_PIN_CFG_PIN_ACTION, x) +#define PTP_PIN_CFG_PIN_ACTION_GET(x)\ + FIELD_GET(PTP_PIN_CFG_PIN_ACTION, x) + +#define PTP_PIN_CFG_PIN_SYNC GENMASK(26, 25) +#define PTP_PIN_CFG_PIN_SYNC_SET(x)\ + FIELD_PREP(PTP_PIN_CFG_PIN_SYNC, x) +#define PTP_PIN_CFG_PIN_SYNC_GET(x)\ + FIELD_GET(PTP_PIN_CFG_PIN_SYNC, x) + +#define PTP_PIN_CFG_PIN_SELECT GENMASK(23, 21) +#define PTP_PIN_CFG_PIN_SELECT_SET(x)\ + FIELD_PREP(PTP_PIN_CFG_PIN_SELECT, x) +#define PTP_PIN_CFG_PIN_SELECT_GET(x)\ + FIELD_GET(PTP_PIN_CFG_PIN_SELECT, x) + +#define PTP_PIN_CFG_PIN_DOM GENMASK(17, 16) +#define PTP_PIN_CFG_PIN_DOM_SET(x)\ + FIELD_PREP(PTP_PIN_CFG_PIN_DOM, x) +#define PTP_PIN_CFG_PIN_DOM_GET(x)\ + FIELD_GET(PTP_PIN_CFG_PIN_DOM, x) + +/* PTP:PTP_PINS:PTP_TOD_SEC_MSB */ +#define PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 4, 0, 1, 4) + +#define PTP_TOD_SEC_MSB_TOD_SEC_MSB GENMASK(15, 0) +#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(x)\ + FIELD_PREP(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x) +#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_GET(x)\ + FIELD_GET(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x) + +/* PTP:PTP_PINS:PTP_TOD_SEC_LSB */ +#define PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 8, 0, 1, 4) + +/* PTP:PTP_PINS:PTP_TOD_NSEC */ +#define PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 12, 0, 1, 4) + +#define PTP_TOD_NSEC_TOD_NSEC GENMASK(29, 0) +#define PTP_TOD_NSEC_TOD_NSEC_SET(x)\ + FIELD_PREP(PTP_TOD_NSEC_TOD_NSEC, x) +#define PTP_TOD_NSEC_TOD_NSEC_GET(x)\ + FIELD_GET(PTP_TOD_NSEC_TOD_NSEC, x) + +/* PTP:PTP_PINS:WF_HIGH_PERIOD */ +#define PTP_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\ + 0, 1, 0, g, 8, 64, 24, 0, 1, 4) + +#define PTP_WF_HIGH_PERIOD_PIN_WFH(x) ((x) & GENMASK(29, 0)) +#define PTP_WF_HIGH_PERIOD_PIN_WFH_M GENMASK(29, 0) +#define PTP_WF_HIGH_PERIOD_PIN_WFH_X(x) ((x) & GENMASK(29, 0)) + +/* PTP:PTP_PINS:WF_LOW_PERIOD */ +#define PTP_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\ + 0, 1, 0, g, 8, 64, 28, 0, 1, 4) + +#define PTP_WF_LOW_PERIOD_PIN_WFL(x) ((x) & GENMASK(29, 0)) +#define PTP_WF_LOW_PERIOD_PIN_WFL_M GENMASK(29, 0) +#define PTP_WF_LOW_PERIOD_PIN_WFL_X(x) ((x) & GENMASK(29, 0)) + +/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */ +#define PTP_TWOSTEP_CTRL __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 0, 0, 1, 4) + +#define PTP_TWOSTEP_CTRL_NXT BIT(11) +#define PTP_TWOSTEP_CTRL_NXT_SET(x)\ + FIELD_PREP(PTP_TWOSTEP_CTRL_NXT, x) +#define PTP_TWOSTEP_CTRL_NXT_GET(x)\ + FIELD_GET(PTP_TWOSTEP_CTRL_NXT, x) + +#define PTP_TWOSTEP_CTRL_VLD BIT(10) +#define PTP_TWOSTEP_CTRL_VLD_SET(x)\ + FIELD_PREP(PTP_TWOSTEP_CTRL_VLD, x) +#define PTP_TWOSTEP_CTRL_VLD_GET(x)\ + FIELD_GET(PTP_TWOSTEP_CTRL_VLD, x) + +#define PTP_TWOSTEP_CTRL_STAMP_TX BIT(9) +#define PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\ + FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_TX, x) +#define PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\ + FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_TX, x) + +#define PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1) +#define PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\ + FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_PORT, x) +#define PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\ + FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_PORT, x) + +#define PTP_TWOSTEP_CTRL_OVFL BIT(0) +#define PTP_TWOSTEP_CTRL_OVFL_SET(x)\ + FIELD_PREP(PTP_TWOSTEP_CTRL_OVFL, x) +#define PTP_TWOSTEP_CTRL_OVFL_GET(x)\ + FIELD_GET(PTP_TWOSTEP_CTRL_OVFL, x) + +/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_STAMP */ +#define PTP_TWOSTEP_STAMP __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 4, 0, 1, 4) + +#define PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(31, 2) 
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\ + FIELD_PREP(PTP_TWOSTEP_STAMP_STAMP_NSEC, x) +#define PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\ + FIELD_GET(PTP_TWOSTEP_STAMP_STAMP_NSEC, x) + +/* DEVCPU_QS:XTR:XTR_GRP_CFG */ +#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4) + +#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2) +#define QS_XTR_GRP_CFG_MODE_SET(x)\ + FIELD_PREP(QS_XTR_GRP_CFG_MODE, x) +#define QS_XTR_GRP_CFG_MODE_GET(x)\ + FIELD_GET(QS_XTR_GRP_CFG_MODE, x) + +#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0) +#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\ + FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x) +#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\ + FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x) + +/* DEVCPU_QS:XTR:XTR_RD */ +#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4) + +/* DEVCPU_QS:XTR:XTR_FLUSH */ +#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4) + +/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */ +#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4) + +/* DEVCPU_QS:INJ:INJ_GRP_CFG */ +#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4) + +#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2) +#define QS_INJ_GRP_CFG_MODE_SET(x)\ + FIELD_PREP(QS_INJ_GRP_CFG_MODE, x) +#define QS_INJ_GRP_CFG_MODE_GET(x)\ + FIELD_GET(QS_INJ_GRP_CFG_MODE, x) + +#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0) +#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\ + FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x) +#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\ + FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x) + +/* DEVCPU_QS:INJ:INJ_WR */ +#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4) + +/* DEVCPU_QS:INJ:INJ_CTRL */ +#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4) + +#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21) +#define QS_INJ_CTRL_GAP_SIZE_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x) +#define QS_INJ_CTRL_GAP_SIZE_GET(x)\ + FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x) + +#define QS_INJ_CTRL_EOF BIT(19) +#define QS_INJ_CTRL_EOF_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_EOF, x) +#define QS_INJ_CTRL_EOF_GET(x)\ + FIELD_GET(QS_INJ_CTRL_EOF, x) + +#define QS_INJ_CTRL_SOF BIT(18) +#define QS_INJ_CTRL_SOF_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_SOF, x) +#define QS_INJ_CTRL_SOF_GET(x)\ + FIELD_GET(QS_INJ_CTRL_SOF, x) + +#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16) +#define QS_INJ_CTRL_VLD_BYTES_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x) +#define QS_INJ_CTRL_VLD_BYTES_GET(x)\ + FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x) + +/* DEVCPU_QS:INJ:INJ_STATUS */ +#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4) + +#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4) +#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\ + FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x) +#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\ + FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x) + +#define QS_INJ_STATUS_FIFO_RDY GENMASK(3, 2) +#define QS_INJ_STATUS_FIFO_RDY_SET(x)\ + FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x) +#define QS_INJ_STATUS_FIFO_RDY_GET(x)\ + FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x) + +/* QSYS:SYSTEM:PORT_MODE */ +#define QSYS_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 0, r, 10, 4) + +#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1) +#define QSYS_PORT_MODE_DEQUEUE_DIS_SET(x)\ + FIELD_PREP(QSYS_PORT_MODE_DEQUEUE_DIS, x) +#define QSYS_PORT_MODE_DEQUEUE_DIS_GET(x)\ + FIELD_GET(QSYS_PORT_MODE_DEQUEUE_DIS, x) + +/* QSYS:SYSTEM:SWITCH_PORT_MODE */ +#define QSYS_SW_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 80, r, 9, 4) + +#define QSYS_SW_PORT_MODE_PORT_ENA BIT(18) +#define 
QSYS_SW_PORT_MODE_PORT_ENA_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_PORT_ENA, x) +#define QSYS_SW_PORT_MODE_PORT_ENA_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_PORT_ENA, x) + +#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG GENMASK(16, 14) +#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x) +#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x) + +#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE BIT(12) +#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x) +#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x) + +#define QSYS_SW_PORT_MODE_TX_PFC_ENA GENMASK(11, 4) +#define QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_TX_PFC_ENA, x) +#define QSYS_SW_PORT_MODE_TX_PFC_ENA_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_TX_PFC_ENA, x) + +#define QSYS_SW_PORT_MODE_AGING_MODE GENMASK(1, 0) +#define QSYS_SW_PORT_MODE_AGING_MODE_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_AGING_MODE, x) +#define QSYS_SW_PORT_MODE_AGING_MODE_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_AGING_MODE, x) + +/* QSYS:SYSTEM:SW_STATUS */ +#define QSYS_SW_STATUS(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 164, r, 9, 4) + +#define QSYS_SW_STATUS_EQ_AVAIL GENMASK(7, 0) +#define QSYS_SW_STATUS_EQ_AVAIL_SET(x)\ + FIELD_PREP(QSYS_SW_STATUS_EQ_AVAIL, x) +#define QSYS_SW_STATUS_EQ_AVAIL_GET(x)\ + FIELD_GET(QSYS_SW_STATUS_EQ_AVAIL, x) + +/* QSYS:SYSTEM:CPU_GROUP_MAP */ +#define QSYS_CPU_GROUP_MAP __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 204, 0, 1, 4) + +/* QSYS:RES_CTRL:RES_CFG */ +#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 32768, g, 1024, 8, 0, 0, 1, 4) + +/* QSYS:HSCH:CIR_CFG */ +#define QSYS_CIR_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 0, 0, 1, 4) + +#define QSYS_CIR_CFG_CIR_RATE GENMASK(20, 6) +#define QSYS_CIR_CFG_CIR_RATE_SET(x)\ + FIELD_PREP(QSYS_CIR_CFG_CIR_RATE, x) +#define QSYS_CIR_CFG_CIR_RATE_GET(x)\ + FIELD_GET(QSYS_CIR_CFG_CIR_RATE, x) + +#define QSYS_CIR_CFG_CIR_BURST GENMASK(5, 0) +#define QSYS_CIR_CFG_CIR_BURST_SET(x)\ + FIELD_PREP(QSYS_CIR_CFG_CIR_BURST, x) +#define QSYS_CIR_CFG_CIR_BURST_GET(x)\ + FIELD_GET(QSYS_CIR_CFG_CIR_BURST, x) + +/* QSYS:HSCH:SE_CFG */ +#define QSYS_SE_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 8, 0, 1, 4) + +#define QSYS_SE_CFG_SE_DWRR_CNT GENMASK(9, 6) +#define QSYS_SE_CFG_SE_DWRR_CNT_SET(x)\ + FIELD_PREP(QSYS_SE_CFG_SE_DWRR_CNT, x) +#define QSYS_SE_CFG_SE_DWRR_CNT_GET(x)\ + FIELD_GET(QSYS_SE_CFG_SE_DWRR_CNT, x) + +#define QSYS_SE_CFG_SE_RR_ENA BIT(5) +#define QSYS_SE_CFG_SE_RR_ENA_SET(x)\ + FIELD_PREP(QSYS_SE_CFG_SE_RR_ENA, x) +#define QSYS_SE_CFG_SE_RR_ENA_GET(x)\ + FIELD_GET(QSYS_SE_CFG_SE_RR_ENA, x) + +#define QSYS_SE_CFG_SE_AVB_ENA BIT(4) +#define QSYS_SE_CFG_SE_AVB_ENA_SET(x)\ + FIELD_PREP(QSYS_SE_CFG_SE_AVB_ENA, x) +#define QSYS_SE_CFG_SE_AVB_ENA_GET(x)\ + FIELD_GET(QSYS_SE_CFG_SE_AVB_ENA, x) + +#define QSYS_SE_CFG_SE_FRM_MODE GENMASK(3, 2) +#define QSYS_SE_CFG_SE_FRM_MODE_SET(x)\ + FIELD_PREP(QSYS_SE_CFG_SE_FRM_MODE, x) +#define QSYS_SE_CFG_SE_FRM_MODE_GET(x)\ + FIELD_GET(QSYS_SE_CFG_SE_FRM_MODE, x) + +#define QSYS_SE_DWRR_CFG(g, r) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 12, r, 12, 4) + +#define QSYS_SE_DWRR_CFG_DWRR_COST GENMASK(4, 0) +#define QSYS_SE_DWRR_CFG_DWRR_COST_SET(x)\ + FIELD_PREP(QSYS_SE_DWRR_CFG_DWRR_COST, x) +#define QSYS_SE_DWRR_CFG_DWRR_COST_GET(x)\ + FIELD_GET(QSYS_SE_DWRR_CFG_DWRR_COST, x) + +/* QSYS:TAS_CONFIG:TAS_CFG_CTRL */ +#define QSYS_TAS_CFG_CTRL 
__REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 0, 0, 1, 4) + +#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX GENMASK(27, 23) +#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(x)\ + FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x) +#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_GET(x)\ + FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x) + +#define QSYS_TAS_CFG_CTRL_LIST_NUM GENMASK(22, 18) +#define QSYS_TAS_CFG_CTRL_LIST_NUM_SET(x)\ + FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM, x) +#define QSYS_TAS_CFG_CTRL_LIST_NUM_GET(x)\ + FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM, x) + +#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q BIT(17) +#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(x)\ + FIELD_PREP(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x) +#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_GET(x)\ + FIELD_GET(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x) + +#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM GENMASK(16, 5) +#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(x)\ + FIELD_PREP(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x) +#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_GET(x)\ + FIELD_GET(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x) + +/* QSYS:TAS_CONFIG:TAS_GATE_STATE_CTRL */ +#define QSYS_TAS_GS_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 4, 0, 1, 4) + +#define QSYS_TAS_GS_CTRL_HSCH_POS GENMASK(2, 0) +#define QSYS_TAS_GS_CTRL_HSCH_POS_SET(x)\ + FIELD_PREP(QSYS_TAS_GS_CTRL_HSCH_POS, x) +#define QSYS_TAS_GS_CTRL_HSCH_POS_GET(x)\ + FIELD_GET(QSYS_TAS_GS_CTRL_HSCH_POS, x) + +/* QSYS:TAS_CONFIG:TAS_STATEMACHINE_CFG */ +#define QSYS_TAS_STM_CFG __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 8, 0, 1, 4) + +#define QSYS_TAS_STM_CFG_REVISIT_DLY GENMASK(7, 0) +#define QSYS_TAS_STM_CFG_REVISIT_DLY_SET(x)\ + FIELD_PREP(QSYS_TAS_STM_CFG_REVISIT_DLY, x) +#define QSYS_TAS_STM_CFG_REVISIT_DLY_GET(x)\ + FIELD_GET(QSYS_TAS_STM_CFG_REVISIT_DLY, x) + +/* QSYS:TAS_PROFILE_CFG:TAS_PROFILE_CONFIG */ +#define QSYS_TAS_PROFILE_CFG(g) __REG(TARGET_QSYS, 0, 1, 30720, g, 16, 64, 32, 0, 1, 4) + +#define QSYS_TAS_PROFILE_CFG_PORT_NUM GENMASK(21, 19) +#define QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(x)\ + FIELD_PREP(QSYS_TAS_PROFILE_CFG_PORT_NUM, x) +#define QSYS_TAS_PROFILE_CFG_PORT_NUM_GET(x)\ + FIELD_GET(QSYS_TAS_PROFILE_CFG_PORT_NUM, x) + +#define QSYS_TAS_PROFILE_CFG_LINK_SPEED GENMASK(18, 16) +#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(x)\ + FIELD_PREP(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x) +#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_GET(x)\ + FIELD_GET(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x) + +/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_NSEC */ +#define QSYS_TAS_BT_NSEC __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 0, 0, 1, 4) + +#define QSYS_TAS_BT_NSEC_NSEC GENMASK(29, 0) +#define QSYS_TAS_BT_NSEC_NSEC_SET(x)\ + FIELD_PREP(QSYS_TAS_BT_NSEC_NSEC, x) +#define QSYS_TAS_BT_NSEC_NSEC_GET(x)\ + FIELD_GET(QSYS_TAS_BT_NSEC_NSEC, x) + +/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_LSB */ +#define QSYS_TAS_BT_SEC_LSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 4, 0, 1, 4) + +/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_MSB */ +#define QSYS_TAS_BT_SEC_MSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 8, 0, 1, 4) + +#define QSYS_TAS_BT_SEC_MSB_SEC_MSB GENMASK(15, 0) +#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(x)\ + FIELD_PREP(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x) +#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_GET(x)\ + FIELD_GET(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x) + +/* QSYS:TAS_LIST_CFG:TAS_CYCLE_TIME_CFG */ +#define QSYS_TAS_CT_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 24, 0, 1, 4) + +/* QSYS:TAS_LIST_CFG:TAS_STARTUP_CFG */ +#define QSYS_TAS_STARTUP_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 28, 0, 1, 4) + +#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX GENMASK(27, 23) +#define 
QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(x)\ + FIELD_PREP(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x) +#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_GET(x)\ + FIELD_GET(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x) + +/* QSYS:TAS_LIST_CFG:TAS_LIST_CFG */ +#define QSYS_TAS_LIST_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 32, 0, 1, 4) + +#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR GENMASK(11, 0) +#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(x)\ + FIELD_PREP(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x) +#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(x)\ + FIELD_GET(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x) + +/* QSYS:TAS_LIST_CFG:TAS_LIST_STATE */ +#define QSYS_TAS_LST __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 36, 0, 1, 4) + +#define QSYS_TAS_LST_LIST_STATE GENMASK(2, 0) +#define QSYS_TAS_LST_LIST_STATE_SET(x)\ + FIELD_PREP(QSYS_TAS_LST_LIST_STATE, x) +#define QSYS_TAS_LST_LIST_STATE_GET(x)\ + FIELD_GET(QSYS_TAS_LST_LIST_STATE, x) + +/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG */ +#define QSYS_TAS_GCL_CT_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 0, 0, 1, 4) + +#define QSYS_TAS_GCL_CT_CFG_HSCH_POS GENMASK(12, 10) +#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(x)\ + FIELD_PREP(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x) +#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_GET(x)\ + FIELD_GET(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x) + +#define QSYS_TAS_GCL_CT_CFG_GATE_STATE GENMASK(9, 2) +#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(x)\ + FIELD_PREP(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x) +#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_GET(x)\ + FIELD_GET(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x) + +#define QSYS_TAS_GCL_CT_CFG_OP_TYPE GENMASK(1, 0) +#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(x)\ + FIELD_PREP(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x) +#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_GET(x)\ + FIELD_GET(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x) + +/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG2 */ +#define QSYS_TAS_GCL_CT_CFG2 __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 4, 0, 1, 4) + +#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE GENMASK(15, 12) +#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(x)\ + FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x) +#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_GET(x)\ + FIELD_GET(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x) + +#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL GENMASK(11, 0) +#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(x)\ + FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x) +#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(x)\ + FIELD_GET(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x) + +/* QSYS:TAS_GCL_CFG:TAS_GCL_TIME_CFG */ +#define QSYS_TAS_GCL_TM_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 8, 0, 1, 4) + +/* QSYS:HSCH_TAS_STATE:TAS_GATE_STATE */ +#define QSYS_TAS_GATE_STATE __REG(TARGET_QSYS, 0, 1, 28004, 0, 1, 4, 0, 0, 1, 4) + +#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE GENMASK(7, 0) +#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(x)\ + FIELD_PREP(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x) +#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_GET(x)\ + FIELD_GET(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x) + +/* REW:PORT:PORT_VLAN_CFG */ +#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 0, 0, 1, 4) + +#define REW_PORT_VLAN_CFG_PORT_TPID GENMASK(31, 16) +#define REW_PORT_VLAN_CFG_PORT_TPID_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_TPID, x) +#define REW_PORT_VLAN_CFG_PORT_TPID_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_TPID, x) + +#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0) +#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x) +#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x) + +/* REW:PORT:TAG_CFG */ +#define REW_TAG_CFG(g) __REG(TARGET_REW, 0, 1, 0, 
g, 10, 128, 4, 0, 1, 4) + +#define REW_TAG_CFG_TAG_CFG GENMASK(8, 7) +#define REW_TAG_CFG_TAG_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CFG_TAG_CFG, x) +#define REW_TAG_CFG_TAG_CFG_GET(x)\ + FIELD_GET(REW_TAG_CFG_TAG_CFG, x) + +#define REW_TAG_CFG_TAG_TPID_CFG GENMASK(6, 5) +#define REW_TAG_CFG_TAG_TPID_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CFG_TAG_TPID_CFG, x) +#define REW_TAG_CFG_TAG_TPID_CFG_GET(x)\ + FIELD_GET(REW_TAG_CFG_TAG_TPID_CFG, x) + +/* REW:PORT:PORT_CFG */ +#define REW_PORT_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 8, 0, 1, 4) + +#define REW_PORT_CFG_NO_REWRITE BIT(0) +#define REW_PORT_CFG_NO_REWRITE_SET(x)\ + FIELD_PREP(REW_PORT_CFG_NO_REWRITE, x) +#define REW_PORT_CFG_NO_REWRITE_GET(x)\ + FIELD_GET(REW_PORT_CFG_NO_REWRITE, x) + +/* SYS:SYSTEM:RESET_CFG */ +#define SYS_RESET_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 0, 0, 1, 4) + +#define SYS_RESET_CFG_CORE_ENA BIT(0) +#define SYS_RESET_CFG_CORE_ENA_SET(x)\ + FIELD_PREP(SYS_RESET_CFG_CORE_ENA, x) +#define SYS_RESET_CFG_CORE_ENA_GET(x)\ + FIELD_GET(SYS_RESET_CFG_CORE_ENA, x) + +/* SYS:SYSTEM:PORT_MODE */ +#define SYS_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 44, r, 10, 4) + +#define SYS_PORT_MODE_INCL_INJ_HDR GENMASK(5, 4) +#define SYS_PORT_MODE_INCL_INJ_HDR_SET(x)\ + FIELD_PREP(SYS_PORT_MODE_INCL_INJ_HDR, x) +#define SYS_PORT_MODE_INCL_INJ_HDR_GET(x)\ + FIELD_GET(SYS_PORT_MODE_INCL_INJ_HDR, x) + +#define SYS_PORT_MODE_INCL_XTR_HDR GENMASK(3, 2) +#define SYS_PORT_MODE_INCL_XTR_HDR_SET(x)\ + FIELD_PREP(SYS_PORT_MODE_INCL_XTR_HDR, x) +#define SYS_PORT_MODE_INCL_XTR_HDR_GET(x)\ + FIELD_GET(SYS_PORT_MODE_INCL_XTR_HDR, x) + +/* SYS:SYSTEM:FRONT_PORT_MODE */ +#define SYS_FRONT_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 84, r, 8, 4) + +#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(1) +#define SYS_FRONT_PORT_MODE_HDX_MODE_SET(x)\ + FIELD_PREP(SYS_FRONT_PORT_MODE_HDX_MODE, x) +#define SYS_FRONT_PORT_MODE_HDX_MODE_GET(x)\ + FIELD_GET(SYS_FRONT_PORT_MODE_HDX_MODE, x) + +/* SYS:SYSTEM:FRM_AGING */ +#define SYS_FRM_AGING __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 116, 0, 1, 4) + +#define SYS_FRM_AGING_AGE_TX_ENA BIT(20) +#define SYS_FRM_AGING_AGE_TX_ENA_SET(x)\ + FIELD_PREP(SYS_FRM_AGING_AGE_TX_ENA, x) +#define SYS_FRM_AGING_AGE_TX_ENA_GET(x)\ + FIELD_GET(SYS_FRM_AGING_AGE_TX_ENA, x) + +/* SYS:SYSTEM:STAT_CFG */ +#define SYS_STAT_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 120, 0, 1, 4) + +#define SYS_STAT_CFG_STAT_VIEW GENMASK(9, 0) +#define SYS_STAT_CFG_STAT_VIEW_SET(x)\ + FIELD_PREP(SYS_STAT_CFG_STAT_VIEW, x) +#define SYS_STAT_CFG_STAT_VIEW_GET(x)\ + FIELD_GET(SYS_STAT_CFG_STAT_VIEW, x) + +/* SYS:PAUSE_CFG:PAUSE_CFG */ +#define SYS_PAUSE_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 0, r, 9, 4) + +#define SYS_PAUSE_CFG_PAUSE_START GENMASK(18, 10) +#define SYS_PAUSE_CFG_PAUSE_START_SET(x)\ + FIELD_PREP(SYS_PAUSE_CFG_PAUSE_START, x) +#define SYS_PAUSE_CFG_PAUSE_START_GET(x)\ + FIELD_GET(SYS_PAUSE_CFG_PAUSE_START, x) + +#define SYS_PAUSE_CFG_PAUSE_STOP GENMASK(9, 1) +#define SYS_PAUSE_CFG_PAUSE_STOP_SET(x)\ + FIELD_PREP(SYS_PAUSE_CFG_PAUSE_STOP, x) +#define SYS_PAUSE_CFG_PAUSE_STOP_GET(x)\ + FIELD_GET(SYS_PAUSE_CFG_PAUSE_STOP, x) + +#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0) +#define SYS_PAUSE_CFG_PAUSE_ENA_SET(x)\ + FIELD_PREP(SYS_PAUSE_CFG_PAUSE_ENA, x) +#define SYS_PAUSE_CFG_PAUSE_ENA_GET(x)\ + FIELD_GET(SYS_PAUSE_CFG_PAUSE_ENA, x) + +/* SYS:PAUSE_CFG:ATOP */ +#define SYS_ATOP(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 40, r, 9, 4) + +/* SYS:PAUSE_CFG:ATOP_TOT_CFG */ +#define SYS_ATOP_TOT_CFG __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 
112, 76, 0, 1, 4) + +/* SYS:PAUSE_CFG:MAC_FC_CFG */ +#define SYS_MAC_FC_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 80, r, 8, 4) + +#define SYS_MAC_FC_CFG_FC_LINK_SPEED GENMASK(27, 26) +#define SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_FC_LINK_SPEED, x) +#define SYS_MAC_FC_CFG_FC_LINK_SPEED_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_FC_LINK_SPEED, x) + +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG GENMASK(25, 20) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x) + +#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18) +#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x) +#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x) + +#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17) +#define SYS_MAC_FC_CFG_TX_FC_ENA_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_TX_FC_ENA, x) +#define SYS_MAC_FC_CFG_TX_FC_ENA_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_TX_FC_ENA, x) + +#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16) +#define SYS_MAC_FC_CFG_RX_FC_ENA_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_RX_FC_ENA, x) +#define SYS_MAC_FC_CFG_RX_FC_ENA_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_RX_FC_ENA, x) + +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG GENMASK(15, 0) +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x) +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x) + +/* SYS:STAT:CNT */ +#define SYS_CNT(g) __REG(TARGET_SYS, 0, 1, 0, g, 896, 4, 0, 0, 1, 4) + +/* SYS:RAM_CTRL:RAM_INIT */ +#define SYS_RAM_INIT __REG(TARGET_SYS, 0, 1, 4432, 0, 1, 4, 0, 0, 1, 4) + +#define SYS_RAM_INIT_RAM_INIT BIT(1) +#define SYS_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(SYS_RAM_INIT_RAM_INIT, x) +#define SYS_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(SYS_RAM_INIT_RAM_INIT, x) + +#endif /* _LAN966X_REGS_H_ */ diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c new file mode 100644 index 000000000..1c88120eb --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c @@ -0,0 +1,664 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/if_bridge.h> +#include <net/switchdev.h> + +#include "lan966x_main.h" + +static struct notifier_block lan966x_netdevice_nb __read_mostly; + +static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port, + u32 pgid_ip) +{ + struct lan966x *lan966x = port->lan966x; + u32 flood_mask_ip; + + flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip)); + flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip); + + /* If mcast snooping is not enabled then use mcast flood mask + * to decide to enable multicast flooding or not. 
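+ * When snooping is enabled, IP multicast flooding is instead always disabled + * on the port (the else branch below), and forwarding is driven by the mdb + * entries that lan966x_port_mc_set() installs.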
+ */ + if (!port->mcast_ena) { + u32 flood_mask; + + flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC)); + flood_mask = ANA_PGID_PGID_GET(flood_mask); + + if (flood_mask & BIT(port->chip_port)) + flood_mask_ip |= BIT(port->chip_port); + else + flood_mask_ip &= ~BIT(port->chip_port); + } else { + flood_mask_ip &= ~BIT(port->chip_port); + } + + lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip), + ANA_PGID_PGID, + lan966x, ANA_PGID(pgid_ip)); +} + +static void lan966x_port_set_mcast_flood(struct lan966x_port *port, + bool enabled) +{ + u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC)); + + val = ANA_PGID_PGID_GET(val); + if (enabled) + val |= BIT(port->chip_port); + else + val &= ~BIT(port->chip_port); + + lan_rmw(ANA_PGID_PGID_SET(val), + ANA_PGID_PGID, + port->lan966x, ANA_PGID(PGID_MC)); + + if (!port->mcast_ena) { + lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4); + lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6); + } +} + +static void lan966x_port_set_ucast_flood(struct lan966x_port *port, + bool enabled) +{ + u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC)); + + val = ANA_PGID_PGID_GET(val); + if (enabled) + val |= BIT(port->chip_port); + else + val &= ~BIT(port->chip_port); + + lan_rmw(ANA_PGID_PGID_SET(val), + ANA_PGID_PGID, + port->lan966x, ANA_PGID(PGID_UC)); +} + +static void lan966x_port_set_bcast_flood(struct lan966x_port *port, + bool enabled) +{ + u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC)); + + val = ANA_PGID_PGID_GET(val); + if (enabled) + val |= BIT(port->chip_port); + else + val &= ~BIT(port->chip_port); + + lan_rmw(ANA_PGID_PGID_SET(val), + ANA_PGID_PGID, + port->lan966x, ANA_PGID(PGID_BC)); +} + +static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled) +{ + lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled), + ANA_PORT_CFG_LEARN_ENA, + port->lan966x, ANA_PORT_CFG(port->chip_port)); + + port->learn_ena = enabled; +} + +static void lan966x_port_bridge_flags(struct lan966x_port *port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & BR_MCAST_FLOOD) + lan966x_port_set_mcast_flood(port, + !!(flags.val & BR_MCAST_FLOOD)); + + if (flags.mask & BR_FLOOD) + lan966x_port_set_ucast_flood(port, + !!(flags.val & BR_FLOOD)); + + if (flags.mask & BR_BCAST_FLOOD) + lan966x_port_set_bcast_flood(port, + !!(flags.val & BR_BCAST_FLOOD)); + + if (flags.mask & BR_LEARNING) + lan966x_port_set_learning(port, + !!(flags.val & BR_LEARNING)); +} + +static int lan966x_port_pre_bridge_flags(struct lan966x_port *port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD | + BR_LEARNING)) + return -EINVAL; + + return 0; +} + +void lan966x_update_fwd_mask(struct lan966x *lan966x) +{ + int i; + + for (i = 0; i < lan966x->num_phys_ports; i++) { + struct lan966x_port *port = lan966x->ports[i]; + unsigned long mask = 0; + + if (port && lan966x->bridge_fwd_mask & BIT(i)) { + mask = lan966x->bridge_fwd_mask & ~BIT(i); + + if (port->bond) + mask &= ~lan966x_lag_get_mask(lan966x, + port->bond); + } + + mask |= BIT(CPU_PORT); + + lan_wr(ANA_PGID_PGID_SET(mask), + lan966x, ANA_PGID(PGID_SRC + i)); + } +} + +void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state) +{ + struct lan966x *lan966x = port->lan966x; + bool learn_ena = false; + + if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) && + port->learn_ena) + learn_ena = true; + + if (state == BR_STATE_FORWARDING) + lan966x->bridge_fwd_mask |= BIT(port->chip_port); + else + lan966x->bridge_fwd_mask &= ~BIT(port->chip_port); + + 
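+ /* Apply the learning state resolved above: learning stays active only + * in the LEARNING and FORWARDING states, and only if it was also + * enabled through the bridge port flags. + */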
lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena), + ANA_PORT_CFG_LEARN_ENA, + lan966x, ANA_PORT_CFG(port->chip_port)); + + lan966x_update_fwd_mask(lan966x); +} + +void lan966x_port_ageing_set(struct lan966x_port *port, + unsigned long ageing_clock_t) +{ + unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); + u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; + + lan966x_mac_set_ageing(port->lan966x, ageing_time); +} + +static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena) +{ + struct lan966x *lan966x = port->lan966x; + + port->mcast_ena = mcast_ena; + if (mcast_ena) + lan966x_mdb_restore_entries(lan966x); + else + lan966x_mdb_clear_entries(lan966x); + + lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) | + ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) | + ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena), + ANA_CPU_FWD_CFG_IGMP_REDIR_ENA | + ANA_CPU_FWD_CFG_MLD_REDIR_ENA | + ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, + lan966x, ANA_CPU_FWD_CFG(port->chip_port)); + + lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4); + lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6); +} + +static int lan966x_port_attr_set(struct net_device *dev, const void *ctx, + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack) +{ + struct lan966x_port *port = netdev_priv(dev); + int err = 0; + + if (ctx && ctx != port) + return 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + lan966x_port_bridge_flags(port, attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + lan966x_port_stp_state_set(port, attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + lan966x_port_ageing_set(port, attr->u.ageing_time); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering); + lan966x_vlan_port_apply(port); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: + lan966x_port_mc_set(port, !attr->u.mc_disabled); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int lan966x_port_bridge_join(struct lan966x_port *port, + struct net_device *brport_dev, + struct net_device *bridge, + struct netlink_ext_ack *extack) +{ + struct switchdev_brport_flags flags = {0}; + struct lan966x *lan966x = port->lan966x; + struct net_device *dev = port->dev; + int err; + + if (!lan966x->bridge_mask) { + lan966x->bridge = bridge; + } else { + if (lan966x->bridge != bridge) { + NL_SET_ERR_MSG_MOD(extack, "Adding the port to a different bridge is not allowed"); + return -ENODEV; + } + } + + err = switchdev_bridge_port_offload(brport_dev, dev, port, + &lan966x_switchdev_nb, + &lan966x_switchdev_blocking_nb, + false, extack); + if (err) + return err; + + lan966x->bridge_mask |= BIT(port->chip_port); + + flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; + flags.val = flags.mask; + lan966x_port_bridge_flags(port, flags); + + return 0; +} + +static void lan966x_port_bridge_leave(struct lan966x_port *port, + struct net_device *bridge) +{ + struct switchdev_brport_flags flags = {0}; + struct lan966x *lan966x = port->lan966x; + + flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; + flags.val = flags.mask & ~BR_LEARNING; + lan966x_port_bridge_flags(port, flags); + + lan966x->bridge_mask &= ~BIT(port->chip_port); + + if (!lan966x->bridge_mask) + lan966x->bridge = NULL; + + /* Set the port back to host mode
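+ * (VLAN unaware, with the host PVID restored as the port VID).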
*/ + lan966x_vlan_port_set_vlan_aware(port, false); + lan966x_vlan_port_set_vid(port, HOST_PVID, false, false); + lan966x_vlan_port_apply(port); +} + +int lan966x_port_changeupper(struct net_device *dev, + struct net_device *brport_dev, + struct netdev_notifier_changeupper_info *info) +{ + struct lan966x_port *port = netdev_priv(dev); + struct netlink_ext_ack *extack; + int err = 0; + + extack = netdev_notifier_info_to_extack(&info->info); + + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) + err = lan966x_port_bridge_join(port, brport_dev, + info->upper_dev, + extack); + else + lan966x_port_bridge_leave(port, info->upper_dev); + } + + if (netif_is_lag_master(info->upper_dev)) { + if (info->linking) + err = lan966x_lag_port_join(port, info->upper_dev, + info->upper_dev, + extack); + else + lan966x_lag_port_leave(port, info->upper_dev); + } + + return err; +} + +int lan966x_port_prechangeupper(struct net_device *dev, + struct net_device *brport_dev, + struct netdev_notifier_changeupper_info *info) +{ + struct lan966x_port *port = netdev_priv(dev); + int err = NOTIFY_DONE; + + if (netif_is_bridge_master(info->upper_dev) && !info->linking) { + switchdev_bridge_port_unoffload(port->dev, port, NULL, NULL); + lan966x_fdb_flush_workqueue(port->lan966x); + } + + if (netif_is_lag_master(info->upper_dev)) { + err = lan966x_lag_port_prechangeupper(dev, info); + if (err || info->linking) + return err; + + switchdev_bridge_port_unoffload(brport_dev, port, NULL, NULL); + lan966x_fdb_flush_workqueue(port->lan966x); + } + + return err; +} + +static int lan966x_foreign_bridging_check(struct net_device *upper, + bool *has_foreign, + bool *seen_lan966x, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = NULL; + struct net_device *dev; + struct list_head *iter; + + if (!netif_is_bridge_master(upper) && + !netif_is_lag_master(upper)) + return 0; + + netdev_for_each_lower_dev(upper, dev, iter) { + if (lan966x_netdevice_check(dev)) { + struct lan966x_port *port = netdev_priv(dev); + + if (lan966x) { + /* Upper already has at least one port of a + * lan966x switch inside it, check that it's + * the same instance of the driver. 
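+ * Ports of two different driver instances cannot be bridged in HW, so + * this case is rejected below.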
+ */ + if (port->lan966x != lan966x) { + NL_SET_ERR_MSG_MOD(extack, + "Bridging between multiple lan966x switches disallowed"); + return -EINVAL; + } + } else { + /* This is the first lan966x port inside this + * upper device + */ + lan966x = port->lan966x; + *seen_lan966x = true; + } + } else if (netif_is_lag_master(dev)) { + /* Allow bond interfaces that contain only lan966x + * devices + */ + if (lan966x_foreign_bridging_check(dev, has_foreign, + seen_lan966x, + extack)) + return -EINVAL; + } else { + *has_foreign = true; + } + + if (*seen_lan966x && *has_foreign) { + NL_SET_ERR_MSG_MOD(extack, + "Bridging lan966x ports with foreign interfaces disallowed"); + return -EINVAL; + } + } + + return 0; +} + +static int lan966x_bridge_check(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + bool has_foreign = false; + bool seen_lan966x = false; + + return lan966x_foreign_bridging_check(info->upper_dev, + &has_foreign, + &seen_lan966x, + info->info.extack); +} + +static int lan966x_netdevice_port_event(struct net_device *dev, + struct notifier_block *nb, + unsigned long event, void *ptr) +{ + int err = 0; + + if (!lan966x_netdevice_check(dev)) { + switch (event) { + case NETDEV_CHANGEUPPER: + case NETDEV_PRECHANGEUPPER: + err = lan966x_bridge_check(dev, ptr); + if (err) + return err; + + if (netif_is_lag_master(dev)) { + if (event == NETDEV_CHANGEUPPER) + err = lan966x_lag_netdev_changeupper(dev, + ptr); + else + err = lan966x_lag_netdev_prechangeupper(dev, + ptr); + + return err; + } + break; + default: + return 0; + } + + return 0; + } + + switch (event) { + case NETDEV_PRECHANGEUPPER: + err = lan966x_port_prechangeupper(dev, dev, ptr); + break; + case NETDEV_CHANGEUPPER: + err = lan966x_bridge_check(dev, ptr); + if (err) + return err; + + err = lan966x_port_changeupper(dev, dev, ptr); + break; + case NETDEV_CHANGELOWERSTATE: + err = lan966x_lag_port_changelowerstate(dev, ptr); + break; + } + + return err; +} + +static int lan966x_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int ret; + + ret = lan966x_netdevice_port_event(dev, nb, event, ptr); + + return notifier_from_errno(ret); +} + +static bool lan966x_foreign_dev_check(const struct net_device *dev, + const struct net_device *foreign_dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + int i; + + if (netif_is_bridge_master(foreign_dev)) + if (lan966x->bridge == foreign_dev) + return false; + + if (netif_is_lag_master(foreign_dev)) + for (i = 0; i < lan966x->num_phys_ports; ++i) + if (lan966x->ports[i] && + lan966x->ports[i]->bond == foreign_dev) + return false; + + return true; +} + +static int lan966x_switchdev_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + lan966x_netdevice_check, + lan966x_port_attr_set); + return notifier_from_errno(err); + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + err = switchdev_handle_fdb_event_to_device(dev, event, ptr, + lan966x_netdevice_check, + lan966x_foreign_dev_check, + lan966x_handle_fdb); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +static int lan966x_handle_port_vlan_add(struct lan966x_port *port, + const struct switchdev_obj *obj) +{ + const struct switchdev_obj_port_vlan *v =
SWITCHDEV_OBJ_PORT_VLAN(obj); + struct lan966x *lan966x = port->lan966x; + + if (!netif_is_bridge_master(obj->orig_dev)) + lan966x_vlan_port_add_vlan(port, v->vid, + v->flags & BRIDGE_VLAN_INFO_PVID, + v->flags & BRIDGE_VLAN_INFO_UNTAGGED); + else + lan966x_vlan_cpu_add_vlan(lan966x, v->vid); + + return 0; +} + +static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack) +{ + struct lan966x_port *port = netdev_priv(dev); + int err; + + if (ctx && ctx != port) + return 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = lan966x_handle_port_vlan_add(port, obj); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = lan966x_handle_port_mdb_add(port, obj); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int lan966x_handle_port_vlan_del(struct lan966x_port *port, + const struct switchdev_obj *obj) +{ + const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj); + struct lan966x *lan966x = port->lan966x; + + if (!netif_is_bridge_master(obj->orig_dev)) + lan966x_vlan_port_del_vlan(port, v->vid); + else + lan966x_vlan_cpu_del_vlan(lan966x, v->vid); + + return 0; +} + +static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj) +{ + struct lan966x_port *port = netdev_priv(dev); + int err; + + if (ctx && ctx != port) + return 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = lan966x_handle_port_vlan_del(port, obj); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = lan966x_handle_port_mdb_del(port, obj); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int lan966x_switchdev_blocking_event(struct notifier_block *nb, + unsigned long event, + void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add(dev, ptr, + lan966x_netdevice_check, + lan966x_handle_port_obj_add); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del(dev, ptr, + lan966x_netdevice_check, + lan966x_handle_port_obj_del); + return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + lan966x_netdevice_check, + lan966x_port_attr_set); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +static struct notifier_block lan966x_netdevice_nb __read_mostly = { + .notifier_call = lan966x_netdevice_event, +}; + +struct notifier_block lan966x_switchdev_nb __read_mostly = { + .notifier_call = lan966x_switchdev_event, +}; + +struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = { + .notifier_call = lan966x_switchdev_blocking_event, +}; + +void lan966x_register_notifier_blocks(void) +{ + register_netdevice_notifier(&lan966x_netdevice_nb); + register_switchdev_notifier(&lan966x_switchdev_nb); + register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb); +} + +void lan966x_unregister_notifier_blocks(void) +{ + unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb); + unregister_switchdev_notifier(&lan966x_switchdev_nb); + unregister_netdevice_notifier(&lan966x_netdevice_nb); +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c new file mode 100644 index 000000000..3f5b21206 --- 
/dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c @@ -0,0 +1,528 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +#define LAN966X_TAPRIO_TIMEOUT_MS 1000 +#define LAN966X_TAPRIO_ENTRIES_PER_PORT 2 + +/* Minimum supported cycle time in nanoseconds */ +#define LAN966X_TAPRIO_MIN_CYCLE_TIME_NS NSEC_PER_USEC + +/* Maximum supported cycle time in nanoseconds */ +#define LAN966X_TAPRIO_MAX_CYCLE_TIME_NS (NSEC_PER_SEC - 1) + +/* Total number of TAS GCL entries */ +#define LAN966X_TAPRIO_NUM_GCL 256 + +/* TAPRIO link speeds for calculation of guard band */ +enum lan966x_taprio_link_speed { + LAN966X_TAPRIO_SPEED_NO_GB, + LAN966X_TAPRIO_SPEED_10, + LAN966X_TAPRIO_SPEED_100, + LAN966X_TAPRIO_SPEED_1000, + LAN966X_TAPRIO_SPEED_2500, +}; + +/* TAPRIO list states */ +enum lan966x_taprio_state { + LAN966X_TAPRIO_STATE_ADMIN, + LAN966X_TAPRIO_STATE_ADVANCING, + LAN966X_TAPRIO_STATE_PENDING, + LAN966X_TAPRIO_STATE_OPERATING, + LAN966X_TAPRIO_STATE_TERMINATING, + LAN966X_TAPRIO_STATE_MAX, +}; + +/* TAPRIO GCL command */ +enum lan966x_taprio_gcl_cmd { + LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES = 0, +}; + +static u32 lan966x_taprio_list_index(struct lan966x_port *port, u8 entry) +{ + return port->chip_port * LAN966X_TAPRIO_ENTRIES_PER_PORT + entry; +} + +static u32 lan966x_taprio_list_state_get(struct lan966x_port *port) +{ + struct lan966x *lan966x = port->lan966x; + u32 val; + + val = lan_rd(lan966x, QSYS_TAS_LST); + return QSYS_TAS_LST_LIST_STATE_GET(val); +} + +static u32 lan966x_taprio_list_index_state_get(struct lan966x_port *port, + u32 list) +{ + struct lan966x *lan966x = port->lan966x; + + lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list), + QSYS_TAS_CFG_CTRL_LIST_NUM, + lan966x, QSYS_TAS_CFG_CTRL); + + return lan966x_taprio_list_state_get(port); +} + +static void lan966x_taprio_list_state_set(struct lan966x_port *port, + u32 state) +{ + struct lan966x *lan966x = port->lan966x; + + lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(state), + QSYS_TAS_LST_LIST_STATE, + lan966x, QSYS_TAS_LST); +} + +static int lan966x_taprio_list_shutdown(struct lan966x_port *port, + u32 list) +{ + struct lan966x *lan966x = port->lan966x; + bool pending, operating; + unsigned long end; + u32 state; + + end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS); + /* It is required to try multiple times to set the state of the list, + * because the HW can overwrite this. + */ + do { + state = lan966x_taprio_list_state_get(port); + + pending = false; + operating = false; + + if (state == LAN966X_TAPRIO_STATE_ADVANCING || + state == LAN966X_TAPRIO_STATE_PENDING) { + lan966x_taprio_list_state_set(port, + LAN966X_TAPRIO_STATE_ADMIN); + pending = true; + } + + if (state == LAN966X_TAPRIO_STATE_OPERATING) { + lan966x_taprio_list_state_set(port, + LAN966X_TAPRIO_STATE_TERMINATING); + operating = true; + } + + /* If the entry was in pending and is now in admin, then there + * is nothing else to do, so just bail out + */ + state = lan966x_taprio_list_state_get(port); + if (pending && + state == LAN966X_TAPRIO_STATE_ADMIN) + return 0; + + /* If the list was in operating and now is in terminating or + * admin, then it is OK to exit, but it needs to wait until the + * list gets to admin. It is not required to set the state + * again.
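+ * The second polling loop below performs that wait.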
+ */ + if (operating && + (state == LAN966X_TAPRIO_STATE_TERMINATING || + state == LAN966X_TAPRIO_STATE_ADMIN)) + break; + + } while (!time_after(jiffies, end)); + + end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS); + do { + state = lan966x_taprio_list_state_get(port); + if (state == LAN966X_TAPRIO_STATE_ADMIN) + break; + + } while (!time_after(jiffies, end)); + + /* If the list was in operating mode, it could be stopped while some + * queues were closed, so make sure to restore "all-queues-open" + */ + if (operating) { + lan_wr(QSYS_TAS_GS_CTRL_HSCH_POS_SET(port->chip_port), + lan966x, QSYS_TAS_GS_CTRL); + + lan_wr(QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(0xff), + lan966x, QSYS_TAS_GATE_STATE); + } + + return 0; +} + +static int lan966x_taprio_shutdown(struct lan966x_port *port) +{ + u32 i, list, state; + int err; + + for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) { + list = lan966x_taprio_list_index(port, i); + state = lan966x_taprio_list_index_state_get(port, list); + if (state == LAN966X_TAPRIO_STATE_ADMIN) + continue; + + err = lan966x_taprio_list_shutdown(port, list); + if (err) + return err; + } + + return 0; +} + +/* Find a suitable list for a new schedule. First priority is a list in state + * pending. Second priority is a list in state admin. + */ +static int lan966x_taprio_find_list(struct lan966x_port *port, + struct tc_taprio_qopt_offload *qopt, + int *new_list, int *obs_list) +{ + int state[LAN966X_TAPRIO_ENTRIES_PER_PORT]; + int list[LAN966X_TAPRIO_ENTRIES_PER_PORT]; + int err, oper = -1; + u32 i; + + *new_list = -1; + *obs_list = -1; + + /* If there is already an entry in operating mode, return this list in + * obs_list, such that when the new list gets activated, the operating + * list will be stopped. In this way it is possible to have smooth + * transitions between the lists + */ + for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) { + list[i] = lan966x_taprio_list_index(port, i); + state[i] = lan966x_taprio_list_index_state_get(port, list[i]); + if (state[i] == LAN966X_TAPRIO_STATE_OPERATING) + oper = list[i]; + } + + for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) { + if (state[i] == LAN966X_TAPRIO_STATE_PENDING) { + err = lan966x_taprio_shutdown(port); + if (err) + return err; + + *new_list = list[i]; + *obs_list = (oper == -1) ? *new_list : oper; + return 0; + } + } + + for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) { + if (state[i] == LAN966X_TAPRIO_STATE_ADMIN) { + *new_list = list[i]; + *obs_list = (oper == -1) ?
*new_list : oper; + return 0; + } + } + + return -ENOSPC; +} + +static int lan966x_taprio_check(struct tc_taprio_qopt_offload *qopt) +{ + u64 total_time = 0; + u32 i; + + /* This is not supported by the HW */ + if (qopt->cycle_time_extension) + return -EOPNOTSUPP; + + /* There is a limited number of gcl entries that can be used; they are + * shared by all ports + */ + if (qopt->num_entries > LAN966X_TAPRIO_NUM_GCL) + return -EINVAL; + + /* Don't allow cycle times bigger than 1 sec or smaller than 1 usec */ + if (qopt->cycle_time < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS || + qopt->cycle_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS) + return -EINVAL; + + for (i = 0; i < qopt->num_entries; ++i) { + struct tc_taprio_sched_entry *entry = &qopt->entries[i]; + + /* Don't allow intervals bigger than 1 sec or smaller than 1 + * usec + */ + if (entry->interval < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS || + entry->interval > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS) + return -EINVAL; + + if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES) + return -EINVAL; + + total_time += qopt->entries[i].interval; + } + + /* Don't allow the total time of the intervals to be bigger than 1 sec */ + if (total_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS) + return -EINVAL; + + /* The HW expects the cycle time to be at least as big as the sum of + * all gcl intervals + */ + if (qopt->cycle_time < total_time) + return -EINVAL; + + return 0; +} + +static int lan966x_taprio_gcl_free_get(struct lan966x_port *port, + unsigned long *free_list) +{ + struct lan966x *lan966x = port->lan966x; + u32 num_free, state, list; + u32 base, next, max_list; + + /* By default everything is free */ + bitmap_fill(free_list, LAN966X_TAPRIO_NUM_GCL); + num_free = LAN966X_TAPRIO_NUM_GCL; + + /* Iterate over all gcl entries, find out which are free, and mark + * those that are not.
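+ * A list that is not in the admin state owns the chain of entries that + * starts at its base address, so walk each such chain (following the + * NEXT_GCL pointers until they wrap back to the base) and mark its + * entries as used.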
+ */ + max_list = lan966x->num_phys_ports * LAN966X_TAPRIO_ENTRIES_PER_PORT; + for (list = 0; list < max_list; ++list) { + state = lan966x_taprio_list_index_state_get(port, list); + if (state == LAN966X_TAPRIO_STATE_ADMIN) + continue; + + base = lan_rd(lan966x, QSYS_TAS_LIST_CFG); + base = QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(base); + next = base; + + do { + clear_bit(next, free_list); + num_free--; + + lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next), + QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, + lan966x, QSYS_TAS_CFG_CTRL); + + next = lan_rd(lan966x, QSYS_TAS_GCL_CT_CFG2); + next = QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(next); + } while (base != next); + } + + return num_free; +} + +static void lan966x_taprio_gcl_setup_entry(struct lan966x_port *port, + struct tc_taprio_sched_entry *entry, + u32 next_entry) +{ + struct lan966x *lan966x = port->lan966x; + + /* Setup a single gcl entry */ + lan_wr(QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(entry->gate_mask) | + QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(port->chip_port) | + QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES), + lan966x, QSYS_TAS_GCL_CT_CFG); + + lan_wr(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(port->chip_port) | + QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(next_entry), + lan966x, QSYS_TAS_GCL_CT_CFG2); + + lan_wr(entry->interval, lan966x, QSYS_TAS_GCL_TM_CFG); +} + +static int lan966x_taprio_gcl_setup(struct lan966x_port *port, + struct tc_taprio_qopt_offload *qopt, + int list) +{ + DECLARE_BITMAP(free_list, LAN966X_TAPRIO_NUM_GCL); + struct lan966x *lan966x = port->lan966x; + u32 i, base, next; + + if (lan966x_taprio_gcl_free_get(port, free_list) < qopt->num_entries) + return -ENOSPC; + + /* Select list */ + lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list), + QSYS_TAS_CFG_CTRL_LIST_NUM, + lan966x, QSYS_TAS_CFG_CTRL); + + /* Setup the address of the first gcl entry */ + base = find_first_bit(free_list, LAN966X_TAPRIO_NUM_GCL); + lan_rmw(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(base), + QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, + lan966x, QSYS_TAS_LIST_CFG); + + /* Iterate over entries and add them to the gcl list */ + next = base; + for (i = 0; i < qopt->num_entries; ++i) { + lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next), + QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, + lan966x, QSYS_TAS_CFG_CTRL); + + /* If the entry is last, point back to the start of the list */ + if (i == qopt->num_entries - 1) + next = base; + else + next = find_next_bit(free_list, LAN966X_TAPRIO_NUM_GCL, + next + 1); + + lan966x_taprio_gcl_setup_entry(port, &qopt->entries[i], next); + } + + return 0; +} + +/* Calculate new base_time based on cycle_time. The HW recommends to have the + * new base time at least 2 * cycle_time + current time + */ +static void lan966x_taprio_new_base_time(struct lan966x *lan966x, + const u32 cycle_time, + const ktime_t org_base_time, + ktime_t *new_base_time) +{ + ktime_t current_time, threshold_time; + struct timespec64 ts; + + /* Get the current time and calculate the threshold_time */ + lan966x_ptp_gettime64(&lan966x->phc[LAN966X_PHC_PORT].info, &ts); + current_time = timespec64_to_ktime(ts); + threshold_time = current_time + (2 * cycle_time); + + /* If the org_base_time is far enough in the future, just use it */ + if (org_base_time >= threshold_time) { + *new_base_time = org_base_time; + return; + } + + /* If the org_base_time is smaller than current_time, calculate the new + * base time as follows.
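+ * As a worked example with illustrative numbers: if cycle_time is 1 ms + * and org_base_time lies 2.5 cycles in the past, then rem becomes + * 1 ms - 0.5 ms = 0.5 ms and new_base_time = current_time + 2 ms + 0.5 ms, + * which is exactly 5 cycles after org_base_time, so the original phase of + * the schedule is preserved.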
+
+/* Calculate new base_time based on cycle_time. The HW recommends the new
+ * base time to be at least current time + 2 * cycle_time
+ */
+static void lan966x_taprio_new_base_time(struct lan966x *lan966x,
+					 const u32 cycle_time,
+					 const ktime_t org_base_time,
+					 ktime_t *new_base_time)
+{
+	ktime_t current_time, threshold_time;
+	struct timespec64 ts;
+
+	/* Get the current time and calculate the threshold_time */
+	lan966x_ptp_gettime64(&lan966x->phc[LAN966X_PHC_PORT].info, &ts);
+	current_time = timespec64_to_ktime(ts);
+	threshold_time = current_time + (2 * cycle_time);
+
+	/* If the org_base_time is far enough in the future, just use it */
+	if (org_base_time >= threshold_time) {
+		*new_base_time = org_base_time;
+		return;
+	}
+
+	/* If the org_base_time is smaller than current_time, calculate the new
+	 * base time as follows
+	 */
+	if (org_base_time <= current_time) {
+		u64 tmp = current_time - org_base_time;
+		u32 rem = 0;
+
+		if (tmp > cycle_time)
+			div_u64_rem(tmp, cycle_time, &rem);
+		rem = cycle_time - rem;
+		*new_base_time = threshold_time + rem;
+		return;
+	}
+
+	/* Otherwise org_base_time lies between current_time and
+	 * threshold_time. In this case the new_base_time is calculated as
+	 * org_base_time + 2 * cycle_time
+	 */
+	*new_base_time = org_base_time + 2 * cycle_time;
+}
+
+int lan966x_taprio_speed_set(struct lan966x_port *port, int speed)
+{
+	struct lan966x *lan966x = port->lan966x;
+	u8 taprio_speed;
+
+	switch (speed) {
+	case SPEED_10:
+		taprio_speed = LAN966X_TAPRIO_SPEED_10;
+		break;
+	case SPEED_100:
+		taprio_speed = LAN966X_TAPRIO_SPEED_100;
+		break;
+	case SPEED_1000:
+		taprio_speed = LAN966X_TAPRIO_SPEED_1000;
+		break;
+	case SPEED_2500:
+		taprio_speed = LAN966X_TAPRIO_SPEED_2500;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	lan_rmw(QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(taprio_speed),
+		QSYS_TAS_PROFILE_CFG_LINK_SPEED,
+		lan966x, QSYS_TAS_PROFILE_CFG(port->chip_port));
+
+	return 0;
+}
+
+int lan966x_taprio_add(struct lan966x_port *port,
+		       struct tc_taprio_qopt_offload *qopt)
+{
+	struct lan966x *lan966x = port->lan966x;
+	int err, new_list, obs_list;
+	struct timespec64 ts;
+	ktime_t base_time;
+
+	err = lan966x_taprio_check(qopt);
+	if (err)
+		return err;
+
+	err = lan966x_taprio_find_list(port, qopt, &new_list, &obs_list);
+	if (err)
+		return err;
+
+	err = lan966x_taprio_gcl_setup(port, qopt, new_list);
+	if (err)
+		return err;
+
+	lan966x_taprio_new_base_time(lan966x, qopt->cycle_time,
+				     qopt->base_time, &base_time);
+
+	ts = ktime_to_timespec64(base_time);
+	lan_wr(QSYS_TAS_BT_NSEC_NSEC_SET(ts.tv_nsec),
+	       lan966x, QSYS_TAS_BT_NSEC);
+
+	lan_wr(lower_32_bits(ts.tv_sec),
+	       lan966x, QSYS_TAS_BT_SEC_LSB);
+
+	lan_wr(QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(upper_32_bits(ts.tv_sec)),
+	       lan966x, QSYS_TAS_BT_SEC_MSB);
+
+	lan_wr(qopt->cycle_time, lan966x, QSYS_TAS_CT_CFG);
+
+	lan_rmw(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(obs_list),
+		QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX,
+		lan966x, QSYS_TAS_STARTUP_CFG);
+
+	/* Start list processing */
+	lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(LAN966X_TAPRIO_STATE_ADVANCING),
+		QSYS_TAS_LST_LIST_STATE,
+		lan966x, QSYS_TAS_LST);
+
+	return err;
+}
+
+int lan966x_taprio_del(struct lan966x_port *port)
+{
+	return lan966x_taprio_shutdown(port);
+}
+
+void lan966x_taprio_init(struct lan966x *lan966x)
+{
+	int num_taprio_lists;
+	int p;
+
+	lan_wr(QSYS_TAS_STM_CFG_REVISIT_DLY_SET((256 * 1000) /
+						lan966x_ptp_get_period_ps()),
+	       lan966x, QSYS_TAS_STM_CFG);
+
+	num_taprio_lists = lan966x->num_phys_ports *
+			   LAN966X_TAPRIO_ENTRIES_PER_PORT;
+
+	/* For now we always use guard band on all queues */
+	lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(num_taprio_lists) |
+		QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(1),
+		QSYS_TAS_CFG_CTRL_LIST_NUM_MAX |
+		QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q,
+		lan966x, QSYS_TAS_CFG_CTRL);
+
+	for (p = 0; p < lan966x->num_phys_ports; p++)
+		lan_rmw(QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(p),
+			QSYS_TAS_PROFILE_CFG_PORT_NUM,
+			lan966x, QSYS_TAS_PROFILE_CFG(p));
+}
+
+void lan966x_taprio_deinit(struct lan966x *lan966x)
+{
+	int p;
+
+	for (p = 0; p < lan966x->num_phys_ports; ++p) {
+		if (!lan966x->ports[p])
+			continue;
+
+		lan966x_taprio_del(lan966x->ports[p]);
+	}
+}
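
A quick way to sanity-check lan966x_taprio_new_base_time() is to replay it on plain nanosecond counters: a stale base time gets pushed beyond now + 2 * cycle_time while staying in phase with the original schedule, and a far-future base time is kept unchanged. A standalone sketch with sample values (pure arithmetic, no driver types):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the adjustment above, on plain ns counters */
static uint64_t new_base_time(uint64_t now, uint64_t cycle, uint64_t org)
{
	uint64_t threshold = now + 2 * cycle;

	if (org >= threshold)
		return org;

	if (org <= now) {
		uint64_t tmp = now - org;
		uint64_t rem = 0;

		if (tmp > cycle)
			rem = tmp % cycle;
		return threshold + (cycle - rem);
	}

	return org + 2 * cycle;
}

int main(void)
{
	uint64_t now = 10000000, cycle = 1000000; /* 1 ms cycle */

	/* base 2.5 ms in the past: moved to 12500000, i.e. exactly five
	 * cycles after the original base, so the phase is preserved
	 */
	printf("%llu\n", (unsigned long long)new_base_time(now, cycle, 7500000));
	/* base far in the future: returned unchanged */
	printf("%llu\n", (unsigned long long)new_base_time(now, cycle, 15000000));
	return 0;
}
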
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c
new file mode 100644
index 000000000..4555a35d0
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_tbf_add(struct lan966x_port *port,
+		    struct tc_tbf_qopt_offload *qopt)
+{
+	struct lan966x *lan966x = port->lan966x;
+	bool root = qopt->parent == TC_H_ROOT;
+	u32 queue = 0;
+	u32 cir, cbs;
+	u32 se_idx;
+
+	if (!root) {
+		queue = TC_H_MIN(qopt->parent) - 1;
+		if (queue >= NUM_PRIO_QUEUES)
+			return -EOPNOTSUPP;
+	}
+
+	if (root)
+		se_idx = SE_IDX_PORT + port->chip_port;
+	else
+		se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
+
+	cir = div_u64(qopt->replace_params.rate.rate_bytes_ps, 1000) * 8;
+	cbs = qopt->replace_params.max_size;
+
+	/* Rate unit is 100 kbps */
+	cir = DIV_ROUND_UP(cir, 100);
+	/* Avoid using zero rate */
+	cir = cir ?: 1;
+	/* Burst unit is 4kB */
+	cbs = DIV_ROUND_UP(cbs, 4096);
+	/* Avoid using zero burst */
+	cbs = cbs ?: 1;
+
+	/* Check that the result can actually be written */
+	if (cir > GENMASK(15, 0) ||
+	    cbs > GENMASK(6, 0))
+		return -EINVAL;
+
+	lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) |
+		QSYS_SE_CFG_SE_FRM_MODE_SET(1),
+		QSYS_SE_CFG_SE_AVB_ENA |
+		QSYS_SE_CFG_SE_FRM_MODE,
+		lan966x, QSYS_SE_CFG(se_idx));
+
+	lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) |
+	       QSYS_CIR_CFG_CIR_BURST_SET(cbs),
+	       lan966x, QSYS_CIR_CFG(se_idx));
+
+	return 0;
+}
+
+int lan966x_tbf_del(struct lan966x_port *port,
+		    struct tc_tbf_qopt_offload *qopt)
+{
+	struct lan966x *lan966x = port->lan966x;
+	bool root = qopt->parent == TC_H_ROOT;
+	u32 queue = 0;
+	u32 se_idx;
+
+	if (!root) {
+		queue = TC_H_MIN(qopt->parent) - 1;
+		if (queue >= NUM_PRIO_QUEUES)
+			return -EOPNOTSUPP;
+	}
+
+	if (root)
+		se_idx = SE_IDX_PORT + port->chip_port;
+	else
+		se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
+
+	lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) |
+		QSYS_SE_CFG_SE_FRM_MODE_SET(0),
+		QSYS_SE_CFG_SE_AVB_ENA |
+		QSYS_SE_CFG_SE_FRM_MODE,
+		lan966x, QSYS_SE_CFG(se_idx));
+
+	lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) |
+	       QSYS_CIR_CFG_CIR_BURST_SET(0),
+	       lan966x, QSYS_CIR_CFG(se_idx));
+
+	return 0;
+}
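
The conversion in lan966x_tbf_add() quantizes the tc parameters into the shaper's units: rate into 100 kbps steps (a 16-bit field) and burst into 4 kB steps (a 7-bit field). A standalone re-computation of that arithmetic with illustrative numbers (100 Mbps rate, 8 KiB burst):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* e.g. a tbf with rate 100 Mbit => 12500000 bytes/s, burst 8 KiB */
	uint64_t rate_bytes_ps = 12500000;
	uint32_t max_size = 8192;

	uint32_t cir = (uint32_t)(rate_bytes_ps / 1000) * 8; /* kbps: 100000 */
	uint32_t cbs = max_size;

	cir = DIV_ROUND_UP(cir, 100);  /* 100 kbps units: 1000 */
	if (!cir)
		cir = 1;
	cbs = DIV_ROUND_UP(cbs, 4096); /* 4 kB units: 2 */
	if (!cbs)
		cbs = 1;

	/* must fit the 16-bit rate and 7-bit burst register fields */
	printf("cir=%u (fits: %d), cbs=%u (fits: %d)\n",
	       cir, cir <= 0xffff, cbs, cbs <= 0x7f);
	return 0;
}
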
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
new file mode 100644
index 000000000..651d5493a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/pkt_cls.h>
+
+#include "lan966x_main.h"
+
+static LIST_HEAD(lan966x_tc_block_cb_list);
+
+static int lan966x_tc_setup_qdisc_mqprio(struct lan966x_port *port,
+					 struct tc_mqprio_qopt_offload *mqprio)
+{
+	u8 num_tc = mqprio->qopt.num_tc;
+
+	mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+	return num_tc ? lan966x_mqprio_add(port, num_tc) :
+			lan966x_mqprio_del(port);
+}
+
+static int lan966x_tc_setup_qdisc_taprio(struct lan966x_port *port,
+					 struct tc_taprio_qopt_offload *taprio)
+{
+	return taprio->enable ? lan966x_taprio_add(port, taprio) :
+				lan966x_taprio_del(port);
+}
+
+static int lan966x_tc_setup_qdisc_tbf(struct lan966x_port *port,
+				      struct tc_tbf_qopt_offload *qopt)
+{
+	switch (qopt->command) {
+	case TC_TBF_REPLACE:
+		return lan966x_tbf_add(port, qopt);
+	case TC_TBF_DESTROY:
+		return lan966x_tbf_del(port, qopt);
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static int lan966x_tc_setup_qdisc_cbs(struct lan966x_port *port,
+				      struct tc_cbs_qopt_offload *qopt)
+{
+	return qopt->enable ? lan966x_cbs_add(port, qopt) :
+			      lan966x_cbs_del(port, qopt);
+}
+
+static int lan966x_tc_setup_qdisc_ets(struct lan966x_port *port,
+				      struct tc_ets_qopt_offload *qopt)
+{
+	switch (qopt->command) {
+	case TC_ETS_REPLACE:
+		return lan966x_ets_add(port, qopt);
+	case TC_ETS_DESTROY:
+		return lan966x_ets_del(port, qopt);
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static int lan966x_tc_block_cb(enum tc_setup_type type, void *type_data,
+			       void *cb_priv, bool ingress)
+{
+	struct lan966x_port *port = cb_priv;
+
+	switch (type) {
+	case TC_SETUP_CLSMATCHALL:
+		return lan966x_tc_matchall(port, type_data, ingress);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int lan966x_tc_block_cb_ingress(enum tc_setup_type type,
+				       void *type_data, void *cb_priv)
+{
+	return lan966x_tc_block_cb(type, type_data, cb_priv, true);
+}
+
+static int lan966x_tc_block_cb_egress(enum tc_setup_type type,
+				      void *type_data, void *cb_priv)
+{
+	return lan966x_tc_block_cb(type, type_data, cb_priv, false);
+}
+
+static int lan966x_tc_setup_block(struct lan966x_port *port,
+				  struct flow_block_offload *f)
+{
+	flow_setup_cb_t *cb;
+	bool ingress;
+
+	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+		cb = lan966x_tc_block_cb_ingress;
+		port->tc.ingress_shared_block = f->block_shared;
+		ingress = true;
+	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+		cb = lan966x_tc_block_cb_egress;
+		ingress = false;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	return flow_block_cb_setup_simple(f, &lan966x_tc_block_cb_list,
+					  cb, port, port, ingress);
+}
+
+int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type,
+		     void *type_data)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+
+	switch (type) {
+	case TC_SETUP_QDISC_MQPRIO:
+		return lan966x_tc_setup_qdisc_mqprio(port, type_data);
+	case TC_SETUP_QDISC_TAPRIO:
+		return lan966x_tc_setup_qdisc_taprio(port, type_data);
+	case TC_SETUP_QDISC_TBF:
+		return lan966x_tc_setup_qdisc_tbf(port, type_data);
+	case TC_SETUP_QDISC_CBS:
+		return lan966x_tc_setup_qdisc_cbs(port, type_data);
+	case TC_SETUP_QDISC_ETS:
+		return lan966x_tc_setup_qdisc_ets(port, type_data);
+	case TC_SETUP_BLOCK:
+		return lan966x_tc_setup_block(port, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
new file mode 100644
index 000000000..7368433b9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+static int lan966x_tc_matchall_add(struct lan966x_port *port,
+				   struct tc_cls_matchall_offload *f,
+				   bool ingress)
+{
+	struct flow_action_entry *act;
+
+	if (!flow_offload_has_one_action(&f->rule->action)) {
+		NL_SET_ERR_MSG_MOD(f->common.extack,
+				   "Only one action per filter is supported");
+		return -EOPNOTSUPP;
+	}
+
+	act = &f->rule->action.entries[0];
+	switch (act->id) {
+	case FLOW_ACTION_POLICE:
+		return lan966x_police_port_add(port, &f->rule->action, act,
+					       f->cookie, ingress,
+					       f->common.extack);
+	case FLOW_ACTION_MIRRED:
+		return lan966x_mirror_port_add(port, act, f->cookie,
+					       ingress, f->common.extack);
+	default:
+		NL_SET_ERR_MSG_MOD(f->common.extack,
+				   "Unsupported action");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int lan966x_tc_matchall_del(struct lan966x_port *port,
+				   struct tc_cls_matchall_offload *f,
+				   bool ingress)
+{
+	if (f->cookie == port->tc.police_id) {
+		return lan966x_police_port_del(port, f->cookie,
+					       f->common.extack);
+	} else if (f->cookie == port->tc.ingress_mirror_id ||
+		   f->cookie == port->tc.egress_mirror_id) {
+		return lan966x_mirror_port_del(port, ingress,
+					       f->common.extack);
+	} else {
+		NL_SET_ERR_MSG_MOD(f->common.extack,
+				   "Unsupported action");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int lan966x_tc_matchall_stats(struct lan966x_port *port,
+				     struct tc_cls_matchall_offload *f,
+				     bool ingress)
+{
+	if (f->cookie == port->tc.police_id) {
+		lan966x_police_port_stats(port, &f->stats);
+	} else if (f->cookie == port->tc.ingress_mirror_id ||
+		   f->cookie == port->tc.egress_mirror_id) {
+		lan966x_mirror_port_stats(port, &f->stats, ingress);
+	} else {
+		NL_SET_ERR_MSG_MOD(f->common.extack,
+				   "Unsupported action");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+int lan966x_tc_matchall(struct lan966x_port *port,
+			struct tc_cls_matchall_offload *f,
+			bool ingress)
+{
+	if (!tc_cls_can_offload_and_chain0(port->dev, &f->common)) {
+		NL_SET_ERR_MSG_MOD(f->common.extack,
+				   "Only chain zero is supported");
+		return -EOPNOTSUPP;
+	}
+
+	switch (f->command) {
+	case TC_CLSMATCHALL_REPLACE:
+		return lan966x_tc_matchall_add(port, f, ingress);
+	case TC_CLSMATCHALL_DESTROY:
+		return lan966x_tc_matchall_del(port, f, ingress);
+	case TC_CLSMATCHALL_STATS:
+		return lan966x_tc_matchall_stats(port, f, ingress);
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
new file mode 100644
index 000000000..3c4466012
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define VLANACCESS_CMD_IDLE 0
+#define VLANACCESS_CMD_READ 1
+#define VLANACCESS_CMD_WRITE 2
+#define VLANACCESS_CMD_INIT 3
+
+static int lan966x_vlan_get_status(struct lan966x *lan966x)
+{
+	return lan_rd(lan966x, ANA_VLANACCESS);
+}
+
+static int lan966x_vlan_wait_for_completion(struct lan966x *lan966x)
+{
+	u32 val;
+
+	return readx_poll_timeout(lan966x_vlan_get_status,
+				  lan966x, val,
+				  (val & ANA_VLANACCESS_VLAN_TBL_CMD) ==
+				  VLANACCESS_CMD_IDLE,
+				  TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+}
+
+static void lan966x_vlan_set_mask(struct lan966x *lan966x, u16 vid)
+{
+	u16 mask = lan966x->vlan_mask[vid];
+	bool cpu_dis;
+
+	cpu_dis = !(mask & BIT(CPU_PORT));
+
+	/* Set flags and the VID to configure */
+	lan_rmw(ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(cpu_dis) |
+		ANA_VLANTIDX_V_INDEX_SET(vid),
+		ANA_VLANTIDX_VLAN_PGID_CPU_DIS |
+		ANA_VLANTIDX_V_INDEX,
+		lan966x, ANA_VLANTIDX);
+
+	/* Set the vlan port members mask */
+	lan_rmw(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(mask),
+		ANA_VLAN_PORT_MASK_VLAN_PORT_MASK,
+		lan966x, ANA_VLAN_PORT_MASK);
+
+	/* Issue a write command */
+	lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_WRITE),
+		ANA_VLANACCESS_VLAN_TBL_CMD,
+		lan966x, ANA_VLANACCESS);
+
+	if (lan966x_vlan_wait_for_completion(lan966x))
+		dev_err(lan966x->dev, "Vlan set mask failed\n");
+}
+
+static void lan966x_vlan_port_add_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+	struct lan966x *lan966x = port->lan966x;
+	u8 p = port->chip_port;
+
+	lan966x->vlan_mask[vid] |= BIT(p);
+	lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_port_del_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+	struct lan966x *lan966x = port->lan966x;
+	u8 p = port->chip_port;
+
+	lan966x->vlan_mask[vid] &= ~BIT(p);
+	lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static bool lan966x_vlan_port_any_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+	return !!(lan966x->vlan_mask[vid] & ~BIT(CPU_PORT));
+}
+
+static void lan966x_vlan_cpu_add_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+	lan966x->vlan_mask[vid] |= BIT(CPU_PORT);
+	lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_del_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+	lan966x->vlan_mask[vid] &= ~BIT(CPU_PORT);
+	lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_add_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+	__set_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+static void lan966x_vlan_cpu_del_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+	__clear_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+	return test_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+static u16 lan966x_vlan_port_get_pvid(struct lan966x_port *port)
+{
+	struct lan966x *lan966x = port->lan966x;
+
+	if (!(lan966x->bridge_mask & BIT(port->chip_port)))
+		return HOST_PVID;
+
+	return port->vlan_aware ? port->pvid : UNAWARE_PVID;
+}
+
+int lan966x_vlan_port_set_vid(struct lan966x_port *port, u16 vid,
+			      bool pvid, bool untagged)
+{
+	struct lan966x *lan966x = port->lan966x;
+
+	/* Egress vlan classification */
+	if (untagged && port->vid != vid) {
+		if (port->vid) {
+			dev_err(lan966x->dev,
+				"Port already has a native VLAN: %d\n",
+				port->vid);
+			return -EBUSY;
+		}
+		port->vid = vid;
+	}
+
+	/* Default ingress vlan classification */
+	if (pvid)
+		port->pvid = vid;
+
+	return 0;
+}
+
+static void lan966x_vlan_port_remove_vid(struct lan966x_port *port, u16 vid)
+{
+	if (port->pvid == vid)
+		port->pvid = 0;
+
+	if (port->vid == vid)
+		port->vid = 0;
+}
+
+void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+				      bool vlan_aware)
+{
+	port->vlan_aware = vlan_aware;
+}
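
The helpers above keep one port bitmask per VID, with the CPU treated as just another member bit; lan966x_vlan_port_any_vlan_mask() masks the CPU bit out to ask whether any front port is left. A toy model of that bookkeeping (the CPU_PORT bit position is an assumption, for illustration only):

#include <stdio.h>

#define CPU_PORT 8 /* assumed bit position, illustration only */

int main(void)
{
	unsigned int vlan_mask = 0;

	vlan_mask |= 1u << 2;        /* front port 2 joins the vlan */
	vlan_mask |= 1u << CPU_PORT; /* CPU joins so frames reach the bridge */
	printf("port 2 + CPU: mask=0x%03x, front ports left=%d\n",
	       vlan_mask, !!(vlan_mask & ~(1u << CPU_PORT)));

	vlan_mask &= ~(1u << 2);     /* last front port leaves */
	printf("CPU only:     mask=0x%03x, front ports left=%d\n",
	       vlan_mask, !!(vlan_mask & ~(1u << CPU_PORT)));

	return 0;
}
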
+
+void lan966x_vlan_port_apply(struct lan966x_port *port)
+{
+	struct lan966x *lan966x = port->lan966x;
+	u16 pvid;
+	u32 val;
+
+	pvid = lan966x_vlan_port_get_pvid(port);
+
+	/* Ingress classification (ANA_PORT_VLAN_CFG) */
+	/* Default vlan to classify for untagged frames (may be zero) */
+	val = ANA_VLAN_CFG_VLAN_VID_SET(pvid);
+	if (port->vlan_aware)
+		val |= ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+		       ANA_VLAN_CFG_VLAN_POP_CNT_SET(1);
+
+	lan_rmw(val,
+		ANA_VLAN_CFG_VLAN_VID | ANA_VLAN_CFG_VLAN_AWARE_ENA |
+		ANA_VLAN_CFG_VLAN_POP_CNT,
+		lan966x, ANA_VLAN_CFG(port->chip_port));
+
+	lan_rmw(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(port->vlan_aware) |
+		DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(port->vlan_aware),
+		DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+		DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA,
+		lan966x, DEV_MAC_TAGS_CFG(port->chip_port));
+
+	/* Drop frames with multicast source address */
+	val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1);
+	if (port->vlan_aware && !pvid)
+		/* If port is vlan-aware and tagged, drop untagged and priority
+		 * tagged frames.
+		 */
+		val |= ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(1) |
+		       ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(1) |
+		       ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(1);
+
+	lan_wr(val, lan966x, ANA_DROP_CFG(port->chip_port));
+
+	/* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */
+	val = REW_TAG_CFG_TAG_TPID_CFG_SET(0);
+	if (port->vlan_aware) {
+		if (port->vid)
+			/* Tag all frames except when VID == DEFAULT_VLAN */
+			val |= REW_TAG_CFG_TAG_CFG_SET(1);
+		else
+			val |= REW_TAG_CFG_TAG_CFG_SET(3);
+	}
+
+	/* Update only some bits in the register */
+	lan_rmw(val,
+		REW_TAG_CFG_TAG_TPID_CFG | REW_TAG_CFG_TAG_CFG,
+		lan966x, REW_TAG_CFG(port->chip_port));
+
+	/* Set default VLAN and tag type to 8021Q */
+	lan_rmw(REW_PORT_VLAN_CFG_PORT_TPID_SET(ETH_P_8021Q) |
+		REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
+		REW_PORT_VLAN_CFG_PORT_TPID |
+		REW_PORT_VLAN_CFG_PORT_VID,
+		lan966x, REW_PORT_VLAN_CFG(port->chip_port));
+}
+
+void lan966x_vlan_port_add_vlan(struct lan966x_port *port,
+				u16 vid,
+				bool pvid,
+				bool untagged)
+{
+	struct lan966x *lan966x = port->lan966x;
+
+	/* If the CPU(br) is already part of the vlan then add the fdb
+	 * entries in MAC table to copy the frames to the CPU(br).
+	 * If the CPU(br) is not part of the vlan then it would
+	 * just drop the frames.
+	 */
+	if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, vid)) {
+		lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+		lan966x_fdb_write_entries(lan966x, vid);
+		lan966x_mdb_write_entries(lan966x, vid);
+	}
+
+	lan966x_vlan_port_set_vid(port, vid, pvid, untagged);
+	lan966x_vlan_port_add_vlan_mask(port, vid);
+	lan966x_vlan_port_apply(port);
+}
+
+void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid)
+{
+	struct lan966x *lan966x = port->lan966x;
+
+	lan966x_vlan_port_remove_vid(port, vid);
+	lan966x_vlan_port_del_vlan_mask(port, vid);
+	lan966x_vlan_port_apply(port);
+
+	/* In case there are no other ports in the vlan then remove the CPU
+	 * from that vlan, but still keep it in the mask because it may be
+	 * needed again when another port gets added to that vlan
+	 */
+	if (!lan966x_vlan_port_any_vlan_mask(lan966x, vid)) {
+		lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+		lan966x_fdb_erase_entries(lan966x, vid);
+		lan966x_mdb_erase_entries(lan966x, vid);
+	}
+}
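
The egress rewriter choice in lan966x_vlan_port_apply() reduces to a small decision table on (vlan_aware, native VID). A standalone sketch of the same selection; the TAG_CFG encodings are copied from the code above and their meanings paraphrased from its comments:

#include <stdio.h>

/* Mirrors the TAG_CFG selection above:
 * 0: port tagging disabled, 1: tag all but the native VID, 3: tag all
 */
static int tag_cfg(int vlan_aware, int native_vid)
{
	if (!vlan_aware)
		return 0;
	return native_vid ? 1 : 3;
}

int main(void)
{
	printf("unaware:           %d\n", tag_cfg(0, 0));
	printf("aware, native 100: %d\n", tag_cfg(1, 100));
	printf("aware, no native:  %d\n", tag_cfg(1, 0));
	return 0;
}
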
+
+void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid)
+{
+	/* Add an entry in the MAC table for the CPU.
+	 * Add the CPU part of the vlan only if there is another port in that
+	 * vlan, otherwise all the broadcast frames in that vlan will go to the
+	 * CPU even if none of the ports are in the vlan, and then the CPU will
+	 * just need to discard these frames. This information needs to be
+	 * stored so that when a front port is added, the CPU port is added as
+	 * well.
+	 */
+	if (lan966x_vlan_port_any_vlan_mask(lan966x, vid)) {
+		lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+		lan966x_mdb_write_entries(lan966x, vid);
+	}
+
+	lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, vid);
+	lan966x_fdb_write_entries(lan966x, vid);
+}
+
+void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid)
+{
+	/* Remove the CPU part of the vlan */
+	lan966x_vlan_cpu_del_cpu_vlan_mask(lan966x, vid);
+	lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+	lan966x_fdb_erase_entries(lan966x, vid);
+	lan966x_mdb_erase_entries(lan966x, vid);
+}
+
+void lan966x_vlan_init(struct lan966x *lan966x)
+{
+	u16 port, vid;
+
+	/* Clear the VLAN table; by default all ports are members of all VLANs */
+	lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_INIT),
+		ANA_VLANACCESS_VLAN_TBL_CMD,
+		lan966x, ANA_VLANACCESS);
+	lan966x_vlan_wait_for_completion(lan966x);
+
+	for (vid = 1; vid < VLAN_N_VID; vid++) {
+		lan966x->vlan_mask[vid] = 0;
+		lan966x_vlan_set_mask(lan966x, vid);
+	}
+
+	/* Set all the ports + cpu to be part of HOST_PVID and UNAWARE_PVID */
+	lan966x->vlan_mask[HOST_PVID] =
+		GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+	lan966x_vlan_set_mask(lan966x, HOST_PVID);
+
+	lan966x->vlan_mask[UNAWARE_PVID] =
+		GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+	lan966x_vlan_set_mask(lan966x, UNAWARE_PVID);
+
+	lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, UNAWARE_PVID);
+
+	/* Configure the CPU port to be vlan aware */
+	lan_wr(ANA_VLAN_CFG_VLAN_VID_SET(0) |
+	       ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+	       ANA_VLAN_CFG_VLAN_POP_CNT_SET(1),
+	       lan966x, ANA_VLAN_CFG(CPU_PORT));
+
+	/* Set vlan ingress filter mask to all ports */
+	lan_wr(GENMASK(lan966x->num_phys_ports, 0),
+	       lan966x, ANA_VLANMASK);
+
+	for (port = 0; port < lan966x->num_phys_ports; port++) {
+		lan_wr(0, lan966x, REW_PORT_VLAN_CFG(port));
+		lan_wr(0, lan966x, REW_TAG_CFG(port));
+	}
+}
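
As a closing illustration of the init-time masks: for a hypothetical switch with eight front ports and the CPU on bit 8, the HOST_PVID/UNAWARE_PVID membership and the ANA_VLANMASK ingress filter computed in lan966x_vlan_init() both work out to 0x1ff. A standalone re-computation (port count and CPU bit are assumptions):

#include <stdio.h>

/* GENMASK(h, l) as used in the kernel, restricted to 32-bit values */
#define GENMASK32(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	int num_phys_ports = 8; /* hypothetical board */
	int cpu_port = 8;       /* assumed CPU port index */

	unsigned int members = GENMASK32(num_phys_ports - 1, 0) |
			       (1u << cpu_port);
	unsigned int ingress_filter = GENMASK32(num_phys_ports, 0);

	printf("HOST_PVID/UNAWARE_PVID members: 0x%03x\n", members);
	printf("ANA_VLANMASK ingress filter:    0x%03x\n", ingress_filter);
	return 0;
}
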