author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/ethernet/microchip/sparx5
parent     Initial commit. (diff)
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
           linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/microchip/sparx5')
22 files changed, 14141 insertions, 0 deletions
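
Before the diff: the statistics code in sparx5_ethtool.c below accumulates free-running 32-bit hardware counters into 64-bit software totals, treating a new reading that is smaller than the low 32 bits of the running total as a wrap (see sparx5_update_counter). A minimal standalone sketch of that technique, assuming nothing beyond standard C (uint32_t/uint64_t stand in for the kernel's u32/u64):

#include <stdint.h>
#include <stdio.h>

/* Fold a potentially wrapping 32-bit reading into a 64-bit total. */
static void update_counter(uint64_t *cnt, uint32_t val)
{
	if (val < (uint32_t)*cnt)		/* low 32 bits went backwards: wrapped */
		*cnt += (uint64_t)1 << 32;	/* carry into the high half */
	*cnt = (*cnt & ~(uint64_t)UINT32_MAX) + val;	/* replace the low half */
}

int main(void)
{
	uint64_t total = 0;

	update_counter(&total, 0xFFFFFFF0u);	/* near the 32-bit limit */
	update_counter(&total, 0x10u);		/* wrapped reading */
	printf("total = 0x%llx\n", (unsigned long long)total);	/* 0x100000010 */
	return 0;
}

This stays correct only if every counter is polled at least once per wrap period; the driver arranges that with the stats_work delayed work seen near the end of this diff.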
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
new file mode 100644
index 000000000..cc5e48e1b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -0,0 +1,13 @@
+config SPARX5_SWITCH
+	tristate "Sparx5 switch driver"
+	depends on NET_SWITCHDEV
+	depends on HAS_IOMEM
+	depends on OF
+	depends on ARCH_SPARX5 || COMPILE_TEST
+	depends on PTP_1588_CLOCK_OPTIONAL
+	depends on BRIDGE || BRIDGE=n
+	select PHYLINK
+	select PHY_SPARX5_SERDES
+	select RESET_CONTROLLER
+	help
+	  This driver supports the Sparx5 network switch device.
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
new file mode 100644
index 000000000..d1c6ad966
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip Sparx5 network device drivers.
+#
+
+obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o
+
+sparx5-switch-objs := sparx5_main.o sparx5_packet.o \
+ sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
+ sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
+ sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
new file mode 100644
index 000000000..76a8bb596
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+/* QSYS calendar information */
+#define SPX5_PORTS_PER_CALREG          10  /* Ports mapped in a calendar register */
+#define SPX5_CALBITS_PER_PORT          3   /* Bit per port in calendar register */
+
+/* DSM calendar information */
+#define SPX5_DSM_CAL_LEN               64
+#define SPX5_DSM_CAL_EMPTY             0xFFFF
+#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13
+#define SPX5_DSM_CAL_TAXIS             8
+#define SPX5_DSM_CAL_BW_LOSS           553
+
+#define SPX5_TAXI_PORT_MAX             70
+
+#define SPEED_12500                    12500
+
+/* Maps from taxis to port numbers */
+static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI] = {
+	{57, 12, 0, 1, 2, 16, 17, 18, 19, 20, 21, 22, 23},
+	{58, 13, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31},
+	{59, 14, 6, 7, 8, 32, 33, 34, 35, 36, 37, 38, 39},
+	{60, 15, 9, 10, 11, 40, 41, 42, 43, 44, 45, 46, 47},
+	{61, 48, 49, 50, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+	{62, 51, 52, 53, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+	{56, 63, 54, 55, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+	{64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+};
+
+struct sparx5_calendar_data {
+	u32 schedule[SPX5_DSM_CAL_LEN];
+	u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+	u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+	u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+	u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+	u32 new_slots[SPX5_DSM_CAL_LEN];
+	u32 temp_sched[SPX5_DSM_CAL_LEN];
+	u32 indices[SPX5_DSM_CAL_LEN];
+	u32 short_list[SPX5_DSM_CAL_LEN];
+	u32 long_list[SPX5_DSM_CAL_LEN];
+};
+
+static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
+{
+	switch (sparx5->target_ct) {
+	case SPX5_TARGET_CT_7546:
+	case SPX5_TARGET_CT_7546TSN:
+		return 65000;
+	case SPX5_TARGET_CT_7549:
+	case SPX5_TARGET_CT_7549TSN:
+		return 91000;
+	case SPX5_TARGET_CT_7552:
+	case SPX5_TARGET_CT_7552TSN:
+		return 129000;
+	case SPX5_TARGET_CT_7556:
+	case SPX5_TARGET_CT_7556TSN:
+		return 161000;
+	case SPX5_TARGET_CT_7558:
+	case SPX5_TARGET_CT_7558TSN:
+		return 201000;
+	default:
+		return 0;
+	}
+}
+
+/* This is used in calendar configuration */
+enum sparx5_cal_bw {
+	SPX5_CAL_SPEED_NONE = 0,
+	SPX5_CAL_SPEED_1G   = 1,
+	SPX5_CAL_SPEED_2G5  = 2,
+	SPX5_CAL_SPEED_5G   = 3,
+	SPX5_CAL_SPEED_10G  = 4,
+	SPX5_CAL_SPEED_25G  = 5,
+	SPX5_CAL_SPEED_0G5  = 6,
+	SPX5_CAL_SPEED_12G5 = 7
+};
+
+static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
+{
+	switch (cclock) {
+	case SPX5_CORE_CLOCK_250MHZ: return 83000; /* 250000 / 3 */
+	case SPX5_CORE_CLOCK_500MHZ: return 166000; /* 500000 / 3 */
+	case SPX5_CORE_CLOCK_625MHZ: return 208000; /* 625000 / 3 */
+	default: return 0;
+	}
+	return 0;
+}
+
+static u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
+{
+	switch (speed) {
+	case SPX5_CAL_SPEED_1G:   return 1000;
+	case SPX5_CAL_SPEED_2G5:  return 2500;
+	case SPX5_CAL_SPEED_5G:   return 5000;
+	case SPX5_CAL_SPEED_10G:  return 10000;
+	case SPX5_CAL_SPEED_25G:  return 25000;
+	case SPX5_CAL_SPEED_0G5:  return 500;
+	case SPX5_CAL_SPEED_12G5: return 12500;
+	default: return 0;
+	}
+}
+
+static u32 sparx5_bandwidth_to_calendar(u32 bw)
+{
+	switch (bw) {
+	case SPEED_10:      return SPX5_CAL_SPEED_0G5;
+	case SPEED_100:     return SPX5_CAL_SPEED_0G5;
+	case SPEED_1000:    return SPX5_CAL_SPEED_1G;
+	case SPEED_2500:    return SPX5_CAL_SPEED_2G5;
+	case SPEED_5000:    return SPX5_CAL_SPEED_5G;
+	case SPEED_10000:   return SPX5_CAL_SPEED_10G;
+	case SPEED_12500:   return SPX5_CAL_SPEED_12G5;
+	case SPEED_25000:   return SPX5_CAL_SPEED_25G;
+	case SPEED_UNKNOWN: return SPX5_CAL_SPEED_1G;
+	default:            return SPX5_CAL_SPEED_NONE;
+	}
+}
+
+static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5,
+						    u32 portno)
+{
+	struct sparx5_port *port;
+
+	if (portno >= SPX5_PORTS) {
+		/* Internal ports */
+		if (portno == SPX5_PORT_CPU_0 || portno == SPX5_PORT_CPU_1) {
+			/* Equals 1.25G */
+			return SPX5_CAL_SPEED_2G5;
+		} else if (portno == SPX5_PORT_VD0) {
+			/* IPMC only idle BW */
+			return SPX5_CAL_SPEED_NONE;
+		} else if (portno == SPX5_PORT_VD1) {
+			/* OAM only idle BW */
+			return SPX5_CAL_SPEED_NONE;
+		} else if (portno == SPX5_PORT_VD2) {
+			/* IPinIP gets only idle BW */
+			return SPX5_CAL_SPEED_NONE;
+		}
+		/* not in port map */
+		return SPX5_CAL_SPEED_NONE;
+	}
+	/* Front ports - may be used */
+	port = sparx5->ports[portno];
+	if (!port)
+		return SPX5_CAL_SPEED_NONE;
+	return sparx5_bandwidth_to_calendar(port->conf.bandwidth);
+}
+
+/* Auto configure the QSYS calendar based on port configuration */
+int sparx5_config_auto_calendar(struct sparx5 *sparx5)
+{
+	u32 cal[7], value, idx, portno;
+	u32 max_core_bw;
+	u32 total_bw = 0, used_port_bw = 0;
+	int err = 0;
+	enum sparx5_cal_bw spd;
+
+	memset(cal, 0, sizeof(cal));
+
+	max_core_bw = sparx5_clk_to_bandwidth(sparx5->coreclock);
+	if (max_core_bw == 0) {
+		dev_err(sparx5->dev, "Core clock not supported");
+		return -EINVAL;
+	}
+
+	/* Setup the calendar with the bandwidth to each port */
+	for (portno = 0; portno < SPX5_PORTS_ALL; portno++) {
+		u64 reg, offset, this_bw;
+
+		spd = sparx5_get_port_cal_speed(sparx5, portno);
+		if (spd == SPX5_CAL_SPEED_NONE)
+			continue;
+
+		this_bw = sparx5_cal_speed_to_value(spd);
+		if (portno < SPX5_PORTS)
+			used_port_bw += this_bw;
+		else
+			/* Internal ports are granted half the value */
+			this_bw = this_bw / 2;
+		total_bw += this_bw;
+		reg = portno;
+		offset = do_div(reg, SPX5_PORTS_PER_CALREG);
+		cal[reg] |= spd << (offset * SPX5_CALBITS_PER_PORT);
+	}
+
+	if (used_port_bw > sparx5_target_bandwidth(sparx5)) {
+		dev_err(sparx5->dev,
+			"Port BW %u above target BW %u\n",
+			used_port_bw, sparx5_target_bandwidth(sparx5));
+		return -EINVAL;
+	}
+
+	if (total_bw > max_core_bw) {
+		dev_err(sparx5->dev,
+			"Total BW %u above switch core BW %u\n",
+			total_bw, max_core_bw);
+		return -EINVAL;
+	}
+
+	/* Halt the calendar while changing it */
+	spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
+		 QSYS_CAL_CTRL_CAL_MODE,
+		 sparx5, QSYS_CAL_CTRL);
+
+	/* Assign port bandwidth to auto calendar */
+	for (idx = 0; idx < ARRAY_SIZE(cal); idx++)
+		spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx));
+
+	/* Increase grant rate of all ports to account for
+	 * core clock ppm deviations
+	 */
+	spx5_rmw(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(671), /* 672->671 */
+		 QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE,
+		 sparx5,
+		 QSYS_CAL_CTRL);
+
+	/* Grant idle usage to VD 0-2 */
+	for (idx = 2; idx < 5; idx++)
+		spx5_wr(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(12),
+			sparx5,
+			HSCH_OUTB_SHARE_ENA(idx));
+
+	/* Enable Auto mode */
+	spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(8),
+		 QSYS_CAL_CTRL_CAL_MODE,
+		 sparx5, QSYS_CAL_CTRL);
+
+	/* Verify successful calendar config */
+	value = spx5_rd(sparx5, QSYS_CAL_CTRL);
+	if (QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(value)) {
+		dev_err(sparx5->dev, "QSYS calendar error\n");
+		err = -EINVAL;
+	}
+	return err;
+}
+
+static u32 sparx5_dsm_exb_gcd(u32 a, u32 b)
+{
+	if (b == 0)
+		return a;
+	return sparx5_dsm_exb_gcd(b, a % b);
+}
+
+static u32 sparx5_dsm_cal_len(u32 *cal)
+{
+	u32 idx = 0, len = 0;
+
+	while (idx < SPX5_DSM_CAL_LEN) {
+		if (cal[idx] != SPX5_DSM_CAL_EMPTY)
+			len++;
+		idx++;
+	}
+	return len;
+}
+
+static u32 sparx5_dsm_cp_cal(u32 *sched)
+{
+	u32 idx = 0, tmp;
+
+	while (idx < SPX5_DSM_CAL_LEN) {
+		if (sched[idx] != SPX5_DSM_CAL_EMPTY) {
+			tmp = sched[idx];
+			sched[idx] = SPX5_DSM_CAL_EMPTY;
+			return tmp;
+		}
+		idx++;
+	}
+	return SPX5_DSM_CAL_EMPTY;
+}
+
+static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+				    struct sparx5_calendar_data *data)
+{
+	bool slow_mode;
+	u32 gcd, idx, sum, min, factor;
+	u32 num_of_slots, slot_spd, empty_slots;
+	u32 taxi_bw, clk_period_ps;
+
+	clk_period_ps = sparx5_clk_period(sparx5->coreclock);
+	taxi_bw = 128 * 1000000 / clk_period_ps;
+	slow_mode = !!(clk_period_ps > 2000);
+	memcpy(data->taxi_ports, &sparx5_taxi_ports[taxi],
+	       sizeof(data->taxi_ports));
+
+	for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
+		data->new_slots[idx] = SPX5_DSM_CAL_EMPTY;
+		data->schedule[idx] = SPX5_DSM_CAL_EMPTY;
+		data->temp_sched[idx] = SPX5_DSM_CAL_EMPTY;
+	}
+	/* Default empty calendar */
+	data->schedule[0] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;
+
+	/* Map ports to taxi positions */
+	for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) {
+		u32 portno = data->taxi_ports[idx];
+
+		if (portno < SPX5_TAXI_PORT_MAX) {
+			data->taxi_speeds[idx] = sparx5_cal_speed_to_value
+				(sparx5_get_port_cal_speed(sparx5, portno));
+		} else {
+			data->taxi_speeds[idx] = 0;
+		}
+	}
+
+	sum = 0;
+	min = 25000;
+	for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
+		u32 jdx;
+
+		sum += data->taxi_speeds[idx];
+		if (data->taxi_speeds[idx] && data->taxi_speeds[idx] < min)
+			min = data->taxi_speeds[idx];
+		gcd = min;
+		for (jdx = 0; jdx < ARRAY_SIZE(data->taxi_speeds); jdx++)
+			gcd = sparx5_dsm_exb_gcd(gcd, data->taxi_speeds[jdx]);
+	}
+	if (sum == 0) /* Empty calendar */
+		return 0;
+	/* Make room for overhead traffic */
+	factor = 100 * 100 * 1000 / (100 * 100 - SPX5_DSM_CAL_BW_LOSS);
+
+	if (sum * factor > (taxi_bw * 1000)) {
+		dev_err(sparx5->dev,
+			"Taxi %u, Requested BW %u above available BW %u\n",
+			taxi, sum, taxi_bw);
+		return -EINVAL;
+	}
+	for (idx = 0; idx < 4; idx++) {
+		u32 raw_spd;
+
+		if (idx == 0)
+			raw_spd = gcd / 5;
+		else if (idx == 1)
+			raw_spd = gcd / 2;
+		else if (idx == 2)
+			raw_spd = gcd;
+		else
+			raw_spd = min;
+		slot_spd = raw_spd * factor / 1000;
+		num_of_slots = taxi_bw / slot_spd;
+		if (num_of_slots <= 64)
+			break;
+	}
+
+	num_of_slots = num_of_slots > 64 ? 64 : num_of_slots;
+	slot_spd = taxi_bw / num_of_slots;
+
+	sum = 0;
+	for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
+		u32 spd = data->taxi_speeds[idx];
+		u32 adjusted_speed = data->taxi_speeds[idx] * factor / 1000;
+
+		if (adjusted_speed > 0) {
+			data->avg_dist[idx] = (128 * 1000000 * 10) /
+				(adjusted_speed * clk_period_ps);
+		} else {
+			data->avg_dist[idx] = -1;
+		}
+		data->dev_slots[idx] = ((spd * factor / slot_spd) + 999) / 1000;
+		if (spd != 25000 && (spd != 10000 || !slow_mode)) {
+			if (num_of_slots < (5 * data->dev_slots[idx])) {
+				dev_err(sparx5->dev,
+					"Taxi %u, speed %u, Low slot sep.\n",
+					taxi, spd);
+				return -EINVAL;
+			}
+		}
+		sum += data->dev_slots[idx];
+		if (sum > num_of_slots) {
+			dev_err(sparx5->dev,
+				"Taxi %u with overhead factor %u\n",
+				taxi, factor);
+			return -EINVAL;
+		}
+	}
+
+	empty_slots = num_of_slots - sum;
+
+	for (idx = 0; idx < empty_slots; idx++)
+		data->schedule[idx] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;
+
+	for (idx = 1; idx < num_of_slots; idx++) {
+		u32 indices_len = 0;
+		u32 slot, jdx, kdx, ts;
+		s32 cnt;
+		u32 num_of_old_slots, num_of_new_slots, tgt_score;
+
+		for (slot = 0; slot < ARRAY_SIZE(data->dev_slots); slot++) {
+			if (data->dev_slots[slot] == idx) {
+				data->indices[indices_len] = slot;
+				indices_len++;
+			}
+		}
+		if (indices_len == 0)
+			continue;
+		kdx = 0;
+		for (slot = 0; slot < idx; slot++) {
+			for (jdx = 0; jdx < indices_len; jdx++, kdx++)
+				data->new_slots[kdx] = data->indices[jdx];
+		}
+
+		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+			if (data->schedule[slot] == SPX5_DSM_CAL_EMPTY)
+				break;
+		}
+
+		num_of_old_slots = slot;
+		num_of_new_slots = kdx;
+		cnt = 0;
+		ts = 0;
+
+		if (num_of_new_slots > num_of_old_slots) {
+			memcpy(data->short_list, data->schedule,
+			       sizeof(data->short_list));
+			memcpy(data->long_list, data->new_slots,
+			       sizeof(data->long_list));
+			tgt_score = 100000 * num_of_old_slots /
+				num_of_new_slots;
+		} else {
+			memcpy(data->short_list, data->new_slots,
+			       sizeof(data->short_list));
+			memcpy(data->long_list, data->schedule,
+			       sizeof(data->long_list));
+			tgt_score = 100000 * num_of_new_slots /
+				num_of_old_slots;
+		}
+
+		while (sparx5_dsm_cal_len(data->short_list) > 0 ||
+		       sparx5_dsm_cal_len(data->long_list) > 0) {
+			u32 act = 0;
+
+			if (sparx5_dsm_cal_len(data->short_list) > 0) {
+				data->temp_sched[ts] =
+					sparx5_dsm_cp_cal(data->short_list);
+				ts++;
+				cnt += 100000;
+				act = 1;
+			}
+			while (sparx5_dsm_cal_len(data->long_list) > 0 &&
+			       cnt > 0) {
+				data->temp_sched[ts] =
+					sparx5_dsm_cp_cal(data->long_list);
+				ts++;
+				cnt -= tgt_score;
+				act = 1;
+			}
+			if (act == 0) {
+				dev_err(sparx5->dev,
+					"Error in DSM calendar calculation\n");
+				return -EINVAL;
+			}
+		}
+
+		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+			if (data->temp_sched[slot] == SPX5_DSM_CAL_EMPTY)
+				break;
+		}
+		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+			data->schedule[slot] = data->temp_sched[slot];
+			data->temp_sched[slot] = SPX5_DSM_CAL_EMPTY;
+			data->new_slots[slot] = SPX5_DSM_CAL_EMPTY;
+		}
+	}
+	return 0;
+}
+
+static int sparx5_dsm_calendar_check(struct sparx5 *sparx5,
+				     struct sparx5_calendar_data *data)
+{
+	u32 num_of_slots, idx, port;
+	int cnt, max_dist;
+	u32 slot_indices[SPX5_DSM_CAL_LEN], distances[SPX5_DSM_CAL_LEN];
+	u32 cal_length = sparx5_dsm_cal_len(data->schedule);
+
+	for (port = 0; port < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; port++) {
+		num_of_slots = 0;
+		max_dist = data->avg_dist[port];
+		for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
+			slot_indices[idx] = SPX5_DSM_CAL_EMPTY;
+			distances[idx] = SPX5_DSM_CAL_EMPTY;
+		}
+
+		for (idx = 0; idx < cal_length; idx++) {
+			if (data->schedule[idx] == port) {
+				slot_indices[num_of_slots] = idx;
+				num_of_slots++;
+			}
+		}
+
+		slot_indices[num_of_slots] = slot_indices[0] + cal_length;
+
+		for (idx = 0; idx < num_of_slots; idx++) {
+			distances[idx] = (slot_indices[idx + 1] -
+					  slot_indices[idx]) * 10;
+		}
+
+		for (idx = 0; idx < num_of_slots; idx++) {
+			u32 jdx, kdx;
+
+			cnt = distances[idx] - max_dist;
+			if (cnt < 0)
+				cnt = -cnt;
+			kdx = 0;
+			for (jdx = (idx + 1) % num_of_slots;
+			     jdx != idx;
+			     jdx = (jdx + 1) % num_of_slots, kdx++) {
+				cnt = cnt + distances[jdx] - max_dist;
+				if (cnt < 0)
+					cnt = -cnt;
+				if (cnt > max_dist)
+					goto check_err;
+			}
+		}
+	}
+	return 0;
+check_err:
+	dev_err(sparx5->dev,
+		"Port %u: distance %u above limit %d\n",
+		port, cnt, max_dist);
+	return -EINVAL;
+}
+
+static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
+				      struct sparx5_calendar_data *data)
+{
+	u32 idx;
+	u32 cal_len = sparx5_dsm_cal_len(data->schedule), len;
+
+	spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
+		sparx5,
+		DSM_TAXI_CAL_CFG(taxi));
+	for (idx = 0; idx < cal_len; idx++) {
+		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_IDX_SET(idx),
+			 DSM_TAXI_CAL_CFG_CAL_IDX,
+			 sparx5,
+			 DSM_TAXI_CAL_CFG(taxi));
+		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(data->schedule[idx]),
+			 DSM_TAXI_CAL_CFG_CAL_PGM_VAL,
+			 sparx5,
+			 DSM_TAXI_CAL_CFG(taxi));
+	}
+	spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
+		sparx5,
+		DSM_TAXI_CAL_CFG(taxi));
+	len = DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(spx5_rd(sparx5,
+						       DSM_TAXI_CAL_CFG(taxi)));
+	if (len != cal_len - 1)
+		goto update_err;
+	return 0;
+update_err:
+	dev_err(sparx5->dev, "Incorrect calendar length: %u\n", len);
+	return -EINVAL;
+}
+
+/* Configure the DSM calendar based on port configuration */
+int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
+{
+	int taxi;
+	struct sparx5_calendar_data *data;
+	int err = 0;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	for (taxi = 0; taxi < SPX5_DSM_CAL_TAXIS; ++taxi) {
+		err = sparx5_dsm_calendar_calc(sparx5, taxi, data);
+		if (err) {
+			dev_err(sparx5->dev, "DSM calendar calculation failed\n");
+			goto cal_out;
+		}
+		err = sparx5_dsm_calendar_check(sparx5, data);
+		if (err) {
+			dev_err(sparx5->dev, "DSM calendar check failed\n");
+			goto cal_out;
+		}
+		err = sparx5_dsm_calendar_update(sparx5, taxi, data);
+		if (err) {
+			dev_err(sparx5->dev, "DSM calendar update failed\n");
+			goto cal_out;
+		}
+	}
+cal_out:
+	kfree(data);
+	return err;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
new file mode 100644
index 000000000..01f3a3a41
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -0,0 +1,1264 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/ethtool.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+/* Index of ANA_AC port counters */
+#define SPX5_PORT_POLICER_DROPS 0
+
+/* Add a potentially wrapping 32 bit value to a 64 bit counter */
+static void sparx5_update_counter(u64 *cnt, u32 val)
+{
+	if (val < (*cnt & U32_MAX))
+		*cnt += (u64)1 << 32; /* value has wrapped */
+	*cnt = (*cnt & ~(u64)U32_MAX) + val;
+}
+
+enum sparx5_stats_entry {
+	spx5_stats_rx_symbol_err_cnt = 0,
+	spx5_stats_pmac_rx_symbol_err_cnt = 1,
+	spx5_stats_tx_uc_cnt = 2,
+	spx5_stats_pmac_tx_uc_cnt = 3,
+	spx5_stats_tx_mc_cnt = 4,
+	spx5_stats_tx_bc_cnt = 5,
+	spx5_stats_tx_backoff1_cnt = 6,
+	spx5_stats_tx_multi_coll_cnt = 7,
+	spx5_stats_rx_uc_cnt = 8,
+	spx5_stats_pmac_rx_uc_cnt = 9,
+	spx5_stats_rx_mc_cnt = 10,
+	spx5_stats_rx_bc_cnt = 11,
+	spx5_stats_rx_crc_err_cnt = 12,
+	spx5_stats_pmac_rx_crc_err_cnt = 13,
+	spx5_stats_rx_alignment_lost_cnt = 14,
+	spx5_stats_pmac_rx_alignment_lost_cnt = 15,
+	spx5_stats_tx_ok_bytes_cnt = 16,
+	spx5_stats_pmac_tx_ok_bytes_cnt = 17,
+	spx5_stats_tx_defer_cnt = 18,
+	spx5_stats_tx_late_coll_cnt = 19,
+	spx5_stats_tx_xcoll_cnt = 20,
+	spx5_stats_tx_csense_cnt = 21,
+	spx5_stats_rx_ok_bytes_cnt = 22,
+	spx5_stats_pmac_rx_ok_bytes_cnt = 23,
+	spx5_stats_pmac_tx_mc_cnt = 24,
+	spx5_stats_pmac_tx_bc_cnt = 25,
+	spx5_stats_tx_xdefer_cnt = 26,
+	spx5_stats_pmac_rx_mc_cnt = 27,
+	spx5_stats_pmac_rx_bc_cnt = 28,
+	spx5_stats_rx_in_range_len_err_cnt = 29,
+	spx5_stats_pmac_rx_in_range_len_err_cnt = 30,
+	spx5_stats_rx_out_of_range_len_err_cnt = 31,
+	spx5_stats_pmac_rx_out_of_range_len_err_cnt = 32,
+	spx5_stats_rx_oversize_cnt = 33,
+	spx5_stats_pmac_rx_oversize_cnt = 34,
+	spx5_stats_tx_pause_cnt = 35,
+	spx5_stats_pmac_tx_pause_cnt = 36,
+	spx5_stats_rx_pause_cnt = 37,
+	spx5_stats_pmac_rx_pause_cnt = 38,
+	spx5_stats_rx_unsup_opcode_cnt = 39,
+	spx5_stats_pmac_rx_unsup_opcode_cnt = 40,
+	spx5_stats_rx_undersize_cnt = 41,
+	spx5_stats_pmac_rx_undersize_cnt = 42,
+	spx5_stats_rx_fragments_cnt = 43,
+	spx5_stats_pmac_rx_fragments_cnt = 44,
+	spx5_stats_rx_jabbers_cnt = 45,
+	spx5_stats_pmac_rx_jabbers_cnt = 46,
+	spx5_stats_rx_size64_cnt = 47,
+	spx5_stats_pmac_rx_size64_cnt = 48,
+	spx5_stats_rx_size65to127_cnt = 49,
+	spx5_stats_pmac_rx_size65to127_cnt = 50,
+	spx5_stats_rx_size128to255_cnt = 51,
+	spx5_stats_pmac_rx_size128to255_cnt = 52,
+	spx5_stats_rx_size256to511_cnt = 53,
+	spx5_stats_pmac_rx_size256to511_cnt = 54,
+	spx5_stats_rx_size512to1023_cnt = 55,
+	spx5_stats_pmac_rx_size512to1023_cnt = 56,
+	spx5_stats_rx_size1024to1518_cnt = 57,
+	spx5_stats_pmac_rx_size1024to1518_cnt = 58,
+	spx5_stats_rx_size1519tomax_cnt = 59,
+	spx5_stats_pmac_rx_size1519tomax_cnt = 60,
+	spx5_stats_tx_size64_cnt = 61,
+	spx5_stats_pmac_tx_size64_cnt = 62,
+	spx5_stats_tx_size65to127_cnt = 63,
+	spx5_stats_pmac_tx_size65to127_cnt = 64,
+	spx5_stats_tx_size128to255_cnt = 65,
+	spx5_stats_pmac_tx_size128to255_cnt = 66,
+	spx5_stats_tx_size256to511_cnt = 67,
+	spx5_stats_pmac_tx_size256to511_cnt = 68,
+	spx5_stats_tx_size512to1023_cnt = 69,
+	spx5_stats_pmac_tx_size512to1023_cnt = 70,
+	spx5_stats_tx_size1024to1518_cnt = 71,
+	spx5_stats_pmac_tx_size1024to1518_cnt = 72,
+	spx5_stats_tx_size1519tomax_cnt = 73,
+	spx5_stats_pmac_tx_size1519tomax_cnt = 74,
+	spx5_stats_mm_rx_assembly_err_cnt = 75,
+	spx5_stats_mm_rx_assembly_ok_cnt = 76,
+	spx5_stats_mm_rx_merge_frag_cnt = 77,
+	spx5_stats_mm_rx_smd_err_cnt = 78,
+	spx5_stats_mm_tx_pfragment_cnt = 79,
+	spx5_stats_rx_bad_bytes_cnt = 80,
+	spx5_stats_pmac_rx_bad_bytes_cnt = 81,
+	spx5_stats_rx_in_bytes_cnt = 82,
+	spx5_stats_rx_ipg_shrink_cnt = 83,
+	spx5_stats_rx_sync_lost_err_cnt = 84,
+	spx5_stats_rx_tagged_frms_cnt = 85,
+	spx5_stats_rx_untagged_frms_cnt = 86,
+	spx5_stats_tx_out_bytes_cnt = 87,
+	spx5_stats_tx_tagged_frms_cnt = 88,
+	spx5_stats_tx_untagged_frms_cnt = 89,
+	spx5_stats_rx_hih_cksm_err_cnt = 90,
+	spx5_stats_pmac_rx_hih_cksm_err_cnt = 91,
+	spx5_stats_rx_xgmii_prot_err_cnt = 92,
+	spx5_stats_pmac_rx_xgmii_prot_err_cnt = 93,
+	spx5_stats_ana_ac_port_stat_lsb_cnt = 94,
+	spx5_stats_green_p0_rx_fwd = 95,
+	spx5_stats_green_p0_rx_port_drop = 111,
+	spx5_stats_green_p0_tx_port = 127,
+	spx5_stats_rx_local_drop = 143,
+	spx5_stats_tx_local_drop = 144,
+	spx5_stats_count = 145,
+};
+
+static const char *const sparx5_stats_layout[] = {
+	"mm_rx_assembly_err_cnt",
+	"mm_rx_assembly_ok_cnt",
+	"mm_rx_merge_frag_cnt",
+	"mm_rx_smd_err_cnt",
+	"mm_tx_pfragment_cnt",
+	"rx_bad_bytes_cnt",
+	"pmac_rx_bad_bytes_cnt",
+	"rx_in_bytes_cnt",
+	"rx_ipg_shrink_cnt",
+	"rx_sync_lost_err_cnt",
+	"rx_tagged_frms_cnt",
+	"rx_untagged_frms_cnt",
+	"tx_out_bytes_cnt",
+	"tx_tagged_frms_cnt",
+	"tx_untagged_frms_cnt",
+	"rx_hih_cksm_err_cnt",
+	"pmac_rx_hih_cksm_err_cnt",
+	"rx_xgmii_prot_err_cnt",
+	"pmac_rx_xgmii_prot_err_cnt",
+	"rx_port_policer_drop",
+	"rx_fwd_green_p0",
+	"rx_fwd_green_p1",
+	"rx_fwd_green_p2",
+	"rx_fwd_green_p3",
+	"rx_fwd_green_p4",
+	"rx_fwd_green_p5",
+	"rx_fwd_green_p6",
+	"rx_fwd_green_p7",
+	"rx_fwd_yellow_p0",
+	"rx_fwd_yellow_p1",
+	"rx_fwd_yellow_p2",
+	"rx_fwd_yellow_p3",
+	"rx_fwd_yellow_p4",
+	"rx_fwd_yellow_p5",
+	"rx_fwd_yellow_p6",
+	"rx_fwd_yellow_p7",
+	"rx_port_drop_green_p0",
+	"rx_port_drop_green_p1",
+	"rx_port_drop_green_p2",
+	"rx_port_drop_green_p3",
+	"rx_port_drop_green_p4",
+	"rx_port_drop_green_p5",
+	"rx_port_drop_green_p6",
+	"rx_port_drop_green_p7",
+	"rx_port_drop_yellow_p0",
+	"rx_port_drop_yellow_p1",
+	"rx_port_drop_yellow_p2",
+	"rx_port_drop_yellow_p3",
+	"rx_port_drop_yellow_p4",
+	"rx_port_drop_yellow_p5",
+	"rx_port_drop_yellow_p6",
+	"rx_port_drop_yellow_p7",
+	"tx_port_green_p0",
+	"tx_port_green_p1",
+	"tx_port_green_p2",
+	"tx_port_green_p3",
+	"tx_port_green_p4",
+	"tx_port_green_p5",
+	"tx_port_green_p6",
+	"tx_port_green_p7",
+	"tx_port_yellow_p0",
+	"tx_port_yellow_p1",
+	"tx_port_yellow_p2",
+	"tx_port_yellow_p3",
+	"tx_port_yellow_p4",
+	"tx_port_yellow_p5",
+	"tx_port_yellow_p6",
+	"tx_port_yellow_p7",
+	"rx_local_drop",
+	"tx_local_drop",
+};
+
+static void sparx5_get_queue_sys_stats(struct sparx5 *sparx5, int portno)
+{
+	u64 *portstats;
+	u64 *stats;
+	u32 addr;
+	int idx;
+
+	portstats = &sparx5->stats[portno * sparx5->num_stats];
+	mutex_lock(&sparx5->queue_stats_lock);
+	spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(portno), sparx5, XQS_STAT_CFG);
+	addr = 0;
+	stats = &portstats[spx5_stats_green_p0_rx_fwd];
+	for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+		sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+	addr = 16;
+	stats = &portstats[spx5_stats_green_p0_rx_port_drop];
+	for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+		sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+	addr = 256;
+	stats = &portstats[spx5_stats_green_p0_tx_port];
+	for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+		sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_local_drop],
+			      spx5_rd(sparx5, XQS_CNT(32)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_local_drop],
+			      spx5_rd(sparx5, XQS_CNT(272)));
+	mutex_unlock(&sparx5->queue_stats_lock);
+}
+
+static void sparx5_get_ana_ac_stats_stats(struct sparx5 *sparx5, int portno)
+{
+	u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+
+	sparx5_update_counter(&portstats[spx5_stats_ana_ac_port_stat_lsb_cnt],
+			      spx5_rd(sparx5, ANA_AC_PORT_STAT_LSB_CNT(portno,
+								       SPX5_PORT_POLICER_DROPS)));
+}
+
+static void sparx5_get_dev_phy_stats(u64 *portstats, void __iomem *inst, u32
+				     tinst)
+{
+	sparx5_update_counter(&portstats[spx5_stats_rx_symbol_err_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_SYMBOL_ERR_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_symbol_err_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_SYMBOL_ERR_CNT(tinst)));
+}
+
+static void sparx5_get_dev_mac_stats(u64 *portstats, void __iomem *inst, u32
+				     tinst)
+{
+	sparx5_update_counter(&portstats[spx5_stats_tx_uc_cnt],
+			      spx5_inst_rd(inst, DEV5G_TX_UC_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_uc_cnt],
+			      spx5_inst_rd(inst, DEV5G_PMAC_TX_UC_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_mc_cnt],
+			      spx5_inst_rd(inst, DEV5G_TX_MC_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_bc_cnt],
+			      spx5_inst_rd(inst, DEV5G_TX_BC_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_uc_cnt],
+			      spx5_inst_rd(inst, DEV5G_RX_UC_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_uc_cnt],
+			      spx5_inst_rd(inst, DEV5G_PMAC_RX_UC_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_mc_cnt],
+			      spx5_inst_rd(inst, DEV5G_RX_MC_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_bc_cnt],
+			      spx5_inst_rd(inst, DEV5G_RX_BC_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_crc_err_cnt],
+			      spx5_inst_rd(inst, DEV5G_RX_CRC_ERR_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_crc_err_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_CRC_ERR_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_alignment_lost_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_ALIGNMENT_LOST_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_alignment_lost_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_ok_bytes_cnt],
+			      spx5_inst_rd(inst, DEV5G_TX_OK_BYTES_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_ok_bytes_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_TX_OK_BYTES_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_ok_bytes_cnt],
+			      spx5_inst_rd(inst, DEV5G_RX_OK_BYTES_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_ok_bytes_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_OK_BYTES_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_mc_cnt],
+			      spx5_inst_rd(inst, DEV5G_PMAC_TX_MC_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_bc_cnt],
+			      spx5_inst_rd(inst, DEV5G_PMAC_TX_BC_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_mc_cnt],
+			      spx5_inst_rd(inst, DEV5G_PMAC_RX_MC_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bc_cnt],
+			      spx5_inst_rd(inst, DEV5G_PMAC_RX_BC_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_in_range_len_err_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_IN_RANGE_LEN_ERR_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_in_range_len_err_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_out_of_range_len_err_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+			      spx5_inst_rd(inst, DEV5G_RX_OVERSIZE_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_OVERSIZE_CNT(tinst)));
+}
+
+static void sparx5_get_dev_mac_ctrl_stats(u64 *portstats, void __iomem *inst,
+					  u32 tinst)
+{
+	sparx5_update_counter(&portstats[spx5_stats_tx_pause_cnt],
+			      spx5_inst_rd(inst, DEV5G_TX_PAUSE_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_pause_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_TX_PAUSE_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_pause_cnt],
+			      spx5_inst_rd(inst, DEV5G_RX_PAUSE_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_pause_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_PAUSE_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_unsup_opcode_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_UNSUP_OPCODE_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_unsup_opcode_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(tinst)));
+}
+
+static void sparx5_get_dev_rmon_stats(u64 *portstats, void __iomem *inst, u32
+				      tinst)
+{
+	sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_UNDERSIZE_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_UNDERSIZE_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+			      spx5_inst_rd(inst, DEV5G_RX_OVERSIZE_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_OVERSIZE_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_FRAGMENTS_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_FRAGMENTS_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt],
+			      spx5_inst_rd(inst, DEV5G_RX_JABBERS_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_JABBERS_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt],
+			      spx5_inst_rd(inst, DEV5G_RX_SIZE64_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_SIZE64_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_SIZE65TO127_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_SIZE65TO127_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_SIZE128TO255_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_SIZE128TO255_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_SIZE256TO511_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_SIZE256TO511_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_SIZE512TO1023_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_SIZE512TO1023_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_SIZE1024TO1518_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_SIZE1024TO1518_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_SIZE1519TOMAX_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt],
+			      spx5_inst_rd(inst, DEV5G_TX_SIZE64_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_TX_SIZE64_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_TX_SIZE65TO127_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_TX_SIZE65TO127_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_TX_SIZE128TO255_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_TX_SIZE128TO255_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_TX_SIZE256TO511_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_TX_SIZE256TO511_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_TX_SIZE512TO1023_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_TX_SIZE512TO1023_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_TX_SIZE1024TO1518_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_TX_SIZE1024TO1518_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_TX_SIZE1519TOMAX_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(tinst)));
+}
+
+static void sparx5_get_dev_misc_stats(u64 *portstats, void __iomem *inst, u32
+				      tinst)
+{
+	sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_err_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_MM_RX_ASSEMBLY_ERR_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_ok_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_MM_RX_ASSEMBLY_OK_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_mm_rx_merge_frag_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_MM_RX_MERGE_FRAG_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_mm_rx_smd_err_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_MM_RX_SMD_ERR_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_mm_tx_pfragment_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_MM_TX_PFRAGMENT_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_bad_bytes_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_BAD_BYTES_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bad_bytes_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_BAD_BYTES_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_in_bytes_cnt],
+			      spx5_inst_rd(inst, DEV5G_RX_IN_BYTES_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_ipg_shrink_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_IPG_SHRINK_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_tagged_frms_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_TAGGED_FRMS_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_untagged_frms_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_UNTAGGED_FRMS_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_out_bytes_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_TX_OUT_BYTES_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_tagged_frms_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_TX_TAGGED_FRMS_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_untagged_frms_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_TX_UNTAGGED_FRMS_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_hih_cksm_err_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_HIH_CKSM_ERR_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_hih_cksm_err_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_xgmii_prot_err_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_RX_XGMII_PROT_ERR_CNT(tinst)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_xgmii_prot_err_cnt],
+			      spx5_inst_rd(inst,
+					   DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(tinst)));
+}
+
+static void sparx5_get_device_stats(struct sparx5 *sparx5, int portno)
+{
+	u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+	u32 tinst = sparx5_port_dev_index(portno);
+	u32 dev = sparx5_to_high_dev(portno);
+	void __iomem *inst;
+
+	inst = spx5_inst_get(sparx5, dev, tinst);
+	sparx5_get_dev_phy_stats(portstats, inst, tinst);
+	sparx5_get_dev_mac_stats(portstats, inst, tinst);
+	sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst);
+	sparx5_get_dev_rmon_stats(portstats, inst, tinst);
+	sparx5_get_dev_misc_stats(portstats, inst, tinst);
+}
+
+static void sparx5_get_asm_phy_stats(u64 *portstats, void __iomem *inst, int
+				     portno)
+{
+	sparx5_update_counter(&portstats[spx5_stats_rx_symbol_err_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_SYMBOL_ERR_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_symbol_err_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_SYMBOL_ERR_CNT(portno)));
+}
+
+static void sparx5_get_asm_mac_stats(u64 *portstats, void __iomem *inst, int
+				     portno)
+{
+	sparx5_update_counter(&portstats[spx5_stats_tx_uc_cnt],
+			      spx5_inst_rd(inst, ASM_TX_UC_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_uc_cnt],
+			      spx5_inst_rd(inst, ASM_PMAC_TX_UC_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_mc_cnt],
+			      spx5_inst_rd(inst, ASM_TX_MC_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_bc_cnt],
+			      spx5_inst_rd(inst, ASM_TX_BC_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_backoff1_cnt],
+			      spx5_inst_rd(inst, ASM_TX_BACKOFF1_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_multi_coll_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_TX_MULTI_COLL_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_uc_cnt],
+			      spx5_inst_rd(inst, ASM_RX_UC_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_uc_cnt],
+			      spx5_inst_rd(inst, ASM_PMAC_RX_UC_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_mc_cnt],
+			      spx5_inst_rd(inst, ASM_RX_MC_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_bc_cnt],
+			      spx5_inst_rd(inst, ASM_RX_BC_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_crc_err_cnt],
+			      spx5_inst_rd(inst, ASM_RX_CRC_ERR_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_crc_err_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_CRC_ERR_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_alignment_lost_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_ALIGNMENT_LOST_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_alignment_lost_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_ALIGNMENT_LOST_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_ok_bytes_cnt],
+			      spx5_inst_rd(inst, ASM_TX_OK_BYTES_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_ok_bytes_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_TX_OK_BYTES_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_defer_cnt],
+			      spx5_inst_rd(inst, ASM_TX_DEFER_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_late_coll_cnt],
+			      spx5_inst_rd(inst, ASM_TX_LATE_COLL_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_xcoll_cnt],
+			      spx5_inst_rd(inst, ASM_TX_XCOLL_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_csense_cnt],
+			      spx5_inst_rd(inst, ASM_TX_CSENSE_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_ok_bytes_cnt],
+			      spx5_inst_rd(inst, ASM_RX_OK_BYTES_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_ok_bytes_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_OK_BYTES_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_mc_cnt],
+			      spx5_inst_rd(inst, ASM_PMAC_TX_MC_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_bc_cnt],
+			      spx5_inst_rd(inst, ASM_PMAC_TX_BC_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_xdefer_cnt],
+			      spx5_inst_rd(inst, ASM_TX_XDEFER_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_mc_cnt],
+			      spx5_inst_rd(inst, ASM_PMAC_RX_MC_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bc_cnt],
+			      spx5_inst_rd(inst, ASM_PMAC_RX_BC_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_in_range_len_err_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_IN_RANGE_LEN_ERR_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_in_range_len_err_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_out_of_range_len_err_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+			      spx5_inst_rd(inst, ASM_RX_OVERSIZE_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_OVERSIZE_CNT(portno)));
+}
+
+static void sparx5_get_asm_mac_ctrl_stats(u64 *portstats, void __iomem *inst,
+					  int portno)
+{
+	sparx5_update_counter(&portstats[spx5_stats_tx_pause_cnt],
+			      spx5_inst_rd(inst, ASM_TX_PAUSE_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_pause_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_TX_PAUSE_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_pause_cnt],
+			      spx5_inst_rd(inst, ASM_RX_PAUSE_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_pause_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_PAUSE_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_unsup_opcode_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_UNSUP_OPCODE_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_unsup_opcode_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_UNSUP_OPCODE_CNT(portno)));
+}
+
+static void sparx5_get_asm_rmon_stats(u64 *portstats, void __iomem *inst, int
+				      portno)
+{
+	sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt],
+			      spx5_inst_rd(inst, ASM_RX_UNDERSIZE_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_UNDERSIZE_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+			      spx5_inst_rd(inst, ASM_RX_OVERSIZE_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_OVERSIZE_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt],
+			      spx5_inst_rd(inst, ASM_RX_FRAGMENTS_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_FRAGMENTS_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt],
+			      spx5_inst_rd(inst, ASM_RX_JABBERS_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_JABBERS_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt],
+			      spx5_inst_rd(inst, ASM_RX_SIZE64_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_SIZE64_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_SIZE65TO127_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_SIZE65TO127_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_SIZE128TO255_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_SIZE128TO255_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_SIZE256TO511_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_SIZE256TO511_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_SIZE512TO1023_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_SIZE512TO1023_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_SIZE1024TO1518_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_SIZE1024TO1518_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_SIZE1519TOMAX_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_SIZE1519TOMAX_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt],
+			      spx5_inst_rd(inst, ASM_TX_SIZE64_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_TX_SIZE64_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_TX_SIZE65TO127_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_TX_SIZE65TO127_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_TX_SIZE128TO255_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_TX_SIZE128TO255_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_TX_SIZE256TO511_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_TX_SIZE256TO511_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_TX_SIZE512TO1023_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_TX_SIZE512TO1023_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_TX_SIZE1024TO1518_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_TX_SIZE1024TO1518_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_TX_SIZE1519TOMAX_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_TX_SIZE1519TOMAX_CNT(portno)));
+}
+
+static void sparx5_get_asm_misc_stats(u64 *portstats, void __iomem *inst, int
+				      portno)
+{
+	sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_err_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_MM_RX_ASSEMBLY_ERR_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_ok_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_MM_RX_ASSEMBLY_OK_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_mm_rx_merge_frag_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_MM_RX_MERGE_FRAG_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_mm_rx_smd_err_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_MM_RX_SMD_ERR_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_mm_tx_pfragment_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_MM_TX_PFRAGMENT_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_bad_bytes_cnt],
+			      spx5_inst_rd(inst, ASM_RX_BAD_BYTES_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bad_bytes_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_PMAC_RX_BAD_BYTES_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_in_bytes_cnt],
+			      spx5_inst_rd(inst, ASM_RX_IN_BYTES_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_ipg_shrink_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_IPG_SHRINK_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_sync_lost_err_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_SYNC_LOST_ERR_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_tagged_frms_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_TAGGED_FRMS_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_rx_untagged_frms_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_RX_UNTAGGED_FRMS_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_out_bytes_cnt],
+			      spx5_inst_rd(inst, ASM_TX_OUT_BYTES_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_tagged_frms_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_TX_TAGGED_FRMS_CNT(portno)));
+	sparx5_update_counter(&portstats[spx5_stats_tx_untagged_frms_cnt],
+			      spx5_inst_rd(inst,
+					   ASM_TX_UNTAGGED_FRMS_CNT(portno)));
+}
+
+static void sparx5_get_asm_stats(struct sparx5 *sparx5, int portno)
+{
+	u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+	void __iomem *inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+
+	sparx5_get_asm_phy_stats(portstats, inst, portno);
+	sparx5_get_asm_mac_stats(portstats, inst, portno);
+	sparx5_get_asm_mac_ctrl_stats(portstats, inst, portno);
+	sparx5_get_asm_rmon_stats(portstats, inst, portno);
+	sparx5_get_asm_misc_stats(portstats, inst, portno);
+}
+
+static const struct ethtool_rmon_hist_range sparx5_rmon_ranges[] = {
+	{    0,    64 },
+	{   65,   127 },
+	{  128,   255 },
+	{  256,   511 },
+	{  512,  1023 },
+	{ 1024,  1518 },
+	{ 1519, 10239 },
+	{}
+};
+
+static void sparx5_get_eth_phy_stats(struct net_device *ndev,
+				     struct ethtool_eth_phy_stats *phy_stats)
+{
+	struct sparx5_port *port = netdev_priv(ndev);
+	struct sparx5 *sparx5 = port->sparx5;
+	int portno = port->portno;
+	void __iomem *inst;
+	u64 *portstats;
+
+	portstats = &sparx5->stats[portno * sparx5->num_stats];
+	if (sparx5_is_baser(port->conf.portmode)) {
+		u32 tinst = sparx5_port_dev_index(portno);
+		u32 dev = sparx5_to_high_dev(portno);
+
+		inst = spx5_inst_get(sparx5, dev, tinst);
+		sparx5_get_dev_phy_stats(portstats, inst, tinst);
+	} else {
+		inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+		sparx5_get_asm_phy_stats(portstats, inst, portno);
+	}
+	phy_stats->SymbolErrorDuringCarrier =
+		portstats[spx5_stats_rx_symbol_err_cnt] +
+		portstats[spx5_stats_pmac_rx_symbol_err_cnt];
+}
+
+static void sparx5_get_eth_mac_stats(struct net_device *ndev,
+				     struct ethtool_eth_mac_stats *mac_stats)
+{
+	struct sparx5_port *port = netdev_priv(ndev);
+	struct sparx5 *sparx5 = port->sparx5;
+	int portno = port->portno;
+	void __iomem *inst;
+	u64 *portstats;
+
+	portstats = &sparx5->stats[portno * sparx5->num_stats];
+	if (sparx5_is_baser(port->conf.portmode)) {
+		u32 tinst = sparx5_port_dev_index(portno);
+		u32 dev = sparx5_to_high_dev(portno);
+
+		inst = spx5_inst_get(sparx5, dev, tinst);
+		sparx5_get_dev_mac_stats(portstats, inst, tinst);
+	} else {
+		inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+		sparx5_get_asm_mac_stats(portstats, inst, portno);
+	}
+	mac_stats->FramesTransmittedOK = portstats[spx5_stats_tx_uc_cnt] +
+		portstats[spx5_stats_pmac_tx_uc_cnt] +
+		portstats[spx5_stats_tx_mc_cnt] +
+		portstats[spx5_stats_tx_bc_cnt];
+	mac_stats->SingleCollisionFrames =
+		portstats[spx5_stats_tx_backoff1_cnt];
+	mac_stats->MultipleCollisionFrames =
+		portstats[spx5_stats_tx_multi_coll_cnt];
+	mac_stats->FramesReceivedOK = portstats[spx5_stats_rx_uc_cnt] +
+		portstats[spx5_stats_pmac_rx_uc_cnt] +
+		portstats[spx5_stats_rx_mc_cnt] +
+		portstats[spx5_stats_rx_bc_cnt];
+	mac_stats->FrameCheckSequenceErrors =
+		portstats[spx5_stats_rx_crc_err_cnt] +
+		portstats[spx5_stats_pmac_rx_crc_err_cnt];
+	mac_stats->AlignmentErrors = portstats[spx5_stats_rx_alignment_lost_cnt] +
+		portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+	mac_stats->OctetsTransmittedOK = portstats[spx5_stats_tx_ok_bytes_cnt] +
+		portstats[spx5_stats_pmac_tx_ok_bytes_cnt];
+	mac_stats->FramesWithDeferredXmissions =
+		portstats[spx5_stats_tx_defer_cnt];
+	mac_stats->LateCollisions =
+		portstats[spx5_stats_tx_late_coll_cnt];
+	mac_stats->FramesAbortedDueToXSColls =
+		portstats[spx5_stats_tx_xcoll_cnt];
+	mac_stats->CarrierSenseErrors = portstats[spx5_stats_tx_csense_cnt];
+	mac_stats->OctetsReceivedOK = portstats[spx5_stats_rx_ok_bytes_cnt] +
+		portstats[spx5_stats_pmac_rx_ok_bytes_cnt];
+	mac_stats->MulticastFramesXmittedOK = portstats[spx5_stats_tx_mc_cnt] +
+		portstats[spx5_stats_pmac_tx_mc_cnt];
+	mac_stats->BroadcastFramesXmittedOK = portstats[spx5_stats_tx_bc_cnt] +
+		portstats[spx5_stats_pmac_tx_bc_cnt];
+	mac_stats->FramesWithExcessiveDeferral =
+		portstats[spx5_stats_tx_xdefer_cnt];
+	mac_stats->MulticastFramesReceivedOK = portstats[spx5_stats_rx_mc_cnt] +
+		portstats[spx5_stats_pmac_rx_mc_cnt];
+	mac_stats->BroadcastFramesReceivedOK = portstats[spx5_stats_rx_bc_cnt] +
+		portstats[spx5_stats_pmac_rx_bc_cnt];
+	mac_stats->InRangeLengthErrors =
+		portstats[spx5_stats_rx_in_range_len_err_cnt] +
+		portstats[spx5_stats_pmac_rx_in_range_len_err_cnt];
+	mac_stats->OutOfRangeLengthField =
+		portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+		portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt];
+	mac_stats->FrameTooLongErrors = portstats[spx5_stats_rx_oversize_cnt] +
+		portstats[spx5_stats_pmac_rx_oversize_cnt];
+}
+
+static void sparx5_get_eth_mac_ctrl_stats(struct net_device *ndev,
+					  struct ethtool_eth_ctrl_stats *mac_ctrl_stats)
+{
+	struct sparx5_port *port = netdev_priv(ndev);
+	struct sparx5 *sparx5 = port->sparx5;
+	int portno = port->portno;
+	void __iomem *inst;
+	u64 *portstats;
+
+	portstats = &sparx5->stats[portno * sparx5->num_stats];
+	if (sparx5_is_baser(port->conf.portmode)) {
+		u32 tinst = sparx5_port_dev_index(portno);
+		u32 dev = sparx5_to_high_dev(portno);
+
+		inst = spx5_inst_get(sparx5, dev, tinst);
+		sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst);
+	} else {
+		inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+		sparx5_get_asm_mac_ctrl_stats(portstats, inst, portno);
+	}
+	mac_ctrl_stats->MACControlFramesTransmitted =
+		portstats[spx5_stats_tx_pause_cnt] +
+		portstats[spx5_stats_pmac_tx_pause_cnt];
+	mac_ctrl_stats->MACControlFramesReceived =
+		portstats[spx5_stats_rx_pause_cnt] +
+		portstats[spx5_stats_pmac_rx_pause_cnt];
+	mac_ctrl_stats->UnsupportedOpcodesReceived =
+		portstats[spx5_stats_rx_unsup_opcode_cnt] +
+		portstats[spx5_stats_pmac_rx_unsup_opcode_cnt];
+}
+
+static void sparx5_get_eth_rmon_stats(struct net_device *ndev,
+				      struct ethtool_rmon_stats *rmon_stats,
+				      const struct ethtool_rmon_hist_range **ranges)
+{
+	struct sparx5_port *port = netdev_priv(ndev);
+	struct sparx5 *sparx5 = port->sparx5;
+	int portno = port->portno;
+	void __iomem *inst;
+	u64 *portstats;
+
+	portstats = &sparx5->stats[portno * sparx5->num_stats];
+	if (sparx5_is_baser(port->conf.portmode)) {
+		u32 tinst = sparx5_port_dev_index(portno);
+		u32 dev = sparx5_to_high_dev(portno);
+
+		inst = spx5_inst_get(sparx5, dev, tinst);
+		sparx5_get_dev_rmon_stats(portstats, inst, tinst);
+	} else {
+		inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+		sparx5_get_asm_rmon_stats(portstats, inst, portno);
+	}
+	rmon_stats->undersize_pkts = portstats[spx5_stats_rx_undersize_cnt] +
+		portstats[spx5_stats_pmac_rx_undersize_cnt];
+	rmon_stats->oversize_pkts = portstats[spx5_stats_rx_oversize_cnt] +
+		portstats[spx5_stats_pmac_rx_oversize_cnt];
+	rmon_stats->fragments = portstats[spx5_stats_rx_fragments_cnt] +
+		portstats[spx5_stats_pmac_rx_fragments_cnt];
+	rmon_stats->jabbers = portstats[spx5_stats_rx_jabbers_cnt] +
+		portstats[spx5_stats_pmac_rx_jabbers_cnt];
+	rmon_stats->hist[0] = portstats[spx5_stats_rx_size64_cnt] +
+		portstats[spx5_stats_pmac_rx_size64_cnt];
+	rmon_stats->hist[1] = portstats[spx5_stats_rx_size65to127_cnt] +
+		portstats[spx5_stats_pmac_rx_size65to127_cnt];
+	rmon_stats->hist[2] = portstats[spx5_stats_rx_size128to255_cnt] +
+		portstats[spx5_stats_pmac_rx_size128to255_cnt];
+	rmon_stats->hist[3] = portstats[spx5_stats_rx_size256to511_cnt] +
+		portstats[spx5_stats_pmac_rx_size256to511_cnt];
+	rmon_stats->hist[4] = portstats[spx5_stats_rx_size512to1023_cnt] +
+		portstats[spx5_stats_pmac_rx_size512to1023_cnt];
+	rmon_stats->hist[5] = portstats[spx5_stats_rx_size1024to1518_cnt] +
+		portstats[spx5_stats_pmac_rx_size1024to1518_cnt];
+	rmon_stats->hist[6] = portstats[spx5_stats_rx_size1519tomax_cnt] +
+		portstats[spx5_stats_pmac_rx_size1519tomax_cnt];
+	rmon_stats->hist_tx[0] = portstats[spx5_stats_tx_size64_cnt] +
+		portstats[spx5_stats_pmac_tx_size64_cnt];
+	rmon_stats->hist_tx[1] = portstats[spx5_stats_tx_size65to127_cnt] +
+		portstats[spx5_stats_pmac_tx_size65to127_cnt];
+	rmon_stats->hist_tx[2] = portstats[spx5_stats_tx_size128to255_cnt] +
+		portstats[spx5_stats_pmac_tx_size128to255_cnt];
+	rmon_stats->hist_tx[3] = portstats[spx5_stats_tx_size256to511_cnt] +
+		portstats[spx5_stats_pmac_tx_size256to511_cnt];
+	rmon_stats->hist_tx[4] = portstats[spx5_stats_tx_size512to1023_cnt] +
+		portstats[spx5_stats_pmac_tx_size512to1023_cnt];
+	rmon_stats->hist_tx[5] = portstats[spx5_stats_tx_size1024to1518_cnt] +
+		portstats[spx5_stats_pmac_tx_size1024to1518_cnt];
+	rmon_stats->hist_tx[6] = portstats[spx5_stats_tx_size1519tomax_cnt] +
+		portstats[spx5_stats_pmac_tx_size1519tomax_cnt];
+	*ranges = sparx5_rmon_ranges;
+}
+
+static int sparx5_get_sset_count(struct net_device *ndev, int sset)
+{
+	struct sparx5_port *port = netdev_priv(ndev);
+	struct sparx5 *sparx5 = port->sparx5;
+
+	if (sset != ETH_SS_STATS)
+		return -EOPNOTSUPP;
+	return sparx5->num_ethtool_stats;
+}
+
+static void sparx5_get_sset_strings(struct net_device *ndev, u32 sset, u8 *data)
+{
+	struct sparx5_port *port = netdev_priv(ndev);
+	struct sparx5 *sparx5 = port->sparx5;
+	int idx;
+
+	if (sset != ETH_SS_STATS)
+		return;
+
+	for (idx = 0; idx < sparx5->num_ethtool_stats; idx++)
+		strncpy(data + idx * ETH_GSTRING_LEN,
+			sparx5->stats_layout[idx], ETH_GSTRING_LEN);
+}
+
+static void sparx5_get_sset_data(struct net_device *ndev,
+				 struct ethtool_stats *stats, u64 *data)
+{
+	struct sparx5_port *port = netdev_priv(ndev);
+	struct sparx5 *sparx5 = port->sparx5;
+	int portno = port->portno;
+	void __iomem *inst;
+	u64 *portstats;
+	int idx;
+
+	portstats = &sparx5->stats[portno * sparx5->num_stats];
+	if (sparx5_is_baser(port->conf.portmode)) {
+		u32 tinst = sparx5_port_dev_index(portno);
+		u32 dev = sparx5_to_high_dev(portno);
+
+		inst = spx5_inst_get(sparx5, dev, tinst);
+		sparx5_get_dev_misc_stats(portstats, inst, tinst);
+	} else {
+		inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+		sparx5_get_asm_misc_stats(portstats, inst, portno);
+	}
+	sparx5_get_ana_ac_stats_stats(sparx5, portno);
+	sparx5_get_queue_sys_stats(sparx5, portno);
+	/* Copy port counters to the ethtool buffer */
+	for (idx = spx5_stats_mm_rx_assembly_err_cnt;
+	     idx < spx5_stats_mm_rx_assembly_err_cnt +
+	     sparx5->num_ethtool_stats; idx++)
+		*data++ = portstats[idx];
+}
+
+void sparx5_get_stats64(struct net_device *ndev,
+			struct rtnl_link_stats64 *stats)
+{
+	struct sparx5_port *port = netdev_priv(ndev);
+	struct sparx5 *sparx5 = port->sparx5;
+	u64 *portstats;
+	int idx;
+
+	if (!sparx5->stats)
+		return; /* Not initialized yet */
+
+	portstats = &sparx5->stats[port->portno * sparx5->num_stats];
+
+	stats->rx_packets = portstats[spx5_stats_rx_uc_cnt] +
+		portstats[spx5_stats_pmac_rx_uc_cnt] +
+		portstats[spx5_stats_rx_mc_cnt] +
+		portstats[spx5_stats_rx_bc_cnt];
+	stats->tx_packets = portstats[spx5_stats_tx_uc_cnt] +
+		portstats[spx5_stats_pmac_tx_uc_cnt] +
+		portstats[spx5_stats_tx_mc_cnt] +
+		portstats[spx5_stats_tx_bc_cnt];
+	stats->rx_bytes = portstats[spx5_stats_rx_ok_bytes_cnt] +
+		portstats[spx5_stats_pmac_rx_ok_bytes_cnt];
+	stats->tx_bytes = portstats[spx5_stats_tx_ok_bytes_cnt] +
+		portstats[spx5_stats_pmac_tx_ok_bytes_cnt];
+	stats->rx_errors = portstats[spx5_stats_rx_in_range_len_err_cnt] +
+		portstats[spx5_stats_pmac_rx_in_range_len_err_cnt] +
+		portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+		portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt] +
+		portstats[spx5_stats_rx_oversize_cnt] +
+		portstats[spx5_stats_pmac_rx_oversize_cnt] +
+		portstats[spx5_stats_rx_crc_err_cnt] +
+		portstats[spx5_stats_pmac_rx_crc_err_cnt] +
+		portstats[spx5_stats_rx_alignment_lost_cnt] +
+		portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+	stats->tx_errors = portstats[spx5_stats_tx_xcoll_cnt] +
+		portstats[spx5_stats_tx_csense_cnt] +
+		portstats[spx5_stats_tx_late_coll_cnt];
+	stats->multicast = portstats[spx5_stats_rx_mc_cnt] +
+		portstats[spx5_stats_pmac_rx_mc_cnt];
+	stats->collisions = portstats[spx5_stats_tx_late_coll_cnt] +
+		portstats[spx5_stats_tx_xcoll_cnt] +
+		portstats[spx5_stats_tx_backoff1_cnt];
+	stats->rx_length_errors = portstats[spx5_stats_rx_in_range_len_err_cnt] +
+		portstats[spx5_stats_pmac_rx_in_range_len_err_cnt] +
+		portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+		portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt] +
+		portstats[spx5_stats_rx_oversize_cnt] +
+		portstats[spx5_stats_pmac_rx_oversize_cnt];
+	stats->rx_crc_errors = portstats[spx5_stats_rx_crc_err_cnt] +
+		portstats[spx5_stats_pmac_rx_crc_err_cnt];
+	stats->rx_frame_errors = portstats[spx5_stats_rx_alignment_lost_cnt] +
+		portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+	stats->tx_aborted_errors = portstats[spx5_stats_tx_xcoll_cnt];
+	stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt];
+	stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt];
+	stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt];
+	for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx)
+		stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop
+					       + idx];
+	stats->tx_dropped = portstats[spx5_stats_tx_local_drop];
+}
+
+static void sparx5_update_port_stats(struct sparx5 *sparx5, int portno)
+{
+	if (sparx5_is_baser(sparx5->ports[portno]->conf.portmode))
+		sparx5_get_device_stats(sparx5, portno);
+	else
+		sparx5_get_asm_stats(sparx5, portno);
+	sparx5_get_ana_ac_stats_stats(sparx5, portno);
+	sparx5_get_queue_sys_stats(sparx5, portno);
+}
+
+static void sparx5_update_stats(struct sparx5 *sparx5)
+{
+	int idx;
+
+	for (idx = 0; idx < SPX5_PORTS; idx++)
+		if (sparx5->ports[idx])
+			sparx5_update_port_stats(sparx5, idx);
+}
+
+static void sparx5_check_stats_work(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct sparx5 *sparx5 = container_of(dwork,
+					     struct sparx5,
+					     stats_work);
+
+	sparx5_update_stats(sparx5);
+
+	queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work,
+			   SPX5_STATS_CHECK_DELAY);
+}
+
+static int sparx5_get_link_settings(struct net_device *ndev,
+				    struct ethtool_link_ksettings *cmd)
+{
+	struct sparx5_port *port = netdev_priv(ndev);
+
+	return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int sparx5_set_link_settings(struct net_device *ndev,
+				    const struct ethtool_link_ksettings *cmd)
+{
+	struct sparx5_port *port = netdev_priv(ndev);
+
+	return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+static void sparx5_config_stats(struct sparx5 *sparx5)
+{
+	/* Enable global events for port policer drops */
+	spx5_rmw(ANA_AC_PORT_SGE_CFG_MASK_SET(0xf0f0),
+		 ANA_AC_PORT_SGE_CFG_MASK,
+		 sparx5,
+		 ANA_AC_PORT_SGE_CFG(SPX5_PORT_POLICER_DROPS));
+}
+
+static void sparx5_config_port_stats(struct sparx5 *sparx5, int
portno) +{ + /* Clear Queue System counters */ + spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(portno) | + XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(3), sparx5, + XQS_STAT_CFG); + + /* Use counter for port policer drop count */ + spx5_rmw(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_SET(1) | + ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_SET(0) | + ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(0xff), + ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE | + ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE | + ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, + sparx5, ANA_AC_PORT_STAT_CFG(portno, SPX5_PORT_POLICER_DROPS)); +} + +static int sparx5_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + struct sparx5_phc *phc; + + if (!sparx5->ptp) + return ethtool_op_get_ts_info(dev, info); + + phc = &sparx5->phc[SPARX5_PHC_PORT]; + + info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1; + if (info->phc_index == -1) { + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + return 0; + } + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | + BIT(HWTSTAMP_TX_ONESTEP_SYNC); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +const struct ethtool_ops sparx5_ethtool_ops = { + .get_sset_count = sparx5_get_sset_count, + .get_strings = sparx5_get_sset_strings, + .get_ethtool_stats = sparx5_get_sset_data, + .get_link_ksettings = sparx5_get_link_settings, + .set_link_ksettings = sparx5_set_link_settings, + .get_link = ethtool_op_get_link, + .get_eth_phy_stats = sparx5_get_eth_phy_stats, + .get_eth_mac_stats = sparx5_get_eth_mac_stats, + .get_eth_ctrl_stats = sparx5_get_eth_mac_ctrl_stats, + .get_rmon_stats = sparx5_get_eth_rmon_stats, + .get_ts_info = sparx5_get_ts_info, +}; + +int sparx_stats_init(struct sparx5 *sparx5) +{ + char queue_name[32]; + int portno; + + sparx5->stats_layout = sparx5_stats_layout; + sparx5->num_stats = spx5_stats_count; + sparx5->num_ethtool_stats = ARRAY_SIZE(sparx5_stats_layout); + sparx5->stats = devm_kcalloc(sparx5->dev, + SPX5_PORTS_ALL * sparx5->num_stats, + sizeof(u64), GFP_KERNEL); + if (!sparx5->stats) + return -ENOMEM; + + mutex_init(&sparx5->queue_stats_lock); + sparx5_config_stats(sparx5); + for (portno = 0; portno < SPX5_PORTS; portno++) + if (sparx5->ports[portno]) + sparx5_config_port_stats(sparx5, portno); + + snprintf(queue_name, sizeof(queue_name), "%s-stats", + dev_name(sparx5->dev)); + sparx5->stats_queue = create_singlethread_workqueue(queue_name); + if (!sparx5->stats_queue) + return -ENOMEM; + + INIT_DELAYED_WORK(&sparx5->stats_work, sparx5_check_stats_work); + queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work, + SPX5_STATS_CHECK_DELAY); + + return 0; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c new file mode 100644 index 000000000..141897dfe --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c @@ -0,0 +1,598 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. 
+ * + * The Sparx5 Chip Register Model can be browsed at this location: + * https://github.com/microchip-ung/sparx-5_reginfo + */ + +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/ip.h> +#include <linux/dma-mapping.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_port.h" + +#define FDMA_XTR_CHANNEL 6 +#define FDMA_INJ_CHANNEL 0 + +#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_INFO_TOKEN BIT(17) +#define FDMA_DCB_INFO_INTR BIT(18) +#define FDMA_DCB_INFO_SW(x) (((x) << 24) & GENMASK(31, 24)) + +#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_STATUS_SOF BIT(16) +#define FDMA_DCB_STATUS_EOF BIT(17) +#define FDMA_DCB_STATUS_INTR BIT(18) +#define FDMA_DCB_STATUS_DONE BIT(19) +#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) +#define FDMA_DCB_INVALID_DATA 0x1 + +#define FDMA_XTR_BUFFER_SIZE 2048 +#define FDMA_WEIGHT 4 + +/* Frame DMA DCB format + * + * +---------------------------+ + * | Next Ptr | + * +---------------------------+ + * | Reserved | Info | + * +---------------------------+ + * | Data0 Ptr | + * +---------------------------+ + * | Reserved | Status0 | + * +---------------------------+ + * | Data1 Ptr | + * +---------------------------+ + * | Reserved | Status1 | + * +---------------------------+ + * | Data2 Ptr | + * +---------------------------+ + * | Reserved | Status2 | + * |-------------|-------------| + * | | + * | | + * | | + * | | + * | | + * |---------------------------| + * | Data14 Ptr | + * +-------------|-------------+ + * | Reserved | Status14 | + * +-------------|-------------+ + */ + +/* For each hardware DB there is an entry in this list and when the HW DB + * entry is used, this SW DB entry is moved to the back of the list + */ +struct sparx5_db { + struct list_head list; + void *cpu_addr; +}; + +static void sparx5_fdma_rx_add_dcb(struct sparx5_rx *rx, + struct sparx5_rx_dcb_hw *dcb, + u64 nextptr) +{ + int idx = 0; + + /* Reset the status of the DB */ + for (idx = 0; idx < FDMA_RX_DCB_MAX_DBS; ++idx) { + struct sparx5_db_hw *db = &dcb->db[idx]; + + db->status = FDMA_DCB_STATUS_INTR; + } + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE); + rx->last_entry->nextptr = nextptr; + rx->last_entry = dcb; +} + +static void sparx5_fdma_tx_add_dcb(struct sparx5_tx *tx, + struct sparx5_tx_dcb_hw *dcb, + u64 nextptr) +{ + int idx = 0; + + /* Reset the status of the DB */ + for (idx = 0; idx < FDMA_TX_DCB_MAX_DBS; ++idx) { + struct sparx5_db_hw *db = &dcb->db[idx]; + + db->status = FDMA_DCB_STATUS_DONE; + } + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE); +} + +static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + /* Write the buffer address in the LLP and LLP1 regs */ + spx5_wr(((u64)rx->dma) & GENMASK(31, 0), sparx5, + FDMA_DCB_LLP(rx->channel_id)); + spx5_wr(((u64)rx->dma) >> 32, sparx5, FDMA_DCB_LLP1(rx->channel_id)); + + /* Set the number of RX DBs to be used, and DB end-of-frame interrupt */ + spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) | + FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | + FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE), + sparx5, FDMA_CH_CFG(rx->channel_id)); + + /* Set the RX Watermark to max */ + spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), FDMA_XTR_CFG_XTR_FIFO_WM, + sparx5, + FDMA_XTR_CFG); + + /* Start RX fdma */ + 
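+ /* (i.e. release the extraction port by clearing XTR_STOP; the + * channel itself is only activated below via FDMA_CH_ACTIVATE) + */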
spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), FDMA_PORT_CTRL_XTR_STOP, + sparx5, FDMA_PORT_CTRL(0)); + + /* Enable RX channel DB interrupt */ + spx5_rmw(BIT(rx->channel_id), + BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + sparx5, FDMA_INTR_DB_ENA); + + /* Activate the RX channel */ + spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_ACTIVATE); +} + +static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + /* Deactivate the RX channel */ + spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE, + sparx5, FDMA_CH_ACTIVATE); + + /* Disable RX channel DB interrupt */ + spx5_rmw(0, BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + sparx5, FDMA_INTR_DB_ENA); + + /* Stop RX fdma */ + spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(1), FDMA_PORT_CTRL_XTR_STOP, + sparx5, FDMA_PORT_CTRL(0)); +} + +static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx) +{ + /* Write the buffer address in the LLP and LLP1 regs */ + spx5_wr(((u64)tx->dma) & GENMASK(31, 0), sparx5, + FDMA_DCB_LLP(tx->channel_id)); + spx5_wr(((u64)tx->dma) >> 32, sparx5, FDMA_DCB_LLP1(tx->channel_id)); + + /* Set the number of TX DBs to be used, and DB end-of-frame interrupt */ + spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) | + FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | + FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE), + sparx5, FDMA_CH_CFG(tx->channel_id)); + + /* Start TX fdma */ + spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP, + sparx5, FDMA_PORT_CTRL(0)); + + /* Activate the channel */ + spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_ACTIVATE); +} + +static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx) +{ + /* Disable the channel */ + spx5_rmw(0, BIT(tx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE, + sparx5, FDMA_CH_ACTIVATE); +} + +static void sparx5_fdma_rx_reload(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + /* Reload the RX channel */ + spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_RELOAD); +} + +static void sparx5_fdma_tx_reload(struct sparx5 *sparx5, struct sparx5_tx *tx) +{ + /* Reload the TX channel */ + spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_RELOAD); +} + +static struct sk_buff *sparx5_fdma_rx_alloc_skb(struct sparx5_rx *rx) +{ + return __netdev_alloc_skb(rx->ndev, FDMA_XTR_BUFFER_SIZE, + GFP_ATOMIC); +} + +static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + struct sparx5_db_hw *db_hw; + unsigned int packet_size; + struct sparx5_port *port; + struct sk_buff *new_skb; + struct frame_info fi; + struct sk_buff *skb; + dma_addr_t dma_addr; + + /* Check if the DCB is done */ + db_hw = &rx->dcb_entries[rx->dcb_index].db[rx->db_index]; + if (unlikely(!(db_hw->status & FDMA_DCB_STATUS_DONE))) + return false; + skb = rx->skb[rx->dcb_index][rx->db_index]; + /* Replace the DB entry with a new SKB */ + new_skb = sparx5_fdma_rx_alloc_skb(rx); + if (unlikely(!new_skb)) + return false; + /* Map the new skb data and set the new skb */ + dma_addr = virt_to_phys(new_skb->data); + rx->skb[rx->dcb_index][rx->db_index] = new_skb; + db_hw->dataptr = dma_addr; + packet_size = FDMA_DCB_STATUS_BLOCKL(db_hw->status); + skb_put(skb, packet_size); + /* Now do the normal processing of the skb */ + sparx5_ifh_parse((u32 *)skb->data, &fi); + /* Map to port netdev */ + port = fi.src_port < SPX5_PORTS ?
sparx5->ports[fi.src_port] : NULL; + if (!port || !port->ndev) { + dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port); + sparx5_xtr_flush(sparx5, XTR_QUEUE); + return false; + } + skb->dev = port->ndev; + skb_pull(skb, IFH_LEN * sizeof(u32)); + if (likely(!(skb->dev->features & NETIF_F_RXFCS))) + skb_trim(skb, skb->len - ETH_FCS_LEN); + + sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp); + skb->protocol = eth_type_trans(skb, skb->dev); + /* Everything we see on an interface that is in the HW bridge + * has already been forwarded + */ + if (test_bit(port->portno, sparx5->bridge_mask)) + skb->offload_fwd_mark = 1; + skb->dev->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + rx->packets++; + netif_receive_skb(skb); + return true; +} + +static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight) +{ + struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi); + struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx); + int counter = 0; + + while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) { + struct sparx5_rx_dcb_hw *old_dcb; + + rx->db_index++; + counter++; + /* Check if the DCB can be reused */ + if (rx->db_index != FDMA_RX_DCB_MAX_DBS) + continue; + /* As the DCB can be reused, just advance the dcb_index + * pointer and set the nextptr in the DCB + */ + rx->db_index = 0; + old_dcb = &rx->dcb_entries[rx->dcb_index]; + rx->dcb_index++; + rx->dcb_index &= FDMA_DCB_MAX - 1; + sparx5_fdma_rx_add_dcb(rx, old_dcb, + rx->dma + + ((unsigned long)old_dcb - + (unsigned long)rx->dcb_entries)); + } + if (counter < weight) { + napi_complete_done(&rx->napi, counter); + spx5_rmw(BIT(rx->channel_id), + BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + sparx5, FDMA_INTR_DB_ENA); + } + if (counter) + sparx5_fdma_rx_reload(sparx5, rx); + return counter; +} + +static struct sparx5_tx_dcb_hw *sparx5_fdma_next_dcb(struct sparx5_tx *tx, + struct sparx5_tx_dcb_hw *dcb) +{ + struct sparx5_tx_dcb_hw *next_dcb; + + next_dcb = dcb; + next_dcb++; + /* Handle wrap-around */ + if ((unsigned long)next_dcb >= + ((unsigned long)tx->first_entry + FDMA_DCB_MAX * sizeof(*dcb))) + next_dcb = tx->first_entry; + return next_dcb; +} + +int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb) +{ + struct sparx5_tx_dcb_hw *next_dcb_hw; + struct sparx5_tx *tx = &sparx5->tx; + static bool first_time = true; + struct sparx5_db_hw *db_hw; + struct sparx5_db *db; + + next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry); + db_hw = &next_dcb_hw->db[0]; + if (!(db_hw->status & FDMA_DCB_STATUS_DONE)) + return -EINVAL; + db = list_first_entry(&tx->db_list, struct sparx5_db, list); + list_move_tail(&db->list, &tx->db_list); + next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA; + tx->curr_entry->nextptr = tx->dma + + ((unsigned long)next_dcb_hw - + (unsigned long)tx->first_entry); + tx->curr_entry = next_dcb_hw; + memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE); + memcpy(db->cpu_addr, ifh, IFH_LEN * 4); + memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len); + db_hw->status = FDMA_DCB_STATUS_SOF | + FDMA_DCB_STATUS_EOF | + FDMA_DCB_STATUS_BLOCKO(0) | + FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4); + if (first_time) { + sparx5_fdma_tx_activate(sparx5, tx); + first_time = false; + } else { + sparx5_fdma_tx_reload(sparx5, tx); + } + return NETDEV_TX_OK; +} + +static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5) +{ + struct sparx5_rx *rx = &sparx5->rx; + struct sparx5_rx_dcb_hw *dcb; + int idx, jdx; + int size; + + size = sizeof(struct sparx5_rx_dcb_hw) * 
FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + rx->dcb_entries = devm_kzalloc(sparx5->dev, size, GFP_KERNEL); + if (!rx->dcb_entries) + return -ENOMEM; + rx->dma = virt_to_phys(rx->dcb_entries); + rx->last_entry = rx->dcb_entries; + rx->db_index = 0; + rx->dcb_index = 0; + /* Now for each dcb allocate the db */ + for (idx = 0; idx < FDMA_DCB_MAX; ++idx) { + dcb = &rx->dcb_entries[idx]; + dcb->info = 0; + /* For each db allocate an skb and map skb data pointer to the DB + * dataptr. In this way when the frame is received the skb->data + * will contain the frame, so no memcpy is needed + */ + for (jdx = 0; jdx < FDMA_RX_DCB_MAX_DBS; ++jdx) { + struct sparx5_db_hw *db_hw = &dcb->db[jdx]; + dma_addr_t dma_addr; + struct sk_buff *skb; + + skb = sparx5_fdma_rx_alloc_skb(rx); + if (!skb) + return -ENOMEM; + + dma_addr = virt_to_phys(skb->data); + db_hw->dataptr = dma_addr; + db_hw->status = 0; + rx->skb[idx][jdx] = skb; + } + sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx); + } + netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback, + FDMA_WEIGHT); + napi_enable(&rx->napi); + sparx5_fdma_rx_activate(sparx5, rx); + return 0; +} + +static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5) +{ + struct sparx5_tx *tx = &sparx5->tx; + struct sparx5_tx_dcb_hw *dcb; + int idx, jdx; + int size; + + size = sizeof(struct sparx5_tx_dcb_hw) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + tx->curr_entry = devm_kzalloc(sparx5->dev, size, GFP_KERNEL); + if (!tx->curr_entry) + return -ENOMEM; + tx->dma = virt_to_phys(tx->curr_entry); + tx->first_entry = tx->curr_entry; + INIT_LIST_HEAD(&tx->db_list); + /* Now for each dcb allocate the db */ + for (idx = 0; idx < FDMA_DCB_MAX; ++idx) { + dcb = &tx->curr_entry[idx]; + dcb->info = 0; + /* TX data buffers must be 16-byte aligned */ + for (jdx = 0; jdx < FDMA_TX_DCB_MAX_DBS; ++jdx) { + struct sparx5_db_hw *db_hw = &dcb->db[jdx]; + struct sparx5_db *db; + dma_addr_t phys; + void *cpu_addr; + + cpu_addr = devm_kzalloc(sparx5->dev, + FDMA_XTR_BUFFER_SIZE, + GFP_KERNEL); + if (!cpu_addr) + return -ENOMEM; + phys = virt_to_phys(cpu_addr); + db_hw->dataptr = phys; + db_hw->status = 0; + db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL); + if (!db) + return -ENOMEM; + db->cpu_addr = cpu_addr; + list_add_tail(&db->list, &tx->db_list); + } + sparx5_fdma_tx_add_dcb(tx, dcb, tx->dma + sizeof(*dcb) * idx); + /* Leave curr_entry pointing at the last allocated entry */ + if (idx == FDMA_DCB_MAX - 1) + tx->curr_entry = dcb; + } + return 0; +} + +static void sparx5_fdma_rx_init(struct sparx5 *sparx5, + struct sparx5_rx *rx, int channel) +{ + int idx; + + rx->channel_id = channel; + /* Fetch a netdev for SKB and NAPI use, any will do */ + for (idx = 0; idx < SPX5_PORTS; ++idx) { + struct sparx5_port *port = sparx5->ports[idx]; + + if (port && port->ndev) { + rx->ndev = port->ndev; + break; + } + } +} + +static void sparx5_fdma_tx_init(struct sparx5 *sparx5, + struct sparx5_tx *tx, int channel) +{ + tx->channel_id = channel; +} + +irqreturn_t sparx5_fdma_handler(int irq, void *args) +{ + struct sparx5 *sparx5 = args; + u32 db = 0, err = 0; + + db = spx5_rd(sparx5, FDMA_INTR_DB); + err = spx5_rd(sparx5, FDMA_INTR_ERR); + /* Clear interrupt */ + if (db) { + spx5_wr(0, sparx5, FDMA_INTR_DB_ENA); + spx5_wr(db, sparx5, FDMA_INTR_DB); + napi_schedule(&sparx5->rx.napi); + } + if (err) { + u32 err_type = spx5_rd(sparx5, FDMA_ERRORS); + + dev_err_ratelimited(sparx5->dev, + "ERR: int: %#x, type: %#x\n", + err, err_type); + spx5_wr(err, sparx5,
FDMA_INTR_ERR); + spx5_wr(err_type, sparx5, FDMA_ERRORS); + } + return IRQ_HANDLED; +} + +static void sparx5_fdma_injection_mode(struct sparx5 *sparx5) +{ + const int byte_swap = 1; + int portno; + int urgency; + + /* Change mode to fdma extraction and injection */ + spx5_wr(QS_XTR_GRP_CFG_MODE_SET(2) | + QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) | + QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap), + sparx5, QS_XTR_GRP_CFG(XTR_QUEUE)); + spx5_wr(QS_INJ_GRP_CFG_MODE_SET(2) | + QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap), + sparx5, QS_INJ_GRP_CFG(INJ_QUEUE)); + + /* CPU ports capture setup */ + for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) { + /* ASM CPU port: No preamble, IFH, enable padding */ + spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) | + ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) | + ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */ + sparx5, ASM_PORT_CFG(portno)); + + /* Reset WM cnt to unclog queued frames */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(portno)); + + /* Set Disassembler Stop Watermark level */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(100), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(portno)); + + /* Enable port in queue system */ + urgency = sparx5_port_fwd_urg(sparx5, SPEED_2500); + spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) | + QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency), + QFWD_SWITCH_PORT_MODE_PORT_ENA | + QFWD_SWITCH_PORT_MODE_FWD_URGENCY, + sparx5, + QFWD_SWITCH_PORT_MODE(portno)); + + /* Disable Disassembler buffer underrun watchdog + * to avoid truncated packets in XTR + */ + spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(1), + DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, + sparx5, + DSM_BUF_CFG(portno)); + + /* Disabling frame aging */ + spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(1), + HSCH_PORT_MODE_AGE_DIS, + sparx5, + HSCH_PORT_MODE(portno)); + } +} + +int sparx5_fdma_start(struct sparx5 *sparx5) +{ + int err; + + /* Reset FDMA state */ + spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL); + spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL); + + /* Force ACP caching but disable read/write allocation */ + spx5_rmw(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(1) | + CPU_PROC_CTRL_ACP_AWCACHE_SET(0) | + CPU_PROC_CTRL_ACP_ARCACHE_SET(0), + CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA | + CPU_PROC_CTRL_ACP_AWCACHE | + CPU_PROC_CTRL_ACP_ARCACHE, + sparx5, CPU_PROC_CTRL); + + sparx5_fdma_injection_mode(sparx5); + sparx5_fdma_rx_init(sparx5, &sparx5->rx, FDMA_XTR_CHANNEL); + sparx5_fdma_tx_init(sparx5, &sparx5->tx, FDMA_INJ_CHANNEL); + err = sparx5_fdma_rx_alloc(sparx5); + if (err) { + dev_err(sparx5->dev, "Could not allocate RX buffers: %d\n", err); + return err; + } + err = sparx5_fdma_tx_alloc(sparx5); + if (err) { + dev_err(sparx5->dev, "Could not allocate TX buffers: %d\n", err); + return err; + } + return err; +} + +static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5) +{ + return spx5_rd(sparx5, FDMA_PORT_CTRL(0)); +} + +int sparx5_fdma_stop(struct sparx5 *sparx5) +{ + u32 val; + + napi_disable(&sparx5->rx.napi); + /* Stop the fdma and channel interrupts */ + sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx); + sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx); + /* Wait for the RX channel to stop */ + read_poll_timeout(sparx5_fdma_port_ctrl, val, + FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0, + 500, 10000, 0, sparx5); + return 0; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c 
new file mode 100644 index 000000000..4af285918 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + */ + +#include <net/switchdev.h> +#include <linux/if_bridge.h> +#include <linux/iopoll.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" + +/* Commands for Mac Table Command register */ +#define MAC_CMD_LEARN 0 /* Insert (Learn) 1 entry */ +#define MAC_CMD_UNLEARN 1 /* Unlearn (Forget) 1 entry */ +#define MAC_CMD_LOOKUP 2 /* Look up 1 entry */ +#define MAC_CMD_READ 3 /* Read entry at Mac Table Index */ +#define MAC_CMD_WRITE 4 /* Write entry at Mac Table Index */ +#define MAC_CMD_SCAN 5 /* Scan (Age or find next) */ +#define MAC_CMD_FIND_SMALLEST 6 /* Get next entry */ +#define MAC_CMD_CLEAR_ALL 7 /* Delete all entries in table */ + +/* Commands for MAC_ENTRY_ADDR_TYPE */ +#define MAC_ENTRY_ADDR_TYPE_UPSID_PN 0 +#define MAC_ENTRY_ADDR_TYPE_UPSID_CPU_OR_INT 1 +#define MAC_ENTRY_ADDR_TYPE_GLAG 2 +#define MAC_ENTRY_ADDR_TYPE_MC_IDX 3 + +#define TABLE_UPDATE_SLEEP_US 10 +#define TABLE_UPDATE_TIMEOUT_US 100000 + +struct sparx5_mact_entry { + struct list_head list; + unsigned char mac[ETH_ALEN]; + u32 flags; +#define MAC_ENT_ALIVE BIT(0) +#define MAC_ENT_MOVED BIT(1) +#define MAC_ENT_LOCK BIT(2) + u16 vid; + u16 port; +}; + +static int sparx5_mact_get_status(struct sparx5 *sparx5) +{ + return spx5_rd(sparx5, LRN_COMMON_ACCESS_CTRL); +} + +static int sparx5_mact_wait_for_completion(struct sparx5 *sparx5) +{ + u32 val; + + return readx_poll_timeout(sparx5_mact_get_status, + sparx5, val, + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(val) == 0, + TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US); +} + +static void sparx5_mact_select(struct sparx5 *sparx5, + const unsigned char mac[ETH_ALEN], + u16 vid) +{ + u32 macl = 0, mach = 0; + + /* Set the MAC address to handle and the vlan associated in a format + * understood by the hardware. 
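+ * CFG_0 takes the VID (from bit 16 upwards) plus the two most + * significant MAC bytes; CFG_1 takes the remaining four bytes. + * E.g. vid 1 and MAC 00:11:22:33:44:55 yield CFG_0 = 0x00010011 + * and CFG_1 = 0x22334455.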
+ */ + mach |= vid << 16; + mach |= mac[0] << 8; + mach |= mac[1] << 0; + macl |= mac[2] << 24; + macl |= mac[3] << 16; + macl |= mac[4] << 8; + macl |= mac[5] << 0; + + spx5_wr(mach, sparx5, LRN_MAC_ACCESS_CFG_0); + spx5_wr(macl, sparx5, LRN_MAC_ACCESS_CFG_1); +} + +int sparx5_mact_learn(struct sparx5 *sparx5, int pgid, + const unsigned char mac[ETH_ALEN], u16 vid) +{ + int addr, type, ret; + + if (pgid < SPX5_PORTS) { + type = MAC_ENTRY_ADDR_TYPE_UPSID_PN; + addr = pgid % 32; + addr += (pgid / 32) << 5; /* Add upsid */ + } else { + type = MAC_ENTRY_ADDR_TYPE_MC_IDX; + addr = pgid - SPX5_PORTS; + } + + mutex_lock(&sparx5->lock); + + sparx5_mact_select(sparx5, mac, vid); + + /* MAC entry properties */ + spx5_wr(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(addr) | + LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(type) | + LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(1) | + LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(1), + sparx5, LRN_MAC_ACCESS_CFG_2); + spx5_wr(0, sparx5, LRN_MAC_ACCESS_CFG_3); + + /* Insert/learn new entry */ + spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LEARN) | + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1), + sparx5, LRN_COMMON_ACCESS_CTRL); + + ret = sparx5_mact_wait_for_completion(sparx5); + + mutex_unlock(&sparx5->lock); + + return ret; +} + +int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + + return sparx5_mact_forget(sparx5, addr, port->pvid); +} + +int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + + return sparx5_mact_learn(sparx5, PGID_CPU, addr, port->pvid); +} + +static int sparx5_mact_get(struct sparx5 *sparx5, + unsigned char mac[ETH_ALEN], + u16 *vid, u32 *pcfg2) +{ + u32 mach, macl, cfg2; + int ret = -ENOENT; + + cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2); + if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2)) { + mach = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_0); + macl = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_1); + mac[0] = ((mach >> 8) & 0xff); + mac[1] = ((mach >> 0) & 0xff); + mac[2] = ((macl >> 24) & 0xff); + mac[3] = ((macl >> 16) & 0xff); + mac[4] = ((macl >> 8) & 0xff); + mac[5] = ((macl >> 0) & 0xff); + *vid = mach >> 16; + *pcfg2 = cfg2; + ret = 0; + } + + return ret; +} + +bool sparx5_mact_getnext(struct sparx5 *sparx5, + unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2) +{ + u32 cfg2; + int ret; + + mutex_lock(&sparx5->lock); + + sparx5_mact_select(sparx5, mac, *vid); + + spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(1) | + LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1), + sparx5, LRN_SCAN_NEXT_CFG); + spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET + (MAC_CMD_FIND_SMALLEST) | + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1), + sparx5, LRN_COMMON_ACCESS_CTRL); + + ret = sparx5_mact_wait_for_completion(sparx5); + if (ret == 0) { + ret = sparx5_mact_get(sparx5, mac, vid, &cfg2); + if (ret == 0) + *pcfg2 = cfg2; + } + + mutex_unlock(&sparx5->lock); + + return ret == 0; +} + +int sparx5_mact_find(struct sparx5 *sparx5, + const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2) +{ + int ret; + u32 cfg2; + + mutex_lock(&sparx5->lock); + + sparx5_mact_select(sparx5, mac, vid); + + /* Issue a lookup command */ + spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LOOKUP) | + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1), + sparx5, LRN_COMMON_ACCESS_CTRL); + + ret = 
sparx5_mact_wait_for_completion(sparx5); + if (ret == 0) { + cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2); + if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2)) + *pcfg2 = cfg2; + else + ret = -ENOENT; + } + + mutex_unlock(&sparx5->lock); + + return ret; +} + +int sparx5_mact_forget(struct sparx5 *sparx5, + const unsigned char mac[ETH_ALEN], u16 vid) +{ + int ret; + + mutex_lock(&sparx5->lock); + + sparx5_mact_select(sparx5, mac, vid); + + /* Issue an unlearn command */ + spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_UNLEARN) | + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1), + sparx5, LRN_COMMON_ACCESS_CTRL); + + ret = sparx5_mact_wait_for_completion(sparx5); + + mutex_unlock(&sparx5->lock); + + return ret; +} + +static struct sparx5_mact_entry *alloc_mact_entry(struct sparx5 *sparx5, + const unsigned char *mac, + u16 vid, u16 port_index) +{ + struct sparx5_mact_entry *mact_entry; + + mact_entry = devm_kzalloc(sparx5->dev, + sizeof(*mact_entry), GFP_ATOMIC); + if (!mact_entry) + return NULL; + + memcpy(mact_entry->mac, mac, ETH_ALEN); + mact_entry->vid = vid; + mact_entry->port = port_index; + return mact_entry; +} + +static struct sparx5_mact_entry *find_mact_entry(struct sparx5 *sparx5, + const unsigned char *mac, + u16 vid, u16 port_index) +{ + struct sparx5_mact_entry *mact_entry; + struct sparx5_mact_entry *res = NULL; + + mutex_lock(&sparx5->mact_lock); + list_for_each_entry(mact_entry, &sparx5->mact_entries, list) { + if (mact_entry->vid == vid && + ether_addr_equal(mac, mact_entry->mac) && + mact_entry->port == port_index) { + res = mact_entry; + break; + } + } + mutex_unlock(&sparx5->mact_lock); + + return res; +} + +static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type, + const char *mac, u16 vid, + struct net_device *dev, bool offloaded) +{ + struct switchdev_notifier_fdb_info info = {}; + + info.addr = mac; + info.vid = vid; + info.offloaded = offloaded; + call_switchdev_notifiers(type, dev, &info.info, NULL); +} + +int sparx5_add_mact_entry(struct sparx5 *sparx5, + struct net_device *dev, + u16 portno, + const unsigned char *addr, u16 vid) +{ + struct sparx5_mact_entry *mact_entry; + int ret; + u32 cfg2; + + ret = sparx5_mact_find(sparx5, addr, vid, &cfg2); + if (!ret) + return 0; + + /* If the entry already exists, don't add it to SW again; just + * update HW. We need to check the actual HW state because an + * entry may be learned by HW before the mact thread starts: the + * frame then reaches the CPU, which adds the entry, but without + * the extern_learn flag. + */ + mact_entry = find_mact_entry(sparx5, addr, vid, portno); + if (mact_entry) + goto update_hw; + + /* Add the entry to the SW MAC table so that no notification is + * generated when SW pulls the table again + */ + mact_entry = alloc_mact_entry(sparx5, addr, vid, portno); + if (!mact_entry) + return -ENOMEM; + + mutex_lock(&sparx5->mact_lock); + list_add_tail(&mact_entry->list, &sparx5->mact_entries); + mutex_unlock(&sparx5->mact_lock); + +update_hw: + ret = sparx5_mact_learn(sparx5, portno, addr, vid); + + /* New entry?
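+ * A freshly allocated entry still has flags == 0; lock it so the + * pull worker will not age it out, and notify the bridge once.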
*/ + if (mact_entry->flags == 0) { + mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */ + sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid, + dev, true); + } + + return ret; +} + +int sparx5_del_mact_entry(struct sparx5 *sparx5, + const unsigned char *addr, + u16 vid) +{ + struct sparx5_mact_entry *mact_entry, *tmp; + + /* Delete the entry from the SW MAC table so that no notification + * is generated when SW pulls the table again + */ + mutex_lock(&sparx5->mact_lock); + list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries, + list) { + if ((vid == 0 || mact_entry->vid == vid) && + ether_addr_equal(addr, mact_entry->mac)) { + list_del(&mact_entry->list); + devm_kfree(sparx5->dev, mact_entry); + + sparx5_mact_forget(sparx5, addr, mact_entry->vid); + } + } + mutex_unlock(&sparx5->mact_lock); + + return 0; +} + +static void sparx5_mact_handle_entry(struct sparx5 *sparx5, + unsigned char mac[ETH_ALEN], + u16 vid, u32 cfg2) +{ + struct sparx5_mact_entry *mact_entry; + bool found = false; + u16 port; + + if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(cfg2) != + MAC_ENTRY_ADDR_TYPE_UPSID_PN) + return; + + port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2); + if (port >= SPX5_PORTS) + return; + + if (!test_bit(port, sparx5->bridge_mask)) + return; + + mutex_lock(&sparx5->mact_lock); + list_for_each_entry(mact_entry, &sparx5->mact_entries, list) { + if (mact_entry->vid == vid && + ether_addr_equal(mac, mact_entry->mac)) { + found = true; + mact_entry->flags |= MAC_ENT_ALIVE; + if (mact_entry->port != port) { + dev_warn(sparx5->dev, "Entry move: %d -> %d\n", + mact_entry->port, port); + mact_entry->port = port; + mact_entry->flags |= MAC_ENT_MOVED; + } + /* Entry handled */ + break; + } + } + mutex_unlock(&sparx5->mact_lock); + + if (found && !(mact_entry->flags & MAC_ENT_MOVED)) + /* Present, not moved */ + return; + + if (!found) { + /* Entry not found - now add */ + mact_entry = alloc_mact_entry(sparx5, mac, vid, port); + if (!mact_entry) + return; + + mact_entry->flags |= MAC_ENT_ALIVE; + mutex_lock(&sparx5->mact_lock); + list_add_tail(&mact_entry->list, &sparx5->mact_entries); + mutex_unlock(&sparx5->mact_lock); + } + + /* New or moved entry - notify bridge */ + sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, + mac, vid, sparx5->ports[port]->ndev, + true); +} + +void sparx5_mact_pull_work(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct sparx5 *sparx5 = container_of(del_work, struct sparx5, + mact_work); + struct sparx5_mact_entry *mact_entry, *tmp; + unsigned char mac[ETH_ALEN]; + u32 cfg2; + u16 vid; + int ret; + + /* Reset MAC entry flags */ + mutex_lock(&sparx5->mact_lock); + list_for_each_entry(mact_entry, &sparx5->mact_entries, list) + mact_entry->flags &= MAC_ENT_LOCK; + mutex_unlock(&sparx5->mact_lock); + + /* Main MAC address processing loop */ + vid = 0; + memset(mac, 0, sizeof(mac)); + do { + mutex_lock(&sparx5->lock); + sparx5_mact_select(sparx5, mac, vid); + spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1), + sparx5, LRN_SCAN_NEXT_CFG); + spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET + (MAC_CMD_FIND_SMALLEST) | + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1), + sparx5, LRN_COMMON_ACCESS_CTRL); + ret = sparx5_mact_wait_for_completion(sparx5); + if (ret == 0) + ret = sparx5_mact_get(sparx5, mac, &vid, &cfg2); + mutex_unlock(&sparx5->lock); + if (ret == 0) + sparx5_mact_handle_entry(sparx5, mac, vid, cfg2); + } while (ret == 0); + + mutex_lock(&sparx5->mact_lock); + 
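+ /* Sweep: entries that the scan above did not mark MAC_ENT_ALIVE + * (and that are not locked) have aged out of the HW table, so + * notify the bridge and drop them from the SW list. + */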
list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries, + list) { + /* If the entry is in HW or permanent, then skip */ + if (mact_entry->flags & (MAC_ENT_ALIVE | MAC_ENT_LOCK)) + continue; + + sparx5_fdb_call_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, + mact_entry->mac, mact_entry->vid, + sparx5->ports[mact_entry->port]->ndev, + true); + + list_del(&mact_entry->list); + devm_kfree(sparx5->dev, mact_entry); + } + mutex_unlock(&sparx5->mact_lock); + + queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work, + SPX5_MACT_PULL_DELAY); +} + +void sparx5_set_ageing(struct sparx5 *sparx5, int msecs) +{ + int value = max(1, msecs / 10); /* unit 10 ms */ + + spx5_rmw(LRN_AUTOAGE_CFG_UNIT_SIZE_SET(2) | /* 10 ms */ + LRN_AUTOAGE_CFG_PERIOD_VAL_SET(value / 2), /* one bit ageing */ + LRN_AUTOAGE_CFG_UNIT_SIZE | + LRN_AUTOAGE_CFG_PERIOD_VAL, + sparx5, + LRN_AUTOAGE_CFG(0)); +} + +void sparx5_mact_init(struct sparx5 *sparx5) +{ + mutex_init(&sparx5->lock); + + /* Flush MAC table */ + spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_CLEAR_ALL) | + LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1), + sparx5, LRN_COMMON_ACCESS_CTRL); + + if (sparx5_mact_wait_for_completion(sparx5) != 0) + dev_warn(sparx5->dev, "MAC flush error\n"); + + sparx5_set_ageing(sparx5, BR_DEFAULT_AGEING_TIME / HZ * 1000); +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c new file mode 100644 index 000000000..3423c95cc --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -0,0 +1,940 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + * + * The Sparx5 Chip Register Model can be browsed at this location: + * https://github.com/microchip-ung/sparx-5_reginfo + */ +#include <linux/module.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> +#include <net/switchdev.h> +#include <linux/etherdevice.h> +#include <linux/io.h> +#include <linux/printk.h> +#include <linux/iopoll.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/types.h> +#include <linux/reset.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_port.h" +#include "sparx5_qos.h" + +#define QLIM_WM(fraction) \ + ((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100) +#define IO_RANGES 3 + +struct initial_port_config { + u32 portno; + struct device_node *node; + struct sparx5_port_config conf; + struct phy *serdes; +}; + +struct sparx5_ram_config { + void __iomem *init_reg; + u32 init_val; +}; + +struct sparx5_main_io_resource { + enum sparx5_target id; + phys_addr_t offset; + int range; +}; + +static const struct sparx5_main_io_resource sparx5_main_iomap[] = { + { TARGET_CPU, 0, 0 }, /* 0x600000000 */ + { TARGET_FDMA, 0x80000, 0 }, /* 0x600080000 */ + { TARGET_PCEP, 0x400000, 0 }, /* 0x600400000 */ + { TARGET_DEV2G5, 0x10004000, 1 }, /* 0x610004000 */ + { TARGET_DEV5G, 0x10008000, 1 }, /* 0x610008000 */ + { TARGET_PCS5G_BR, 0x1000c000, 1 }, /* 0x61000c000 */ + { TARGET_DEV2G5 + 1, 0x10010000, 1 }, /* 0x610010000 */ + { TARGET_DEV5G + 1, 0x10014000, 1 }, /* 0x610014000 */ + { TARGET_PCS5G_BR + 1, 0x10018000, 1 }, /* 0x610018000 */ + { TARGET_DEV2G5 + 2, 0x1001c000, 1 }, /* 0x61001c000 */ + { TARGET_DEV5G + 2, 0x10020000, 1 }, /* 0x610020000 */ + { 
TARGET_PCS5G_BR + 2, 0x10024000, 1 }, /* 0x610024000 */ + { TARGET_DEV2G5 + 6, 0x10028000, 1 }, /* 0x610028000 */ + { TARGET_DEV5G + 6, 0x1002c000, 1 }, /* 0x61002c000 */ + { TARGET_PCS5G_BR + 6, 0x10030000, 1 }, /* 0x610030000 */ + { TARGET_DEV2G5 + 7, 0x10034000, 1 }, /* 0x610034000 */ + { TARGET_DEV5G + 7, 0x10038000, 1 }, /* 0x610038000 */ + { TARGET_PCS5G_BR + 7, 0x1003c000, 1 }, /* 0x61003c000 */ + { TARGET_DEV2G5 + 8, 0x10040000, 1 }, /* 0x610040000 */ + { TARGET_DEV5G + 8, 0x10044000, 1 }, /* 0x610044000 */ + { TARGET_PCS5G_BR + 8, 0x10048000, 1 }, /* 0x610048000 */ + { TARGET_DEV2G5 + 9, 0x1004c000, 1 }, /* 0x61004c000 */ + { TARGET_DEV5G + 9, 0x10050000, 1 }, /* 0x610050000 */ + { TARGET_PCS5G_BR + 9, 0x10054000, 1 }, /* 0x610054000 */ + { TARGET_DEV2G5 + 10, 0x10058000, 1 }, /* 0x610058000 */ + { TARGET_DEV5G + 10, 0x1005c000, 1 }, /* 0x61005c000 */ + { TARGET_PCS5G_BR + 10, 0x10060000, 1 }, /* 0x610060000 */ + { TARGET_DEV2G5 + 11, 0x10064000, 1 }, /* 0x610064000 */ + { TARGET_DEV5G + 11, 0x10068000, 1 }, /* 0x610068000 */ + { TARGET_PCS5G_BR + 11, 0x1006c000, 1 }, /* 0x61006c000 */ + { TARGET_DEV2G5 + 12, 0x10070000, 1 }, /* 0x610070000 */ + { TARGET_DEV10G, 0x10074000, 1 }, /* 0x610074000 */ + { TARGET_PCS10G_BR, 0x10078000, 1 }, /* 0x610078000 */ + { TARGET_DEV2G5 + 14, 0x1007c000, 1 }, /* 0x61007c000 */ + { TARGET_DEV10G + 2, 0x10080000, 1 }, /* 0x610080000 */ + { TARGET_PCS10G_BR + 2, 0x10084000, 1 }, /* 0x610084000 */ + { TARGET_DEV2G5 + 15, 0x10088000, 1 }, /* 0x610088000 */ + { TARGET_DEV10G + 3, 0x1008c000, 1 }, /* 0x61008c000 */ + { TARGET_PCS10G_BR + 3, 0x10090000, 1 }, /* 0x610090000 */ + { TARGET_DEV2G5 + 16, 0x10094000, 1 }, /* 0x610094000 */ + { TARGET_DEV2G5 + 17, 0x10098000, 1 }, /* 0x610098000 */ + { TARGET_DEV2G5 + 18, 0x1009c000, 1 }, /* 0x61009c000 */ + { TARGET_DEV2G5 + 19, 0x100a0000, 1 }, /* 0x6100a0000 */ + { TARGET_DEV2G5 + 20, 0x100a4000, 1 }, /* 0x6100a4000 */ + { TARGET_DEV2G5 + 21, 0x100a8000, 1 }, /* 0x6100a8000 */ + { TARGET_DEV2G5 + 22, 0x100ac000, 1 }, /* 0x6100ac000 */ + { TARGET_DEV2G5 + 23, 0x100b0000, 1 }, /* 0x6100b0000 */ + { TARGET_DEV2G5 + 32, 0x100b4000, 1 }, /* 0x6100b4000 */ + { TARGET_DEV2G5 + 33, 0x100b8000, 1 }, /* 0x6100b8000 */ + { TARGET_DEV2G5 + 34, 0x100bc000, 1 }, /* 0x6100bc000 */ + { TARGET_DEV2G5 + 35, 0x100c0000, 1 }, /* 0x6100c0000 */ + { TARGET_DEV2G5 + 36, 0x100c4000, 1 }, /* 0x6100c4000 */ + { TARGET_DEV2G5 + 37, 0x100c8000, 1 }, /* 0x6100c8000 */ + { TARGET_DEV2G5 + 38, 0x100cc000, 1 }, /* 0x6100cc000 */ + { TARGET_DEV2G5 + 39, 0x100d0000, 1 }, /* 0x6100d0000 */ + { TARGET_DEV2G5 + 40, 0x100d4000, 1 }, /* 0x6100d4000 */ + { TARGET_DEV2G5 + 41, 0x100d8000, 1 }, /* 0x6100d8000 */ + { TARGET_DEV2G5 + 42, 0x100dc000, 1 }, /* 0x6100dc000 */ + { TARGET_DEV2G5 + 43, 0x100e0000, 1 }, /* 0x6100e0000 */ + { TARGET_DEV2G5 + 44, 0x100e4000, 1 }, /* 0x6100e4000 */ + { TARGET_DEV2G5 + 45, 0x100e8000, 1 }, /* 0x6100e8000 */ + { TARGET_DEV2G5 + 46, 0x100ec000, 1 }, /* 0x6100ec000 */ + { TARGET_DEV2G5 + 47, 0x100f0000, 1 }, /* 0x6100f0000 */ + { TARGET_DEV2G5 + 57, 0x100f4000, 1 }, /* 0x6100f4000 */ + { TARGET_DEV25G + 1, 0x100f8000, 1 }, /* 0x6100f8000 */ + { TARGET_PCS25G_BR + 1, 0x100fc000, 1 }, /* 0x6100fc000 */ + { TARGET_DEV2G5 + 59, 0x10104000, 1 }, /* 0x610104000 */ + { TARGET_DEV25G + 3, 0x10108000, 1 }, /* 0x610108000 */ + { TARGET_PCS25G_BR + 3, 0x1010c000, 1 }, /* 0x61010c000 */ + { TARGET_DEV2G5 + 60, 0x10114000, 1 }, /* 0x610114000 */ + { TARGET_DEV25G + 4, 0x10118000, 1 }, /* 0x610118000 */ + { TARGET_PCS25G_BR + 4, 
0x1011c000, 1 }, /* 0x61011c000 */ + { TARGET_DEV2G5 + 64, 0x10124000, 1 }, /* 0x610124000 */ + { TARGET_DEV5G + 12, 0x10128000, 1 }, /* 0x610128000 */ + { TARGET_PCS5G_BR + 12, 0x1012c000, 1 }, /* 0x61012c000 */ + { TARGET_PORT_CONF, 0x10130000, 1 }, /* 0x610130000 */ + { TARGET_DEV2G5 + 3, 0x10404000, 1 }, /* 0x610404000 */ + { TARGET_DEV5G + 3, 0x10408000, 1 }, /* 0x610408000 */ + { TARGET_PCS5G_BR + 3, 0x1040c000, 1 }, /* 0x61040c000 */ + { TARGET_DEV2G5 + 4, 0x10410000, 1 }, /* 0x610410000 */ + { TARGET_DEV5G + 4, 0x10414000, 1 }, /* 0x610414000 */ + { TARGET_PCS5G_BR + 4, 0x10418000, 1 }, /* 0x610418000 */ + { TARGET_DEV2G5 + 5, 0x1041c000, 1 }, /* 0x61041c000 */ + { TARGET_DEV5G + 5, 0x10420000, 1 }, /* 0x610420000 */ + { TARGET_PCS5G_BR + 5, 0x10424000, 1 }, /* 0x610424000 */ + { TARGET_DEV2G5 + 13, 0x10428000, 1 }, /* 0x610428000 */ + { TARGET_DEV10G + 1, 0x1042c000, 1 }, /* 0x61042c000 */ + { TARGET_PCS10G_BR + 1, 0x10430000, 1 }, /* 0x610430000 */ + { TARGET_DEV2G5 + 24, 0x10434000, 1 }, /* 0x610434000 */ + { TARGET_DEV2G5 + 25, 0x10438000, 1 }, /* 0x610438000 */ + { TARGET_DEV2G5 + 26, 0x1043c000, 1 }, /* 0x61043c000 */ + { TARGET_DEV2G5 + 27, 0x10440000, 1 }, /* 0x610440000 */ + { TARGET_DEV2G5 + 28, 0x10444000, 1 }, /* 0x610444000 */ + { TARGET_DEV2G5 + 29, 0x10448000, 1 }, /* 0x610448000 */ + { TARGET_DEV2G5 + 30, 0x1044c000, 1 }, /* 0x61044c000 */ + { TARGET_DEV2G5 + 31, 0x10450000, 1 }, /* 0x610450000 */ + { TARGET_DEV2G5 + 48, 0x10454000, 1 }, /* 0x610454000 */ + { TARGET_DEV10G + 4, 0x10458000, 1 }, /* 0x610458000 */ + { TARGET_PCS10G_BR + 4, 0x1045c000, 1 }, /* 0x61045c000 */ + { TARGET_DEV2G5 + 49, 0x10460000, 1 }, /* 0x610460000 */ + { TARGET_DEV10G + 5, 0x10464000, 1 }, /* 0x610464000 */ + { TARGET_PCS10G_BR + 5, 0x10468000, 1 }, /* 0x610468000 */ + { TARGET_DEV2G5 + 50, 0x1046c000, 1 }, /* 0x61046c000 */ + { TARGET_DEV10G + 6, 0x10470000, 1 }, /* 0x610470000 */ + { TARGET_PCS10G_BR + 6, 0x10474000, 1 }, /* 0x610474000 */ + { TARGET_DEV2G5 + 51, 0x10478000, 1 }, /* 0x610478000 */ + { TARGET_DEV10G + 7, 0x1047c000, 1 }, /* 0x61047c000 */ + { TARGET_PCS10G_BR + 7, 0x10480000, 1 }, /* 0x610480000 */ + { TARGET_DEV2G5 + 52, 0x10484000, 1 }, /* 0x610484000 */ + { TARGET_DEV10G + 8, 0x10488000, 1 }, /* 0x610488000 */ + { TARGET_PCS10G_BR + 8, 0x1048c000, 1 }, /* 0x61048c000 */ + { TARGET_DEV2G5 + 53, 0x10490000, 1 }, /* 0x610490000 */ + { TARGET_DEV10G + 9, 0x10494000, 1 }, /* 0x610494000 */ + { TARGET_PCS10G_BR + 9, 0x10498000, 1 }, /* 0x610498000 */ + { TARGET_DEV2G5 + 54, 0x1049c000, 1 }, /* 0x61049c000 */ + { TARGET_DEV10G + 10, 0x104a0000, 1 }, /* 0x6104a0000 */ + { TARGET_PCS10G_BR + 10, 0x104a4000, 1 }, /* 0x6104a4000 */ + { TARGET_DEV2G5 + 55, 0x104a8000, 1 }, /* 0x6104a8000 */ + { TARGET_DEV10G + 11, 0x104ac000, 1 }, /* 0x6104ac000 */ + { TARGET_PCS10G_BR + 11, 0x104b0000, 1 }, /* 0x6104b0000 */ + { TARGET_DEV2G5 + 56, 0x104b4000, 1 }, /* 0x6104b4000 */ + { TARGET_DEV25G, 0x104b8000, 1 }, /* 0x6104b8000 */ + { TARGET_PCS25G_BR, 0x104bc000, 1 }, /* 0x6104bc000 */ + { TARGET_DEV2G5 + 58, 0x104c4000, 1 }, /* 0x6104c4000 */ + { TARGET_DEV25G + 2, 0x104c8000, 1 }, /* 0x6104c8000 */ + { TARGET_PCS25G_BR + 2, 0x104cc000, 1 }, /* 0x6104cc000 */ + { TARGET_DEV2G5 + 61, 0x104d4000, 1 }, /* 0x6104d4000 */ + { TARGET_DEV25G + 5, 0x104d8000, 1 }, /* 0x6104d8000 */ + { TARGET_PCS25G_BR + 5, 0x104dc000, 1 }, /* 0x6104dc000 */ + { TARGET_DEV2G5 + 62, 0x104e4000, 1 }, /* 0x6104e4000 */ + { TARGET_DEV25G + 6, 0x104e8000, 1 }, /* 0x6104e8000 */ + { TARGET_PCS25G_BR + 6, 0x104ec000, 
1 }, /* 0x6104ec000 */ + { TARGET_DEV2G5 + 63, 0x104f4000, 1 }, /* 0x6104f4000 */ + { TARGET_DEV25G + 7, 0x104f8000, 1 }, /* 0x6104f8000 */ + { TARGET_PCS25G_BR + 7, 0x104fc000, 1 }, /* 0x6104fc000 */ + { TARGET_DSM, 0x10504000, 1 }, /* 0x610504000 */ + { TARGET_ASM, 0x10600000, 1 }, /* 0x610600000 */ + { TARGET_GCB, 0x11010000, 2 }, /* 0x611010000 */ + { TARGET_QS, 0x11030000, 2 }, /* 0x611030000 */ + { TARGET_PTP, 0x11040000, 2 }, /* 0x611040000 */ + { TARGET_ANA_ACL, 0x11050000, 2 }, /* 0x611050000 */ + { TARGET_LRN, 0x11060000, 2 }, /* 0x611060000 */ + { TARGET_VCAP_SUPER, 0x11080000, 2 }, /* 0x611080000 */ + { TARGET_QSYS, 0x110a0000, 2 }, /* 0x6110a0000 */ + { TARGET_QFWD, 0x110b0000, 2 }, /* 0x6110b0000 */ + { TARGET_XQS, 0x110c0000, 2 }, /* 0x6110c0000 */ + { TARGET_CLKGEN, 0x11100000, 2 }, /* 0x611100000 */ + { TARGET_ANA_AC_POL, 0x11200000, 2 }, /* 0x611200000 */ + { TARGET_QRES, 0x11280000, 2 }, /* 0x611280000 */ + { TARGET_EACL, 0x112c0000, 2 }, /* 0x6112c0000 */ + { TARGET_ANA_CL, 0x11400000, 2 }, /* 0x611400000 */ + { TARGET_ANA_L3, 0x11480000, 2 }, /* 0x611480000 */ + { TARGET_HSCH, 0x11580000, 2 }, /* 0x611580000 */ + { TARGET_REW, 0x11600000, 2 }, /* 0x611600000 */ + { TARGET_ANA_L2, 0x11800000, 2 }, /* 0x611800000 */ + { TARGET_ANA_AC, 0x11900000, 2 }, /* 0x611900000 */ + { TARGET_VOP, 0x11a00000, 2 }, /* 0x611a00000 */ +}; + +static int sparx5_create_targets(struct sparx5 *sparx5) +{ + struct resource *iores[IO_RANGES]; + void __iomem *iomem[IO_RANGES]; + void __iomem *begin[IO_RANGES]; + int range_id[IO_RANGES]; + int idx, jdx; + + for (idx = 0, jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) { + const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx]; + + if (idx == iomap->range) { + range_id[idx] = jdx; + idx++; + } + } + for (idx = 0; idx < IO_RANGES; idx++) { + iores[idx] = platform_get_resource(sparx5->pdev, IORESOURCE_MEM, + idx); + if (!iores[idx]) { + dev_err(sparx5->dev, "Invalid resource\n"); + return -EINVAL; + } + iomem[idx] = devm_ioremap(sparx5->dev, + iores[idx]->start, + resource_size(iores[idx])); + if (!iomem[idx]) { + dev_err(sparx5->dev, "Unable to get switch registers: %s\n", + iores[idx]->name); + return -ENOMEM; + } + begin[idx] = iomem[idx] - sparx5_main_iomap[range_id[idx]].offset; + } + for (jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) { + const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx]; + + sparx5->regs[iomap->id] = begin[iomap->range] + iomap->offset; + } + return 0; +} + +static int sparx5_create_port(struct sparx5 *sparx5, + struct initial_port_config *config) +{ + struct sparx5_port *spx5_port; + struct net_device *ndev; + struct phylink *phylink; + int err; + + ndev = sparx5_create_netdev(sparx5, config->portno); + if (IS_ERR(ndev)) { + dev_err(sparx5->dev, "Could not create net device: %02u\n", + config->portno); + return PTR_ERR(ndev); + } + spx5_port = netdev_priv(ndev); + spx5_port->of_node = config->node; + spx5_port->serdes = config->serdes; + spx5_port->pvid = NULL_VID; + spx5_port->signd_internal = true; + spx5_port->signd_active_high = true; + spx5_port->signd_enable = true; + spx5_port->max_vlan_tags = SPX5_PORT_MAX_TAGS_NONE; + spx5_port->vlan_type = SPX5_VLAN_PORT_TYPE_UNAWARE; + spx5_port->custom_etype = 0x8880; /* Vitesse */ + spx5_port->phylink_pcs.poll = true; + spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops; + spx5_port->is_mrouter = false; + sparx5->ports[config->portno] = spx5_port; + + err = sparx5_port_init(sparx5, spx5_port, &config->conf); + if (err) { + 
dev_err(sparx5->dev, "port init failed\n"); + return err; + } + spx5_port->conf = config->conf; + + /* Setup VLAN */ + sparx5_vlan_port_setup(sparx5, spx5_port->portno); + + /* Create a phylink for PHY management. Also handles SFPs */ + spx5_port->phylink_config.dev = &spx5_port->ndev->dev; + spx5_port->phylink_config.type = PHYLINK_NETDEV; + spx5_port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | + MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD | + MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD; + + __set_bit(PHY_INTERFACE_MODE_SGMII, + spx5_port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_QSGMII, + spx5_port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + spx5_port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + spx5_port->phylink_config.supported_interfaces); + + if (spx5_port->conf.bandwidth == SPEED_5000 || + spx5_port->conf.bandwidth == SPEED_10000 || + spx5_port->conf.bandwidth == SPEED_25000) + __set_bit(PHY_INTERFACE_MODE_5GBASER, + spx5_port->phylink_config.supported_interfaces); + + if (spx5_port->conf.bandwidth == SPEED_10000 || + spx5_port->conf.bandwidth == SPEED_25000) + __set_bit(PHY_INTERFACE_MODE_10GBASER, + spx5_port->phylink_config.supported_interfaces); + + if (spx5_port->conf.bandwidth == SPEED_25000) + __set_bit(PHY_INTERFACE_MODE_25GBASER, + spx5_port->phylink_config.supported_interfaces); + + phylink = phylink_create(&spx5_port->phylink_config, + of_fwnode_handle(config->node), + config->conf.phy_mode, + &sparx5_phylink_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + spx5_port->phylink = phylink; + + return 0; +} + +static int sparx5_init_ram(struct sparx5 *s5) +{ + const struct sparx5_ram_config spx5_ram_cfg[] = { + {spx5_reg_get(s5, ANA_AC_STAT_RESET), ANA_AC_STAT_RESET_RESET}, + {spx5_reg_get(s5, ASM_STAT_CFG), ASM_STAT_CFG_STAT_CNT_CLR_SHOT}, + {spx5_reg_get(s5, QSYS_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, REW_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, VOP_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, ANA_AC_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, ASM_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, EACL_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, VCAP_SUPER_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}, + {spx5_reg_get(s5, DSM_RAM_INIT), QSYS_RAM_INIT_RAM_INIT} + }; + const struct sparx5_ram_config *cfg; + u32 value, pending, jdx, idx; + + for (jdx = 0; jdx < 10; jdx++) { + pending = ARRAY_SIZE(spx5_ram_cfg); + for (idx = 0; idx < ARRAY_SIZE(spx5_ram_cfg); idx++) { + cfg = &spx5_ram_cfg[idx]; + if (jdx == 0) { + writel(cfg->init_val, cfg->init_reg); + } else { + value = readl(cfg->init_reg); + if ((value & cfg->init_val) != cfg->init_val) + pending--; + } + } + if (!pending) + break; + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + } + + if (pending > 0) { + /* Still initializing, should be complete in + * less than 1ms + */ + dev_err(s5->dev, "Memory initialization error\n"); + return -EINVAL; + } + return 0; +} + +static int sparx5_init_switchcore(struct sparx5 *sparx5) +{ + u32 value; + int err = 0; + + spx5_rmw(EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(1), + EACL_POL_EACL_CFG_EACL_FORCE_INIT, + sparx5, + EACL_POL_EACL_CFG); + + spx5_rmw(EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(0), + EACL_POL_EACL_CFG_EACL_FORCE_INIT, + sparx5, + EACL_POL_EACL_CFG); + + /* Initialize memories, if not done already */ + value = spx5_rd(sparx5, HSCH_RESET_CFG); + if (!(value & 
HSCH_RESET_CFG_CORE_ENA)) { + err = sparx5_init_ram(sparx5); + if (err) + return err; + } + + /* Reset counters */ + spx5_wr(ANA_AC_STAT_RESET_RESET_SET(1), sparx5, ANA_AC_STAT_RESET); + spx5_wr(ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(1), sparx5, ASM_STAT_CFG); + + /* Enable switch-core and queue system */ + spx5_wr(HSCH_RESET_CFG_CORE_ENA_SET(1), sparx5, HSCH_RESET_CFG); + + return 0; +} + +static int sparx5_init_coreclock(struct sparx5 *sparx5) +{ + enum sparx5_core_clockfreq freq = sparx5->coreclock; + u32 clk_div, clk_period, pol_upd_int, idx; + + /* Verify if core clock frequency is supported on target. + * If 'VTSS_CORE_CLOCK_DEFAULT' then the highest supported + * freq. is used + */ + switch (sparx5->target_ct) { + case SPX5_TARGET_CT_7546: + if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT) + freq = SPX5_CORE_CLOCK_250MHZ; + else if (sparx5->coreclock != SPX5_CORE_CLOCK_250MHZ) + freq = 0; /* Not supported */ + break; + case SPX5_TARGET_CT_7549: + case SPX5_TARGET_CT_7552: + case SPX5_TARGET_CT_7556: + if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT) + freq = SPX5_CORE_CLOCK_500MHZ; + else if (sparx5->coreclock != SPX5_CORE_CLOCK_500MHZ) + freq = 0; /* Not supported */ + break; + case SPX5_TARGET_CT_7558: + case SPX5_TARGET_CT_7558TSN: + if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT) + freq = SPX5_CORE_CLOCK_625MHZ; + else if (sparx5->coreclock != SPX5_CORE_CLOCK_625MHZ) + freq = 0; /* Not supported */ + break; + case SPX5_TARGET_CT_7546TSN: + if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT) + freq = SPX5_CORE_CLOCK_625MHZ; + break; + case SPX5_TARGET_CT_7549TSN: + case SPX5_TARGET_CT_7552TSN: + case SPX5_TARGET_CT_7556TSN: + if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT) + freq = SPX5_CORE_CLOCK_625MHZ; + else if (sparx5->coreclock == SPX5_CORE_CLOCK_250MHZ) + freq = 0; /* Not supported */ + break; + default: + dev_err(sparx5->dev, "Target (%#04x) not supported\n", + sparx5->target_ct); + return -ENODEV; + } + + switch (freq) { + case SPX5_CORE_CLOCK_250MHZ: + clk_div = 10; + pol_upd_int = 312; + break; + case SPX5_CORE_CLOCK_500MHZ: + clk_div = 5; + pol_upd_int = 624; + break; + case SPX5_CORE_CLOCK_625MHZ: + clk_div = 4; + pol_upd_int = 780; + break; + default: + dev_err(sparx5->dev, "%d coreclock not supported on (%#04x)\n", + sparx5->coreclock, sparx5->target_ct); + return -EINVAL; + } + + /* Update state with chosen frequency */ + sparx5->coreclock = freq; + + /* Configure the LCPLL */ + spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1), + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA | + CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, + sparx5, + CLKGEN_LCPLL1_CORE_CLK_CFG); + + clk_period = sparx5_clk_period(freq); + + spx5_rmw(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(clk_period / 100), + HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, + sparx5, + HSCH_SYS_CLK_PER); + + spx5_rmw(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100), + ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, + sparx5, + ANA_AC_POL_BDLB_DLB_CTRL); + + spx5_rmw(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100), + ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, + sparx5, + 
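/* A worked check of the period programming above, using sparx5_clk_period() from sparx5_main.h: at 625 MHz the core clock period is 1600 ps, so clk_period / 100 = 16 is written both as a 100 ps count (HSCH_SYS_CLK_PER) and as a 0.1 ns count (the BDLB/SLB CLK_PERIOD_01NS fields), while pol_upd_int = 780 policer-update clocks comes from the frequency switch above. */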
ANA_AC_POL_SLB_DLB_CTRL); + + spx5_rmw(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_SET(clk_period / 100), + LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, + sparx5, + LRN_AUTOAGE_CFG_1); + + for (idx = 0; idx < 3; idx++) + spx5_rmw(GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(clk_period / 100), + GCB_SIO_CLOCK_SYS_CLK_PERIOD, + sparx5, + GCB_SIO_CLOCK(idx)); + + spx5_rmw(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET + ((256 * 1000) / clk_period), + HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, + sparx5, + HSCH_TAS_STATEMACHINE_CFG); + + spx5_rmw(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(pol_upd_int), + ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, + sparx5, + ANA_AC_POL_POL_UPD_INT_CFG); + + return 0; +} + +static int sparx5_qlim_set(struct sparx5 *sparx5) +{ + u32 res, dp, prio; + + for (res = 0; res < 2; res++) { + for (prio = 0; prio < 8; prio++) + spx5_wr(0xFFF, sparx5, + QRES_RES_CFG(prio + 630 + res * 1024)); + + for (dp = 0; dp < 4; dp++) + spx5_wr(0xFFF, sparx5, + QRES_RES_CFG(dp + 638 + res * 1024)); + } + + /* Set 80,90,95,100% of memory size for top watermarks */ + spx5_wr(QLIM_WM(80), sparx5, XQS_QLIMIT_SHR_QLIM_CFG(0)); + spx5_wr(QLIM_WM(90), sparx5, XQS_QLIMIT_SHR_CTOP_CFG(0)); + spx5_wr(QLIM_WM(95), sparx5, XQS_QLIMIT_SHR_ATOP_CFG(0)); + spx5_wr(QLIM_WM(100), sparx5, XQS_QLIMIT_SHR_TOP_CFG(0)); + + return 0; +} + +/* Some boards need to map the SGPIO for signal detect explicitly to the + * port module + */ +static void sparx5_board_init(struct sparx5 *sparx5) +{ + int idx; + + if (!sparx5->sd_sgpio_remapping) + return; + + /* Enable SGPIO Signal Detect remapping */ + spx5_rmw(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, + GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, + sparx5, + GCB_HW_SGPIO_SD_CFG); + + /* Refer to LOS SGPIO */ + for (idx = 0; idx < SPX5_PORTS; idx++) + if (sparx5->ports[idx]) + if (sparx5->ports[idx]->conf.sd_sgpio != ~0) + spx5_wr(sparx5->ports[idx]->conf.sd_sgpio, + sparx5, + GCB_HW_SGPIO_TO_SD_MAP_CFG(idx)); +} + +static int sparx5_start(struct sparx5 *sparx5) +{ + u8 broadcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + char queue_name[32]; + u32 idx; + int err; + + /* Setup own UPSIDs */ + for (idx = 0; idx < 3; idx++) { + spx5_wr(idx, sparx5, ANA_AC_OWN_UPSID(idx)); + spx5_wr(idx, sparx5, ANA_CL_OWN_UPSID(idx)); + spx5_wr(idx, sparx5, ANA_L2_OWN_UPSID(idx)); + spx5_wr(idx, sparx5, REW_OWN_UPSID(idx)); + } + + /* Enable CPU ports */ + for (idx = SPX5_PORTS; idx < SPX5_PORTS_ALL; idx++) + spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1), + QFWD_SWITCH_PORT_MODE_PORT_ENA, + sparx5, + QFWD_SWITCH_PORT_MODE(idx)); + + /* Init masks */ + sparx5_update_fwd(sparx5); + + /* CPU copy CPU pgids */ + spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1), + sparx5, ANA_AC_PGID_MISC_CFG(PGID_CPU)); + spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1), + sparx5, ANA_AC_PGID_MISC_CFG(PGID_BCAST)); + + /* Recalc injected frame FCS */ + for (idx = SPX5_PORT_CPU_0; idx <= SPX5_PORT_CPU_1; idx++) + spx5_rmw(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(1), + ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, + sparx5, ANA_CL_FILTER_CTRL(idx)); + + /* Init MAC table, ageing */ + sparx5_mact_init(sparx5); + + /* Init PGID table arbitrator */ + sparx5_pgid_init(sparx5); + + /* Setup VLANs */ + sparx5_vlan_init(sparx5); + + /* Add host mode BC address (points only to CPU) */ + sparx5_mact_learn(sparx5, PGID_CPU, broadcast, NULL_VID); + + /* Enable queue limitation watermarks */ + sparx5_qlim_set(sparx5); + + err = sparx5_config_auto_calendar(sparx5); + if (err) + return err; + + err = sparx5_config_dsm_calendar(sparx5); + if (err) + return err; + + /* 
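A note on the watermark ladder programmed in sparx5_qlim_set() above: QLIM_WM() (defined near the top of this file, not shown here) is assumed to convert a percentage of the shared packet buffer into a cell count. With SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ, i.e. 4194280 / 184 = 22795 cells (per sparx5_main.h), QLIM_WM(80) is on the order of 0.8 * 22795 = 18236 cells, giving the rising QLIM/CTOP/ATOP/TOP thresholds at 80/90/95/100%. Next: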
Init stats */ + err = sparx_stats_init(sparx5); + if (err) + return err; + + /* Init mact_sw struct */ + mutex_init(&sparx5->mact_lock); + INIT_LIST_HEAD(&sparx5->mact_entries); + snprintf(queue_name, sizeof(queue_name), "%s-mact", + dev_name(sparx5->dev)); + sparx5->mact_queue = create_singlethread_workqueue(queue_name); + if (!sparx5->mact_queue) + return -ENOMEM; + + INIT_DELAYED_WORK(&sparx5->mact_work, sparx5_mact_pull_work); + queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work, + SPX5_MACT_PULL_DELAY); + + mutex_init(&sparx5->mdb_lock); + INIT_LIST_HEAD(&sparx5->mdb_entries); + + err = sparx5_register_netdevs(sparx5); + if (err) + return err; + + sparx5_board_init(sparx5); + err = sparx5_register_notifier_blocks(sparx5); + + /* Start Frame DMA with fallback to register based INJ/XTR */ + err = -ENXIO; + if (sparx5->fdma_irq >= 0) { + if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0) + err = devm_request_threaded_irq(sparx5->dev, + sparx5->fdma_irq, + NULL, + sparx5_fdma_handler, + IRQF_ONESHOT, + "sparx5-fdma", sparx5); + if (!err) + err = sparx5_fdma_start(sparx5); + if (err) + sparx5->fdma_irq = -ENXIO; + } else { + sparx5->fdma_irq = -ENXIO; + } + if (err && sparx5->xtr_irq >= 0) { + err = devm_request_irq(sparx5->dev, sparx5->xtr_irq, + sparx5_xtr_handler, IRQF_SHARED, + "sparx5-xtr", sparx5); + if (!err) + err = sparx5_manual_injection_mode(sparx5); + if (err) + sparx5->xtr_irq = -ENXIO; + } else { + sparx5->xtr_irq = -ENXIO; + } + + if (sparx5->ptp_irq >= 0) { + err = devm_request_threaded_irq(sparx5->dev, sparx5->ptp_irq, + NULL, sparx5_ptp_irq_handler, + IRQF_ONESHOT, "sparx5-ptp", + sparx5); + if (err) + sparx5->ptp_irq = -ENXIO; + + sparx5->ptp = 1; + } + + return err; +} + +static void sparx5_cleanup_ports(struct sparx5 *sparx5) +{ + sparx5_unregister_netdevs(sparx5); + sparx5_destroy_netdevs(sparx5); +} + +static int mchp_sparx5_probe(struct platform_device *pdev) +{ + struct initial_port_config *configs, *config; + struct device_node *np = pdev->dev.of_node; + struct device_node *ports, *portnp; + struct reset_control *reset; + struct sparx5 *sparx5; + int idx = 0, err = 0; + + if (!np && !pdev->dev.platform_data) + return -ENODEV; + + sparx5 = devm_kzalloc(&pdev->dev, sizeof(*sparx5), GFP_KERNEL); + if (!sparx5) + return -ENOMEM; + + platform_set_drvdata(pdev, sparx5); + sparx5->pdev = pdev; + sparx5->dev = &pdev->dev; + + /* Do switch core reset if available */ + reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch"); + if (IS_ERR(reset)) + return dev_err_probe(&pdev->dev, PTR_ERR(reset), + "Failed to get switch reset controller.\n"); + reset_control_reset(reset); + + /* Default values, some from DT */ + sparx5->coreclock = SPX5_CORE_CLOCK_DEFAULT; + + ports = of_get_child_by_name(np, "ethernet-ports"); + if (!ports) { + dev_err(sparx5->dev, "no ethernet-ports child node found\n"); + return -ENODEV; + } + sparx5->port_count = of_get_child_count(ports); + + configs = kcalloc(sparx5->port_count, + sizeof(struct initial_port_config), GFP_KERNEL); + if (!configs) { + err = -ENOMEM; + goto cleanup_pnode; + } + + for_each_available_child_of_node(ports, portnp) { + struct sparx5_port_config *conf; + struct phy *serdes; + u32 portno; + + err = of_property_read_u32(portnp, "reg", &portno); + if (err) { + dev_err(sparx5->dev, "port reg property error\n"); + continue; + } + config = &configs[idx]; + conf = &config->conf; + conf->speed = SPEED_UNKNOWN; + conf->bandwidth = SPEED_UNKNOWN; + err = of_get_phy_mode(portnp, &conf->phy_mode); + if (err) { + 
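/* For reference, the per-port properties consumed by this loop are "reg" (chip port number), "phy-mode", "microchip,bandwidth", optionally "microchip,sd-sgpio", and a serdes phy handle fetched via devm_of_phy_get(). A sketch of one hypothetical port node (values illustrative only): port@0 { reg = <0>; microchip,bandwidth = <25000>; phys = <&serdes 13>; phy-mode = "10gbase-r"; }; */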
dev_err(sparx5->dev, "port %u: missing phy-mode\n", + portno); + continue; + } + err = of_property_read_u32(portnp, "microchip,bandwidth", + &conf->bandwidth); + if (err) { + dev_err(sparx5->dev, "port %u: missing bandwidth\n", + portno); + continue; + } + err = of_property_read_u32(portnp, "microchip,sd-sgpio", &conf->sd_sgpio); + if (err) + conf->sd_sgpio = ~0; + else + sparx5->sd_sgpio_remapping = true; + serdes = devm_of_phy_get(sparx5->dev, portnp, NULL); + if (IS_ERR(serdes)) { + err = dev_err_probe(sparx5->dev, PTR_ERR(serdes), + "port %u: missing serdes\n", + portno); + of_node_put(portnp); + goto cleanup_config; + } + config->portno = portno; + config->node = portnp; + config->serdes = serdes; + + conf->media = PHY_MEDIA_DAC; + conf->serdes_reset = true; + conf->portmode = conf->phy_mode; + conf->power_down = true; + idx++; + } + + err = sparx5_create_targets(sparx5); + if (err) + goto cleanup_config; + + if (of_get_mac_address(np, sparx5->base_mac)) { + dev_info(sparx5->dev, "MAC addr was not set, use random MAC\n"); + eth_random_addr(sparx5->base_mac); + sparx5->base_mac[5] = 0; + } + + sparx5->fdma_irq = platform_get_irq_byname(sparx5->pdev, "fdma"); + sparx5->xtr_irq = platform_get_irq_byname(sparx5->pdev, "xtr"); + sparx5->ptp_irq = platform_get_irq_byname(sparx5->pdev, "ptp"); + + /* Read chip ID to check CPU interface */ + sparx5->chip_id = spx5_rd(sparx5, GCB_CHIP_ID); + + sparx5->target_ct = (enum spx5_target_chiptype) + GCB_CHIP_ID_PART_ID_GET(sparx5->chip_id); + + /* Initialize Switchcore and internal RAMs */ + err = sparx5_init_switchcore(sparx5); + if (err) { + dev_err(sparx5->dev, "Switchcore initialization error\n"); + goto cleanup_config; + } + + /* Initialize the LC-PLL (core clock) and set affected registers */ + err = sparx5_init_coreclock(sparx5); + if (err) { + dev_err(sparx5->dev, "LC-PLL initialization error\n"); + goto cleanup_config; + } + + for (idx = 0; idx < sparx5->port_count; ++idx) { + config = &configs[idx]; + if (!config->node) + continue; + + err = sparx5_create_port(sparx5, config); + if (err) { + dev_err(sparx5->dev, "port create error\n"); + goto cleanup_ports; + } + } + + err = sparx5_start(sparx5); + if (err) { + dev_err(sparx5->dev, "Start failed\n"); + goto cleanup_ports; + } + + err = sparx5_qos_init(sparx5); + if (err) { + dev_err(sparx5->dev, "Failed to initialize QoS\n"); + goto cleanup_ports; + } + + err = sparx5_ptp_init(sparx5); + if (err) { + dev_err(sparx5->dev, "PTP failed\n"); + goto cleanup_ports; + } + goto cleanup_config; + +cleanup_ports: + sparx5_cleanup_ports(sparx5); + if (sparx5->mact_queue) + destroy_workqueue(sparx5->mact_queue); +cleanup_config: + kfree(configs); +cleanup_pnode: + of_node_put(ports); + return err; +} + +static int mchp_sparx5_remove(struct platform_device *pdev) +{ + struct sparx5 *sparx5 = platform_get_drvdata(pdev); + + if (sparx5->xtr_irq) { + disable_irq(sparx5->xtr_irq); + sparx5->xtr_irq = -ENXIO; + } + if (sparx5->fdma_irq) { + disable_irq(sparx5->fdma_irq); + sparx5->fdma_irq = -ENXIO; + } + sparx5_ptp_deinit(sparx5); + sparx5_fdma_stop(sparx5); + sparx5_cleanup_ports(sparx5); + /* Unregister netdevs */ + sparx5_unregister_notifier_blocks(sparx5); + destroy_workqueue(sparx5->mact_queue); + + return 0; +} + +static const struct of_device_id mchp_sparx5_match[] = { + { .compatible = "microchip,sparx5-switch" }, + { } +}; +MODULE_DEVICE_TABLE(of, mchp_sparx5_match); + +static struct platform_driver mchp_sparx5_driver = { + .probe = mchp_sparx5_probe, + .remove = mchp_sparx5_remove, + .driver = { + 
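/* On the teardown ordering above: the error labels unwind in reverse probe order (cleanup_ports also destroys the MACT workqueue created in sparx5_start()), and the success path falls through to cleanup_config as well, since the configs array is only needed while probing. */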
.name = "sparx5-switch", + .of_match_table = mchp_sparx5_match, + }, +}; + +module_platform_driver(mchp_sparx5_driver); + +MODULE_DESCRIPTION("Microchip Sparx5 switch driver"); +MODULE_AUTHOR("Steen Hegelund <steen.hegelund@microchip.com>"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h new file mode 100644 index 000000000..7a83222ca --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -0,0 +1,546 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + */ + +#ifndef __SPARX5_MAIN_H__ +#define __SPARX5_MAIN_H__ + +#include <linux/types.h> +#include <linux/phy/phy.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/if_vlan.h> +#include <linux/bitmap.h> +#include <linux/phylink.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/hrtimer.h> + +#include "sparx5_main_regs.h" + +/* Target chip type */ +enum spx5_target_chiptype { + SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */ + SPX5_TARGET_CT_7549 = 0x7549, /* SparX-5-90 Enterprise */ + SPX5_TARGET_CT_7552 = 0x7552, /* SparX-5-128 Enterprise */ + SPX5_TARGET_CT_7556 = 0x7556, /* SparX-5-160 Enterprise */ + SPX5_TARGET_CT_7558 = 0x7558, /* SparX-5-200 Enterprise */ + SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */ + SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */ + SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */ + SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */ + SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */ +}; + +enum sparx5_port_max_tags { + SPX5_PORT_MAX_TAGS_NONE, /* No extra tags allowed */ + SPX5_PORT_MAX_TAGS_ONE, /* Single tag allowed */ + SPX5_PORT_MAX_TAGS_TWO /* Single and double tag allowed */ +}; + +enum sparx5_vlan_port_type { + SPX5_VLAN_PORT_TYPE_UNAWARE, /* VLAN unaware port */ + SPX5_VLAN_PORT_TYPE_C, /* C-port */ + SPX5_VLAN_PORT_TYPE_S, /* S-port */ + SPX5_VLAN_PORT_TYPE_S_CUSTOM /* S-port using custom type */ +}; + +#define SPX5_PORTS 65 +#define SPX5_PORT_CPU (SPX5_PORTS) /* Next port is CPU port */ +#define SPX5_PORT_CPU_0 (SPX5_PORT_CPU + 0) /* CPU Port 65 */ +#define SPX5_PORT_CPU_1 (SPX5_PORT_CPU + 1) /* CPU Port 66 */ +#define SPX5_PORT_VD0 (SPX5_PORT_CPU + 2) /* VD0/Port 67 used for IPMC */ +#define SPX5_PORT_VD1 (SPX5_PORT_CPU + 3) /* VD1/Port 68 used for AFI/OAM */ +#define SPX5_PORT_VD2 (SPX5_PORT_CPU + 4) /* VD2/Port 69 used for IPinIP*/ +#define SPX5_PORTS_ALL (SPX5_PORT_CPU + 5) /* Total number of ports */ + +#define PGID_BASE SPX5_PORTS /* Starts after port PGIDs */ +#define PGID_UC_FLOOD (PGID_BASE + 0) +#define PGID_MC_FLOOD (PGID_BASE + 1) +#define PGID_IPV4_MC_DATA (PGID_BASE + 2) +#define PGID_IPV4_MC_CTRL (PGID_BASE + 3) +#define PGID_IPV6_MC_DATA (PGID_BASE + 4) +#define PGID_IPV6_MC_CTRL (PGID_BASE + 5) +#define PGID_BCAST (PGID_BASE + 6) +#define PGID_CPU (PGID_BASE + 7) +#define PGID_MCAST_START (PGID_BASE + 8) + +#define PGID_TABLE_SIZE 3290 + +#define IFH_LEN 9 /* 36 bytes */ +#define NULL_VID 0 +#define SPX5_MACT_PULL_DELAY (2 * HZ) +#define SPX5_STATS_CHECK_DELAY (1 * HZ) +#define SPX5_PRIOS 8 /* Number of priority queues */ +#define SPX5_BUFFER_CELL_SZ 184 /* Cell size */ +#define SPX5_BUFFER_MEMORY 4194280 /* 22795 words * 184 bytes */ + +#define XTR_QUEUE 0 +#define INJ_QUEUE 0 + +#define FDMA_DCB_MAX 64 +#define 
FDMA_RX_DCB_MAX_DBS 15 +#define FDMA_TX_DCB_MAX_DBS 1 + +#define SPARX5_PHC_COUNT 3 +#define SPARX5_PHC_PORT 0 + +#define IFH_REW_OP_NOOP 0x0 +#define IFH_REW_OP_ONE_STEP_PTP 0x3 +#define IFH_REW_OP_TWO_STEP_PTP 0x4 + +#define IFH_PDU_TYPE_NONE 0x0 +#define IFH_PDU_TYPE_PTP 0x5 +#define IFH_PDU_TYPE_IPV4_UDP_PTP 0x6 +#define IFH_PDU_TYPE_IPV6_UDP_PTP 0x7 + +struct sparx5; + +struct sparx5_db_hw { + u64 dataptr; + u64 status; +}; + +struct sparx5_rx_dcb_hw { + u64 nextptr; + u64 info; + struct sparx5_db_hw db[FDMA_RX_DCB_MAX_DBS]; +}; + +struct sparx5_tx_dcb_hw { + u64 nextptr; + u64 info; + struct sparx5_db_hw db[FDMA_TX_DCB_MAX_DBS]; +}; + +/* Frame DMA receive state: + * For each DB, there is an SKB, and the skb data pointer is mapped in + * the DB. Once a frame is received, the skb is given to the upper layers + * and a new skb is added to the DCB. + * When db_index reaches FDMA_RX_DCB_MAX_DBS, the DCB is reused. + */ +struct sparx5_rx { + struct sparx5_rx_dcb_hw *dcb_entries; + struct sparx5_rx_dcb_hw *last_entry; + struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; + int db_index; + int dcb_index; + dma_addr_t dma; + struct napi_struct napi; + u32 channel_id; + struct net_device *ndev; + u64 packets; +}; + +/* Frame DMA transmit state: + * DCBs are chained using the DCB's nextptr field. + */ +struct sparx5_tx { + struct sparx5_tx_dcb_hw *curr_entry; + struct sparx5_tx_dcb_hw *first_entry; + struct list_head db_list; + dma_addr_t dma; + u32 channel_id; + u64 packets; + u64 dropped; +}; + +struct sparx5_port_config { + phy_interface_t portmode; + u32 bandwidth; + int speed; + int duplex; + enum phy_media media; + bool inband; + bool power_down; + bool autoneg; + bool serdes_reset; + u32 pause; + u32 pause_adv; + phy_interface_t phy_mode; + u32 sd_sgpio; +}; + +struct sparx5_port { + struct net_device *ndev; + struct sparx5 *sparx5; + struct device_node *of_node; + struct phy *serdes; + struct sparx5_port_config conf; + struct phylink_config phylink_config; + struct phylink *phylink; + struct phylink_pcs phylink_pcs; + u16 portno; + /* Ingress default VLAN (pvid) */ + u16 pvid; + /* Egress default VLAN (vid) */ + u16 vid; + bool signd_internal; + bool signd_active_high; + bool signd_enable; + bool flow_control; + enum sparx5_port_max_tags max_vlan_tags; + enum sparx5_vlan_port_type vlan_type; + u32 custom_etype; + bool vlan_aware; + struct hrtimer inj_timer; + /* ptp */ + u8 ptp_cmd; + u16 ts_id; + struct sk_buff_head tx_skbs; + bool is_mrouter; +}; + +enum sparx5_core_clockfreq { + SPX5_CORE_CLOCK_DEFAULT, /* Defaults to the highest supported frequency */ + SPX5_CORE_CLOCK_250MHZ, /* 250MHZ core clock frequency */ + SPX5_CORE_CLOCK_500MHZ, /* 500MHZ core clock frequency */ + SPX5_CORE_CLOCK_625MHZ, /* 625MHZ core clock frequency */ +}; + +struct sparx5_phc { + struct ptp_clock *clock; + struct ptp_clock_info info; + struct hwtstamp_config hwtstamp_config; + struct sparx5 *sparx5; + u8 index; +}; + +struct sparx5_skb_cb { + u8 rew_op; + u8 pdu_type; + u8 pdu_w16_offset; + u16 ts_id; + unsigned long jiffies; +}; + +struct sparx5_mdb_entry { + struct list_head list; + DECLARE_BITMAP(port_mask, SPX5_PORTS); + unsigned char addr[ETH_ALEN]; + bool cpu_copy; + u16 vid; + u16 pgid_idx; +}; + +#define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10) +#define SPARX5_SKB_CB(skb) \ + ((struct sparx5_skb_cb *)((skb)->cb)) + +struct sparx5 { + struct platform_device *pdev; + struct device *dev; + u32 chip_id; + enum spx5_target_chiptype target_ct; + void __iomem *regs[NUM_TARGETS]; + int port_count; + 
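/* A sizing note on the FDMA state kept in this struct: with FDMA_DCB_MAX = 64 chained DCBs and FDMA_RX_DCB_MAX_DBS = 15 data blocks per RX DCB, the rx.skb matrix in struct sparx5_rx above pre-stages up to 64 * 15 = 960 receive buffers, while TX uses a single DB per DCB (FDMA_TX_DCB_MAX_DBS = 1). */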
struct mutex lock; /* MAC reg lock */ + /* port structures are in net device */ + struct sparx5_port *ports[SPX5_PORTS]; + enum sparx5_core_clockfreq coreclock; + /* Statistics */ + u32 num_stats; + u32 num_ethtool_stats; + const char * const *stats_layout; + u64 *stats; + /* Workqueue for reading stats */ + struct mutex queue_stats_lock; + struct delayed_work stats_work; + struct workqueue_struct *stats_queue; + /* Notifiers */ + struct notifier_block netdevice_nb; + struct notifier_block switchdev_nb; + struct notifier_block switchdev_blocking_nb; + /* Switch state */ + u8 base_mac[ETH_ALEN]; + /* Associated bridge device (when bridged) */ + struct net_device *hw_bridge_dev; + /* Bridged interfaces */ + DECLARE_BITMAP(bridge_mask, SPX5_PORTS); + DECLARE_BITMAP(bridge_fwd_mask, SPX5_PORTS); + DECLARE_BITMAP(bridge_lrn_mask, SPX5_PORTS); + DECLARE_BITMAP(vlan_mask[VLAN_N_VID], SPX5_PORTS); + /* SW MAC table */ + struct list_head mact_entries; + /* mac table list (mact_entries) mutex */ + struct mutex mact_lock; + /* SW MDB table */ + struct list_head mdb_entries; + /* mdb list mutex */ + struct mutex mdb_lock; + struct delayed_work mact_work; + struct workqueue_struct *mact_queue; + /* Board specifics */ + bool sd_sgpio_remapping; + /* Register based inj/xtr */ + int xtr_irq; + /* Frame DMA */ + int fdma_irq; + struct sparx5_rx rx; + struct sparx5_tx tx; + /* PTP */ + bool ptp; + struct sparx5_phc phc[SPARX5_PHC_COUNT]; + spinlock_t ptp_clock_lock; /* lock for phc */ + spinlock_t ptp_ts_id_lock; /* lock for ts_id */ + struct mutex ptp_lock; /* lock for ptp interface state */ + u16 ptp_skbs; + int ptp_irq; + /* PGID allocation map */ + u8 pgid_map[PGID_TABLE_SIZE]; +}; + +/* sparx5_switchdev.c */ +int sparx5_register_notifier_blocks(struct sparx5 *sparx5); +void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5); + +/* sparx5_packet.c */ +struct frame_info { + int src_port; + u32 timestamp; +}; + +void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp); +void sparx5_ifh_parse(u32 *ifh, struct frame_info *info); +irqreturn_t sparx5_xtr_handler(int irq, void *_priv); +netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev); +int sparx5_manual_injection_mode(struct sparx5 *sparx5); +void sparx5_port_inj_timer_setup(struct sparx5_port *port); + +/* sparx5_fdma.c */ +int sparx5_fdma_start(struct sparx5 *sparx5); +int sparx5_fdma_stop(struct sparx5 *sparx5); +int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb); +irqreturn_t sparx5_fdma_handler(int irq, void *args); + +/* sparx5_mactable.c */ +void sparx5_mact_pull_work(struct work_struct *work); +int sparx5_mact_learn(struct sparx5 *sparx5, int port, + const unsigned char mac[ETH_ALEN], u16 vid); +bool sparx5_mact_getnext(struct sparx5 *sparx5, + unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2); +int sparx5_mact_find(struct sparx5 *sparx5, + const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2); +int sparx5_mact_forget(struct sparx5 *sparx5, + const unsigned char mac[ETH_ALEN], u16 vid); +int sparx5_add_mact_entry(struct sparx5 *sparx5, + struct net_device *dev, + u16 portno, + const unsigned char *addr, u16 vid); +int sparx5_del_mact_entry(struct sparx5 *sparx5, + const unsigned char *addr, + u16 vid); +int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr); +int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr); +void sparx5_set_ageing(struct sparx5 *sparx5, int msecs); +void sparx5_mact_init(struct sparx5 *sparx5); + +/* sparx5_vlan.c */ +void 
sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable); +void sparx5_pgid_clear(struct sparx5 *spx5, int pgid); +void sparx5_pgid_read_mask(struct sparx5 *sparx5, int pgid, u32 portmask[3]); +void sparx5_update_fwd(struct sparx5 *sparx5); +void sparx5_vlan_init(struct sparx5 *sparx5); +void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno); +int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid, + bool untagged); +int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid); +void sparx5_vlan_port_apply(struct sparx5 *sparx5, struct sparx5_port *port); + +/* sparx5_calendar.c */ +int sparx5_config_auto_calendar(struct sparx5 *sparx5); +int sparx5_config_dsm_calendar(struct sparx5 *sparx5); + +/* sparx5_ethtool.c */ +void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats); +int sparx_stats_init(struct sparx5 *sparx5); + +/* sparx5_netdev.c */ +void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp); +void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op); +void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type); +void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset); +void sparx5_set_port_ifh(void *ifh_hdr, u16 portno); +bool sparx5_netdevice_check(const struct net_device *dev); +struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno); +int sparx5_register_netdevs(struct sparx5 *sparx5); +void sparx5_destroy_netdevs(struct sparx5 *sparx5); +void sparx5_unregister_netdevs(struct sparx5 *sparx5); + +/* sparx5_ptp.c */ +int sparx5_ptp_init(struct sparx5 *sparx5); +void sparx5_ptp_deinit(struct sparx5 *sparx5); +int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr); +int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr); +void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb, + u64 timestamp); +int sparx5_ptp_txtstamp_request(struct sparx5_port *port, + struct sk_buff *skb); +void sparx5_ptp_txtstamp_release(struct sparx5_port *port, + struct sk_buff *skb); +irqreturn_t sparx5_ptp_irq_handler(int irq, void *args); + +/* sparx5_pgid.c */ +enum sparx5_pgid_type { + SPX5_PGID_FREE, + SPX5_PGID_RESERVED, + SPX5_PGID_MULTICAST, +}; + +void sparx5_pgid_init(struct sparx5 *spx5); +int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx); +int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx); +int sparx5_pgid_free(struct sparx5 *spx5, u16 idx); + +/* Clock period in picoseconds */ +static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock) +{ + switch (cclock) { + case SPX5_CORE_CLOCK_250MHZ: + return 4000; + case SPX5_CORE_CLOCK_500MHZ: + return 2000; + case SPX5_CORE_CLOCK_625MHZ: + default: + return 1600; + } +} + +static inline bool sparx5_is_baser(phy_interface_t interface) +{ + return interface == PHY_INTERFACE_MODE_5GBASER || + interface == PHY_INTERFACE_MODE_10GBASER || + interface == PHY_INTERFACE_MODE_25GBASER; +} + +extern const struct phylink_mac_ops sparx5_phylink_mac_ops; +extern const struct phylink_pcs_ops sparx5_phylink_pcs_ops; +extern const struct ethtool_ops sparx5_ethtool_ops; + +/* Calculate raw offset */ +static inline __pure int spx5_offset(int id, int tinst, int tcnt, + int gbase, int ginst, + int gcnt, int gwidth, + int raddr, int rinst, + int rcnt, int rwidth) +{ + WARN_ON((tinst) >= tcnt); + WARN_ON((ginst) >= gcnt); + WARN_ON((rinst) >= rcnt); + return gbase + ((ginst) * gwidth) + + raddr + ((rinst) * rwidth); +} + +/* Read, Write and modify registers content. 
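As a worked example of this addressing scheme: ANA_AC_OWN_UPSID(r) expands to __REG(TARGET_ANA_AC, 0, 1, 894472, 0, 1, 352, 52, r, 3, 4), which spx5_addr() below resolves to regs[TARGET_ANA_AC + 0] + 894472 + 0 * 352 + 52 + r * 4, i.e. replication r of a 3-entry, 4-byte-spaced register within a single group at group base 894472.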
+ * The register definition macros start at the id + */ +static inline void __iomem *spx5_addr(void __iomem *base[], + int id, int tinst, int tcnt, + int gbase, int ginst, + int gcnt, int gwidth, + int raddr, int rinst, + int rcnt, int rwidth) +{ + WARN_ON((tinst) >= tcnt); + WARN_ON((ginst) >= gcnt); + WARN_ON((rinst) >= rcnt); + return base[id + (tinst)] + + gbase + ((ginst) * gwidth) + + raddr + ((rinst) * rwidth); +} + +static inline void __iomem *spx5_inst_addr(void __iomem *base, + int gbase, int ginst, + int gcnt, int gwidth, + int raddr, int rinst, + int rcnt, int rwidth) +{ + WARN_ON((ginst) >= gcnt); + WARN_ON((rinst) >= rcnt); + return base + + gbase + ((ginst) * gwidth) + + raddr + ((rinst) * rwidth); +} + +static inline u32 spx5_rd(struct sparx5 *sparx5, int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + return readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); +} + +static inline u32 spx5_inst_rd(void __iomem *iomem, int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + return readl(spx5_inst_addr(iomem, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); +} + +static inline void spx5_wr(u32 val, struct sparx5 *sparx5, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + writel(val, spx5_addr(sparx5->regs, id, tinst, tcnt, + gbase, ginst, gcnt, gwidth, + raddr, rinst, rcnt, rwidth)); +} + +static inline void spx5_inst_wr(u32 val, void __iomem *iomem, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + writel(val, spx5_inst_addr(iomem, + gbase, ginst, gcnt, gwidth, + raddr, rinst, rcnt, rwidth)); +} + +static inline void spx5_rmw(u32 val, u32 mask, struct sparx5 *sparx5, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + u32 nval; + + nval = readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); + nval = (nval & ~mask) | (val & mask); + writel(nval, spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); +} + +static inline void spx5_inst_rmw(u32 val, u32 mask, void __iomem *iomem, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + u32 nval; + + nval = readl(spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr, + rinst, rcnt, rwidth)); + nval = (nval & ~mask) | (val & mask); + writel(nval, spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr, + rinst, rcnt, rwidth)); +} + +static inline void __iomem *spx5_inst_get(struct sparx5 *sparx5, int id, int tinst) +{ + return sparx5->regs[id + tinst]; +} + +static inline void __iomem *spx5_reg_get(struct sparx5 *sparx5, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + return spx5_addr(sparx5->regs, id, tinst, tcnt, + gbase, ginst, gcnt, gwidth, + raddr, rinst, rcnt, rwidth); +} + +#endif /* __SPARX5_MAIN_H__ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h new file mode 100644 index 000000000..fa2eb70f4 --- /dev/null +++ 
b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h @@ -0,0 +1,5138 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. + */ + +/* This file is autogenerated by cml-utils 2022-02-26 14:15:01 +0100. + * Commit ID: 98bdd3d171cc2a1afd30d241d41a4281d471a48c (dirty) + */ + +#ifndef _SPARX5_MAIN_REGS_H_ +#define _SPARX5_MAIN_REGS_H_ + +#include <linux/bitfield.h> +#include <linux/types.h> +#include <linux/bug.h> + +enum sparx5_target { + TARGET_ANA_AC = 1, + TARGET_ANA_ACL = 2, + TARGET_ANA_AC_POL = 4, + TARGET_ANA_CL = 6, + TARGET_ANA_L2 = 7, + TARGET_ANA_L3 = 8, + TARGET_ASM = 9, + TARGET_CLKGEN = 11, + TARGET_CPU = 12, + TARGET_DEV10G = 17, + TARGET_DEV25G = 29, + TARGET_DEV2G5 = 37, + TARGET_DEV5G = 102, + TARGET_DSM = 115, + TARGET_EACL = 116, + TARGET_FDMA = 117, + TARGET_GCB = 118, + TARGET_HSCH = 119, + TARGET_LRN = 122, + TARGET_PCEP = 129, + TARGET_PCS10G_BR = 132, + TARGET_PCS25G_BR = 144, + TARGET_PCS5G_BR = 160, + TARGET_PORT_CONF = 173, + TARGET_PTP = 174, + TARGET_QFWD = 175, + TARGET_QRES = 176, + TARGET_QS = 177, + TARGET_QSYS = 178, + TARGET_REW = 179, + TARGET_VCAP_SUPER = 326, + TARGET_VOP = 327, + TARGET_XQS = 331, + NUM_TARGETS = 332 +}; + +#define __REG(...) __VA_ARGS__ + +/* ANA_AC:RAM_CTRL:RAM_INIT */ +#define ANA_AC_RAM_INIT __REG(TARGET_ANA_AC, 0, 1, 839108, 0, 1, 4, 0, 0, 1, 4) + +#define ANA_AC_RAM_INIT_RAM_INIT BIT(1) +#define ANA_AC_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(ANA_AC_RAM_INIT_RAM_INIT, x) +#define ANA_AC_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(ANA_AC_RAM_INIT_RAM_INIT, x) + +#define ANA_AC_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x) +#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x) + +/* ANA_AC:PS_COMMON:OWN_UPSID */ +#define ANA_AC_OWN_UPSID(r) __REG(TARGET_ANA_AC, 0, 1, 894472, 0, 1, 352, 52, r, 3, 4) + +#define ANA_AC_OWN_UPSID_OWN_UPSID GENMASK(4, 0) +#define ANA_AC_OWN_UPSID_OWN_UPSID_SET(x)\ + FIELD_PREP(ANA_AC_OWN_UPSID_OWN_UPSID, x) +#define ANA_AC_OWN_UPSID_OWN_UPSID_GET(x)\ + FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x) + +/* ANA_AC:SRC:SRC_CFG */ +#define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 0, 0, 1, 4) + +/* ANA_AC:SRC:SRC_CFG1 */ +#define ANA_AC_SRC_CFG1(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 4, 0, 1, 4) + +/* ANA_AC:SRC:SRC_CFG2 */ +#define ANA_AC_SRC_CFG2(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 8, 0, 1, 4) + +#define ANA_AC_SRC_CFG2_PORT_MASK2 BIT(0) +#define ANA_AC_SRC_CFG2_PORT_MASK2_SET(x)\ + FIELD_PREP(ANA_AC_SRC_CFG2_PORT_MASK2, x) +#define ANA_AC_SRC_CFG2_PORT_MASK2_GET(x)\ + FIELD_GET(ANA_AC_SRC_CFG2_PORT_MASK2, x) + +/* ANA_AC:PGID:PGID_CFG */ +#define ANA_AC_PGID_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 0, 0, 1, 4) + +/* ANA_AC:PGID:PGID_CFG1 */ +#define ANA_AC_PGID_CFG1(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 4, 0, 1, 4) + +/* ANA_AC:PGID:PGID_CFG2 */ +#define ANA_AC_PGID_CFG2(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 8, 0, 1, 4) + +#define ANA_AC_PGID_CFG2_PORT_MASK2 BIT(0) +#define ANA_AC_PGID_CFG2_PORT_MASK2_SET(x)\ + FIELD_PREP(ANA_AC_PGID_CFG2_PORT_MASK2, x) +#define ANA_AC_PGID_CFG2_PORT_MASK2_GET(x)\ + FIELD_GET(ANA_AC_PGID_CFG2_PORT_MASK2, x) + +/* ANA_AC:PGID:PGID_MISC_CFG */ +#define ANA_AC_PGID_MISC_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 12, 0, 1, 4) + +#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU GENMASK(6, 4) +#define 
ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_SET(x)\ + FIELD_PREP(ANA_AC_PGID_MISC_CFG_PGID_CPU_QU, x) +#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_GET(x)\ + FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_QU, x) + +#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA BIT(1) +#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA_SET(x)\ + FIELD_PREP(ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA, x) +#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA_GET(x)\ + FIELD_GET(ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA, x) + +#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA BIT(0) +#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(x)\ + FIELD_PREP(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x) +#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(x)\ + FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x) + +/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */ +#define ANA_AC_PORT_SGE_CFG(r) __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 0, r, 4, 4) + +#define ANA_AC_PORT_SGE_CFG_MASK GENMASK(15, 0) +#define ANA_AC_PORT_SGE_CFG_MASK_SET(x)\ + FIELD_PREP(ANA_AC_PORT_SGE_CFG_MASK, x) +#define ANA_AC_PORT_SGE_CFG_MASK_GET(x)\ + FIELD_GET(ANA_AC_PORT_SGE_CFG_MASK, x) + +/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */ +#define ANA_AC_STAT_RESET __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 16, 0, 1, 4) + +#define ANA_AC_STAT_RESET_RESET BIT(0) +#define ANA_AC_STAT_RESET_RESET_SET(x)\ + FIELD_PREP(ANA_AC_STAT_RESET_RESET, x) +#define ANA_AC_STAT_RESET_RESET_GET(x)\ + FIELD_GET(ANA_AC_STAT_RESET_RESET, x) + +/* ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */ +#define ANA_AC_PORT_STAT_CFG(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 4, r, 4, 4) + +#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK GENMASK(11, 4) +#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(x)\ + FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, x) +#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_GET(x)\ + FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, x) + +#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE GENMASK(3, 1) +#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_SET(x)\ + FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE, x) +#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_GET(x)\ + FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE, x) + +#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE BIT(0) +#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_SET(x)\ + FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x) +#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_GET(x)\ + FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x) + +/* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */ +#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 20, r, 4, 4) + +/* ANA_ACL:COMMON:OWN_UPSID */ +#define ANA_ACL_OWN_UPSID(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 580, r, 3, 4) + +#define ANA_ACL_OWN_UPSID_OWN_UPSID GENMASK(4, 0) +#define ANA_ACL_OWN_UPSID_OWN_UPSID_SET(x)\ + FIELD_PREP(ANA_ACL_OWN_UPSID_OWN_UPSID, x) +#define ANA_ACL_OWN_UPSID_OWN_UPSID_GET(x)\ + FIELD_GET(ANA_ACL_OWN_UPSID_OWN_UPSID, x) + +/* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */ +#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL, 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4) + +#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT GENMASK(9, 0) +#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(x)\ + FIELD_PREP(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x) +#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_GET(x)\ + FIELD_GET(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x) + +/* ANA_AC_POL:COMMON_BDLB:DLB_CTRL */ +#define ANA_AC_POL_BDLB_DLB_CTRL __REG(TARGET_ANA_AC_POL, 0, 1, 79048, 0, 1, 8, 0, 0, 1, 4) + +#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19) +#define 
ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\ + FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, x) +#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_GET(x)\ + FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, x) + +#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT GENMASK(18, 4) +#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT_SET(x)\ + FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT, x) +#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT_GET(x)\ + FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT, x) + +#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA BIT(1) +#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA_SET(x)\ + FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA, x) +#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA_GET(x)\ + FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA, x) + +#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA BIT(0) +#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_SET(x)\ + FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x) +#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\ + FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x) + +/* ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */ +#define ANA_AC_POL_SLB_DLB_CTRL __REG(TARGET_ANA_AC_POL, 0, 1, 79056, 0, 1, 20, 0, 0, 1, 4) + +#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19) +#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\ + FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, x) +#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_GET(x)\ + FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, x) + +#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT GENMASK(18, 4) +#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT_SET(x)\ + FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT, x) +#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT_GET(x)\ + FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT, x) + +#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA BIT(1) +#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA_SET(x)\ + FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA, x) +#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA_GET(x)\ + FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA, x) + +#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA BIT(0) +#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_SET(x)\ + FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x) +#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\ + FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x) + +/* ANA_CL:PORT:FILTER_CTRL */ +#define ANA_CL_FILTER_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 4, 0, 1, 4) + +#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS BIT(2) +#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(x)\ + FIELD_PREP(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS, x) +#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_GET(x)\ + FIELD_GET(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS, x) + +#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS BIT(1) +#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS_SET(x)\ + FIELD_PREP(ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS, x) +#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS_GET(x)\ + FIELD_GET(ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS, x) + +#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA BIT(0) +#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(x)\ + FIELD_PREP(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x) +#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_GET(x)\ + FIELD_GET(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x) + +/* ANA_CL:PORT:VLAN_FILTER_CTRL */ +#define ANA_CL_VLAN_FILTER_CTRL(g, r) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 8, r, 3, 4) + +#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA BIT(10) +#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA, x) +#define 
ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA, x) + +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS BIT(9) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS BIT(8) +#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS BIT(7) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS BIT(6) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS BIT(5) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS BIT(4) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS BIT(3) +#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS BIT(2) +#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS BIT(1) +#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS, x) + +#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS BIT(0) +#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x) +#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x) + +/* ANA_CL:PORT:ETAG_FILTER_CTRL */ +#define ANA_CL_ETAG_FILTER_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 20, 0, 1, 4) + +#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA BIT(1) +#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_SET(x)\ + FIELD_PREP(ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA, x) +#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_GET(x)\ + FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA, x) + +#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS BIT(0) +#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_SET(x)\ + FIELD_PREP(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x) +#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_GET(x)\ + 
FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x) + +/* ANA_CL:PORT:VLAN_CTRL */ +#define ANA_CL_VLAN_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 32, 0, 1, 4) + +#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS GENMASK(30, 26) +#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS, x) +#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS, x) + +#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP GENMASK(25, 23) +#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP, x) +#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP, x) + +#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI BIT(22) +#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI, x) +#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI, x) + +#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA BIT(21) +#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA, x) +#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA, x) + +#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL BIT(20) +#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_TAG_SEL, x) +#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_TAG_SEL, x) + +#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA BIT(19) +#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA, x) +#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA, x) + +#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT GENMASK(18, 17) +#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_POP_CNT, x) +#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_POP_CNT, x) + +#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE BIT(16) +#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_TAG_TYPE, x) +#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_TAG_TYPE, x) + +#define ANA_CL_VLAN_CTRL_PORT_PCP GENMASK(15, 13) +#define ANA_CL_VLAN_CTRL_PORT_PCP_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_PCP, x) +#define ANA_CL_VLAN_CTRL_PORT_PCP_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_PCP, x) + +#define ANA_CL_VLAN_CTRL_PORT_DEI BIT(12) +#define ANA_CL_VLAN_CTRL_PORT_DEI_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_DEI, x) +#define ANA_CL_VLAN_CTRL_PORT_DEI_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_DEI, x) + +#define ANA_CL_VLAN_CTRL_PORT_VID GENMASK(11, 0) +#define ANA_CL_VLAN_CTRL_PORT_VID_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VID, x) +#define ANA_CL_VLAN_CTRL_PORT_VID_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VID, x) + +/* ANA_CL:PORT:VLAN_CTRL_2 */ +#define ANA_CL_VLAN_CTRL_2(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 36, 0, 1, 4) + +#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT GENMASK(1, 0) +#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_SET(x)\ + FIELD_PREP(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x) +#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_GET(x)\ + FIELD_GET(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x) + +/* ANA_CL:PORT:CAPTURE_BPDU_CFG */ +#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 196, 0, 1, 4) + +/* ANA_CL:COMMON:OWN_UPSID */ +#define ANA_CL_OWN_UPSID(r) __REG(TARGET_ANA_CL, 0, 1, 166912, 
0, 1, 756, 0, r, 3, 4) + +#define ANA_CL_OWN_UPSID_OWN_UPSID GENMASK(4, 0) +#define ANA_CL_OWN_UPSID_OWN_UPSID_SET(x)\ + FIELD_PREP(ANA_CL_OWN_UPSID_OWN_UPSID, x) +#define ANA_CL_OWN_UPSID_OWN_UPSID_GET(x)\ + FIELD_GET(ANA_CL_OWN_UPSID_OWN_UPSID, x) + +/* ANA_L2:COMMON:AUTO_LRN_CFG */ +#define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4) + +/* ANA_L2:COMMON:AUTO_LRN_CFG1 */ +#define ANA_L2_AUTO_LRN_CFG1 __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 28, 0, 1, 4) + +/* ANA_L2:COMMON:AUTO_LRN_CFG2 */ +#define ANA_L2_AUTO_LRN_CFG2 __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 32, 0, 1, 4) + +#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2 BIT(0) +#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_SET(x)\ + FIELD_PREP(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x) +#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_GET(x)\ + FIELD_GET(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x) + +/* ANA_L2:COMMON:OWN_UPSID */ +#define ANA_L2_OWN_UPSID(r) __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 672, r, 3, 4) + +#define ANA_L2_OWN_UPSID_OWN_UPSID GENMASK(4, 0) +#define ANA_L2_OWN_UPSID_OWN_UPSID_SET(x)\ + FIELD_PREP(ANA_L2_OWN_UPSID_OWN_UPSID, x) +#define ANA_L2_OWN_UPSID_OWN_UPSID_GET(x)\ + FIELD_GET(ANA_L2_OWN_UPSID_OWN_UPSID, x) + +/* ANA_L3:COMMON:VLAN_CTRL */ +#define ANA_L3_VLAN_CTRL __REG(TARGET_ANA_L3, 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4) + +#define ANA_L3_VLAN_CTRL_VLAN_ENA BIT(0) +#define ANA_L3_VLAN_CTRL_VLAN_ENA_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CTRL_VLAN_ENA, x) +#define ANA_L3_VLAN_CTRL_VLAN_ENA_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CTRL_VLAN_ENA, x) + +/* ANA_L3:VLAN:VLAN_CFG */ +#define ANA_L3_VLAN_CFG(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 8, 0, 1, 4) + +#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR GENMASK(30, 24) +#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_MSTP_PTR, x) +#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MSTP_PTR, x) + +#define ANA_L3_VLAN_CFG_VLAN_FID GENMASK(20, 8) +#define ANA_L3_VLAN_CFG_VLAN_FID_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_FID, x) +#define ANA_L3_VLAN_CFG_VLAN_FID_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_FID, x) + +#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA BIT(6) +#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA, x) +#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA, x) + +#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA BIT(5) +#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA, x) +#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA, x) + +#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS BIT(4) +#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS, x) +#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS, x) + +#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS BIT(3) +#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_LRN_DIS, x) +#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_LRN_DIS, x) + +#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA BIT(2) +#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_RLEG_ENA, x) +#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_RLEG_ENA, x) + +#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA BIT(1) +#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA, x) +#define 
ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA, x) + +#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA BIT(0) +#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x) +#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_GET(x)\ + FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x) + +/* ANA_L3:VLAN:VLAN_MASK_CFG */ +#define ANA_L3_VLAN_MASK_CFG(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 16, 0, 1, 4) + +/* ANA_L3:VLAN:VLAN_MASK_CFG1 */ +#define ANA_L3_VLAN_MASK_CFG1(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 20, 0, 1, 4) + +/* ANA_L3:VLAN:VLAN_MASK_CFG2 */ +#define ANA_L3_VLAN_MASK_CFG2(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 24, 0, 1, 4) + +#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2 BIT(0) +#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_SET(x)\ + FIELD_PREP(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x) +#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_GET(x)\ + FIELD_GET(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x) + +/* ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */ +#define ASM_RX_IN_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 0, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */ +#define ASM_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 4, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_PAUSE_CNT */ +#define ASM_RX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 8, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */ +#define ASM_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 12, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */ +#define ASM_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 16, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */ +#define ASM_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 20, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_UC_CNT */ +#define ASM_RX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 24, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_MC_CNT */ +#define ASM_RX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 28, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_BC_CNT */ +#define ASM_RX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 32, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */ +#define ASM_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 36, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */ +#define ASM_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 40, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */ +#define ASM_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 44, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */ +#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 48, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */ +#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 52, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */ +#define ASM_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 56, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_JABBERS_CNT */ +#define ASM_RX_JABBERS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 60, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE64_CNT */ +#define ASM_RX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 64, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */ +#define ASM_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 68, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */ +#define ASM_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 72, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */ +#define ASM_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 
512, 76, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */ +#define ASM_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 80, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */ +#define ASM_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 84, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */ +#define ASM_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 88, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */ +#define ASM_RX_IPG_SHRINK_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 92, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */ +#define ASM_TX_OUT_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 96, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_PAUSE_CNT */ +#define ASM_TX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 100, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */ +#define ASM_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 104, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_UC_CNT */ +#define ASM_TX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 108, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_MC_CNT */ +#define ASM_TX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 112, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_BC_CNT */ +#define ASM_TX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 116, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE64_CNT */ +#define ASM_TX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 120, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */ +#define ASM_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 124, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */ +#define ASM_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 128, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */ +#define ASM_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 132, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */ +#define ASM_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 136, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */ +#define ASM_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 140, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */ +#define ASM_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 144, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */ +#define ASM_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 148, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */ +#define ASM_RX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 152, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */ +#define ASM_RX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 156, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */ +#define ASM_TX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 160, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */ +#define ASM_TX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 164, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */ +#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 168, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */ +#define ASM_PMAC_RX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 172, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */ +#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 176, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */ +#define ASM_PMAC_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 180, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */ 
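Each ASM:DEV_STATISTICS group above is one port's counter page (65 groups of 512 bytes, addressed as 4-byte registers), so fetching a counter is a single spx5_rd() of the macro's expanded address. A minimal sketch with hypothetical helper names, not part of the patch; any MSB/LSB latching order the hardware may require is ignored here. The wide byte counters pair a 32-bit LSB register with a 4-bit MSB register (see the *_MSB_CNT definitions nearby):

static inline u32 sparx5_port_rx_pause_cnt(struct sparx5 *s5, int portno)
{
	return spx5_rd(s5, ASM_RX_PAUSE_CNT(portno));
}

static inline u64 sparx5_port_rx_in_bytes(struct sparx5 *s5, int portno)
{
	u64 lsb = spx5_rd(s5, ASM_RX_IN_BYTES_CNT(portno));
	u64 msb = spx5_rd(s5, ASM_RX_IN_BYTES_MSB_CNT(portno));

	/* The MSB register holds bits 35:32 of the 36-bit byte counter */
	return (msb << 32) | lsb;
}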
+#define ASM_PMAC_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 184, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */ +#define ASM_PMAC_RX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 188, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */ +#define ASM_PMAC_RX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 192, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */ +#define ASM_PMAC_RX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 196, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */ +#define ASM_PMAC_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 200, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */ +#define ASM_PMAC_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 204, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */ +#define ASM_PMAC_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 208, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */ +#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 212, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */ +#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 216, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */ +#define ASM_PMAC_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 220, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */ +#define ASM_PMAC_RX_JABBERS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 224, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */ +#define ASM_PMAC_RX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 228, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */ +#define ASM_PMAC_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 232, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */ +#define ASM_PMAC_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 236, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */ +#define ASM_PMAC_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 240, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */ +#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 244, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */ +#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 248, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */ +#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 252, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */ +#define ASM_PMAC_TX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 256, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */ +#define ASM_PMAC_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 260, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */ +#define ASM_PMAC_TX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 264, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */ +#define ASM_PMAC_TX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 268, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */ +#define ASM_PMAC_TX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 272, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */ +#define ASM_PMAC_TX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 276, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */ +#define ASM_PMAC_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 280, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */ +#define ASM_PMAC_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 
0, 1, 0, g, 65, 512, 284, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */ +#define ASM_PMAC_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 288, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */ +#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 292, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */ +#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 296, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */ +#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 300, 0, 1, 4) + +/* ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */ +#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 304, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */ +#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 308, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */ +#define ASM_MM_RX_SMD_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 312, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */ +#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 316, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */ +#define ASM_MM_RX_MERGE_FRAG_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 320, 0, 1, 4) + +/* ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */ +#define ASM_MM_TX_PFRAGMENT_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 324, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */ +#define ASM_TX_MULTI_COLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 328, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */ +#define ASM_TX_LATE_COLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 332, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_XCOLL_CNT */ +#define ASM_TX_XCOLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 336, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_DEFER_CNT */ +#define ASM_TX_DEFER_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 340, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_XDEFER_CNT */ +#define ASM_TX_XDEFER_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 344, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */ +#define ASM_TX_BACKOFF1_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 348, 0, 1, 4) + +/* ASM:DEV_STATISTICS:TX_CSENSE_CNT */ +#define ASM_TX_CSENSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 352, 0, 1, 4) + +/* ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */ +#define ASM_RX_IN_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 356, 0, 1, 4) + +#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x) +#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */ +#define ASM_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 360, 0, 1, 4) + +#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x) +#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */ +#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 364, 0, 1, 4) + +#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(3, 0) +#define 
ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x) +#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */ +#define ASM_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 368, 0, 1, 4) + +#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x) +#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */ +#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 372, 0, 1, 4) + +#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x) +#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */ +#define ASM_TX_OUT_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 376, 0, 1, 4) + +#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x) +#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */ +#define ASM_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 380, 0, 1, 4) + +#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x) +#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */ +#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 384, 0, 1, 4) + +#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(3, 0) +#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x) +#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x) + +/* ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */ +#define ASM_RX_SYNC_LOST_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 388, 0, 1, 4) + +/* ASM:CFG:STAT_CFG */ +#define ASM_STAT_CFG __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 0, 0, 1, 4) + +#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT BIT(0) +#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(x)\ + FIELD_PREP(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x) +#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_GET(x)\ + FIELD_GET(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x) + +/* ASM:CFG:PORT_CFG */ +#define ASM_PORT_CFG(r) __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 540, r, 67, 4) + +#define ASM_PORT_CFG_CSC_STAT_DIS BIT(12) +#define ASM_PORT_CFG_CSC_STAT_DIS_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_CSC_STAT_DIS, x) +#define ASM_PORT_CFG_CSC_STAT_DIS_GET(x)\ + FIELD_GET(ASM_PORT_CFG_CSC_STAT_DIS, x) + +#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA BIT(11) +#define 
ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA, x) +#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA_GET(x)\ + FIELD_GET(ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA, x) + +#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA BIT(10) +#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_IGN_TAXI_ABORT_ENA, x) +#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA_GET(x)\ + FIELD_GET(ASM_PORT_CFG_IGN_TAXI_ABORT_ENA, x) + +#define ASM_PORT_CFG_NO_PREAMBLE_ENA BIT(9) +#define ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_NO_PREAMBLE_ENA, x) +#define ASM_PORT_CFG_NO_PREAMBLE_ENA_GET(x)\ + FIELD_GET(ASM_PORT_CFG_NO_PREAMBLE_ENA, x) + +#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA BIT(8) +#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_SKIP_PREAMBLE_ENA, x) +#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA_GET(x)\ + FIELD_GET(ASM_PORT_CFG_SKIP_PREAMBLE_ENA, x) + +#define ASM_PORT_CFG_FRM_AGING_DIS BIT(7) +#define ASM_PORT_CFG_FRM_AGING_DIS_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_FRM_AGING_DIS, x) +#define ASM_PORT_CFG_FRM_AGING_DIS_GET(x)\ + FIELD_GET(ASM_PORT_CFG_FRM_AGING_DIS, x) + +#define ASM_PORT_CFG_PAD_ENA BIT(6) +#define ASM_PORT_CFG_PAD_ENA_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_PAD_ENA, x) +#define ASM_PORT_CFG_PAD_ENA_GET(x)\ + FIELD_GET(ASM_PORT_CFG_PAD_ENA, x) + +#define ASM_PORT_CFG_INJ_DISCARD_CFG GENMASK(5, 4) +#define ASM_PORT_CFG_INJ_DISCARD_CFG_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_INJ_DISCARD_CFG, x) +#define ASM_PORT_CFG_INJ_DISCARD_CFG_GET(x)\ + FIELD_GET(ASM_PORT_CFG_INJ_DISCARD_CFG, x) + +#define ASM_PORT_CFG_INJ_FORMAT_CFG GENMASK(3, 2) +#define ASM_PORT_CFG_INJ_FORMAT_CFG_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_INJ_FORMAT_CFG, x) +#define ASM_PORT_CFG_INJ_FORMAT_CFG_GET(x)\ + FIELD_GET(ASM_PORT_CFG_INJ_FORMAT_CFG, x) + +#define ASM_PORT_CFG_VSTAX2_AWR_ENA BIT(1) +#define ASM_PORT_CFG_VSTAX2_AWR_ENA_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_VSTAX2_AWR_ENA, x) +#define ASM_PORT_CFG_VSTAX2_AWR_ENA_GET(x)\ + FIELD_GET(ASM_PORT_CFG_VSTAX2_AWR_ENA, x) + +#define ASM_PORT_CFG_PFRM_FLUSH BIT(0) +#define ASM_PORT_CFG_PFRM_FLUSH_SET(x)\ + FIELD_PREP(ASM_PORT_CFG_PFRM_FLUSH, x) +#define ASM_PORT_CFG_PFRM_FLUSH_GET(x)\ + FIELD_GET(ASM_PORT_CFG_PFRM_FLUSH, x) + +/* ASM:RAM_CTRL:RAM_INIT */ +#define ASM_RAM_INIT __REG(TARGET_ASM, 0, 1, 34832, 0, 1, 4, 0, 0, 1, 4) + +#define ASM_RAM_INIT_RAM_INIT BIT(1) +#define ASM_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(ASM_RAM_INIT_RAM_INIT, x) +#define ASM_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(ASM_RAM_INIT_RAM_INIT, x) + +#define ASM_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define ASM_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(ASM_RAM_INIT_RAM_CFG_HOOK, x) +#define ASM_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(ASM_RAM_INIT_RAM_CFG_HOOK, x) + +/* CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */ +#define CLKGEN_LCPLL1_CORE_CLK_CFG __REG(TARGET_CLKGEN, 0, 1, 12, 0, 1, 36, 0, 0, 1, 4) + +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV GENMASK(7, 0) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(x)\ + FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV, x) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_GET(x)\ + FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV, x) + +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV GENMASK(10, 8) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(x)\ + FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV, x) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_GET(x)\ + FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV, x) + +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR BIT(11) +#define 
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(x)\ + FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR, x) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_GET(x)\ + FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR, x) + +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL GENMASK(13, 12) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(x)\ + FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL, x) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_GET(x)\ + FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL, x) + +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA BIT(14) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(x)\ + FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA, x) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_GET(x)\ + FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA, x) + +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA BIT(15) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(x)\ + FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x) +#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_GET(x)\ + FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x) + +/* CPU:CPU_REGS:PROC_CTRL */ +#define CPU_PROC_CTRL __REG(TARGET_CPU, 0, 1, 0, 0, 1, 204, 176, 0, 1, 4) + +#define CPU_PROC_CTRL_AARCH64_MODE_ENA BIT(12) +#define CPU_PROC_CTRL_AARCH64_MODE_ENA_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_AARCH64_MODE_ENA, x) +#define CPU_PROC_CTRL_AARCH64_MODE_ENA_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_AARCH64_MODE_ENA, x) + +#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS BIT(11) +#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x) +#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x) + +#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS BIT(10) +#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x) +#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x) + +#define CPU_PROC_CTRL_BE_EXCEP_MODE BIT(9) +#define CPU_PROC_CTRL_BE_EXCEP_MODE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_BE_EXCEP_MODE, x) +#define CPU_PROC_CTRL_BE_EXCEP_MODE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_BE_EXCEP_MODE, x) + +#define CPU_PROC_CTRL_VINITHI BIT(8) +#define CPU_PROC_CTRL_VINITHI_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_VINITHI, x) +#define CPU_PROC_CTRL_VINITHI_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_VINITHI, x) + +#define CPU_PROC_CTRL_CFGTE BIT(7) +#define CPU_PROC_CTRL_CFGTE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_CFGTE, x) +#define CPU_PROC_CTRL_CFGTE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_CFGTE, x) + +#define CPU_PROC_CTRL_CP15S_DISABLE BIT(6) +#define CPU_PROC_CTRL_CP15S_DISABLE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_CP15S_DISABLE, x) +#define CPU_PROC_CTRL_CP15S_DISABLE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_CP15S_DISABLE, x) + +#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE BIT(5) +#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x) +#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x) + +#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA BIT(4) +#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x) +#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x) + +#define CPU_PROC_CTRL_ACP_AWCACHE BIT(3) +#define CPU_PROC_CTRL_ACP_AWCACHE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_ACP_AWCACHE, x) +#define CPU_PROC_CTRL_ACP_AWCACHE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_ACP_AWCACHE, x) + +#define 
CPU_PROC_CTRL_ACP_ARCACHE BIT(2) +#define CPU_PROC_CTRL_ACP_ARCACHE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_ACP_ARCACHE, x) +#define CPU_PROC_CTRL_ACP_ARCACHE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_ACP_ARCACHE, x) + +#define CPU_PROC_CTRL_L2_FLUSH_REQ BIT(1) +#define CPU_PROC_CTRL_L2_FLUSH_REQ_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_L2_FLUSH_REQ, x) +#define CPU_PROC_CTRL_L2_FLUSH_REQ_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_L2_FLUSH_REQ, x) + +#define CPU_PROC_CTRL_ACP_DISABLE BIT(0) +#define CPU_PROC_CTRL_ACP_DISABLE_SET(x)\ + FIELD_PREP(CPU_PROC_CTRL_ACP_DISABLE, x) +#define CPU_PROC_CTRL_ACP_DISABLE_GET(x)\ + FIELD_GET(CPU_PROC_CTRL_ACP_DISABLE, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV10G_MAC_ENA_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 0, 0, 1, 4) + +#define DEV10G_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV10G_MAC_ENA_CFG_RX_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ENA_CFG_RX_ENA, x) +#define DEV10G_MAC_ENA_CFG_RX_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ENA_CFG_RX_ENA, x) + +#define DEV10G_MAC_ENA_CFG_TX_ENA BIT(0) +#define DEV10G_MAC_ENA_CFG_TX_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ENA_CFG_TX_ENA, x) +#define DEV10G_MAC_ENA_CFG_TX_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ENA_CFG_TX_ENA, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV10G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 8, 0, 1, 4) + +#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16) +#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\ + FIELD_PREP(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x) +#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\ + FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x) + +#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0) +#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\ + FIELD_PREP(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x) +#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ + FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */ +#define DEV10G_MAC_NUM_TAGS_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 12, 0, 1, 4) + +#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS GENMASK(1, 0) +#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(x)\ + FIELD_PREP(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x) +#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_GET(x)\ + FIELD_GET(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */ +#define DEV10G_MAC_TAGS_CFG(t, r) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 16, r, 3, 4) + +#define DEV10G_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16) +#define DEV10G_MAC_TAGS_CFG_TAG_ID_SET(x)\ + FIELD_PREP(DEV10G_MAC_TAGS_CFG_TAG_ID, x) +#define DEV10G_MAC_TAGS_CFG_TAG_ID_GET(x)\ + FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ID, x) + +#define DEV10G_MAC_TAGS_CFG_TAG_ENA BIT(4) +#define DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_TAGS_CFG_TAG_ENA, x) +#define DEV10G_MAC_TAGS_CFG_TAG_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ENA, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ +#define DEV10G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 28, 0, 1, 4) + +#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24) +#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x) +#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x) + +#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20) +#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x) +#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x) + +#define 
DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA BIT(16) +#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x) +#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x) + +#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12) +#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x) +#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x) + +#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA BIT(8) +#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x) +#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x) + +#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA BIT(4) +#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x) +#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x) + +#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA BIT(0) +#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\ + FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) +#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\ + FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */ +#define DEV10G_MAC_TX_MONITOR_STICKY(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 48, 0, 1, 4) + +#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY BIT(4) +#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_SET(x)\ + FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY, x) +#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_GET(x)\ + FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY, x) + +#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY BIT(3) +#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY_SET(x)\ + FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY, x) +#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY_GET(x)\ + FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY, x) + +#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY BIT(2) +#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY_SET(x)\ + FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY, x) +#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY_GET(x)\ + FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY, x) + +#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY BIT(1) +#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY_SET(x)\ + FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY, x) +#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY_GET(x)\ + FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY, x) + +#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY BIT(0) +#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_SET(x)\ + FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x) +#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_GET(x)\ + FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x) + +/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */ +#define DEV10G_DEV_RST_CTRL(t) __REG(TARGET_DEV10G, t, 12, 436, 0, 1, 52, 0, 0, 1, 4) + +#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28) +#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA, x) +#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\ + 
FIELD_GET(DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA, x) + +#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27) +#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) +#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) + +#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25) +#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x) +#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x) + +#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL GENMASK(24, 23) +#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL, x) +#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL, x) + +#define DEV10G_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20) +#define DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_SPEED_SEL, x) +#define DEV10G_DEV_RST_CTRL_SPEED_SEL_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_SPEED_SEL, x) + +#define DEV10G_DEV_RST_CTRL_PCS_TX_RST BIT(12) +#define DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_PCS_TX_RST, x) +#define DEV10G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_PCS_TX_RST, x) + +#define DEV10G_DEV_RST_CTRL_PCS_RX_RST BIT(8) +#define DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_PCS_RX_RST, x) +#define DEV10G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_PCS_RX_RST, x) + +#define DEV10G_DEV_RST_CTRL_MAC_TX_RST BIT(4) +#define DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_MAC_TX_RST, x) +#define DEV10G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_TX_RST, x) + +#define DEV10G_DEV_RST_CTRL_MAC_RX_RST BIT(0) +#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\ + FIELD_PREP(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x) +#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\ + FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x) + +/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */ +#define DEV10G_PCS25G_CFG(t) __REG(TARGET_DEV10G, t, 12, 488, 0, 1, 32, 0, 0, 1, 4) + +#define DEV10G_PCS25G_CFG_PCS25G_ENA BIT(0) +#define DEV10G_PCS25G_CFG_PCS25G_ENA_SET(x)\ + FIELD_PREP(DEV10G_PCS25G_CFG_PCS25G_ENA, x) +#define DEV10G_PCS25G_CFG_PCS25G_ENA_GET(x)\ + FIELD_GET(DEV10G_PCS25G_CFG_PCS25G_ENA, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV25G_MAC_ENA_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 0, 0, 1, 4) + +#define DEV25G_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV25G_MAC_ENA_CFG_RX_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ENA_CFG_RX_ENA, x) +#define DEV25G_MAC_ENA_CFG_RX_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ENA_CFG_RX_ENA, x) + +#define DEV25G_MAC_ENA_CFG_TX_ENA BIT(0) +#define DEV25G_MAC_ENA_CFG_TX_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ENA_CFG_TX_ENA, x) +#define DEV25G_MAC_ENA_CFG_TX_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ENA_CFG_TX_ENA, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV25G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 8, 0, 1, 4) + +#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16) +#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\ + FIELD_PREP(DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x) +#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\ + FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x) + +#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0) 
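Every macro in this header follows the same two-level pattern. The register macros expand through __REG() to an 11-value tuple - target id, target instance, target count, group base, group instance, group count, group width, register address, register instance, register count, register width - from which the driver's accessors (declared outside this hunk) compute a byte offset of the form gbase + ginst * gwidth + raddr + rinst * rwidth. The field macros are plain <linux/bitfield.h> wrappers: the bare name is the mask, _SET() is FIELD_PREP(), _GET() is FIELD_GET(). Below is a minimal sketch of the field helpers, using the DEV25G MAC enable bits defined just above; the function name is hypothetical and nothing here touches hardware.

#include <linux/bitfield.h>
#include <linux/types.h>

/* Illustrative sketch only: compose the DEV25G MAC_ENA_CFG word that
 * enables both directions, using the mask/_SET()/_GET() macros from
 * this header.
 */
static u32 dev25g_mac_enable_word(void)
{
	u32 val;

	/* FIELD_PREP() shifts each value into its mask position:
	 * RX_ENA lands at BIT(4), TX_ENA at BIT(0), so val == 0x11.
	 */
	val = DEV25G_MAC_ENA_CFG_RX_ENA_SET(1) |
	      DEV25G_MAC_ENA_CFG_TX_ENA_SET(1);

	/* FIELD_GET() reverses the shift:
	 * DEV25G_MAC_ENA_CFG_RX_ENA_GET(val) == 1 here.
	 */
	return val;
}

The composed word would then be written out through the driver's register I/O helpers, which consume the __REG() tuple to locate the register; those helpers live elsewhere in the driver and are not part of this hunk.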
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\ + FIELD_PREP(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x) +#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ + FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ +#define DEV25G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 28, 0, 1, 4) + +#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24) +#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x) +#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x) + +#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20) +#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x) +#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x) + +#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA BIT(16) +#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x) +#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x) + +#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12) +#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x) +#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x) + +#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA BIT(8) +#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x) +#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x) + +#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA BIT(4) +#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x) +#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x) + +#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA BIT(0) +#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\ + FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) +#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\ + FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) + +/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */ +#define DEV25G_DEV_RST_CTRL(t) __REG(TARGET_DEV25G, t, 8, 436, 0, 1, 52, 0, 0, 1, 4) + +#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28) +#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA, x) +#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA, x) + +#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27) +#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) +#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) + +#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25) +#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x) +#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x) + +#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL GENMASK(24, 23) +#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL, x) +#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL, x) + +#define 
DEV25G_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20) +#define DEV25G_DEV_RST_CTRL_SPEED_SEL_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_SPEED_SEL, x) +#define DEV25G_DEV_RST_CTRL_SPEED_SEL_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_SPEED_SEL, x) + +#define DEV25G_DEV_RST_CTRL_PCS_TX_RST BIT(12) +#define DEV25G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_PCS_TX_RST, x) +#define DEV25G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_PCS_TX_RST, x) + +#define DEV25G_DEV_RST_CTRL_PCS_RX_RST BIT(8) +#define DEV25G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_PCS_RX_RST, x) +#define DEV25G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_PCS_RX_RST, x) + +#define DEV25G_DEV_RST_CTRL_MAC_TX_RST BIT(4) +#define DEV25G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_MAC_TX_RST, x) +#define DEV25G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_TX_RST, x) + +#define DEV25G_DEV_RST_CTRL_MAC_RX_RST BIT(0) +#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\ + FIELD_PREP(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x) +#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\ + FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x) + +/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */ +#define DEV25G_PCS25G_CFG(t) __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 0, 0, 1, 4) + +#define DEV25G_PCS25G_CFG_PCS25G_ENA BIT(0) +#define DEV25G_PCS25G_CFG_PCS25G_ENA_SET(x)\ + FIELD_PREP(DEV25G_PCS25G_CFG_PCS25G_ENA, x) +#define DEV25G_PCS25G_CFG_PCS25G_ENA_GET(x)\ + FIELD_GET(DEV25G_PCS25G_CFG_PCS25G_ENA, x) + +/* DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */ +#define DEV25G_PCS25G_SD_CFG(t) __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 4, 0, 1, 4) + +#define DEV25G_PCS25G_SD_CFG_SD_SEL BIT(8) +#define DEV25G_PCS25G_SD_CFG_SD_SEL_SET(x)\ + FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_SEL, x) +#define DEV25G_PCS25G_SD_CFG_SD_SEL_GET(x)\ + FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_SEL, x) + +#define DEV25G_PCS25G_SD_CFG_SD_POL BIT(4) +#define DEV25G_PCS25G_SD_CFG_SD_POL_SET(x)\ + FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_POL, x) +#define DEV25G_PCS25G_SD_CFG_SD_POL_GET(x)\ + FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_POL, x) + +#define DEV25G_PCS25G_SD_CFG_SD_ENA BIT(0) +#define DEV25G_PCS25G_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_ENA, x) +#define DEV25G_PCS25G_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_ENA, x) + +/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */ +#define DEV2G5_DEV_RST_CTRL(t) __REG(TARGET_DEV2G5, t, 65, 0, 0, 1, 36, 0, 0, 1, 4) + +#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(23) +#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) +#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) + +#define DEV2G5_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20) +#define DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_SPEED_SEL, x) +#define DEV2G5_DEV_RST_CTRL_SPEED_SEL_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_SPEED_SEL, x) + +#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST BIT(17) +#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST, x) +#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST, x) + +#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST BIT(16) +#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST, x) +#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST_GET(x)\ + 
FIELD_GET(DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST, x) + +#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST BIT(12) +#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_PCS_TX_RST, x) +#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_PCS_TX_RST, x) + +#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST BIT(8) +#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, x) +#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, x) + +#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST BIT(4) +#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_MAC_TX_RST, x) +#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_TX_RST, x) + +#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST BIT(0) +#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(x)\ + FIELD_PREP(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x) +#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_GET(x)\ + FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV2G5_MAC_ENA_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 0, 0, 1, 4) + +#define DEV2G5_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV2G5_MAC_ENA_CFG_RX_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_ENA_CFG_RX_ENA, x) +#define DEV2G5_MAC_ENA_CFG_RX_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_ENA_CFG_RX_ENA, x) + +#define DEV2G5_MAC_ENA_CFG_TX_ENA BIT(0) +#define DEV2G5_MAC_ENA_CFG_TX_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_ENA_CFG_TX_ENA, x) +#define DEV2G5_MAC_ENA_CFG_TX_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_ENA_CFG_TX_ENA, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */ +#define DEV2G5_MAC_MODE_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 4, 0, 1, 4) + +#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8) +#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x) +#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x) + +#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) +#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA, x) +#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA, x) + +#define DEV2G5_MAC_MODE_CFG_FDX_ENA BIT(0) +#define DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_MODE_CFG_FDX_ENA, x) +#define DEV2G5_MAC_MODE_CFG_FDX_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_MODE_CFG_FDX_ENA, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV2G5_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 8, 0, 1, 4) + +#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0) +#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\ + FIELD_PREP(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x) +#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ + FIELD_GET(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */ +#define DEV2G5_MAC_TAGS_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 12, 0, 1, 4) + +#define DEV2G5_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16) +#define DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(x)\ + FIELD_PREP(DEV2G5_MAC_TAGS_CFG_TAG_ID, x) +#define DEV2G5_MAC_TAGS_CFG_TAG_ID_GET(x)\ + FIELD_GET(DEV2G5_MAC_TAGS_CFG_TAG_ID, x) + +#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(3) +#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x) +#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x) + +#define DEV2G5_MAC_TAGS_CFG_PB_ENA GENMASK(2, 1) +#define DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(x)\ + 
FIELD_PREP(DEV2G5_MAC_TAGS_CFG_PB_ENA, x) +#define DEV2G5_MAC_TAGS_CFG_PB_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_TAGS_CFG_PB_ENA, x) + +#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) +#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x) +#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */ +#define DEV2G5_MAC_TAGS_CFG2(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 16, 0, 1, 4) + +#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3 GENMASK(31, 16) +#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_SET(x)\ + FIELD_PREP(DEV2G5_MAC_TAGS_CFG2_TAG_ID3, x) +#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_GET(x)\ + FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID3, x) + +#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2 GENMASK(15, 0) +#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_SET(x)\ + FIELD_PREP(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x) +#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_GET(x)\ + FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ +#define DEV2G5_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 20, 0, 1, 4) + +#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0) +#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x) +#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */ +#define DEV2G5_MAC_IFG_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 24, 0, 1, 4) + +#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17) +#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_SET(x)\ + FIELD_PREP(DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK, x) +#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_GET(x)\ + FIELD_GET(DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK, x) + +#define DEV2G5_MAC_IFG_CFG_TX_IFG GENMASK(12, 8) +#define DEV2G5_MAC_IFG_CFG_TX_IFG_SET(x)\ + FIELD_PREP(DEV2G5_MAC_IFG_CFG_TX_IFG, x) +#define DEV2G5_MAC_IFG_CFG_TX_IFG_GET(x)\ + FIELD_GET(DEV2G5_MAC_IFG_CFG_TX_IFG, x) + +#define DEV2G5_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4) +#define DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(x)\ + FIELD_PREP(DEV2G5_MAC_IFG_CFG_RX_IFG2, x) +#define DEV2G5_MAC_IFG_CFG_RX_IFG2_GET(x)\ + FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG2, x) + +#define DEV2G5_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0) +#define DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(x)\ + FIELD_PREP(DEV2G5_MAC_IFG_CFG_RX_IFG1, x) +#define DEV2G5_MAC_IFG_CFG_RX_IFG1_GET(x)\ + FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG1, x) + +/* DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */ +#define DEV2G5_MAC_HDX_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 28, 0, 1, 4) + +#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26) +#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_SET(x)\ + FIELD_PREP(DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC, x) +#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_GET(x)\ + FIELD_GET(DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC, x) + +#define DEV2G5_MAC_HDX_CFG_SEED GENMASK(23, 16) +#define DEV2G5_MAC_HDX_CFG_SEED_SET(x)\ + FIELD_PREP(DEV2G5_MAC_HDX_CFG_SEED, x) +#define DEV2G5_MAC_HDX_CFG_SEED_GET(x)\ + FIELD_GET(DEV2G5_MAC_HDX_CFG_SEED, x) + +#define DEV2G5_MAC_HDX_CFG_SEED_LOAD BIT(12) +#define DEV2G5_MAC_HDX_CFG_SEED_LOAD_SET(x)\ + FIELD_PREP(DEV2G5_MAC_HDX_CFG_SEED_LOAD, x) +#define DEV2G5_MAC_HDX_CFG_SEED_LOAD_GET(x)\ + FIELD_GET(DEV2G5_MAC_HDX_CFG_SEED_LOAD, x) + +#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8) +#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA_SET(x)\ + FIELD_PREP(DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA, x) +#define 
DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA_GET(x)\ + FIELD_GET(DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA, x) + +#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS GENMASK(6, 0) +#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_SET(x)\ + FIELD_PREP(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x) +#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_GET(x)\ + FIELD_GET(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */ +#define DEV2G5_PCS1G_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 0, 0, 1, 4) + +#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE BIT(4) +#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE, x) +#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE, x) + +#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1) +#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA, x) +#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA, x) + +#define DEV2G5_PCS1G_CFG_PCS_ENA BIT(0) +#define DEV2G5_PCS1G_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_CFG_PCS_ENA, x) +#define DEV2G5_PCS1G_CFG_PCS_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_CFG_PCS_ENA, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */ +#define DEV2G5_PCS1G_MODE_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 4, 0, 1, 4) + +#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4) +#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA, x) +#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA, x) + +#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA BIT(1) +#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x) +#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x) + +#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0) +#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x) +#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */ +#define DEV2G5_PCS1G_SD_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 8, 0, 1, 4) + +#define DEV2G5_PCS1G_SD_CFG_SD_SEL BIT(8) +#define DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_SEL, x) +#define DEV2G5_PCS1G_SD_CFG_SD_SEL_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_SEL, x) + +#define DEV2G5_PCS1G_SD_CFG_SD_POL BIT(4) +#define DEV2G5_PCS1G_SD_CFG_SD_POL_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_POL, x) +#define DEV2G5_PCS1G_SD_CFG_SD_POL_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_POL, x) + +#define DEV2G5_PCS1G_SD_CFG_SD_ENA BIT(0) +#define DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_ENA, x) +#define DEV2G5_PCS1G_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_ENA, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */ +#define DEV2G5_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 12, 0, 1, 4) + +#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16) +#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY, x) +#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY, x) + +#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8) +#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x) +#define 
DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x) + +#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1) +#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT, x) +#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT, x) + +#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA BIT(0) +#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x) +#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */ +#define DEV2G5_PCS1G_LB_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 20, 0, 1, 4) + +#define DEV2G5_PCS1G_LB_CFG_RA_ENA BIT(4) +#define DEV2G5_PCS1G_LB_CFG_RA_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_LB_CFG_RA_ENA, x) +#define DEV2G5_PCS1G_LB_CFG_RA_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LB_CFG_RA_ENA, x) + +#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1) +#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA, x) +#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA, x) + +#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0) +#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x) +#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */ +#define DEV2G5_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 32, 0, 1, 4) + +#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY GENMASK(31, 16) +#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY, x) +#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY, x) + +#define DEV2G5_PCS1G_ANEG_STATUS_PR BIT(4) +#define DEV2G5_PCS1G_ANEG_STATUS_PR_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_PR, x) +#define DEV2G5_PCS1G_ANEG_STATUS_PR_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_PR, x) + +#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3) +#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY, x) +#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY, x) + +#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0) +#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x) +#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */ +#define DEV2G5_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 40, 0, 1, 4) + +#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR GENMASK(15, 12) +#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR, x) +#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR, x) + +#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT BIT(8) +#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT, x) +#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT, x) + +#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS BIT(4) +#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\ + 
FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS, x) +#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS, x) + +#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS BIT(0) +#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x) +#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x) + +/* DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */ +#define DEV2G5_PCS1G_STICKY(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 48, 0, 1, 4) + +#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4) +#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY, x) +#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY, x) + +#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0) +#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x) +#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x) + +/* DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */ +#define DEV2G5_PCS_FX100_CFG(t) __REG(TARGET_DEV2G5, t, 65, 164, 0, 1, 4, 0, 0, 1, 4) + +#define DEV2G5_PCS_FX100_CFG_SD_SEL BIT(26) +#define DEV2G5_PCS_FX100_CFG_SD_SEL_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_SEL, x) +#define DEV2G5_PCS_FX100_CFG_SD_SEL_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_SEL, x) + +#define DEV2G5_PCS_FX100_CFG_SD_POL BIT(25) +#define DEV2G5_PCS_FX100_CFG_SD_POL_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_POL, x) +#define DEV2G5_PCS_FX100_CFG_SD_POL_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_POL, x) + +#define DEV2G5_PCS_FX100_CFG_SD_ENA BIT(24) +#define DEV2G5_PCS_FX100_CFG_SD_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_ENA, x) +#define DEV2G5_PCS_FX100_CFG_SD_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA BIT(20) +#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA, x) +#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA BIT(16) +#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA, x) +#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_RXBITSEL GENMASK(15, 12) +#define DEV2G5_PCS_FX100_CFG_RXBITSEL_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_RXBITSEL, x) +#define DEV2G5_PCS_FX100_CFG_RXBITSEL_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_RXBITSEL, x) + +#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG GENMASK(10, 9) +#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_SIGDET_CFG, x) +#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_SIGDET_CFG, x) + +#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA BIT(8) +#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA, x) +#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER GENMASK(7, 4) +#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER, x) +#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER, x) + +#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA BIT(3) +#define 
DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA, x) +#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA BIT(2) +#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_FEFCHK_ENA, x) +#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_FEFCHK_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA BIT(1) +#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_FEFGEN_ENA, x) +#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_FEFGEN_ENA, x) + +#define DEV2G5_PCS_FX100_CFG_PCS_ENA BIT(0) +#define DEV2G5_PCS_FX100_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_CFG_PCS_ENA, x) +#define DEV2G5_PCS_FX100_CFG_PCS_ENA_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_CFG_PCS_ENA, x) + +/* DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */ +#define DEV2G5_PCS_FX100_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 168, 0, 1, 4, 0, 0, 1, 4) + +#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP GENMASK(11, 8) +#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP, x) +#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP, x) + +#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7) +#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY, x) +#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY, x) + +#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6) +#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY, x) +#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY, x) + +#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5) +#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY, x) +#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY, x) + +#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4) +#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY, x) +#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY, x) + +#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS BIT(2) +#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_FEF_STATUS, x) +#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_FEF_STATUS, x) + +#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT BIT(1) +#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT, x) +#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT, x) + +#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS BIT(0) +#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_SET(x)\ + FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x) +#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_GET(x)\ + FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV5G_MAC_ENA_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 0, 0, 1, 4) + +#define DEV5G_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV5G_MAC_ENA_CFG_RX_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ENA_CFG_RX_ENA, x) 
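Status registers such as DEV2G5_PCS1G_LINK_STATUS above are decoded the same way, only in the _GET() direction. A short sketch under the same assumptions (hypothetical function name; the raw register word is taken as a parameter rather than read from hardware):

#include <linux/bitfield.h>
#include <linux/types.h>

/* Illustrative sketch only: decode a raw PCS1G_LINK_STATUS word.
 * A real caller would obtain 'val' from the driver's register-read
 * helper; here it is just a parameter.
 */
static bool pcs1g_link_up(u32 val)
{
	/* Combines three single-bit status fields via FIELD_GET();
	 * which combination counts as "link up" is a policy choice,
	 * shown here only to exercise the _GET() helpers.
	 */
	return DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(val) &&
	       DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT_GET(val) &&
	       DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(val);
}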
+#define DEV5G_MAC_ENA_CFG_RX_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ENA_CFG_RX_ENA, x) + +#define DEV5G_MAC_ENA_CFG_TX_ENA BIT(0) +#define DEV5G_MAC_ENA_CFG_TX_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ENA_CFG_TX_ENA, x) +#define DEV5G_MAC_ENA_CFG_TX_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ENA_CFG_TX_ENA, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV5G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 8, 0, 1, 4) + +#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16) +#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\ + FIELD_PREP(DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x) +#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\ + FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x) + +#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0) +#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\ + FIELD_PREP(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x) +#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ + FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x) + +/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */ +#define DEV5G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 28, 0, 1, 4) + +#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24) +#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x) +#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x) + +#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20) +#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x) +#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x) + +#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA BIT(16) +#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x) +#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x) + +#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12) +#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x) +#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x) + +#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA BIT(8) +#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x) +#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x) + +#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA BIT(4) +#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x) +#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x) + +#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA BIT(0) +#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\ + FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) +#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\ + FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */ +#define DEV5G_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 0, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */ +#define DEV5G_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 4, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */ +#define DEV5G_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 8, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */ +#define DEV5G_RX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 12, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */ +#define DEV5G_RX_MC_CNT(t) 
__REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 16, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */ +#define DEV5G_RX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 20, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */ +#define DEV5G_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 24, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */ +#define DEV5G_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 28, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */ +#define DEV5G_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 32, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */ +#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 36, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */ +#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 40, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */ +#define DEV5G_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 44, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */ +#define DEV5G_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 48, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */ +#define DEV5G_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 52, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */ +#define DEV5G_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 56, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */ +#define DEV5G_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 60, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */ +#define DEV5G_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 64, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */ +#define DEV5G_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 68, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */ +#define DEV5G_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 72, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */ +#define DEV5G_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 76, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */ +#define DEV5G_RX_IPG_SHRINK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 80, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */ +#define DEV5G_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 84, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */ +#define DEV5G_TX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 88, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */ +#define DEV5G_TX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 92, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */ +#define DEV5G_TX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 96, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */ +#define DEV5G_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 100, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */ +#define DEV5G_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 104, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */ +#define DEV5G_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 108, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */ +#define DEV5G_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 112, 0, 1, 4) + +/* 
DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */ +#define DEV5G_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 116, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */ +#define DEV5G_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 120, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */ +#define DEV5G_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 124, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */ +#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 128, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */ +#define DEV5G_RX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 132, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */ +#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 136, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */ +#define DEV5G_TX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 140, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */ +#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 144, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */ +#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 148, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */ +#define DEV5G_PMAC_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 152, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */ +#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 156, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */ +#define DEV5G_PMAC_RX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 160, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */ +#define DEV5G_PMAC_RX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 164, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */ +#define DEV5G_PMAC_RX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 168, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */ +#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 172, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */ +#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 176, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */ +#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 180, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */ +#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\ + t, 13, 60, 0, 1, 312, 184, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */ +#define DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\ + t, 13, 60, 0, 1, 312, 188, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */ +#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 192, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */ +#define DEV5G_PMAC_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 196, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */ +#define DEV5G_PMAC_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 200, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */ +#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 204, 0, 1, 4) + +/* 
DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */ +#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 208, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */ +#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 212, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */ +#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 216, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */ +#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 220, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */ +#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 224, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */ +#define DEV5G_PMAC_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 228, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */ +#define DEV5G_PMAC_TX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 232, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */ +#define DEV5G_PMAC_TX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 236, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */ +#define DEV5G_PMAC_TX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 240, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */ +#define DEV5G_PMAC_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 244, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */ +#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 248, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */ +#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 252, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */ +#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 256, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */ +#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 260, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */ +#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 264, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */ +#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 268, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */ +#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 272, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */ +#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 276, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */ +#define DEV5G_MM_RX_SMD_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 280, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */ +#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 284, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */ +#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 288, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */ +#define DEV5G_MM_TX_PFRAGMENT_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 292, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */ +#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 
1, 312, 296, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */ +#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 300, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */ +#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 304, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */ +#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 308, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */ +#define DEV5G_RX_IN_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 0, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */ +#define DEV5G_RX_IN_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 4, 0, 1, 4) + +#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x) +#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */ +#define DEV5G_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 8, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */ +#define DEV5G_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 12, 0, 1, 4) + +#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x) +#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */ +#define DEV5G_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 16, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */ +#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 20, 0, 1, 4) + +#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x) +#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */ +#define DEV5G_TX_OUT_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 24, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */ +#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 28, 0, 1, 4) + +#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x) +#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */ +#define DEV5G_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 32, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */ +#define DEV5G_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 36, 0, 1, 4) + +#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x) +#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\ + 
FIELD_GET(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */ +#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 40, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */ +#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 44, 0, 1, 4) + +#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x) +#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */ +#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 48, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */ +#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 52, 0, 1, 4) + +#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x) +#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */ +#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 56, 0, 1, 4) + +/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */ +#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 60, 0, 1, 4) + +#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(7, 0) +#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\ + FIELD_PREP(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x) +#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\ + FIELD_GET(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x) + +/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */ +#define DEV5G_DEV_RST_CTRL(t) __REG(TARGET_DEV5G, t, 13, 436, 0, 1, 52, 0, 0, 1, 4) + +#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28) +#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA, x) +#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA, x) + +#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27) +#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) +#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x) + +#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25) +#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x) +#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x) + +#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL GENMASK(24, 23) +#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL, x) +#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL, x) + +#define DEV5G_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20) +#define DEV5G_DEV_RST_CTRL_SPEED_SEL_SET(x)\ + 
FIELD_PREP(DEV5G_DEV_RST_CTRL_SPEED_SEL, x) +#define DEV5G_DEV_RST_CTRL_SPEED_SEL_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_SPEED_SEL, x) + +#define DEV5G_DEV_RST_CTRL_PCS_TX_RST BIT(12) +#define DEV5G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_PCS_TX_RST, x) +#define DEV5G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_PCS_TX_RST, x) + +#define DEV5G_DEV_RST_CTRL_PCS_RX_RST BIT(8) +#define DEV5G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_PCS_RX_RST, x) +#define DEV5G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_PCS_RX_RST, x) + +#define DEV5G_DEV_RST_CTRL_MAC_TX_RST BIT(4) +#define DEV5G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_MAC_TX_RST, x) +#define DEV5G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_TX_RST, x) + +#define DEV5G_DEV_RST_CTRL_MAC_RX_RST BIT(0) +#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\ + FIELD_PREP(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x) +#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\ + FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x) + +/* DSM:RAM_CTRL:RAM_INIT */ +#define DSM_RAM_INIT __REG(TARGET_DSM, 0, 1, 0, 0, 1, 4, 0, 0, 1, 4) + +#define DSM_RAM_INIT_RAM_INIT BIT(1) +#define DSM_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(DSM_RAM_INIT_RAM_INIT, x) +#define DSM_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(DSM_RAM_INIT_RAM_INIT, x) + +#define DSM_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define DSM_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(DSM_RAM_INIT_RAM_CFG_HOOK, x) +#define DSM_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(DSM_RAM_INIT_RAM_CFG_HOOK, x) + +/* DSM:CFG:BUF_CFG */ +#define DSM_BUF_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 0, r, 67, 4) + +#define DSM_BUF_CFG_CSC_STAT_DIS BIT(13) +#define DSM_BUF_CFG_CSC_STAT_DIS_SET(x)\ + FIELD_PREP(DSM_BUF_CFG_CSC_STAT_DIS, x) +#define DSM_BUF_CFG_CSC_STAT_DIS_GET(x)\ + FIELD_GET(DSM_BUF_CFG_CSC_STAT_DIS, x) + +#define DSM_BUF_CFG_AGING_ENA BIT(12) +#define DSM_BUF_CFG_AGING_ENA_SET(x)\ + FIELD_PREP(DSM_BUF_CFG_AGING_ENA, x) +#define DSM_BUF_CFG_AGING_ENA_GET(x)\ + FIELD_GET(DSM_BUF_CFG_AGING_ENA, x) + +#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS BIT(11) +#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(x)\ + FIELD_PREP(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, x) +#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_GET(x)\ + FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, x) + +#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT GENMASK(10, 0) +#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_SET(x)\ + FIELD_PREP(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x) +#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_GET(x)\ + FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x) + +/* DSM:CFG:DEV_TX_STOP_WM_CFG */ +#define DSM_DEV_TX_STOP_WM_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1360, r, 67, 4) + +#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA BIT(9) +#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_SET(x)\ + FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA, x) +#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_GET(x)\ + FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA, x) + +#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA BIT(8) +#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(x)\ + FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, x) +#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_GET(x)\ + FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, x) + +#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM GENMASK(7, 1) +#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(x)\ + FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, x) +#define 
DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_GET(x)\ + FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, x) + +#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR BIT(0) +#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(x)\ + FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x) +#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_GET(x)\ + FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x) + +/* DSM:CFG:RX_PAUSE_CFG */ +#define DSM_RX_PAUSE_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1628, r, 67, 4) + +#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN BIT(1) +#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(x)\ + FIELD_PREP(DSM_RX_PAUSE_CFG_RX_PAUSE_EN, x) +#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_GET(x)\ + FIELD_GET(DSM_RX_PAUSE_CFG_RX_PAUSE_EN, x) + +#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL BIT(0) +#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_SET(x)\ + FIELD_PREP(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x) +#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_GET(x)\ + FIELD_GET(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x) + +/* DSM:CFG:MAC_CFG */ +#define DSM_MAC_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2432, r, 67, 4) + +#define DSM_MAC_CFG_TX_PAUSE_VAL GENMASK(31, 16) +#define DSM_MAC_CFG_TX_PAUSE_VAL_SET(x)\ + FIELD_PREP(DSM_MAC_CFG_TX_PAUSE_VAL, x) +#define DSM_MAC_CFG_TX_PAUSE_VAL_GET(x)\ + FIELD_GET(DSM_MAC_CFG_TX_PAUSE_VAL, x) + +#define DSM_MAC_CFG_HDX_BACKPREASSURE BIT(2) +#define DSM_MAC_CFG_HDX_BACKPREASSURE_SET(x)\ + FIELD_PREP(DSM_MAC_CFG_HDX_BACKPREASSURE, x) +#define DSM_MAC_CFG_HDX_BACKPREASSURE_GET(x)\ + FIELD_GET(DSM_MAC_CFG_HDX_BACKPREASSURE, x) + +#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE BIT(1) +#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE_SET(x)\ + FIELD_PREP(DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE, x) +#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE_GET(x)\ + FIELD_GET(DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE, x) + +#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF BIT(0) +#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_SET(x)\ + FIELD_PREP(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x) +#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_GET(x)\ + FIELD_GET(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x) + +/* DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */ +#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2700, r, 65, 4) + +#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH GENMASK(23, 0) +#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_SET(x)\ + FIELD_PREP(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x) +#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_GET(x)\ + FIELD_GET(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x) + +/* DSM:CFG:MAC_ADDR_BASE_LOW_CFG */ +#define DSM_MAC_ADDR_BASE_LOW_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2960, r, 65, 4) + +#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW GENMASK(23, 0) +#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_SET(x)\ + FIELD_PREP(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x) +#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_GET(x)\ + FIELD_GET(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x) + +/* DSM:CFG:TAXI_CAL_CFG */ +#define DSM_TAXI_CAL_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 3224, r, 9, 4) + +#define DSM_TAXI_CAL_CFG_CAL_IDX GENMASK(20, 15) +#define DSM_TAXI_CAL_CFG_CAL_IDX_SET(x)\ + FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_IDX, x) +#define DSM_TAXI_CAL_CFG_CAL_IDX_GET(x)\ + FIELD_GET(DSM_TAXI_CAL_CFG_CAL_IDX, x) + +#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN GENMASK(14, 9) +#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN_SET(x)\ + FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_CUR_LEN, x) +#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(x)\ + FIELD_GET(DSM_TAXI_CAL_CFG_CAL_CUR_LEN, x) + +#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL GENMASK(8, 5) +#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL_SET(x)\ + 
FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_CUR_VAL, x) +#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL_GET(x)\ + FIELD_GET(DSM_TAXI_CAL_CFG_CAL_CUR_VAL, x) + +#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL GENMASK(4, 1) +#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(x)\ + FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_VAL, x) +#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL_GET(x)\ + FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_VAL, x) + +#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA BIT(0) +#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(x)\ + FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x) +#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_GET(x)\ + FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x) + +/* EACL:POL_CFG:POL_EACL_CFG */ +#define EACL_POL_EACL_CFG __REG(TARGET_EACL, 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4) + +#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED BIT(5) +#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_SET(x)\ + FIELD_PREP(EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED, x) +#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_GET(x)\ + FIELD_GET(EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED, x) + +#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY BIT(4) +#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY_SET(x)\ + FIELD_PREP(EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY, x) +#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY_GET(x)\ + FIELD_GET(EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY, x) + +#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY BIT(3) +#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY_SET(x)\ + FIELD_PREP(EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY, x) +#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY_GET(x)\ + FIELD_GET(EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY, x) + +#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE BIT(2) +#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE_SET(x)\ + FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_CLOSE, x) +#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE_GET(x)\ + FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_CLOSE, x) + +#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN BIT(1) +#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN_SET(x)\ + FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_OPEN, x) +#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN_GET(x)\ + FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_OPEN, x) + +#define EACL_POL_EACL_CFG_EACL_FORCE_INIT BIT(0) +#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(x)\ + FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x) +#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_GET(x)\ + FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x) + +/* EACL:RAM_CTRL:RAM_INIT */ +#define EACL_RAM_INIT __REG(TARGET_EACL, 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4) + +#define EACL_RAM_INIT_RAM_INIT BIT(1) +#define EACL_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(EACL_RAM_INIT_RAM_INIT, x) +#define EACL_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(EACL_RAM_INIT_RAM_INIT, x) + +#define EACL_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define EACL_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(EACL_RAM_INIT_RAM_CFG_HOOK, x) +#define EACL_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(EACL_RAM_INIT_RAM_CFG_HOOK, x) + +/* FDMA:FDMA:FDMA_CH_ACTIVATE */ +#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4) + +#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0) +#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\ + FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x) +#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\ + FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x) + +/* FDMA:FDMA:FDMA_CH_RELOAD */ +#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4) + +#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0) +#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\ + FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x) +#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\ + FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x) + 
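The FDMA trigger registers here (FDMA_CH_ACTIVATE and FDMA_CH_RELOAD above, FDMA_CH_DISABLE below) pack one request bit per channel into an 8-bit field, so operating on a channel means composing a single-bit mask with the _SET() helper. A minimal sketch (the helper function and channel parameter are assumptions for illustration, not part of this commit):

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	/* Build the FDMA_CH_ACTIVATE value requesting activation of one
	 * channel; CH_ACTIVATE holds one request bit per channel (0-7).
	 */
	static u32 fdma_ch_activate_val(unsigned int ch)
	{
		return FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(ch));
	}

The resulting value would then be written to FDMA_CH_ACTIVATE through whatever register-access helper the driver provides.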
+/* FDMA:FDMA:FDMA_CH_DISABLE */ +#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4) + +#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0) +#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\ + FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x) +#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\ + FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x) + +/* FDMA:FDMA:FDMA_DCB_LLP */ +#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4) + +/* FDMA:FDMA:FDMA_DCB_LLP1 */ +#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4) + +/* FDMA:FDMA:FDMA_DCB_LLP_PREV */ +#define FDMA_DCB_LLP_PREV(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 116, r, 8, 4) + +/* FDMA:FDMA:FDMA_DCB_LLP_PREV1 */ +#define FDMA_DCB_LLP_PREV1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 148, r, 8, 4) + +/* FDMA:FDMA:FDMA_CH_CFG */ +#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4) + +#define FDMA_CH_CFG_CH_XTR_STATUS_MODE BIT(7) +#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x) +#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x) + +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(6) +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x) +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x) + +#define FDMA_CH_CFG_CH_INJ_PORT BIT(5) +#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x) +#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x) + +#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(4, 1) +#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x) +#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x) + +#define FDMA_CH_CFG_CH_MEM BIT(0) +#define FDMA_CH_CFG_CH_MEM_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_MEM, x) +#define FDMA_CH_CFG_CH_MEM_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_MEM, x) + +/* FDMA:FDMA:FDMA_CH_TRANSLATE */ +#define FDMA_CH_TRANSLATE(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 256, r, 8, 4) + +#define FDMA_CH_TRANSLATE_OFFSET GENMASK(15, 0) +#define FDMA_CH_TRANSLATE_OFFSET_SET(x)\ + FIELD_PREP(FDMA_CH_TRANSLATE_OFFSET, x) +#define FDMA_CH_TRANSLATE_OFFSET_GET(x)\ + FIELD_GET(FDMA_CH_TRANSLATE_OFFSET, x) + +/* FDMA:FDMA:FDMA_XTR_CFG */ +#define FDMA_XTR_CFG __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 364, 0, 1, 4) + +#define FDMA_XTR_CFG_XTR_FIFO_WM GENMASK(15, 11) +#define FDMA_XTR_CFG_XTR_FIFO_WM_SET(x)\ + FIELD_PREP(FDMA_XTR_CFG_XTR_FIFO_WM, x) +#define FDMA_XTR_CFG_XTR_FIFO_WM_GET(x)\ + FIELD_GET(FDMA_XTR_CFG_XTR_FIFO_WM, x) + +#define FDMA_XTR_CFG_XTR_ARB_SAT GENMASK(10, 0) +#define FDMA_XTR_CFG_XTR_ARB_SAT_SET(x)\ + FIELD_PREP(FDMA_XTR_CFG_XTR_ARB_SAT, x) +#define FDMA_XTR_CFG_XTR_ARB_SAT_GET(x)\ + FIELD_GET(FDMA_XTR_CFG_XTR_ARB_SAT, x) + +/* FDMA:FDMA:FDMA_PORT_CTRL */ +#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4) + +#define FDMA_PORT_CTRL_INJ_STOP BIT(4) +#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x) +#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x) + +#define FDMA_PORT_CTRL_INJ_STOP_FORCE BIT(3) +#define FDMA_PORT_CTRL_INJ_STOP_FORCE_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP_FORCE, x) +#define FDMA_PORT_CTRL_INJ_STOP_FORCE_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_INJ_STOP_FORCE, x) + +#define FDMA_PORT_CTRL_XTR_STOP BIT(2) +#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, 
x) +#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x) + +#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY BIT(1) +#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY, x) +#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY, x) + +#define FDMA_PORT_CTRL_XTR_BUF_RST BIT(0) +#define FDMA_PORT_CTRL_XTR_BUF_RST_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_XTR_BUF_RST, x) +#define FDMA_PORT_CTRL_XTR_BUF_RST_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_RST, x) + +/* FDMA:FDMA:FDMA_INTR_DCB */ +#define FDMA_INTR_DCB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 384, 0, 1, 4) + +#define FDMA_INTR_DCB_INTR_DCB GENMASK(7, 0) +#define FDMA_INTR_DCB_INTR_DCB_SET(x)\ + FIELD_PREP(FDMA_INTR_DCB_INTR_DCB, x) +#define FDMA_INTR_DCB_INTR_DCB_GET(x)\ + FIELD_GET(FDMA_INTR_DCB_INTR_DCB, x) + +/* FDMA:FDMA:FDMA_INTR_DCB_ENA */ +#define FDMA_INTR_DCB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 388, 0, 1, 4) + +#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA GENMASK(7, 0) +#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_SET(x)\ + FIELD_PREP(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x) +#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_GET(x)\ + FIELD_GET(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x) + +/* FDMA:FDMA:FDMA_INTR_DB */ +#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4) + +#define FDMA_INTR_DB_INTR_DB GENMASK(7, 0) +#define FDMA_INTR_DB_INTR_DB_SET(x)\ + FIELD_PREP(FDMA_INTR_DB_INTR_DB, x) +#define FDMA_INTR_DB_INTR_DB_GET(x)\ + FIELD_GET(FDMA_INTR_DB_INTR_DB, x) + +/* FDMA:FDMA:FDMA_INTR_DB_ENA */ +#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4) + +#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0) +#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\ + FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x) +#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\ + FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x) + +/* FDMA:FDMA:FDMA_INTR_ERR */ +#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4) + +#define FDMA_INTR_ERR_INTR_PORT_ERR GENMASK(9, 8) +#define FDMA_INTR_ERR_INTR_PORT_ERR_SET(x)\ + FIELD_PREP(FDMA_INTR_ERR_INTR_PORT_ERR, x) +#define FDMA_INTR_ERR_INTR_PORT_ERR_GET(x)\ + FIELD_GET(FDMA_INTR_ERR_INTR_PORT_ERR, x) + +#define FDMA_INTR_ERR_INTR_CH_ERR GENMASK(7, 0) +#define FDMA_INTR_ERR_INTR_CH_ERR_SET(x)\ + FIELD_PREP(FDMA_INTR_ERR_INTR_CH_ERR, x) +#define FDMA_INTR_ERR_INTR_CH_ERR_GET(x)\ + FIELD_GET(FDMA_INTR_ERR_INTR_CH_ERR, x) + +/* FDMA:FDMA:FDMA_ERRORS */ +#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4) + +#define FDMA_ERRORS_ERR_XTR_WR GENMASK(31, 30) +#define FDMA_ERRORS_ERR_XTR_WR_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_XTR_WR, x) +#define FDMA_ERRORS_ERR_XTR_WR_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_XTR_WR, x) + +#define FDMA_ERRORS_ERR_XTR_OVF GENMASK(29, 28) +#define FDMA_ERRORS_ERR_XTR_OVF_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_XTR_OVF, x) +#define FDMA_ERRORS_ERR_XTR_OVF_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_XTR_OVF, x) + +#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF GENMASK(27, 26) +#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_XTR_TAXI32_OVF, x) +#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_XTR_TAXI32_OVF, x) + +#define FDMA_ERRORS_ERR_DCB_XTR_DATAL GENMASK(25, 24) +#define FDMA_ERRORS_ERR_DCB_XTR_DATAL_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_DCB_XTR_DATAL, x) +#define FDMA_ERRORS_ERR_DCB_XTR_DATAL_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_DCB_XTR_DATAL, x) + +#define FDMA_ERRORS_ERR_DCB_RD GENMASK(23, 16) +#define 
FDMA_ERRORS_ERR_DCB_RD_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_DCB_RD, x) +#define FDMA_ERRORS_ERR_DCB_RD_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_DCB_RD, x) + +#define FDMA_ERRORS_ERR_INJ_RD GENMASK(15, 10) +#define FDMA_ERRORS_ERR_INJ_RD_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_INJ_RD, x) +#define FDMA_ERRORS_ERR_INJ_RD_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_INJ_RD, x) + +#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC GENMASK(9, 8) +#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC, x) +#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC, x) + +#define FDMA_ERRORS_ERR_CH_WR GENMASK(7, 0) +#define FDMA_ERRORS_ERR_CH_WR_SET(x)\ + FIELD_PREP(FDMA_ERRORS_ERR_CH_WR, x) +#define FDMA_ERRORS_ERR_CH_WR_GET(x)\ + FIELD_GET(FDMA_ERRORS_ERR_CH_WR, x) + +/* FDMA:FDMA:FDMA_ERRORS_2 */ +#define FDMA_ERRORS_2 __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 416, 0, 1, 4) + +#define FDMA_ERRORS_2_ERR_XTR_FRAG GENMASK(1, 0) +#define FDMA_ERRORS_2_ERR_XTR_FRAG_SET(x)\ + FIELD_PREP(FDMA_ERRORS_2_ERR_XTR_FRAG, x) +#define FDMA_ERRORS_2_ERR_XTR_FRAG_GET(x)\ + FIELD_GET(FDMA_ERRORS_2_ERR_XTR_FRAG, x) + +/* FDMA:FDMA:FDMA_CTRL */ +#define FDMA_CTRL __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 424, 0, 1, 4) + +#define FDMA_CTRL_NRESET BIT(0) +#define FDMA_CTRL_NRESET_SET(x)\ + FIELD_PREP(FDMA_CTRL_NRESET, x) +#define FDMA_CTRL_NRESET_GET(x)\ + FIELD_GET(FDMA_CTRL_NRESET, x) + +/* DEVCPU_GCB:CHIP_REGS:CHIP_ID */ +#define GCB_CHIP_ID __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 0, 0, 1, 4) + +#define GCB_CHIP_ID_REV_ID GENMASK(31, 28) +#define GCB_CHIP_ID_REV_ID_SET(x)\ + FIELD_PREP(GCB_CHIP_ID_REV_ID, x) +#define GCB_CHIP_ID_REV_ID_GET(x)\ + FIELD_GET(GCB_CHIP_ID_REV_ID, x) + +#define GCB_CHIP_ID_PART_ID GENMASK(27, 12) +#define GCB_CHIP_ID_PART_ID_SET(x)\ + FIELD_PREP(GCB_CHIP_ID_PART_ID, x) +#define GCB_CHIP_ID_PART_ID_GET(x)\ + FIELD_GET(GCB_CHIP_ID_PART_ID, x) + +#define GCB_CHIP_ID_MFG_ID GENMASK(11, 1) +#define GCB_CHIP_ID_MFG_ID_SET(x)\ + FIELD_PREP(GCB_CHIP_ID_MFG_ID, x) +#define GCB_CHIP_ID_MFG_ID_GET(x)\ + FIELD_GET(GCB_CHIP_ID_MFG_ID, x) + +#define GCB_CHIP_ID_ONE BIT(0) +#define GCB_CHIP_ID_ONE_SET(x)\ + FIELD_PREP(GCB_CHIP_ID_ONE, x) +#define GCB_CHIP_ID_ONE_GET(x)\ + FIELD_GET(GCB_CHIP_ID_ONE, x) + +/* DEVCPU_GCB:CHIP_REGS:SOFT_RST */ +#define GCB_SOFT_RST __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 8, 0, 1, 4) + +#define GCB_SOFT_RST_SOFT_NON_CFG_RST BIT(2) +#define GCB_SOFT_RST_SOFT_NON_CFG_RST_SET(x)\ + FIELD_PREP(GCB_SOFT_RST_SOFT_NON_CFG_RST, x) +#define GCB_SOFT_RST_SOFT_NON_CFG_RST_GET(x)\ + FIELD_GET(GCB_SOFT_RST_SOFT_NON_CFG_RST, x) + +#define GCB_SOFT_RST_SOFT_SWC_RST BIT(1) +#define GCB_SOFT_RST_SOFT_SWC_RST_SET(x)\ + FIELD_PREP(GCB_SOFT_RST_SOFT_SWC_RST, x) +#define GCB_SOFT_RST_SOFT_SWC_RST_GET(x)\ + FIELD_GET(GCB_SOFT_RST_SOFT_SWC_RST, x) + +#define GCB_SOFT_RST_SOFT_CHIP_RST BIT(0) +#define GCB_SOFT_RST_SOFT_CHIP_RST_SET(x)\ + FIELD_PREP(GCB_SOFT_RST_SOFT_CHIP_RST, x) +#define GCB_SOFT_RST_SOFT_CHIP_RST_GET(x)\ + FIELD_GET(GCB_SOFT_RST_SOFT_CHIP_RST, x) + +/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */ +#define GCB_HW_SGPIO_SD_CFG __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 20, 0, 1, 4) + +#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA BIT(1) +#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_SET(x)\ + FIELD_PREP(GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA, x) +#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_GET(x)\ + FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA, x) + +#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL BIT(0) +#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_SET(x)\ + 
FIELD_PREP(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x) +#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_GET(x)\ + FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x) + +/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */ +#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 24, r, 65, 4) + +#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL GENMASK(8, 0) +#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_SET(x)\ + FIELD_PREP(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x) +#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_GET(x)\ + FIELD_GET(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x) + +/* DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */ +#define GCB_SIO_CLOCK(g) __REG(TARGET_GCB, 0, 1, 876, g, 3, 280, 20, 0, 1, 4) + +#define GCB_SIO_CLOCK_SIO_CLK_FREQ GENMASK(19, 8) +#define GCB_SIO_CLOCK_SIO_CLK_FREQ_SET(x)\ + FIELD_PREP(GCB_SIO_CLOCK_SIO_CLK_FREQ, x) +#define GCB_SIO_CLOCK_SIO_CLK_FREQ_GET(x)\ + FIELD_GET(GCB_SIO_CLOCK_SIO_CLK_FREQ, x) + +#define GCB_SIO_CLOCK_SYS_CLK_PERIOD GENMASK(7, 0) +#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(x)\ + FIELD_PREP(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x) +#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\ + FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x) + +/* HSCH:HSCH_CFG:CIR_CFG */ +#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 0, 0, 1, 4) + +#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6) +#define HSCH_CIR_CFG_CIR_RATE_SET(x)\ + FIELD_PREP(HSCH_CIR_CFG_CIR_RATE, x) +#define HSCH_CIR_CFG_CIR_RATE_GET(x)\ + FIELD_GET(HSCH_CIR_CFG_CIR_RATE, x) + +#define HSCH_CIR_CFG_CIR_BURST GENMASK(5, 0) +#define HSCH_CIR_CFG_CIR_BURST_SET(x)\ + FIELD_PREP(HSCH_CIR_CFG_CIR_BURST, x) +#define HSCH_CIR_CFG_CIR_BURST_GET(x)\ + FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x) + +/* HSCH:HSCH_CFG:EIR_CFG */ +#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 4, 0, 1, 4) + +#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6) +#define HSCH_EIR_CFG_EIR_RATE_SET(x)\ + FIELD_PREP(HSCH_EIR_CFG_EIR_RATE, x) +#define HSCH_EIR_CFG_EIR_RATE_GET(x)\ + FIELD_GET(HSCH_EIR_CFG_EIR_RATE, x) + +#define HSCH_EIR_CFG_EIR_BURST GENMASK(5, 0) +#define HSCH_EIR_CFG_EIR_BURST_SET(x)\ + FIELD_PREP(HSCH_EIR_CFG_EIR_BURST, x) +#define HSCH_EIR_CFG_EIR_BURST_GET(x)\ + FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x) + +/* HSCH:HSCH_CFG:SE_CFG */ +#define HSCH_SE_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 8, 0, 1, 4) + +#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6) +#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\ + FIELD_PREP(HSCH_SE_CFG_SE_DWRR_CNT, x) +#define HSCH_SE_CFG_SE_DWRR_CNT_GET(x)\ + FIELD_GET(HSCH_SE_CFG_SE_DWRR_CNT, x) + +#define HSCH_SE_CFG_SE_AVB_ENA BIT(5) +#define HSCH_SE_CFG_SE_AVB_ENA_SET(x)\ + FIELD_PREP(HSCH_SE_CFG_SE_AVB_ENA, x) +#define HSCH_SE_CFG_SE_AVB_ENA_GET(x)\ + FIELD_GET(HSCH_SE_CFG_SE_AVB_ENA, x) + +#define HSCH_SE_CFG_SE_FRM_MODE GENMASK(4, 3) +#define HSCH_SE_CFG_SE_FRM_MODE_SET(x)\ + FIELD_PREP(HSCH_SE_CFG_SE_FRM_MODE, x) +#define HSCH_SE_CFG_SE_FRM_MODE_GET(x)\ + FIELD_GET(HSCH_SE_CFG_SE_FRM_MODE, x) + +#define HSCH_SE_CFG_SE_DWRR_FRM_MODE GENMASK(2, 1) +#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_SET(x)\ + FIELD_PREP(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x) +#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_GET(x)\ + FIELD_GET(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x) + +#define HSCH_SE_CFG_SE_STOP BIT(0) +#define HSCH_SE_CFG_SE_STOP_SET(x)\ + FIELD_PREP(HSCH_SE_CFG_SE_STOP, x) +#define HSCH_SE_CFG_SE_STOP_GET(x)\ + FIELD_GET(HSCH_SE_CFG_SE_STOP, x) + +/* HSCH:HSCH_CFG:SE_CONNECT */ +#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 12, 0, 1, 4) + +#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0) +#define 
HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\ + FIELD_PREP(HSCH_SE_CONNECT_SE_LEAK_LINK, x) +#define HSCH_SE_CONNECT_SE_LEAK_LINK_GET(x)\ + FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x) + +/* HSCH:HSCH_CFG:SE_DLB_SENSE */ +#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 16, 0, 1, 4) + +#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10) +#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\ + FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x) +#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_GET(x)\ + FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x) + +#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT GENMASK(9, 3) +#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_SET(x)\ + FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x) +#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_GET(x)\ + FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x) + +#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA BIT(2) +#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_SET(x)\ + FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x) +#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_GET(x)\ + FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x) + +#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(1) +#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_SET(x)\ + FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x) +#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_GET(x)\ + FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x) + +#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0) +#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_SET(x)\ + FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x) +#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_GET(x)\ + FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x) + +/* HSCH:HSCH_DWRR:DWRR_ENTRY */ +#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH, 0, 1, 162816, g, 72, 4, 0, 0, 1, 4) + +#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20) +#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\ + FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_COST, x) +#define HSCH_DWRR_ENTRY_DWRR_COST_GET(x)\ + FIELD_GET(HSCH_DWRR_ENTRY_DWRR_COST, x) + +#define HSCH_DWRR_ENTRY_DWRR_BALANCE GENMASK(19, 0) +#define HSCH_DWRR_ENTRY_DWRR_BALANCE_SET(x)\ + FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_BALANCE, x) +#define HSCH_DWRR_ENTRY_DWRR_BALANCE_GET(x)\ + FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x) + +/* HSCH:HSCH_MISC:HSCH_CFG_CFG */ +#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4) + +#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14) +#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\ + FIELD_PREP(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x) +#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_GET(x)\ + FIELD_GET(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x) + +#define HSCH_HSCH_CFG_CFG_HSCH_LAYER GENMASK(13, 12) +#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(x)\ + FIELD_PREP(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x) +#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_GET(x)\ + FIELD_GET(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x) + +#define HSCH_HSCH_CFG_CFG_CSR_GRANT GENMASK(11, 0) +#define HSCH_HSCH_CFG_CFG_CSR_GRANT_SET(x)\ + FIELD_PREP(HSCH_HSCH_CFG_CFG_CSR_GRANT, x) +#define HSCH_HSCH_CFG_CFG_CSR_GRANT_GET(x)\ + FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x) + +/* HSCH:HSCH_MISC:SYS_CLK_PER */ +#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4) + +#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS GENMASK(7, 0) +#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(x)\ + FIELD_PREP(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x) +#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\ + FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x) + +/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */ +#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 0, r, 4, 4) + +#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0) +#define 
HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\ + FIELD_PREP(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x) +#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(x)\ + FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x) + +/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */ +#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 16, r, 4, 4) + +#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1) +#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\ + FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x) +#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(x)\ + FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x) + +#define HSCH_HSCH_LEAK_CFG_LEAK_ERR BIT(0) +#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_SET(x)\ + FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x) +#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_GET(x)\ + FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x) + +/* HSCH:SYSTEM:FLUSH_CTRL */ +#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4) + +#define HSCH_FLUSH_CTRL_FLUSH_ENA BIT(27) +#define HSCH_FLUSH_CTRL_FLUSH_ENA_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_ENA, x) +#define HSCH_FLUSH_CTRL_FLUSH_ENA_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_ENA, x) + +#define HSCH_FLUSH_CTRL_FLUSH_SRC BIT(26) +#define HSCH_FLUSH_CTRL_FLUSH_SRC_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_SRC, x) +#define HSCH_FLUSH_CTRL_FLUSH_SRC_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SRC, x) + +#define HSCH_FLUSH_CTRL_FLUSH_DST BIT(25) +#define HSCH_FLUSH_CTRL_FLUSH_DST_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_DST, x) +#define HSCH_FLUSH_CTRL_FLUSH_DST_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_DST, x) + +#define HSCH_FLUSH_CTRL_FLUSH_PORT GENMASK(24, 18) +#define HSCH_FLUSH_CTRL_FLUSH_PORT_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_PORT, x) +#define HSCH_FLUSH_CTRL_FLUSH_PORT_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_PORT, x) + +#define HSCH_FLUSH_CTRL_FLUSH_QUEUE BIT(17) +#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_QUEUE, x) +#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_QUEUE, x) + +#define HSCH_FLUSH_CTRL_FLUSH_SE BIT(16) +#define HSCH_FLUSH_CTRL_FLUSH_SE_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_SE, x) +#define HSCH_FLUSH_CTRL_FLUSH_SE_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SE, x) + +#define HSCH_FLUSH_CTRL_FLUSH_HIER GENMASK(15, 0) +#define HSCH_FLUSH_CTRL_FLUSH_HIER_SET(x)\ + FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_HIER, x) +#define HSCH_FLUSH_CTRL_FLUSH_HIER_GET(x)\ + FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_HIER, x) + +/* HSCH:SYSTEM:PORT_MODE */ +#define HSCH_PORT_MODE(r) __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 8, r, 70, 4) + +#define HSCH_PORT_MODE_DEQUEUE_DIS BIT(4) +#define HSCH_PORT_MODE_DEQUEUE_DIS_SET(x)\ + FIELD_PREP(HSCH_PORT_MODE_DEQUEUE_DIS, x) +#define HSCH_PORT_MODE_DEQUEUE_DIS_GET(x)\ + FIELD_GET(HSCH_PORT_MODE_DEQUEUE_DIS, x) + +#define HSCH_PORT_MODE_AGE_DIS BIT(3) +#define HSCH_PORT_MODE_AGE_DIS_SET(x)\ + FIELD_PREP(HSCH_PORT_MODE_AGE_DIS, x) +#define HSCH_PORT_MODE_AGE_DIS_GET(x)\ + FIELD_GET(HSCH_PORT_MODE_AGE_DIS, x) + +#define HSCH_PORT_MODE_TRUNC_ENA BIT(2) +#define HSCH_PORT_MODE_TRUNC_ENA_SET(x)\ + FIELD_PREP(HSCH_PORT_MODE_TRUNC_ENA, x) +#define HSCH_PORT_MODE_TRUNC_ENA_GET(x)\ + FIELD_GET(HSCH_PORT_MODE_TRUNC_ENA, x) + +#define HSCH_PORT_MODE_EIR_REMARK_ENA BIT(1) +#define HSCH_PORT_MODE_EIR_REMARK_ENA_SET(x)\ + FIELD_PREP(HSCH_PORT_MODE_EIR_REMARK_ENA, x) +#define HSCH_PORT_MODE_EIR_REMARK_ENA_GET(x)\ + FIELD_GET(HSCH_PORT_MODE_EIR_REMARK_ENA, x) + +#define HSCH_PORT_MODE_CPU_PRIO_MODE BIT(0) +#define HSCH_PORT_MODE_CPU_PRIO_MODE_SET(x)\ + FIELD_PREP(HSCH_PORT_MODE_CPU_PRIO_MODE, x) 
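Taken together, the HSCH_FLUSH_CTRL fields above form a single flush command: pick the port, choose whether to flush it as frame source and/or destination, and set the enable bit to start the flush. A minimal sketch of composing such a command (the function and port parameter are assumptions for illustration, not part of this commit):

	#include <linux/bitfield.h>
	#include <linux/types.h>

	/* Compose a flush command covering traffic to and from "portno". */
	static u32 hsch_flush_port_val(u32 portno)
	{
		return HSCH_FLUSH_CTRL_FLUSH_PORT_SET(portno) |
		       HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
		       HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
		       HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1);
	}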
+#define HSCH_PORT_MODE_CPU_PRIO_MODE_GET(x)\ + FIELD_GET(HSCH_PORT_MODE_CPU_PRIO_MODE, x) + +/* HSCH:SYSTEM:OUTB_SHARE_ENA */ +#define HSCH_OUTB_SHARE_ENA(r) __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 288, r, 5, 4) + +#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA GENMASK(7, 0) +#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(x)\ + FIELD_PREP(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x) +#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_GET(x)\ + FIELD_GET(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x) + +/* HSCH:MMGT:RESET_CFG */ +#define HSCH_RESET_CFG __REG(TARGET_HSCH, 0, 1, 162368, 0, 1, 16, 8, 0, 1, 4) + +#define HSCH_RESET_CFG_CORE_ENA BIT(0) +#define HSCH_RESET_CFG_CORE_ENA_SET(x)\ + FIELD_PREP(HSCH_RESET_CFG_CORE_ENA, x) +#define HSCH_RESET_CFG_CORE_ENA_GET(x)\ + FIELD_GET(HSCH_RESET_CFG_CORE_ENA, x) + +/* HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */ +#define HSCH_TAS_STATEMACHINE_CFG __REG(TARGET_HSCH, 0, 1, 162384, 0, 1, 12, 8, 0, 1, 4) + +#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY GENMASK(7, 0) +#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET(x)\ + FIELD_PREP(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x) +#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\ + FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x) + +/* LRN:COMMON:COMMON_ACCESS_CTRL */ +#define LRN_COMMON_ACCESS_CTRL __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4) + +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL GENMASK(21, 20) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_SET(x)\ + FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL, x) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_GET(x)\ + FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL, x) + +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE BIT(19) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_SET(x)\ + FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_GET(x)\ + FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x) + +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW GENMASK(18, 5) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_SET(x)\ + FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_GET(x)\ + FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x) + +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD GENMASK(4, 1) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(x)\ + FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD, x) +#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_GET(x)\ + FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD, x) + +#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT BIT(0) +#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(x)\ + FIELD_PREP(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x) +#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(x)\ + FIELD_GET(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x) + +/* LRN:COMMON:MAC_ACCESS_CFG_0 */ +#define LRN_MAC_ACCESS_CFG_0 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 4, 0, 1, 4) + +#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID GENMASK(28, 16) +#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID, x) +#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID, x) + +#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB GENMASK(15, 0) +#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x) +#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x) + 
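MAC_ACCESS_CFG_0 above and MAC_ACCESS_CFG_1 below together hold a complete 48-bit MAC address plus its filtering ID: the upper 16 address bits share CFG_0 with the FID, while the lower 32 bits fill all of CFG_1, which is why CFG_1 needs no field macros. A minimal sketch of the split (the helper function is an assumption for illustration, not part of this commit):

	#include <linux/types.h>

	/* Split a 6-byte MAC into the two access-register words. */
	static void mac_to_cfg_words(const u8 *mac, u16 fid,
				     u32 *cfg0, u32 *cfg1)
	{
		u32 msb = (mac[0] << 8) | mac[1];

		*cfg0 = LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_SET(fid) |
			LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_SET(msb);
		*cfg1 = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
			((u32)mac[4] << 8) | mac[5];
	}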
+/* LRN:COMMON:MAC_ACCESS_CFG_1 */ +#define LRN_MAC_ACCESS_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 8, 0, 1, 4) + +/* LRN:COMMON:MAC_ACCESS_CFG_2 */ +#define LRN_MAC_ACCESS_CFG_2 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 12, 0, 1, 4) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD BIT(28) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL BIT(27) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU GENMASK(26, 24) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY BIT(23) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE BIT(22) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR BIT(21) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG GENMASK(20, 19) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL GENMASK(18, 17) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED BIT(16) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD BIT(15) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE GENMASK(14, 12) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE, x) + +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR GENMASK(11, 0) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(x)\ + 
FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x) +#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x) + +/* LRN:COMMON:MAC_ACCESS_CFG_3 */ +#define LRN_MAC_ACCESS_CFG_3 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 16, 0, 1, 4) + +#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX GENMASK(10, 0) +#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_SET(x)\ + FIELD_PREP(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x) +#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_GET(x)\ + FIELD_GET(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x) + +/* LRN:COMMON:SCAN_NEXT_CFG */ +#define LRN_SCAN_NEXT_CFG __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 20, 0, 1, 4) + +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL GENMASK(21, 19) +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL, x) +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL GENMASK(18, 17) +#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL GENMASK(16, 15) +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL, x) +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA BIT(14) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA, x) + +#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA BIT(13) +#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA, x) +#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA BIT(12) +#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA BIT(11) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA BIT(10) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA BIT(9) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA BIT(8) +#define 
LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA BIT(7) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA, x) +#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA, x) + +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK GENMASK(6, 3) +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK, x) +#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK, x) + +#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA BIT(2) +#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA, x) +#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA, x) + +#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA BIT(1) +#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_FID_FILTER_ENA, x) +#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_FID_FILTER_ENA, x) + +#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA BIT(0) +#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x) +#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x) + +/* LRN:COMMON:SCAN_NEXT_CFG_1 */ +#define LRN_SCAN_NEXT_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 24, 0, 1, 4) + +#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR GENMASK(30, 16) +#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR, x) +#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR, x) + +#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK GENMASK(14, 0) +#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_SET(x)\ + FIELD_PREP(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x) +#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_GET(x)\ + FIELD_GET(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x) + +/* LRN:COMMON:AUTOAGE_CFG */ +#define LRN_AUTOAGE_CFG(r) __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 36, r, 4, 4) + +#define LRN_AUTOAGE_CFG_UNIT_SIZE GENMASK(29, 28) +#define LRN_AUTOAGE_CFG_UNIT_SIZE_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_UNIT_SIZE, x) +#define LRN_AUTOAGE_CFG_UNIT_SIZE_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_UNIT_SIZE, x) + +#define LRN_AUTOAGE_CFG_PERIOD_VAL GENMASK(27, 0) +#define LRN_AUTOAGE_CFG_PERIOD_VAL_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_PERIOD_VAL, x) +#define LRN_AUTOAGE_CFG_PERIOD_VAL_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_PERIOD_VAL, x) + +/* LRN:COMMON:AUTOAGE_CFG_1 */ +#define LRN_AUTOAGE_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 52, 0, 1, 4) + +#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA BIT(25) +#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA, x) +#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA, x) + +#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN GENMASK(24, 15) +#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN, x) +#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN_GET(x)\ + 
FIELD_GET(LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN, x) + +#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS GENMASK(14, 7) +#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, x) +#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, x) + +#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA BIT(6) +#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA, x) +#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA, x) + +#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT GENMASK(5, 2) +#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT, x) +#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT, x) + +#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT BIT(1) +#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT, x) +#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT, x) + +#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA BIT(0) +#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x) +#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x) + +/* LRN:COMMON:AUTOAGE_CFG_2 */ +#define LRN_AUTOAGE_CFG_2 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 56, 0, 1, 4) + +#define LRN_AUTOAGE_CFG_2_NEXT_ROW GENMASK(17, 4) +#define LRN_AUTOAGE_CFG_2_NEXT_ROW_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_2_NEXT_ROW, x) +#define LRN_AUTOAGE_CFG_2_NEXT_ROW_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_2_NEXT_ROW, x) + +#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS GENMASK(3, 0) +#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_SET(x)\ + FIELD_PREP(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x) +#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_GET(x)\ + FIELD_GET(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */ +#define PCEP_RCTRL_2_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4) + +#define PCEP_RCTRL_2_OUT_0_MSG_CODE GENMASK(7, 0) +#define PCEP_RCTRL_2_OUT_0_MSG_CODE_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_MSG_CODE, x) +#define PCEP_RCTRL_2_OUT_0_MSG_CODE_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_MSG_CODE, x) + +#define PCEP_RCTRL_2_OUT_0_TAG GENMASK(15, 8) +#define PCEP_RCTRL_2_OUT_0_TAG_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_TAG, x) +#define PCEP_RCTRL_2_OUT_0_TAG_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_TAG, x) + +#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN BIT(16) +#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN, x) +#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN, x) + +#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS BIT(19) +#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_FUNC_BYPASS, x) +#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_FUNC_BYPASS, x) + +#define PCEP_RCTRL_2_OUT_0_SNP BIT(20) +#define PCEP_RCTRL_2_OUT_0_SNP_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_SNP, x) +#define PCEP_RCTRL_2_OUT_0_SNP_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_SNP, x) + +#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD BIT(22) +#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD, x) +#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD_GET(x)\ 
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD, x) + +#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN BIT(23) +#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN, x) +#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN, x) + +#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE BIT(28) +#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE, x) +#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE, x) + +#define PCEP_RCTRL_2_OUT_0_INVERT_MODE BIT(29) +#define PCEP_RCTRL_2_OUT_0_INVERT_MODE_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_INVERT_MODE, x) +#define PCEP_RCTRL_2_OUT_0_INVERT_MODE_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_INVERT_MODE, x) + +#define PCEP_RCTRL_2_OUT_0_REGION_EN BIT(31) +#define PCEP_RCTRL_2_OUT_0_REGION_EN_SET(x)\ + FIELD_PREP(PCEP_RCTRL_2_OUT_0_REGION_EN, x) +#define PCEP_RCTRL_2_OUT_0_REGION_EN_GET(x)\ + FIELD_GET(PCEP_RCTRL_2_OUT_0_REGION_EN, x) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_LWR_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4) + +#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW GENMASK(15, 0) +#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_SET(x)\ + FIELD_PREP(PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW, x) +#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_GET(x)\ + FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW, x) + +#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW GENMASK(31, 16) +#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_SET(x)\ + FIELD_PREP(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x) +#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_GET(x)\ + FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_UPR_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_LIM_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4) + +#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW GENMASK(15, 0) +#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_SET(x)\ + FIELD_PREP(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW, x) +#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_GET(x)\ + FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW, x) + +#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW GENMASK(31, 16) +#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_SET(x)\ + FIELD_PREP(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x) +#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_GET(x)\ + FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_LWR_TGT_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_UPR_TGT_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4) + +/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */ +#define PCEP_ADDR_UPR_LIM_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4) + +#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW GENMASK(1, 0) +#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_SET(x)\ + FIELD_PREP(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW, x) +#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_GET(x)\ + FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW, x) + +#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW GENMASK(31, 2) +#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_SET(x)\ + FIELD_PREP(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x) 
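
/* Editor's sketch (not part of this commit): how the generated _SET/_GET
 * helpers are meant to be combined. It assumes the spx5_rd()/spx5_rmw()
 * accessors declared elsewhere in sparx5_main.h, which take the argument
 * list that a register macro such as PCEP_RCTRL_2_OUT_0 expands to via
 * __REG(). Enabling outbound iATU region 0 could look like:
 *
 *	spx5_rmw(PCEP_RCTRL_2_OUT_0_REGION_EN_SET(1),
 *		 PCEP_RCTRL_2_OUT_0_REGION_EN,
 *		 sparx5, PCEP_RCTRL_2_OUT_0);
 *
 * and reading the bit back is the mirror image:
 *
 *	u32 ena = PCEP_RCTRL_2_OUT_0_REGION_EN_GET(
 *			spx5_rd(sparx5, PCEP_RCTRL_2_OUT_0));
 */
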
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_GET(x)\ + FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x) + +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */ +#define PCS10G_BR_PCS_CFG(t) __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 0, 0, 1, 4) + +#define PCS10G_BR_PCS_CFG_PCS_ENA BIT(31) +#define PCS10G_BR_PCS_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_PCS_ENA, x) +#define PCS10G_BR_PCS_CFG_PCS_ENA_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_PCS_ENA, x) + +#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA BIT(30) +#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x) +#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x) + +#define PCS10G_BR_PCS_CFG_SH_CNT_MAX GENMASK(29, 24) +#define PCS10G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_SH_CNT_MAX, x) +#define PCS10G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_SH_CNT_MAX, x) + +#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP BIT(18) +#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_RX_DATA_FLIP, x) +#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_RX_DATA_FLIP, x) + +#define PCS10G_BR_PCS_CFG_RESYNC_ENA BIT(15) +#define PCS10G_BR_PCS_CFG_RESYNC_ENA_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_RESYNC_ENA, x) +#define PCS10G_BR_PCS_CFG_RESYNC_ENA_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_RESYNC_ENA, x) + +#define PCS10G_BR_PCS_CFG_LF_GEN_DIS BIT(14) +#define PCS10G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_LF_GEN_DIS, x) +#define PCS10G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_LF_GEN_DIS, x) + +#define PCS10G_BR_PCS_CFG_RX_TEST_MODE BIT(13) +#define PCS10G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_RX_TEST_MODE, x) +#define PCS10G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_RX_TEST_MODE, x) + +#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE BIT(12) +#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_RX_SCR_DISABLE, x) +#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_RX_SCR_DISABLE, x) + +#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP BIT(7) +#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_TX_DATA_FLIP, x) +#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_TX_DATA_FLIP, x) + +#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA BIT(6) +#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x) +#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x) + +#define PCS10G_BR_PCS_CFG_TX_TEST_MODE BIT(4) +#define PCS10G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_TX_TEST_MODE, x) +#define PCS10G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_TX_TEST_MODE, x) + +#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE BIT(3) +#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x) +#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x) + +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */ +#define PCS10G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 4, 0, 1, 4) + +#define PCS10G_BR_PCS_SD_CFG_SD_SEL BIT(8) +#define PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_SEL, x) +#define PCS10G_BR_PCS_SD_CFG_SD_SEL_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_SEL, x) + +#define PCS10G_BR_PCS_SD_CFG_SD_POL 
BIT(4) +#define PCS10G_BR_PCS_SD_CFG_SD_POL_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_POL, x) +#define PCS10G_BR_PCS_SD_CFG_SD_POL_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_POL, x) + +#define PCS10G_BR_PCS_SD_CFG_SD_ENA BIT(0) +#define PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_ENA, x) +#define PCS10G_BR_PCS_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_ENA, x) + +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */ +#define PCS25G_BR_PCS_CFG(t) __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 0, 0, 1, 4) + +#define PCS25G_BR_PCS_CFG_PCS_ENA BIT(31) +#define PCS25G_BR_PCS_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_PCS_ENA, x) +#define PCS25G_BR_PCS_CFG_PCS_ENA_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_PCS_ENA, x) + +#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA BIT(30) +#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x) +#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x) + +#define PCS25G_BR_PCS_CFG_SH_CNT_MAX GENMASK(29, 24) +#define PCS25G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_SH_CNT_MAX, x) +#define PCS25G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_SH_CNT_MAX, x) + +#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP BIT(18) +#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_RX_DATA_FLIP, x) +#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_RX_DATA_FLIP, x) + +#define PCS25G_BR_PCS_CFG_RESYNC_ENA BIT(15) +#define PCS25G_BR_PCS_CFG_RESYNC_ENA_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_RESYNC_ENA, x) +#define PCS25G_BR_PCS_CFG_RESYNC_ENA_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_RESYNC_ENA, x) + +#define PCS25G_BR_PCS_CFG_LF_GEN_DIS BIT(14) +#define PCS25G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_LF_GEN_DIS, x) +#define PCS25G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_LF_GEN_DIS, x) + +#define PCS25G_BR_PCS_CFG_RX_TEST_MODE BIT(13) +#define PCS25G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_RX_TEST_MODE, x) +#define PCS25G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_RX_TEST_MODE, x) + +#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE BIT(12) +#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_RX_SCR_DISABLE, x) +#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_RX_SCR_DISABLE, x) + +#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP BIT(7) +#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_TX_DATA_FLIP, x) +#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_TX_DATA_FLIP, x) + +#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA BIT(6) +#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x) +#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x) + +#define PCS25G_BR_PCS_CFG_TX_TEST_MODE BIT(4) +#define PCS25G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_TX_TEST_MODE, x) +#define PCS25G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_TX_TEST_MODE, x) + +#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE BIT(3) +#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x) +#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x) + +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */ +#define PCS25G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS25G_BR, t, 8, 
0, 0, 1, 56, 4, 0, 1, 4) + +#define PCS25G_BR_PCS_SD_CFG_SD_SEL BIT(8) +#define PCS25G_BR_PCS_SD_CFG_SD_SEL_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_SEL, x) +#define PCS25G_BR_PCS_SD_CFG_SD_SEL_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_SEL, x) + +#define PCS25G_BR_PCS_SD_CFG_SD_POL BIT(4) +#define PCS25G_BR_PCS_SD_CFG_SD_POL_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_POL, x) +#define PCS25G_BR_PCS_SD_CFG_SD_POL_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_POL, x) + +#define PCS25G_BR_PCS_SD_CFG_SD_ENA BIT(0) +#define PCS25G_BR_PCS_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_ENA, x) +#define PCS25G_BR_PCS_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_ENA, x) + +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */ +#define PCS5G_BR_PCS_CFG(t) __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 0, 0, 1, 4) + +#define PCS5G_BR_PCS_CFG_PCS_ENA BIT(31) +#define PCS5G_BR_PCS_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_PCS_ENA, x) +#define PCS5G_BR_PCS_CFG_PCS_ENA_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_PCS_ENA, x) + +#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA BIT(30) +#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x) +#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x) + +#define PCS5G_BR_PCS_CFG_SH_CNT_MAX GENMASK(29, 24) +#define PCS5G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_SH_CNT_MAX, x) +#define PCS5G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_SH_CNT_MAX, x) + +#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP BIT(18) +#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_RX_DATA_FLIP, x) +#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_RX_DATA_FLIP, x) + +#define PCS5G_BR_PCS_CFG_RESYNC_ENA BIT(15) +#define PCS5G_BR_PCS_CFG_RESYNC_ENA_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_RESYNC_ENA, x) +#define PCS5G_BR_PCS_CFG_RESYNC_ENA_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_RESYNC_ENA, x) + +#define PCS5G_BR_PCS_CFG_LF_GEN_DIS BIT(14) +#define PCS5G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_LF_GEN_DIS, x) +#define PCS5G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_LF_GEN_DIS, x) + +#define PCS5G_BR_PCS_CFG_RX_TEST_MODE BIT(13) +#define PCS5G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_RX_TEST_MODE, x) +#define PCS5G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_RX_TEST_MODE, x) + +#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE BIT(12) +#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_RX_SCR_DISABLE, x) +#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_RX_SCR_DISABLE, x) + +#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP BIT(7) +#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_TX_DATA_FLIP, x) +#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_TX_DATA_FLIP, x) + +#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA BIT(6) +#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x) +#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x) + +#define PCS5G_BR_PCS_CFG_TX_TEST_MODE BIT(4) +#define PCS5G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_CFG_TX_TEST_MODE, x) +#define PCS5G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_TX_TEST_MODE, x) + +#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE BIT(3) +#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\ + 
FIELD_PREP(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x) +#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x) + +/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */ +#define PCS5G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 4, 0, 1, 4) + +#define PCS5G_BR_PCS_SD_CFG_SD_SEL BIT(8) +#define PCS5G_BR_PCS_SD_CFG_SD_SEL_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_SEL, x) +#define PCS5G_BR_PCS_SD_CFG_SD_SEL_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_SEL, x) + +#define PCS5G_BR_PCS_SD_CFG_SD_POL BIT(4) +#define PCS5G_BR_PCS_SD_CFG_SD_POL_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_POL, x) +#define PCS5G_BR_PCS_SD_CFG_SD_POL_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_POL, x) + +#define PCS5G_BR_PCS_SD_CFG_SD_ENA BIT(0) +#define PCS5G_BR_PCS_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_ENA, x) +#define PCS5G_BR_PCS_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_ENA, x) + +/* PORT_CONF:HW_CFG:DEV5G_MODES */ +#define PORT_CONF_DEV5G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 0, 0, 1, 4) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE BIT(0) +#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE BIT(1) +#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE BIT(2) +#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE BIT(3) +#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE BIT(4) +#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE BIT(5) +#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE BIT(6) +#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE BIT(7) +#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE BIT(8) +#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE BIT(9) +#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_SET(x)\ 
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE BIT(10) +#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE BIT(11) +#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x) + +#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE BIT(12) +#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x) +#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x) + +/* PORT_CONF:HW_CFG:DEV10G_MODES */ +#define PORT_CONF_DEV10G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 4, 0, 1, 4) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE BIT(0) +#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE BIT(1) +#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE BIT(2) +#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE BIT(3) +#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE BIT(4) +#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE BIT(5) +#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE BIT(6) +#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE BIT(7) +#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE BIT(8) +#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x) +#define 
PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE BIT(9) +#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE BIT(10) +#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x) + +#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE BIT(11) +#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x) +#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x) + +/* PORT_CONF:HW_CFG:DEV25G_MODES */ +#define PORT_CONF_DEV25G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 8, 0, 1, 4) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE BIT(0) +#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE BIT(1) +#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE BIT(2) +#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE BIT(3) +#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE BIT(4) +#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE BIT(5) +#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE BIT(6) +#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE, x) + +#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE BIT(7) +#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x) +#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_GET(x)\ + FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x) + +/* PORT_CONF:HW_CFG:QSGMII_ENA */ +#define PORT_CONF_QSGMII_ENA __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 12, 0, 1, 4) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0 BIT(0) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_SET(x)\ + 
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_0, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_0, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1 BIT(1) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_1, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_1, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2 BIT(2) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_2, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_2, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3 BIT(3) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_3, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_3, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4 BIT(4) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_4, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_4, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5 BIT(5) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6 BIT(6) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7 BIT(7) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8 BIT(8) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9 BIT(9) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10 BIT(10) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x) + +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11 BIT(11) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_SET(x)\ + FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x) +#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_GET(x)\ + FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x) + +/* PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */ +#define PORT_CONF_USGMII_CFG(g) __REG(TARGET_PORT_CONF, 0, 1, 72, g, 6, 8, 0, 0, 1, 4) + +#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM BIT(9) +#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_BYPASS_SCRAM, x) +#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_BYPASS_SCRAM, x) + +#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM BIT(8) +#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_BYPASS_DESCRAM, x) +#define 
PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_BYPASS_DESCRAM, x) + +#define PORT_CONF_USGMII_CFG_FLIP_LANES BIT(7) +#define PORT_CONF_USGMII_CFG_FLIP_LANES_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_FLIP_LANES, x) +#define PORT_CONF_USGMII_CFG_FLIP_LANES_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_FLIP_LANES, x) + +#define PORT_CONF_USGMII_CFG_SHYST_DIS BIT(6) +#define PORT_CONF_USGMII_CFG_SHYST_DIS_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_SHYST_DIS, x) +#define PORT_CONF_USGMII_CFG_SHYST_DIS_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_SHYST_DIS, x) + +#define PORT_CONF_USGMII_CFG_E_DET_ENA BIT(5) +#define PORT_CONF_USGMII_CFG_E_DET_ENA_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_E_DET_ENA, x) +#define PORT_CONF_USGMII_CFG_E_DET_ENA_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_E_DET_ENA, x) + +#define PORT_CONF_USGMII_CFG_USE_I1_ENA BIT(4) +#define PORT_CONF_USGMII_CFG_USE_I1_ENA_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_USE_I1_ENA, x) +#define PORT_CONF_USGMII_CFG_USE_I1_ENA_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_USE_I1_ENA, x) + +#define PORT_CONF_USGMII_CFG_QUAD_MODE BIT(1) +#define PORT_CONF_USGMII_CFG_QUAD_MODE_SET(x)\ + FIELD_PREP(PORT_CONF_USGMII_CFG_QUAD_MODE, x) +#define PORT_CONF_USGMII_CFG_QUAD_MODE_GET(x)\ + FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x) + +/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */ +#define PTP_PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 0, 0, 1, 4) + +#define PTP_PTP_PIN_INTR_INTR_PTP GENMASK(4, 0) +#define PTP_PTP_PIN_INTR_INTR_PTP_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_INTR_INTR_PTP, x) +#define PTP_PTP_PIN_INTR_INTR_PTP_GET(x)\ + FIELD_GET(PTP_PTP_PIN_INTR_INTR_PTP, x) + +/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */ +#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 4, 0, 1, 4) + +#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA GENMASK(4, 0) +#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x) +#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_GET(x)\ + FIELD_GET(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x) + +/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */ +#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 8, 0, 1, 4) + +#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT GENMASK(4, 0) +#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_SET(x)\ + FIELD_PREP(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x) +#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_GET(x)\ + FIELD_GET(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x) + +/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */ +#define PTP_PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 12, 0, 1, 4) + +#define PTP_PTP_DOM_CFG_PTP_ENA GENMASK(11, 9) +#define PTP_PTP_DOM_CFG_PTP_ENA_SET(x)\ + FIELD_PREP(PTP_PTP_DOM_CFG_PTP_ENA, x) +#define PTP_PTP_DOM_CFG_PTP_ENA_GET(x)\ + FIELD_GET(PTP_PTP_DOM_CFG_PTP_ENA, x) + +#define PTP_PTP_DOM_CFG_PTP_HOLD GENMASK(8, 6) +#define PTP_PTP_DOM_CFG_PTP_HOLD_SET(x)\ + FIELD_PREP(PTP_PTP_DOM_CFG_PTP_HOLD, x) +#define PTP_PTP_DOM_CFG_PTP_HOLD_GET(x)\ + FIELD_GET(PTP_PTP_DOM_CFG_PTP_HOLD, x) + +#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE GENMASK(5, 3) +#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_SET(x)\ + FIELD_PREP(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x) +#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_GET(x)\ + FIELD_GET(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x) + +#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS GENMASK(2, 0) +#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(x)\ + FIELD_PREP(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x) +#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_GET(x)\ + FIELD_GET(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */ +#define PTP_CLK_PER_CFG(g, r) 
__REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 0, r, 2, 4) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */ +#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 8, 0, 1, 4) + +#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC GENMASK(29, 0) +#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_SET(x)\ + FIELD_PREP(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x) +#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_GET(x)\ + FIELD_GET(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */ +#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 12, 0, 1, 4) + +#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC GENMASK(7, 0) +#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_SET(x)\ + FIELD_PREP(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x) +#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_GET(x)\ + FIELD_GET(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */ +#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 16, 0, 1, 4) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */ +#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 20, 0, 1, 4) + +#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB GENMASK(15, 0) +#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_SET(x)\ + FIELD_PREP(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x) +#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_GET(x)\ + FIELD_GET(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */ +#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 24, 0, 1, 4) + +/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */ +#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 0, 0, 1, 4) + +#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION GENMASK(28, 26) +#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC GENMASK(25, 24) +#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL BIT(23) +#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT GENMASK(22, 21) +#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x) + +#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT GENMASK(20, 18) +#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x) +#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_DOM GENMASK(17, 16) +#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_OPT GENMASK(15, 14) +#define PTP_PTP_PIN_CFG_PTP_PIN_OPT_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_OPT_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK BIT(13) +#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x) +#define 
PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS GENMASK(12, 0) +#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x) + +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */ +#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 4, 0, 1, 4) + +#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB GENMASK(15, 0) +#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(x)\ + FIELD_PREP(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x) +#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_GET(x)\ + FIELD_GET(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x) + +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */ +#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 8, 0, 1, 4) + +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */ +#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 12, 0, 1, 4) + +#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC GENMASK(29, 0) +#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(x)\ + FIELD_PREP(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x) +#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_GET(x)\ + FIELD_GET(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x) + +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */ +#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 16, 0, 1, 4) + +#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC GENMASK(7, 0) +#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_SET(x)\ + FIELD_PREP(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x) +#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_GET(x)\ + FIELD_GET(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x) + +/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */ +#define PTP_NTP_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 20, 0, 1, 4) + +/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */ +#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 24, 0, 1, 4) + +#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH GENMASK(29, 0) +#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_SET(x)\ + FIELD_PREP(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x) +#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_GET(x)\ + FIELD_GET(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x) + +/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */ +#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 28, 0, 1, 4) + +#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL GENMASK(29, 0) +#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_SET(x)\ + FIELD_PREP(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x) +#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_GET(x)\ + FIELD_GET(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x) + +/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */ +#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 32, 0, 1, 4) + +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL GENMASK(18, 3) +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_SET(x)\ + FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x) +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_GET(x)\ + FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x) + +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG GENMASK(2, 0) +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_SET(x)\ + FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x) +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_GET(x)\ + FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x) + +/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */ +#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 0, 0, 1, 4) + +#define PTP_PHAD_CTRL_PHAD_ENA BIT(7) +#define PTP_PHAD_CTRL_PHAD_ENA_SET(x)\ + FIELD_PREP(PTP_PHAD_CTRL_PHAD_ENA, x) +#define PTP_PHAD_CTRL_PHAD_ENA_GET(x)\ + 
FIELD_GET(PTP_PHAD_CTRL_PHAD_ENA, x) + +#define PTP_PHAD_CTRL_PHAD_FAILED BIT(6) +#define PTP_PHAD_CTRL_PHAD_FAILED_SET(x)\ + FIELD_PREP(PTP_PHAD_CTRL_PHAD_FAILED, x) +#define PTP_PHAD_CTRL_PHAD_FAILED_GET(x)\ + FIELD_GET(PTP_PHAD_CTRL_PHAD_FAILED, x) + +#define PTP_PHAD_CTRL_REDUCED_RES GENMASK(5, 3) +#define PTP_PHAD_CTRL_REDUCED_RES_SET(x)\ + FIELD_PREP(PTP_PHAD_CTRL_REDUCED_RES, x) +#define PTP_PHAD_CTRL_REDUCED_RES_GET(x)\ + FIELD_GET(PTP_PHAD_CTRL_REDUCED_RES, x) + +#define PTP_PHAD_CTRL_LOCK_ACC GENMASK(2, 0) +#define PTP_PHAD_CTRL_LOCK_ACC_SET(x)\ + FIELD_PREP(PTP_PHAD_CTRL_LOCK_ACC, x) +#define PTP_PHAD_CTRL_LOCK_ACC_GET(x)\ + FIELD_GET(PTP_PHAD_CTRL_LOCK_ACC, x) + +/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */ +#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 4, 0, 1, 4) + +/* QFWD:SYSTEM:SWITCH_PORT_MODE */ +#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, 70, 4) + +#define QFWD_SWITCH_PORT_MODE_PORT_ENA BIT(19) +#define QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_PORT_ENA, x) +#define QFWD_SWITCH_PORT_MODE_PORT_ENA_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_PORT_ENA, x) + +#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY GENMASK(18, 10) +#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_FWD_URGENCY, x) +#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_FWD_URGENCY, x) + +#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD GENMASK(9, 6) +#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_YEL_RSRVD, x) +#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_YEL_RSRVD, x) + +#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(5) +#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE, x) +#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE, x) + +#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING BIT(4) +#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING, x) +#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING, x) + +#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING BIT(3) +#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING, x) +#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING, x) + +#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE BIT(2) +#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE, x) +#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE, x) + +#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS BIT(1) +#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS, x) +#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS, x) + +#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE BIT(0) +#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_SET(x)\ + FIELD_PREP(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x) +#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_GET(x)\ + FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x) + +/* QRES:RES_CTRL:RES_CFG */ +#define QRES_RES_CFG(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 0, 0, 1, 4) + +#define QRES_RES_CFG_WM_HIGH GENMASK(11, 0) +#define QRES_RES_CFG_WM_HIGH_SET(x)\ + 
FIELD_PREP(QRES_RES_CFG_WM_HIGH, x) +#define QRES_RES_CFG_WM_HIGH_GET(x)\ + FIELD_GET(QRES_RES_CFG_WM_HIGH, x) + +/* QRES:RES_CTRL:RES_STAT */ +#define QRES_RES_STAT(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 4, 0, 1, 4) + +#define QRES_RES_STAT_MAXUSE GENMASK(20, 0) +#define QRES_RES_STAT_MAXUSE_SET(x)\ + FIELD_PREP(QRES_RES_STAT_MAXUSE, x) +#define QRES_RES_STAT_MAXUSE_GET(x)\ + FIELD_GET(QRES_RES_STAT_MAXUSE, x) + +/* QRES:RES_CTRL:RES_STAT_CUR */ +#define QRES_RES_STAT_CUR(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 8, 0, 1, 4) + +#define QRES_RES_STAT_CUR_INUSE GENMASK(20, 0) +#define QRES_RES_STAT_CUR_INUSE_SET(x)\ + FIELD_PREP(QRES_RES_STAT_CUR_INUSE, x) +#define QRES_RES_STAT_CUR_INUSE_GET(x)\ + FIELD_GET(QRES_RES_STAT_CUR_INUSE, x) + +/* DEVCPU_QS:XTR:XTR_GRP_CFG */ +#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4) + +#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2) +#define QS_XTR_GRP_CFG_MODE_SET(x)\ + FIELD_PREP(QS_XTR_GRP_CFG_MODE, x) +#define QS_XTR_GRP_CFG_MODE_GET(x)\ + FIELD_GET(QS_XTR_GRP_CFG_MODE, x) + +#define QS_XTR_GRP_CFG_STATUS_WORD_POS BIT(1) +#define QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(x)\ + FIELD_PREP(QS_XTR_GRP_CFG_STATUS_WORD_POS, x) +#define QS_XTR_GRP_CFG_STATUS_WORD_POS_GET(x)\ + FIELD_GET(QS_XTR_GRP_CFG_STATUS_WORD_POS, x) + +#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0) +#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\ + FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x) +#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\ + FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x) + +/* DEVCPU_QS:XTR:XTR_RD */ +#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4) + +/* DEVCPU_QS:XTR:XTR_FLUSH */ +#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4) + +#define QS_XTR_FLUSH_FLUSH GENMASK(1, 0) +#define QS_XTR_FLUSH_FLUSH_SET(x)\ + FIELD_PREP(QS_XTR_FLUSH_FLUSH, x) +#define QS_XTR_FLUSH_FLUSH_GET(x)\ + FIELD_GET(QS_XTR_FLUSH_FLUSH, x) + +/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */ +#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4) + +#define QS_XTR_DATA_PRESENT_DATA_PRESENT GENMASK(1, 0) +#define QS_XTR_DATA_PRESENT_DATA_PRESENT_SET(x)\ + FIELD_PREP(QS_XTR_DATA_PRESENT_DATA_PRESENT, x) +#define QS_XTR_DATA_PRESENT_DATA_PRESENT_GET(x)\ + FIELD_GET(QS_XTR_DATA_PRESENT_DATA_PRESENT, x) + +/* DEVCPU_QS:INJ:INJ_GRP_CFG */ +#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4) + +#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2) +#define QS_INJ_GRP_CFG_MODE_SET(x)\ + FIELD_PREP(QS_INJ_GRP_CFG_MODE, x) +#define QS_INJ_GRP_CFG_MODE_GET(x)\ + FIELD_GET(QS_INJ_GRP_CFG_MODE, x) + +#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0) +#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\ + FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x) +#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\ + FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x) + +/* DEVCPU_QS:INJ:INJ_WR */ +#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4) + +/* DEVCPU_QS:INJ:INJ_CTRL */ +#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4) + +#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21) +#define QS_INJ_CTRL_GAP_SIZE_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x) +#define QS_INJ_CTRL_GAP_SIZE_GET(x)\ + FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x) + +#define QS_INJ_CTRL_ABORT BIT(20) +#define QS_INJ_CTRL_ABORT_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_ABORT, x) +#define QS_INJ_CTRL_ABORT_GET(x)\ + FIELD_GET(QS_INJ_CTRL_ABORT, x) + +#define QS_INJ_CTRL_EOF BIT(19) +#define QS_INJ_CTRL_EOF_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_EOF, x) +#define QS_INJ_CTRL_EOF_GET(x)\ + FIELD_GET(QS_INJ_CTRL_EOF, x) + +#define 
QS_INJ_CTRL_SOF BIT(18) +#define QS_INJ_CTRL_SOF_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_SOF, x) +#define QS_INJ_CTRL_SOF_GET(x)\ + FIELD_GET(QS_INJ_CTRL_SOF, x) + +#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16) +#define QS_INJ_CTRL_VLD_BYTES_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x) +#define QS_INJ_CTRL_VLD_BYTES_GET(x)\ + FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x) + +/* DEVCPU_QS:INJ:INJ_STATUS */ +#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4) + +#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4) +#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\ + FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x) +#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\ + FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x) + +#define QS_INJ_STATUS_FIFO_RDY GENMASK(3, 2) +#define QS_INJ_STATUS_FIFO_RDY_SET(x)\ + FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x) +#define QS_INJ_STATUS_FIFO_RDY_GET(x)\ + FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x) + +#define QS_INJ_STATUS_INJ_IN_PROGRESS GENMASK(1, 0) +#define QS_INJ_STATUS_INJ_IN_PROGRESS_SET(x)\ + FIELD_PREP(QS_INJ_STATUS_INJ_IN_PROGRESS, x) +#define QS_INJ_STATUS_INJ_IN_PROGRESS_GET(x)\ + FIELD_GET(QS_INJ_STATUS_INJ_IN_PROGRESS, x) + +/* QSYS:PAUSE_CFG:PAUSE_CFG */ +#define QSYS_PAUSE_CFG(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 0, r, 70, 4) + +#define QSYS_PAUSE_CFG_PAUSE_START GENMASK(25, 14) +#define QSYS_PAUSE_CFG_PAUSE_START_SET(x)\ + FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_START, x) +#define QSYS_PAUSE_CFG_PAUSE_START_GET(x)\ + FIELD_GET(QSYS_PAUSE_CFG_PAUSE_START, x) + +#define QSYS_PAUSE_CFG_PAUSE_STOP GENMASK(13, 2) +#define QSYS_PAUSE_CFG_PAUSE_STOP_SET(x)\ + FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_STOP, x) +#define QSYS_PAUSE_CFG_PAUSE_STOP_GET(x)\ + FIELD_GET(QSYS_PAUSE_CFG_PAUSE_STOP, x) + +#define QSYS_PAUSE_CFG_PAUSE_ENA BIT(1) +#define QSYS_PAUSE_CFG_PAUSE_ENA_SET(x)\ + FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_ENA, x) +#define QSYS_PAUSE_CFG_PAUSE_ENA_GET(x)\ + FIELD_GET(QSYS_PAUSE_CFG_PAUSE_ENA, x) + +#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA BIT(0) +#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_SET(x)\ + FIELD_PREP(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x) +#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_GET(x)\ + FIELD_GET(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x) + +/* QSYS:PAUSE_CFG:ATOP */ +#define QSYS_ATOP(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 284, r, 70, 4) + +#define QSYS_ATOP_ATOP GENMASK(11, 0) +#define QSYS_ATOP_ATOP_SET(x)\ + FIELD_PREP(QSYS_ATOP_ATOP, x) +#define QSYS_ATOP_ATOP_GET(x)\ + FIELD_GET(QSYS_ATOP_ATOP, x) + +/* QSYS:PAUSE_CFG:FWD_PRESSURE */ +#define QSYS_FWD_PRESSURE(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 564, r, 70, 4) + +#define QSYS_FWD_PRESSURE_FWD_PRESSURE GENMASK(11, 1) +#define QSYS_FWD_PRESSURE_FWD_PRESSURE_SET(x)\ + FIELD_PREP(QSYS_FWD_PRESSURE_FWD_PRESSURE, x) +#define QSYS_FWD_PRESSURE_FWD_PRESSURE_GET(x)\ + FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE, x) + +#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS BIT(0) +#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(x)\ + FIELD_PREP(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x) +#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_GET(x)\ + FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x) + +/* QSYS:PAUSE_CFG:ATOP_TOT_CFG */ +#define QSYS_ATOP_TOT_CFG __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 844, 0, 1, 4) + +#define QSYS_ATOP_TOT_CFG_ATOP_TOT GENMASK(11, 0) +#define QSYS_ATOP_TOT_CFG_ATOP_TOT_SET(x)\ + FIELD_PREP(QSYS_ATOP_TOT_CFG_ATOP_TOT, x) +#define QSYS_ATOP_TOT_CFG_ATOP_TOT_GET(x)\ + FIELD_GET(QSYS_ATOP_TOT_CFG_ATOP_TOT, x) + +/* QSYS:CALCFG:CAL_AUTO */ +#define QSYS_CAL_AUTO(r) __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 
40, 0, r, 7, 4) + +#define QSYS_CAL_AUTO_CAL_AUTO GENMASK(29, 0) +#define QSYS_CAL_AUTO_CAL_AUTO_SET(x)\ + FIELD_PREP(QSYS_CAL_AUTO_CAL_AUTO, x) +#define QSYS_CAL_AUTO_CAL_AUTO_GET(x)\ + FIELD_GET(QSYS_CAL_AUTO_CAL_AUTO, x) + +/* QSYS:CALCFG:CAL_CTRL */ +#define QSYS_CAL_CTRL __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 40, 36, 0, 1, 4) + +#define QSYS_CAL_CTRL_CAL_MODE GENMASK(14, 11) +#define QSYS_CAL_CTRL_CAL_MODE_SET(x)\ + FIELD_PREP(QSYS_CAL_CTRL_CAL_MODE, x) +#define QSYS_CAL_CTRL_CAL_MODE_GET(x)\ + FIELD_GET(QSYS_CAL_CTRL_CAL_MODE, x) + +#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE GENMASK(10, 1) +#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(x)\ + FIELD_PREP(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE, x) +#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_GET(x)\ + FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE, x) + +#define QSYS_CAL_CTRL_CAL_AUTO_ERROR BIT(0) +#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_SET(x)\ + FIELD_PREP(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x) +#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(x)\ + FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x) + +/* QSYS:RAM_CTRL:RAM_INIT */ +#define QSYS_RAM_INIT __REG(TARGET_QSYS, 0, 1, 2344, 0, 1, 4, 0, 0, 1, 4) + +#define QSYS_RAM_INIT_RAM_INIT BIT(1) +#define QSYS_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(QSYS_RAM_INIT_RAM_INIT, x) +#define QSYS_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(QSYS_RAM_INIT_RAM_INIT, x) + +#define QSYS_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define QSYS_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(QSYS_RAM_INIT_RAM_CFG_HOOK, x) +#define QSYS_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(QSYS_RAM_INIT_RAM_CFG_HOOK, x) + +/* REW:COMMON:OWN_UPSID */ +#define REW_OWN_UPSID(r) __REG(TARGET_REW, 0, 1, 387264, 0, 1, 1232, 0, r, 3, 4) + +#define REW_OWN_UPSID_OWN_UPSID GENMASK(4, 0) +#define REW_OWN_UPSID_OWN_UPSID_SET(x)\ + FIELD_PREP(REW_OWN_UPSID_OWN_UPSID, x) +#define REW_OWN_UPSID_OWN_UPSID_GET(x)\ + FIELD_GET(REW_OWN_UPSID_OWN_UPSID, x) + +/* REW:PORT:PORT_VLAN_CFG */ +#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 0, 0, 1, 4) + +#define REW_PORT_VLAN_CFG_PORT_PCP GENMASK(15, 13) +#define REW_PORT_VLAN_CFG_PORT_PCP_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_PCP, x) +#define REW_PORT_VLAN_CFG_PORT_PCP_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_PCP, x) + +#define REW_PORT_VLAN_CFG_PORT_DEI BIT(12) +#define REW_PORT_VLAN_CFG_PORT_DEI_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_DEI, x) +#define REW_PORT_VLAN_CFG_PORT_DEI_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_DEI, x) + +#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0) +#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x) +#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x) + +/* REW:PORT:TAG_CTRL */ +#define REW_TAG_CTRL(g) __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 132, 0, 1, 4) + +#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED BIT(13) +#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_SET(x)\ + FIELD_PREP(REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED, x) +#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_GET(x)\ + FIELD_GET(REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED, x) + +#define REW_TAG_CTRL_TAG_CFG GENMASK(12, 11) +#define REW_TAG_CTRL_TAG_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CTRL_TAG_CFG, x) +#define REW_TAG_CTRL_TAG_CFG_GET(x)\ + FIELD_GET(REW_TAG_CTRL_TAG_CFG, x) + +#define REW_TAG_CTRL_TAG_TPID_CFG GENMASK(10, 8) +#define REW_TAG_CTRL_TAG_TPID_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CTRL_TAG_TPID_CFG, x) +#define REW_TAG_CTRL_TAG_TPID_CFG_GET(x)\ + FIELD_GET(REW_TAG_CTRL_TAG_TPID_CFG, x) + +#define REW_TAG_CTRL_TAG_VID_CFG GENMASK(7, 6) +#define 
REW_TAG_CTRL_TAG_VID_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CTRL_TAG_VID_CFG, x) +#define REW_TAG_CTRL_TAG_VID_CFG_GET(x)\ + FIELD_GET(REW_TAG_CTRL_TAG_VID_CFG, x) + +#define REW_TAG_CTRL_TAG_PCP_CFG GENMASK(5, 3) +#define REW_TAG_CTRL_TAG_PCP_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CTRL_TAG_PCP_CFG, x) +#define REW_TAG_CTRL_TAG_PCP_CFG_GET(x)\ + FIELD_GET(REW_TAG_CTRL_TAG_PCP_CFG, x) + +#define REW_TAG_CTRL_TAG_DEI_CFG GENMASK(2, 0) +#define REW_TAG_CTRL_TAG_DEI_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CTRL_TAG_DEI_CFG, x) +#define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\ + FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x) + +/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */ +#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4) + +#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12) +#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x) +#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x) + +#define REW_PTP_TWOSTEP_CTRL_PTP_NXT BIT(11) +#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x) +#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x) + +#define REW_PTP_TWOSTEP_CTRL_PTP_VLD BIT(10) +#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x) +#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x) + +#define REW_PTP_TWOSTEP_CTRL_STAMP_TX BIT(9) +#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x) +#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x) + +#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1) +#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x) +#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x) + +#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL BIT(0) +#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x) +#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x) + +/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */ +#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4) + +#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(29, 0) +#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x) +#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x) + +/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */ +#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4) + +#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC GENMASK(7, 0) +#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x) +#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x) + +/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */ +#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4) + +/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */ +#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4) + +/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */ +#define REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4) + +#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2 GENMASK(5, 0) +#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_SET(x)\ + FIELD_PREP(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x) 
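+/* Usage sketch (illustration only, not from the datasheet): each field FOO + * of a register REG gets a mask REG_FOO plus REG_FOO_SET(x) and + * REG_FOO_GET(x) helpers built on FIELD_PREP/FIELD_GET, so a single-field + * update is a read-modify-write. The CAL_MODE value 8 is an arbitrary + * example: + * + *	u32 val = spx5_rd(sparx5, QSYS_CAL_CTRL); + * + *	val &= ~QSYS_CAL_CTRL_CAL_MODE; + *	val |= QSYS_CAL_CTRL_CAL_MODE_SET(8); + *	spx5_wr(val, sparx5, QSYS_CAL_CTRL); + * + * or, equivalently, spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(8), + * QSYS_CAL_CTRL_CAL_MODE, sparx5, QSYS_CAL_CTRL). + */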
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_GET(x)\ + FIELD_GET(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x) + +/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */ +#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 24, r, 4, 4) + +#define REW_PTP_GEN_STAMP_FMT_RT_OFS GENMASK(6, 2) +#define REW_PTP_GEN_STAMP_FMT_RT_OFS_SET(x)\ + FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_OFS, x) +#define REW_PTP_GEN_STAMP_FMT_RT_OFS_GET(x)\ + FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_OFS, x) + +#define REW_PTP_GEN_STAMP_FMT_RT_FMT GENMASK(1, 0) +#define REW_PTP_GEN_STAMP_FMT_RT_FMT_SET(x)\ + FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_FMT, x) +#define REW_PTP_GEN_STAMP_FMT_RT_FMT_GET(x)\ + FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_FMT, x) + +/* REW:RAM_CTRL:RAM_INIT */ +#define REW_RAM_INIT __REG(TARGET_REW, 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4) + +#define REW_RAM_INIT_RAM_INIT BIT(1) +#define REW_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(REW_RAM_INIT_RAM_INIT, x) +#define REW_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(REW_RAM_INIT_RAM_INIT, x) + +#define REW_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define REW_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(REW_RAM_INIT_RAM_CFG_HOOK, x) +#define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x) + +/* VCAP_SUPER:RAM_CTRL:RAM_INIT */ +#define VCAP_SUPER_RAM_INIT __REG(TARGET_VCAP_SUPER, 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4) + +#define VCAP_SUPER_RAM_INIT_RAM_INIT BIT(1) +#define VCAP_SUPER_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(VCAP_SUPER_RAM_INIT_RAM_INIT, x) +#define VCAP_SUPER_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_INIT, x) + +#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x) +#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x) + +/* VOP:RAM_CTRL:RAM_INIT */ +#define VOP_RAM_INIT __REG(TARGET_VOP, 0, 1, 279176, 0, 1, 4, 0, 0, 1, 4) + +#define VOP_RAM_INIT_RAM_INIT BIT(1) +#define VOP_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(VOP_RAM_INIT_RAM_INIT, x) +#define VOP_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(VOP_RAM_INIT_RAM_INIT, x) + +#define VOP_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define VOP_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(VOP_RAM_INIT_RAM_CFG_HOOK, x) +#define VOP_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(VOP_RAM_INIT_RAM_CFG_HOOK, x) + +/* XQS:SYSTEM:STAT_CFG */ +#define XQS_STAT_CFG __REG(TARGET_XQS, 0, 1, 6768, 0, 1, 872, 860, 0, 1, 4) + +#define XQS_STAT_CFG_STAT_CLEAR_SHOT GENMASK(21, 18) +#define XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(x)\ + FIELD_PREP(XQS_STAT_CFG_STAT_CLEAR_SHOT, x) +#define XQS_STAT_CFG_STAT_CLEAR_SHOT_GET(x)\ + FIELD_GET(XQS_STAT_CFG_STAT_CLEAR_SHOT, x) + +#define XQS_STAT_CFG_STAT_VIEW GENMASK(17, 5) +#define XQS_STAT_CFG_STAT_VIEW_SET(x)\ + FIELD_PREP(XQS_STAT_CFG_STAT_VIEW, x) +#define XQS_STAT_CFG_STAT_VIEW_GET(x)\ + FIELD_GET(XQS_STAT_CFG_STAT_VIEW, x) + +#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY BIT(4) +#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_SET(x)\ + FIELD_PREP(XQS_STAT_CFG_STAT_SRV_PKT_ONLY, x) +#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_GET(x)\ + FIELD_GET(XQS_STAT_CFG_STAT_SRV_PKT_ONLY, x) + +#define XQS_STAT_CFG_STAT_WRAP_DIS GENMASK(3, 0) +#define XQS_STAT_CFG_STAT_WRAP_DIS_SET(x)\ + FIELD_PREP(XQS_STAT_CFG_STAT_WRAP_DIS, x) +#define XQS_STAT_CFG_STAT_WRAP_DIS_GET(x)\ + FIELD_GET(XQS_STAT_CFG_STAT_WRAP_DIS, x) + +/* XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */ +#define XQS_QLIMIT_SHR_TOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 0, 0, 1, 4) + +#define 
XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP GENMASK(14, 0) +#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_SET(x)\ + FIELD_PREP(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x) +#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_GET(x)\ + FIELD_GET(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x) + +/* XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */ +#define XQS_QLIMIT_SHR_ATOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 4, 0, 1, 4) + +#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP GENMASK(14, 0) +#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_SET(x)\ + FIELD_PREP(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x) +#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_GET(x)\ + FIELD_GET(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x) + +/* XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */ +#define XQS_QLIMIT_SHR_CTOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 8, 0, 1, 4) + +#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP GENMASK(14, 0) +#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_SET(x)\ + FIELD_PREP(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x) +#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_GET(x)\ + FIELD_GET(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x) + +/* XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */ +#define XQS_QLIMIT_SHR_QLIM_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 12, 0, 1, 4) + +#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM GENMASK(14, 0) +#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_SET(x)\ + FIELD_PREP(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x) +#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_GET(x)\ + FIELD_GET(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x) + +/* XQS:STAT:CNT */ +#define XQS_CNT(g) __REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4) + +#endif /* _SPARX5_MAIN_REGS_H_ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c new file mode 100644 index 000000000..d07815658 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + */ + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_port.h" +#include "sparx5_tc.h" + +/* The IFH bit position of the first VSTAX bit. This is because the + * VSTAX bit positions in Data sheet is starting from zero. + */ +#define VSTAX 73 + +#define ifh_encode_bitfield(ifh, value, pos, _width) \ + ({ \ + u32 width = (_width); \ + \ + /* Max width is 5 bytes - 40 bits. 
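The encoded value is shifted left by (pos % 8) bits before being written, so a field may straddle byte boundaries.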
In the worst case it will + * spread over 6 bytes - 48 bits + */ \ + compiletime_assert(width <= 40, \ + "Unsupported width, must be <= 40"); \ + __ifh_encode_bitfield((ifh), (value), (pos), width); \ + }) + +static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width) +{ + u8 *ifh_hdr = ifh; + /* Calculate the Start IFH byte position of this IFH bit position */ + u32 byte = (35 - (pos / 8)); + /* Calculate the Start bit position in the Start IFH byte */ + u32 bit = (pos % 8); + u64 encode = GENMASK_ULL(bit + width - 1, bit) & (value << bit); + + /* Bits b0-b7 go into the start IFH byte */ + if (encode & 0xFF) + ifh_hdr[byte] |= (u8)((encode & 0xFF)); + /* Bits b8-b15 go into the next IFH byte */ + if (encode & 0xFF00) + ifh_hdr[byte - 1] |= (u8)((encode & 0xFF00) >> 8); + /* Bits b16-b23 go into the next IFH byte */ + if (encode & 0xFF0000) + ifh_hdr[byte - 2] |= (u8)((encode & 0xFF0000) >> 16); + /* Bits b24-b31 go into the next IFH byte */ + if (encode & 0xFF000000) + ifh_hdr[byte - 3] |= (u8)((encode & 0xFF000000) >> 24); + /* Bits b32-b39 go into the next IFH byte */ + if (encode & 0xFF00000000) + ifh_hdr[byte - 4] |= (u8)((encode & 0xFF00000000) >> 32); + /* Bits b40-b47 go into the next IFH byte */ + if (encode & 0xFF0000000000) + ifh_hdr[byte - 5] |= (u8)((encode & 0xFF0000000000) >> 40); +} + +void sparx5_set_port_ifh(void *ifh_hdr, u16 portno) +{ + /* VSTAX.RSV = 1. MSBit must be 1 */ + ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 79, 1); + /* VSTAX.INGR_DROP_MODE = Enable. Avoid head-of-line blocking */ + ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 55, 1); + /* MISC.CPU_MASK/DPORT = Destination port */ + ifh_encode_bitfield(ifh_hdr, portno, 29, 8); + /* MISC.PIPELINE_PT */ + ifh_encode_bitfield(ifh_hdr, 16, 37, 5); + /* MISC.PIPELINE_ACT */ + ifh_encode_bitfield(ifh_hdr, 1, 42, 3); + /* FWD.SRC_PORT = CPU */ + ifh_encode_bitfield(ifh_hdr, SPX5_PORT_CPU, 46, 7); + /* FWD.SFLOW_ID (disable SFlow sampling) */ + ifh_encode_bitfield(ifh_hdr, 124, 57, 7); + /* FWD.UPDATE_FCS = Enable. Enforce update of FCS. 
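A dummy FCS word is appended at injection time (see sparx5_inject()), so the switch must recompute a valid FCS on egress.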
*/ + ifh_encode_bitfield(ifh_hdr, 1, 67, 1); +} + +void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op) +{ + ifh_encode_bitfield(ifh_hdr, rew_op, VSTAX + 32, 10); +} + +void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type) +{ + ifh_encode_bitfield(ifh_hdr, pdu_type, 191, 4); +} + +void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset) +{ + ifh_encode_bitfield(ifh_hdr, pdu_w16_offset, 195, 6); +} + +void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp) +{ + ifh_encode_bitfield(ifh_hdr, timestamp, 232, 40); +} + +static int sparx5_port_open(struct net_device *ndev) +{ + struct sparx5_port *port = netdev_priv(ndev); + int err = 0; + + sparx5_port_enable(port, true); + err = phylink_of_phy_connect(port->phylink, port->of_node, 0); + if (err) { + netdev_err(ndev, "Could not attach to PHY\n"); + goto err_connect; + } + + phylink_start(port->phylink); + + if (!ndev->phydev) { + /* power up serdes */ + port->conf.power_down = false; + if (port->conf.serdes_reset) + err = sparx5_serdes_set(port->sparx5, port, &port->conf); + else + err = phy_power_on(port->serdes); + if (err) { + netdev_err(ndev, "%s failed\n", __func__); + goto out_power; + } + } + + return 0; + +out_power: + phylink_stop(port->phylink); + phylink_disconnect_phy(port->phylink); +err_connect: + sparx5_port_enable(port, false); + + return err; +} + +static int sparx5_port_stop(struct net_device *ndev) +{ + struct sparx5_port *port = netdev_priv(ndev); + int err = 0; + + sparx5_port_enable(port, false); + phylink_stop(port->phylink); + phylink_disconnect_phy(port->phylink); + + if (!ndev->phydev) { + /* power down serdes */ + port->conf.power_down = true; + if (port->conf.serdes_reset) + err = sparx5_serdes_set(port->sparx5, port, &port->conf); + else + err = phy_power_off(port->serdes); + if (err) + netdev_err(ndev, "%s failed\n", __func__); + } + return 0; +} + +static void sparx5_set_rx_mode(struct net_device *dev) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + + if (!test_bit(port->portno, sparx5->bridge_mask)) + __dev_mc_sync(dev, sparx5_mc_sync, sparx5_mc_unsync); +} + +static int sparx5_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct sparx5_port *port = netdev_priv(dev); + int ret; + + ret = snprintf(buf, len, "p%d", port->portno); + if (ret >= len) + return -EINVAL; + + return 0; +} + +static int sparx5_set_mac_address(struct net_device *dev, void *p) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + const struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* Remove current */ + sparx5_mact_forget(sparx5, dev->dev_addr, port->pvid); + + /* Add new */ + sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid); + + /* Record the address */ + eth_hw_addr_set(dev, addr->sa_data); + + return 0; +} + +static int sparx5_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct sparx5_port *sparx5_port = netdev_priv(dev); + struct sparx5 *sparx5 = sparx5_port->sparx5; + + ppid->id_len = sizeof(sparx5->base_mac); + memcpy(&ppid->id, &sparx5->base_mac, ppid->id_len); + + return 0; +} + +static int sparx5_port_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + struct sparx5_port *sparx5_port = netdev_priv(dev); + struct sparx5 *sparx5 = sparx5_port->sparx5; + + if (!phy_has_hwtstamp(dev->phydev) && sparx5->ptp) { + switch (cmd) { + case SIOCSHWTSTAMP: + return 
sparx5_ptp_hwtstamp_set(sparx5_port, ifr); + case SIOCGHWTSTAMP: + return sparx5_ptp_hwtstamp_get(sparx5_port, ifr); + } + } + + return phy_mii_ioctl(dev->phydev, ifr, cmd); +} + +static const struct net_device_ops sparx5_port_netdev_ops = { + .ndo_open = sparx5_port_open, + .ndo_stop = sparx5_port_stop, + .ndo_start_xmit = sparx5_port_xmit_impl, + .ndo_set_rx_mode = sparx5_set_rx_mode, + .ndo_get_phys_port_name = sparx5_port_get_phys_port_name, + .ndo_set_mac_address = sparx5_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_get_stats64 = sparx5_get_stats64, + .ndo_get_port_parent_id = sparx5_get_port_parent_id, + .ndo_eth_ioctl = sparx5_port_ioctl, + .ndo_setup_tc = sparx5_port_setup_tc, +}; + +bool sparx5_netdevice_check(const struct net_device *dev) +{ + return dev && (dev->netdev_ops == &sparx5_port_netdev_ops); +} + +struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno) +{ + struct sparx5_port *spx5_port; + struct net_device *ndev; + + ndev = devm_alloc_etherdev_mqs(sparx5->dev, sizeof(struct sparx5_port), + SPX5_PRIOS, 1); + if (!ndev) + return ERR_PTR(-ENOMEM); + + ndev->hw_features |= NETIF_F_HW_TC; + ndev->features |= NETIF_F_HW_TC; + + SET_NETDEV_DEV(ndev, sparx5->dev); + spx5_port = netdev_priv(ndev); + spx5_port->ndev = ndev; + spx5_port->sparx5 = sparx5; + spx5_port->portno = portno; + + ndev->netdev_ops = &sparx5_port_netdev_ops; + ndev->ethtool_ops = &sparx5_ethtool_ops; + + eth_hw_addr_gen(ndev, sparx5->base_mac, portno + 1); + + return ndev; +} + +int sparx5_register_netdevs(struct sparx5 *sparx5) +{ + int portno; + int err; + + for (portno = 0; portno < SPX5_PORTS; portno++) + if (sparx5->ports[portno]) { + err = register_netdev(sparx5->ports[portno]->ndev); + if (err) { + dev_err(sparx5->dev, + "port: %02u: netdev registration failed\n", + portno); + return err; + } + sparx5_port_inj_timer_setup(sparx5->ports[portno]); + } + return 0; +} + +void sparx5_destroy_netdevs(struct sparx5 *sparx5) +{ + struct sparx5_port *port; + int portno; + + for (portno = 0; portno < SPX5_PORTS; portno++) { + port = sparx5->ports[portno]; + if (port && port->phylink) { + /* Disconnect the phy */ + rtnl_lock(); + sparx5_port_stop(port->ndev); + phylink_disconnect_phy(port->phylink); + rtnl_unlock(); + phylink_destroy(port->phylink); + port->phylink = NULL; + } + } +} + +void sparx5_unregister_netdevs(struct sparx5 *sparx5) +{ + int portno; + + for (portno = 0; portno < SPX5_PORTS; portno++) + if (sparx5->ports[portno]) + unregister_netdev(sparx5->ports[portno]->ndev); +} + diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c new file mode 100644 index 000000000..6db6ac6a3 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. 
+ */ + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" + +#define XTR_EOF_0 ntohl((__force __be32)0x80000000u) +#define XTR_EOF_1 ntohl((__force __be32)0x80000001u) +#define XTR_EOF_2 ntohl((__force __be32)0x80000002u) +#define XTR_EOF_3 ntohl((__force __be32)0x80000003u) +#define XTR_PRUNED ntohl((__force __be32)0x80000004u) +#define XTR_ABORT ntohl((__force __be32)0x80000005u) +#define XTR_ESCAPE ntohl((__force __be32)0x80000006u) +#define XTR_NOT_READY ntohl((__force __be32)0x80000007u) + +#define XTR_VALID_BYTES(x) (4 - ((x) & 3)) + +#define INJ_TIMEOUT_NS 50000 + +void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp) +{ + /* Start flush */ + spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH); + + /* Allow the queue to drain */ + mdelay(1); + + /* All Queues normal */ + spx5_wr(0, sparx5, QS_XTR_FLUSH); +} + +void sparx5_ifh_parse(u32 *ifh, struct frame_info *info) +{ + u8 *xtr_hdr = (u8 *)ifh; + + /* FWD is bit 45-72 (28 bits), but we only read the 27 LSB for now */ + u32 fwd = + ((u32)xtr_hdr[27] << 24) | + ((u32)xtr_hdr[28] << 16) | + ((u32)xtr_hdr[29] << 8) | + ((u32)xtr_hdr[30] << 0); + fwd = (fwd >> 5); + info->src_port = FIELD_GET(GENMASK(7, 1), fwd); + + info->timestamp = + ((u64)xtr_hdr[2] << 24) | + ((u64)xtr_hdr[3] << 16) | + ((u64)xtr_hdr[4] << 8) | + ((u64)xtr_hdr[5] << 0); +} + +static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap) +{ + bool eof_flag = false, pruned_flag = false, abort_flag = false; + struct net_device *netdev; + struct sparx5_port *port; + struct frame_info fi; + int i, byte_cnt = 0; + struct sk_buff *skb; + u32 ifh[IFH_LEN]; + u32 *rxbuf; + + /* Get IFH */ + for (i = 0; i < IFH_LEN; i++) + ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp)); + + /* Decode IFH (what's needed) */ + sparx5_ifh_parse(ifh, &fi); + + /* Map to port netdev */ + port = fi.src_port < SPX5_PORTS ? 
+ sparx5->ports[fi.src_port] : NULL; + if (!port || !port->ndev) { + dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port); + sparx5_xtr_flush(sparx5, grp); + return; + } + + /* Have netdev, get skb */ + netdev = port->ndev; + skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN); + if (!skb) { + sparx5_xtr_flush(sparx5, grp); + dev_err(sparx5->dev, "No skb allocated\n"); + netdev->stats.rx_dropped++; + return; + } + rxbuf = (u32 *)skb->data; + + /* Now, pull frame data */ + while (!eof_flag) { + u32 val = spx5_rd(sparx5, QS_XTR_RD(grp)); + u32 cmp = val; + + if (byte_swap) + cmp = ntohl((__force __be32)val); + + switch (cmp) { + case XTR_NOT_READY: + break; + case XTR_ABORT: + /* No accompanying data */ + abort_flag = true; + eof_flag = true; + break; + case XTR_EOF_0: + case XTR_EOF_1: + case XTR_EOF_2: + case XTR_EOF_3: + /* This assumes STATUS_WORD_POS == 1, Status + * just after last data + */ + if (!byte_swap) + val = ntohl((__force __be32)val); + byte_cnt -= (4 - XTR_VALID_BYTES(val)); + eof_flag = true; + break; + case XTR_PRUNED: + /* But get the last 4 bytes as well */ + eof_flag = true; + pruned_flag = true; + fallthrough; + case XTR_ESCAPE: + *rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp)); + byte_cnt += 4; + rxbuf++; + break; + default: + *rxbuf = val; + byte_cnt += 4; + rxbuf++; + } + } + + if (abort_flag || pruned_flag || !eof_flag) { + netdev_err(netdev, "Discarded frame: abort:%d pruned:%d eof:%d\n", + abort_flag, pruned_flag, eof_flag); + kfree_skb(skb); + netdev->stats.rx_dropped++; + return; + } + + /* Everything we see on an interface that is in the HW bridge + * has already been forwarded + */ + if (test_bit(port->portno, sparx5->bridge_mask)) + skb->offload_fwd_mark = 1; + + /* Finish up skb */ + skb_put(skb, byte_cnt - ETH_FCS_LEN); + eth_skb_pad(skb); + sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp); + skb->protocol = eth_type_trans(skb, netdev); + netdev->stats.rx_bytes += skb->len; + netdev->stats.rx_packets++; + netif_rx(skb); +} + +static int sparx5_inject(struct sparx5 *sparx5, + u32 *ifh, + struct sk_buff *skb, + struct net_device *ndev) +{ + int grp = INJ_QUEUE; + u32 val, w, count; + u8 *buf; + + val = spx5_rd(sparx5, QS_INJ_STATUS); + if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp))) { + pr_err_ratelimited("Injection: Queue not ready: 0x%lx\n", + QS_INJ_STATUS_FIFO_RDY_GET(val)); + return -EBUSY; + } + + /* Indicate SOF */ + spx5_wr(QS_INJ_CTRL_SOF_SET(1) | + QS_INJ_CTRL_GAP_SIZE_SET(1), + sparx5, QS_INJ_CTRL(grp)); + + /* Write the IFH to the chip. */ + for (w = 0; w < IFH_LEN; w++) + spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp)); + + /* Write words, round up */ + count = DIV_ROUND_UP(skb->len, 4); + buf = skb->data; + for (w = 0; w < count; w++, buf += 4) { + val = get_unaligned((const u32 *)buf); + spx5_wr(val, sparx5, QS_INJ_WR(grp)); + } + + /* Add padding */ + while (w < (60 / 4)) { + spx5_wr(0, sparx5, QS_INJ_WR(grp)); + w++; + } + + /* Indicate EOF and valid bytes in last word */ + spx5_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) | + QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ? 
0 : skb->len % 4) | + QS_INJ_CTRL_EOF_SET(1), + sparx5, QS_INJ_CTRL(grp)); + + /* Add dummy CRC */ + spx5_wr(0, sparx5, QS_INJ_WR(grp)); + w++; + + val = spx5_rd(sparx5, QS_INJ_STATUS); + if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) { + struct sparx5_port *port = netdev_priv(ndev); + + pr_err_ratelimited("Injection: Watermark reached: 0x%lx\n", + QS_INJ_STATUS_WMARK_REACHED_GET(val)); + netif_stop_queue(ndev); + hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS, + HRTIMER_MODE_REL); + } + + return NETDEV_TX_OK; +} + +netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) +{ + struct net_device_stats *stats = &dev->stats; + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + u32 ifh[IFH_LEN]; + netdev_tx_t ret; + + memset(ifh, 0, IFH_LEN * 4); + sparx5_set_port_ifh(ifh, port->portno); + + if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + if (sparx5_ptp_txtstamp_request(port, skb) < 0) + return NETDEV_TX_BUSY; + + sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op); + sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type); + sparx5_set_port_ifh_pdu_w16_offset(ifh, SPARX5_SKB_CB(skb)->pdu_w16_offset); + sparx5_set_port_ifh_timestamp(ifh, SPARX5_SKB_CB(skb)->ts_id); + } + + skb_tx_timestamp(skb); + if (sparx5->fdma_irq > 0) + ret = sparx5_fdma_xmit(sparx5, ifh, skb); + else + ret = sparx5_inject(sparx5, ifh, skb, dev); + + if (ret == -EBUSY) + goto busy; + if (ret < 0) + goto drop; + + stats->tx_bytes += skb->len; + stats->tx_packets++; + sparx5->tx.packets++; + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + return NETDEV_TX_OK; + + dev_consume_skb_any(skb); + return NETDEV_TX_OK; +drop: + stats->tx_dropped++; + sparx5->tx.dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +busy: + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + sparx5_ptp_txtstamp_release(port, skb); + return NETDEV_TX_BUSY; +} + +static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr) +{ + struct sparx5_port *port = container_of(tmr, struct sparx5_port, + inj_timer); + int grp = INJ_QUEUE; + u32 val; + + val = spx5_rd(port->sparx5, QS_INJ_STATUS); + if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) { + pr_err_ratelimited("Injection: Reset watermark count\n"); + /* Reset Watermark count to restart */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, + port->sparx5, + DSM_DEV_TX_STOP_WM_CFG(port->portno)); + } + netif_wake_queue(port->ndev); + return HRTIMER_NORESTART; +} + +int sparx5_manual_injection_mode(struct sparx5 *sparx5) +{ + const int byte_swap = 1; + int portno; + + /* Change mode to manual extraction and injection */ + spx5_wr(QS_XTR_GRP_CFG_MODE_SET(1) | + QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) | + QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap), + sparx5, QS_XTR_GRP_CFG(XTR_QUEUE)); + spx5_wr(QS_INJ_GRP_CFG_MODE_SET(1) | + QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap), + sparx5, QS_INJ_GRP_CFG(INJ_QUEUE)); + + /* CPU ports capture setup */ + for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) { + /* ASM CPU port: No preamble, IFH, enable padding */ + spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) | + ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) | + ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */ + sparx5, ASM_PORT_CFG(portno)); + + /* Reset WM cnt to unclog queued frames */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1), + 
DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(portno)); + + /* Set Disassembler Stop Watermark level */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(0), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(portno)); + + /* Enable Disassembler buffer underrun watchdog + */ + spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(0), + DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, + sparx5, + DSM_BUF_CFG(portno)); + } + return 0; +} + +irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5) +{ + struct sparx5 *s5 = _sparx5; + int poll = 64; + + /* Check data in queue */ + while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0) + sparx5_xtr_grp(s5, XTR_QUEUE, false); + + return IRQ_HANDLED; +} + +void sparx5_port_inj_timer_setup(struct sparx5_port *port) +{ + hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + port->inj_timer.function = sparx5_injection_timeout; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c new file mode 100644 index 000000000..af8b43500 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0+ +#include "sparx5_main.h" + +void sparx5_pgid_init(struct sparx5 *spx5) +{ + int i; + + for (i = 0; i < PGID_TABLE_SIZE; i++) + spx5->pgid_map[i] = SPX5_PGID_FREE; + + /* Reserved for unicast, flood control, broadcast, and CPU. + * These cannot be freed. + */ + for (i = 0; i <= PGID_CPU; i++) + spx5->pgid_map[i] = SPX5_PGID_RESERVED; +} + +int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx) +{ + int i; + + /* The multicast area starts at index 65, but the first 7 + * are reserved for flood masks and CPU. Start alloc after that. + */ + for (i = PGID_MCAST_START; i < PGID_TABLE_SIZE; i++) { + if (spx5->pgid_map[i] == SPX5_PGID_FREE) { + spx5->pgid_map[i] = SPX5_PGID_MULTICAST; + *idx = i; + return 0; + } + } + + return -EBUSY; +} + +int sparx5_pgid_free(struct sparx5 *spx5, u16 idx) +{ + if (idx <= PGID_CPU || idx >= PGID_TABLE_SIZE) + return -EINVAL; + + if (spx5->pgid_map[idx] == SPX5_PGID_FREE) + return -EINVAL; + + spx5->pgid_map[idx] = SPX5_PGID_FREE; + return 0; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c new file mode 100644 index 000000000..830da0e5f --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. 
+ */ + +#include <linux/module.h> +#include <linux/phylink.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/sfp.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_port.h" + +static bool port_conf_has_changed(struct sparx5_port_config *a, struct sparx5_port_config *b) +{ + if (a->speed != b->speed || + a->portmode != b->portmode || + a->autoneg != b->autoneg || + a->pause_adv != b->pause_adv || + a->power_down != b->power_down || + a->media != b->media) + return true; + return false; +} + +static struct phylink_pcs * +sparx5_phylink_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct sparx5_port *port = netdev_priv(to_net_dev(config->dev)); + + return &port->phylink_pcs; +} + +static void sparx5_phylink_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ + /* Currently not used */ +} + +static void sparx5_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct sparx5_port *port = netdev_priv(to_net_dev(config->dev)); + struct sparx5_port_config conf; + int err; + + conf = port->conf; + conf.duplex = duplex; + conf.pause = 0; + conf.pause |= tx_pause ? MLO_PAUSE_TX : 0; + conf.pause |= rx_pause ? MLO_PAUSE_RX : 0; + conf.speed = speed; + /* Configure the port to speed/duplex/pause */ + err = sparx5_port_config(port->sparx5, port, &conf); + if (err) + netdev_err(port->ndev, "port config failed: %d\n", err); +} + +static void sparx5_phylink_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + /* Currently not used */ +} + +static struct sparx5_port *sparx5_pcs_to_port(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct sparx5_port, phylink_pcs); +} + +static void sparx5_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct sparx5_port *port = sparx5_pcs_to_port(pcs); + struct sparx5_port_status status; + + sparx5_get_port_status(port->sparx5, port, &status); + state->link = status.link && !status.link_down; + state->an_complete = status.an_complete; + state->speed = status.speed; + state->duplex = status.duplex; + state->pause = status.pause; +} + +static int sparx5_pcs_config(struct phylink_pcs *pcs, + unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct sparx5_port *port = sparx5_pcs_to_port(pcs); + struct sparx5_port_config conf; + int ret = 0; + + conf = port->conf; + conf.power_down = false; + conf.portmode = interface; + conf.inband = phylink_autoneg_inband(mode); + conf.autoneg = phylink_test(advertising, Autoneg); + conf.pause_adv = 0; + if (phylink_test(advertising, Pause)) + conf.pause_adv |= ADVERTISE_1000XPAUSE; + if (phylink_test(advertising, Asym_Pause)) + conf.pause_adv |= ADVERTISE_1000XPSE_ASYM; + if (sparx5_is_baser(interface)) { + if (phylink_test(advertising, FIBRE)) + conf.media = PHY_MEDIA_SR; + else + conf.media = PHY_MEDIA_DAC; + } + if (!port_conf_has_changed(&port->conf, &conf)) + return ret; + /* Enable the PCS matching this interface type */ + ret = sparx5_port_pcs_set(port->sparx5, port, &conf); + if (ret) + netdev_err(port->ndev, "port PCS config failed: %d\n", ret); + return ret; +} + +static void sparx5_pcs_aneg_restart(struct phylink_pcs *pcs) +{ + /* Currently not used */ +} + +const struct phylink_pcs_ops 
sparx5_phylink_pcs_ops = { + .pcs_get_state = sparx5_pcs_get_state, + .pcs_config = sparx5_pcs_config, + .pcs_an_restart = sparx5_pcs_aneg_restart, +}; + +const struct phylink_mac_ops sparx5_phylink_mac_ops = { + .validate = phylink_generic_validate, + .mac_select_pcs = sparx5_phylink_mac_select_pcs, + .mac_config = sparx5_phylink_mac_config, + .mac_link_down = sparx5_phylink_mac_link_down, + .mac_link_up = sparx5_phylink_mac_link_up, +}; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c new file mode 100644 index 000000000..32709d21a --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c @@ -0,0 +1,1146 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + */ + +#include <linux/module.h> +#include <linux/phy/phy.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_port.h" + +#define SPX5_ETYPE_TAG_C 0x8100 +#define SPX5_ETYPE_TAG_S 0x88a8 + +#define SPX5_WAIT_US 1000 +#define SPX5_WAIT_MAX_US 2000 + +enum port_error { + SPX5_PERR_SPEED, + SPX5_PERR_IFTYPE, +}; + +#define PAUSE_DISCARD 0xC +#define ETH_MAXLEN (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN) + +static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status) +{ + status->an_complete = true; + if (!(lp_abil & LPA_SGMII_LINK)) { + status->link = false; + return; + } + + switch (lp_abil & LPA_SGMII_SPD_MASK) { + case LPA_SGMII_10: + status->speed = SPEED_10; + break; + case LPA_SGMII_100: + status->speed = SPEED_100; + break; + case LPA_SGMII_1000: + status->speed = SPEED_1000; + break; + default: + status->link = false; + return; + } + if (lp_abil & LPA_SGMII_FULL_DUPLEX) + status->duplex = DUPLEX_FULL; + else + status->duplex = DUPLEX_HALF; +} + +static void decode_cl37_word(u16 lp_abil, uint16_t ld_abil, struct sparx5_port_status *status) +{ + status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link; + status->an_complete = true; + status->duplex = (ADVERTISE_1000XFULL & lp_abil) ? + DUPLEX_FULL : DUPLEX_UNKNOWN; // 1G HDX not supported + + if ((ld_abil & ADVERTISE_1000XPAUSE) && + (lp_abil & ADVERTISE_1000XPAUSE)) { + status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX; + } else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) && + (lp_abil & ADVERTISE_1000XPSE_ASYM)) { + status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ? + MLO_PAUSE_TX : 0; + status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ? 
+ MLO_PAUSE_RX : 0; + } else { + status->pause = MLO_PAUSE_NONE; + } +} + +static int sparx5_get_dev2g5_status(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_status *status) +{ + u32 portno = port->portno; + u16 lp_adv, ld_adv; + u32 value; + + /* Get PCS Link down sticky */ + value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno)); + status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value); + if (status->link_down) /* Clear the sticky */ + spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno)); + + /* Get both current Link and Sync status */ + value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno)); + status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) && + DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value); + + if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX) + status->speed = SPEED_1000; + else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX) + status->speed = SPEED_2500; + + status->duplex = DUPLEX_FULL; + + /* Get PCS ANEG status register */ + value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno)); + + /* Aneg complete provides more information */ + if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) { + lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value); + if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) { + decode_sgmii_word(lp_adv, status); + } else { + value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno)); + ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value); + decode_cl37_word(lp_adv, ld_adv, status); + } + } + return 0; +} + +static int sparx5_get_sfi_status(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_status *status) +{ + bool high_speed_dev = sparx5_is_baser(port->conf.portmode); + u32 portno = port->portno; + u32 value, dev, tinst; + void __iomem *inst; + + if (!high_speed_dev) { + netdev_err(port->ndev, "error: low speed and SFI mode\n"); + return -EINVAL; + } + + dev = sparx5_to_high_dev(portno); + tinst = sparx5_port_dev_index(portno); + inst = spx5_inst_get(sparx5, dev, tinst); + + value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0)); + if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) { + /* The link is or has been down. Clear the sticky bit */ + status->link_down = 1; + spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0)); + value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0)); + } + status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY); + status->duplex = DUPLEX_FULL; + if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER) + status->speed = SPEED_5000; + else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER) + status->speed = SPEED_10000; + else + status->speed = SPEED_25000; + + return 0; +} + +/* Get link status of 1000Base-X/in-band and SFI ports. 
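+ * Low speed modes (SGMII/QSGMII/1000BaseX/2500BaseX) are read via the + * DEV2G5 PCS, decoding the in-band aneg word once aneg has completed, + * while the BaseR modes derive link state from the 5G/10G/25G MAC TX + * monitor idle state.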
+ */ +int sparx5_get_port_status(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_status *status) +{ + memset(status, 0, sizeof(*status)); + status->speed = port->conf.speed; + if (port->conf.power_down) { + status->link = false; + return 0; + } + switch (port->conf.portmode) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_QSGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + return sparx5_get_dev2g5_status(sparx5, port, status); + case PHY_INTERFACE_MODE_5GBASER: + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_25GBASER: + return sparx5_get_sfi_status(sparx5, port, status); + case PHY_INTERFACE_MODE_NA: + return 0; + default: + netdev_err(port->ndev, "Status not supported"); + return -ENODEV; + } + return 0; +} + +static int sparx5_port_error(struct sparx5_port *port, + struct sparx5_port_config *conf, + enum port_error errtype) +{ + switch (errtype) { + case SPX5_PERR_SPEED: + netdev_err(port->ndev, + "Interface does not support speed: %u: for %s\n", + conf->speed, phy_modes(conf->portmode)); + break; + case SPX5_PERR_IFTYPE: + netdev_err(port->ndev, + "Switch port does not support interface type: %s\n", + phy_modes(conf->portmode)); + break; + default: + netdev_err(port->ndev, + "Interface configuration error\n"); + } + + return -EINVAL; +} + +static int sparx5_port_verify_speed(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + if ((sparx5_port_is_2g5(port->portno) && + conf->speed > SPEED_2500) || + (sparx5_port_is_5g(port->portno) && + conf->speed > SPEED_5000) || + (sparx5_port_is_10g(port->portno) && + conf->speed > SPEED_10000)) + return sparx5_port_error(port, conf, SPX5_PERR_SPEED); + + switch (conf->portmode) { + case PHY_INTERFACE_MODE_NA: + return -EINVAL; + case PHY_INTERFACE_MODE_1000BASEX: + if (conf->speed != SPEED_1000 || + sparx5_port_is_2g5(port->portno)) + return sparx5_port_error(port, conf, SPX5_PERR_SPEED); + if (sparx5_port_is_2g5(port->portno)) + return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE); + break; + case PHY_INTERFACE_MODE_2500BASEX: + if (conf->speed != SPEED_2500 || + sparx5_port_is_2g5(port->portno)) + return sparx5_port_error(port, conf, SPX5_PERR_SPEED); + break; + case PHY_INTERFACE_MODE_QSGMII: + if (port->portno > 47) + return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE); + fallthrough; + case PHY_INTERFACE_MODE_SGMII: + if (conf->speed != SPEED_1000 && + conf->speed != SPEED_100 && + conf->speed != SPEED_10 && + conf->speed != SPEED_2500) + return sparx5_port_error(port, conf, SPX5_PERR_SPEED); + break; + case PHY_INTERFACE_MODE_5GBASER: + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_25GBASER: + if ((conf->speed != SPEED_5000 && + conf->speed != SPEED_10000 && + conf->speed != SPEED_25000)) + return sparx5_port_error(port, conf, SPX5_PERR_SPEED); + break; + default: + return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE); + } + return 0; +} + +static bool sparx5_dev_change(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + return sparx5_is_baser(port->conf.portmode) ^ + sparx5_is_baser(conf->portmode); +} + +static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno) +{ + u32 value, resource, prio, delay_cnt = 0; + bool poll_src = true; + char *mem = ""; + + /* Resource == 0: Memory tracked per source (SRC-MEM) + * Resource == 1: Frame references tracked per source (SRC-REF) + * Resource == 2: Memory tracked per destination (DST-MEM) + * Resource == 3: Frame 
references tracked per destination. (DST-REF) + */ + while (1) { + bool empty = true; + + for (resource = 0; resource < (poll_src ? 2 : 1); resource++) { + u32 base; + + base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno; + for (prio = 0; prio < SPX5_PRIOS; prio++) { + value = spx5_rd(sparx5, + QRES_RES_STAT(base + prio)); + if (value) { + mem = resource == 0 ? + "DST-MEM" : "SRC-MEM"; + empty = false; + } + } + } + + if (empty) + break; + + if (delay_cnt++ == 2000) { + dev_err(sparx5->dev, + "Flush timeout port %u. %s queue not empty\n", + portno, mem); + return -EINVAL; + } + + usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US); + } + return 0; +} + +static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev) +{ + u32 tinst = high_spd_dev ? + sparx5_port_dev_index(port->portno) : port->portno; + u32 dev = high_spd_dev ? + sparx5_to_high_dev(port->portno) : TARGET_DEV2G5; + void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst); + u32 spd = port->conf.speed; + u32 spd_prm; + int err; + + if (high_spd_dev) { + /* 1: Reset the PCS Rx clock domain */ + spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST, + DEV10G_DEV_RST_CTRL_PCS_RX_RST, + devinst, + DEV10G_DEV_RST_CTRL(0)); + + /* 2: Disable MAC frame reception */ + spx5_inst_rmw(0, + DEV10G_MAC_ENA_CFG_RX_ENA, + devinst, + DEV10G_MAC_ENA_CFG(0)); + } else { + /* 1: Reset the PCS Rx clock domain */ + spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, + DEV2G5_DEV_RST_CTRL_PCS_RX_RST, + devinst, + DEV2G5_DEV_RST_CTRL(0)); + /* 2: Disable MAC frame reception */ + spx5_inst_rmw(0, + DEV2G5_MAC_ENA_CFG_RX_ENA, + devinst, + DEV2G5_MAC_ENA_CFG(0)); + } + /* 3: Disable traffic being sent to or from switch port->portno */ + spx5_rmw(0, + QFWD_SWITCH_PORT_MODE_PORT_ENA, + sparx5, + QFWD_SWITCH_PORT_MODE(port->portno)); + + /* 4: Disable dequeuing from the egress queues */ + spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS, + HSCH_PORT_MODE_DEQUEUE_DIS, + sparx5, + HSCH_PORT_MODE(port->portno)); + + /* 5: Disable Flowcontrol */ + spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1), + QSYS_PAUSE_CFG_PAUSE_STOP, + sparx5, + QSYS_PAUSE_CFG(port->portno)); + + spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 
100 : 10; + /* 6: Wait while the last frame is exiting the queues */ + usleep_range(8 * spd_prm, 10 * spd_prm); + + /* 7: Flush the queues associated with port->portno */ + spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) | + HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) | + HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) | + HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1), + HSCH_FLUSH_CTRL_FLUSH_PORT | + HSCH_FLUSH_CTRL_FLUSH_DST | + HSCH_FLUSH_CTRL_FLUSH_SRC | + HSCH_FLUSH_CTRL_FLUSH_ENA, + sparx5, + HSCH_FLUSH_CTRL); + + /* 8: Enable dequeuing from the egress queues */ + spx5_rmw(0, + HSCH_PORT_MODE_DEQUEUE_DIS, + sparx5, + HSCH_PORT_MODE(port->portno)); + + /* 9: Wait until flushing is complete */ + err = sparx5_port_flush_poll(sparx5, port->portno); + if (err) + return err; + + /* 10: Reset the MAC clock domain */ + if (high_spd_dev) { + spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) | + DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) | + DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1), + DEV10G_DEV_RST_CTRL_PCS_TX_RST | + DEV10G_DEV_RST_CTRL_MAC_RX_RST | + DEV10G_DEV_RST_CTRL_MAC_TX_RST, + devinst, + DEV10G_DEV_RST_CTRL(0)); + + } else { + spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) | + DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) | + DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) | + DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) | + DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1), + DEV2G5_DEV_RST_CTRL_SPEED_SEL | + DEV2G5_DEV_RST_CTRL_PCS_TX_RST | + DEV2G5_DEV_RST_CTRL_PCS_RX_RST | + DEV2G5_DEV_RST_CTRL_MAC_TX_RST | + DEV2G5_DEV_RST_CTRL_MAC_RX_RST, + devinst, + DEV2G5_DEV_RST_CTRL(0)); + } + /* 11: Clear flushing */ + spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) | + HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0), + HSCH_FLUSH_CTRL_FLUSH_PORT | + HSCH_FLUSH_CTRL_FLUSH_ENA, + sparx5, + HSCH_FLUSH_CTRL); + + if (high_spd_dev) { + u32 pcs = sparx5_to_pcs_dev(port->portno); + void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst); + + /* 12: Disable 5G/10G/25G BaseR PCS */ + spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0), + PCS10G_BR_PCS_CFG_PCS_ENA, + pcsinst, + PCS10G_BR_PCS_CFG(0)); + + if (sparx5_port_is_25g(port->portno)) + /* Disable 25G PCS */ + spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0), + DEV25G_PCS25G_CFG_PCS25G_ENA, + sparx5, + DEV25G_PCS25G_CFG(tinst)); + } else { + /* 12: Disable 1G PCS */ + spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0), + DEV2G5_PCS1G_CFG_PCS_ENA, + sparx5, + DEV2G5_PCS1G_CFG(port->portno)); + } + + /* The port is now flushed and disabled */ + return 0; +} + +static int sparx5_port_fifo_sz(struct sparx5 *sparx5, + u32 portno, u32 speed) +{ + u32 sys_clk = sparx5_clk_period(sparx5->coreclock); + const u32 taxi_dist[SPX5_PORTS_ALL] = { + 6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10, + 4, 4, 4, 4, + 11, 12, 13, 14, 15, 16, 17, 18, + 11, 12, 13, 14, 15, 16, 17, 18, + 11, 12, 13, 14, 15, 16, 17, 18, + 11, 12, 13, 14, 15, 16, 17, 18, + 4, 6, 8, 4, 6, 8, 6, 8, + 2, 2, 2, 2, 2, 2, 2, 4, 2 + }; + u32 mac_per = 6400, tmp1, tmp2, tmp3; + u32 fifo_width = 16; + u32 mac_width = 8; + u32 addition = 0; + + switch (speed) { + case SPEED_25000: + return 0; + case SPEED_10000: + mac_per = 6400; + mac_width = 8; + addition = 1; + break; + case SPEED_5000: + mac_per = 12800; + mac_width = 8; + addition = 0; + break; + case SPEED_2500: + mac_per = 3200; + mac_width = 1; + addition = 0; + break; + case SPEED_1000: + mac_per = 8000; + mac_width = 1; + addition = 0; + break; + case SPEED_100: + case SPEED_10: + return 1; + default: + break; + } + + tmp1 = 1000 * mac_width / fifo_width; + tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000) + * sys_clk / mac_per); 
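+	/* Worked example (sketch, assuming a 625 MHz core clock so that + * sys_clk = 1600 ps): a 10G port with taxi_dist 4 gives + * tmp1 = 1000 * 8 / 16 = 500, + * tmp2 = 3000 + (12000 + 2 * 4 * 1000) * 1600 / 6400 = 8000, + * tmp3 = 500 * 8000 / 1000 = 4000, and a return value of + * (4000 + 2000 + 999) / 1000 + 1 = 7. + */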
+ tmp3 = tmp1 * tmp2 / 1000; + return (tmp3 + 2000 + 999) / 1000 + addition; +} + +/* Configure port muxing: + * QSGMII: 4x2G5 devices + */ +static int sparx5_port_mux_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + u32 portno = port->portno; + u32 inst; + + if (port->conf.portmode == conf->portmode) + return 0; /* Nothing to do */ + + switch (conf->portmode) { + case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */ + inst = (portno - portno % 4) / 4; + spx5_rmw(BIT(inst), + BIT(inst), + sparx5, + PORT_CONF_QSGMII_ENA); + + if ((portno / 4 % 2) == 0) { + /* Affects d0-d3,d8-d11..d40-d43 */ + spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) | + PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) | + PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1), + PORT_CONF_USGMII_CFG_BYPASS_SCRAM | + PORT_CONF_USGMII_CFG_BYPASS_DESCRAM | + PORT_CONF_USGMII_CFG_QUAD_MODE, + sparx5, + PORT_CONF_USGMII_CFG((portno / 8))); + } + break; + default: + break; + } + return 0; +} + +static int sparx5_port_max_tags_set(struct sparx5 *sparx5, + struct sparx5_port *port) +{ + enum sparx5_port_max_tags max_tags = port->max_vlan_tags; + int tag_ct = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 : + max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0; + bool dtag = max_tags == SPX5_PORT_MAX_TAGS_TWO; + enum sparx5_vlan_port_type vlan_type = port->vlan_type; + bool dotag = max_tags != SPX5_PORT_MAX_TAGS_NONE; + u32 dev = sparx5_to_high_dev(port->portno); + u32 tinst = sparx5_port_dev_index(port->portno); + void __iomem *inst = spx5_inst_get(sparx5, dev, tinst); + u32 etype; + + etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ? + port->custom_etype : + vlan_type == SPX5_VLAN_PORT_TYPE_C ? + SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S); + + spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) | + DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) | + DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) | + DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag), + sparx5, + DEV2G5_MAC_TAGS_CFG(port->portno)); + + if (sparx5_port_is_2g5(port->portno)) + return 0; + + spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) | + DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag), + DEV10G_MAC_TAGS_CFG_TAG_ID | + DEV10G_MAC_TAGS_CFG_TAG_ENA, + inst, + DEV10G_MAC_TAGS_CFG(0, 0)); + + spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct), + DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, + inst, + DEV10G_MAC_NUM_TAGS_CFG(0)); + + spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag), + DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, + inst, + DEV10G_MAC_MAXLEN_CFG(0)); + return 0; +} + +int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed) +{ + u32 clk_period_ps = 1600; /* 625Mhz for now */ + u32 urg = 672000; + + switch (speed) { + case SPEED_10: + case SPEED_100: + case SPEED_1000: + urg = 672000; + break; + case SPEED_2500: + urg = 270000; + break; + case SPEED_5000: + urg = 135000; + break; + case SPEED_10000: + urg = 67200; + break; + case SPEED_25000: + urg = 27000; + break; + } + return urg / clk_period_ps - 1; +} + +static u16 sparx5_wm_enc(u16 value) +{ + if (value >= 2048) + return 2048 + value / 16; + + return value; +} + +static int sparx5_port_fc_setup(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + bool fc_obey = conf->pause & MLO_PAUSE_RX ? 
1 : 0; + u32 pause_stop = 0xFFF - 1; /* FC gen disabled */ + + if (conf->pause & MLO_PAUSE_TX) + pause_stop = sparx5_wm_enc(4 * (ETH_MAXLEN / + SPX5_BUFFER_CELL_SZ)); + + /* Set HDX flowcontrol */ + spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF), + DSM_MAC_CFG_HDX_BACKPREASSURE, + sparx5, + DSM_MAC_CFG(port->portno)); + + /* Obey flowcontrol */ + spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey), + DSM_RX_PAUSE_CFG_RX_PAUSE_EN, + sparx5, + DSM_RX_PAUSE_CFG(port->portno)); + + /* Disable forward pressure */ + spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey), + QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, + sparx5, + QSYS_FWD_PRESSURE(port->portno)); + + /* Generate pause frames */ + spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop), + QSYS_PAUSE_CFG_PAUSE_STOP, + sparx5, + QSYS_PAUSE_CFG(port->portno)); + + return 0; +} + +static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf) +{ + if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */ + return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL); + else + return 1; /* Enable SGMII Aneg */ +} + +int sparx5_serdes_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + int portmode, err, speed = conf->speed; + + if (conf->portmode == PHY_INTERFACE_MODE_QSGMII && + ((port->portno % 4) != 0)) { + return 0; + } + if (sparx5_is_baser(conf->portmode)) { + if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) + speed = SPEED_25000; + else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER) + speed = SPEED_10000; + else + speed = SPEED_5000; + } + + err = phy_set_media(port->serdes, conf->media); + if (err) + return err; + if (speed > 0) { + err = phy_set_speed(port->serdes, speed); + if (err) + return err; + } + if (conf->serdes_reset) { + err = phy_reset(port->serdes); + if (err) + return err; + } + + /* Configure SerDes with port parameters + * For BaseR, the serdes driver supports 10GBASE-R and speed 5G/10G/25G + */ + portmode = conf->portmode; + if (sparx5_is_baser(conf->portmode)) + portmode = PHY_INTERFACE_MODE_10GBASER; + err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode); + if (err) + return err; + conf->serdes_reset = false; + return err; +} + +static int sparx5_port_pcs_low_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + bool sgmii = false, inband_aneg = false; + int err; + + if (port->conf.inband) { + if (conf->portmode == PHY_INTERFACE_MODE_SGMII || + conf->portmode == PHY_INTERFACE_MODE_QSGMII) + inband_aneg = true; /* Cisco-SGMII in-band-aneg */ + else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX && + conf->autoneg) + inband_aneg = true; /* Clause-37 in-band-aneg */ + + err = sparx5_serdes_set(sparx5, port, conf); + if (err) + return -EINVAL; + } else { + sgmii = true; /* Phy is connected to the MAC */ + } + + /* Choose SGMII or 1000BaseX/2500BaseX PCS mode */ + spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii), + DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, + sparx5, + DEV2G5_PCS1G_MODE_CFG(port->portno)); + + /* Enable PCS */ + spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1), + sparx5, + DEV2G5_PCS1G_CFG(port->portno)); + + if (inband_aneg) { + u16 abil = sparx5_get_aneg_word(conf); + + /* Enable in-band aneg */ + spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) | + DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) | + DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) | + DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1), + sparx5, + DEV2G5_PCS1G_ANEG_CFG(port->portno)); + } else { + spx5_wr(0, 
sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno)); + } + + /* Take PCS out of reset */ + spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) | + DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) | + DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0), + DEV2G5_DEV_RST_CTRL_SPEED_SEL | + DEV2G5_DEV_RST_CTRL_PCS_TX_RST | + DEV2G5_DEV_RST_CTRL_PCS_RX_RST, + sparx5, + DEV2G5_DEV_RST_CTRL(port->portno)); + + return 0; +} + +static int sparx5_port_pcs_high_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0; + u32 pix = sparx5_port_dev_index(port->portno); + u32 dev = sparx5_to_high_dev(port->portno); + u32 pcs = sparx5_to_pcs_dev(port->portno); + void __iomem *devinst; + void __iomem *pcsinst; + int err; + + devinst = spx5_inst_get(sparx5, dev, pix); + pcsinst = spx5_inst_get(sparx5, pcs, pix); + + /* SFI : No in-band-aneg. Speeds 5G/10G/25G */ + err = sparx5_serdes_set(sparx5, port, conf); + if (err) + return -EINVAL; + if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) { + /* Enable PCS for 25G device, speed 25G */ + spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1), + DEV25G_PCS25G_CFG_PCS25G_ENA, + sparx5, + DEV25G_PCS25G_CFG(pix)); + } else { + /* Enable PCS for 5G/10G/25G devices, speed 5G/10G */ + spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1), + PCS10G_BR_PCS_CFG_PCS_ENA, + pcsinst, + PCS10G_BR_PCS_CFG(0)); + } + + /* Enable 5G/10G/25G MAC module */ + spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) | + DEV10G_MAC_ENA_CFG_TX_ENA_SET(1), + devinst, + DEV10G_MAC_ENA_CFG(0)); + + /* Take the device out of reset */ + spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) | + DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) | + DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) | + DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) | + DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd), + DEV10G_DEV_RST_CTRL_PCS_RX_RST | + DEV10G_DEV_RST_CTRL_PCS_TX_RST | + DEV10G_DEV_RST_CTRL_MAC_RX_RST | + DEV10G_DEV_RST_CTRL_MAC_TX_RST | + DEV10G_DEV_RST_CTRL_SPEED_SEL, + devinst, + DEV10G_DEV_RST_CTRL(0)); + + return 0; +} + +/* Switch between 1G/2500 and 5G/10G/25G devices */ +static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd) +{ + int bt_indx = BIT(sparx5_port_dev_index(port)); + + if (sparx5_port_is_5g(port)) { + spx5_rmw(hsd ? 0 : bt_indx, + bt_indx, + sparx5, + PORT_CONF_DEV5G_MODES); + } else if (sparx5_port_is_10g(port)) { + spx5_rmw(hsd ? 0 : bt_indx, + bt_indx, + sparx5, + PORT_CONF_DEV10G_MODES); + } else if (sparx5_port_is_25g(port)) { + spx5_rmw(hsd ? 0 : bt_indx, + bt_indx, + sparx5, + PORT_CONF_DEV25G_MODES); + } +} + +/* Configure speed/duplex dependent registers */ +static int sparx5_port_config_low_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2; + bool fdx = conf->duplex == DUPLEX_FULL; + int spd = conf->speed; + + clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2; + gig_mode = spd == SPEED_1000 || spd == SPEED_2500; + tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5; + hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2; + hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 
4 : 1; + + /* GIG/FDX mode */ + spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) | + DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx), + DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA | + DEV2G5_MAC_MODE_CFG_FDX_ENA, + sparx5, + DEV2G5_MAC_MODE_CFG(port->portno)); + + /* Set MAC IFG Gaps */ + spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) | + DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) | + DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2), + sparx5, + DEV2G5_MAC_IFG_CFG(port->portno)); + + /* Disabling frame aging when in HDX (due to HDX issue) */ + spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0), + HSCH_PORT_MODE_AGE_DIS, + sparx5, + HSCH_PORT_MODE(port->portno)); + + /* Enable MAC module */ + spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA | + DEV2G5_MAC_ENA_CFG_TX_ENA, + sparx5, + DEV2G5_MAC_ENA_CFG(port->portno)); + + /* Select speed and take MAC out of reset */ + spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) | + DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) | + DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0), + DEV2G5_DEV_RST_CTRL_SPEED_SEL | + DEV2G5_DEV_RST_CTRL_MAC_TX_RST | + DEV2G5_DEV_RST_CTRL_MAC_RX_RST, + sparx5, + DEV2G5_DEV_RST_CTRL(port->portno)); + + return 0; +} + +int sparx5_port_pcs_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) + +{ + bool high_speed_dev = sparx5_is_baser(conf->portmode); + int err; + + if (sparx5_dev_change(sparx5, port, conf)) { + /* switch device */ + sparx5_dev_switch(sparx5, port->portno, high_speed_dev); + + /* Disable the not-in-use device */ + err = sparx5_port_disable(sparx5, port, !high_speed_dev); + if (err) + return err; + } + /* Disable the port before re-configuring */ + err = sparx5_port_disable(sparx5, port, high_speed_dev); + if (err) + return -EINVAL; + + if (high_speed_dev) + err = sparx5_port_pcs_high_set(sparx5, port, conf); + else + err = sparx5_port_pcs_low_set(sparx5, port, conf); + + if (err) + return -EINVAL; + + if (port->conf.inband) { + /* Enable/disable 1G counters in ASM */ + spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev), + ASM_PORT_CFG_CSC_STAT_DIS, + sparx5, + ASM_PORT_CFG(port->portno)); + + /* Enable/disable 1G counters in DSM */ + spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev), + DSM_BUF_CFG_CSC_STAT_DIS, + sparx5, + DSM_BUF_CFG(port->portno)); + } + + port->conf = *conf; + + return 0; +} + +int sparx5_port_config(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + bool high_speed_dev = sparx5_is_baser(conf->portmode); + int err, urgency, stop_wm; + + err = sparx5_port_verify_speed(sparx5, port, conf); + if (err) + return err; + + /* high speed device is already configured */ + if (!high_speed_dev) + sparx5_port_config_low_set(sparx5, port, conf); + + /* Configure flow control */ + err = sparx5_port_fc_setup(sparx5, port, conf); + if (err) + return err; + + /* Set the DSM stop watermark */ + stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed); + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(port->portno)); + + /* Enable port in queue system */ + urgency = sparx5_port_fwd_urg(sparx5, conf->speed); + spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) | + QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency), + QFWD_SWITCH_PORT_MODE_PORT_ENA | + QFWD_SWITCH_PORT_MODE_FWD_URGENCY, + sparx5, + QFWD_SWITCH_PORT_MODE(port->portno)); + + /* Save the new values */ + port->conf = *conf; + + return 0; +} + +/* Initialize port config to default */ +int sparx5_port_init(struct sparx5 *sparx5, + 
struct sparx5_port *port, + struct sparx5_port_config *conf) +{ + u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ)); + u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ)); + u32 devhigh = sparx5_to_high_dev(port->portno); + u32 pix = sparx5_port_dev_index(port->portno); + u32 pcs = sparx5_to_pcs_dev(port->portno); + bool sd_pol = port->signd_active_high; + bool sd_sel = !port->signd_internal; + bool sd_ena = port->signd_enable; + u32 pause_stop = 0xFFF - 1; /* FC generate disabled */ + void __iomem *devinst; + void __iomem *pcsinst; + int err; + + devinst = spx5_inst_get(sparx5, devhigh, pix); + pcsinst = spx5_inst_get(sparx5, pcs, pix); + + /* Set the mux port mode */ + err = sparx5_port_mux_set(sparx5, port, conf); + if (err) + return err; + + /* Configure MAC vlan awareness */ + err = sparx5_port_max_tags_set(sparx5, port); + if (err) + return err; + + /* Set Max Length */ + spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN), + DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, + sparx5, + DEV2G5_MAC_MAXLEN_CFG(port->portno)); + + /* 1G/2G5: Signal Detect configuration */ + spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) | + DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) | + DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena), + sparx5, + DEV2G5_PCS1G_SD_CFG(port->portno)); + + /* Set Pause WM hysteresis */ + spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) | + QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) | + QSYS_PAUSE_CFG_PAUSE_ENA_SET(1), + QSYS_PAUSE_CFG_PAUSE_START | + QSYS_PAUSE_CFG_PAUSE_STOP | + QSYS_PAUSE_CFG_PAUSE_ENA, + sparx5, + QSYS_PAUSE_CFG(port->portno)); + + /* Port ATOP. Frames are tail dropped when this WM is hit */ + spx5_wr(QSYS_ATOP_ATOP_SET(atop), + sparx5, + QSYS_ATOP(port->portno)); + + /* Discard pause frame 01-80-C2-00-00-01 */ + spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno)); + + if (conf->portmode == PHY_INTERFACE_MODE_QSGMII || + conf->portmode == PHY_INTERFACE_MODE_SGMII) { + err = sparx5_serdes_set(sparx5, port, conf); + if (err) + return err; + + if (!sparx5_port_is_2g5(port->portno)) + /* Enable shadow device */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1), + DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(port->portno)); + + sparx5_dev_switch(sparx5, port->portno, false); + } + if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) { + /* All ports must be PCS enabled in QSGMII mode */ + spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0), + DEV2G5_DEV_RST_CTRL_PCS_TX_RST, + sparx5, + DEV2G5_DEV_RST_CTRL(port->portno)); + } + /* Default IFGs for 1G */ + spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) | + DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) | + DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0), + sparx5, + DEV2G5_MAC_IFG_CFG(port->portno)); + + if (sparx5_port_is_2g5(port->portno)) + return 0; /* Low speed device only - return */ + + /* Now setup the high speed device */ + if (conf->portmode == PHY_INTERFACE_MODE_NA) + conf->portmode = PHY_INTERFACE_MODE_10GBASER; + + if (sparx5_is_baser(conf->portmode)) + sparx5_dev_switch(sparx5, port->portno, true); + + /* Set Max Length */ + spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN), + DEV10G_MAC_MAXLEN_CFG_MAX_LEN, + devinst, + DEV10G_MAC_MAXLEN_CFG(0)); + + /* Handle Signal Detect in 10G PCS */ + spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) | + PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) | + PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena), + pcsinst, + PCS10G_BR_PCS_SD_CFG(0)); + + if (sparx5_port_is_25g(port->portno)) { + /* Handle Signal Detect in 25G PCS */ + 
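/* The 25G PCS has its own signal detect register block, addressed here by the 25G device index (pix), instead of the shared pcsinst mapping used for the 5G/10G PCS above */ + 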
spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) | + DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) | + DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena), + sparx5, + DEV25G_PCS25G_SD_CFG(pix)); + } + + return 0; +} + +void sparx5_port_enable(struct sparx5_port *port, bool enable) +{ + struct sparx5 *sparx5 = port->sparx5; + + /* Enable port for frame transfer? */ + spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable), + QFWD_SWITCH_PORT_MODE_PORT_ENA, + sparx5, + QFWD_SWITCH_PORT_MODE(port->portno)); +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h new file mode 100644 index 000000000..2f8043eac --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + */ + +#ifndef __SPARX5_PORT_H__ +#define __SPARX5_PORT_H__ + +#include "sparx5_main.h" + +static inline bool sparx5_port_is_2g5(int portno) +{ + return portno >= 16 && portno <= 47; +} + +static inline bool sparx5_port_is_5g(int portno) +{ + return portno <= 11 || portno == 64; +} + +static inline bool sparx5_port_is_10g(int portno) +{ + return (portno >= 12 && portno <= 15) || (portno >= 48 && portno <= 55); +} + +static inline bool sparx5_port_is_25g(int portno) +{ + return portno >= 56 && portno <= 63; +} + +static inline u32 sparx5_to_high_dev(int port) +{ + if (sparx5_port_is_5g(port)) + return TARGET_DEV5G; + if (sparx5_port_is_10g(port)) + return TARGET_DEV10G; + return TARGET_DEV25G; +} + +static inline u32 sparx5_to_pcs_dev(int port) +{ + if (sparx5_port_is_5g(port)) + return TARGET_PCS5G_BR; + if (sparx5_port_is_10g(port)) + return TARGET_PCS10G_BR; + return TARGET_PCS25G_BR; +} + +static inline int sparx5_port_dev_index(int port) +{ + if (sparx5_port_is_2g5(port)) + return port; + if (sparx5_port_is_5g(port)) + return (port <= 11 ? port : 12); + if (sparx5_port_is_10g(port)) + return (port >= 12 && port <= 15) ? + port - 12 : port - 44; + return (port - 56); +} + +int sparx5_port_init(struct sparx5 *sparx5, + struct sparx5_port *spx5_port, + struct sparx5_port_config *conf); + +int sparx5_port_config(struct sparx5 *sparx5, + struct sparx5_port *spx5_port, + struct sparx5_port_config *conf); + +int sparx5_port_pcs_set(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_config *conf); + +int sparx5_serdes_set(struct sparx5 *sparx5, + struct sparx5_port *spx5_port, + struct sparx5_port_config *conf); + +struct sparx5_port_status { + bool link; + bool link_down; + int speed; + bool an_complete; + int duplex; + int pause; +}; + +int sparx5_get_port_status(struct sparx5 *sparx5, + struct sparx5_port *port, + struct sparx5_port_status *status); + +void sparx5_port_enable(struct sparx5_port *port, bool enable); +int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed); + +#endif /* __SPARX5_PORT_H__ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c new file mode 100644 index 000000000..69e76634f --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c @@ -0,0 +1,685 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. 
+ * + * The Sparx5 Chip Register Model can be browsed at this location: + * https://github.com/microchip-ung/sparx-5_reginfo + */ +#include <linux/ptp_classify.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" + +#define SPARX5_MAX_PTP_ID 512 + +#define TOD_ACC_PIN 0x4 + +enum { + PTP_PIN_ACTION_IDLE = 0, + PTP_PIN_ACTION_LOAD, + PTP_PIN_ACTION_SAVE, + PTP_PIN_ACTION_CLOCK, + PTP_PIN_ACTION_DELTA, + PTP_PIN_ACTION_TOD +}; + +static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5) +{ + /* Represents a 1ppm adjustment in 2^59 fixed-point format, using the + * nominal TOD increments per clock cycle as reference: 1.5968750000 ns + * (625 MHz), 1.9960937500 ns (500 MHz) and 3.9921875000 ns (250 MHz). + * The value is calculated as follows: + * (1/1000000)/((2^-59)/X) + */ + + u64 res = 0; + + switch (sparx5->coreclock) { + case SPX5_CORE_CLOCK_250MHZ: + res = 2301339409586; + break; + case SPX5_CORE_CLOCK_500MHZ: + res = 1150669704793; + break; + case SPX5_CORE_CLOCK_625MHZ: + res = 920535763834; + break; + default: + WARN(1, "Invalid core clock"); + break; + } + + return res; +} + +static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5) +{ + u64 res = 0; + + switch (sparx5->coreclock) { + case SPX5_CORE_CLOCK_250MHZ: + res = 0x1FF0000000000000; + break; + case SPX5_CORE_CLOCK_500MHZ: + res = 0x0FF8000000000000; + break; + case SPX5_CORE_CLOCK_625MHZ: + res = 0x0CC6666666666666; + break; + default: + WARN(1, "Invalid core clock"); + break; + } + + return res; +} + +int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr) +{ + struct sparx5 *sparx5 = port->sparx5; + struct hwtstamp_config cfg; + struct sparx5_phc *phc; + + /* For now, don't allow running ptp on ports that are part of a bridge; + * with a transparent clock the HW would still forward the frames, so + * there would be duplicate frames + */ + + if (test_bit(port->portno, sparx5->bridge_mask)) + return -EINVAL; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + switch (cfg.tx_type) { + case HWTSTAMP_TX_ON: + port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; + break; + case HWTSTAMP_TX_ONESTEP_SYNC: + port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP; + break; + case HWTSTAMP_TX_OFF: + port->ptp_cmd = IFH_REW_OP_NOOP; + break; + default: + return -ERANGE; + } + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + cfg.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + /* Commit back the result & save it */ + mutex_lock(&sparx5->ptp_lock); + phc = &sparx5->phc[SPARX5_PHC_PORT]; + memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg)); + mutex_unlock(&sparx5->ptp_lock); + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr) +{ + struct sparx5 *sparx5 = port->sparx5; + struct sparx5_phc *phc; + + phc = &sparx5->phc[SPARX5_PHC_PORT]; + return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config, + sizeof(phc->hwtstamp_config)) ? 
-EFAULT : 0; +} + +static void sparx5_ptp_classify(struct sparx5_port *port, struct sk_buff *skb, + u8 *rew_op, u8 *pdu_type, u8 *pdu_w16_offset) +{ + struct ptp_header *header; + u8 msgtype; + int type; + + if (port->ptp_cmd == IFH_REW_OP_NOOP) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + *pdu_w16_offset = 0; + return; + } + + type = ptp_classify_raw(skb); + if (type == PTP_CLASS_NONE) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + *pdu_w16_offset = 0; + return; + } + + header = ptp_parse_header(skb, type); + if (!header) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + *pdu_w16_offset = 0; + return; + } + + *pdu_w16_offset = 7; + if (type & PTP_CLASS_L2) + *pdu_type = IFH_PDU_TYPE_PTP; + if (type & PTP_CLASS_IPV4) + *pdu_type = IFH_PDU_TYPE_IPV4_UDP_PTP; + if (type & PTP_CLASS_IPV6) + *pdu_type = IFH_PDU_TYPE_IPV6_UDP_PTP; + + if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { + *rew_op = IFH_REW_OP_TWO_STEP_PTP; + return; + } + + /* If it is a sync message and the port runs one-step, set the + * one-step operation, otherwise run as two-step + */ + msgtype = ptp_get_msgtype(header, type); + if ((msgtype & 0xf) == 0) { + *rew_op = IFH_REW_OP_ONE_STEP_PTP; + return; + } + + *rew_op = IFH_REW_OP_TWO_STEP_PTP; +} + +static void sparx5_ptp_txtstamp_old_release(struct sparx5_port *port) +{ + struct sk_buff *skb, *skb_tmp; + unsigned long flags; + + spin_lock_irqsave(&port->tx_skbs.lock, flags); + skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { + if (time_after(SPARX5_SKB_CB(skb)->jiffies + SPARX5_PTP_TIMEOUT, + jiffies)) + break; + + __skb_unlink(skb, &port->tx_skbs); + dev_kfree_skb_any(skb); + } + spin_unlock_irqrestore(&port->tx_skbs.lock, flags); +} + +int sparx5_ptp_txtstamp_request(struct sparx5_port *port, + struct sk_buff *skb) +{ + struct sparx5 *sparx5 = port->sparx5; + u8 rew_op, pdu_type, pdu_w16_offset; + unsigned long flags; + + sparx5_ptp_classify(port, skb, &rew_op, &pdu_type, &pdu_w16_offset); + SPARX5_SKB_CB(skb)->rew_op = rew_op; + SPARX5_SKB_CB(skb)->pdu_type = pdu_type; + SPARX5_SKB_CB(skb)->pdu_w16_offset = pdu_w16_offset; + + if (rew_op != IFH_REW_OP_TWO_STEP_PTP) + return 0; + + sparx5_ptp_txtstamp_old_release(port); + + spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags); + if (sparx5->ptp_skbs == SPARX5_MAX_PTP_ID) { + spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags); + return -EBUSY; + } + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + skb_queue_tail(&port->tx_skbs, skb); + SPARX5_SKB_CB(skb)->ts_id = port->ts_id; + SPARX5_SKB_CB(skb)->jiffies = jiffies; + + sparx5->ptp_skbs++; + port->ts_id++; + if (port->ts_id == SPARX5_MAX_PTP_ID) + port->ts_id = 0; + + spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags); + + return 0; +} + +void sparx5_ptp_txtstamp_release(struct sparx5_port *port, + struct sk_buff *skb) +{ + struct sparx5 *sparx5 = port->sparx5; + unsigned long flags; + + spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags); + port->ts_id--; + sparx5->ptp_skbs--; + skb_unlink(skb, &port->tx_skbs); + spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags); +} + +static void sparx5_get_hwtimestamp(struct sparx5 *sparx5, + struct timespec64 *ts, + u32 nsec) +{ + /* Read current PTP time to get seconds */ + unsigned long flags; + u32 curr_nsec; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(SPARX5_PHC_PORT) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM 
| + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + + ts->tv_nsec = nsec; + + /* Sec has incremented since the ts was registered */ + if (curr_nsec < nsec) + ts->tv_sec--; + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); +} + +irqreturn_t sparx5_ptp_irq_handler(int irq, void *args) +{ + int budget = SPARX5_MAX_PTP_ID; + struct sparx5 *sparx5 = args; + + while (budget--) { + struct sk_buff *skb, *skb_tmp, *skb_match = NULL; + struct skb_shared_hwtstamps shhwtstamps; + struct sparx5_port *port; + struct timespec64 ts; + unsigned long flags; + u32 val, id, txport; + u32 delay; + + val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL); + + /* Check if a timestamp can be retrieved */ + if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD)) + break; + + WARN_ON(val & REW_PTP_TWOSTEP_CTRL_PTP_OVFL); + + if (!(val & REW_PTP_TWOSTEP_CTRL_STAMP_TX)) + continue; + + /* Retrieve the ts Tx port */ + txport = REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val); + + /* Retrieve its associated skb */ + port = sparx5->ports[txport]; + + /* Retrieve the delay */ + delay = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP); + delay = REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay); + + /* Get next timestamp from fifo, which needs to be the + * rx timestamp which represents the id of the frame + */ + spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1), + REW_PTP_TWOSTEP_CTRL_PTP_NXT, + sparx5, REW_PTP_TWOSTEP_CTRL); + + val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL); + + /* Check if a timestamp can be retrieved */ + if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD)) + break; + + /* Read RX timestamping to get the ID */ + id = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP); + id <<= 8; + id |= spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP_SUBNS); + + spin_lock_irqsave(&port->tx_skbs.lock, flags); + skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { + if (SPARX5_SKB_CB(skb)->ts_id != id) + continue; + + __skb_unlink(skb, &port->tx_skbs); + skb_match = skb; + break; + } + spin_unlock_irqrestore(&port->tx_skbs.lock, flags); + + /* Next ts */ + spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1), + REW_PTP_TWOSTEP_CTRL_PTP_NXT, + sparx5, REW_PTP_TWOSTEP_CTRL); + + if (WARN_ON(!skb_match)) + continue; + + spin_lock(&sparx5->ptp_ts_id_lock); + sparx5->ptp_skbs--; + spin_unlock(&sparx5->ptp_ts_id_lock); + + /* Get the h/w timestamp */ + sparx5_get_hwtimestamp(sparx5, &ts, delay); + + /* Set the timestamp into the skb */ + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + skb_tstamp_tx(skb_match, &shhwtstamps); + + dev_kfree_skb_any(skb_match); + } + + return IRQ_HANDLED; +} + +static int sparx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); + struct sparx5 *sparx5 = phc->sparx5; + unsigned long flags; + bool neg_adj = false; + u64 tod_inc; + u64 ref; + + if (!scaled_ppm) + return 0; + + if (scaled_ppm < 0) { + neg_adj = true; + scaled_ppm = -scaled_ppm; + } + + tod_inc = sparx5_ptp_get_nominal_value(sparx5); + + /* The multiplication is split in 2 separate additions because of + * overflow issues: with the 16-bit fractional part included, + * scaled_ppm values above roughly 20 ppm would otherwise overflow. + */ + ref = sparx5_ptp_get_1ppm(sparx5) * (scaled_ppm >> 16); + ref += (sparx5_ptp_get_1ppm(sparx5) * (0xffff & scaled_ppm)) >> 16; + tod_inc = neg_adj ? 
tod_inc - ref : tod_inc + ref; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(BIT(phc->index)), + PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, + sparx5, PTP_PTP_DOM_CFG); + + spx5_wr((u32)tod_inc & 0xFFFFFFFF, sparx5, + PTP_CLK_PER_CFG(phc->index, 0)); + spx5_wr((u32)(tod_inc >> 32), sparx5, + PTP_CLK_PER_CFG(phc->index, 1)); + + spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0), + PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, sparx5, + PTP_PTP_DOM_CFG); + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); + + return 0; +} + +static int sparx5_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); + struct sparx5 *sparx5 = phc->sparx5; + unsigned long flags; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + /* Must be in IDLE mode before the time can be loaded */ + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + /* Set new value */ + spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)), + sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN)); + spx5_wr(lower_32_bits(ts->tv_sec), + sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + + /* Apply new values */ + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); + + return 0; +} + +static int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); + struct sparx5 *sparx5 = phc->sparx5; + unsigned long flags; + time64_t s; + s64 ns; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN)); + s <<= 32; + s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC; + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); + + /* Deal with negative values */ + if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) { + s--; + ns &= 0xf; + ns += 999999984; + } + + set_normalized_timespec64(ts, s, ns); + return 0; +} + +static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); + struct sparx5 *sparx5 = phc->sparx5; + + if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) { + unsigned long flags; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + /* Must be in IDLE mode before the time can be loaded */ + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + 
PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta), + sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + + /* Adjust time with the value of PTP_TOD_NSEC */ + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); + } else { + /* Fall back using sparx5_ptp_settime64 which is not exact */ + struct timespec64 ts; + u64 now; + + sparx5_ptp_gettime64(ptp, &ts); + + now = ktime_to_ns(timespec64_to_ktime(ts)); + ts = ns_to_timespec64(now + delta); + + sparx5_ptp_settime64(ptp, &ts); + } + + return 0; +} + +static struct ptp_clock_info sparx5_ptp_clock_info = { + .owner = THIS_MODULE, + .name = "sparx5 ptp", + .max_adj = 200000, + .gettime64 = sparx5_ptp_gettime64, + .settime64 = sparx5_ptp_settime64, + .adjtime = sparx5_ptp_adjtime, + .adjfine = sparx5_ptp_adjfine, +}; + +static int sparx5_ptp_phc_init(struct sparx5 *sparx5, + int index, + struct ptp_clock_info *clock_info) +{ + struct sparx5_phc *phc = &sparx5->phc[index]; + + phc->info = *clock_info; + phc->clock = ptp_clock_register(&phc->info, sparx5->dev); + if (IS_ERR(phc->clock)) + return PTR_ERR(phc->clock); + + phc->index = index; + phc->sparx5 = sparx5; + + /* PTP Rx stamping is always enabled. */ + phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + + return 0; +} + +int sparx5_ptp_init(struct sparx5 *sparx5) +{ + u64 tod_adj = sparx5_ptp_get_nominal_value(sparx5); + struct sparx5_port *port; + int err, i; + + if (!sparx5->ptp) + return 0; + + for (i = 0; i < SPARX5_PHC_COUNT; ++i) { + err = sparx5_ptp_phc_init(sparx5, i, &sparx5_ptp_clock_info); + if (err) + return err; + } + + spin_lock_init(&sparx5->ptp_clock_lock); + spin_lock_init(&sparx5->ptp_ts_id_lock); + mutex_init(&sparx5->ptp_lock); + + /* Disable master counters */ + spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0), sparx5, PTP_PTP_DOM_CFG); + + /* Configure the nominal TOD increment per clock cycle */ + spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0x7), + PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, + sparx5, PTP_PTP_DOM_CFG); + + for (i = 0; i < SPARX5_PHC_COUNT; ++i) { + spx5_wr((u32)tod_adj & 0xFFFFFFFF, sparx5, + PTP_CLK_PER_CFG(i, 0)); + spx5_wr((u32)(tod_adj >> 32), sparx5, + PTP_CLK_PER_CFG(i, 1)); + } + + spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0), + PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, + sparx5, PTP_PTP_DOM_CFG); + + /* Enable master counters */ + spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG); + + for (i = 0; i < SPX5_PORTS; i++) { + port = sparx5->ports[i]; + if (!port) + continue; + + skb_queue_head_init(&port->tx_skbs); + } + + return 0; +} + +void sparx5_ptp_deinit(struct sparx5 *sparx5) +{ + struct sparx5_port *port; + int i; + + for (i = 0; i < SPX5_PORTS; i++) { + port = sparx5->ports[i]; + if (!port) + continue; + + skb_queue_purge(&port->tx_skbs); + } + + for (i = 0; i < SPARX5_PHC_COUNT; ++i) + ptp_clock_unregister(sparx5->phc[i].clock); +} + +void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb, + u64 timestamp) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct sparx5_phc *phc; + struct timespec64 ts; + u64 full_ts_in_ns; + + if (!sparx5->ptp) + return; + + phc = &sparx5->phc[SPARX5_PHC_PORT]; + sparx5_ptp_gettime64(&phc->info, &ts); + 
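+ /* The extracted RX timestamp carries only a nanosecond part; merge + * it with the current PTP seconds counter, compensating below if a + * second boundary was crossed between capture and readout. + */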
+ if (ts.tv_nsec < timestamp) + ts.tv_sec--; + ts.tv_nsec = timestamp; + full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = full_ts_in_ns; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c new file mode 100644 index 000000000..1e79d0ef0 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c @@ -0,0 +1,513 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. + */ + +#include <net/pkt_cls.h> + +#include "sparx5_main.h" +#include "sparx5_qos.h" + +/* Max rates for leak groups */ +static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = { + 1048568, /* 1.049 Gbps */ + 2621420, /* 2.621 Gbps */ + 10485680, /* 10.486 Gbps */ + 26214200 /* 26.214 Gbps */ +}; + +static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT]; + +static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group) +{ + u32 value; + + value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group)); + return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value); +} + +static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group, + u32 leak_time) +{ + spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5, + HSCH_HSCH_TIMER_CFG(layer, group)); +} + +static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group) +{ + u32 value; + + value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group)); + return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value); +} + +static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group, + u32 idx) + +{ + u32 value; + + value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx)); + return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value); +} + +static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group) +{ + u32 itr, next; + + itr = sparx5_lg_get_first(sparx5, layer, group); + + for (;;) { + next = sparx5_lg_get_next(sparx5, layer, group, itr); + if (itr == next) + return itr; + + itr = next; + } +} + +static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group, + u32 idx) +{ + return idx == sparx5_lg_get_next(sparx5, layer, group, idx); +} + +static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group, + u32 idx) +{ + return idx == sparx5_lg_get_first(sparx5, layer, group); +} + +static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group) +{ + return sparx5_lg_get_leak_time(sparx5, layer, group) == 0; +} + +static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group) +{ + if (sparx5_lg_is_empty(sparx5, layer, group)) + return false; + + return sparx5_lg_get_first(sparx5, layer, group) == + sparx5_lg_get_last(sparx5, layer, group); +} + +static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group, + u32 leak_time) +{ + sparx5_lg_set_leak_time(sparx5, layer, group, leak_time); +} + +static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group) +{ + sparx5_lg_set_leak_time(sparx5, layer, group, 0); +} + +static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer, + u32 idx, u32 *group) +{ + u32 itr, next; + int i; + + for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) { + if (sparx5_lg_is_empty(sparx5, layer, i)) + continue; + + itr = sparx5_lg_get_first(sparx5, layer, i); + + for (;;) { + next = sparx5_lg_get_next(sparx5, layer, i, itr); + + if (itr == idx) { + *group = i; + return 0; /* Found it */ + } + if (itr == next) + 
break; /* Was not found */ + + itr = next; + } + } + + return -1; +} + +static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group) +{ + struct sparx5_layer *l = &layers[layer]; + struct sparx5_lg *lg; + u32 i; + + for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) { + lg = &l->leak_groups[i]; + if (rate <= lg->max_rate) { + *group = i; + return 0; + } + } + + return -1; +} + +static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group, + u32 idx, u32 *prev, u32 *next, u32 *first) +{ + u32 itr; + + *first = sparx5_lg_get_first(sparx5, layer, group); + *prev = *first; + *next = *first; + itr = *first; + + for (;;) { + *next = sparx5_lg_get_next(sparx5, layer, group, itr); + + if (itr == idx) + return 0; /* Found it */ + + if (itr == *next) + return -1; /* Was not found */ + + *prev = itr; + itr = *next; + } + + return -1; +} + +static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group, + u32 se_first, u32 idx, u32 idx_next, bool empty) +{ + u32 leak_time = layers[layer].leak_groups[group].leak_time; + + /* Stop leaking */ + sparx5_lg_disable(sparx5, layer, group); + + if (empty) + return 0; + + /* Select layer */ + spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer), + HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG); + + /* Link elements */ + spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5, + HSCH_SE_CONNECT(idx)); + + /* Set the first element. */ + spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first), + HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5, + HSCH_HSCH_LEAK_CFG(layer, group)); + + /* Start leaking */ + sparx5_lg_enable(sparx5, layer, group, leak_time); + + return 0; +} + +static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx) +{ + u32 first, next, prev; + bool empty = false; + + /* idx *must* be present in the leak group */ + WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next, + &first) < 0); + + if (sparx5_lg_is_singular(sparx5, layer, group)) { + empty = true; + } else if (sparx5_lg_is_last(sparx5, layer, group, idx)) { + /* idx is removed, prev is now last */ + idx = prev; + next = prev; + } else if (sparx5_lg_is_first(sparx5, layer, group, idx)) { + /* idx is removed and points to itself, first is next */ + first = next; + next = idx; + } else { + /* Next is not touched */ + idx = prev; + } + + return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next, + empty); +} + +static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group, + u32 idx) +{ + u32 first, next, old_group; + + pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group, + idx); + + /* Is this SE already shaping ? 
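+ * If it is already a member of a leak group and that group differs + * from the requested one, it is first unlinked from the old group.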
*/ + if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) { + if (old_group != new_group) { + /* Delete from old group */ + sparx5_lg_del(sparx5, layer, old_group, idx); + } else { + /* Nothing to do here */ + return 0; + } + } + + /* We always add to head of the list */ + first = idx; + + if (sparx5_lg_is_empty(sparx5, layer, new_group)) + next = idx; + else + next = sparx5_lg_get_first(sparx5, layer, new_group); + + return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next, + false); +} + +static int sparx5_shaper_conf_set(struct sparx5_port *port, + const struct sparx5_shaper *sh, u32 layer, + u32 idx, u32 group) +{ + int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32); + struct sparx5 *sparx5 = port->sparx5; + + if (!sh->rate && !sh->burst) + sparx5_lg_action = &sparx5_lg_del; + else + sparx5_lg_action = &sparx5_lg_add; + + /* Select layer */ + spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer), + HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG); + + /* Set frame mode */ + spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE, + sparx5, HSCH_SE_CFG(idx)); + + /* Set committed rate and burst */ + spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) | + HSCH_CIR_CFG_CIR_BURST_SET(sh->burst), + sparx5, HSCH_CIR_CFG(idx)); + + /* This has to be done after the shaper configuration has been set */ + sparx5_lg_action(sparx5, layer, group, idx); + + return 0; +} + +static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight) +{ + return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) - + 1; +} + +static int sparx5_dwrr_conf_set(struct sparx5_port *port, + struct sparx5_dwrr *dwrr) +{ + int i; + + spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) | + HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno), + HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX, + port->sparx5, HSCH_HSCH_CFG_CFG); + + /* Number of *lower* indexes that are arbitrated dwrr */ + spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count), + HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5, + HSCH_SE_CFG(port->portno)); + + for (i = 0; i < dwrr->count; i++) { + spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]), + HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5, + HSCH_DWRR_ENTRY(i)); + } + + return 0; +} + +static int sparx5_leak_groups_init(struct sparx5 *sparx5) +{ + struct sparx5_layer *layer; + u32 sys_clk_per_100ps; + struct sparx5_lg *lg; + u32 leak_time_us; + int i, ii; + + sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER); + + for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) { + layer = &layers[i]; + for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) { + lg = &layer->leak_groups[ii]; + lg->max_rate = spx5_hsch_max_group_rate[ii]; + + /* Calculate the leak time in us, to serve a maximum + * rate of 'max_rate' for this group + */ + leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate; + + /* Hardware wants leak time in ns */ + lg->leak_time = 1000 * leak_time_us; + + /* Calculate resolution */ + lg->resolution = 1000 / leak_time_us; + + /* Maximum number of shapers that can be served by + * this leak group + */ + lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps; + + /* Example: + * Wanted bandwidth is 100Mbit: + * + * 100 mbps can be served by leak group zero. + * + * leak_time is 125000 ns. 
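+ * (i.e. the leak group services its shapers once every 125 us, + * 8000 times per second)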
+ * resolution is: 8 + * + * cir = 100000 / 8 = 12500 + * leaks_pr_sec = 10^9 / 125000 = 8000 + * bw = 12500 * 8000 = 10^8 (100 Mbit) + */ + + /* Disable by default - this also indicates an empty + * leak group + */ + sparx5_lg_disable(sparx5, i, ii); + } + } + + return 0; +} + +int sparx5_qos_init(struct sparx5 *sparx5) +{ + int ret; + + ret = sparx5_leak_groups_init(sparx5); + if (ret < 0) + return ret; + + return 0; +} + +int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc) +{ + int i; + + if (num_tc != SPX5_PRIOS) { + netdev_err(ndev, "Only %d traffic classes supported\n", + SPX5_PRIOS); + return -EINVAL; + } + + netdev_set_num_tc(ndev, num_tc); + + for (i = 0; i < num_tc; i++) + netdev_set_tc_queue(ndev, i, 1, i); + + netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n", + ndev->num_tc, ndev->real_num_tx_queues); + + return 0; +} + +int sparx5_tc_mqprio_del(struct net_device *ndev) +{ + netdev_reset_tc(ndev); + + netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n", + ndev->num_tc, ndev->real_num_tx_queues); + + return 0; +} + +int sparx5_tc_tbf_add(struct sparx5_port *port, + struct tc_tbf_qopt_offload_replace_params *params, + u32 layer, u32 idx) +{ + struct sparx5_shaper sh = { + .mode = SPX5_SE_MODE_DATARATE, + .rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8, + .burst = params->max_size, + }; + struct sparx5_lg *lg; + u32 group; + + /* Find a suitable leak group for this SE */ + if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) { + pr_debug("Could not find leak group for se with rate: %u", + sh.rate); + return -EINVAL; + } + + lg = &layers[layer].leak_groups[group]; + + pr_debug("Found matching group (speed: %u)\n", lg->max_rate); + + if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN) + return -EINVAL; + + /* Calculate committed rate and burst */ + sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution); + sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT); + + if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX) + return -EINVAL; + + return sparx5_shaper_conf_set(port, &sh, layer, idx, group); +} + +int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx) +{ + struct sparx5_shaper sh = {0}; + u32 group; + + sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group); + + return sparx5_shaper_conf_set(port, &sh, layer, idx, group); +} + +int sparx5_tc_ets_add(struct sparx5_port *port, + struct tc_ets_qopt_offload_replace_params *params) +{ + struct sparx5_dwrr dwrr = {0}; + /* Minimum weight across all dwrr bands */ + unsigned int w_min = 100; + int i; + + /* Find minimum weight for all dwrr bands */ + for (i = 0; i < SPX5_PRIOS; i++) { + if (params->quanta[i] == 0) + continue; + w_min = min(w_min, params->weights[i]); + } + + for (i = 0; i < SPX5_PRIOS; i++) { + /* Strict band; skip */ + if (params->quanta[i] == 0) + continue; + + dwrr.count++; + + /* On the sparx5, bands with higher indexes are preferred and + * arbitrated strict. Strict bands are put in the lower indexes, + * by tc, so we reverse the bands here. + * + * Also convert the weight to something the hardware + * understands. 
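+ * The mapping in sparx5_weight_to_hw_cost() is roughly + * cost = round(SPX5_DWRR_COST_MAX * w_min / weight) - 1, so the + * smallest weight gets the highest cost (least bandwidth).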
+ */ + dwrr.cost[SPX5_PRIOS - i - 1] = + sparx5_weight_to_hw_cost(w_min, params->weights[i]); + } + + return sparx5_dwrr_conf_set(port, &dwrr); +} + +int sparx5_tc_ets_del(struct sparx5_port *port) +{ + struct sparx5_dwrr dwrr = {0}; + + return sparx5_dwrr_conf_set(port, &dwrr); +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h new file mode 100644 index 000000000..ced35033a --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. + */ + +#ifndef __SPARX5_QOS_H__ +#define __SPARX5_QOS_H__ + +#include <linux/netdevice.h> + +/* Number of Layers */ +#define SPX5_HSCH_LAYER_CNT 3 + +/* Scheduling elements per layer */ +#define SPX5_HSCH_L0_SE_CNT 5040 +#define SPX5_HSCH_L1_SE_CNT 64 +#define SPX5_HSCH_L2_SE_CNT 64 + +/* Calculate Layer 0 Scheduler Element when using normal hierarchy */ +#define SPX5_HSCH_L0_GET_IDX(port, queue) ((64 * (port)) + (8 * (queue))) + +/* Number of leak groups */ +#define SPX5_HSCH_LEAK_GRP_CNT 4 + +/* Scheduler modes */ +#define SPX5_SE_MODE_LINERATE 0 +#define SPX5_SE_MODE_DATARATE 1 + +/* Rate and burst */ +#define SPX5_SE_RATE_MAX 262143 +#define SPX5_SE_BURST_MAX 127 +#define SPX5_SE_RATE_MIN 1 +#define SPX5_SE_BURST_MIN 1 +#define SPX5_SE_BURST_UNIT 4096 + +/* Dwrr */ +#define SPX5_DWRR_COST_MAX 63 + +struct sparx5_shaper { + u32 mode; + u32 rate; + u32 burst; +}; + +struct sparx5_lg { + u32 max_rate; + u32 resolution; + u32 leak_time; + u32 max_ses; +}; + +struct sparx5_layer { + struct sparx5_lg leak_groups[SPX5_HSCH_LEAK_GRP_CNT]; +}; + +struct sparx5_dwrr { + u32 count; /* Number of inputs running dwrr */ + u8 cost[SPX5_PRIOS]; +}; + +int sparx5_qos_init(struct sparx5 *sparx5); + +/* Multi-Queue Priority */ +int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc); +int sparx5_tc_mqprio_del(struct net_device *ndev); + +/* Token Bucket Filter */ +struct tc_tbf_qopt_offload_replace_params; +int sparx5_tc_tbf_add(struct sparx5_port *port, + struct tc_tbf_qopt_offload_replace_params *params, + u32 layer, u32 idx); +int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx); + +/* Enhanced Transmission Selection */ +struct tc_ets_qopt_offload_replace_params; +int sparx5_tc_ets_add(struct sparx5_port *port, + struct tc_ets_qopt_offload_replace_params *params); + +int sparx5_tc_ets_del(struct sparx5_port *port); + +#endif /* __SPARX5_QOS_H__ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c new file mode 100644 index 000000000..4af85d108 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c @@ -0,0 +1,763 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. 
+ */ + +#include <linux/if_bridge.h> +#include <net/switchdev.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" + +static struct workqueue_struct *sparx5_owq; + +struct sparx5_switchdev_event_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct net_device *dev; + struct sparx5 *sparx5; + unsigned long event; +}; + +static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD)) + return -EINVAL; + + return 0; +} + +static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag) +{ + bool should_flood = flood_flag || port->is_mrouter; + int pgid; + + for (pgid = PGID_IPV4_MC_DATA; pgid <= PGID_IPV6_MC_CTRL; pgid++) + sparx5_pgid_update_mask(port, pgid, should_flood); +} + +static void sparx5_port_attr_bridge_flags(struct sparx5_port *port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & BR_MCAST_FLOOD) { + sparx5_pgid_update_mask(port, PGID_MC_FLOOD, !!(flags.val & BR_MCAST_FLOOD)); + sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD)); + } + + if (flags.mask & BR_FLOOD) + sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD)); + if (flags.mask & BR_BCAST_FLOOD) + sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD)); +} + +static void sparx5_attr_stp_state_set(struct sparx5_port *port, + u8 state) +{ + struct sparx5 *sparx5 = port->sparx5; + + if (!test_bit(port->portno, sparx5->bridge_mask)) { + netdev_err(port->ndev, + "Controlling non-bridged port %d?\n", port->portno); + return; + } + + switch (state) { + case BR_STATE_FORWARDING: + set_bit(port->portno, sparx5->bridge_fwd_mask); + fallthrough; + case BR_STATE_LEARNING: + set_bit(port->portno, sparx5->bridge_lrn_mask); + break; + + default: + /* All other states treated as blocking */ + clear_bit(port->portno, sparx5->bridge_fwd_mask); + clear_bit(port->portno, sparx5->bridge_lrn_mask); + break; + } + + /* apply the bridge_fwd_mask to all the ports */ + sparx5_update_fwd(sparx5); +} + +static void sparx5_port_attr_ageing_set(struct sparx5_port *port, + unsigned long ageing_clock_t) +{ + unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); + u32 ageing_time = jiffies_to_msecs(ageing_jiffies); + + sparx5_set_ageing(port->sparx5, ageing_time); +} + +static void sparx5_port_attr_mrouter_set(struct sparx5_port *port, + struct net_device *orig_dev, + bool enable) +{ + struct sparx5 *sparx5 = port->sparx5; + struct sparx5_mdb_entry *e; + bool flood_flag; + + if ((enable && port->is_mrouter) || (!enable && !port->is_mrouter)) + return; + + /* Add/del mrouter port on all active mdb entries in HW. + * Don't change entry port mask, since that represents + * ports that actually joined that group. + */ + mutex_lock(&sparx5->mdb_lock); + list_for_each_entry(e, &sparx5->mdb_entries, list) { + if (!test_bit(port->portno, e->port_mask) && + ether_addr_is_ip_mcast(e->addr)) + sparx5_pgid_update_mask(port, e->pgid_idx, enable); + } + mutex_unlock(&sparx5->mdb_lock); + + /* Enable/disable flooding depending on if port is mrouter port + * or if mcast flood is enabled. 
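+ * An mrouter port must receive all IP multicast traffic, so it + * stays flooded even when BR_MCAST_FLOOD is cleared on the port.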
+ */ + port->is_mrouter = enable; + flood_flag = br_port_flag_is_set(port->ndev, BR_MCAST_FLOOD); + sparx5_port_update_mcast_ip_flood(port, flood_flag); +} + +static int sparx5_port_attr_set(struct net_device *dev, const void *ctx, + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack) +{ + struct sparx5_port *port = netdev_priv(dev); + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + return sparx5_port_attr_pre_bridge_flags(port, + attr->u.brport_flags); + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + sparx5_port_attr_bridge_flags(port, attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + sparx5_attr_stp_state_set(port, attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + sparx5_port_attr_ageing_set(port, attr->u.ageing_time); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + /* Use PVID 1 when default_pvid is 0, to avoid + * collision with non-bridged ports. + */ + if (port->pvid == 0) + port->pvid = 1; + port->vlan_aware = attr->u.vlan_filtering; + sparx5_vlan_port_apply(port->sparx5, port); + break; + case SWITCHDEV_ATTR_ID_PORT_MROUTER: + sparx5_port_attr_mrouter_set(port, + attr->orig_dev, + attr->u.mrouter); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int sparx5_port_bridge_join(struct sparx5_port *port, + struct net_device *bridge, + struct netlink_ext_ack *extack) +{ + struct sparx5 *sparx5 = port->sparx5; + struct net_device *ndev = port->ndev; + int err; + + if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS)) + /* First bridged port */ + sparx5->hw_bridge_dev = bridge; + else + if (sparx5->hw_bridge_dev != bridge) + /* This is adding the port to a second bridge, this is + * unsupported + */ + return -ENODEV; + + set_bit(port->portno, sparx5->bridge_mask); + + err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL, + false, extack); + if (err) + goto err_switchdev_offload; + + /* Remove standalone port entry */ + sparx5_mact_forget(sparx5, ndev->dev_addr, 0); + + /* Port enters bridge mode, therefore we don't need to copy + * multicast frames to the CPU unless the bridge requests them + */ + __dev_mc_unsync(ndev, sparx5_mc_unsync); + + return 0; + +err_switchdev_offload: + clear_bit(port->portno, sparx5->bridge_mask); + return err; +} + +static void sparx5_port_bridge_leave(struct sparx5_port *port, + struct net_device *bridge) +{ + struct sparx5 *sparx5 = port->sparx5; + + switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL); + + clear_bit(port->portno, sparx5->bridge_mask); + if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS)) + sparx5->hw_bridge_dev = NULL; + + /* Clear bridge vlan settings before updating the port settings */ + port->vlan_aware = 0; + port->pvid = NULL_VID; + port->vid = NULL_VID; + + /* Forward frames to CPU */ + sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0); + + /* Port enters host mode, therefore restore the mc list */ + __dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync); +} + +static int sparx5_port_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct sparx5_port *port = netdev_priv(dev); + struct netlink_ext_ack *extack; + int err = 0; + + extack = netdev_notifier_info_to_extack(&info->info); + + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) + err = sparx5_port_bridge_join(port, info->upper_dev, + extack); + else + sparx5_port_bridge_leave(port, info->upper_dev); + + sparx5_vlan_port_apply(port->sparx5, port); + } + + return 
err; +} + +static int sparx5_port_add_addr(struct net_device *dev, bool up) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + u16 vid = port->pvid; + + if (up) + sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid); + else + sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid); + + return 0; +} + +static int sparx5_netdevice_port_event(struct net_device *dev, + struct notifier_block *nb, + unsigned long event, void *ptr) +{ + int err = 0; + + if (!sparx5_netdevice_check(dev)) + return 0; + + switch (event) { + case NETDEV_CHANGEUPPER: + err = sparx5_port_changeupper(dev, ptr); + break; + case NETDEV_PRE_UP: + err = sparx5_port_add_addr(dev, true); + break; + case NETDEV_DOWN: + err = sparx5_port_add_addr(dev, false); + break; + } + + return err; +} + +static int sparx5_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int ret = 0; + + ret = sparx5_netdevice_port_event(dev, nb, event, ptr); + + return notifier_from_errno(ret); +} + +static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work) +{ + struct sparx5_switchdev_event_work *switchdev_work = + container_of(work, struct sparx5_switchdev_event_work, work); + struct net_device *dev = switchdev_work->dev; + struct switchdev_notifier_fdb_info *fdb_info; + struct sparx5_port *port; + struct sparx5 *sparx5; + bool host_addr; + u16 vid; + + rtnl_lock(); + if (!sparx5_netdevice_check(dev)) { + host_addr = true; + sparx5 = switchdev_work->sparx5; + } else { + host_addr = false; + sparx5 = switchdev_work->sparx5; + port = netdev_priv(dev); + } + + fdb_info = &switchdev_work->fdb_info; + + /* Use PVID 1 when default_pvid is 0, to avoid + * collision with non-bridged ports. 
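+ * The bridge reports vid 0 for addresses learned while VLAN + * filtering is disabled.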
+ */ + if (fdb_info->vid == 0) + vid = 1; + else + vid = fdb_info->vid; + + switch (switchdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + if (host_addr) + sparx5_add_mact_entry(sparx5, dev, PGID_CPU, + fdb_info->addr, vid); + else + sparx5_add_mact_entry(sparx5, port->ndev, port->portno, + fdb_info->addr, vid); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + sparx5_del_mact_entry(sparx5, fdb_info->addr, vid); + break; + } + + rtnl_unlock(); + kfree(switchdev_work->fdb_info.addr); + kfree(switchdev_work); + dev_put(dev); +} + +static void sparx5_schedule_work(struct work_struct *work) +{ + queue_work(sparx5_owq, work); +} + +static int sparx5_switchdev_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + struct sparx5_switchdev_event_work *switchdev_work; + struct switchdev_notifier_fdb_info *fdb_info; + struct switchdev_notifier_info *info = ptr; + struct sparx5 *spx5; + int err; + + spx5 = container_of(nb, struct sparx5, switchdev_nb); + + switch (event) { + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + sparx5_netdevice_check, + sparx5_port_attr_set); + return notifier_from_errno(err); + case SWITCHDEV_FDB_ADD_TO_DEVICE: + fallthrough; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); + if (!switchdev_work) + return NOTIFY_BAD; + + switchdev_work->dev = dev; + switchdev_work->event = event; + switchdev_work->sparx5 = spx5; + + fdb_info = container_of(info, + struct switchdev_notifier_fdb_info, + info); + INIT_WORK(&switchdev_work->work, + sparx5_switchdev_bridge_fdb_event_work); + memcpy(&switchdev_work->fdb_info, ptr, + sizeof(switchdev_work->fdb_info)); + switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!switchdev_work->fdb_info.addr) + goto err_addr_alloc; + + ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, + fdb_info->addr); + dev_hold(dev); + + sparx5_schedule_work(&switchdev_work->work); + break; + } + + return NOTIFY_DONE; +err_addr_alloc: + kfree(switchdev_work); + return NOTIFY_BAD; +} + +static int sparx5_handle_port_vlan_add(struct net_device *dev, + struct notifier_block *nb, + const struct switchdev_obj_port_vlan *v) +{ + struct sparx5_port *port = netdev_priv(dev); + + if (netif_is_bridge_master(dev)) { + struct sparx5 *sparx5 = + container_of(nb, struct sparx5, + switchdev_blocking_nb); + + /* Flood broadcast to CPU */ + sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast, + v->vid); + return 0; + } + + if (!sparx5_netdevice_check(dev)) + return -EOPNOTSUPP; + + return sparx5_vlan_vid_add(port, v->vid, + v->flags & BRIDGE_VLAN_INFO_PVID, + v->flags & BRIDGE_VLAN_INFO_UNTAGGED); +} + +static int sparx5_alloc_mdb_entry(struct sparx5 *sparx5, + const unsigned char *addr, + u16 vid, + struct sparx5_mdb_entry **entry_out) +{ + struct sparx5_mdb_entry *entry; + u16 pgid_idx; + int err; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx); + if (err) { + kfree(entry); + return err; + } + + memcpy(entry->addr, addr, ETH_ALEN); + entry->vid = vid; + entry->pgid_idx = pgid_idx; + + mutex_lock(&sparx5->mdb_lock); + list_add_tail(&entry->list, &sparx5->mdb_entries); + mutex_unlock(&sparx5->mdb_lock); + + *entry_out = entry; + return 0; +} + +static void sparx5_free_mdb_entry(struct sparx5 *sparx5, + const unsigned char *addr, + u16 vid) +{ + struct sparx5_mdb_entry *entry, *tmp; + + 
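+ /* A vid of 0 acts as a wildcard: the first entry matching the + * address is removed regardless of its VLAN. + */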
mutex_lock(&sparx5->mdb_lock); + list_for_each_entry_safe(entry, tmp, &sparx5->mdb_entries, list) { + if ((vid == 0 || entry->vid == vid) && + ether_addr_equal(addr, entry->addr)) { + list_del(&entry->list); + + sparx5_pgid_free(sparx5, entry->pgid_idx); + kfree(entry); + goto out; + } + } + +out: + mutex_unlock(&sparx5->mdb_lock); +} + +static struct sparx5_mdb_entry *sparx5_mdb_get_entry(struct sparx5 *sparx5, + const unsigned char *addr, + u16 vid) +{ + struct sparx5_mdb_entry *e, *found = NULL; + + mutex_lock(&sparx5->mdb_lock); + list_for_each_entry(e, &sparx5->mdb_entries, list) { + if (ether_addr_equal(e->addr, addr) && e->vid == vid) { + found = e; + goto out; + } + } + +out: + mutex_unlock(&sparx5->mdb_lock); + return found; +} + +static void sparx5_cpu_copy_ena(struct sparx5 *spx5, u16 pgid, bool enable) +{ + spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(enable), + ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5, + ANA_AC_PGID_MISC_CFG(pgid)); +} + +static int sparx5_handle_port_mdb_add(struct net_device *dev, + struct notifier_block *nb, + const struct switchdev_obj_port_mdb *v) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *spx5 = port->sparx5; + struct sparx5_mdb_entry *entry; + bool is_host, is_new; + int err, i; + u16 vid; + + if (!sparx5_netdevice_check(dev)) + return -EOPNOTSUPP; + + is_host = netif_is_bridge_master(v->obj.orig_dev); + + /* When VLAN unaware the vlan value is not parsed and we receive vid 0. + * Fall back to bridge vid 1. + */ + if (!br_vlan_enabled(spx5->hw_bridge_dev)) + vid = 1; + else + vid = v->vid; + + is_new = false; + entry = sparx5_mdb_get_entry(spx5, v->addr, vid); + if (!entry) { + err = sparx5_alloc_mdb_entry(spx5, v->addr, vid, &entry); + is_new = true; + if (err) + return err; + } + + mutex_lock(&spx5->mdb_lock); + + /* Add any mrouter ports to the new entry */ + if (is_new && ether_addr_is_ip_mcast(v->addr)) + for (i = 0; i < SPX5_PORTS; i++) + if (spx5->ports[i] && spx5->ports[i]->is_mrouter) + sparx5_pgid_update_mask(spx5->ports[i], + entry->pgid_idx, + true); + + if (is_host && !entry->cpu_copy) { + sparx5_cpu_copy_ena(spx5, entry->pgid_idx, true); + entry->cpu_copy = true; + } else if (!is_host) { + sparx5_pgid_update_mask(port, entry->pgid_idx, true); + set_bit(port->portno, entry->port_mask); + } + mutex_unlock(&spx5->mdb_lock); + + sparx5_mact_learn(spx5, entry->pgid_idx, entry->addr, entry->vid); + + return 0; +} + +static int sparx5_handle_port_mdb_del(struct net_device *dev, + struct notifier_block *nb, + const struct switchdev_obj_port_mdb *v) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *spx5 = port->sparx5; + struct sparx5_mdb_entry *entry; + bool is_host; + u16 vid; + + if (!sparx5_netdevice_check(dev)) + return -EOPNOTSUPP; + + is_host = netif_is_bridge_master(v->obj.orig_dev); + + if (!br_vlan_enabled(spx5->hw_bridge_dev)) + vid = 1; + else + vid = v->vid; + + entry = sparx5_mdb_get_entry(spx5, v->addr, vid); + if (!entry) + return 0; + + mutex_lock(&spx5->mdb_lock); + if (is_host && entry->cpu_copy) { + sparx5_cpu_copy_ena(spx5, entry->pgid_idx, false); + entry->cpu_copy = false; + } else if (!is_host) { + clear_bit(port->portno, entry->port_mask); + + /* Port not mrouter port or addr is L2 mcast, remove port from mask. 
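+ * mrouter ports keep their membership of IP multicast groups even + * after the last listener on the port has left.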
*/ + if (!port->is_mrouter || !ether_addr_is_ip_mcast(v->addr)) + sparx5_pgid_update_mask(port, entry->pgid_idx, false); + } + mutex_unlock(&spx5->mdb_lock); + + if (bitmap_empty(entry->port_mask, SPX5_PORTS) && !entry->cpu_copy) { + /* Clear pgid in case mrouter ports exists + * that are not part of the group. + */ + sparx5_pgid_clear(spx5, entry->pgid_idx); + sparx5_mact_forget(spx5, entry->addr, entry->vid); + sparx5_free_mdb_entry(spx5, entry->addr, entry->vid); + } + return 0; +} + +static int sparx5_handle_port_obj_add(struct net_device *dev, + struct notifier_block *nb, + struct switchdev_notifier_port_obj_info *info) +{ + const struct switchdev_obj *obj = info->obj; + int err; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = sparx5_handle_port_vlan_add(dev, nb, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = sparx5_handle_port_mdb_add(dev, nb, + SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + + info->handled = true; + return err; +} + +static int sparx5_handle_port_vlan_del(struct net_device *dev, + struct notifier_block *nb, + u16 vid) +{ + struct sparx5_port *port = netdev_priv(dev); + int ret; + + /* Master bridge? */ + if (netif_is_bridge_master(dev)) { + struct sparx5 *sparx5 = + container_of(nb, struct sparx5, + switchdev_blocking_nb); + + sparx5_mact_forget(sparx5, dev->broadcast, vid); + return 0; + } + + if (!sparx5_netdevice_check(dev)) + return -EOPNOTSUPP; + + ret = sparx5_vlan_vid_del(port, vid); + if (ret) + return ret; + + return 0; +} + +static int sparx5_handle_port_obj_del(struct net_device *dev, + struct notifier_block *nb, + struct switchdev_notifier_port_obj_info *info) +{ + const struct switchdev_obj *obj = info->obj; + int err; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = sparx5_handle_port_vlan_del(dev, nb, + SWITCHDEV_OBJ_PORT_VLAN(obj)->vid); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = sparx5_handle_port_mdb_del(dev, nb, + SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + + info->handled = true; + return err; +} + +static int sparx5_switchdev_blocking_event(struct notifier_block *nb, + unsigned long event, + void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = sparx5_handle_port_obj_add(dev, nb, ptr); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + err = sparx5_handle_port_obj_del(dev, nb, ptr); + return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + sparx5_netdevice_check, + sparx5_port_attr_set); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +int sparx5_register_notifier_blocks(struct sparx5 *s5) +{ + int err; + + s5->netdevice_nb.notifier_call = sparx5_netdevice_event; + err = register_netdevice_notifier(&s5->netdevice_nb); + if (err) + return err; + + s5->switchdev_nb.notifier_call = sparx5_switchdev_event; + err = register_switchdev_notifier(&s5->switchdev_nb); + if (err) + goto err_switchdev_nb; + + s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event; + err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb); + if (err) + goto err_switchdev_blocking_nb; + + sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0); + if (!sparx5_owq) { + err = -ENOMEM; + goto err_switchdev_blocking_nb; + } + + return 
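+	       /* success; the unwind labels below run only on error */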
0; + +err_switchdev_blocking_nb: + unregister_switchdev_notifier(&s5->switchdev_nb); +err_switchdev_nb: + unregister_netdevice_notifier(&s5->netdevice_nb); + + return err; +} + +void sparx5_unregister_notifier_blocks(struct sparx5 *s5) +{ + destroy_workqueue(sparx5_owq); + + unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb); + unregister_switchdev_notifier(&s5->switchdev_nb); + unregister_netdevice_notifier(&s5->netdevice_nb); +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c new file mode 100644 index 000000000..dc2c3756e --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. + */ + +#include <net/pkt_cls.h> + +#include "sparx5_tc.h" +#include "sparx5_main.h" +#include "sparx5_qos.h" + +static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer, + u32 *idx) +{ + if (parent == TC_H_ROOT) { + *layer = 2; + *idx = portno; + } else { + u32 queue = TC_H_MIN(parent) - 1; + *layer = 0; + *idx = SPX5_HSCH_L0_GET_IDX(portno, queue); + } +} + +static int sparx5_tc_setup_qdisc_mqprio(struct net_device *ndev, + struct tc_mqprio_qopt_offload *m) +{ + m->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + if (m->qopt.num_tc == 0) + return sparx5_tc_mqprio_del(ndev); + else + return sparx5_tc_mqprio_add(ndev, m->qopt.num_tc); +} + +static int sparx5_tc_setup_qdisc_tbf(struct net_device *ndev, + struct tc_tbf_qopt_offload *qopt) +{ + struct sparx5_port *port = netdev_priv(ndev); + u32 layer, se_idx; + + sparx5_tc_get_layer_and_idx(qopt->parent, port->portno, &layer, + &se_idx); + + switch (qopt->command) { + case TC_TBF_REPLACE: + return sparx5_tc_tbf_add(port, &qopt->replace_params, layer, + se_idx); + case TC_TBF_DESTROY: + return sparx5_tc_tbf_del(port, layer, se_idx); + case TC_TBF_STATS: + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sparx5_tc_setup_qdisc_ets(struct net_device *ndev, + struct tc_ets_qopt_offload *qopt) +{ + struct tc_ets_qopt_offload_replace_params *params = + &qopt->replace_params; + struct sparx5_port *port = netdev_priv(ndev); + int i; + + /* Only allow ets on ports */ + if (qopt->parent != TC_H_ROOT) + return -EOPNOTSUPP; + + switch (qopt->command) { + case TC_ETS_REPLACE: + + /* We support eight priorities */ + if (params->bands != SPX5_PRIOS) + return -EOPNOTSUPP; + + /* Sanity checks */ + for (i = 0; i < SPX5_PRIOS; ++i) { + /* Priority map is *always* reverse e.g: 7 6 5 .. 
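+			 * 4 3 2 1 (i.e. priomap[i] must equal 7 - i; the
+			 * check below rejects any other mapping with
+			 * -EOPNOTSUPP), down to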
0 */ + if (params->priomap[i] != (7 - i)) + return -EOPNOTSUPP; + /* Throw an error if we receive zero weights by tc */ + if (params->quanta[i] && params->weights[i] == 0) { + pr_err("Invalid ets configuration; band %d has weight zero", + i); + return -EINVAL; + } + } + + return sparx5_tc_ets_add(port, params); + case TC_ETS_DESTROY: + + return sparx5_tc_ets_del(port); + case TC_ETS_GRAFT: + return -EOPNOTSUPP; + + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + return sparx5_tc_setup_qdisc_mqprio(ndev, type_data); + case TC_SETUP_QDISC_TBF: + return sparx5_tc_setup_qdisc_tbf(ndev, type_data); + case TC_SETUP_QDISC_ETS: + return sparx5_tc_setup_qdisc_ets(ndev, type_data); + default: + return -EOPNOTSUPP; + } + + return 0; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h new file mode 100644 index 000000000..5b55e11b7 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. + */ + +#ifndef __SPARX5_TC_H__ +#define __SPARX5_TC_H__ + +#include <linux/netdevice.h> + +int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data); + +#endif /* __SPARX5_TC_H__ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c new file mode 100644 index 000000000..34f954bbf --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. 
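+ *
+ * VLAN table programming, PGID port masks and bridge forwarding masks.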
+ */ + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" + +static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid) +{ + u32 mask[3]; + + /* Divide up mask in 32 bit words */ + bitmap_to_arr32(mask, sparx5->vlan_mask[vid], SPX5_PORTS); + + /* Output mask to respective registers */ + spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid)); + spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid)); + spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid)); + + return 0; +} + +void sparx5_vlan_init(struct sparx5 *sparx5) +{ + u16 vid; + + spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1), + ANA_L3_VLAN_CTRL_VLAN_ENA, + sparx5, + ANA_L3_VLAN_CTRL); + + /* Map VLAN = FID */ + for (vid = NULL_VID; vid < VLAN_N_VID; vid++) + spx5_rmw(ANA_L3_VLAN_CFG_VLAN_FID_SET(vid), + ANA_L3_VLAN_CFG_VLAN_FID, + sparx5, + ANA_L3_VLAN_CFG(vid)); +} + +void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno) +{ + struct sparx5_port *port = sparx5->ports[portno]; + + /* Configure PVID */ + spx5_rmw(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(0) | + ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid), + ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA | + ANA_CL_VLAN_CTRL_PORT_VID, + sparx5, + ANA_CL_VLAN_CTRL(port->portno)); +} + +int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid, + bool untagged) +{ + struct sparx5 *sparx5 = port->sparx5; + int ret; + + /* Untagged egress vlan classification */ + if (untagged && port->vid != vid) { + if (port->vid) { + netdev_err(port->ndev, + "Port already has a native VLAN: %d\n", + port->vid); + return -EBUSY; + } + port->vid = vid; + } + + /* Make the port a member of the VLAN */ + set_bit(port->portno, sparx5->vlan_mask[vid]); + ret = sparx5_vlant_set_mask(sparx5, vid); + if (ret) + return ret; + + /* Default ingress vlan classification */ + if (pvid) + port->pvid = vid; + + sparx5_vlan_port_apply(sparx5, port); + + return 0; +} + +int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid) +{ + struct sparx5 *sparx5 = port->sparx5; + int ret; + + /* 8021q removes VID 0 on module unload for all interfaces + * with VLAN filtering feature. We need to keep it to receive + * untagged traffic. + */ + if (vid == 0) + return 0; + + /* Stop the port from being a member of the vlan */ + clear_bit(port->portno, sparx5->vlan_mask[vid]); + ret = sparx5_vlant_set_mask(sparx5, vid); + if (ret) + return ret; + + /* Ingress */ + if (port->pvid == vid) + port->pvid = 0; + + /* Egress */ + if (port->vid == vid) + port->vid = 0; + + sparx5_vlan_port_apply(sparx5, port); + + return 0; +} + +void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable) +{ + struct sparx5 *sparx5 = port->sparx5; + u32 val, mask; + + /* mask is spread across 3 registers x 32 bit */ + if (port->portno < 32) { + mask = BIT(port->portno); + val = enable ? mask : 0; + spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG(pgid)); + } else if (port->portno < 64) { + mask = BIT(port->portno - 32); + val = enable ? mask : 0; + spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG1(pgid)); + } else if (port->portno < SPX5_PORTS) { + mask = BIT(port->portno - 64); + val = enable ? 
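+		       /* a port in the 32..63 range lands here; e.g.
+			* portno 47: BIT(47 - 32) selects bit 15 of
+			* ANA_AC_PGID_CFG1(pgid)
+			*/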
mask : 0; + spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG2(pgid)); + } else { + netdev_err(port->ndev, "Invalid port no: %d\n", port->portno); + } +} + +void sparx5_pgid_clear(struct sparx5 *spx5, int pgid) +{ + spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid)); + spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid)); + spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid)); +} + +void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3]) +{ + portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid)); + portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid)); + portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid)); +} + +void sparx5_update_fwd(struct sparx5 *sparx5) +{ + DECLARE_BITMAP(workmask, SPX5_PORTS); + u32 mask[3]; + int port; + + /* Divide up fwd mask in 32 bit words */ + bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS); + + /* Update flood masks */ + for (port = PGID_UC_FLOOD; port <= PGID_BCAST; port++) { + spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port)); + spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port)); + spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port)); + } + + /* Update SRC masks */ + for (port = 0; port < SPX5_PORTS; port++) { + if (test_bit(port, sparx5->bridge_fwd_mask)) { + /* Allow to send to all bridged but self */ + bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS); + clear_bit(port, workmask); + bitmap_to_arr32(mask, workmask, SPX5_PORTS); + spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port)); + spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port)); + spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port)); + } else { + spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port)); + spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port)); + spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port)); + } + } + + /* Learning enabled only for bridged ports */ + bitmap_and(workmask, sparx5->bridge_fwd_mask, + sparx5->bridge_lrn_mask, SPX5_PORTS); + bitmap_to_arr32(mask, workmask, SPX5_PORTS); + + /* Apply learning mask */ + spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG); + spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1); + spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2); +} + +void sparx5_vlan_port_apply(struct sparx5 *sparx5, + struct sparx5_port *port) + +{ + u32 val; + + /* Configure PVID, vlan aware */ + val = ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(port->vlan_aware) | + ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(port->vlan_aware) | + ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid); + spx5_wr(val, sparx5, ANA_CL_VLAN_CTRL(port->portno)); + + val = 0; + if (port->vlan_aware && !port->pvid) + /* If port is vlan-aware and tagged, drop untagged and + * priority tagged frames. + */ + val = ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(1) | + ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(1) | + ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(1); + spx5_wr(val, sparx5, + ANA_CL_VLAN_FILTER_CTRL(port->portno, 0)); + + /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */ + val = REW_TAG_CTRL_TAG_TPID_CFG_SET(0); + if (port->vlan_aware) { + if (port->vid) + /* Tag all frames except when VID == DEFAULT_VLAN */ + val |= REW_TAG_CTRL_TAG_CFG_SET(1); + else + val |= REW_TAG_CTRL_TAG_CFG_SET(3); + } + spx5_wr(val, sparx5, REW_TAG_CTRL(port->portno)); + + /* Egress VID */ + spx5_rmw(REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid), + REW_PORT_VLAN_CFG_PORT_VID, + sparx5, + REW_PORT_VLAN_CFG(port->portno)); +} |
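Editorial note on the port-mask layout used throughout sparx5_vlan.c: the switch has more ports than fit in one register, so every per-port mask (VLAN membership, PGID, source and learning masks) is split across three 32-bit register words via bitmap_to_arr32() or explicit BIT() arithmetic. The stand-alone C sketch below reproduces the portno -> (word, bit) mapping outside the kernel. It is an illustration only; the 65-port count (SPX5_PORTS) and the ANA_AC_PGID_CFG* names are assumptions carried over from the driver sources, not part of this patch.

#include <stdio.h>

#define SPX5_PORTS 65 /* assumed to match the driver's port count */

/* Mirror of the mapping in sparx5_pgid_update_mask(): word 0 covers
 * ports 0..31, word 1 covers ports 32..63 and word 2 covers the
 * remaining port 64.
 */
static void pgid_locate(unsigned int portno)
{
	if (portno >= SPX5_PORTS) {
		/* mirrors the driver's netdev_err() rejection path */
		printf("port %u: invalid\n", portno);
		return;
	}
	printf("port %2u -> PGID word %u, bit %u\n",
	       portno, portno / 32, portno % 32);
}

int main(void)
{
	pgid_locate(5);  /* word 0 (ANA_AC_PGID_CFG),  bit 5  */
	pgid_locate(47); /* word 1 (ANA_AC_PGID_CFG1), bit 15 */
	pgid_locate(64); /* word 2 (ANA_AC_PGID_CFG2), bit 0  */
	pgid_locate(70); /* rejected as out of range */
	return 0;
}

The same word split explains why sparx5_update_fwd() writes each flood, source and learning mask as three consecutive register writes after a single bitmap_to_arr32() conversion.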