From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Thu, 11 Apr 2024 10:27:49 +0200
Subject: Adding upstream version 6.6.15.

Signed-off-by: Daniel Baumann
---
 drivers/net/ethernet/renesas/Kconfig         |   57 +
 drivers/net/ethernet/renesas/Makefile        |   14 +
 drivers/net/ethernet/renesas/ravb.h          | 1134 ++++++++
 drivers/net/ethernet/renesas/ravb_main.c     | 3101 ++++++++++++++++++++++
 drivers/net/ethernet/renesas/ravb_ptp.c      |  351 +++
 drivers/net/ethernet/renesas/rcar_gen4_ptp.c |  181 ++
 drivers/net/ethernet/renesas/rcar_gen4_ptp.h |   72 +
 drivers/net/ethernet/renesas/rswitch.c       | 2013 +++++++++++++++
 drivers/net/ethernet/renesas/rswitch.h       | 1022 ++++++++
 drivers/net/ethernet/renesas/sh_eth.c        | 3578 ++++++++++++++++++++++++++
 drivers/net/ethernet/renesas/sh_eth.h        |  567 ++++
 11 files changed, 12090 insertions(+)
 create mode 100644 drivers/net/ethernet/renesas/Kconfig
 create mode 100644 drivers/net/ethernet/renesas/Makefile
 create mode 100644 drivers/net/ethernet/renesas/ravb.h
 create mode 100644 drivers/net/ethernet/renesas/ravb_main.c
 create mode 100644 drivers/net/ethernet/renesas/ravb_ptp.c
 create mode 100644 drivers/net/ethernet/renesas/rcar_gen4_ptp.c
 create mode 100644 drivers/net/ethernet/renesas/rcar_gen4_ptp.h
 create mode 100644 drivers/net/ethernet/renesas/rswitch.c
 create mode 100644 drivers/net/ethernet/renesas/rswitch.h
 create mode 100644 drivers/net/ethernet/renesas/sh_eth.c
 create mode 100644 drivers/net/ethernet/renesas/sh_eth.h

(limited to 'drivers/net/ethernet/renesas')

diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
new file mode 100644
index 0000000000..3ceb57408e
--- /dev/null
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Renesas device configuration
+#
+
+config NET_VENDOR_RENESAS
+	bool "Renesas devices"
+	default y
+	help
+	  If you have a network (Ethernet) card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Renesas devices. If you say Y, you will be asked
+	  for your specific device in the following questions.
+
+if NET_VENDOR_RENESAS
+
+config SH_ETH
+	tristate "Renesas SuperH Ethernet support"
+	depends on ARCH_RENESAS || SUPERH || COMPILE_TEST
+	select CRC32
+	select MII
+	select MDIO_BITBANG
+	select PHYLIB
+	help
+	  Renesas SuperH Ethernet device driver.
+	  This driver supports the following CPUs:
+	  - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
+	    R8A7740, R8A774x, R8A777x and R8A779x.
+
+config RAVB
+	tristate "Renesas Ethernet AVB support"
+	depends on ARCH_RENESAS || COMPILE_TEST
+	depends on PTP_1588_CLOCK_OPTIONAL
+	select CRC32
+	select MII
+	select MDIO_BITBANG
+	select PHYLIB
+	help
+	  Renesas Ethernet AVB device driver.
+	  This driver supports the following SoCs:
+	  - R8A779x.
+
+config RENESAS_ETHER_SWITCH
+	tristate "Renesas Ethernet Switch support"
+	depends on ARCH_RENESAS || COMPILE_TEST
+	depends on PTP_1588_CLOCK_OPTIONAL
+	select CRC32
+	select MII
+	select PHYLINK
+	help
+	  Renesas Ethernet Switch device driver.
+	  This driver supports the following SoCs:
+	  - R8A779Fx.
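For reference, the three symbols combine in a board configuration along the lines of the following .config fragment (illustrative only: whether each driver is built in or modular is a platform decision, and each entry is further gated by the dependencies listed above):

```
CONFIG_NET_VENDOR_RENESAS=y
CONFIG_SH_ETH=m
CONFIG_RAVB=m
CONFIG_RENESAS_ETHER_SWITCH=m
```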
+ +endif # NET_VENDOR_RENESAS diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile new file mode 100644 index 0000000000..5920058934 --- /dev/null +++ b/drivers/net/ethernet/renesas/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Renesas device drivers. +# + +obj-$(CONFIG_SH_ETH) += sh_eth.o + +ravb-objs := ravb_main.o ravb_ptp.o + +obj-$(CONFIG_RAVB) += ravb.o + +rswitch_drv-objs := rswitch.o rcar_gen4_ptp.o + +obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch_drv.o diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h new file mode 100644 index 0000000000..e0f8276cff --- /dev/null +++ b/drivers/net/ethernet/renesas/ravb.h @@ -0,0 +1,1134 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Renesas Ethernet AVB device driver + * + * Copyright (C) 2014-2015 Renesas Electronics Corporation + * Copyright (C) 2015 Renesas Solutions Corp. + * Copyright (C) 2015-2016 Cogent Embedded, Inc. + * + * Based on the SuperH Ethernet driver + */ + +#ifndef __RAVB_H__ +#define __RAVB_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define BE_TX_RING_SIZE 64 /* TX ring size for Best Effort */ +#define BE_RX_RING_SIZE 1024 /* RX ring size for Best Effort */ +#define NC_TX_RING_SIZE 64 /* TX ring size for Network Control */ +#define NC_RX_RING_SIZE 64 /* RX ring size for Network Control */ +#define BE_TX_RING_MIN 64 +#define BE_RX_RING_MIN 64 +#define BE_TX_RING_MAX 1024 +#define BE_RX_RING_MAX 2048 + +#define PKT_BUF_SZ 1538 + +/* Driver's parameters */ +#define RAVB_ALIGN 128 + +/* Hardware time stamp */ +#define RAVB_TXTSTAMP_VALID 0x00000001 /* TX timestamp valid */ +#define RAVB_TXTSTAMP_ENABLED 0x00000010 /* Enable TX timestamping */ + +#define RAVB_RXTSTAMP_VALID 0x00000001 /* RX timestamp valid */ +#define RAVB_RXTSTAMP_TYPE 0x00000006 /* RX type mask */ +#define RAVB_RXTSTAMP_TYPE_V2_L2_EVENT 0x00000002 +#define RAVB_RXTSTAMP_TYPE_ALL 0x00000006 +#define RAVB_RXTSTAMP_ENABLED 0x00000010 /* Enable RX timestamping */ + +enum ravb_reg { + /* AVB-DMAC registers */ + CCC = 0x0000, + DBAT = 0x0004, + DLR = 0x0008, + CSR = 0x000C, + CDAR0 = 0x0010, + CDAR1 = 0x0014, + CDAR2 = 0x0018, + CDAR3 = 0x001C, + CDAR4 = 0x0020, + CDAR5 = 0x0024, + CDAR6 = 0x0028, + CDAR7 = 0x002C, + CDAR8 = 0x0030, + CDAR9 = 0x0034, + CDAR10 = 0x0038, + CDAR11 = 0x003C, + CDAR12 = 0x0040, + CDAR13 = 0x0044, + CDAR14 = 0x0048, + CDAR15 = 0x004C, + CDAR16 = 0x0050, + CDAR17 = 0x0054, + CDAR18 = 0x0058, + CDAR19 = 0x005C, + CDAR20 = 0x0060, + CDAR21 = 0x0064, + ESR = 0x0088, + APSR = 0x008C, /* R-Car Gen3 only */ + RCR = 0x0090, + RQC0 = 0x0094, + RQC1 = 0x0098, + RQC2 = 0x009C, + RQC3 = 0x00A0, + RQC4 = 0x00A4, + RPC = 0x00B0, + RTC = 0x00B4, /* R-Car Gen3 and RZ/G2L only */ + UFCW = 0x00BC, + UFCS = 0x00C0, + UFCV0 = 0x00C4, + UFCV1 = 0x00C8, + UFCV2 = 0x00CC, + UFCV3 = 0x00D0, + UFCV4 = 0x00D4, + UFCD0 = 0x00E0, + UFCD1 = 0x00E4, + UFCD2 = 0x00E8, + UFCD3 = 0x00EC, + UFCD4 = 0x00F0, + SFO = 0x00FC, + SFP0 = 0x0100, + SFP1 = 0x0104, + SFP2 = 0x0108, + SFP3 = 0x010C, + SFP4 = 0x0110, + SFP5 = 0x0114, + SFP6 = 0x0118, + SFP7 = 0x011C, + SFP8 = 0x0120, + SFP9 = 0x0124, + SFP10 = 0x0128, + SFP11 = 0x012C, + SFP12 = 0x0130, + SFP13 = 0x0134, + SFP14 = 0x0138, + SFP15 = 0x013C, + SFP16 = 0x0140, + SFP17 = 0x0144, + SFP18 = 0x0148, + SFP19 = 0x014C, + SFP20 = 0x0150, + SFP21 = 0x0154, + SFP22 = 0x0158, + SFP23 = 0x015C, + SFP24 = 0x0160, + SFP25 = 0x0164, + SFP26 = 0x0168, + SFP27 = 0x016C, + SFP28 = 
0x0170, + SFP29 = 0x0174, + SFP30 = 0x0178, + SFP31 = 0x017C, + SFM0 = 0x01C0, + SFM1 = 0x01C4, + TGC = 0x0300, + TCCR = 0x0304, + TSR = 0x0308, + TFA0 = 0x0310, + TFA1 = 0x0314, + TFA2 = 0x0318, + CIVR0 = 0x0320, + CIVR1 = 0x0324, + CDVR0 = 0x0328, + CDVR1 = 0x032C, + CUL0 = 0x0330, + CUL1 = 0x0334, + CLL0 = 0x0338, + CLL1 = 0x033C, + DIC = 0x0350, + DIS = 0x0354, + EIC = 0x0358, + EIS = 0x035C, + RIC0 = 0x0360, + RIS0 = 0x0364, + RIC1 = 0x0368, + RIS1 = 0x036C, + RIC2 = 0x0370, + RIS2 = 0x0374, + TIC = 0x0378, + TIS = 0x037C, + ISS = 0x0380, + CIE = 0x0384, /* R-Car Gen3 only */ + GCCR = 0x0390, + GMTT = 0x0394, + GPTC = 0x0398, + GTI = 0x039C, + GTO0 = 0x03A0, + GTO1 = 0x03A4, + GTO2 = 0x03A8, + GIC = 0x03AC, + GIS = 0x03B0, + GCPT = 0x03B4, /* Documented for R-Car Gen3 only */ + GCT0 = 0x03B8, + GCT1 = 0x03BC, + GCT2 = 0x03C0, + GIE = 0x03CC, /* R-Car Gen3 only */ + GID = 0x03D0, /* R-Car Gen3 only */ + DIL = 0x0440, /* R-Car Gen3 only */ + RIE0 = 0x0460, /* R-Car Gen3 only */ + RID0 = 0x0464, /* R-Car Gen3 only */ + RIE2 = 0x0470, /* R-Car Gen3 only */ + RID2 = 0x0474, /* R-Car Gen3 only */ + TIE = 0x0478, /* R-Car Gen3 only */ + TID = 0x047c, /* R-Car Gen3 only */ + + /* E-MAC registers */ + ECMR = 0x0500, + RFLR = 0x0508, + ECSR = 0x0510, + ECSIPR = 0x0518, + PIR = 0x0520, + PSR = 0x0528, + PIPR = 0x052c, + CXR31 = 0x0530, /* RZ/G2L only */ + CXR35 = 0x0540, /* RZ/G2L only */ + MPR = 0x0558, + PFTCR = 0x055c, + PFRCR = 0x0560, + GECMR = 0x05b0, + MAHR = 0x05c0, + MALR = 0x05c8, + TROCR = 0x0700, /* R-Car Gen3 and RZ/G2L only */ + CXR41 = 0x0708, /* RZ/G2L only */ + CXR42 = 0x0710, /* RZ/G2L only */ + CEFCR = 0x0740, + FRECR = 0x0748, + TSFRCR = 0x0750, + TLFRCR = 0x0758, + RFCR = 0x0760, + MAFCR = 0x0778, + CSR0 = 0x0800, /* RZ/G2L only */ +}; + + +/* Register bits of the Ethernet AVB */ +/* CCC */ +enum CCC_BIT { + CCC_OPC = 0x00000003, + CCC_OPC_RESET = 0x00000000, + CCC_OPC_CONFIG = 0x00000001, + CCC_OPC_OPERATION = 0x00000002, + CCC_GAC = 0x00000080, + CCC_DTSR = 0x00000100, + CCC_CSEL = 0x00030000, + CCC_CSEL_HPB = 0x00010000, + CCC_CSEL_ETH_TX = 0x00020000, + CCC_CSEL_GMII_REF = 0x00030000, + CCC_LBME = 0x01000000, +}; + +/* CSR */ +enum CSR_BIT { + CSR_OPS = 0x0000000F, + CSR_OPS_RESET = 0x00000001, + CSR_OPS_CONFIG = 0x00000002, + CSR_OPS_OPERATION = 0x00000004, + CSR_OPS_STANDBY = 0x00000008, /* Documented for R-Car Gen3 only */ + CSR_DTS = 0x00000100, + CSR_TPO0 = 0x00010000, + CSR_TPO1 = 0x00020000, + CSR_TPO2 = 0x00040000, + CSR_TPO3 = 0x00080000, + CSR_RPO = 0x00100000, +}; + +/* ESR */ +enum ESR_BIT { + ESR_EQN = 0x0000001F, + ESR_ET = 0x00000F00, + ESR_EIL = 0x00001000, +}; + +/* APSR (R-Car Gen3 only) */ +enum APSR_BIT { + APSR_MEMS = 0x00000002, /* Undocumented */ + APSR_CMSW = 0x00000010, + APSR_RDM = 0x00002000, + APSR_TDM = 0x00004000, +}; + +/* RCR */ +enum RCR_BIT { + RCR_EFFS = 0x00000001, + RCR_ENCF = 0x00000002, + RCR_ESF = 0x0000000C, + RCR_ETS0 = 0x00000010, + RCR_ETS2 = 0x00000020, + RCR_RFCL = 0x1FFF0000, +}; + +/* RQC0/1/2/3/4 */ +enum RQC_BIT { + RQC_RSM0 = 0x00000003, + RQC_UFCC0 = 0x00000030, + RQC_RSM1 = 0x00000300, + RQC_UFCC1 = 0x00003000, + RQC_RSM2 = 0x00030000, + RQC_UFCC2 = 0x00300000, + RQC_RSM3 = 0x03000000, + RQC_UFCC3 = 0x30000000, +}; + +/* RPC */ +enum RPC_BIT { + RPC_PCNT = 0x00000700, + RPC_DCNT = 0x00FF0000, +}; + +/* UFCW */ +enum UFCW_BIT { + UFCW_WL0 = 0x0000003F, + UFCW_WL1 = 0x00003F00, + UFCW_WL2 = 0x003F0000, + UFCW_WL3 = 0x3F000000, +}; + +/* UFCS */ +enum UFCS_BIT { + UFCS_SL0 = 0x0000003F, + UFCS_SL1 = 0x00003F00, + UFCS_SL2 
= 0x003F0000, + UFCS_SL3 = 0x3F000000, +}; + +/* UFCV0/1/2/3/4 */ +enum UFCV_BIT { + UFCV_CV0 = 0x0000003F, + UFCV_CV1 = 0x00003F00, + UFCV_CV2 = 0x003F0000, + UFCV_CV3 = 0x3F000000, +}; + +/* UFCD0/1/2/3/4 */ +enum UFCD_BIT { + UFCD_DV0 = 0x0000003F, + UFCD_DV1 = 0x00003F00, + UFCD_DV2 = 0x003F0000, + UFCD_DV3 = 0x3F000000, +}; + +/* SFO */ +enum SFO_BIT { + SFO_FBP = 0x0000003F, +}; + +/* RTC */ +enum RTC_BIT { + RTC_MFL0 = 0x00000FFF, + RTC_MFL1 = 0x0FFF0000, +}; + +/* TGC */ +enum TGC_BIT { + TGC_TSM0 = 0x00000001, + TGC_TSM1 = 0x00000002, + TGC_TSM2 = 0x00000004, + TGC_TSM3 = 0x00000008, + TGC_TQP = 0x00000030, + TGC_TQP_NONAVB = 0x00000000, + TGC_TQP_AVBMODE1 = 0x00000010, + TGC_TQP_AVBMODE2 = 0x00000030, + TGC_TBD0 = 0x00000300, + TGC_TBD1 = 0x00003000, + TGC_TBD2 = 0x00030000, + TGC_TBD3 = 0x00300000, +}; + +/* TCCR */ +enum TCCR_BIT { + TCCR_TSRQ0 = 0x00000001, + TCCR_TSRQ1 = 0x00000002, + TCCR_TSRQ2 = 0x00000004, + TCCR_TSRQ3 = 0x00000008, + TCCR_TFEN = 0x00000100, + TCCR_TFR = 0x00000200, +}; + +/* TSR */ +enum TSR_BIT { + TSR_CCS0 = 0x00000003, + TSR_CCS1 = 0x0000000C, + TSR_TFFL = 0x00000700, +}; + +/* TFA2 */ +enum TFA2_BIT { + TFA2_TSV = 0x0000FFFF, + TFA2_TST = 0x03FF0000, +}; + +/* DIC */ +enum DIC_BIT { + DIC_DPE1 = 0x00000002, + DIC_DPE2 = 0x00000004, + DIC_DPE3 = 0x00000008, + DIC_DPE4 = 0x00000010, + DIC_DPE5 = 0x00000020, + DIC_DPE6 = 0x00000040, + DIC_DPE7 = 0x00000080, + DIC_DPE8 = 0x00000100, + DIC_DPE9 = 0x00000200, + DIC_DPE10 = 0x00000400, + DIC_DPE11 = 0x00000800, + DIC_DPE12 = 0x00001000, + DIC_DPE13 = 0x00002000, + DIC_DPE14 = 0x00004000, + DIC_DPE15 = 0x00008000, +}; + +/* DIS */ +enum DIS_BIT { + DIS_DPF1 = 0x00000002, + DIS_DPF2 = 0x00000004, + DIS_DPF3 = 0x00000008, + DIS_DPF4 = 0x00000010, + DIS_DPF5 = 0x00000020, + DIS_DPF6 = 0x00000040, + DIS_DPF7 = 0x00000080, + DIS_DPF8 = 0x00000100, + DIS_DPF9 = 0x00000200, + DIS_DPF10 = 0x00000400, + DIS_DPF11 = 0x00000800, + DIS_DPF12 = 0x00001000, + DIS_DPF13 = 0x00002000, + DIS_DPF14 = 0x00004000, + DIS_DPF15 = 0x00008000, +}; + +/* EIC */ +enum EIC_BIT { + EIC_MREE = 0x00000001, + EIC_MTEE = 0x00000002, + EIC_QEE = 0x00000004, + EIC_SEE = 0x00000008, + EIC_CLLE0 = 0x00000010, + EIC_CLLE1 = 0x00000020, + EIC_CULE0 = 0x00000040, + EIC_CULE1 = 0x00000080, + EIC_TFFE = 0x00000100, +}; + +/* EIS */ +enum EIS_BIT { + EIS_MREF = 0x00000001, + EIS_MTEF = 0x00000002, + EIS_QEF = 0x00000004, + EIS_SEF = 0x00000008, + EIS_CLLF0 = 0x00000010, + EIS_CLLF1 = 0x00000020, + EIS_CULF0 = 0x00000040, + EIS_CULF1 = 0x00000080, + EIS_TFFF = 0x00000100, + EIS_QFS = 0x00010000, + EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)), +}; + +/* RIC0 */ +enum RIC0_BIT { + RIC0_FRE0 = 0x00000001, + RIC0_FRE1 = 0x00000002, + RIC0_FRE2 = 0x00000004, + RIC0_FRE3 = 0x00000008, + RIC0_FRE4 = 0x00000010, + RIC0_FRE5 = 0x00000020, + RIC0_FRE6 = 0x00000040, + RIC0_FRE7 = 0x00000080, + RIC0_FRE8 = 0x00000100, + RIC0_FRE9 = 0x00000200, + RIC0_FRE10 = 0x00000400, + RIC0_FRE11 = 0x00000800, + RIC0_FRE12 = 0x00001000, + RIC0_FRE13 = 0x00002000, + RIC0_FRE14 = 0x00004000, + RIC0_FRE15 = 0x00008000, + RIC0_FRE16 = 0x00010000, + RIC0_FRE17 = 0x00020000, +}; + +/* RIC0 */ +enum RIS0_BIT { + RIS0_FRF0 = 0x00000001, + RIS0_FRF1 = 0x00000002, + RIS0_FRF2 = 0x00000004, + RIS0_FRF3 = 0x00000008, + RIS0_FRF4 = 0x00000010, + RIS0_FRF5 = 0x00000020, + RIS0_FRF6 = 0x00000040, + RIS0_FRF7 = 0x00000080, + RIS0_FRF8 = 0x00000100, + RIS0_FRF9 = 0x00000200, + RIS0_FRF10 = 0x00000400, + RIS0_FRF11 = 0x00000800, + RIS0_FRF12 = 0x00001000, + RIS0_FRF13 = 0x00002000, + 
RIS0_FRF14 = 0x00004000, + RIS0_FRF15 = 0x00008000, + RIS0_FRF16 = 0x00010000, + RIS0_FRF17 = 0x00020000, + RIS0_RESERVED = GENMASK(31, 18), +}; + +/* RIC1 */ +enum RIC1_BIT { + RIC1_RFWE = 0x80000000, +}; + +/* RIS1 */ +enum RIS1_BIT { + RIS1_RFWF = 0x80000000, +}; + +/* RIC2 */ +enum RIC2_BIT { + RIC2_QFE0 = 0x00000001, + RIC2_QFE1 = 0x00000002, + RIC2_QFE2 = 0x00000004, + RIC2_QFE3 = 0x00000008, + RIC2_QFE4 = 0x00000010, + RIC2_QFE5 = 0x00000020, + RIC2_QFE6 = 0x00000040, + RIC2_QFE7 = 0x00000080, + RIC2_QFE8 = 0x00000100, + RIC2_QFE9 = 0x00000200, + RIC2_QFE10 = 0x00000400, + RIC2_QFE11 = 0x00000800, + RIC2_QFE12 = 0x00001000, + RIC2_QFE13 = 0x00002000, + RIC2_QFE14 = 0x00004000, + RIC2_QFE15 = 0x00008000, + RIC2_QFE16 = 0x00010000, + RIC2_QFE17 = 0x00020000, + RIC2_RFFE = 0x80000000, +}; + +/* RIS2 */ +enum RIS2_BIT { + RIS2_QFF0 = 0x00000001, + RIS2_QFF1 = 0x00000002, + RIS2_QFF2 = 0x00000004, + RIS2_QFF3 = 0x00000008, + RIS2_QFF4 = 0x00000010, + RIS2_QFF5 = 0x00000020, + RIS2_QFF6 = 0x00000040, + RIS2_QFF7 = 0x00000080, + RIS2_QFF8 = 0x00000100, + RIS2_QFF9 = 0x00000200, + RIS2_QFF10 = 0x00000400, + RIS2_QFF11 = 0x00000800, + RIS2_QFF12 = 0x00001000, + RIS2_QFF13 = 0x00002000, + RIS2_QFF14 = 0x00004000, + RIS2_QFF15 = 0x00008000, + RIS2_QFF16 = 0x00010000, + RIS2_QFF17 = 0x00020000, + RIS2_RFFF = 0x80000000, + RIS2_RESERVED = GENMASK(30, 18), +}; + +/* TIC */ +enum TIC_BIT { + TIC_FTE0 = 0x00000001, /* Documented for R-Car Gen3 only */ + TIC_FTE1 = 0x00000002, /* Documented for R-Car Gen3 only */ + TIC_TFUE = 0x00000100, + TIC_TFWE = 0x00000200, +}; + +/* TIS */ +enum TIS_BIT { + TIS_FTF0 = 0x00000001, /* Documented for R-Car Gen3 only */ + TIS_FTF1 = 0x00000002, /* Documented for R-Car Gen3 only */ + TIS_TFUF = 0x00000100, + TIS_TFWF = 0x00000200, + TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4)) +}; + +/* ISS */ +enum ISS_BIT { + ISS_FRS = 0x00000001, /* Documented for R-Car Gen3 only */ + ISS_FTS = 0x00000004, /* Documented for R-Car Gen3 only */ + ISS_ES = 0x00000040, + ISS_MS = 0x00000080, + ISS_TFUS = 0x00000100, + ISS_TFWS = 0x00000200, + ISS_RFWS = 0x00001000, + ISS_CGIS = 0x00002000, + ISS_DPS1 = 0x00020000, + ISS_DPS2 = 0x00040000, + ISS_DPS3 = 0x00080000, + ISS_DPS4 = 0x00100000, + ISS_DPS5 = 0x00200000, + ISS_DPS6 = 0x00400000, + ISS_DPS7 = 0x00800000, + ISS_DPS8 = 0x01000000, + ISS_DPS9 = 0x02000000, + ISS_DPS10 = 0x04000000, + ISS_DPS11 = 0x08000000, + ISS_DPS12 = 0x10000000, + ISS_DPS13 = 0x20000000, + ISS_DPS14 = 0x40000000, + ISS_DPS15 = 0x80000000, +}; + +/* CIE (R-Car Gen3 only) */ +enum CIE_BIT { + CIE_CRIE = 0x00000001, + CIE_CTIE = 0x00000100, + CIE_RQFM = 0x00010000, + CIE_CL0M = 0x00020000, + CIE_RFWL = 0x00040000, + CIE_RFFL = 0x00080000, +}; + +/* GCCR */ +enum GCCR_BIT { + GCCR_TCR = 0x00000003, + GCCR_TCR_NOREQ = 0x00000000, /* No request */ + GCCR_TCR_RESET = 0x00000001, /* gPTP/AVTP presentation timer reset */ + GCCR_TCR_CAPTURE = 0x00000003, /* Capture value set in GCCR.TCSS */ + GCCR_LTO = 0x00000004, + GCCR_LTI = 0x00000008, + GCCR_LPTC = 0x00000010, + GCCR_LMTT = 0x00000020, + GCCR_TCSS = 0x00000300, + GCCR_TCSS_GPTP = 0x00000000, /* gPTP timer value */ + GCCR_TCSS_ADJGPTP = 0x00000100, /* Adjusted gPTP timer value */ + GCCR_TCSS_AVTP = 0x00000200, /* AVTP presentation time value */ +}; + +/* GTI */ +enum GTI_BIT { + GTI_TIV = 0x0FFFFFFF, +}; + +#define GTI_TIV_MAX GTI_TIV +#define GTI_TIV_MIN 0x20 + +/* GIC */ +enum GIC_BIT { + GIC_PTCE = 0x00000001, /* Documented for R-Car Gen3 only */ + GIC_PTME = 0x00000004, +}; + +/* GIS */ 
+enum GIS_BIT { + GIS_PTCF = 0x00000001, /* Documented for R-Car Gen3 only */ + GIS_PTMF = 0x00000004, + GIS_RESERVED = GENMASK(15, 10), +}; + +/* GIE (R-Car Gen3 only) */ +enum GIE_BIT { + GIE_PTCS = 0x00000001, + GIE_PTOS = 0x00000002, + GIE_PTMS0 = 0x00000004, + GIE_PTMS1 = 0x00000008, + GIE_PTMS2 = 0x00000010, + GIE_PTMS3 = 0x00000020, + GIE_PTMS4 = 0x00000040, + GIE_PTMS5 = 0x00000080, + GIE_PTMS6 = 0x00000100, + GIE_PTMS7 = 0x00000200, + GIE_ATCS0 = 0x00010000, + GIE_ATCS1 = 0x00020000, + GIE_ATCS2 = 0x00040000, + GIE_ATCS3 = 0x00080000, + GIE_ATCS4 = 0x00100000, + GIE_ATCS5 = 0x00200000, + GIE_ATCS6 = 0x00400000, + GIE_ATCS7 = 0x00800000, + GIE_ATCS8 = 0x01000000, + GIE_ATCS9 = 0x02000000, + GIE_ATCS10 = 0x04000000, + GIE_ATCS11 = 0x08000000, + GIE_ATCS12 = 0x10000000, + GIE_ATCS13 = 0x20000000, + GIE_ATCS14 = 0x40000000, + GIE_ATCS15 = 0x80000000, +}; + +/* GID (R-Car Gen3 only) */ +enum GID_BIT { + GID_PTCD = 0x00000001, + GID_PTOD = 0x00000002, + GID_PTMD0 = 0x00000004, + GID_PTMD1 = 0x00000008, + GID_PTMD2 = 0x00000010, + GID_PTMD3 = 0x00000020, + GID_PTMD4 = 0x00000040, + GID_PTMD5 = 0x00000080, + GID_PTMD6 = 0x00000100, + GID_PTMD7 = 0x00000200, + GID_ATCD0 = 0x00010000, + GID_ATCD1 = 0x00020000, + GID_ATCD2 = 0x00040000, + GID_ATCD3 = 0x00080000, + GID_ATCD4 = 0x00100000, + GID_ATCD5 = 0x00200000, + GID_ATCD6 = 0x00400000, + GID_ATCD7 = 0x00800000, + GID_ATCD8 = 0x01000000, + GID_ATCD9 = 0x02000000, + GID_ATCD10 = 0x04000000, + GID_ATCD11 = 0x08000000, + GID_ATCD12 = 0x10000000, + GID_ATCD13 = 0x20000000, + GID_ATCD14 = 0x40000000, + GID_ATCD15 = 0x80000000, +}; + +/* RIE0 (R-Car Gen3 only) */ +enum RIE0_BIT { + RIE0_FRS0 = 0x00000001, + RIE0_FRS1 = 0x00000002, + RIE0_FRS2 = 0x00000004, + RIE0_FRS3 = 0x00000008, + RIE0_FRS4 = 0x00000010, + RIE0_FRS5 = 0x00000020, + RIE0_FRS6 = 0x00000040, + RIE0_FRS7 = 0x00000080, + RIE0_FRS8 = 0x00000100, + RIE0_FRS9 = 0x00000200, + RIE0_FRS10 = 0x00000400, + RIE0_FRS11 = 0x00000800, + RIE0_FRS12 = 0x00001000, + RIE0_FRS13 = 0x00002000, + RIE0_FRS14 = 0x00004000, + RIE0_FRS15 = 0x00008000, + RIE0_FRS16 = 0x00010000, + RIE0_FRS17 = 0x00020000, +}; + +/* RID0 (R-Car Gen3 only) */ +enum RID0_BIT { + RID0_FRD0 = 0x00000001, + RID0_FRD1 = 0x00000002, + RID0_FRD2 = 0x00000004, + RID0_FRD3 = 0x00000008, + RID0_FRD4 = 0x00000010, + RID0_FRD5 = 0x00000020, + RID0_FRD6 = 0x00000040, + RID0_FRD7 = 0x00000080, + RID0_FRD8 = 0x00000100, + RID0_FRD9 = 0x00000200, + RID0_FRD10 = 0x00000400, + RID0_FRD11 = 0x00000800, + RID0_FRD12 = 0x00001000, + RID0_FRD13 = 0x00002000, + RID0_FRD14 = 0x00004000, + RID0_FRD15 = 0x00008000, + RID0_FRD16 = 0x00010000, + RID0_FRD17 = 0x00020000, +}; + +/* RIE2 (R-Car Gen3 only) */ +enum RIE2_BIT { + RIE2_QFS0 = 0x00000001, + RIE2_QFS1 = 0x00000002, + RIE2_QFS2 = 0x00000004, + RIE2_QFS3 = 0x00000008, + RIE2_QFS4 = 0x00000010, + RIE2_QFS5 = 0x00000020, + RIE2_QFS6 = 0x00000040, + RIE2_QFS7 = 0x00000080, + RIE2_QFS8 = 0x00000100, + RIE2_QFS9 = 0x00000200, + RIE2_QFS10 = 0x00000400, + RIE2_QFS11 = 0x00000800, + RIE2_QFS12 = 0x00001000, + RIE2_QFS13 = 0x00002000, + RIE2_QFS14 = 0x00004000, + RIE2_QFS15 = 0x00008000, + RIE2_QFS16 = 0x00010000, + RIE2_QFS17 = 0x00020000, + RIE2_RFFS = 0x80000000, +}; + +/* RID2 (R-Car Gen3 only) */ +enum RID2_BIT { + RID2_QFD0 = 0x00000001, + RID2_QFD1 = 0x00000002, + RID2_QFD2 = 0x00000004, + RID2_QFD3 = 0x00000008, + RID2_QFD4 = 0x00000010, + RID2_QFD5 = 0x00000020, + RID2_QFD6 = 0x00000040, + RID2_QFD7 = 0x00000080, + RID2_QFD8 = 0x00000100, + RID2_QFD9 = 0x00000200, + RID2_QFD10 = 0x00000400, + 
RID2_QFD11 = 0x00000800, + RID2_QFD12 = 0x00001000, + RID2_QFD13 = 0x00002000, + RID2_QFD14 = 0x00004000, + RID2_QFD15 = 0x00008000, + RID2_QFD16 = 0x00010000, + RID2_QFD17 = 0x00020000, + RID2_RFFD = 0x80000000, +}; + +/* TIE (R-Car Gen3 only) */ +enum TIE_BIT { + TIE_FTS0 = 0x00000001, + TIE_FTS1 = 0x00000002, + TIE_FTS2 = 0x00000004, + TIE_FTS3 = 0x00000008, + TIE_TFUS = 0x00000100, + TIE_TFWS = 0x00000200, + TIE_MFUS = 0x00000400, + TIE_MFWS = 0x00000800, + TIE_TDPS0 = 0x00010000, + TIE_TDPS1 = 0x00020000, + TIE_TDPS2 = 0x00040000, + TIE_TDPS3 = 0x00080000, +}; + +/* TID (R-Car Gen3 only) */ +enum TID_BIT { + TID_FTD0 = 0x00000001, + TID_FTD1 = 0x00000002, + TID_FTD2 = 0x00000004, + TID_FTD3 = 0x00000008, + TID_TFUD = 0x00000100, + TID_TFWD = 0x00000200, + TID_MFUD = 0x00000400, + TID_MFWD = 0x00000800, + TID_TDPD0 = 0x00010000, + TID_TDPD1 = 0x00020000, + TID_TDPD2 = 0x00040000, + TID_TDPD3 = 0x00080000, +}; + +/* ECMR */ +enum ECMR_BIT { + ECMR_PRM = 0x00000001, + ECMR_DM = 0x00000002, + ECMR_TE = 0x00000020, + ECMR_RE = 0x00000040, + ECMR_MPDE = 0x00000200, + ECMR_TXF = 0x00010000, /* Documented for R-Car Gen3 only */ + ECMR_RXF = 0x00020000, + ECMR_PFR = 0x00040000, + ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 and RZ/G2L */ + ECMR_RZPF = 0x00100000, + ECMR_DPAD = 0x00200000, + ECMR_RCSC = 0x00800000, + ECMR_RCPT = 0x02000000, /* Documented for RZ/G2L only */ + ECMR_TRCCM = 0x04000000, +}; + +/* ECSR */ +enum ECSR_BIT { + ECSR_ICD = 0x00000001, + ECSR_MPD = 0x00000002, + ECSR_LCHNG = 0x00000004, + ECSR_PHYI = 0x00000008, + ECSR_PFRI = 0x00000010, /* Documented for R-Car Gen3 and RZ/G2L */ +}; + +/* ECSIPR */ +enum ECSIPR_BIT { + ECSIPR_ICDIP = 0x00000001, + ECSIPR_MPDIP = 0x00000002, + ECSIPR_LCHNGIP = 0x00000004, +}; + +/* PIR */ +enum PIR_BIT { + PIR_MDC = 0x00000001, + PIR_MMD = 0x00000002, + PIR_MDO = 0x00000004, + PIR_MDI = 0x00000008, +}; + +/* PSR */ +enum PSR_BIT { + PSR_LMON = 0x00000001, +}; + +/* PIPR */ +enum PIPR_BIT { + PIPR_PHYIP = 0x00000001, +}; + +/* MPR */ +enum MPR_BIT { + MPR_MP = 0x0000ffff, +}; + +/* GECMR */ +enum GECMR_BIT { + GECMR_SPEED = 0x00000001, + GECMR_SPEED_100 = 0x00000000, + GECMR_SPEED_1000 = 0x00000001, + GBETH_GECMR_SPEED = 0x00000030, + GBETH_GECMR_SPEED_10 = 0x00000000, + GBETH_GECMR_SPEED_100 = 0x00000010, + GBETH_GECMR_SPEED_1000 = 0x00000020, +}; + +/* The Ethernet AVB descriptor definitions. 
*/
+struct ravb_desc {
+	__le16 ds;	/* Descriptor size */
+	u8 cc;		/* Content control MSBs (reserved) */
+	u8 die_dt;	/* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+};
+
+#define DPTR_ALIGN	4	/* Required descriptor pointer alignment */
+
+enum DIE_DT {
+	/* Frame data */
+	DT_FMID		= 0x40,
+	DT_FSTART	= 0x50,
+	DT_FEND		= 0x60,
+	DT_FSINGLE	= 0x70,
+	/* Chain control */
+	DT_LINK		= 0x80,
+	DT_LINKFIX	= 0x90,
+	DT_EOS		= 0xa0,
+	/* HW/SW arbitration */
+	DT_FEMPTY	= 0xc0,
+	DT_FEMPTY_IS	= 0xd0,
+	DT_FEMPTY_IC	= 0xe0,
+	DT_FEMPTY_ND	= 0xf0,
+	DT_LEMPTY	= 0x20,
+	DT_EEMPTY	= 0x30,
+};
+
+struct ravb_rx_desc {
+	__le16 ds_cc;	/* Descriptor size and content control LSBs */
+	u8 msc;		/* MAC status code */
+	u8 die_dt;	/* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+};
+
+struct ravb_ex_rx_desc {
+	__le16 ds_cc;	/* Descriptor size and content control lower bits */
+	u8 msc;		/* MAC status code */
+	u8 die_dt;	/* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+	__le32 ts_n;	/* Timestamp nsec */
+	__le32 ts_sl;	/* Timestamp low */
+	__le16 ts_sh;	/* Timestamp high */
+	__le16 res;	/* Reserved bits */
+};
+
+enum RX_DS_CC_BIT {
+	RX_DS	= 0x0fff, /* Data size */
+	RX_TR	= 0x1000, /* Truncation indication */
+	RX_EI	= 0x2000, /* Error indication */
+	RX_PS	= 0xc000, /* Padding selection */
+};
+
+/* E-MAC status code */
+enum MSC_BIT {
+	MSC_CRC	= 0x01, /* Frame CRC error */
+	MSC_RFE	= 0x02, /* Frame reception error (flagged by PHY) */
+	MSC_RTSF = 0x04, /* Frame length error (frame too short) */
+	MSC_RTLF = 0x08, /* Frame length error (frame too long) */
+	MSC_FRE	= 0x10, /* Fraction error (not a multiple of 8 bits) */
+	MSC_CRL	= 0x20, /* Carrier lost */
+	MSC_CEEF = 0x40, /* Carrier extension error */
+	MSC_MC	= 0x80, /* Multicast frame reception */
+};
+
+struct ravb_tx_desc {
+	__le16 ds_tagl;	/* Descriptor size and frame tag LSBs */
+	u8 tagh_tsr;	/* Frame tag MSBs and timestamp storage request bit */
+	u8 die_dt;	/* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+};
+
+enum TX_DS_TAGL_BIT {
+	TX_DS	= 0x0fff, /* Data size */
+	TX_TAGL	= 0xf000, /* Frame tag LSBs */
+};
+
+enum TX_TAGH_TSR_BIT {
+	TX_TAGH	= 0x3f, /* Frame tag MSBs */
+	TX_TSR	= 0x40, /* Timestamp storage request */
+};
+enum RAVB_QUEUE {
+	RAVB_BE = 0,	/* Best Effort Queue */
+	RAVB_NC,	/* Network Control Queue */
+};
+
+enum CXR31_BIT {
+	CXR31_SEL_LINK0	= 0x00000001,
+	CXR31_SEL_LINK1	= 0x00000008,
+};
+
+enum CXR35_BIT {
+	CXR35_SEL_XMII		= 0x00000003,
+	CXR35_SEL_XMII_RGMII	= 0x00000000,
+	CXR35_SEL_XMII_MII	= 0x00000002,
+	CXR35_HALFCYC_CLKSW	= 0xffff0000,
+};
+
+enum CSR0_BIT {
+	CSR0_TPE	= 0x00000010,
+	CSR0_RPE	= 0x00000020,
+};
+
+#define DBAT_ENTRY_NUM	22
+#define RX_QUEUE_OFFSET	4
+#define NUM_RX_QUEUE	2
+#define NUM_TX_QUEUE	2
+
+#define RX_BUF_SZ	(2048 - ETH_FCS_LEN + sizeof(__sum16))
+
+#define GBETH_RX_BUFF_MAX 8192
+#define GBETH_RX_DESC_DATA_SIZE 4080
+
+struct ravb_tstamp_skb {
+	struct list_head list;
+	struct sk_buff *skb;
+	u16 tag;
+};
+
+struct ravb_ptp_perout {
+	u32 target;
+	u32 period;
+};
+
+#define N_EXT_TS	1
+#define N_PER_OUT	1
+
+struct ravb_ptp {
+	struct ptp_clock *clock;
+	struct ptp_clock_info info;
+	u32 default_addend;
+	u32 current_addend;
+	int extts[N_EXT_TS];
+	struct ravb_ptp_perout perout[N_PER_OUT];
+};
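The extended RX descriptor above carries the gPTP timestamp split across ts_sh/ts_sl (48 bits of seconds) and ts_n (nanoseconds). A minimal userspace-style sketch of how the fields recombine, mirroring the ts handling in ravb_rx_rcar() later in this patch (the raw values are invented for illustration):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical raw descriptor fields, already converted from
	 * little endian (the driver uses le16_to_cpu()/le32_to_cpu()).
	 */
	uint16_t ts_sh = 0x0001;	/* seconds, bits 47:32 */
	uint32_t ts_sl = 0x23456789;	/* seconds, bits 31:0 */
	uint32_t ts_n  = 987654321;	/* nanoseconds */

	uint64_t tv_sec = ((uint64_t)ts_sh << 32) | ts_sl;

	printf("hwtstamp = %llu.%09u s\n", (unsigned long long)tv_sec, ts_n);
	return 0;
}
```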
+
+struct ravb_hw_info {
+	void (*rx_ring_free)(struct net_device *ndev, int q);
+	void (*rx_ring_format)(struct net_device *ndev, int q);
+	void *(*alloc_rx_desc)(struct net_device *ndev, int q);
+	bool (*receive)(struct net_device *ndev, int *quota, int q);
+	void (*set_rate)(struct net_device *ndev);
+	int (*set_feature)(struct net_device *ndev, netdev_features_t features);
+	int (*dmac_init)(struct net_device *ndev);
+	void (*emac_init)(struct net_device *ndev);
+	const char (*gstrings_stats)[ETH_GSTRING_LEN];
+	size_t gstrings_size;
+	netdev_features_t net_hw_features;
+	netdev_features_t net_features;
+	int stats_len;
+	size_t max_rx_len;
+	u32 tccr_mask;
+	u32 rx_max_buf_size;
+	unsigned aligned_tx:1;
+
+	/* hardware features */
+	unsigned internal_delay:1;	/* AVB-DMAC has internal delays */
+	unsigned tx_counters:1;		/* E-MAC has TX counters */
+	unsigned carrier_counters:1;	/* E-MAC has carrier counters */
+	unsigned multi_irqs:1;		/* AVB-DMAC and E-MAC have multiple IRQs */
+	unsigned irq_en_dis:1;		/* Has separate IRQ enable and disable regs */
+	unsigned err_mgmt_irqs:1;	/* Line1 (Err) and Line2 (Mgmt) IRQs are separate */
+	unsigned gptp:1;		/* AVB-DMAC has gPTP support */
+	unsigned ccc_gac:1;		/* AVB-DMAC has gPTP support active in config mode */
+	unsigned gptp_ref_clk:1;	/* gPTP has separate reference clock */
+	unsigned nc_queues:1;		/* AVB-DMAC has RX and TX NC queues */
+	unsigned magic_pkt:1;		/* E-MAC supports magic packet detection */
+	unsigned half_duplex:1;		/* E-MAC supports half duplex mode */
+};
+
+struct ravb_private {
+	struct net_device *ndev;
+	struct platform_device *pdev;
+	void __iomem *addr;
+	struct clk *clk;
+	struct clk *refclk;
+	struct clk *gptp_clk;
+	struct mdiobb_ctrl mdiobb;
+	u32 num_rx_ring[NUM_RX_QUEUE];
+	u32 num_tx_ring[NUM_TX_QUEUE];
+	u32 desc_bat_size;
+	dma_addr_t desc_bat_dma;
+	struct ravb_desc *desc_bat;
+	dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
+	dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
+	struct ravb_rx_desc *gbeth_rx_ring;
+	struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+	struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+	void *tx_align[NUM_TX_QUEUE];
+	struct sk_buff *rx_1st_skb;
+	struct sk_buff **rx_skb[NUM_RX_QUEUE];
+	struct sk_buff **tx_skb[NUM_TX_QUEUE];
+	u32 rx_over_errors;
+	u32 rx_fifo_errors;
+	struct net_device_stats stats[NUM_RX_QUEUE];
+	u32 tstamp_tx_ctrl;
+	u32 tstamp_rx_ctrl;
+	struct list_head ts_skb_list;
+	u32 ts_skb_tag;
+	struct ravb_ptp ptp;
+	spinlock_t lock;		/* Register access lock */
+	u32 cur_rx[NUM_RX_QUEUE];	/* Consumer ring indices */
+	u32 dirty_rx[NUM_RX_QUEUE];	/* Producer ring indices */
+	u32 cur_tx[NUM_TX_QUEUE];
+	u32 dirty_tx[NUM_TX_QUEUE];
+	struct napi_struct napi[NUM_RX_QUEUE];
+	struct work_struct work;
+	/* MII transceiver section.
*/ + struct mii_bus *mii_bus; /* MDIO bus control */ + int link; + phy_interface_t phy_interface; + int msg_enable; + int speed; + int emac_irq; + int erra_irq; + int mgmta_irq; + int rx_irqs[NUM_RX_QUEUE]; + int tx_irqs[NUM_TX_QUEUE]; + + unsigned no_avb_link:1; + unsigned avb_link_active_low:1; + unsigned wol_enabled:1; + unsigned rxcidm:1; /* RX Clock Internal Delay Mode */ + unsigned txcidm:1; /* TX Clock Internal Delay Mode */ + unsigned rgmii_override:1; /* Deprecated rgmii-*id behavior */ + unsigned int num_tx_desc; /* TX descriptors per packet */ + + int duplex; + + const struct ravb_hw_info *info; + struct reset_control *rstc; +}; + +static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg) +{ + struct ravb_private *priv = netdev_priv(ndev); + + return ioread32(priv->addr + reg); +} + +static inline void ravb_write(struct net_device *ndev, u32 data, + enum ravb_reg reg) +{ + struct ravb_private *priv = netdev_priv(ndev); + + iowrite32(data, priv->addr + reg); +} + +void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, + u32 set); +int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value); + +void ravb_ptp_interrupt(struct net_device *ndev); +void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev); +void ravb_ptp_stop(struct net_device *ndev); + +#endif /* #ifndef __RAVB_H__ */ diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c new file mode 100644 index 0000000000..8fec0dbbbe --- /dev/null +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -0,0 +1,3101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Renesas Ethernet AVB device driver + * + * Copyright (C) 2014-2019 Renesas Electronics Corporation + * Copyright (C) 2015 Renesas Solutions Corp. + * Copyright (C) 2015-2016 Cogent Embedded, Inc. + * + * Based on the SuperH Ethernet driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ravb.h" + +#define RAVB_DEF_MSG_ENABLE \ + (NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +static const char *ravb_rx_irqs[NUM_RX_QUEUE] = { + "ch0", /* RAVB_BE */ + "ch1", /* RAVB_NC */ +}; + +static const char *ravb_tx_irqs[NUM_TX_QUEUE] = { + "ch18", /* RAVB_BE */ + "ch19", /* RAVB_NC */ +}; + +void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, + u32 set) +{ + ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg); +} + +int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value) +{ + int i; + + for (i = 0; i < 10000; i++) { + if ((ravb_read(ndev, reg) & mask) == value) + return 0; + udelay(10); + } + return -ETIMEDOUT; +} + +static int ravb_set_opmode(struct net_device *ndev, u32 opmode) +{ + u32 csr_ops = 1U << (opmode & CCC_OPC); + u32 ccc_mask = CCC_OPC; + int error; + + /* If gPTP active in config mode is supported it needs to be configured + * along with CSEL and operating mode in the same access. This is a + * hardware limitation. 
+ */ + if (opmode & CCC_GAC) + ccc_mask |= CCC_GAC | CCC_CSEL; + + /* Set operating mode */ + ravb_modify(ndev, CCC, ccc_mask, opmode); + /* Check if the operating mode is changed to the requested one */ + error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops); + if (error) { + netdev_err(ndev, "failed to switch device to requested mode (%u)\n", + opmode & CCC_OPC); + } + + return error; +} + +static void ravb_set_rate_gbeth(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + switch (priv->speed) { + case 10: /* 10BASE */ + ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR); + break; + case 100: /* 100BASE */ + ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR); + break; + case 1000: /* 1000BASE */ + ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR); + break; + } +} + +static void ravb_set_rate_rcar(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + switch (priv->speed) { + case 100: /* 100BASE */ + ravb_write(ndev, GECMR_SPEED_100, GECMR); + break; + case 1000: /* 1000BASE */ + ravb_write(ndev, GECMR_SPEED_1000, GECMR); + break; + } +} + +static void ravb_set_buffer_align(struct sk_buff *skb) +{ + u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1); + + if (reserve) + skb_reserve(skb, RAVB_ALIGN - reserve); +} + +/* Get MAC address from the MAC address registers + * + * Ethernet AVB device doesn't have ROM for MAC address. + * This function gets the MAC address that was used by a bootloader. + */ +static void ravb_read_mac_address(struct device_node *np, + struct net_device *ndev) +{ + int ret; + + ret = of_get_ethdev_address(np, ndev); + if (ret) { + u32 mahr = ravb_read(ndev, MAHR); + u32 malr = ravb_read(ndev, MALR); + u8 addr[ETH_ALEN]; + + addr[0] = (mahr >> 24) & 0xFF; + addr[1] = (mahr >> 16) & 0xFF; + addr[2] = (mahr >> 8) & 0xFF; + addr[3] = (mahr >> 0) & 0xFF; + addr[4] = (malr >> 8) & 0xFF; + addr[5] = (malr >> 0) & 0xFF; + eth_hw_addr_set(ndev, addr); + } +} + +static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set) +{ + struct ravb_private *priv = container_of(ctrl, struct ravb_private, + mdiobb); + + ravb_modify(priv->ndev, PIR, mask, set ? 
mask : 0);
+}
+
+/* MDC pin control */
+static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
+}
+
+/* Data I/O pin control */
+static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
+}
+
+/* Set data bit */
+static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
+{
+	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
+}
+
+/* Get data bit */
+static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
+{
+	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
+						 mdiobb);
+
+	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
+}
+
+/* MDIO bus control struct */
+static const struct mdiobb_ops bb_ops = {
+	.owner = THIS_MODULE,
+	.set_mdc = ravb_set_mdc,
+	.set_mdio_dir = ravb_set_mdio_dir,
+	.set_mdio_data = ravb_set_mdio_data,
+	.get_mdio_data = ravb_get_mdio_data,
+};
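The four callbacks above are the only hardware-specific pieces the mdio-bitbang framework needs; the framework itself serializes each MDIO frame through them one bit at a time. A compilable userspace-style sketch of that clocking idea (not the framework's actual code; a plain variable stands in for the PIR register):

```c
#include <stdint.h>
#include <stdio.h>

#define PIR_MDC	0x1u	/* MDC pin (clock) */
#define PIR_MMD	0x2u	/* pin direction, 1 = output */
#define PIR_MDO	0x4u	/* data out */

static uint32_t pir;	/* stand-in for the PIR register */

static void pir_modify(uint32_t mask, int set)
{
	pir = set ? (pir | mask) : (pir & ~mask);
}

/* Shift a value out MSB first, toggling the clock for each bit. */
static void clock_out_bits(uint32_t value, int nbits)
{
	pir_modify(PIR_MMD, 1);			/* drive the data pin */
	while (nbits--) {
		pir_modify(PIR_MDO, (value >> nbits) & 1);
		pir_modify(PIR_MDC, 1);		/* bit is sampled on the edge */
		pir_modify(PIR_MDC, 0);
	}
}

int main(void)
{
	clock_out_bits(0xFFFFFFFFu, 32);	/* MDIO frame preamble */
	printf("final PIR state = %#x\n", pir);
	return 0;
}
```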
+
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &priv->stats[q];
+	unsigned int num_tx_desc = priv->num_tx_desc;
+	struct ravb_tx_desc *desc;
+	unsigned int entry;
+	int free_num = 0;
+	u32 size;
+
+	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+		bool txed;
+
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     num_tx_desc);
+		desc = &priv->tx_ring[q][entry];
+		txed = desc->die_dt == DT_FEMPTY;
+		if (free_txed_only && !txed)
+			break;
+		/* Descriptor type must be checked before all other reads */
+		dma_rmb();
+		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+		/* Free the original skb. */
+		if (priv->tx_skb[q][entry / num_tx_desc]) {
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+					 size, DMA_TO_DEVICE);
+			/* Last packet descriptor? */
+			if (entry % num_tx_desc == num_tx_desc - 1) {
+				entry /= num_tx_desc;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				if (txed)
+					stats->tx_packets++;
+			}
+			free_num++;
+		}
+		if (txed)
+			stats->tx_bytes += size;
+		desc->die_dt = DT_EEMPTY;
+	}
+	return free_num;
+}
+
+static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	unsigned int ring_size;
+	unsigned int i;
+
+	if (!priv->gbeth_rx_ring)
+		return;
+
+	for (i = 0; i < priv->num_rx_ring[q]; i++) {
+		struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+
+		if (!dma_mapping_error(ndev->dev.parent,
+				       le32_to_cpu(desc->dptr)))
+			dma_unmap_single(ndev->dev.parent,
+					 le32_to_cpu(desc->dptr),
+					 GBETH_RX_BUFF_MAX,
+					 DMA_FROM_DEVICE);
+	}
+	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+	dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+			  priv->rx_desc_dma[q]);
+	priv->gbeth_rx_ring = NULL;
+}
+
+static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	unsigned int ring_size;
+	unsigned int i;
+
+	if (!priv->rx_ring[q])
+		return;
+
+	for (i = 0; i < priv->num_rx_ring[q]; i++) {
+		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+		if (!dma_mapping_error(ndev->dev.parent,
+				       le32_to_cpu(desc->dptr)))
+			dma_unmap_single(ndev->dev.parent,
+					 le32_to_cpu(desc->dptr),
+					 RX_BUF_SZ,
+					 DMA_FROM_DEVICE);
+	}
+	ring_size = sizeof(struct ravb_ex_rx_desc) *
+		    (priv->num_rx_ring[q] + 1);
+	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+			  priv->rx_desc_dma[q]);
+	priv->rx_ring[q] = NULL;
+}
+
+/* Free skb's and DMA buffers for Ethernet AVB */
+static void ravb_ring_free(struct net_device *ndev, int q)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	const struct ravb_hw_info *info = priv->info;
+	unsigned int num_tx_desc = priv->num_tx_desc;
+	unsigned int ring_size;
+	unsigned int i;
+
+	info->rx_ring_free(ndev, q);
+
+	if (priv->tx_ring[q]) {
+		ravb_tx_free(ndev, q, false);
+
+		ring_size = sizeof(struct ravb_tx_desc) *
+			    (priv->num_tx_ring[q] * num_tx_desc + 1);
+		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
+				  priv->tx_desc_dma[q]);
+		priv->tx_ring[q] = NULL;
+	}
+
+	/* Free RX skb ringbuffer */
+	if (priv->rx_skb[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++)
+			dev_kfree_skb(priv->rx_skb[q][i]);
+	}
+	kfree(priv->rx_skb[q]);
+	priv->rx_skb[q] = NULL;
+
+	/* Free aligned TX buffers */
+	kfree(priv->tx_align[q]);
+	priv->tx_align[q] = NULL;
+
+	/* Free TX skb ringbuffer.
+	 * SKBs are freed by ravb_tx_free() call above.
+	 */
+	kfree(priv->tx_skb[q]);
+	priv->tx_skb[q] = NULL;
+}
+
+static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct ravb_rx_desc *rx_desc;
+	unsigned int rx_ring_size;
+	dma_addr_t dma_addr;
+	unsigned int i;
+
+	rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
+	memset(priv->gbeth_rx_ring, 0, rx_ring_size);
+	/* Build RX ring buffer */
+	for (i = 0; i < priv->num_rx_ring[q]; i++) {
+		/* RX descriptor */
+		rx_desc = &priv->gbeth_rx_ring[i];
+		rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
+					  GBETH_RX_BUFF_MAX,
+					  DMA_FROM_DEVICE);
+		/* We just set the data size to 0 for a failed mapping which
+		 * should prevent DMA from happening...
+ */ + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + rx_desc->ds_cc = cpu_to_le16(0); + rx_desc->dptr = cpu_to_le32(dma_addr); + rx_desc->die_dt = DT_FEMPTY; + } + rx_desc = &priv->gbeth_rx_ring[i]; + rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); + rx_desc->die_dt = DT_LINKFIX; /* type */ +} + +static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct ravb_ex_rx_desc *rx_desc; + unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; + dma_addr_t dma_addr; + unsigned int i; + + memset(priv->rx_ring[q], 0, rx_ring_size); + /* Build RX ring buffer */ + for (i = 0; i < priv->num_rx_ring[q]; i++) { + /* RX descriptor */ + rx_desc = &priv->rx_ring[q][i]; + rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ); + dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, + RX_BUF_SZ, + DMA_FROM_DEVICE); + /* We just set the data size to 0 for a failed mapping which + * should prevent DMA from happening... + */ + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + rx_desc->ds_cc = cpu_to_le16(0); + rx_desc->dptr = cpu_to_le32(dma_addr); + rx_desc->die_dt = DT_FEMPTY; + } + rx_desc = &priv->rx_ring[q][i]; + rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); + rx_desc->die_dt = DT_LINKFIX; /* type */ +} + +/* Format skb and descriptor buffer for Ethernet AVB */ +static void ravb_ring_format(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + unsigned int num_tx_desc = priv->num_tx_desc; + struct ravb_tx_desc *tx_desc; + struct ravb_desc *desc; + unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * + num_tx_desc; + unsigned int i; + + priv->cur_rx[q] = 0; + priv->cur_tx[q] = 0; + priv->dirty_rx[q] = 0; + priv->dirty_tx[q] = 0; + + info->rx_ring_format(ndev, q); + + memset(priv->tx_ring[q], 0, tx_ring_size); + /* Build TX ring buffer */ + for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; + i++, tx_desc++) { + tx_desc->die_dt = DT_EEMPTY; + if (num_tx_desc > 1) { + tx_desc++; + tx_desc->die_dt = DT_EEMPTY; + } + } + tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); + tx_desc->die_dt = DT_LINKFIX; /* type */ + + /* RX descriptor base address for best effort */ + desc = &priv->desc_bat[RX_QUEUE_OFFSET + q]; + desc->die_dt = DT_LINKFIX; /* type */ + desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); + + /* TX descriptor base address for best effort */ + desc = &priv->desc_bat[q]; + desc->die_dt = DT_LINKFIX; /* type */ + desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); +} + +static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned int ring_size; + + ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1); + + priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size, + &priv->rx_desc_dma[q], + GFP_KERNEL); + return priv->gbeth_rx_ring; +} + +static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned int ring_size; + + ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); + + priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, + &priv->rx_desc_dma[q], + GFP_KERNEL); + return priv->rx_ring[q]; +} + +/* Init skb and descriptor buffer for Ethernet AVB */ +static int ravb_ring_init(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct 
ravb_hw_info *info = priv->info; + unsigned int num_tx_desc = priv->num_tx_desc; + unsigned int ring_size; + struct sk_buff *skb; + unsigned int i; + + /* Allocate RX and TX skb rings */ + priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], + sizeof(*priv->rx_skb[q]), GFP_KERNEL); + priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q], + sizeof(*priv->tx_skb[q]), GFP_KERNEL); + if (!priv->rx_skb[q] || !priv->tx_skb[q]) + goto error; + + for (i = 0; i < priv->num_rx_ring[q]; i++) { + skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL); + if (!skb) + goto error; + ravb_set_buffer_align(skb); + priv->rx_skb[q][i] = skb; + } + + if (num_tx_desc > 1) { + /* Allocate rings for the aligned buffers */ + priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + + DPTR_ALIGN - 1, GFP_KERNEL); + if (!priv->tx_align[q]) + goto error; + } + + /* Allocate all RX descriptors. */ + if (!info->alloc_rx_desc(ndev, q)) + goto error; + + priv->dirty_rx[q] = 0; + + /* Allocate all TX descriptors. */ + ring_size = sizeof(struct ravb_tx_desc) * + (priv->num_tx_ring[q] * num_tx_desc + 1); + priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, + &priv->tx_desc_dma[q], + GFP_KERNEL); + if (!priv->tx_ring[q]) + goto error; + + return 0; + +error: + ravb_ring_free(ndev, q); + + return -ENOMEM; +} + +static void ravb_emac_init_gbeth(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { + ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35); + ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0); + } else { + ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35); + ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, + CXR31_SEL_LINK0); + } + + /* Receive frame limit set register */ + ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR); + + /* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */ + ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) | + ECMR_TE | ECMR_RE | ECMR_RCPT | + ECMR_TXF | ECMR_RXF, ECMR); + + ravb_set_rate_gbeth(ndev); + + /* Set MAC address */ + ravb_write(ndev, + (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | + (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); + ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); + + /* E-MAC status register clear */ + ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR); + ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0); + + /* E-MAC interrupt enable register */ + ravb_write(ndev, ECSIPR_ICDIP, ECSIPR); +} + +static void ravb_emac_init_rcar(struct net_device *ndev) +{ + /* Receive frame limit set register */ + ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); + + /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */ + ravb_write(ndev, ECMR_ZPF | ECMR_DM | + (ndev->features & NETIF_F_RXCSUM ? 
ECMR_RCSC : 0) | + ECMR_TE | ECMR_RE, ECMR); + + ravb_set_rate_rcar(ndev); + + /* Set MAC address */ + ravb_write(ndev, + (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | + (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); + ravb_write(ndev, + (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); + + /* E-MAC status register clear */ + ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR); + + /* E-MAC interrupt enable register */ + ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR); +} + +/* E-MAC init function */ +static void ravb_emac_init(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + + info->emac_init(ndev); +} + +static int ravb_dmac_init_gbeth(struct net_device *ndev) +{ + int error; + + error = ravb_ring_init(ndev, RAVB_BE); + if (error) + return error; + + /* Descriptor format */ + ravb_ring_format(ndev, RAVB_BE); + + /* Set DMAC RX */ + ravb_write(ndev, 0x60000000, RCR); + + /* Set Max Frame Length (RTC) */ + ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC); + + /* Set FIFO size */ + ravb_write(ndev, 0x00222200, TGC); + + ravb_write(ndev, 0, TCCR); + + /* Frame receive */ + ravb_write(ndev, RIC0_FRE0, RIC0); + /* Disable FIFO full warning */ + ravb_write(ndev, 0x0, RIC1); + /* Receive FIFO full error, descriptor empty */ + ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2); + + ravb_write(ndev, TIC_FTE0, TIC); + + return 0; +} + +static int ravb_dmac_init_rcar(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + int error; + + error = ravb_ring_init(ndev, RAVB_BE); + if (error) + return error; + error = ravb_ring_init(ndev, RAVB_NC); + if (error) { + ravb_ring_free(ndev, RAVB_BE); + return error; + } + + /* Descriptor format */ + ravb_ring_format(ndev, RAVB_BE); + ravb_ring_format(ndev, RAVB_NC); + + /* Set AVB RX */ + ravb_write(ndev, + RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR); + + /* Set FIFO size */ + ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC); + + /* Timestamp enable */ + ravb_write(ndev, TCCR_TFEN, TCCR); + + /* Interrupt init: */ + if (info->multi_irqs) { + /* Clear DIL.DPLx */ + ravb_write(ndev, 0, DIL); + /* Set queue specific interrupt */ + ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE); + } + /* Frame receive */ + ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); + /* Disable FIFO full warning */ + ravb_write(ndev, 0, RIC1); + /* Receive FIFO full error, descriptor empty */ + ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); + /* Frame transmitted, timestamp FIFO updated */ + ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC); + + return 0; +} + +/* Device init function for Ethernet AVB */ +static int ravb_dmac_init(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + int error; + + /* Set CONFIG mode */ + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); + if (error) + return error; + + error = info->dmac_init(ndev); + if (error) + return error; + + /* Setting the control will start the AVB-DMAC process. 
*/ + return ravb_set_opmode(ndev, CCC_OPC_OPERATION); +} + +static void ravb_get_tx_tstamp(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct ravb_tstamp_skb *ts_skb, *ts_skb2; + struct skb_shared_hwtstamps shhwtstamps; + struct sk_buff *skb; + struct timespec64 ts; + u16 tag, tfa_tag; + int count; + u32 tfa2; + + count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8; + while (count--) { + tfa2 = ravb_read(ndev, TFA2); + tfa_tag = (tfa2 & TFA2_TST) >> 16; + ts.tv_nsec = (u64)ravb_read(ndev, TFA0); + ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) | + ravb_read(ndev, TFA1); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = timespec64_to_ktime(ts); + list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, + list) { + skb = ts_skb->skb; + tag = ts_skb->tag; + list_del(&ts_skb->list); + kfree(ts_skb); + if (tag == tfa_tag) { + skb_tstamp_tx(skb, &shhwtstamps); + dev_consume_skb_any(skb); + break; + } else { + dev_kfree_skb_any(skb); + } + } + ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR); + } +} + +static void ravb_rx_csum(struct sk_buff *skb) +{ + u8 *hw_csum; + + /* The hardware checksum is contained in sizeof(__sum16) (2) bytes + * appended to packet data + */ + if (unlikely(skb->len < sizeof(__sum16))) + return; + hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); + skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + skb_trim(skb, skb->len - sizeof(__sum16)); +} + +static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry, + struct ravb_rx_desc *desc) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct sk_buff *skb; + + skb = priv->rx_skb[RAVB_BE][entry]; + priv->rx_skb[RAVB_BE][entry] = NULL; + dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), + ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE); + + return skb; +} + +/* Packet receive function for Gigabit Ethernet */ +static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + struct net_device_stats *stats; + struct ravb_rx_desc *desc; + struct sk_buff *skb; + dma_addr_t dma_addr; + u8 desc_status; + int boguscnt; + u16 pkt_len; + u8 die_dt; + int entry; + int limit; + + entry = priv->cur_rx[q] % priv->num_rx_ring[q]; + boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; + stats = &priv->stats[q]; + + boguscnt = min(boguscnt, *quota); + limit = boguscnt; + desc = &priv->gbeth_rx_ring[entry]; + while (desc->die_dt != DT_FEMPTY) { + /* Descriptor type must be checked before all other reads */ + dma_rmb(); + desc_status = desc->msc; + pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; + + if (--boguscnt < 0) + break; + + /* We use 0-byte descriptors to mark the DMA mapping errors */ + if (!pkt_len) + continue; + + if (desc_status & MSC_MC) + stats->multicast++; + + if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) { + stats->rx_errors++; + if (desc_status & MSC_CRC) + stats->rx_crc_errors++; + if (desc_status & MSC_RFE) + stats->rx_frame_errors++; + if (desc_status & (MSC_RTLF | MSC_RTSF)) + stats->rx_length_errors++; + if (desc_status & MSC_CEEF) + stats->rx_missed_errors++; + } else { + die_dt = desc->die_dt & 0xF0; + switch (die_dt) { + case DT_FSINGLE: + skb = ravb_get_skb_gbeth(ndev, entry, desc); + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + napi_gro_receive(&priv->napi[q], skb); + stats->rx_packets++; + stats->rx_bytes += 
pkt_len; + break; + case DT_FSTART: + priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc); + skb_put(priv->rx_1st_skb, pkt_len); + break; + case DT_FMID: + skb = ravb_get_skb_gbeth(ndev, entry, desc); + skb_copy_to_linear_data_offset(priv->rx_1st_skb, + priv->rx_1st_skb->len, + skb->data, + pkt_len); + skb_put(priv->rx_1st_skb, pkt_len); + dev_kfree_skb(skb); + break; + case DT_FEND: + skb = ravb_get_skb_gbeth(ndev, entry, desc); + skb_copy_to_linear_data_offset(priv->rx_1st_skb, + priv->rx_1st_skb->len, + skb->data, + pkt_len); + skb_put(priv->rx_1st_skb, pkt_len); + dev_kfree_skb(skb); + priv->rx_1st_skb->protocol = + eth_type_trans(priv->rx_1st_skb, ndev); + napi_gro_receive(&priv->napi[q], + priv->rx_1st_skb); + stats->rx_packets++; + stats->rx_bytes += pkt_len; + break; + } + } + + entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q]; + desc = &priv->gbeth_rx_ring[entry]; + } + + /* Refill the RX ring buffers. */ + for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { + entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; + desc = &priv->gbeth_rx_ring[entry]; + desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE); + + if (!priv->rx_skb[q][entry]) { + skb = netdev_alloc_skb(ndev, info->max_rx_len); + if (!skb) + break; + ravb_set_buffer_align(skb); + dma_addr = dma_map_single(ndev->dev.parent, + skb->data, + GBETH_RX_BUFF_MAX, + DMA_FROM_DEVICE); + skb_checksum_none_assert(skb); + /* We just set the data size to 0 for a failed mapping + * which should prevent DMA from happening... + */ + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + desc->ds_cc = cpu_to_le16(0); + desc->dptr = cpu_to_le32(dma_addr); + priv->rx_skb[q][entry] = skb; + } + /* Descriptor type must be set after all the above writes */ + dma_wmb(); + desc->die_dt = DT_FEMPTY; + } + + *quota -= limit - (++boguscnt); + + return boguscnt <= 0; +} + +/* Packet receive function for Ethernet AVB */ +static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + int entry = priv->cur_rx[q] % priv->num_rx_ring[q]; + int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) - + priv->cur_rx[q]; + struct net_device_stats *stats = &priv->stats[q]; + struct ravb_ex_rx_desc *desc; + struct sk_buff *skb; + dma_addr_t dma_addr; + struct timespec64 ts; + u8 desc_status; + u16 pkt_len; + int limit; + + boguscnt = min(boguscnt, *quota); + limit = boguscnt; + desc = &priv->rx_ring[q][entry]; + while (desc->die_dt != DT_FEMPTY) { + /* Descriptor type must be checked before all other reads */ + dma_rmb(); + desc_status = desc->msc; + pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; + + if (--boguscnt < 0) + break; + + /* We use 0-byte descriptors to mark the DMA mapping errors */ + if (!pkt_len) + continue; + + if (desc_status & MSC_MC) + stats->multicast++; + + if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | + MSC_CEEF)) { + stats->rx_errors++; + if (desc_status & MSC_CRC) + stats->rx_crc_errors++; + if (desc_status & MSC_RFE) + stats->rx_frame_errors++; + if (desc_status & (MSC_RTLF | MSC_RTSF)) + stats->rx_length_errors++; + if (desc_status & MSC_CEEF) + stats->rx_missed_errors++; + } else { + u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE; + + skb = priv->rx_skb[q][entry]; + priv->rx_skb[q][entry] = NULL; + dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), + RX_BUF_SZ, + DMA_FROM_DEVICE); + get_ts &= (q == RAVB_NC) ? 
+ RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : + ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; + if (get_ts) { + struct skb_shared_hwtstamps *shhwtstamps; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) << + 32) | le32_to_cpu(desc->ts_sl); + ts.tv_nsec = le32_to_cpu(desc->ts_n); + shhwtstamps->hwtstamp = timespec64_to_ktime(ts); + } + + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + if (ndev->features & NETIF_F_RXCSUM) + ravb_rx_csum(skb); + napi_gro_receive(&priv->napi[q], skb); + stats->rx_packets++; + stats->rx_bytes += pkt_len; + } + + entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q]; + desc = &priv->rx_ring[q][entry]; + } + + /* Refill the RX ring buffers. */ + for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { + entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; + desc = &priv->rx_ring[q][entry]; + desc->ds_cc = cpu_to_le16(RX_BUF_SZ); + + if (!priv->rx_skb[q][entry]) { + skb = netdev_alloc_skb(ndev, info->max_rx_len); + if (!skb) + break; /* Better luck next round. */ + ravb_set_buffer_align(skb); + dma_addr = dma_map_single(ndev->dev.parent, skb->data, + le16_to_cpu(desc->ds_cc), + DMA_FROM_DEVICE); + skb_checksum_none_assert(skb); + /* We just set the data size to 0 for a failed mapping + * which should prevent DMA from happening... + */ + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + desc->ds_cc = cpu_to_le16(0); + desc->dptr = cpu_to_le32(dma_addr); + priv->rx_skb[q][entry] = skb; + } + /* Descriptor type must be set after all the above writes */ + dma_wmb(); + desc->die_dt = DT_FEMPTY; + } + + *quota -= limit - (++boguscnt); + + return boguscnt <= 0; +} + +/* Packet receive function for Ethernet AVB */ +static bool ravb_rx(struct net_device *ndev, int *quota, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + + return info->receive(ndev, quota, q); +} + +static void ravb_rcv_snd_disable(struct net_device *ndev) +{ + /* Disable TX and RX */ + ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0); +} + +static void ravb_rcv_snd_enable(struct net_device *ndev) +{ + /* Enable TX and RX */ + ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); +} + +/* function for waiting dma process finished */ +static int ravb_stop_dma(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + int error; + + /* Wait for stopping the hardware TX process */ + error = ravb_wait(ndev, TCCR, info->tccr_mask, 0); + + if (error) + return error; + + error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3, + 0); + if (error) + return error; + + /* Stop the E-MAC's RX/TX processes. 
*/ + ravb_rcv_snd_disable(ndev); + + /* Wait for the RX DMA process to stop */ + error = ravb_wait(ndev, CSR, CSR_RPO, 0); + if (error) + return error; + + /* Stop AVB-DMAC process */ + return ravb_set_opmode(ndev, CCC_OPC_CONFIG); +} + +/* E-MAC interrupt handler */ +static void ravb_emac_interrupt_unlocked(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + u32 ecsr, psr; + + ecsr = ravb_read(ndev, ECSR); + ravb_write(ndev, ecsr, ECSR); /* clear interrupt */ + + if (ecsr & ECSR_MPD) + pm_wakeup_event(&priv->pdev->dev, 0); + if (ecsr & ECSR_ICD) + ndev->stats.tx_carrier_errors++; + if (ecsr & ECSR_LCHNG) { + /* Link changed */ + if (priv->no_avb_link) + return; + psr = ravb_read(ndev, PSR); + if (priv->avb_link_active_low) + psr ^= PSR_LMON; + if (!(psr & PSR_LMON)) { + /* Disable RX and TX */ + ravb_rcv_snd_disable(ndev); + } else { + /* Enable RX and TX */ + ravb_rcv_snd_enable(ndev); + } + } +} + +static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + + spin_lock(&priv->lock); + ravb_emac_interrupt_unlocked(ndev); + spin_unlock(&priv->lock); + return IRQ_HANDLED; +} + +/* Error interrupt handler */ +static void ravb_error_interrupt(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + u32 eis, ris2; + + eis = ravb_read(ndev, EIS); + ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); + if (eis & EIS_QFS) { + ris2 = ravb_read(ndev, RIS2); + ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED), + RIS2); + + /* Receive Descriptor Empty int (best effort queue) */ + if (ris2 & RIS2_QFF0) + priv->stats[RAVB_BE].rx_over_errors++; + + /* Receive Descriptor Empty int (network control queue) */ + if (ris2 & RIS2_QFF1) + priv->stats[RAVB_NC].rx_over_errors++; + + /* Receive FIFO Overflow int */ + if (ris2 & RIS2_RFFF) + priv->rx_fifo_errors++; + } +} + +static bool ravb_queue_interrupt(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + u32 ris0 = ravb_read(ndev, RIS0); + u32 ric0 = ravb_read(ndev, RIC0); + u32 tis = ravb_read(ndev, TIS); + u32 tic = ravb_read(ndev, TIC); + + if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) { + if (napi_schedule_prep(&priv->napi[q])) { + /* Mask RX and TX interrupts */ + if (!info->irq_en_dis) { + ravb_write(ndev, ric0 & ~BIT(q), RIC0); + ravb_write(ndev, tic & ~BIT(q), TIC); + } else { + ravb_write(ndev, BIT(q), RID0); + ravb_write(ndev, BIT(q), TID); + } + __napi_schedule(&priv->napi[q]); + } else { + netdev_warn(ndev, + "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n", + ris0, ric0); + netdev_warn(ndev, + " tx status 0x%08x, tx mask 0x%08x.\n", + tis, tic); + } + return true; + } + return false; +} + +static bool ravb_timestamp_interrupt(struct net_device *ndev) +{ + u32 tis = ravb_read(ndev, TIS); + + if (tis & TIS_TFUF) { + ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS); + ravb_get_tx_tstamp(ndev); + return true; + } + return false; +} + +static irqreturn_t ravb_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + irqreturn_t result = IRQ_NONE; + u32 iss; + + spin_lock(&priv->lock); + /* Get interrupt status */ + iss = ravb_read(ndev, ISS); + + /* Received and transmitted interrupts */ + if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) { + int q; + + /* Timestamp updated */ + if (ravb_timestamp_interrupt(ndev)) + result = 
IRQ_HANDLED; + + /* Network control and best effort queue RX/TX */ + if (info->nc_queues) { + for (q = RAVB_NC; q >= RAVB_BE; q--) { + if (ravb_queue_interrupt(ndev, q)) + result = IRQ_HANDLED; + } + } else { + if (ravb_queue_interrupt(ndev, RAVB_BE)) + result = IRQ_HANDLED; + } + } + + /* E-MAC status summary */ + if (iss & ISS_MS) { + ravb_emac_interrupt_unlocked(ndev); + result = IRQ_HANDLED; + } + + /* Error status summary */ + if (iss & ISS_ES) { + ravb_error_interrupt(ndev); + result = IRQ_HANDLED; + } + + /* gPTP interrupt status summary */ + if (iss & ISS_CGIS) { + ravb_ptp_interrupt(ndev); + result = IRQ_HANDLED; + } + + spin_unlock(&priv->lock); + return result; +} + +/* Timestamp/Error/gPTP interrupt handler */ +static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + irqreturn_t result = IRQ_NONE; + u32 iss; + + spin_lock(&priv->lock); + /* Get interrupt status */ + iss = ravb_read(ndev, ISS); + + /* Timestamp updated */ + if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev)) + result = IRQ_HANDLED; + + /* Error status summary */ + if (iss & ISS_ES) { + ravb_error_interrupt(ndev); + result = IRQ_HANDLED; + } + + /* gPTP interrupt status summary */ + if (iss & ISS_CGIS) { + ravb_ptp_interrupt(ndev); + result = IRQ_HANDLED; + } + + spin_unlock(&priv->lock); + return result; +} + +static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + irqreturn_t result = IRQ_NONE; + + spin_lock(&priv->lock); + + /* Network control/Best effort queue RX/TX */ + if (ravb_queue_interrupt(ndev, q)) + result = IRQ_HANDLED; + + spin_unlock(&priv->lock); + return result; +} + +static irqreturn_t ravb_be_interrupt(int irq, void *dev_id) +{ + return ravb_dma_interrupt(irq, dev_id, RAVB_BE); +} + +static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id) +{ + return ravb_dma_interrupt(irq, dev_id, RAVB_NC); +} + +static int ravb_poll(struct napi_struct *napi, int budget) +{ + struct net_device *ndev = napi->dev; + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + bool gptp = info->gptp || info->ccc_gac; + struct ravb_rx_desc *desc; + unsigned long flags; + int q = napi - priv->napi; + int mask = BIT(q); + int quota = budget; + unsigned int entry; + + if (!gptp) { + entry = priv->cur_rx[q] % priv->num_rx_ring[q]; + desc = &priv->gbeth_rx_ring[entry]; + } + /* Processing RX Descriptor Ring */ + /* Clear RX interrupt */ + ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); + if (gptp || desc->die_dt != DT_FEMPTY) { + if (ravb_rx(ndev, "a, q)) + goto out; + } + + /* Processing TX Descriptor Ring */ + spin_lock_irqsave(&priv->lock, flags); + /* Clear TX interrupt */ + ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); + ravb_tx_free(ndev, q, true); + netif_wake_subqueue(ndev, q); + spin_unlock_irqrestore(&priv->lock, flags); + + napi_complete(napi); + + /* Re-enable RX/TX interrupts */ + spin_lock_irqsave(&priv->lock, flags); + if (!info->irq_en_dis) { + ravb_modify(ndev, RIC0, mask, mask); + ravb_modify(ndev, TIC, mask, mask); + } else { + ravb_write(ndev, mask, RIE0); + ravb_write(ndev, mask, TIE); + } + spin_unlock_irqrestore(&priv->lock, flags); + + /* Receive error message handling */ + priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; + if (info->nc_queues) + priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; + if (priv->rx_over_errors != 
ndev->stats.rx_over_errors) + ndev->stats.rx_over_errors = priv->rx_over_errors; + if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) + ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; +out: + return budget - quota; +} + +static void ravb_set_duplex_gbeth(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0); +} + +/* PHY state control function */ +static void ravb_adjust_link(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + struct phy_device *phydev = ndev->phydev; + bool new_state = false; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* Disable TX and RX right over here, if E-MAC change is ignored */ + if (priv->no_avb_link) + ravb_rcv_snd_disable(ndev); + + if (phydev->link) { + if (info->half_duplex && phydev->duplex != priv->duplex) { + new_state = true; + priv->duplex = phydev->duplex; + ravb_set_duplex_gbeth(ndev); + } + + if (phydev->speed != priv->speed) { + new_state = true; + priv->speed = phydev->speed; + info->set_rate(ndev); + } + if (!priv->link) { + ravb_modify(ndev, ECMR, ECMR_TXF, 0); + new_state = true; + priv->link = phydev->link; + } + } else if (priv->link) { + new_state = true; + priv->link = 0; + priv->speed = 0; + if (info->half_duplex) + priv->duplex = -1; + } + + /* Enable TX and RX right over here, if E-MAC change is ignored */ + if (priv->no_avb_link && phydev->link) + ravb_rcv_snd_enable(ndev); + + spin_unlock_irqrestore(&priv->lock, flags); + + if (new_state && netif_msg_link(priv)) + phy_print_status(phydev); +} + +/* PHY init function */ +static int ravb_phy_init(struct net_device *ndev) +{ + struct device_node *np = ndev->dev.parent->of_node; + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + struct phy_device *phydev; + struct device_node *pn; + phy_interface_t iface; + int err; + + priv->link = 0; + priv->speed = 0; + priv->duplex = -1; + + /* Try connecting to PHY */ + pn = of_parse_phandle(np, "phy-handle", 0); + if (!pn) { + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (of_phy_is_fixed_link(np)) { + err = of_phy_register_fixed_link(np); + if (err) + return err; + } + pn = of_node_get(np); + } + + iface = priv->rgmii_override ? 
PHY_INTERFACE_MODE_RGMII + : priv->phy_interface; + phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface); + of_node_put(pn); + if (!phydev) { + netdev_err(ndev, "failed to connect PHY\n"); + err = -ENOENT; + goto err_deregister_fixed_link; + } + + if (!info->half_duplex) { + /* 10BASE, Pause and Asym Pause is not supported */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT); + + /* Half Duplex is not supported */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + } + + phy_attached_info(phydev); + + return 0; + +err_deregister_fixed_link: + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + + return err; +} + +/* PHY control start function */ +static int ravb_phy_start(struct net_device *ndev) +{ + int error; + + error = ravb_phy_init(ndev); + if (error) + return error; + + phy_start(ndev->phydev); + + return 0; +} + +static u32 ravb_get_msglevel(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + return priv->msg_enable; +} + +static void ravb_set_msglevel(struct net_device *ndev, u32 value) +{ + struct ravb_private *priv = netdev_priv(ndev); + + priv->msg_enable = value; +} + +static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = { + "rx_queue_0_current", + "tx_queue_0_current", + "rx_queue_0_dirty", + "tx_queue_0_dirty", + "rx_queue_0_packets", + "tx_queue_0_packets", + "rx_queue_0_bytes", + "tx_queue_0_bytes", + "rx_queue_0_mcast_packets", + "rx_queue_0_errors", + "rx_queue_0_crc_errors", + "rx_queue_0_frame_errors", + "rx_queue_0_length_errors", + "rx_queue_0_csum_offload_errors", + "rx_queue_0_over_errors", +}; + +static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_queue_0_current", + "tx_queue_0_current", + "rx_queue_0_dirty", + "tx_queue_0_dirty", + "rx_queue_0_packets", + "tx_queue_0_packets", + "rx_queue_0_bytes", + "tx_queue_0_bytes", + "rx_queue_0_mcast_packets", + "rx_queue_0_errors", + "rx_queue_0_crc_errors", + "rx_queue_0_frame_errors", + "rx_queue_0_length_errors", + "rx_queue_0_missed_errors", + "rx_queue_0_over_errors", + + "rx_queue_1_current", + "tx_queue_1_current", + "rx_queue_1_dirty", + "tx_queue_1_dirty", + "rx_queue_1_packets", + "tx_queue_1_packets", + "rx_queue_1_bytes", + "tx_queue_1_bytes", + "rx_queue_1_mcast_packets", + "rx_queue_1_errors", + "rx_queue_1_crc_errors", + "rx_queue_1_frame_errors", + "rx_queue_1_length_errors", + "rx_queue_1_missed_errors", + "rx_queue_1_over_errors", +}; + +static int ravb_get_sset_count(struct net_device *netdev, int sset) +{ + struct ravb_private *priv = netdev_priv(netdev); + const struct ravb_hw_info *info = priv->info; + + switch (sset) { + case ETH_SS_STATS: + return info->stats_len; + default: + return -EOPNOTSUPP; + } +} + +static void ravb_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *estats, u64 *data) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + int num_rx_q; + int i = 0; + int q; + + num_rx_q = info->nc_queues ? 
NUM_RX_QUEUE : 1; + /* Device-specific stats */ + for (q = RAVB_BE; q < num_rx_q; q++) { + struct net_device_stats *stats = &priv->stats[q]; + + data[i++] = priv->cur_rx[q]; + data[i++] = priv->cur_tx[q]; + data[i++] = priv->dirty_rx[q]; + data[i++] = priv->dirty_tx[q]; + data[i++] = stats->rx_packets; + data[i++] = stats->tx_packets; + data[i++] = stats->rx_bytes; + data[i++] = stats->tx_bytes; + data[i++] = stats->multicast; + data[i++] = stats->rx_errors; + data[i++] = stats->rx_crc_errors; + data[i++] = stats->rx_frame_errors; + data[i++] = stats->rx_length_errors; + data[i++] = stats->rx_missed_errors; + data[i++] = stats->rx_over_errors; + } +} + +static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, info->gstrings_stats, info->gstrings_size); + break; + } +} + +static void ravb_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct ravb_private *priv = netdev_priv(ndev); + + ring->rx_max_pending = BE_RX_RING_MAX; + ring->tx_max_pending = BE_TX_RING_MAX; + ring->rx_pending = priv->num_rx_ring[RAVB_BE]; + ring->tx_pending = priv->num_tx_ring[RAVB_BE]; +} + +static int ravb_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + int error; + + if (ring->tx_pending > BE_TX_RING_MAX || + ring->rx_pending > BE_RX_RING_MAX || + ring->tx_pending < BE_TX_RING_MIN || + ring->rx_pending < BE_RX_RING_MIN) + return -EINVAL; + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + if (netif_running(ndev)) { + netif_device_detach(ndev); + /* Stop PTP Clock driver */ + if (info->gptp) + ravb_ptp_stop(ndev); + /* Wait for DMA stopping */ + error = ravb_stop_dma(ndev); + if (error) { + netdev_err(ndev, + "cannot set ringparam! Any AVB processes are still running?\n"); + return error; + } + synchronize_irq(ndev->irq); + + /* Free all the skb's in the RX queue and the DMA buffers. 
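They are reallocated with the new ring sizes by ravb_dmac_init() below.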
*/ + ravb_ring_free(ndev, RAVB_BE); + if (info->nc_queues) + ravb_ring_free(ndev, RAVB_NC); + } + + /* Set new parameters */ + priv->num_rx_ring[RAVB_BE] = ring->rx_pending; + priv->num_tx_ring[RAVB_BE] = ring->tx_pending; + + if (netif_running(ndev)) { + error = ravb_dmac_init(ndev); + if (error) { + netdev_err(ndev, + "%s: ravb_dmac_init() failed, error %d\n", + __func__, error); + return error; + } + + ravb_emac_init(ndev); + + /* Initialise PTP Clock driver */ + if (info->gptp) + ravb_ptp_init(ndev, priv->pdev); + + netif_device_attach(ndev); + } + + return 0; +} + +static int ravb_get_ts_info(struct net_device *ndev, + struct ethtool_ts_info *info) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *hw_info = priv->info; + + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_ALL); + if (hw_info->gptp || hw_info->ccc_gac) + info->phc_index = ptp_clock_index(priv->ptp.clock); + + return 0; +} + +static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ravb_private *priv = netdev_priv(ndev); + + wol->supported = WAKE_MAGIC; + wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0; +} + +static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + + if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC)) + return -EOPNOTSUPP; + + priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); + + device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled); + + return 0; +} + +static const struct ethtool_ops ravb_ethtool_ops = { + .nway_reset = phy_ethtool_nway_reset, + .get_msglevel = ravb_get_msglevel, + .set_msglevel = ravb_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = ravb_get_strings, + .get_ethtool_stats = ravb_get_ethtool_stats, + .get_sset_count = ravb_get_sset_count, + .get_ringparam = ravb_get_ringparam, + .set_ringparam = ravb_set_ringparam, + .get_ts_info = ravb_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_wol = ravb_get_wol, + .set_wol = ravb_set_wol, +}; + +static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, + struct net_device *ndev, struct device *dev, + const char *ch) +{ + char *name; + int error; + + name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch); + if (!name) + return -ENOMEM; + error = request_irq(irq, handler, 0, name, ndev); + if (error) + netdev_err(ndev, "cannot request IRQ %s\n", name); + + return error; +} + +/* Network device open function for Ethernet AVB */ +static int ravb_open(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + int error; + + napi_enable(&priv->napi[RAVB_BE]); + if (info->nc_queues) + napi_enable(&priv->napi[RAVB_NC]); + + if (!info->multi_irqs) { + error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, + ndev->name, ndev); + if (error) { + netdev_err(ndev, "cannot request IRQ\n"); + goto out_napi_off; + } + } else { + error = 
ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev, + dev, "ch22:multi"); + if (error) + goto out_napi_off; + error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev, + dev, "ch24:emac"); + if (error) + goto out_free_irq; + error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt, + ndev, dev, "ch0:rx_be"); + if (error) + goto out_free_irq_emac; + error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt, + ndev, dev, "ch18:tx_be"); + if (error) + goto out_free_irq_be_rx; + error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt, + ndev, dev, "ch1:rx_nc"); + if (error) + goto out_free_irq_be_tx; + error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt, + ndev, dev, "ch19:tx_nc"); + if (error) + goto out_free_irq_nc_rx; + + if (info->err_mgmt_irqs) { + error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt, + ndev, dev, "err_a"); + if (error) + goto out_free_irq_nc_tx; + error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt, + ndev, dev, "mgmt_a"); + if (error) + goto out_free_irq_erra; + } + } + + /* Device init */ + error = ravb_dmac_init(ndev); + if (error) + goto out_free_irq_mgmta; + ravb_emac_init(ndev); + + /* Initialise PTP Clock driver */ + if (info->gptp) + ravb_ptp_init(ndev, priv->pdev); + + /* PHY control start */ + error = ravb_phy_start(ndev); + if (error) + goto out_ptp_stop; + + netif_tx_start_all_queues(ndev); + + return 0; + +out_ptp_stop: + /* Stop PTP Clock driver */ + if (info->gptp) + ravb_ptp_stop(ndev); + ravb_stop_dma(ndev); +out_free_irq_mgmta: + if (!info->multi_irqs) + goto out_free_irq; + if (info->err_mgmt_irqs) + free_irq(priv->mgmta_irq, ndev); +out_free_irq_erra: + if (info->err_mgmt_irqs) + free_irq(priv->erra_irq, ndev); +out_free_irq_nc_tx: + free_irq(priv->tx_irqs[RAVB_NC], ndev); +out_free_irq_nc_rx: + free_irq(priv->rx_irqs[RAVB_NC], ndev); +out_free_irq_be_tx: + free_irq(priv->tx_irqs[RAVB_BE], ndev); +out_free_irq_be_rx: + free_irq(priv->rx_irqs[RAVB_BE], ndev); +out_free_irq_emac: + free_irq(priv->emac_irq, ndev); +out_free_irq: + free_irq(ndev->irq, ndev); +out_napi_off: + if (info->nc_queues) + napi_disable(&priv->napi[RAVB_NC]); + napi_disable(&priv->napi[RAVB_BE]); + return error; +} + +/* Timeout function for Ethernet AVB */ +static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue) +{ + struct ravb_private *priv = netdev_priv(ndev); + + netif_err(priv, tx_err, ndev, + "transmit timed out, status %08x, resetting...\n", + ravb_read(ndev, ISS)); + + /* tx_errors count up */ + ndev->stats.tx_errors++; + + schedule_work(&priv->work); +} + +static void ravb_tx_timeout_work(struct work_struct *work) +{ + struct ravb_private *priv = container_of(work, struct ravb_private, + work); + const struct ravb_hw_info *info = priv->info; + struct net_device *ndev = priv->ndev; + int error; + + if (!rtnl_trylock()) { + usleep_range(1000, 2000); + schedule_work(&priv->work); + return; + } + + netif_tx_stop_all_queues(ndev); + + /* Stop PTP Clock driver */ + if (info->gptp) + ravb_ptp_stop(ndev); + + /* Wait for DMA stopping */ + if (ravb_stop_dma(ndev)) { + /* If ravb_stop_dma() fails, the hardware is still operating + * for TX and/or RX. So, this should not call the following + * functions because ravb_dmac_init() is possible to fail too. + * Also, this should not retry ravb_stop_dma() again and again + * here because it's possible to wait forever. So, this just + * re-enables the TX and RX and skip the following + * re-initialization procedure. 
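+ * If the hardware stays wedged, the watchdog will fire another TX + * timeout and this work will run again.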
+ */ + ravb_rcv_snd_enable(ndev); + goto out; + } + + ravb_ring_free(ndev, RAVB_BE); + if (info->nc_queues) + ravb_ring_free(ndev, RAVB_NC); + + /* Device init */ + error = ravb_dmac_init(ndev); + if (error) { + /* If ravb_dmac_init() fails, descriptors are freed. So, this + * should return here to avoid re-enabling the TX and RX in + * ravb_emac_init(). + */ + netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", + __func__, error); + goto out_unlock; + } + ravb_emac_init(ndev); + +out: + /* Initialise PTP Clock driver */ + if (info->gptp) + ravb_ptp_init(ndev, priv->pdev); + + netif_tx_start_all_queues(ndev); + +out_unlock: + rtnl_unlock(); +} + +/* Packet transmit function for Ethernet AVB */ +static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + unsigned int num_tx_desc = priv->num_tx_desc; + u16 q = skb_get_queue_mapping(skb); + struct ravb_tstamp_skb *ts_skb; + struct ravb_tx_desc *desc; + unsigned long flags; + dma_addr_t dma_addr; + void *buffer; + u32 entry; + u32 len; + + spin_lock_irqsave(&priv->lock, flags); + if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * + num_tx_desc) { + netif_err(priv, tx_queued, ndev, + "still transmitting with the full ring!\n"); + netif_stop_subqueue(ndev, q); + spin_unlock_irqrestore(&priv->lock, flags); + return NETDEV_TX_BUSY; + } + + if (skb_put_padto(skb, ETH_ZLEN)) + goto exit; + + entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc); + priv->tx_skb[q][entry / num_tx_desc] = skb; + + if (num_tx_desc > 1) { + buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + + entry / num_tx_desc * DPTR_ALIGN; + len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; + + /* Zero length DMA descriptors are problematic as they seem + * to terminate DMA transfers. Avoid them by simply using a + * length of DPTR_ALIGN (4) when skb data is aligned to + * DPTR_ALIGN. + * + * As skb is guaranteed to have at least ETH_ZLEN (60) + * bytes of data by the call to skb_put_padto() above this + * is safe with respect to both the length of the first DMA + * descriptor (len) overflowing the available data and the + * length of the second DMA descriptor (skb->len - len) + * being negative. 
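+ * For example, a minimal 60-byte frame whose data is already 4-byte + * aligned is split into a 4-byte first descriptor and a 56-byte + * second descriptor.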
+ */ + if (len == 0) + len = DPTR_ALIGN; + + memcpy(buffer, skb->data, len); + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto drop; + + desc = &priv->tx_ring[q][entry]; + desc->ds_tagl = cpu_to_le16(len); + desc->dptr = cpu_to_le32(dma_addr); + + buffer = skb->data + len; + len = skb->len - len; + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto unmap; + + desc++; + } else { + desc = &priv->tx_ring[q][entry]; + len = skb->len; + dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto drop; + } + desc->ds_tagl = cpu_to_le16(len); + desc->dptr = cpu_to_le32(dma_addr); + + /* TX timestamp required */ + if (info->gptp || info->ccc_gac) { + if (q == RAVB_NC) { + ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); + if (!ts_skb) { + if (num_tx_desc > 1) { + desc--; + dma_unmap_single(ndev->dev.parent, dma_addr, + len, DMA_TO_DEVICE); + } + goto unmap; + } + ts_skb->skb = skb_get(skb); + ts_skb->tag = priv->ts_skb_tag++; + priv->ts_skb_tag &= 0x3ff; + list_add_tail(&ts_skb->list, &priv->ts_skb_list); + + /* TAG and timestamp required flag */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; + desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); + } + + skb_tx_timestamp(skb); + } + /* Descriptor type must be set after all the above writes */ + dma_wmb(); + if (num_tx_desc > 1) { + desc->die_dt = DT_FEND; + desc--; + desc->die_dt = DT_FSTART; + } else { + desc->die_dt = DT_FSINGLE; + } + ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q); + + priv->cur_tx[q] += num_tx_desc; + if (priv->cur_tx[q] - priv->dirty_tx[q] > + (priv->num_tx_ring[q] - 1) * num_tx_desc && + !ravb_tx_free(ndev, q, true)) + netif_stop_subqueue(ndev, q); + +exit: + spin_unlock_irqrestore(&priv->lock, flags); + return NETDEV_TX_OK; + +unmap: + dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), + le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); +drop: + dev_kfree_skb_any(skb); + priv->tx_skb[q][entry / num_tx_desc] = NULL; + goto exit; +} + +static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + /* If skb needs TX timestamp, it is handled in network control queue */ + return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? 
RAVB_NC : + RAVB_BE; + +} + +static struct net_device_stats *ravb_get_stats(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + struct net_device_stats *nstats, *stats0, *stats1; + + nstats = &ndev->stats; + stats0 = &priv->stats[RAVB_BE]; + + if (info->tx_counters) { + nstats->tx_dropped += ravb_read(ndev, TROCR); + ravb_write(ndev, 0, TROCR); /* (write clear) */ + } + + if (info->carrier_counters) { + nstats->collisions += ravb_read(ndev, CXR41); + ravb_write(ndev, 0, CXR41); /* (write clear) */ + nstats->tx_carrier_errors += ravb_read(ndev, CXR42); + ravb_write(ndev, 0, CXR42); /* (write clear) */ + } + + nstats->rx_packets = stats0->rx_packets; + nstats->tx_packets = stats0->tx_packets; + nstats->rx_bytes = stats0->rx_bytes; + nstats->tx_bytes = stats0->tx_bytes; + nstats->multicast = stats0->multicast; + nstats->rx_errors = stats0->rx_errors; + nstats->rx_crc_errors = stats0->rx_crc_errors; + nstats->rx_frame_errors = stats0->rx_frame_errors; + nstats->rx_length_errors = stats0->rx_length_errors; + nstats->rx_missed_errors = stats0->rx_missed_errors; + nstats->rx_over_errors = stats0->rx_over_errors; + if (info->nc_queues) { + stats1 = &priv->stats[RAVB_NC]; + + nstats->rx_packets += stats1->rx_packets; + nstats->tx_packets += stats1->tx_packets; + nstats->rx_bytes += stats1->rx_bytes; + nstats->tx_bytes += stats1->tx_bytes; + nstats->multicast += stats1->multicast; + nstats->rx_errors += stats1->rx_errors; + nstats->rx_crc_errors += stats1->rx_crc_errors; + nstats->rx_frame_errors += stats1->rx_frame_errors; + nstats->rx_length_errors += stats1->rx_length_errors; + nstats->rx_missed_errors += stats1->rx_missed_errors; + nstats->rx_over_errors += stats1->rx_over_errors; + } + + return nstats; +} + +/* Update promiscuous bit */ +static void ravb_set_rx_mode(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + ravb_modify(ndev, ECMR, ECMR_PRM, + ndev->flags & IFF_PROMISC ? ECMR_PRM : 0); + spin_unlock_irqrestore(&priv->lock, flags); +} + +/* Device close function for Ethernet AVB */ +static int ravb_close(struct net_device *ndev) +{ + struct device_node *np = ndev->dev.parent->of_node; + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + struct ravb_tstamp_skb *ts_skb, *ts_skb2; + + netif_tx_stop_all_queues(ndev); + + /* Disable interrupts by clearing the interrupt masks. 
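No new RX/TX interrupts can then fire while the queues are torn down.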
*/ + ravb_write(ndev, 0, RIC0); + ravb_write(ndev, 0, RIC2); + ravb_write(ndev, 0, TIC); + + /* Stop PTP Clock driver */ + if (info->gptp) + ravb_ptp_stop(ndev); + + /* Set the config mode to stop the AVB-DMAC's processes */ + if (ravb_stop_dma(ndev) < 0) + netdev_err(ndev, + "device will be stopped after h/w processes are done.\n"); + + /* Clear the timestamp list */ + if (info->gptp || info->ccc_gac) { + list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { + list_del(&ts_skb->list); + kfree_skb(ts_skb->skb); + kfree(ts_skb); + } + } + + /* PHY disconnect */ + if (ndev->phydev) { + phy_stop(ndev->phydev); + phy_disconnect(ndev->phydev); + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + } + + cancel_work_sync(&priv->work); + + if (info->multi_irqs) { + free_irq(priv->tx_irqs[RAVB_NC], ndev); + free_irq(priv->rx_irqs[RAVB_NC], ndev); + free_irq(priv->tx_irqs[RAVB_BE], ndev); + free_irq(priv->rx_irqs[RAVB_BE], ndev); + free_irq(priv->emac_irq, ndev); + if (info->err_mgmt_irqs) { + free_irq(priv->erra_irq, ndev); + free_irq(priv->mgmta_irq, ndev); + } + } + free_irq(ndev->irq, ndev); + + if (info->nc_queues) + napi_disable(&priv->napi[RAVB_NC]); + napi_disable(&priv->napi[RAVB_BE]); + + /* Free all the skb's in the RX queue and the DMA buffers. */ + ravb_ring_free(ndev, RAVB_BE); + if (info->nc_queues) + ravb_ring_free(ndev, RAVB_NC); + + return 0; +} + +static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct hwtstamp_config config; + + config.flags = 0; + config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : + HWTSTAMP_TX_OFF; + switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) { + case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT: + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + break; + case RAVB_RXTSTAMP_TYPE_ALL: + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + config.rx_filter = HWTSTAMP_FILTER_NONE; + } + + return copy_to_user(req->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/* Control hardware time stamping */ +static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct hwtstamp_config config; + u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED; + u32 tstamp_tx_ctrl; + + if (copy_from_user(&config, req->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + tstamp_tx_ctrl = 0; + break; + case HWTSTAMP_TX_ON: + tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + tstamp_rx_ctrl = 0; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; + break; + default: + config.rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL; + } + + priv->tstamp_tx_ctrl = tstamp_tx_ctrl; + priv->tstamp_rx_ctrl = tstamp_rx_ctrl; + + return copy_to_user(req->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} + +/* ioctl to device function */ +static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) +{ + struct phy_device *phydev = ndev->phydev; + + if (!netif_running(ndev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + switch (cmd) { + case SIOCGHWTSTAMP: + return ravb_hwtstamp_get(ndev, req); + case SIOCSHWTSTAMP: + return ravb_hwtstamp_set(ndev, req); + } + + return phy_mii_ioctl(phydev, req, cmd); +} + +static int ravb_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct ravb_private *priv = netdev_priv(ndev); + + ndev->mtu = new_mtu; + + if (netif_running(ndev)) { + synchronize_irq(priv->emac_irq); + ravb_emac_init(ndev); + } + + netdev_update_features(ndev); + + return 0; +} + +static void ravb_set_rx_csum(struct net_device *ndev, bool enable) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* Disable TX and RX */ + ravb_rcv_snd_disable(ndev); + + /* Modify RX Checksum setting */ + ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0); + + /* Enable TX and RX */ + ravb_rcv_snd_enable(ndev); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static int ravb_set_features_gbeth(struct net_device *ndev, + netdev_features_t features) +{ + /* Place holder */ + return 0; +} + +static int ravb_set_features_rcar(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + + if (changed & NETIF_F_RXCSUM) + ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM); + + ndev->features = features; + + return 0; +} + +static int ravb_set_features(struct net_device *ndev, + netdev_features_t features) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + + return info->set_feature(ndev, features); +} + +static const struct net_device_ops ravb_netdev_ops = { + .ndo_open = ravb_open, + .ndo_stop = ravb_close, + .ndo_start_xmit = ravb_start_xmit, + .ndo_select_queue = ravb_select_queue, + .ndo_get_stats = ravb_get_stats, + .ndo_set_rx_mode = ravb_set_rx_mode, + .ndo_tx_timeout = ravb_tx_timeout, + .ndo_eth_ioctl = ravb_do_ioctl, + .ndo_change_mtu = ravb_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_features = ravb_set_features, +}; + +/* MDIO bus init function */ +static int ravb_mdio_init(struct ravb_private *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + struct phy_device *phydev; + struct device_node *pn; + int error; + + /* Bitbang init */ + priv->mdiobb.ops = &bb_ops; + + /* MII controller setting */ + priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); + if (!priv->mii_bus) + return -ENOMEM; + + /* Hook up MII support for ethtool */ + priv->mii_bus->name = "ravb_mii"; + priv->mii_bus->parent = dev; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* Register MDIO bus */ + error = of_mdiobus_register(priv->mii_bus, dev->of_node); + if (error) + goto out_free_bus; + + pn = of_parse_phandle(dev->of_node, "phy-handle", 0); + phydev = of_phy_find_device(pn); + if (phydev) { + phydev->mac_managed_pm = true; + put_device(&phydev->mdio.dev); + } + of_node_put(pn); + + return 0; + +out_free_bus: + free_mdio_bitbang(priv->mii_bus); + return error; +} + +/* MDIO bus release function */ +static int ravb_mdio_release(struct ravb_private *priv) +{ + /* Unregister mdio bus */ + mdiobus_unregister(priv->mii_bus); + + /* Free bitbang info */ + 
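/* This also frees the mii_bus allocated by alloc_mdio_bitbang() */ +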
free_mdio_bitbang(priv->mii_bus); + + return 0; +} + +static const struct ravb_hw_info ravb_gen3_hw_info = { + .rx_ring_free = ravb_rx_ring_free_rcar, + .rx_ring_format = ravb_rx_ring_format_rcar, + .alloc_rx_desc = ravb_alloc_rx_desc_rcar, + .receive = ravb_rx_rcar, + .set_rate = ravb_set_rate_rcar, + .set_feature = ravb_set_features_rcar, + .dmac_init = ravb_dmac_init_rcar, + .emac_init = ravb_emac_init_rcar, + .gstrings_stats = ravb_gstrings_stats, + .gstrings_size = sizeof(ravb_gstrings_stats), + .net_hw_features = NETIF_F_RXCSUM, + .net_features = NETIF_F_RXCSUM, + .stats_len = ARRAY_SIZE(ravb_gstrings_stats), + .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, + .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .rx_max_buf_size = SZ_2K, + .internal_delay = 1, + .tx_counters = 1, + .multi_irqs = 1, + .irq_en_dis = 1, + .ccc_gac = 1, + .nc_queues = 1, + .magic_pkt = 1, +}; + +static const struct ravb_hw_info ravb_gen2_hw_info = { + .rx_ring_free = ravb_rx_ring_free_rcar, + .rx_ring_format = ravb_rx_ring_format_rcar, + .alloc_rx_desc = ravb_alloc_rx_desc_rcar, + .receive = ravb_rx_rcar, + .set_rate = ravb_set_rate_rcar, + .set_feature = ravb_set_features_rcar, + .dmac_init = ravb_dmac_init_rcar, + .emac_init = ravb_emac_init_rcar, + .gstrings_stats = ravb_gstrings_stats, + .gstrings_size = sizeof(ravb_gstrings_stats), + .net_hw_features = NETIF_F_RXCSUM, + .net_features = NETIF_F_RXCSUM, + .stats_len = ARRAY_SIZE(ravb_gstrings_stats), + .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, + .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .rx_max_buf_size = SZ_2K, + .aligned_tx = 1, + .gptp = 1, + .nc_queues = 1, + .magic_pkt = 1, +}; + +static const struct ravb_hw_info ravb_rzv2m_hw_info = { + .rx_ring_free = ravb_rx_ring_free_rcar, + .rx_ring_format = ravb_rx_ring_format_rcar, + .alloc_rx_desc = ravb_alloc_rx_desc_rcar, + .receive = ravb_rx_rcar, + .set_rate = ravb_set_rate_rcar, + .set_feature = ravb_set_features_rcar, + .dmac_init = ravb_dmac_init_rcar, + .emac_init = ravb_emac_init_rcar, + .gstrings_stats = ravb_gstrings_stats, + .gstrings_size = sizeof(ravb_gstrings_stats), + .net_hw_features = NETIF_F_RXCSUM, + .net_features = NETIF_F_RXCSUM, + .stats_len = ARRAY_SIZE(ravb_gstrings_stats), + .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, + .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .rx_max_buf_size = SZ_2K, + .multi_irqs = 1, + .err_mgmt_irqs = 1, + .gptp = 1, + .gptp_ref_clk = 1, + .nc_queues = 1, + .magic_pkt = 1, +}; + +static const struct ravb_hw_info gbeth_hw_info = { + .rx_ring_free = ravb_rx_ring_free_gbeth, + .rx_ring_format = ravb_rx_ring_format_gbeth, + .alloc_rx_desc = ravb_alloc_rx_desc_gbeth, + .receive = ravb_rx_gbeth, + .set_rate = ravb_set_rate_gbeth, + .set_feature = ravb_set_features_gbeth, + .dmac_init = ravb_dmac_init_gbeth, + .emac_init = ravb_emac_init_gbeth, + .gstrings_stats = ravb_gstrings_stats_gbeth, + .gstrings_size = sizeof(ravb_gstrings_stats_gbeth), + .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth), + .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN), + .tccr_mask = TCCR_TSRQ0, + .rx_max_buf_size = SZ_8K, + .aligned_tx = 1, + .tx_counters = 1, + .carrier_counters = 1, + .half_duplex = 1, +}; + +static const struct of_device_id ravb_match_table[] = { + { .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info }, + { .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info }, + { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info }, + { .compatible = 
"renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info }, + { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info }, + { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info }, + { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info }, + { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info }, + { } +}; +MODULE_DEVICE_TABLE(of, ravb_match_table); + +static int ravb_set_gti(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + struct device *dev = ndev->dev.parent; + unsigned long rate; + uint64_t inc; + + if (info->gptp_ref_clk) + rate = clk_get_rate(priv->gptp_clk); + else + rate = clk_get_rate(priv->clk); + if (!rate) + return -EINVAL; + + inc = div64_ul(1000000000ULL << 20, rate); + + if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) { + dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n", + inc, GTI_TIV_MIN, GTI_TIV_MAX); + return -EINVAL; + } + + ravb_write(ndev, inc, GTI); + + return 0; +} + +static int ravb_set_config_mode(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + int error; + + if (info->gptp) { + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); + if (error) + return error; + /* Set CSEL value */ + ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); + } else if (info->ccc_gac) { + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB); + } else { + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); + } + + return error; +} + +/* Set tx and rx clock internal delay modes */ +static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + bool explicit_delay = false; + u32 delay; + + if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) { + /* Valid values are 0 and 1800, according to DT bindings */ + priv->rxcidm = !!delay; + explicit_delay = true; + } + if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) { + /* Valid values are 0 and 2000, according to DT bindings */ + priv->txcidm = !!delay; + explicit_delay = true; + } + + if (explicit_delay) + return; + + /* Fall back to legacy rgmii-*id behavior */ + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) { + priv->rxcidm = 1; + priv->rgmii_override = 1; + } + + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) { + priv->txcidm = 1; + priv->rgmii_override = 1; + } +} + +static void ravb_set_delay_mode(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + u32 set = 0; + + if (priv->rxcidm) + set |= APSR_RDM; + if (priv->txcidm) + set |= APSR_TDM; + ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set); +} + +static int ravb_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + const struct ravb_hw_info *info; + struct reset_control *rstc; + struct ravb_private *priv; + struct net_device *ndev; + int error, irq, q; + struct resource *res; + int i; + + if (!np) { + dev_err(&pdev->dev, + "this driver is required to be instantiated from device tree\n"); + return -EINVAL; + } + + rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); + if (IS_ERR(rstc)) + return dev_err_probe(&pdev->dev, PTR_ERR(rstc), + "failed to get cpg reset\n"); + + ndev = alloc_etherdev_mqs(sizeof(struct ravb_private), + NUM_TX_QUEUE, 
NUM_RX_QUEUE); + if (!ndev) + return -ENOMEM; + + info = of_device_get_match_data(&pdev->dev); + + ndev->features = info->net_features; + ndev->hw_features = info->net_hw_features; + + error = reset_control_deassert(rstc); + if (error) + goto out_free_netdev; + + pm_runtime_enable(&pdev->dev); + error = pm_runtime_resume_and_get(&pdev->dev); + if (error < 0) + goto out_rpm_disable; + + if (info->multi_irqs) { + if (info->err_mgmt_irqs) + irq = platform_get_irq_byname(pdev, "dia"); + else + irq = platform_get_irq_byname(pdev, "ch22"); + } else { + irq = platform_get_irq(pdev, 0); + } + if (irq < 0) { + error = irq; + goto out_release; + } + ndev->irq = irq; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + priv = netdev_priv(ndev); + priv->info = info; + priv->rstc = rstc; + priv->ndev = ndev; + priv->pdev = pdev; + priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; + priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE; + if (info->nc_queues) { + priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; + priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; + } + + priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(priv->addr)) { + error = PTR_ERR(priv->addr); + goto out_release; + } + + /* The Ether-specific entries in the device structure. */ + ndev->base_addr = res->start; + + spin_lock_init(&priv->lock); + INIT_WORK(&priv->work, ravb_tx_timeout_work); + + error = of_get_phy_mode(np, &priv->phy_interface); + if (error && error != -ENODEV) + goto out_release; + + priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link"); + priv->avb_link_active_low = + of_property_read_bool(np, "renesas,ether-link-active-low"); + + if (info->multi_irqs) { + if (info->err_mgmt_irqs) + irq = platform_get_irq_byname(pdev, "line3"); + else + irq = platform_get_irq_byname(pdev, "ch24"); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->emac_irq = irq; + for (i = 0; i < NUM_RX_QUEUE; i++) { + irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->rx_irqs[i] = irq; + } + for (i = 0; i < NUM_TX_QUEUE; i++) { + irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->tx_irqs[i] = irq; + } + + if (info->err_mgmt_irqs) { + irq = platform_get_irq_byname(pdev, "err_a"); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->erra_irq = irq; + + irq = platform_get_irq_byname(pdev, "mgmt_a"); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->mgmta_irq = irq; + } + } + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + error = PTR_ERR(priv->clk); + goto out_release; + } + + priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk"); + if (IS_ERR(priv->refclk)) { + error = PTR_ERR(priv->refclk); + goto out_release; + } + clk_prepare_enable(priv->refclk); + + if (info->gptp_ref_clk) { + priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp"); + if (IS_ERR(priv->gptp_clk)) { + error = PTR_ERR(priv->gptp_clk); + goto out_disable_refclk; + } + clk_prepare_enable(priv->gptp_clk); + } + + ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + ndev->min_mtu = ETH_MIN_MTU; + + /* FIXME: R-Car Gen2 has a 4-byte alignment restriction for the TX buffer. + * Use two descriptors to handle this situation: the first descriptor + * handles the aligned part of the data buffer and the second handles + * the data that overflows because of the alignment. + */ + priv->num_tx_desc = info->aligned_tx ? 
2 : 1; + + /* Set function */ + ndev->netdev_ops = &ravb_netdev_ops; + ndev->ethtool_ops = &ravb_ethtool_ops; + + /* Set AVB config mode */ + error = ravb_set_config_mode(ndev); + if (error) + goto out_disable_gptp_clk; + + if (info->gptp || info->ccc_gac) { + /* Set GTI value */ + error = ravb_set_gti(ndev); + if (error) + goto out_disable_gptp_clk; + + /* Request GTI loading */ + ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + } + + if (info->internal_delay) { + ravb_parse_delay_mode(np, ndev); + ravb_set_delay_mode(ndev); + } + + /* Allocate descriptor base address table */ + priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; + priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, + &priv->desc_bat_dma, GFP_KERNEL); + if (!priv->desc_bat) { + dev_err(&pdev->dev, + "Cannot allocate desc base address table (size %d bytes)\n", + priv->desc_bat_size); + error = -ENOMEM; + goto out_disable_gptp_clk; + } + for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++) + priv->desc_bat[q].die_dt = DT_EOS; + ravb_write(ndev, priv->desc_bat_dma, DBAT); + + /* Initialise HW timestamp list */ + INIT_LIST_HEAD(&priv->ts_skb_list); + + /* Initialise PTP Clock driver */ + if (info->ccc_gac) + ravb_ptp_init(ndev, pdev); + + /* Debug message level */ + priv->msg_enable = RAVB_DEF_MSG_ENABLE; + + /* Read and set MAC address */ + ravb_read_mac_address(np, ndev); + if (!is_valid_ether_addr(ndev->dev_addr)) { + dev_warn(&pdev->dev, + "no valid MAC address supplied, using a random one\n"); + eth_hw_addr_random(ndev); + } + + /* MDIO bus init */ + error = ravb_mdio_init(priv); + if (error) { + dev_err(&pdev->dev, "failed to initialize MDIO\n"); + goto out_dma_free; + } + + netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll); + if (info->nc_queues) + netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll); + + /* Network device register */ + error = register_netdev(ndev); + if (error) + goto out_napi_del; + + device_set_wakeup_capable(&pdev->dev, 1); + + /* Print device information */ + netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", + (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); + + platform_set_drvdata(pdev, ndev); + + return 0; + +out_napi_del: + if (info->nc_queues) + netif_napi_del(&priv->napi[RAVB_NC]); + + netif_napi_del(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); +out_dma_free: + dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, + priv->desc_bat_dma); + + /* Stop PTP Clock driver */ + if (info->ccc_gac) + ravb_ptp_stop(ndev); +out_disable_gptp_clk: + clk_disable_unprepare(priv->gptp_clk); +out_disable_refclk: + clk_disable_unprepare(priv->refclk); +out_release: + pm_runtime_put(&pdev->dev); +out_rpm_disable: + pm_runtime_disable(&pdev->dev); + reset_control_assert(rstc); +out_free_netdev: + free_netdev(ndev); + return error; +} + +static int ravb_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + + unregister_netdev(ndev); + if (info->nc_queues) + netif_napi_del(&priv->napi[RAVB_NC]); + netif_napi_del(&priv->napi[RAVB_BE]); + + ravb_mdio_release(priv); + + /* Stop PTP Clock driver */ + if (info->ccc_gac) + ravb_ptp_stop(ndev); + + dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, + priv->desc_bat_dma); + + ravb_set_opmode(ndev, CCC_OPC_RESET); + + clk_disable_unprepare(priv->gptp_clk); + clk_disable_unprepare(priv->refclk); + + pm_runtime_put_sync(&pdev->dev); + 
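/* Balances pm_runtime_enable() in ravb_probe() */ +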
pm_runtime_disable(&pdev->dev); + reset_control_assert(priv->rstc); + free_netdev(ndev); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static int ravb_wol_setup(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + + /* Disable interrupts by clearing the interrupt masks. */ + ravb_write(ndev, 0, RIC0); + ravb_write(ndev, 0, RIC2); + ravb_write(ndev, 0, TIC); + + /* Only allow ECI interrupts */ + synchronize_irq(priv->emac_irq); + if (info->nc_queues) + napi_disable(&priv->napi[RAVB_NC]); + napi_disable(&priv->napi[RAVB_BE]); + ravb_write(ndev, ECSIPR_MPDIP, ECSIPR); + + /* Enable MagicPacket */ + ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); + + return enable_irq_wake(priv->emac_irq); +} + +static int ravb_wol_restore(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + + if (info->nc_queues) + napi_enable(&priv->napi[RAVB_NC]); + napi_enable(&priv->napi[RAVB_BE]); + + /* Disable MagicPacket */ + ravb_modify(ndev, ECMR, ECMR_MPDE, 0); + + ravb_close(ndev); + + return disable_irq_wake(priv->emac_irq); +} + +static int __maybe_unused ravb_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct ravb_private *priv = netdev_priv(ndev); + int ret; + + if (!netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + + if (priv->wol_enabled) + ret = ravb_wol_setup(ndev); + else + ret = ravb_close(ndev); + + if (priv->info->ccc_gac) + ravb_ptp_stop(ndev); + + return ret; +} + +static int __maybe_unused ravb_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + int ret = 0; + + /* If WoL is enabled, set reset mode to rearm the WoL logic */ + if (priv->wol_enabled) { + ret = ravb_set_opmode(ndev, CCC_OPC_RESET); + if (ret) + return ret; + } + + /* All registers have been reset to their default values. + * Restore all registers which were set up at probe time and + * reopen the device if it was running before the system suspended. + */ + + /* Set AVB config mode */ + ret = ravb_set_config_mode(ndev); + if (ret) + return ret; + + if (info->gptp || info->ccc_gac) { + /* Set GTI value */ + ret = ravb_set_gti(ndev); + if (ret) + return ret; + + /* Request GTI loading */ + ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + } + + if (info->internal_delay) + ravb_set_delay_mode(ndev); + + /* Restore descriptor base address table */ + ravb_write(ndev, priv->desc_bat_dma, DBAT); + + if (priv->info->ccc_gac) + ravb_ptp_init(ndev, priv->pdev); + + if (netif_running(ndev)) { + if (priv->wol_enabled) { + ret = ravb_wol_restore(ndev); + if (ret) + return ret; + } + ret = ravb_open(ndev); + if (ret < 0) + return ret; + ravb_set_rx_mode(ndev); + netif_device_attach(ndev); + } + + return ret; +} + +static int __maybe_unused ravb_runtime_nop(struct device *dev) +{ + /* Runtime PM callback shared between ->runtime_suspend() + * and ->runtime_resume(). Simply returns success. + * + * This driver re-initializes all registers after + * pm_runtime_get_sync() anyway so there is no need + * to save and restore registers here. 
+ */ + return 0; +} + +static const struct dev_pm_ops ravb_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume) + SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL) +}; + +static struct platform_driver ravb_driver = { + .probe = ravb_probe, + .remove = ravb_remove, + .driver = { + .name = "ravb", + .pm = &ravb_dev_pm_ops, + .of_match_table = ravb_match_table, + }, +}; + +module_platform_driver(ravb_driver); + +MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai"); +MODULE_DESCRIPTION("Renesas Ethernet AVB driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c new file mode 100644 index 0000000000..6e4ef7af27 --- /dev/null +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* PTP 1588 clock using the Renesas Ethernet AVB + * + * Copyright (C) 2013-2015 Renesas Electronics Corporation + * Copyright (C) 2015 Renesas Solutions Corp. + * Copyright (C) 2015-2016 Cogent Embedded, Inc. + */ + +#include "ravb.h" + +static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request) +{ + struct net_device *ndev = priv->ndev; + int error; + + error = ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ); + if (error) + return error; + + ravb_modify(ndev, GCCR, request, request); + return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ); +} + +/* Caller must hold the lock */ +static int ravb_ptp_time_read(struct ravb_private *priv, struct timespec64 *ts) +{ + struct net_device *ndev = priv->ndev; + int error; + + error = ravb_ptp_tcr_request(priv, GCCR_TCR_CAPTURE); + if (error) + return error; + + ts->tv_nsec = ravb_read(ndev, GCT0); + ts->tv_sec = ravb_read(ndev, GCT1) | + ((s64)ravb_read(ndev, GCT2) << 32); + + return 0; +} + +/* Caller must hold the lock */ +static int ravb_ptp_time_write(struct ravb_private *priv, + const struct timespec64 *ts) +{ + struct net_device *ndev = priv->ndev; + int error; + u32 gccr; + + error = ravb_ptp_tcr_request(priv, GCCR_TCR_RESET); + if (error) + return error; + + gccr = ravb_read(ndev, GCCR); + if (gccr & GCCR_LTO) + return -EBUSY; + ravb_write(ndev, ts->tv_nsec, GTO0); + ravb_write(ndev, ts->tv_sec, GTO1); + ravb_write(ndev, (ts->tv_sec >> 32) & 0xffff, GTO2); + ravb_write(ndev, gccr | GCCR_LTO, GCCR); + + return 0; +} + +/* Caller must hold the lock */ +static int ravb_ptp_update_compare(struct ravb_private *priv, u32 ns) +{ + struct net_device *ndev = priv->ndev; + /* When the comparison value (GPTC.PTCV) is in range of + * [x-1 to x+1] (x is the configured increment value in + * GTI.TIV), it may happen that a comparison match is + * not detected when the timer wraps around. 
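+ * The clamping below therefore keeps the requested value at least + * one full increment away from the wrap-around point on either side.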
+ */ + u32 gti_ns_plus_1 = (priv->ptp.current_addend >> 20) + 1; + u32 gccr; + + if (ns < gti_ns_plus_1) + ns = gti_ns_plus_1; + else if (ns > 0 - gti_ns_plus_1) + ns = 0 - gti_ns_plus_1; + + gccr = ravb_read(ndev, GCCR); + if (gccr & GCCR_LPTC) + return -EBUSY; + ravb_write(ndev, ns, GPTC); + ravb_write(ndev, gccr | GCCR_LPTC, GCCR); + + return 0; +} + +/* PTP clock operations */ +static int ravb_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct ravb_private *priv = container_of(ptp, struct ravb_private, + ptp.info); + struct net_device *ndev = priv->ndev; + unsigned long flags; + u32 addend; + u32 gccr; + + addend = (u32)adjust_by_scaled_ppm(priv->ptp.default_addend, + scaled_ppm); + + spin_lock_irqsave(&priv->lock, flags); + + priv->ptp.current_addend = addend; + + gccr = ravb_read(ndev, GCCR); + if (gccr & GCCR_LTI) { + spin_unlock_irqrestore(&priv->lock, flags); + return -EBUSY; + } + ravb_write(ndev, addend & GTI_TIV, GTI); + ravb_write(ndev, gccr | GCCR_LTI, GCCR); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int ravb_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct ravb_private *priv = container_of(ptp, struct ravb_private, + ptp.info); + struct timespec64 ts; + unsigned long flags; + int error; + + spin_lock_irqsave(&priv->lock, flags); + error = ravb_ptp_time_read(priv, &ts); + if (!error) { + u64 now = ktime_to_ns(timespec64_to_ktime(ts)); + + ts = ns_to_timespec64(now + delta); + error = ravb_ptp_time_write(priv, &ts); + } + spin_unlock_irqrestore(&priv->lock, flags); + + return error; +} + +static int ravb_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct ravb_private *priv = container_of(ptp, struct ravb_private, + ptp.info); + unsigned long flags; + int error; + + spin_lock_irqsave(&priv->lock, flags); + error = ravb_ptp_time_read(priv, ts); + spin_unlock_irqrestore(&priv->lock, flags); + + return error; +} + +static int ravb_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ravb_private *priv = container_of(ptp, struct ravb_private, + ptp.info); + unsigned long flags; + int error; + + spin_lock_irqsave(&priv->lock, flags); + error = ravb_ptp_time_write(priv, ts); + spin_unlock_irqrestore(&priv->lock, flags); + + return error; +} + +static int ravb_ptp_extts(struct ptp_clock_info *ptp, + struct ptp_extts_request *req, int on) +{ + struct ravb_private *priv = container_of(ptp, struct ravb_private, + ptp.info); + const struct ravb_hw_info *info = priv->info; + struct net_device *ndev = priv->ndev; + unsigned long flags; + + /* Reject requests with unsupported flags */ + if (req->flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + if (req->index) + return -EINVAL; + + if (priv->ptp.extts[req->index] == on) + return 0; + priv->ptp.extts[req->index] = on; + + spin_lock_irqsave(&priv->lock, flags); + if (!info->irq_en_dis) + ravb_modify(ndev, GIC, GIC_PTCE, on ? 
GIC_PTCE : 0); + else if (on) + ravb_write(ndev, GIE_PTCS, GIE); + else + ravb_write(ndev, GID_PTCD, GID); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int ravb_ptp_perout(struct ptp_clock_info *ptp, + struct ptp_perout_request *req, int on) +{ + struct ravb_private *priv = container_of(ptp, struct ravb_private, + ptp.info); + const struct ravb_hw_info *info = priv->info; + struct net_device *ndev = priv->ndev; + struct ravb_ptp_perout *perout; + unsigned long flags; + int error = 0; + + /* Reject requests with unsupported flags */ + if (req->flags) + return -EOPNOTSUPP; + + if (req->index) + return -EINVAL; + + if (on) { + u64 start_ns; + u64 period_ns; + + start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec; + period_ns = req->period.sec * NSEC_PER_SEC + req->period.nsec; + + if (start_ns > U32_MAX) { + netdev_warn(ndev, + "ptp: start value (nsec) is over limit. Maximum size of start is only 32 bits\n"); + return -ERANGE; + } + + if (period_ns > U32_MAX) { + netdev_warn(ndev, + "ptp: period value (nsec) is over limit. Maximum size of period is only 32 bits\n"); + return -ERANGE; + } + + spin_lock_irqsave(&priv->lock, flags); + + perout = &priv->ptp.perout[req->index]; + perout->target = (u32)start_ns; + perout->period = (u32)period_ns; + error = ravb_ptp_update_compare(priv, (u32)start_ns); + if (!error) { + /* Unmask interrupt */ + if (!info->irq_en_dis) + ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); + else + ravb_write(ndev, GIE_PTMS0, GIE); + } + } else { + spin_lock_irqsave(&priv->lock, flags); + + perout = &priv->ptp.perout[req->index]; + perout->period = 0; + + /* Mask interrupt */ + if (!info->irq_en_dis) + ravb_modify(ndev, GIC, GIC_PTME, 0); + else + ravb_write(ndev, GID_PTMD0, GID); + } + spin_unlock_irqrestore(&priv->lock, flags); + + return error; +} + +static int ravb_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *req, int on) +{ + switch (req->type) { + case PTP_CLK_REQ_EXTTS: + return ravb_ptp_extts(ptp, &req->extts, on); + case PTP_CLK_REQ_PEROUT: + return ravb_ptp_perout(ptp, &req->perout, on); + default: + return -EOPNOTSUPP; + } +} + +static const struct ptp_clock_info ravb_ptp_info = { + .owner = THIS_MODULE, + .name = "ravb clock", + .max_adj = 50000000, + .n_ext_ts = N_EXT_TS, + .n_per_out = N_PER_OUT, + .adjfine = ravb_ptp_adjfine, + .adjtime = ravb_ptp_adjtime, + .gettime64 = ravb_ptp_gettime64, + .settime64 = ravb_ptp_settime64, + .enable = ravb_ptp_enable, +}; + +/* Caller must hold the lock */ +void ravb_ptp_interrupt(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + u32 gis = ravb_read(ndev, GIS); + + gis &= ravb_read(ndev, GIC); + if (gis & GIS_PTCF) { + struct ptp_clock_event event; + + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ravb_read(ndev, GCPT); + ptp_clock_event(priv->ptp.clock, &event); + } + if (gis & GIS_PTMF) { + struct ravb_ptp_perout *perout = priv->ptp.perout; + + if (perout->period) { + perout->target += perout->period; + ravb_ptp_update_compare(priv, perout->target); + } + } + + ravb_write(ndev, ~(gis | GIS_RESERVED), GIS); +} + +void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned long flags; + + priv->ptp.info = ravb_ptp_info; + + priv->ptp.default_addend = ravb_read(ndev, GTI); + priv->ptp.current_addend = priv->ptp.default_addend; + + spin_lock_irqsave(&priv->lock, flags); + ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ); + ravb_modify(ndev, GCCR, 
GCCR_TCSS, GCCR_TCSS_ADJGPTP); + spin_unlock_irqrestore(&priv->lock, flags); + + priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev); +} + +void ravb_ptp_stop(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + ravb_write(ndev, 0, GIC); + ravb_write(ndev, 0, GIS); + + ptp_clock_unregister(priv->ptp.clock); +} diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c new file mode 100644 index 0000000000..c007e33c47 --- /dev/null +++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Renesas R-Car Gen4 gPTP device driver + * + * Copyright (C) 2022 Renesas Electronics Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include "rcar_gen4_ptp.h" +#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info) + +static const struct rcar_gen4_ptp_reg_offset s4_offs = { + .enable = PTPTMEC, + .disable = PTPTMDC, + .increment = PTPTIVC0, + .config_t0 = PTPTOVC00, + .config_t1 = PTPTOVC10, + .config_t2 = PTPTOVC20, + .monitor_t0 = PTPGPTPTM00, + .monitor_t1 = PTPGPTPTM10, + .monitor_t2 = PTPGPTPTM20, +}; + +static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp); + bool neg_adj = scaled_ppm < 0 ? true : false; + s64 addend = ptp_priv->default_addend; + s64 diff; + + if (neg_adj) + scaled_ppm = -scaled_ppm; + diff = div_s64(addend * scaled_ppm_to_ppb(scaled_ppm), NSEC_PER_SEC); + addend = neg_adj ? addend - diff : addend + diff; + + iowrite32(addend, ptp_priv->addr + ptp_priv->offs->increment); + + return 0; +} + +/* Caller must hold the lock */ +static void _rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp); + + ts->tv_nsec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t0); + ts->tv_sec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t1) | + ((s64)ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t2) << 32); +} + +static int rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp); + unsigned long flags; + + spin_lock_irqsave(&ptp_priv->lock, flags); + _rcar_gen4_ptp_gettime(ptp, ts); + spin_unlock_irqrestore(&ptp_priv->lock, flags); + + return 0; +} + +/* Caller must hold the lock */ +static void _rcar_gen4_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp); + + iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable); + iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t2); + iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t1); + iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t0); + iowrite32(1, ptp_priv->addr + ptp_priv->offs->enable); + iowrite32(ts->tv_sec >> 32, ptp_priv->addr + ptp_priv->offs->config_t2); + iowrite32(ts->tv_sec, ptp_priv->addr + ptp_priv->offs->config_t1); + iowrite32(ts->tv_nsec, ptp_priv->addr + ptp_priv->offs->config_t0); +} + +static int rcar_gen4_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp); + unsigned long flags; + + spin_lock_irqsave(&ptp_priv->lock, flags); + _rcar_gen4_ptp_settime(ptp, ts); + spin_unlock_irqrestore(&ptp_priv->lock, flags); + + return 0; +} + +static int rcar_gen4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + 
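+	/* Read-modify-write of the PHC under the lock: capture the current
+	 * time, add the signed delta, and write the sum back, so concurrent
+	 * clock operations never observe the intermediate value.
+	 */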
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp); + struct timespec64 ts; + unsigned long flags; + s64 now; + + spin_lock_irqsave(&ptp_priv->lock, flags); + _rcar_gen4_ptp_gettime(ptp, &ts); + now = ktime_to_ns(timespec64_to_ktime(ts)); + ts = ns_to_timespec64(now + delta); + _rcar_gen4_ptp_settime(ptp, &ts); + spin_unlock_irqrestore(&ptp_priv->lock, flags); + + return 0; +} + +static int rcar_gen4_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static struct ptp_clock_info rcar_gen4_ptp_info = { + .owner = THIS_MODULE, + .name = "rcar_gen4_ptp", + .max_adj = 50000000, + .adjfine = rcar_gen4_ptp_adjfine, + .adjtime = rcar_gen4_ptp_adjtime, + .gettime64 = rcar_gen4_ptp_gettime, + .settime64 = rcar_gen4_ptp_settime, + .enable = rcar_gen4_ptp_enable, +}; + +static void rcar_gen4_ptp_set_offs(struct rcar_gen4_ptp_private *ptp_priv, + enum rcar_gen4_ptp_reg_layout layout) +{ + WARN_ON(layout != RCAR_GEN4_PTP_REG_LAYOUT_S4); + + ptp_priv->offs = &s4_offs; +} + +int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, + enum rcar_gen4_ptp_reg_layout layout, u32 clock) +{ + if (ptp_priv->initialized) + return 0; + + spin_lock_init(&ptp_priv->lock); + + rcar_gen4_ptp_set_offs(ptp_priv, layout); + + ptp_priv->default_addend = clock; + iowrite32(ptp_priv->default_addend, ptp_priv->addr + ptp_priv->offs->increment); + ptp_priv->clock = ptp_clock_register(&ptp_priv->info, NULL); + if (IS_ERR(ptp_priv->clock)) + return PTR_ERR(ptp_priv->clock); + + iowrite32(0x01, ptp_priv->addr + ptp_priv->offs->enable); + ptp_priv->initialized = true; + + return 0; +} + +int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv) +{ + iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable); + + return ptp_clock_unregister(ptp_priv->clock); +} + +struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev) +{ + struct rcar_gen4_ptp_private *ptp; + + ptp = devm_kzalloc(&pdev->dev, sizeof(*ptp), GFP_KERNEL); + if (!ptp) + return NULL; + + ptp->info = rcar_gen4_ptp_info; + + return ptp; +} diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h new file mode 100644 index 0000000000..b1bbea8d3a --- /dev/null +++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Renesas R-Car Gen4 gPTP device driver + * + * Copyright (C) 2022 Renesas Electronics Corporation + */ + +#ifndef __RCAR_GEN4_PTP_H__ +#define __RCAR_GEN4_PTP_H__ + +#include + +#define PTPTIVC_INIT 0x19000000 /* 320MHz */ +#define RCAR_GEN4_PTP_CLOCK_S4 PTPTIVC_INIT +#define RCAR_GEN4_GPTP_OFFSET_S4 0x00018000 + +/* for rcar_gen4_ptp_init */ +enum rcar_gen4_ptp_reg_layout { + RCAR_GEN4_PTP_REG_LAYOUT_S4 +}; + +/* driver's definitions */ +#define RCAR_GEN4_RXTSTAMP_ENABLED BIT(0) +#define RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT BIT(1) +#define RCAR_GEN4_RXTSTAMP_TYPE_ALL (RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT | BIT(2)) +#define RCAR_GEN4_RXTSTAMP_TYPE RCAR_GEN4_RXTSTAMP_TYPE_ALL + +#define RCAR_GEN4_TXTSTAMP_ENABLED BIT(0) + +#define PTPRO 0 + +enum rcar_gen4_ptp_reg_s4 { + PTPTMEC = PTPRO + 0x0010, + PTPTMDC = PTPRO + 0x0014, + PTPTIVC0 = PTPRO + 0x0020, + PTPTOVC00 = PTPRO + 0x0030, + PTPTOVC10 = PTPRO + 0x0034, + PTPTOVC20 = PTPRO + 0x0038, + PTPGPTPTM00 = PTPRO + 0x0050, + PTPGPTPTM10 = PTPRO + 0x0054, + PTPGPTPTM20 = PTPRO + 0x0058, +}; + +struct rcar_gen4_ptp_reg_offset { + u16 enable; + u16 disable; + u16 increment; + u16 config_t0; + u16 config_t1; + u16 
config_t2; + u16 monitor_t0; + u16 monitor_t1; + u16 monitor_t2; +}; + +struct rcar_gen4_ptp_private { + void __iomem *addr; + struct ptp_clock *clock; + struct ptp_clock_info info; + const struct rcar_gen4_ptp_reg_offset *offs; + spinlock_t lock; /* For multiple registers access */ + u32 tstamp_tx_ctrl; + u32 tstamp_rx_ctrl; + s64 default_addend; + bool initialized; +}; + +int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, + enum rcar_gen4_ptp_reg_layout layout, u32 clock); +int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv); +struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev); + +#endif /* #ifndef __RCAR_GEN4_PTP_H__ */ diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c new file mode 100644 index 0000000000..ae9d8722b7 --- /dev/null +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -0,0 +1,2013 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Renesas Ethernet Switch device driver + * + * Copyright (C) 2022 Renesas Electronics Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rswitch.h" + +static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected) +{ + u32 val; + + return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected, + 1, RSWITCH_TIMEOUT_US); +} + +static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set) +{ + iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg); +} + +/* Common Agent block (COMA) */ +static void rswitch_reset(struct rswitch_private *priv) +{ + iowrite32(RRC_RR, priv->addr + RRC); + iowrite32(RRC_RR_CLR, priv->addr + RRC); +} + +static void rswitch_clock_enable(struct rswitch_private *priv) +{ + iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC); +} + +static void rswitch_clock_disable(struct rswitch_private *priv) +{ + iowrite32(RCDC_RCD, priv->addr + RCDC); +} + +static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port) +{ + u32 val = ioread32(coma_addr + RCEC); + + if (val & RCEC_RCE) + return (val & BIT(port)) ? 
true : false; + else + return false; +} + +static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable) +{ + u32 val; + + if (enable) { + val = ioread32(coma_addr + RCEC); + iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC); + } else { + val = ioread32(coma_addr + RCDC); + iowrite32(val | BIT(port), coma_addr + RCDC); + } +} + +static int rswitch_bpool_config(struct rswitch_private *priv) +{ + u32 val; + + val = ioread32(priv->addr + CABPIRM); + if (val & CABPIRM_BPR) + return 0; + + iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM); + + return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR); +} + +static void rswitch_coma_init(struct rswitch_private *priv) +{ + iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0); +} + +/* R-Switch-2 block (TOP) */ +static void rswitch_top_init(struct rswitch_private *priv) +{ + int i; + + for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++) + iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i)); +} + +/* Forwarding engine block (MFWD) */ +static void rswitch_fwd_init(struct rswitch_private *priv) +{ + int i; + + /* For ETHA */ + for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i)); + iowrite32(0, priv->addr + FWPBFC(i)); + } + + for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + iowrite32(priv->rdev[i]->rx_queue->index, + priv->addr + FWPBFCSDC(GWCA_INDEX, i)); + iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i)); + } + + /* For GWCA */ + iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index)); + iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index)); + iowrite32(0, priv->addr + FWPBFC(priv->gwca.index)); + iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index)); +} + +/* Gateway CPU agent block (GWCA) */ +static int rswitch_gwca_change_mode(struct rswitch_private *priv, + enum rswitch_gwca_mode mode) +{ + int ret; + + if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index)) + rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1); + + iowrite32(mode, priv->addr + GWMC); + + ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode); + + if (mode == GWMC_OPC_DISABLE) + rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0); + + return ret; +} + +static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv) +{ + iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM); + + return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR); +} + +static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv) +{ + iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM); + + return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR); +} + +static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx) +{ + u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits; + int i; + + for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) { + if (dis[i] & mask[i]) + return true; + } + + return false; +} + +static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis) +{ + int i; + + for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) { + dis[i] = ioread32(priv->addr + GWDIS(i)); + dis[i] &= ioread32(priv->addr + GWDIE(i)); + } +} + +static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable) +{ + u32 offs = enable ? 
GWDIE(index / 32) : GWDID(index / 32); + + iowrite32(BIT(index % 32), priv->addr + offs); +} + +static void rswitch_ack_data_irq(struct rswitch_private *priv, int index) +{ + u32 offs = GWDIS(index / 32); + + iowrite32(BIT(index % 32), priv->addr + offs); +} + +static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num) +{ + int index = cur ? gq->cur : gq->dirty; + + if (index + num >= gq->ring_size) + index = (index + num) % gq->ring_size; + else + index += num; + + return index; +} + +static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq) +{ + if (gq->cur >= gq->dirty) + return gq->cur - gq->dirty; + else + return gq->ring_size - gq->dirty + gq->cur; +} + +static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq) +{ + struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty]; + + if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) + return true; + + return false; +} + +static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq, + int start_index, int num) +{ + int i, index; + + for (i = 0; i < num; i++) { + index = (i + start_index) % gq->ring_size; + if (gq->skbs[index]) + continue; + gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev, + PKT_BUF_SZ + RSWITCH_ALIGN - 1); + if (!gq->skbs[index]) + goto err; + } + + return 0; + +err: + for (i--; i >= 0; i--) { + index = (i + start_index) % gq->ring_size; + dev_kfree_skb(gq->skbs[index]); + gq->skbs[index] = NULL; + } + + return -ENOMEM; +} + +static void rswitch_gwca_queue_free(struct net_device *ndev, + struct rswitch_gwca_queue *gq) +{ + int i; + + if (!gq->dir_tx) { + dma_free_coherent(ndev->dev.parent, + sizeof(struct rswitch_ext_ts_desc) * + (gq->ring_size + 1), gq->rx_ring, gq->ring_dma); + gq->rx_ring = NULL; + + for (i = 0; i < gq->ring_size; i++) + dev_kfree_skb(gq->skbs[i]); + } else { + dma_free_coherent(ndev->dev.parent, + sizeof(struct rswitch_ext_desc) * + (gq->ring_size + 1), gq->tx_ring, gq->ring_dma); + gq->tx_ring = NULL; + } + + kfree(gq->skbs); + gq->skbs = NULL; +} + +static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv) +{ + struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; + + dma_free_coherent(&priv->pdev->dev, + sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1), + gq->ts_ring, gq->ring_dma); + gq->ts_ring = NULL; +} + +static int rswitch_gwca_queue_alloc(struct net_device *ndev, + struct rswitch_private *priv, + struct rswitch_gwca_queue *gq, + bool dir_tx, int ring_size) +{ + int i, bit; + + gq->dir_tx = dir_tx; + gq->ring_size = ring_size; + gq->ndev = ndev; + + gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL); + if (!gq->skbs) + return -ENOMEM; + + if (!dir_tx) { + rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size); + + gq->rx_ring = dma_alloc_coherent(ndev->dev.parent, + sizeof(struct rswitch_ext_ts_desc) * + (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); + } else { + gq->tx_ring = dma_alloc_coherent(ndev->dev.parent, + sizeof(struct rswitch_ext_desc) * + (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); + } + + if (!gq->rx_ring && !gq->tx_ring) + goto out; + + i = gq->index / 32; + bit = BIT(gq->index % 32); + if (dir_tx) + priv->gwca.tx_irq_bits[i] |= bit; + else + priv->gwca.rx_irq_bits[i] |= bit; + + return 0; + +out: + rswitch_gwca_queue_free(ndev, gq); + + return -ENOMEM; +} + +static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr) +{ + desc->dptrl = cpu_to_le32(lower_32_bits(addr)); + desc->dptrh = upper_32_bits(addr) & 0xff; +} + +static dma_addr_t rswitch_desc_get_dptr(const struct 
rswitch_desc *desc) +{ + return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32; +} + +static int rswitch_gwca_queue_format(struct net_device *ndev, + struct rswitch_private *priv, + struct rswitch_gwca_queue *gq) +{ + int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size; + struct rswitch_ext_desc *desc; + struct rswitch_desc *linkfix; + dma_addr_t dma_addr; + int i; + + memset(gq->tx_ring, 0, ring_size); + for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) { + if (!gq->dir_tx) { + dma_addr = dma_map_single(ndev->dev.parent, + gq->skbs[i]->data, PKT_BUF_SZ, + DMA_FROM_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto err; + + desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ); + rswitch_desc_set_dptr(&desc->desc, dma_addr); + desc->desc.die_dt = DT_FEMPTY | DIE; + } else { + desc->desc.die_dt = DT_EEMPTY | DIE; + } + } + rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); + desc->desc.die_dt = DT_LINKFIX; + + linkfix = &priv->gwca.linkfix_table[gq->index]; + linkfix->die_dt = DT_LINKFIX; + rswitch_desc_set_dptr(linkfix, gq->ring_dma); + + iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE, + priv->addr + GWDCC_OFFS(gq->index)); + + return 0; + +err: + if (!gq->dir_tx) { + for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) { + dma_addr = rswitch_desc_get_dptr(&desc->desc); + dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, + DMA_FROM_DEVICE); + } + } + + return -ENOMEM; +} + +static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv, + int start_index, int num) +{ + struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; + struct rswitch_ts_desc *desc; + int i, index; + + for (i = 0; i < num; i++) { + index = (i + start_index) % gq->ring_size; + desc = &gq->ts_ring[index]; + desc->desc.die_dt = DT_FEMPTY_ND | DIE; + } +} + +static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev, + struct rswitch_gwca_queue *gq, + int start_index, int num) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + struct rswitch_ext_ts_desc *desc; + dma_addr_t dma_addr; + int i, index; + + for (i = 0; i < num; i++) { + index = (i + start_index) % gq->ring_size; + desc = &gq->rx_ring[index]; + if (!gq->dir_tx) { + dma_addr = dma_map_single(ndev->dev.parent, + gq->skbs[index]->data, PKT_BUF_SZ, + DMA_FROM_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto err; + + desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ); + rswitch_desc_set_dptr(&desc->desc, dma_addr); + dma_wmb(); + desc->desc.die_dt = DT_FEMPTY | DIE; + desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index)); + } else { + desc->desc.die_dt = DT_EEMPTY | DIE; + } + } + + return 0; + +err: + if (!gq->dir_tx) { + for (i--; i >= 0; i--) { + index = (i + start_index) % gq->ring_size; + desc = &gq->rx_ring[index]; + dma_addr = rswitch_desc_get_dptr(&desc->desc); + dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, + DMA_FROM_DEVICE); + } + } + + return -ENOMEM; +} + +static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev, + struct rswitch_private *priv, + struct rswitch_gwca_queue *gq) +{ + int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size; + struct rswitch_ext_ts_desc *desc; + struct rswitch_desc *linkfix; + int err; + + memset(gq->rx_ring, 0, ring_size); + err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size); + if (err < 0) + return err; + + desc = &gq->rx_ring[gq->ring_size]; /* Last */ + rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); + desc->desc.die_dt = DT_LINKFIX; + + linkfix = 
&priv->gwca.linkfix_table[gq->index]; + linkfix->die_dt = DT_LINKFIX; + rswitch_desc_set_dptr(linkfix, gq->ring_dma); + + iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | + GWDCC_ETS | GWDCC_EDE, + priv->addr + GWDCC_OFFS(gq->index)); + + return 0; +} + +static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv) +{ + int i, num_queues = priv->gwca.num_queues; + struct rswitch_gwca *gwca = &priv->gwca; + struct device *dev = &priv->pdev->dev; + + gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues; + gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size, + &gwca->linkfix_table_dma, GFP_KERNEL); + if (!gwca->linkfix_table) + return -ENOMEM; + for (i = 0; i < num_queues; i++) + gwca->linkfix_table[i].die_dt = DT_EOS; + + return 0; +} + +static void rswitch_gwca_linkfix_free(struct rswitch_private *priv) +{ + struct rswitch_gwca *gwca = &priv->gwca; + + if (gwca->linkfix_table) + dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size, + gwca->linkfix_table, gwca->linkfix_table_dma); + gwca->linkfix_table = NULL; +} + +static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv) +{ + struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; + struct rswitch_ts_desc *desc; + + gq->ring_size = TS_RING_SIZE; + gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev, + sizeof(struct rswitch_ts_desc) * + (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); + + if (!gq->ts_ring) + return -ENOMEM; + + rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE); + desc = &gq->ts_ring[gq->ring_size]; + desc->desc.die_dt = DT_LINKFIX; + rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); + INIT_LIST_HEAD(&priv->gwca.ts_info_list); + + return 0; +} + +static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv) +{ + struct rswitch_gwca_queue *gq; + int index; + + index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues); + if (index >= priv->gwca.num_queues) + return NULL; + set_bit(index, priv->gwca.used); + gq = &priv->gwca.queues[index]; + memset(gq, 0, sizeof(*gq)); + gq->index = index; + + return gq; +} + +static void rswitch_gwca_put(struct rswitch_private *priv, + struct rswitch_gwca_queue *gq) +{ + clear_bit(gq->index, priv->gwca.used); +} + +static int rswitch_txdmac_alloc(struct net_device *ndev) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + struct rswitch_private *priv = rdev->priv; + int err; + + rdev->tx_queue = rswitch_gwca_get(priv); + if (!rdev->tx_queue) + return -EBUSY; + + err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE); + if (err < 0) { + rswitch_gwca_put(priv, rdev->tx_queue); + return err; + } + + return 0; +} + +static void rswitch_txdmac_free(struct net_device *ndev) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + + rswitch_gwca_queue_free(ndev, rdev->tx_queue); + rswitch_gwca_put(rdev->priv, rdev->tx_queue); +} + +static int rswitch_txdmac_init(struct rswitch_private *priv, int index) +{ + struct rswitch_device *rdev = priv->rdev[index]; + + return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue); +} + +static int rswitch_rxdmac_alloc(struct net_device *ndev) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + struct rswitch_private *priv = rdev->priv; + int err; + + rdev->rx_queue = rswitch_gwca_get(priv); + if (!rdev->rx_queue) + return -EBUSY; + + err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE); + if (err < 0) { + rswitch_gwca_put(priv, rdev->rx_queue); + return err; + } + + return 0; +} + 
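+/* Editor's note: a minimal illustrative sketch, not part of the driver.
+ * It restates the cur/dirty ring accounting implemented by
+ * rswitch_next_queue_index() and rswitch_get_num_cur_queues() above;
+ * the example_* names are hypothetical and the block is compiled out.
+ */
+#if 0
+static int example_advance(int index, int num, int ring_size)
+{
+	/* Advance an index with wrap-around; index + num < 2 * ring_size. */
+	return (index + num) % ring_size;
+}
+
+static int example_in_flight(int cur, int dirty, int ring_size)
+{
+	/* Entries between the dirty (consumer) and cur (producer) marks. */
+	return cur >= dirty ? cur - dirty : ring_size - dirty + cur;
+}
+#endif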
+static void rswitch_rxdmac_free(struct net_device *ndev) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + + rswitch_gwca_queue_free(ndev, rdev->rx_queue); + rswitch_gwca_put(rdev->priv, rdev->rx_queue); +} + +static int rswitch_rxdmac_init(struct rswitch_private *priv, int index) +{ + struct rswitch_device *rdev = priv->rdev[index]; + struct net_device *ndev = rdev->ndev; + + return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue); +} + +static int rswitch_gwca_hw_init(struct rswitch_private *priv) +{ + int i, err; + + err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); + if (err < 0) + return err; + err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG); + if (err < 0) + return err; + + err = rswitch_gwca_mcast_table_reset(priv); + if (err < 0) + return err; + err = rswitch_gwca_axi_ram_reset(priv); + if (err < 0) + return err; + + iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC); + iowrite32(0, priv->addr + GWTTFC); + iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1); + iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0); + iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10); + iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00); + iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0); + + iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0); + + for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + err = rswitch_rxdmac_init(priv, i); + if (err < 0) + return err; + err = rswitch_txdmac_init(priv, i); + if (err < 0) + return err; + } + + err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); + if (err < 0) + return err; + return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION); +} + +static int rswitch_gwca_hw_deinit(struct rswitch_private *priv) +{ + int err; + + err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); + if (err < 0) + return err; + err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET); + if (err < 0) + return err; + + return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); +} + +static int rswitch_gwca_halt(struct rswitch_private *priv) +{ + int err; + + priv->gwca_halt = true; + err = rswitch_gwca_hw_deinit(priv); + dev_err(&priv->pdev->dev, "halted (%d)\n", err); + + return err; +} + +static bool rswitch_rx(struct net_device *ndev, int *quota) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + struct rswitch_gwca_queue *gq = rdev->rx_queue; + struct rswitch_ext_ts_desc *desc; + int limit, boguscnt, num, ret; + struct sk_buff *skb; + dma_addr_t dma_addr; + u16 pkt_len; + u32 get_ts; + + if (*quota <= 0) + return true; + + boguscnt = min_t(int, gq->ring_size, *quota); + limit = boguscnt; + + desc = &gq->rx_ring[gq->cur]; + while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) { + dma_rmb(); + pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS; + skb = gq->skbs[gq->cur]; + gq->skbs[gq->cur] = NULL; + dma_addr = rswitch_desc_get_dptr(&desc->desc); + dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE); + get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + if (get_ts) { + struct skb_shared_hwtstamps *shhwtstamps; + struct timespec64 ts; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + ts.tv_sec = __le32_to_cpu(desc->ts_sec); + ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); + shhwtstamps->hwtstamp = timespec64_to_ktime(ts); + } + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + napi_gro_receive(&rdev->napi, skb); + 
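+		/* The skb now belongs to the network stack; update the RX
+		 * statistics before moving on to the next descriptor.
+		 */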
rdev->ndev->stats.rx_packets++; + rdev->ndev->stats.rx_bytes += pkt_len; + + gq->cur = rswitch_next_queue_index(gq, true, 1); + desc = &gq->rx_ring[gq->cur]; + + if (--boguscnt <= 0) + break; + } + + num = rswitch_get_num_cur_queues(gq); + ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num); + if (ret < 0) + goto err; + ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num); + if (ret < 0) + goto err; + gq->dirty = rswitch_next_queue_index(gq, false, num); + + *quota -= limit - boguscnt; + + return boguscnt <= 0; + +err: + rswitch_gwca_halt(rdev->priv); + + return 0; +} + +static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + struct rswitch_gwca_queue *gq = rdev->tx_queue; + struct rswitch_ext_desc *desc; + dma_addr_t dma_addr; + struct sk_buff *skb; + int free_num = 0; + int size; + + for (; rswitch_get_num_cur_queues(gq) > 0; + gq->dirty = rswitch_next_queue_index(gq, false, 1)) { + desc = &gq->tx_ring[gq->dirty]; + if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY) + break; + + dma_rmb(); + size = le16_to_cpu(desc->desc.info_ds) & TX_DS; + skb = gq->skbs[gq->dirty]; + if (skb) { + dma_addr = rswitch_desc_get_dptr(&desc->desc); + dma_unmap_single(ndev->dev.parent, dma_addr, + size, DMA_TO_DEVICE); + dev_kfree_skb_any(gq->skbs[gq->dirty]); + gq->skbs[gq->dirty] = NULL; + free_num++; + } + desc->desc.die_dt = DT_EEMPTY; + rdev->ndev->stats.tx_packets++; + rdev->ndev->stats.tx_bytes += size; + } + + return free_num; +} + +static int rswitch_poll(struct napi_struct *napi, int budget) +{ + struct net_device *ndev = napi->dev; + struct rswitch_private *priv; + struct rswitch_device *rdev; + unsigned long flags; + int quota = budget; + + rdev = netdev_priv(ndev); + priv = rdev->priv; + +retry: + rswitch_tx_free(ndev, true); + + if (rswitch_rx(ndev, &quota)) + goto out; + else if (rdev->priv->gwca_halt) + goto err; + else if (rswitch_is_queue_rxed(rdev->rx_queue)) + goto retry; + + netif_wake_subqueue(ndev, 0); + + if (napi_complete_done(napi, budget - quota)) { + spin_lock_irqsave(&priv->lock, flags); + rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); + rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); + spin_unlock_irqrestore(&priv->lock, flags); + } + +out: + return budget - quota; + +err: + napi_complete(napi); + + return 0; +} + +static void rswitch_queue_interrupt(struct net_device *ndev) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + + if (napi_schedule_prep(&rdev->napi)) { + spin_lock(&rdev->priv->lock); + rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); + rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); + spin_unlock(&rdev->priv->lock); + __napi_schedule(&rdev->napi); + } +} + +static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis) +{ + struct rswitch_gwca_queue *gq; + int i, index, bit; + + for (i = 0; i < priv->gwca.num_queues; i++) { + gq = &priv->gwca.queues[i]; + index = gq->index / 32; + bit = BIT(gq->index % 32); + if (!(dis[index] & bit)) + continue; + + rswitch_ack_data_irq(priv, gq->index); + rswitch_queue_interrupt(gq->ndev); + } + + return IRQ_HANDLED; +} + +static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id) +{ + struct rswitch_private *priv = dev_id; + u32 dis[RSWITCH_NUM_IRQ_REGS]; + irqreturn_t ret = IRQ_NONE; + + rswitch_get_data_irq_status(priv, dis); + + if (rswitch_is_any_data_irq(priv, dis, true) || + rswitch_is_any_data_irq(priv, dis, false)) + ret = rswitch_data_irq(priv, 
dis); + + return ret; +} + +static int rswitch_gwca_request_irqs(struct rswitch_private *priv) +{ + char *resource_name, *irq_name; + int i, ret, irq; + + for (i = 0; i < GWCA_NUM_IRQS; i++) { + resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i); + if (!resource_name) + return -ENOMEM; + + irq = platform_get_irq_byname(priv->pdev, resource_name); + kfree(resource_name); + if (irq < 0) + return irq; + + irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, + GWCA_IRQ_NAME, i); + if (!irq_name) + return -ENOMEM; + + ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq, + 0, irq_name, priv); + if (ret < 0) + return ret; + } + + return 0; +} + +static void rswitch_ts(struct rswitch_private *priv) +{ + struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; + struct rswitch_gwca_ts_info *ts_info, *ts_info2; + struct skb_shared_hwtstamps shhwtstamps; + struct rswitch_ts_desc *desc; + struct timespec64 ts; + u32 tag, port; + int num; + + desc = &gq->ts_ring[gq->cur]; + while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) { + dma_rmb(); + + port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl)); + tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl)); + + list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) { + if (!(ts_info->port == port && ts_info->tag == tag)) + continue; + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + ts.tv_sec = __le32_to_cpu(desc->ts_sec); + ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); + shhwtstamps.hwtstamp = timespec64_to_ktime(ts); + skb_tstamp_tx(ts_info->skb, &shhwtstamps); + dev_consume_skb_irq(ts_info->skb); + list_del(&ts_info->list); + kfree(ts_info); + break; + } + + gq->cur = rswitch_next_queue_index(gq, true, 1); + desc = &gq->ts_ring[gq->cur]; + } + + num = rswitch_get_num_cur_queues(gq); + rswitch_gwca_ts_queue_fill(priv, gq->dirty, num); + gq->dirty = rswitch_next_queue_index(gq, false, num); +} + +static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id) +{ + struct rswitch_private *priv = dev_id; + + if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) { + iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS); + rswitch_ts(priv); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv) +{ + int irq; + + irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME); + if (irq < 0) + return irq; + + return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq, + 0, GWCA_TS_IRQ_NAME, priv); +} + +/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */ +static int rswitch_etha_change_mode(struct rswitch_etha *etha, + enum rswitch_etha_mode mode) +{ + int ret; + + if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index)) + rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1); + + iowrite32(mode, etha->addr + EAMC); + + ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode); + + if (mode == EAMC_OPC_DISABLE) + rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0); + + return ret; +} + +static void rswitch_etha_read_mac_address(struct rswitch_etha *etha) +{ + u32 mrmac0 = ioread32(etha->addr + MRMAC0); + u32 mrmac1 = ioread32(etha->addr + MRMAC1); + u8 *mac = &etha->mac_addr[0]; + + mac[0] = (mrmac0 >> 8) & 0xFF; + mac[1] = (mrmac0 >> 0) & 0xFF; + mac[2] = (mrmac1 >> 24) & 0xFF; + mac[3] = (mrmac1 >> 16) & 0xFF; + mac[4] = (mrmac1 >> 8) & 0xFF; + mac[5] = (mrmac1 >> 0) & 0xFF; +} + +static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac) +{ + 
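+	/* Mirror of the read side above: mac[0..1] are packed into MRMAC0
+	 * and mac[2..5] into MRMAC1, most significant byte first.
+	 */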
iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0); + iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], + etha->addr + MRMAC1); +} + +static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha) +{ + iowrite32(MLVC_PLV, etha->addr + MLVC); + + return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0); +} + +static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac) +{ + u32 val; + + rswitch_etha_write_mac_address(etha, mac); + + switch (etha->speed) { + case 100: + val = MPIC_LSC_100M; + break; + case 1000: + val = MPIC_LSC_1G; + break; + case 2500: + val = MPIC_LSC_2_5G; + break; + default: + return; + } + + iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC); +} + +static void rswitch_etha_enable_mii(struct rswitch_etha *etha) +{ + rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK, + MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06)); + rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45); +} + +static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac) +{ + int err; + + err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE); + if (err < 0) + return err; + err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG); + if (err < 0) + return err; + + iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC); + rswitch_rmac_setting(etha, mac); + rswitch_etha_enable_mii(etha); + + err = rswitch_etha_wait_link_verification(etha); + if (err < 0) + return err; + + err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE); + if (err < 0) + return err; + + return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION); +} + +static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read, + int phyad, int devad, int regad, int data) +{ + int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45; + u32 val; + int ret; + + if (devad == 0xffffffff) + return -ENODEV; + + writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1); + + val = MPSM_PSME | MPSM_MFF_C45; + iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM); + + ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS); + if (ret) + return ret; + + rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS); + + if (read) { + writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM); + + ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS); + if (ret) + return ret; + + ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16; + + rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS); + } else { + iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val, + etha->addr + MPSM); + + ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS); + } + + return ret; +} + +static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad, + int regad) +{ + struct rswitch_etha *etha = bus->priv; + + return rswitch_etha_set_access(etha, true, addr, devad, regad, 0); +} + +static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad, + int regad, u16 val) +{ + struct rswitch_etha *etha = bus->priv; + + return rswitch_etha_set_access(etha, false, addr, devad, regad, val); +} + +/* Call of_node_put(port) after done */ +static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev) +{ + struct device_node *ports, *port; + int err = 0; + u32 index; + + ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node, + "ethernet-ports"); + if (!ports) + return NULL; + + for_each_child_of_node(ports, port) { + err = of_property_read_u32(port, "reg", &index); + if (err < 0) { + port = 
NULL; + goto out; + } + if (index == rdev->etha->index) { + if (!of_device_is_available(port)) + port = NULL; + break; + } + } + +out: + of_node_put(ports); + + return port; +} + +static int rswitch_etha_get_params(struct rswitch_device *rdev) +{ + u32 max_speed; + int err; + + if (!rdev->np_port) + return 0; /* ignored */ + + err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface); + if (err) + return err; + + err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed); + if (!err) { + rdev->etha->speed = max_speed; + return 0; + } + + /* if no "max-speed" property, let's use default speed */ + switch (rdev->etha->phy_interface) { + case PHY_INTERFACE_MODE_MII: + rdev->etha->speed = SPEED_100; + break; + case PHY_INTERFACE_MODE_SGMII: + rdev->etha->speed = SPEED_1000; + break; + case PHY_INTERFACE_MODE_USXGMII: + rdev->etha->speed = SPEED_2500; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int rswitch_mii_register(struct rswitch_device *rdev) +{ + struct device_node *mdio_np; + struct mii_bus *mii_bus; + int err; + + mii_bus = mdiobus_alloc(); + if (!mii_bus) + return -ENOMEM; + + mii_bus->name = "rswitch_mii"; + sprintf(mii_bus->id, "etha%d", rdev->etha->index); + mii_bus->priv = rdev->etha; + mii_bus->read_c45 = rswitch_etha_mii_read_c45; + mii_bus->write_c45 = rswitch_etha_mii_write_c45; + mii_bus->parent = &rdev->priv->pdev->dev; + + mdio_np = of_get_child_by_name(rdev->np_port, "mdio"); + err = of_mdiobus_register(mii_bus, mdio_np); + if (err < 0) { + mdiobus_free(mii_bus); + goto out; + } + + rdev->etha->mii = mii_bus; + +out: + of_node_put(mdio_np); + + return err; +} + +static void rswitch_mii_unregister(struct rswitch_device *rdev) +{ + if (rdev->etha->mii) { + mdiobus_unregister(rdev->etha->mii); + mdiobus_free(rdev->etha->mii); + rdev->etha->mii = NULL; + } +} + +static void rswitch_adjust_link(struct net_device *ndev) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + + if (phydev->link != rdev->etha->link) { + phy_print_status(phydev); + if (phydev->link) + phy_power_on(rdev->serdes); + else if (rdev->serdes->power_count) + phy_power_off(rdev->serdes); + + rdev->etha->link = phydev->link; + + if (!rdev->priv->etha_no_runtime_change && + phydev->speed != rdev->etha->speed) { + rdev->etha->speed = phydev->speed; + + rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); + phy_set_speed(rdev->serdes, rdev->etha->speed); + } + } +} + +static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev, + struct phy_device *phydev) +{ + if (!rdev->priv->etha_no_runtime_change) + return; + + switch (rdev->etha->speed) { + case SPEED_2500: + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT); + break; + case SPEED_1000: + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT); + break; + case SPEED_100: + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT); + break; + default: + break; + } + + phy_set_max_speed(phydev, rdev->etha->speed); +} + +static int rswitch_phy_device_init(struct rswitch_device *rdev) +{ + struct phy_device *phydev; + struct device_node *phy; + int err = -ENOENT; + + if (!rdev->np_port) + return -ENODEV; + + phy = of_parse_phandle(rdev->np_port, "phy-handle", 0); + if (!phy) + return -ENODEV; + + /* Set 
phydev->host_interfaces before calling of_phy_connect() to + * configure the PHY with the information of host_interfaces. + */ + phydev = of_phy_find_device(phy); + if (!phydev) + goto out; + __set_bit(rdev->etha->phy_interface, phydev->host_interfaces); + + phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0, + rdev->etha->phy_interface); + if (!phydev) + goto out; + + phy_set_max_speed(phydev, SPEED_2500); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + rswitch_phy_remove_link_mode(rdev, phydev); + + phy_attached_info(phydev); + + err = 0; +out: + of_node_put(phy); + + return err; +} + +static void rswitch_phy_device_deinit(struct rswitch_device *rdev) +{ + if (rdev->ndev->phydev) + phy_disconnect(rdev->ndev->phydev); +} + +static int rswitch_serdes_set_params(struct rswitch_device *rdev) +{ + int err; + + err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET, + rdev->etha->phy_interface); + if (err < 0) + return err; + + return phy_set_speed(rdev->serdes, rdev->etha->speed); +} + +static int rswitch_ether_port_init_one(struct rswitch_device *rdev) +{ + int err; + + if (!rdev->etha->operated) { + err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); + if (err < 0) + return err; + if (rdev->priv->etha_no_runtime_change) + rdev->etha->operated = true; + } + + err = rswitch_mii_register(rdev); + if (err < 0) + return err; + + err = rswitch_phy_device_init(rdev); + if (err < 0) + goto err_phy_device_init; + + rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL); + if (IS_ERR(rdev->serdes)) { + err = PTR_ERR(rdev->serdes); + goto err_serdes_phy_get; + } + + err = rswitch_serdes_set_params(rdev); + if (err < 0) + goto err_serdes_set_params; + + return 0; + +err_serdes_set_params: +err_serdes_phy_get: + rswitch_phy_device_deinit(rdev); + +err_phy_device_init: + rswitch_mii_unregister(rdev); + + return err; +} + +static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev) +{ + rswitch_phy_device_deinit(rdev); + rswitch_mii_unregister(rdev); +} + +static int rswitch_ether_port_init_all(struct rswitch_private *priv) +{ + int i, err; + + rswitch_for_each_enabled_port(priv, i) { + err = rswitch_ether_port_init_one(priv->rdev[i]); + if (err) + goto err_init_one; + } + + rswitch_for_each_enabled_port(priv, i) { + err = phy_init(priv->rdev[i]->serdes); + if (err) + goto err_serdes; + } + + return 0; + +err_serdes: + rswitch_for_each_enabled_port_continue_reverse(priv, i) + phy_exit(priv->rdev[i]->serdes); + i = RSWITCH_NUM_PORTS; + +err_init_one: + rswitch_for_each_enabled_port_continue_reverse(priv, i) + rswitch_ether_port_deinit_one(priv->rdev[i]); + + return err; +} + +static void rswitch_ether_port_deinit_all(struct rswitch_private *priv) +{ + int i; + + for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + phy_exit(priv->rdev[i]->serdes); + rswitch_ether_port_deinit_one(priv->rdev[i]); + } +} + +static int rswitch_open(struct net_device *ndev) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + unsigned long flags; + + phy_start(ndev->phydev); + + napi_enable(&rdev->napi); + netif_start_queue(ndev); + + spin_lock_irqsave(&rdev->priv->lock, flags); + rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); + rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); + spin_unlock_irqrestore(&rdev->priv->lock, 
flags); + + if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) + iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); + + bitmap_set(rdev->priv->opened_ports, rdev->port, 1); + + return 0; +}; + +static int rswitch_stop(struct net_device *ndev) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + struct rswitch_gwca_ts_info *ts_info, *ts_info2; + unsigned long flags; + + netif_tx_stop_all_queues(ndev); + bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); + + if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) + iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID); + + list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) { + if (ts_info->port != rdev->port) + continue; + dev_kfree_skb_irq(ts_info->skb); + list_del(&ts_info->list); + kfree(ts_info); + } + + spin_lock_irqsave(&rdev->priv->lock, flags); + rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); + rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); + spin_unlock_irqrestore(&rdev->priv->lock, flags); + + phy_stop(ndev->phydev); + napi_disable(&rdev->napi); + + return 0; +}; + +static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + struct rswitch_gwca_queue *gq = rdev->tx_queue; + netdev_tx_t ret = NETDEV_TX_OK; + struct rswitch_ext_desc *desc; + dma_addr_t dma_addr; + + if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) { + netif_stop_subqueue(ndev, 0); + return NETDEV_TX_BUSY; + } + + if (skb_put_padto(skb, ETH_ZLEN)) + return ret; + + dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto err_kfree; + + gq->skbs[gq->cur] = skb; + desc = &gq->tx_ring[gq->cur]; + rswitch_desc_set_dptr(&desc->desc, dma_addr); + desc->desc.info_ds = cpu_to_le16(skb->len); + + desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | + INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT); + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + struct rswitch_gwca_ts_info *ts_info; + + ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC); + if (!ts_info) + goto err_unmap; + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + rdev->ts_tag++; + desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC); + + ts_info->skb = skb_get(skb); + ts_info->port = rdev->port; + ts_info->tag = rdev->ts_tag; + list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list); + + skb_tx_timestamp(skb); + } + + dma_wmb(); + + desc->desc.die_dt = DT_FSINGLE | DIE; + wmb(); /* gq->cur must be incremented after die_dt was set */ + + gq->cur = rswitch_next_queue_index(gq, true, 1); + rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32)); + + return ret; + +err_unmap: + dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE); + +err_kfree: + dev_kfree_skb_any(skb); + + return ret; +} + +static struct net_device_stats *rswitch_get_stats(struct net_device *ndev) +{ + return &ndev->stats; +} + +static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + struct rcar_gen4_ptp_private *ptp_priv; + struct hwtstamp_config config; + + ptp_priv = rdev->priv->ptp_priv; + + config.flags = 0; + config.tx_type = ptp_priv->tstamp_tx_ctrl ? 
HWTSTAMP_TX_ON : + HWTSTAMP_TX_OFF; + switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) { + case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT: + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + break; + case RCAR_GEN4_RXTSTAMP_TYPE_ALL: + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + } + + return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; +} + +static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED; + struct hwtstamp_config config; + u32 tstamp_tx_ctrl; + + if (copy_from_user(&config, req->ifr_data, sizeof(config))) + return -EFAULT; + + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + tstamp_tx_ctrl = 0; + break; + case HWTSTAMP_TX_ON: + tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + tstamp_rx_ctrl = 0; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + break; + default: + config.rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL; + break; + } + + rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl; + rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl; + + return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; +} + +static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) +{ + if (!netif_running(ndev)) + return -EINVAL; + + switch (cmd) { + case SIOCGHWTSTAMP: + return rswitch_hwstamp_get(ndev, req); + case SIOCSHWTSTAMP: + return rswitch_hwstamp_set(ndev, req); + default: + return phy_mii_ioctl(ndev->phydev, req, cmd); + } +} + +static const struct net_device_ops rswitch_netdev_ops = { + .ndo_open = rswitch_open, + .ndo_stop = rswitch_stop, + .ndo_start_xmit = rswitch_start_xmit, + .ndo_get_stats = rswitch_get_stats, + .ndo_eth_ioctl = rswitch_eth_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) +{ + struct rswitch_device *rdev = netdev_priv(ndev); + + info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock); + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +static const struct ethtool_ops rswitch_ethtool_ops = { + .get_ts_info = rswitch_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static const struct of_device_id renesas_eth_sw_of_table[] = { + { .compatible = "renesas,r8a779f0-ether-switch", }, + { } +}; +MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table); + +static void rswitch_etha_init(struct rswitch_private *priv, int index) +{ + struct rswitch_etha *etha = &priv->etha[index]; + + memset(etha, 0, sizeof(*etha)); + etha->index = index; + etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE; + etha->coma_addr = priv->addr; + + /* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2) - 1. 
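+	 * (For example, a 320 MHz clock and a 2.5 MHz MDC give 320 / 5 - 1 = 63.)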
+ * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply + * both the numerator and the denominator by 10. + */ + etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1; +} + +static int rswitch_device_alloc(struct rswitch_private *priv, int index) +{ + struct platform_device *pdev = priv->pdev; + struct rswitch_device *rdev; + struct net_device *ndev; + int err; + + if (index >= RSWITCH_NUM_PORTS) + return -EINVAL; + + ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, &pdev->dev); + ether_setup(ndev); + + rdev = netdev_priv(ndev); + rdev->ndev = ndev; + rdev->priv = priv; + priv->rdev[index] = rdev; + rdev->port = index; + rdev->etha = &priv->etha[index]; + rdev->addr = priv->addr; + + ndev->base_addr = (unsigned long)rdev->addr; + snprintf(ndev->name, IFNAMSIZ, "tsn%d", index); + ndev->netdev_ops = &rswitch_netdev_ops; + ndev->ethtool_ops = &rswitch_ethtool_ops; + + netif_napi_add(ndev, &rdev->napi, rswitch_poll); + + rdev->np_port = rswitch_get_port_node(rdev); + rdev->disabled = !rdev->np_port; + err = of_get_ethdev_address(rdev->np_port, ndev); + of_node_put(rdev->np_port); + if (err) { + if (is_valid_ether_addr(rdev->etha->mac_addr)) + eth_hw_addr_set(ndev, rdev->etha->mac_addr); + else + eth_hw_addr_random(ndev); + } + + err = rswitch_etha_get_params(rdev); + if (err < 0) + goto out_get_params; + + if (rdev->priv->gwca.speed < rdev->etha->speed) + rdev->priv->gwca.speed = rdev->etha->speed; + + err = rswitch_rxdmac_alloc(ndev); + if (err < 0) + goto out_rxdmac; + + err = rswitch_txdmac_alloc(ndev); + if (err < 0) + goto out_txdmac; + + return 0; + +out_txdmac: + rswitch_rxdmac_free(ndev); + +out_rxdmac: +out_get_params: + netif_napi_del(&rdev->napi); + free_netdev(ndev); + + return err; +} + +static void rswitch_device_free(struct rswitch_private *priv, int index) +{ + struct rswitch_device *rdev = priv->rdev[index]; + struct net_device *ndev = rdev->ndev; + + rswitch_txdmac_free(ndev); + rswitch_rxdmac_free(ndev); + netif_napi_del(&rdev->napi); + free_netdev(ndev); +} + +static int rswitch_init(struct rswitch_private *priv) +{ + int i, err; + + for (i = 0; i < RSWITCH_NUM_PORTS; i++) + rswitch_etha_init(priv, i); + + rswitch_clock_enable(priv); + for (i = 0; i < RSWITCH_NUM_PORTS; i++) + rswitch_etha_read_mac_address(&priv->etha[i]); + + rswitch_reset(priv); + + rswitch_clock_enable(priv); + rswitch_top_init(priv); + err = rswitch_bpool_config(priv); + if (err < 0) + return err; + + rswitch_coma_init(priv); + + err = rswitch_gwca_linkfix_alloc(priv); + if (err < 0) + return -ENOMEM; + + err = rswitch_gwca_ts_queue_alloc(priv); + if (err < 0) + goto err_ts_queue_alloc; + + for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + err = rswitch_device_alloc(priv, i); + if (err < 0) { + for (i--; i >= 0; i--) + rswitch_device_free(priv, i); + goto err_device_alloc; + } + } + + rswitch_fwd_init(priv); + + err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4, + RCAR_GEN4_PTP_CLOCK_S4); + if (err < 0) + goto err_ptp_register; + + err = rswitch_gwca_request_irqs(priv); + if (err < 0) + goto err_gwca_request_irq; + + err = rswitch_gwca_ts_request_irqs(priv); + if (err < 0) + goto err_gwca_ts_request_irq; + + err = rswitch_gwca_hw_init(priv); + if (err < 0) + goto err_gwca_hw_init; + + err = rswitch_ether_port_init_all(priv); + if (err) + goto err_ether_port_init_all; + + rswitch_for_each_enabled_port(priv, i) { + err = register_netdev(priv->rdev[i]->ndev); + if (err) { + 
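+			/* Unregister the ports brought up so far, then unwind
+			 * the rest of the error path.
+			 */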
rswitch_for_each_enabled_port_continue_reverse(priv, i) + unregister_netdev(priv->rdev[i]->ndev); + goto err_register_netdev; + } + } + + rswitch_for_each_enabled_port(priv, i) + netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n", + priv->rdev[i]->ndev->dev_addr); + + return 0; + +err_register_netdev: + rswitch_ether_port_deinit_all(priv); + +err_ether_port_init_all: + rswitch_gwca_hw_deinit(priv); + +err_gwca_hw_init: +err_gwca_ts_request_irq: +err_gwca_request_irq: + rcar_gen4_ptp_unregister(priv->ptp_priv); + +err_ptp_register: + for (i = 0; i < RSWITCH_NUM_PORTS; i++) + rswitch_device_free(priv, i); + +err_device_alloc: + rswitch_gwca_ts_queue_free(priv); + +err_ts_queue_alloc: + rswitch_gwca_linkfix_free(priv); + + return err; +} + +static const struct soc_device_attribute rswitch_soc_no_speed_change[] = { + { .soc_id = "r8a779f0", .revision = "ES1.0" }, + { /* Sentinel */ } +}; + +static int renesas_eth_sw_probe(struct platform_device *pdev) +{ + const struct soc_device_attribute *attr; + struct rswitch_private *priv; + struct resource *res; + int ret; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base"); + if (!res) { + dev_err(&pdev->dev, "invalid resource\n"); + return -EINVAL; + } + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + spin_lock_init(&priv->lock); + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + attr = soc_device_match(rswitch_soc_no_speed_change); + if (attr) + priv->etha_no_runtime_change = true; + + priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); + if (!priv->ptp_priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + priv->pdev = pdev; + priv->addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->addr)) + return PTR_ERR(priv->addr); + + priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4; + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (ret < 0) { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret < 0) + return ret; + } + + priv->gwca.index = AGENT_INDEX_GWCA; + priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV, + RSWITCH_MAX_NUM_QUEUES); + priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues, + sizeof(*priv->gwca.queues), GFP_KERNEL); + if (!priv->gwca.queues) + return -ENOMEM; + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + ret = rswitch_init(priv); + if (ret < 0) { + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return ret; + } + + device_set_wakeup_capable(&pdev->dev, 1); + + return ret; +} + +static void rswitch_deinit(struct rswitch_private *priv) +{ + int i; + + rswitch_gwca_hw_deinit(priv); + rcar_gen4_ptp_unregister(priv->ptp_priv); + + rswitch_for_each_enabled_port(priv, i) { + struct rswitch_device *rdev = priv->rdev[i]; + + unregister_netdev(rdev->ndev); + rswitch_ether_port_deinit_one(rdev); + phy_exit(priv->rdev[i]->serdes); + } + + for (i = 0; i < RSWITCH_NUM_PORTS; i++) + rswitch_device_free(priv, i); + + rswitch_gwca_ts_queue_free(priv); + rswitch_gwca_linkfix_free(priv); + + rswitch_clock_disable(priv); +} + +static int renesas_eth_sw_remove(struct platform_device *pdev) +{ + struct rswitch_private *priv = platform_get_drvdata(pdev); + + rswitch_deinit(priv); + + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver renesas_eth_sw_driver_platform = { + .probe = 
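+
+/* renesas_eth_sw_probe() above uses a common fallback pattern for DMA
+ * addressing: try the 40-bit mask the hardware supports, then fall back
+ * to 32 bits. A minimal sketch of the same idiom (not the literal code
+ * above, which propagates the errno from the second call instead):
+ *
+ *   if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)) &&
+ *       dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
+ *           return -EIO;   // neither mask usable, abort probe
+ *
+ * dma_set_mask_and_coherent() returns 0 on success, so the 32-bit call
+ * only runs when the 40-bit mask was rejected.
+ */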
renesas_eth_sw_probe, + .remove = renesas_eth_sw_remove, + .driver = { + .name = "renesas_eth_sw", + .of_match_table = renesas_eth_sw_of_table, + } +}; +module_platform_driver(renesas_eth_sw_driver_platform); +MODULE_AUTHOR("Yoshihiro Shimoda"); +MODULE_DESCRIPTION("Renesas Ethernet Switch device driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h new file mode 100644 index 0000000000..04f49a7a58 --- /dev/null +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -0,0 +1,1022 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Renesas Ethernet Switch device driver + * + * Copyright (C) 2022 Renesas Electronics Corporation + */ + +#ifndef __RSWITCH_H__ +#define __RSWITCH_H__ + +#include +#include "rcar_gen4_ptp.h" + +#define RSWITCH_MAX_NUM_QUEUES 128 + +#define RSWITCH_NUM_PORTS 3 +#define rswitch_for_each_enabled_port(priv, i) \ + for (i = 0; i < RSWITCH_NUM_PORTS; i++) \ + if (priv->rdev[i]->disabled) \ + continue; \ + else + +#define rswitch_for_each_enabled_port_continue_reverse(priv, i) \ + for (i--; i >= 0; i--) \ + if (priv->rdev[i]->disabled) \ + continue; \ + else + +#define TX_RING_SIZE 1024 +#define RX_RING_SIZE 1024 +#define TS_RING_SIZE (TX_RING_SIZE * RSWITCH_NUM_PORTS) + +#define PKT_BUF_SZ 1584 +#define RSWITCH_ALIGN 128 +#define RSWITCH_MAX_CTAG_PCP 7 + +#define RSWITCH_TIMEOUT_US 100000 + +#define RSWITCH_TOP_OFFSET 0x00008000 +#define RSWITCH_COMA_OFFSET 0x00009000 +#define RSWITCH_ETHA_OFFSET 0x0000a000 /* with RMAC */ +#define RSWITCH_ETHA_SIZE 0x00002000 /* with RMAC */ +#define RSWITCH_GWCA0_OFFSET 0x00010000 +#define RSWITCH_GWCA1_OFFSET 0x00012000 + +/* TODO: hardcoded ETHA/GWCA settings for now */ +#define GWCA_IRQ_RESOURCE_NAME "gwca0_rxtx%d" +#define GWCA_IRQ_NAME "rswitch: gwca0_rxtx%d" +#define GWCA_NUM_IRQS 8 +#define GWCA_INDEX 0 +#define AGENT_INDEX_GWCA 3 +#define GWCA_IPV_NUM 0 +#define GWRO RSWITCH_GWCA0_OFFSET + +#define GWCA_TS_IRQ_RESOURCE_NAME "gwca0_rxts0" +#define GWCA_TS_IRQ_NAME "rswitch: gwca0_rxts0" +#define GWCA_TS_IRQ_BIT BIT(0) + +#define FWRO 0 +#define TPRO RSWITCH_TOP_OFFSET +#define CARO RSWITCH_COMA_OFFSET +#define TARO 0 +#define RMRO 0x1000 +enum rswitch_reg { + FWGC = FWRO + 0x0000, + FWTTC0 = FWRO + 0x0010, + FWTTC1 = FWRO + 0x0014, + FWLBMC = FWRO + 0x0018, + FWCEPTC = FWRO + 0x0020, + FWCEPRC0 = FWRO + 0x0024, + FWCEPRC1 = FWRO + 0x0028, + FWCEPRC2 = FWRO + 0x002c, + FWCLPTC = FWRO + 0x0030, + FWCLPRC = FWRO + 0x0034, + FWCMPTC = FWRO + 0x0040, + FWEMPTC = FWRO + 0x0044, + FWSDMPTC = FWRO + 0x0050, + FWSDMPVC = FWRO + 0x0054, + FWLBWMC0 = FWRO + 0x0080, + FWPC00 = FWRO + 0x0100, + FWPC10 = FWRO + 0x0104, + FWPC20 = FWRO + 0x0108, + FWCTGC00 = FWRO + 0x0400, + FWCTGC10 = FWRO + 0x0404, + FWCTTC00 = FWRO + 0x0408, + FWCTTC10 = FWRO + 0x040c, + FWCTTC200 = FWRO + 0x0410, + FWCTSC00 = FWRO + 0x0420, + FWCTSC10 = FWRO + 0x0424, + FWCTSC20 = FWRO + 0x0428, + FWCTSC30 = FWRO + 0x042c, + FWCTSC40 = FWRO + 0x0430, + FWTWBFC0 = FWRO + 0x1000, + FWTWBFVC0 = FWRO + 0x1004, + FWTHBFC0 = FWRO + 0x1400, + FWTHBFV0C0 = FWRO + 0x1404, + FWTHBFV1C0 = FWRO + 0x1408, + FWFOBFC0 = FWRO + 0x1800, + FWFOBFV0C0 = FWRO + 0x1804, + FWFOBFV1C0 = FWRO + 0x1808, + FWRFC0 = FWRO + 0x1c00, + FWRFVC0 = FWRO + 0x1c04, + FWCFC0 = FWRO + 0x2000, + FWCFMC00 = FWRO + 0x2004, + FWIP4SC = FWRO + 0x4008, + FWIP6SC = FWRO + 0x4018, + FWIP6OC = FWRO + 0x401c, + FWL2SC = FWRO + 0x4020, + FWSFHEC = FWRO + 0x4030, + FWSHCR0 = FWRO + 0x4040, + FWSHCR1 = FWRO + 0x4044, + FWSHCR2 = FWRO + 0x4048, + FWSHCR3 = FWRO + 
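+
+/* The rswitch_for_each_enabled_port*() helpers above use the classic
+ * if/continue/else construction so an invocation can be followed by a
+ * single statement or a braced block, like a real for loop. Roughly:
+ *
+ *   rswitch_for_each_enabled_port(priv, i)
+ *           register_netdev(priv->rdev[i]->ndev);
+ *
+ * expands to:
+ *
+ *   for (i = 0; i < RSWITCH_NUM_PORTS; i++)
+ *           if (priv->rdev[i]->disabled)
+ *                   continue;
+ *           else
+ *                   register_netdev(priv->rdev[i]->ndev);
+ *
+ * The _continue_reverse variant walks back down from the current index,
+ * which is how rswitch_init() unregisters the already-registered ports
+ * when register_netdev() fails partway through the loop.
+ */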
0x404c, + FWSHCR4 = FWRO + 0x4050, + FWSHCR5 = FWRO + 0x4054, + FWSHCR6 = FWRO + 0x4058, + FWSHCR7 = FWRO + 0x405c, + FWSHCR8 = FWRO + 0x4060, + FWSHCR9 = FWRO + 0x4064, + FWSHCR10 = FWRO + 0x4068, + FWSHCR11 = FWRO + 0x406c, + FWSHCR12 = FWRO + 0x4070, + FWSHCR13 = FWRO + 0x4074, + FWSHCRR = FWRO + 0x4078, + FWLTHHEC = FWRO + 0x4090, + FWLTHHC = FWRO + 0x4094, + FWLTHTL0 = FWRO + 0x40a0, + FWLTHTL1 = FWRO + 0x40a4, + FWLTHTL2 = FWRO + 0x40a8, + FWLTHTL3 = FWRO + 0x40ac, + FWLTHTL4 = FWRO + 0x40b0, + FWLTHTL5 = FWRO + 0x40b4, + FWLTHTL6 = FWRO + 0x40b8, + FWLTHTL7 = FWRO + 0x40bc, + FWLTHTL80 = FWRO + 0x40c0, + FWLTHTL9 = FWRO + 0x40d0, + FWLTHTLR = FWRO + 0x40d4, + FWLTHTIM = FWRO + 0x40e0, + FWLTHTEM = FWRO + 0x40e4, + FWLTHTS0 = FWRO + 0x4100, + FWLTHTS1 = FWRO + 0x4104, + FWLTHTS2 = FWRO + 0x4108, + FWLTHTS3 = FWRO + 0x410c, + FWLTHTS4 = FWRO + 0x4110, + FWLTHTSR0 = FWRO + 0x4120, + FWLTHTSR1 = FWRO + 0x4124, + FWLTHTSR2 = FWRO + 0x4128, + FWLTHTSR3 = FWRO + 0x412c, + FWLTHTSR40 = FWRO + 0x4130, + FWLTHTSR5 = FWRO + 0x4140, + FWLTHTR = FWRO + 0x4150, + FWLTHTRR0 = FWRO + 0x4154, + FWLTHTRR1 = FWRO + 0x4158, + FWLTHTRR2 = FWRO + 0x415c, + FWLTHTRR3 = FWRO + 0x4160, + FWLTHTRR4 = FWRO + 0x4164, + FWLTHTRR5 = FWRO + 0x4168, + FWLTHTRR6 = FWRO + 0x416c, + FWLTHTRR7 = FWRO + 0x4170, + FWLTHTRR8 = FWRO + 0x4174, + FWLTHTRR9 = FWRO + 0x4180, + FWLTHTRR10 = FWRO + 0x4190, + FWIPHEC = FWRO + 0x4214, + FWIPHC = FWRO + 0x4218, + FWIPTL0 = FWRO + 0x4220, + FWIPTL1 = FWRO + 0x4224, + FWIPTL2 = FWRO + 0x4228, + FWIPTL3 = FWRO + 0x422c, + FWIPTL4 = FWRO + 0x4230, + FWIPTL5 = FWRO + 0x4234, + FWIPTL6 = FWRO + 0x4238, + FWIPTL7 = FWRO + 0x4240, + FWIPTL8 = FWRO + 0x4250, + FWIPTLR = FWRO + 0x4254, + FWIPTIM = FWRO + 0x4260, + FWIPTEM = FWRO + 0x4264, + FWIPTS0 = FWRO + 0x4270, + FWIPTS1 = FWRO + 0x4274, + FWIPTS2 = FWRO + 0x4278, + FWIPTS3 = FWRO + 0x427c, + FWIPTS4 = FWRO + 0x4280, + FWIPTSR0 = FWRO + 0x4284, + FWIPTSR1 = FWRO + 0x4288, + FWIPTSR2 = FWRO + 0x428c, + FWIPTSR3 = FWRO + 0x4290, + FWIPTSR4 = FWRO + 0x42a0, + FWIPTR = FWRO + 0x42b0, + FWIPTRR0 = FWRO + 0x42b4, + FWIPTRR1 = FWRO + 0x42b8, + FWIPTRR2 = FWRO + 0x42bc, + FWIPTRR3 = FWRO + 0x42c0, + FWIPTRR4 = FWRO + 0x42c4, + FWIPTRR5 = FWRO + 0x42c8, + FWIPTRR6 = FWRO + 0x42cc, + FWIPTRR7 = FWRO + 0x42d0, + FWIPTRR8 = FWRO + 0x42e0, + FWIPTRR9 = FWRO + 0x42f0, + FWIPHLEC = FWRO + 0x4300, + FWIPAGUSPC = FWRO + 0x4500, + FWIPAGC = FWRO + 0x4504, + FWIPAGM0 = FWRO + 0x4510, + FWIPAGM1 = FWRO + 0x4514, + FWIPAGM2 = FWRO + 0x4518, + FWIPAGM3 = FWRO + 0x451c, + FWIPAGM4 = FWRO + 0x4520, + FWMACHEC = FWRO + 0x4620, + FWMACHC = FWRO + 0x4624, + FWMACTL0 = FWRO + 0x4630, + FWMACTL1 = FWRO + 0x4634, + FWMACTL2 = FWRO + 0x4638, + FWMACTL3 = FWRO + 0x463c, + FWMACTL4 = FWRO + 0x4640, + FWMACTL5 = FWRO + 0x4650, + FWMACTLR = FWRO + 0x4654, + FWMACTIM = FWRO + 0x4660, + FWMACTEM = FWRO + 0x4664, + FWMACTS0 = FWRO + 0x4670, + FWMACTS1 = FWRO + 0x4674, + FWMACTSR0 = FWRO + 0x4678, + FWMACTSR1 = FWRO + 0x467c, + FWMACTSR2 = FWRO + 0x4680, + FWMACTSR3 = FWRO + 0x4690, + FWMACTR = FWRO + 0x46a0, + FWMACTRR0 = FWRO + 0x46a4, + FWMACTRR1 = FWRO + 0x46a8, + FWMACTRR2 = FWRO + 0x46ac, + FWMACTRR3 = FWRO + 0x46b0, + FWMACTRR4 = FWRO + 0x46b4, + FWMACTRR5 = FWRO + 0x46c0, + FWMACTRR6 = FWRO + 0x46d0, + FWMACHLEC = FWRO + 0x4700, + FWMACAGUSPC = FWRO + 0x4880, + FWMACAGC = FWRO + 0x4884, + FWMACAGM0 = FWRO + 0x4888, + FWMACAGM1 = FWRO + 0x488c, + FWVLANTEC = FWRO + 0x4900, + FWVLANTL0 = FWRO + 0x4910, + FWVLANTL1 = FWRO + 0x4914, + FWVLANTL2 = FWRO + 0x4918, + 
FWVLANTL3 = FWRO + 0x4920, + FWVLANTL4 = FWRO + 0x4930, + FWVLANTLR = FWRO + 0x4934, + FWVLANTIM = FWRO + 0x4940, + FWVLANTEM = FWRO + 0x4944, + FWVLANTS = FWRO + 0x4950, + FWVLANTSR0 = FWRO + 0x4954, + FWVLANTSR1 = FWRO + 0x4958, + FWVLANTSR2 = FWRO + 0x4960, + FWVLANTSR3 = FWRO + 0x4970, + FWPBFC0 = FWRO + 0x4a00, + FWPBFCSDC00 = FWRO + 0x4a04, + FWL23URL0 = FWRO + 0x4e00, + FWL23URL1 = FWRO + 0x4e04, + FWL23URL2 = FWRO + 0x4e08, + FWL23URL3 = FWRO + 0x4e0c, + FWL23URLR = FWRO + 0x4e10, + FWL23UTIM = FWRO + 0x4e20, + FWL23URR = FWRO + 0x4e30, + FWL23URRR0 = FWRO + 0x4e34, + FWL23URRR1 = FWRO + 0x4e38, + FWL23URRR2 = FWRO + 0x4e3c, + FWL23URRR3 = FWRO + 0x4e40, + FWL23URMC0 = FWRO + 0x4f00, + FWPMFGC0 = FWRO + 0x5000, + FWPGFC0 = FWRO + 0x5100, + FWPGFIGSC0 = FWRO + 0x5104, + FWPGFENC0 = FWRO + 0x5108, + FWPGFENM0 = FWRO + 0x510c, + FWPGFCSTC00 = FWRO + 0x5110, + FWPGFCSTC10 = FWRO + 0x5114, + FWPGFCSTM00 = FWRO + 0x5118, + FWPGFCSTM10 = FWRO + 0x511c, + FWPGFCTC0 = FWRO + 0x5120, + FWPGFCTM0 = FWRO + 0x5124, + FWPGFHCC0 = FWRO + 0x5128, + FWPGFSM0 = FWRO + 0x512c, + FWPGFGC0 = FWRO + 0x5130, + FWPGFGL0 = FWRO + 0x5500, + FWPGFGL1 = FWRO + 0x5504, + FWPGFGLR = FWRO + 0x5518, + FWPGFGR = FWRO + 0x5510, + FWPGFGRR0 = FWRO + 0x5514, + FWPGFGRR1 = FWRO + 0x5518, + FWPGFRIM = FWRO + 0x5520, + FWPMTRFC0 = FWRO + 0x5600, + FWPMTRCBSC0 = FWRO + 0x5604, + FWPMTRC0RC0 = FWRO + 0x5608, + FWPMTREBSC0 = FWRO + 0x560c, + FWPMTREIRC0 = FWRO + 0x5610, + FWPMTRFM0 = FWRO + 0x5614, + FWFTL0 = FWRO + 0x6000, + FWFTL1 = FWRO + 0x6004, + FWFTLR = FWRO + 0x6008, + FWFTOC = FWRO + 0x6010, + FWFTOPC = FWRO + 0x6014, + FWFTIM = FWRO + 0x6020, + FWFTR = FWRO + 0x6030, + FWFTRR0 = FWRO + 0x6034, + FWFTRR1 = FWRO + 0x6038, + FWFTRR2 = FWRO + 0x603c, + FWSEQNGC0 = FWRO + 0x6100, + FWSEQNGM0 = FWRO + 0x6104, + FWSEQNRC = FWRO + 0x6200, + FWCTFDCN0 = FWRO + 0x6300, + FWLTHFDCN0 = FWRO + 0x6304, + FWIPFDCN0 = FWRO + 0x6308, + FWLTWFDCN0 = FWRO + 0x630c, + FWPBFDCN0 = FWRO + 0x6310, + FWMHLCN0 = FWRO + 0x6314, + FWIHLCN0 = FWRO + 0x6318, + FWICRDCN0 = FWRO + 0x6500, + FWWMRDCN0 = FWRO + 0x6504, + FWCTRDCN0 = FWRO + 0x6508, + FWLTHRDCN0 = FWRO + 0x650c, + FWIPRDCN0 = FWRO + 0x6510, + FWLTWRDCN0 = FWRO + 0x6514, + FWPBRDCN0 = FWRO + 0x6518, + FWPMFDCN0 = FWRO + 0x6700, + FWPGFDCN0 = FWRO + 0x6780, + FWPMGDCN0 = FWRO + 0x6800, + FWPMYDCN0 = FWRO + 0x6804, + FWPMRDCN0 = FWRO + 0x6808, + FWFRPPCN0 = FWRO + 0x6a00, + FWFRDPCN0 = FWRO + 0x6a04, + FWEIS00 = FWRO + 0x7900, + FWEIE00 = FWRO + 0x7904, + FWEID00 = FWRO + 0x7908, + FWEIS1 = FWRO + 0x7a00, + FWEIE1 = FWRO + 0x7a04, + FWEID1 = FWRO + 0x7a08, + FWEIS2 = FWRO + 0x7a10, + FWEIE2 = FWRO + 0x7a14, + FWEID2 = FWRO + 0x7a18, + FWEIS3 = FWRO + 0x7a20, + FWEIE3 = FWRO + 0x7a24, + FWEID3 = FWRO + 0x7a28, + FWEIS4 = FWRO + 0x7a30, + FWEIE4 = FWRO + 0x7a34, + FWEID4 = FWRO + 0x7a38, + FWEIS5 = FWRO + 0x7a40, + FWEIE5 = FWRO + 0x7a44, + FWEID5 = FWRO + 0x7a48, + FWEIS60 = FWRO + 0x7a50, + FWEIE60 = FWRO + 0x7a54, + FWEID60 = FWRO + 0x7a58, + FWEIS61 = FWRO + 0x7a60, + FWEIE61 = FWRO + 0x7a64, + FWEID61 = FWRO + 0x7a68, + FWEIS62 = FWRO + 0x7a70, + FWEIE62 = FWRO + 0x7a74, + FWEID62 = FWRO + 0x7a78, + FWEIS63 = FWRO + 0x7a80, + FWEIE63 = FWRO + 0x7a84, + FWEID63 = FWRO + 0x7a88, + FWEIS70 = FWRO + 0x7a90, + FWEIE70 = FWRO + 0x7A94, + FWEID70 = FWRO + 0x7a98, + FWEIS71 = FWRO + 0x7aa0, + FWEIE71 = FWRO + 0x7aa4, + FWEID71 = FWRO + 0x7aa8, + FWEIS72 = FWRO + 0x7ab0, + FWEIE72 = FWRO + 0x7ab4, + FWEID72 = FWRO + 0x7ab8, + FWEIS73 = FWRO + 0x7ac0, + FWEIE73 = FWRO + 0x7ac4, + FWEID73 = 
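+
+/* Each enum value in this table is a byte offset that already includes
+ * its block base (FWRO, TPRO, CARO, TARO, RMRO, GWRO), so a register's
+ * MMIO address is just a mapped base plus the enum value. For the
+ * per-port ETHA/RMAC blocks (TARO/RMRO-relative) the base is the port
+ * window set up in rswitch_etha_init(), e.g. (sketch):
+ *
+ *   etha->addr = priv->addr + RSWITCH_ETHA_OFFSET +
+ *                index * RSWITCH_ETHA_SIZE;
+ *   val = ioread32(etha->addr + MPIC);
+ *
+ * Switch-global blocks (FWRO/CARO/GWRO, ...) are accessed relative to
+ * priv->addr (etha->coma_addr) instead.
+ */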
FWRO + 0x7ac8, + FWEIS80 = FWRO + 0x7ad0, + FWEIE80 = FWRO + 0x7ad4, + FWEID80 = FWRO + 0x7ad8, + FWEIS81 = FWRO + 0x7ae0, + FWEIE81 = FWRO + 0x7ae4, + FWEID81 = FWRO + 0x7ae8, + FWEIS82 = FWRO + 0x7af0, + FWEIE82 = FWRO + 0x7af4, + FWEID82 = FWRO + 0x7af8, + FWEIS83 = FWRO + 0x7b00, + FWEIE83 = FWRO + 0x7b04, + FWEID83 = FWRO + 0x7b08, + FWMIS0 = FWRO + 0x7c00, + FWMIE0 = FWRO + 0x7c04, + FWMID0 = FWRO + 0x7c08, + FWSCR0 = FWRO + 0x7d00, + FWSCR1 = FWRO + 0x7d04, + FWSCR2 = FWRO + 0x7d08, + FWSCR3 = FWRO + 0x7d0c, + FWSCR4 = FWRO + 0x7d10, + FWSCR5 = FWRO + 0x7d14, + FWSCR6 = FWRO + 0x7d18, + FWSCR7 = FWRO + 0x7d1c, + FWSCR8 = FWRO + 0x7d20, + FWSCR9 = FWRO + 0x7d24, + FWSCR10 = FWRO + 0x7d28, + FWSCR11 = FWRO + 0x7d2c, + FWSCR12 = FWRO + 0x7d30, + FWSCR13 = FWRO + 0x7d34, + FWSCR14 = FWRO + 0x7d38, + FWSCR15 = FWRO + 0x7d3c, + FWSCR16 = FWRO + 0x7d40, + FWSCR17 = FWRO + 0x7d44, + FWSCR18 = FWRO + 0x7d48, + FWSCR19 = FWRO + 0x7d4c, + FWSCR20 = FWRO + 0x7d50, + FWSCR21 = FWRO + 0x7d54, + FWSCR22 = FWRO + 0x7d58, + FWSCR23 = FWRO + 0x7d5c, + FWSCR24 = FWRO + 0x7d60, + FWSCR25 = FWRO + 0x7d64, + FWSCR26 = FWRO + 0x7d68, + FWSCR27 = FWRO + 0x7d6c, + FWSCR28 = FWRO + 0x7d70, + FWSCR29 = FWRO + 0x7d74, + FWSCR30 = FWRO + 0x7d78, + FWSCR31 = FWRO + 0x7d7c, + FWSCR32 = FWRO + 0x7d80, + FWSCR33 = FWRO + 0x7d84, + FWSCR34 = FWRO + 0x7d88, + FWSCR35 = FWRO + 0x7d8c, + FWSCR36 = FWRO + 0x7d90, + FWSCR37 = FWRO + 0x7d94, + FWSCR38 = FWRO + 0x7d98, + FWSCR39 = FWRO + 0x7d9c, + FWSCR40 = FWRO + 0x7da0, + FWSCR41 = FWRO + 0x7da4, + FWSCR42 = FWRO + 0x7da8, + FWSCR43 = FWRO + 0x7dac, + FWSCR44 = FWRO + 0x7db0, + FWSCR45 = FWRO + 0x7db4, + FWSCR46 = FWRO + 0x7db8, + + TPEMIMC0 = TPRO + 0x0000, + TPEMIMC1 = TPRO + 0x0004, + TPEMIMC2 = TPRO + 0x0008, + TPEMIMC3 = TPRO + 0x000c, + TPEMIMC4 = TPRO + 0x0010, + TPEMIMC5 = TPRO + 0x0014, + TPEMIMC60 = TPRO + 0x0080, + TPEMIMC70 = TPRO + 0x0100, + TSIM = TPRO + 0x0700, + TFIM = TPRO + 0x0704, + TCIM = TPRO + 0x0708, + TGIM0 = TPRO + 0x0710, + TGIM1 = TPRO + 0x0714, + TEIM0 = TPRO + 0x0720, + TEIM1 = TPRO + 0x0724, + TEIM2 = TPRO + 0x0728, + + RIPV = CARO + 0x0000, + RRC = CARO + 0x0004, + RCEC = CARO + 0x0008, + RCDC = CARO + 0x000c, + RSSIS = CARO + 0x0010, + RSSIE = CARO + 0x0014, + RSSID = CARO + 0x0018, + CABPIBWMC = CARO + 0x0020, + CABPWMLC = CARO + 0x0040, + CABPPFLC0 = CARO + 0x0050, + CABPPWMLC0 = CARO + 0x0060, + CABPPPFLC00 = CARO + 0x00a0, + CABPULC = CARO + 0x0100, + CABPIRM = CARO + 0x0140, + CABPPCM = CARO + 0x0144, + CABPLCM = CARO + 0x0148, + CABPCPM = CARO + 0x0180, + CABPMCPM = CARO + 0x0200, + CARDNM = CARO + 0x0280, + CARDMNM = CARO + 0x0284, + CARDCN = CARO + 0x0290, + CAEIS0 = CARO + 0x0300, + CAEIE0 = CARO + 0x0304, + CAEID0 = CARO + 0x0308, + CAEIS1 = CARO + 0x0310, + CAEIE1 = CARO + 0x0314, + CAEID1 = CARO + 0x0318, + CAMIS0 = CARO + 0x0340, + CAMIE0 = CARO + 0x0344, + CAMID0 = CARO + 0x0348, + CAMIS1 = CARO + 0x0350, + CAMIE1 = CARO + 0x0354, + CAMID1 = CARO + 0x0358, + CASCR = CARO + 0x0380, + + EAMC = TARO + 0x0000, + EAMS = TARO + 0x0004, + EAIRC = TARO + 0x0010, + EATDQSC = TARO + 0x0014, + EATDQC = TARO + 0x0018, + EATDQAC = TARO + 0x001c, + EATPEC = TARO + 0x0020, + EATMFSC0 = TARO + 0x0040, + EATDQDC0 = TARO + 0x0060, + EATDQM0 = TARO + 0x0080, + EATDQMLM0 = TARO + 0x00a0, + EACTQC = TARO + 0x0100, + EACTDQDC = TARO + 0x0104, + EACTDQM = TARO + 0x0108, + EACTDQMLM = TARO + 0x010c, + EAVCC = TARO + 0x0130, + EAVTC = TARO + 0x0134, + EATTFC = TARO + 0x0138, + EACAEC = TARO + 0x0200, + EACC = TARO + 0x0204, + EACAIVC0 = TARO + 
0x0220, + EACAULC0 = TARO + 0x0240, + EACOEM = TARO + 0x0260, + EACOIVM0 = TARO + 0x0280, + EACOULM0 = TARO + 0x02a0, + EACGSM = TARO + 0x02c0, + EATASC = TARO + 0x0300, + EATASENC0 = TARO + 0x0320, + EATASCTENC = TARO + 0x0340, + EATASENM0 = TARO + 0x0360, + EATASCTENM = TARO + 0x0380, + EATASCSTC0 = TARO + 0x03a0, + EATASCSTC1 = TARO + 0x03a4, + EATASCSTM0 = TARO + 0x03a8, + EATASCSTM1 = TARO + 0x03ac, + EATASCTC = TARO + 0x03b0, + EATASCTM = TARO + 0x03b4, + EATASGL0 = TARO + 0x03c0, + EATASGL1 = TARO + 0x03c4, + EATASGLR = TARO + 0x03c8, + EATASGR = TARO + 0x03d0, + EATASGRR = TARO + 0x03d4, + EATASHCC = TARO + 0x03e0, + EATASRIRM = TARO + 0x03e4, + EATASSM = TARO + 0x03e8, + EAUSMFSECN = TARO + 0x0400, + EATFECN = TARO + 0x0404, + EAFSECN = TARO + 0x0408, + EADQOECN = TARO + 0x040c, + EADQSECN = TARO + 0x0410, + EACKSECN = TARO + 0x0414, + EAEIS0 = TARO + 0x0500, + EAEIE0 = TARO + 0x0504, + EAEID0 = TARO + 0x0508, + EAEIS1 = TARO + 0x0510, + EAEIE1 = TARO + 0x0514, + EAEID1 = TARO + 0x0518, + EAEIS2 = TARO + 0x0520, + EAEIE2 = TARO + 0x0524, + EAEID2 = TARO + 0x0528, + EASCR = TARO + 0x0580, + + MPSM = RMRO + 0x0000, + MPIC = RMRO + 0x0004, + MPIM = RMRO + 0x0008, + MIOC = RMRO + 0x0010, + MIOM = RMRO + 0x0014, + MXMS = RMRO + 0x0018, + MTFFC = RMRO + 0x0020, + MTPFC = RMRO + 0x0024, + MTPFC2 = RMRO + 0x0028, + MTPFC30 = RMRO + 0x0030, + MTATC0 = RMRO + 0x0050, + MTIM = RMRO + 0x0060, + MRGC = RMRO + 0x0080, + MRMAC0 = RMRO + 0x0084, + MRMAC1 = RMRO + 0x0088, + MRAFC = RMRO + 0x008c, + MRSCE = RMRO + 0x0090, + MRSCP = RMRO + 0x0094, + MRSCC = RMRO + 0x0098, + MRFSCE = RMRO + 0x009c, + MRFSCP = RMRO + 0x00a0, + MTRC = RMRO + 0x00a4, + MRIM = RMRO + 0x00a8, + MRPFM = RMRO + 0x00ac, + MPFC0 = RMRO + 0x0100, + MLVC = RMRO + 0x0180, + MEEEC = RMRO + 0x0184, + MLBC = RMRO + 0x0188, + MXGMIIC = RMRO + 0x0190, + MPCH = RMRO + 0x0194, + MANC = RMRO + 0x0198, + MANM = RMRO + 0x019c, + MPLCA1 = RMRO + 0x01a0, + MPLCA2 = RMRO + 0x01a4, + MPLCA3 = RMRO + 0x01a8, + MPLCA4 = RMRO + 0x01ac, + MPLCAM = RMRO + 0x01b0, + MHDC1 = RMRO + 0x01c0, + MHDC2 = RMRO + 0x01c4, + MEIS = RMRO + 0x0200, + MEIE = RMRO + 0x0204, + MEID = RMRO + 0x0208, + MMIS0 = RMRO + 0x0210, + MMIE0 = RMRO + 0x0214, + MMID0 = RMRO + 0x0218, + MMIS1 = RMRO + 0x0220, + MMIE1 = RMRO + 0x0224, + MMID1 = RMRO + 0x0228, + MMIS2 = RMRO + 0x0230, + MMIE2 = RMRO + 0x0234, + MMID2 = RMRO + 0x0238, + MMPFTCT = RMRO + 0x0300, + MAPFTCT = RMRO + 0x0304, + MPFRCT = RMRO + 0x0308, + MFCICT = RMRO + 0x030c, + MEEECT = RMRO + 0x0310, + MMPCFTCT0 = RMRO + 0x0320, + MAPCFTCT0 = RMRO + 0x0330, + MPCFRCT0 = RMRO + 0x0340, + MHDCC = RMRO + 0x0350, + MROVFC = RMRO + 0x0354, + MRHCRCEC = RMRO + 0x0358, + MRXBCE = RMRO + 0x0400, + MRXBCP = RMRO + 0x0404, + MRGFCE = RMRO + 0x0408, + MRGFCP = RMRO + 0x040c, + MRBFC = RMRO + 0x0410, + MRMFC = RMRO + 0x0414, + MRUFC = RMRO + 0x0418, + MRPEFC = RMRO + 0x041c, + MRNEFC = RMRO + 0x0420, + MRFMEFC = RMRO + 0x0424, + MRFFMEFC = RMRO + 0x0428, + MRCFCEFC = RMRO + 0x042c, + MRFCEFC = RMRO + 0x0430, + MRRCFEFC = RMRO + 0x0434, + MRUEFC = RMRO + 0x043c, + MROEFC = RMRO + 0x0440, + MRBOEC = RMRO + 0x0444, + MTXBCE = RMRO + 0x0500, + MTXBCP = RMRO + 0x0504, + MTGFCE = RMRO + 0x0508, + MTGFCP = RMRO + 0x050c, + MTBFC = RMRO + 0x0510, + MTMFC = RMRO + 0x0514, + MTUFC = RMRO + 0x0518, + MTEFC = RMRO + 0x051c, + + GWMC = GWRO + 0x0000, + GWMS = GWRO + 0x0004, + GWIRC = GWRO + 0x0010, + GWRDQSC = GWRO + 0x0014, + GWRDQC = GWRO + 0x0018, + GWRDQAC = GWRO + 0x001c, + GWRGC = GWRO + 0x0020, + GWRMFSC0 = GWRO + 0x0040, + GWRDQDC0 
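+
+/* MPSM above drives the RMAC's MDIO (station management) state machine;
+ * its MPSM_* and MDIO_*_C45 bit definitions appear further below. A
+ * heavily hedged sketch of a Clause 45 read, with the exact layout of
+ * the op/address fields left out since it is not shown in this header:
+ *
+ *   iowrite32(MPSM_PSME | MPSM_MFF_C45 | <op/addr fields>,
+ *             etha->addr + MPSM);          // start the frame
+ *   ...poll MMIS1 until MMIS1_PRACS (read complete) is set...
+ *   data = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK)
+ *          >> MPSM_PRD_SHIFT;              // result in bits 31:16
+ */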
= GWRO + 0x0060, + GWRDQM0 = GWRO + 0x0080, + GWRDQMLM0 = GWRO + 0x00a0, + GWMTIRM = GWRO + 0x0100, + GWMSTLS = GWRO + 0x0104, + GWMSTLR = GWRO + 0x0108, + GWMSTSS = GWRO + 0x010c, + GWMSTSR = GWRO + 0x0110, + GWMAC0 = GWRO + 0x0120, + GWMAC1 = GWRO + 0x0124, + GWVCC = GWRO + 0x0130, + GWVTC = GWRO + 0x0134, + GWTTFC = GWRO + 0x0138, + GWTDCAC00 = GWRO + 0x0140, + GWTDCAC10 = GWRO + 0x0144, + GWTSDCC0 = GWRO + 0x0160, + GWTNM = GWRO + 0x0180, + GWTMNM = GWRO + 0x0184, + GWAC = GWRO + 0x0190, + GWDCBAC0 = GWRO + 0x0194, + GWDCBAC1 = GWRO + 0x0198, + GWIICBSC = GWRO + 0x019c, + GWMDNC = GWRO + 0x01a0, + GWTRC0 = GWRO + 0x0200, + GWTPC0 = GWRO + 0x0300, + GWARIRM = GWRO + 0x0380, + GWDCC0 = GWRO + 0x0400, + GWAARSS = GWRO + 0x0800, + GWAARSR0 = GWRO + 0x0804, + GWAARSR1 = GWRO + 0x0808, + GWIDAUAS0 = GWRO + 0x0840, + GWIDASM0 = GWRO + 0x0880, + GWIDASAM00 = GWRO + 0x0900, + GWIDASAM10 = GWRO + 0x0904, + GWIDACAM00 = GWRO + 0x0980, + GWIDACAM10 = GWRO + 0x0984, + GWGRLC = GWRO + 0x0a00, + GWGRLULC = GWRO + 0x0a04, + GWRLIVC0 = GWRO + 0x0a80, + GWRLULC0 = GWRO + 0x0a84, + GWIDPC = GWRO + 0x0b00, + GWIDC0 = GWRO + 0x0c00, + GWDIS0 = GWRO + 0x1100, + GWDIE0 = GWRO + 0x1104, + GWDID0 = GWRO + 0x1108, + GWTSDIS = GWRO + 0x1180, + GWTSDIE = GWRO + 0x1184, + GWTSDID = GWRO + 0x1188, + GWEIS0 = GWRO + 0x1190, + GWEIE0 = GWRO + 0x1194, + GWEID0 = GWRO + 0x1198, + GWEIS1 = GWRO + 0x11a0, + GWEIE1 = GWRO + 0x11a4, + GWEID1 = GWRO + 0x11a8, + GWEIS20 = GWRO + 0x1200, + GWEIE20 = GWRO + 0x1204, + GWEID20 = GWRO + 0x1208, + GWEIS3 = GWRO + 0x1280, + GWEIE3 = GWRO + 0x1284, + GWEID3 = GWRO + 0x1288, + GWEIS4 = GWRO + 0x1290, + GWEIE4 = GWRO + 0x1294, + GWEID4 = GWRO + 0x1298, + GWEIS5 = GWRO + 0x12a0, + GWEIE5 = GWRO + 0x12a4, + GWEID5 = GWRO + 0x12a8, + GWSCR0 = GWRO + 0x1800, + GWSCR1 = GWRO + 0x1900, +}; + +/* ETHA/RMAC */ +enum rswitch_etha_mode { + EAMC_OPC_RESET, + EAMC_OPC_DISABLE, + EAMC_OPC_CONFIG, + EAMC_OPC_OPERATION, +}; + +#define EAMS_OPS_MASK EAMC_OPC_OPERATION + +#define EAVCC_VEM_SC_TAG (0x3 << 16) + +#define MPIC_PIS_MII 0x00 +#define MPIC_PIS_GMII 0x02 +#define MPIC_PIS_XGMII 0x04 +#define MPIC_LSC_SHIFT 3 +#define MPIC_LSC_100M (1 << MPIC_LSC_SHIFT) +#define MPIC_LSC_1G (2 << MPIC_LSC_SHIFT) +#define MPIC_LSC_2_5G (3 << MPIC_LSC_SHIFT) + +#define MDIO_READ_C45 0x03 +#define MDIO_WRITE_C45 0x01 + +#define MPSM_PSME BIT(0) +#define MPSM_MFF_C45 BIT(2) +#define MPSM_PRD_SHIFT 16 +#define MPSM_PRD_MASK GENMASK(31, MPSM_PRD_SHIFT) + +/* Completion flags */ +#define MMIS1_PAACS BIT(2) /* Address */ +#define MMIS1_PWACS BIT(1) /* Write */ +#define MMIS1_PRACS BIT(0) /* Read */ +#define MMIS1_CLEAR_FLAGS 0xf + +#define MPIC_PSMCS_SHIFT 16 +#define MPIC_PSMCS_MASK GENMASK(22, MPIC_PSMCS_SHIFT) +#define MPIC_PSMCS(val) ((val) << MPIC_PSMCS_SHIFT) + +#define MPIC_PSMHT_SHIFT 24 +#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT) +#define MPIC_PSMHT(val) ((val) << MPIC_PSMHT_SHIFT) + +#define MLVC_PLV BIT(16) + +/* GWCA */ +enum rswitch_gwca_mode { + GWMC_OPC_RESET, + GWMC_OPC_DISABLE, + GWMC_OPC_CONFIG, + GWMC_OPC_OPERATION, +}; + +#define GWMS_OPS_MASK GWMC_OPC_OPERATION + +#define GWMTIRM_MTIOG BIT(0) +#define GWMTIRM_MTR BIT(1) + +#define GWVCC_VEM_SC_TAG (0x3 << 16) + +#define GWARIRM_ARIOG BIT(0) +#define GWARIRM_ARR BIT(1) + +#define GWDCC_BALR BIT(24) +#define GWDCC_DCP_MASK GENMASK(18, 16) +#define GWDCC_DCP(prio) FIELD_PREP(GWDCC_DCP_MASK, (prio)) +#define GWDCC_DQT BIT(11) +#define GWDCC_ETS BIT(9) +#define GWDCC_EDE BIT(8) + +#define GWTRC(queue) (GWTRC0 + (queue) / 32 * 4) +#define 
GWTPC_PPPL(ipv) BIT(ipv) +#define GWDCC_OFFS(queue) (GWDCC0 + (queue) * 4) + +#define GWDIS(i) (GWDIS0 + (i) * 0x10) +#define GWDIE(i) (GWDIE0 + (i) * 0x10) +#define GWDID(i) (GWDID0 + (i) * 0x10) + +/* COMA */ +#define RRC_RR BIT(0) +#define RRC_RR_CLR 0 +#define RCEC_ACE_DEFAULT (BIT(0) | BIT(AGENT_INDEX_GWCA)) +#define RCEC_RCE BIT(16) +#define RCDC_RCD BIT(16) + +#define CABPIRM_BPIOG BIT(0) +#define CABPIRM_BPR BIT(1) + +#define CABPPFLC_INIT_VALUE 0x00800080 + +/* MFWD */ +#define FWPC0_LTHTA BIT(0) +#define FWPC0_IP4UE BIT(3) +#define FWPC0_IP4TE BIT(4) +#define FWPC0_IP4OE BIT(5) +#define FWPC0_L2SE BIT(9) +#define FWPC0_IP4EA BIT(10) +#define FWPC0_IPDSA BIT(12) +#define FWPC0_IPHLA BIT(18) +#define FWPC0_MACSDA BIT(20) +#define FWPC0_MACHLA BIT(26) +#define FWPC0_MACHMA BIT(27) +#define FWPC0_VLANSA BIT(28) + +#define FWPC0(i) (FWPC00 + (i) * 0x10) +#define FWPC0_DEFAULT (FWPC0_LTHTA | FWPC0_IP4UE | FWPC0_IP4TE | \ + FWPC0_IP4OE | FWPC0_L2SE | FWPC0_IP4EA | \ + FWPC0_IPDSA | FWPC0_IPHLA | FWPC0_MACSDA | \ + FWPC0_MACHLA | FWPC0_MACHMA | FWPC0_VLANSA) +#define FWPC1(i) (FWPC10 + (i) * 0x10) +#define FWPC1_DDE BIT(0) + +#define FWPBFC(i) (FWPBFC0 + (i) * 0x10) + +#define FWPBFCSDC(j, i) (FWPBFCSDC00 + (i) * 0x10 + (j) * 0x04) + +/* TOP */ +#define TPEMIMC7(queue) (TPEMIMC70 + (queue) * 4) + +/* Descriptors */ +enum RX_DS_CC_BIT { + RX_DS = 0x0fff, /* Data size */ + RX_TR = 0x1000, /* Truncation indication */ + RX_EI = 0x2000, /* Error indication */ + RX_PS = 0xc000, /* Padding selection */ +}; + +enum TX_DS_TAGL_BIT { + TX_DS = 0x0fff, /* Data size */ + TX_TAGL = 0xf000, /* Frame tag LSBs */ +}; + +enum DIE_DT { + /* Frame data */ + DT_FSINGLE = 0x80, + DT_FSTART = 0x90, + DT_FMID = 0xa0, + DT_FEND = 0xb0, + + /* Chain control */ + DT_LEMPTY = 0xc0, + DT_EEMPTY = 0xd0, + DT_LINKFIX = 0x00, + DT_LINK = 0xe0, + DT_EOS = 0xf0, + /* HW/SW arbitration */ + DT_FEMPTY = 0x40, + DT_FEMPTY_IS = 0x10, + DT_FEMPTY_IC = 0x20, + DT_FEMPTY_ND = 0x30, + DT_FEMPTY_START = 0x50, + DT_FEMPTY_MID = 0x60, + DT_FEMPTY_END = 0x70, + + DT_MASK = 0xf0, + DIE = 0x08, /* Descriptor Interrupt Enable */ +}; + +/* Both transmission and reception */ +#define INFO1_FMT BIT(2) +#define INFO1_TXC BIT(3) + +/* For transmission */ +#define INFO1_TSUN(val) ((u64)(val) << 8ULL) +#define INFO1_IPV(prio) ((u64)(prio) << 28ULL) +#define INFO1_CSD0(index) ((u64)(index) << 32ULL) +#define INFO1_CSD1(index) ((u64)(index) << 40ULL) +#define INFO1_DV(port_vector) ((u64)(port_vector) << 48ULL) + +/* For reception */ +#define INFO1_SPN(port) ((u64)(port) << 36ULL) + +/* For timestamp descriptor in dptrl (Byte 4 to 7) */ +#define TS_DESC_TSUN(dptrl) ((dptrl) & GENMASK(7, 0)) +#define TS_DESC_SPN(dptrl) (((dptrl) & GENMASK(10, 8)) >> 8) +#define TS_DESC_DPN(dptrl) (((dptrl) & GENMASK(17, 16)) >> 16) +#define TS_DESC_TN(dptrl) ((dptrl) & BIT(24)) + +struct rswitch_desc { + __le16 info_ds; /* Descriptor size */ + u8 die_dt; /* Descriptor interrupt enable and type */ + __u8 dptrh; /* Descriptor pointer MSB */ + __le32 dptrl; /* Descriptor pointer LSW */ +} __packed; + +struct rswitch_ts_desc { + struct rswitch_desc desc; + __le32 ts_nsec; + __le32 ts_sec; +} __packed; + +struct rswitch_ext_desc { + struct rswitch_desc desc; + __le64 info1; +} __packed; + +struct rswitch_ext_ts_desc { + struct rswitch_desc desc; + __le64 info1; + __le32 ts_nsec; + __le32 ts_sec; +} __packed; + +struct rswitch_etha { + int index; + void __iomem *addr; + void __iomem *coma_addr; + bool external_phy; + struct mii_bus *mii; + phy_interface_t 
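+
+/* A sketch of how the INFO1_* helpers above combine into the 64-bit
+ * info1 word of a TX extended descriptor (values illustrative only;
+ * the real composition lives in the transmit path of rswitch.c):
+ *
+ *   desc->info1 = cpu_to_le64(INFO1_TSUN(ts_tag) | INFO1_FMT |
+ *                             INFO1_CSD0(tx_queue_index) |
+ *                             INFO1_DV(BIT(port)));
+ *
+ * i.e. the timestamp tag from bit 8, the descriptor chain index from
+ * bit 32 and the destination port vector from bit 48, while die_dt in
+ * the base descriptor selects the frame type (DT_FSINGLE etc.) and
+ * DIE requests a completion interrupt.
+ */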
phy_interface; + u32 psmcs; + u8 mac_addr[MAX_ADDR_LEN]; + int link; + int speed; + + /* This hardware could not be initialized twice so that marked + * this flag to avoid multiple initialization. + */ + bool operated; +}; + +/* The datasheet said descriptor "chain" and/or "queue". For consistency of + * name, this driver calls "queue". + */ +struct rswitch_gwca_queue { + union { + struct rswitch_ext_desc *tx_ring; + struct rswitch_ext_ts_desc *rx_ring; + struct rswitch_ts_desc *ts_ring; + }; + + /* Common */ + dma_addr_t ring_dma; + int ring_size; + int cur; + int dirty; + + /* For [rt]_ring */ + int index; + bool dir_tx; + struct sk_buff **skbs; + struct net_device *ndev; /* queue to ndev for irq */ +}; + +struct rswitch_gwca_ts_info { + struct sk_buff *skb; + struct list_head list; + + int port; + u8 tag; +}; + +#define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32)) +struct rswitch_gwca { + int index; + struct rswitch_desc *linkfix_table; + dma_addr_t linkfix_table_dma; + u32 linkfix_table_size; + struct rswitch_gwca_queue *queues; + int num_queues; + struct rswitch_gwca_queue ts_queue; + struct list_head ts_info_list; + DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES); + u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS]; + u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS]; + int speed; +}; + +#define NUM_QUEUES_PER_NDEV 2 +struct rswitch_device { + struct rswitch_private *priv; + struct net_device *ndev; + struct napi_struct napi; + void __iomem *addr; + struct rswitch_gwca_queue *tx_queue; + struct rswitch_gwca_queue *rx_queue; + u8 ts_tag; + bool disabled; + + int port; + struct rswitch_etha *etha; + struct device_node *np_port; + struct phy *serdes; +}; + +struct rswitch_mfwd_mac_table_entry { + int queue_index; + unsigned char addr[MAX_ADDR_LEN]; +}; + +struct rswitch_mfwd { + struct rswitch_mac_table_entry *mac_table_entries; + int num_mac_table_entries; +}; + +struct rswitch_private { + struct platform_device *pdev; + void __iomem *addr; + struct rcar_gen4_ptp_private *ptp_priv; + + struct rswitch_device *rdev[RSWITCH_NUM_PORTS]; + DECLARE_BITMAP(opened_ports, RSWITCH_NUM_PORTS); + + struct rswitch_gwca gwca; + struct rswitch_etha etha[RSWITCH_NUM_PORTS]; + struct rswitch_mfwd mfwd; + + spinlock_t lock; /* lock interrupt registers' control */ + struct clk *clk; + + bool etha_no_runtime_change; + bool gwca_halt; +}; + +#endif /* #ifndef __RSWITCH_H__ */ diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c new file mode 100644 index 0000000000..274ea16c0a --- /dev/null +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -0,0 +1,3578 @@ +// SPDX-License-Identifier: GPL-2.0 +/* SuperH Ethernet device driver + * + * Copyright (C) 2014 Renesas Electronics Corporation + * Copyright (C) 2006-2012 Nobuhiro Iwamatsu + * Copyright (C) 2008-2014 Renesas Solutions Corp. + * Copyright (C) 2013-2017 Cogent Embedded, Inc. + * Copyright (C) 2014 Codethink Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sh_eth.h" + +#define SH_ETH_DEF_MSG_ENABLE \ + (NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_RX_ERR| \ + NETIF_MSG_TX_ERR) + +#define SH_ETH_OFFSET_INVALID ((u16)~0) + +#define SH_ETH_OFFSET_DEFAULTS \ + [0 ... 
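+
+/* cur and dirty in struct rswitch_gwca_queue above are free-running
+ * counters: an entry index is taken modulo ring_size, and cur - dirty
+ * is the number of in-flight descriptors. A sketch of the convention
+ * (the same scheme sh_eth_tx_free() below uses for cur_tx/dirty_tx):
+ *
+ *   entry = q->cur % q->ring_size;       // producer side
+ *   ...fill q->tx_ring[entry] and q->skbs[entry]...
+ *   q->cur++;
+ *
+ *   while (q->cur - q->dirty > 0) {      // reclaim side
+ *           entry = q->dirty % q->ring_size;
+ *           ...unmap and free q->skbs[entry]...
+ *           q->dirty++;
+ *   }
+ */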
SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID + +/* use some intentionally tricky logic here to initialize the whole struct to + * 0xffff, but then override certain fields, requiring us to indicate that we + * "know" that there are overrides in this structure, and we'll need to disable + * that warning from W=1 builds. GCC has supported this option since 4.2.X, but + * the macros available to do this only define GCC 8. + */ +__diag_push(); +__diag_ignore(GCC, 8, "-Woverride-init", + "logic to initialize all and then override some is OK"); +static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [EDSR] = 0x0000, + [EDMR] = 0x0400, + [EDTRR] = 0x0408, + [EDRRR] = 0x0410, + [EESR] = 0x0428, + [EESIPR] = 0x0430, + [TDLAR] = 0x0010, + [TDFAR] = 0x0014, + [TDFXR] = 0x0018, + [TDFFR] = 0x001c, + [RDLAR] = 0x0030, + [RDFAR] = 0x0034, + [RDFXR] = 0x0038, + [RDFFR] = 0x003c, + [TRSCER] = 0x0438, + [RMFCR] = 0x0440, + [TFTR] = 0x0448, + [FDR] = 0x0450, + [RMCR] = 0x0458, + [RPADIR] = 0x0460, + [FCFTR] = 0x0468, + [CSMR] = 0x04E4, + + [ECMR] = 0x0500, + [ECSR] = 0x0510, + [ECSIPR] = 0x0518, + [PIR] = 0x0520, + [PSR] = 0x0528, + [PIPR] = 0x052c, + [RFLR] = 0x0508, + [APR] = 0x0554, + [MPR] = 0x0558, + [PFTCR] = 0x055c, + [PFRCR] = 0x0560, + [TPAUSER] = 0x0564, + [GECMR] = 0x05b0, + [BCULR] = 0x05b4, + [MAHR] = 0x05c0, + [MALR] = 0x05c8, + [TROCR] = 0x0700, + [CDCR] = 0x0708, + [LCCR] = 0x0710, + [CEFCR] = 0x0740, + [FRECR] = 0x0748, + [TSFRCR] = 0x0750, + [TLFRCR] = 0x0758, + [RFCR] = 0x0760, + [CERCR] = 0x0768, + [CEECR] = 0x0770, + [MAFCR] = 0x0778, + [RMII_MII] = 0x0790, + + [ARSTR] = 0x0000, + [TSU_CTRST] = 0x0004, + [TSU_FWEN0] = 0x0010, + [TSU_FWEN1] = 0x0014, + [TSU_FCM] = 0x0018, + [TSU_BSYSL0] = 0x0020, + [TSU_BSYSL1] = 0x0024, + [TSU_PRISL0] = 0x0028, + [TSU_PRISL1] = 0x002c, + [TSU_FWSL0] = 0x0030, + [TSU_FWSL1] = 0x0034, + [TSU_FWSLC] = 0x0038, + [TSU_QTAGM0] = 0x0040, + [TSU_QTAGM1] = 0x0044, + [TSU_FWSR] = 0x0050, + [TSU_FWINMK] = 0x0054, + [TSU_ADQT0] = 0x0048, + [TSU_ADQT1] = 0x004c, + [TSU_VTAG0] = 0x0058, + [TSU_VTAG1] = 0x005c, + [TSU_ADSBSY] = 0x0060, + [TSU_TEN] = 0x0064, + [TSU_POST1] = 0x0070, + [TSU_POST2] = 0x0074, + [TSU_POST3] = 0x0078, + [TSU_POST4] = 0x007c, + [TSU_ADRH0] = 0x0100, + + [TXNLCR0] = 0x0080, + [TXALCR0] = 0x0084, + [RXNLCR0] = 0x0088, + [RXALCR0] = 0x008c, + [FWNLCR0] = 0x0090, + [FWALCR0] = 0x0094, + [TXNLCR1] = 0x00a0, + [TXALCR1] = 0x00a4, + [RXNLCR1] = 0x00a8, + [RXALCR1] = 0x00ac, + [FWNLCR1] = 0x00b0, + [FWALCR1] = 0x00b4, +}; + +static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [ECMR] = 0x0300, + [RFLR] = 0x0308, + [ECSR] = 0x0310, + [ECSIPR] = 0x0318, + [PIR] = 0x0320, + [PSR] = 0x0328, + [RDMLR] = 0x0340, + [IPGR] = 0x0350, + [APR] = 0x0354, + [MPR] = 0x0358, + [RFCF] = 0x0360, + [TPAUSER] = 0x0364, + [TPAUSECR] = 0x0368, + [MAHR] = 0x03c0, + [MALR] = 0x03c8, + [TROCR] = 0x03d0, + [CDCR] = 0x03d4, + [LCCR] = 0x03d8, + [CNDCR] = 0x03dc, + [CEFCR] = 0x03e4, + [FRECR] = 0x03e8, + [TSFRCR] = 0x03ec, + [TLFRCR] = 0x03f0, + [RFCR] = 0x03f4, + [MAFCR] = 0x03f8, + + [EDMR] = 0x0200, + [EDTRR] = 0x0208, + [EDRRR] = 0x0210, + [TDLAR] = 0x0218, + [RDLAR] = 0x0220, + [EESR] = 0x0228, + [EESIPR] = 0x0230, + [TRSCER] = 0x0238, + [RMFCR] = 0x0240, + [TFTR] = 0x0248, + [FDR] = 0x0250, + [RMCR] = 0x0258, + [TFUCR] = 0x0264, + [RFOCR] = 0x0268, + [RMIIMODE] = 0x026c, + [FCFTR] = 0x0270, + [TRIMD] = 0x027c, +}; + +static const u16 
sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [ECMR] = 0x0100, + [RFLR] = 0x0108, + [ECSR] = 0x0110, + [ECSIPR] = 0x0118, + [PIR] = 0x0120, + [PSR] = 0x0128, + [RDMLR] = 0x0140, + [IPGR] = 0x0150, + [APR] = 0x0154, + [MPR] = 0x0158, + [TPAUSER] = 0x0164, + [RFCF] = 0x0160, + [TPAUSECR] = 0x0168, + [BCFRR] = 0x016c, + [MAHR] = 0x01c0, + [MALR] = 0x01c8, + [TROCR] = 0x01d0, + [CDCR] = 0x01d4, + [LCCR] = 0x01d8, + [CNDCR] = 0x01dc, + [CEFCR] = 0x01e4, + [FRECR] = 0x01e8, + [TSFRCR] = 0x01ec, + [TLFRCR] = 0x01f0, + [RFCR] = 0x01f4, + [MAFCR] = 0x01f8, + [RTRATE] = 0x01fc, + + [EDMR] = 0x0000, + [EDTRR] = 0x0008, + [EDRRR] = 0x0010, + [TDLAR] = 0x0018, + [RDLAR] = 0x0020, + [EESR] = 0x0028, + [EESIPR] = 0x0030, + [TRSCER] = 0x0038, + [RMFCR] = 0x0040, + [TFTR] = 0x0048, + [FDR] = 0x0050, + [RMCR] = 0x0058, + [TFUCR] = 0x0064, + [RFOCR] = 0x0068, + [FCFTR] = 0x0070, + [RPADIR] = 0x0078, + [TRIMD] = 0x007c, + [RBWAR] = 0x00c8, + [RDFAR] = 0x00cc, + [TBRAR] = 0x00d4, + [TDFAR] = 0x00d8, +}; + +static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [EDMR] = 0x0000, + [EDTRR] = 0x0004, + [EDRRR] = 0x0008, + [TDLAR] = 0x000c, + [RDLAR] = 0x0010, + [EESR] = 0x0014, + [EESIPR] = 0x0018, + [TRSCER] = 0x001c, + [RMFCR] = 0x0020, + [TFTR] = 0x0024, + [FDR] = 0x0028, + [RMCR] = 0x002c, + [EDOCR] = 0x0030, + [FCFTR] = 0x0034, + [RPADIR] = 0x0038, + [TRIMD] = 0x003c, + [RBWAR] = 0x0040, + [RDFAR] = 0x0044, + [TBRAR] = 0x004c, + [TDFAR] = 0x0050, + + [ECMR] = 0x0160, + [ECSR] = 0x0164, + [ECSIPR] = 0x0168, + [PIR] = 0x016c, + [MAHR] = 0x0170, + [MALR] = 0x0174, + [RFLR] = 0x0178, + [PSR] = 0x017c, + [TROCR] = 0x0180, + [CDCR] = 0x0184, + [LCCR] = 0x0188, + [CNDCR] = 0x018c, + [CEFCR] = 0x0194, + [FRECR] = 0x0198, + [TSFRCR] = 0x019c, + [TLFRCR] = 0x01a0, + [RFCR] = 0x01a4, + [MAFCR] = 0x01a8, + [IPGR] = 0x01b4, + [APR] = 0x01b8, + [MPR] = 0x01bc, + [TPAUSER] = 0x01c4, + [BCFR] = 0x01cc, + + [ARSTR] = 0x0000, + [TSU_CTRST] = 0x0004, + [TSU_FWEN0] = 0x0010, + [TSU_FWEN1] = 0x0014, + [TSU_FCM] = 0x0018, + [TSU_BSYSL0] = 0x0020, + [TSU_BSYSL1] = 0x0024, + [TSU_PRISL0] = 0x0028, + [TSU_PRISL1] = 0x002c, + [TSU_FWSL0] = 0x0030, + [TSU_FWSL1] = 0x0034, + [TSU_FWSLC] = 0x0038, + [TSU_QTAGM0] = 0x0040, + [TSU_QTAGM1] = 0x0044, + [TSU_ADQT0] = 0x0048, + [TSU_ADQT1] = 0x004c, + [TSU_FWSR] = 0x0050, + [TSU_FWINMK] = 0x0054, + [TSU_ADSBSY] = 0x0060, + [TSU_TEN] = 0x0064, + [TSU_POST1] = 0x0070, + [TSU_POST2] = 0x0074, + [TSU_POST3] = 0x0078, + [TSU_POST4] = 0x007c, + + [TXNLCR0] = 0x0080, + [TXALCR0] = 0x0084, + [RXNLCR0] = 0x0088, + [RXALCR0] = 0x008c, + [FWNLCR0] = 0x0090, + [FWALCR0] = 0x0094, + [TXNLCR1] = 0x00a0, + [TXALCR1] = 0x00a4, + [RXNLCR1] = 0x00a8, + [RXALCR1] = 0x00ac, + [FWNLCR1] = 0x00b0, + [FWALCR1] = 0x00b4, + + [TSU_ADRH0] = 0x0100, +}; +__diag_pop(); + +static void sh_eth_rcv_snd_disable(struct net_device *ndev); +static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); + +static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return; + + iowrite32(data, mdp->addr + offset); +} + +static u32 sh_eth_read(struct net_device *ndev, int enum_index) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return ~0U; + + return 
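+
+/* The per-SoC tables above map each logical register (the enum index)
+ * to a real byte offset, with SH_ETH_OFFSET_INVALID marking registers
+ * a given core lacks. The accessors translate on every access, e.g.:
+ *
+ *   sh_eth_write(ndev, data, ECMR);
+ *     // offset = mdp->reg_offset[ECMR]: 0x0500 on the gigabit layout,
+ *     // 0x0300 on fast R-Car
+ *     // iowrite32(data, mdp->addr + offset);
+ *
+ * so the rest of the driver is written once against register names and
+ * runs on all supported register layouts.
+ */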
ioread32(mdp->addr + offset); +} + +static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear, + u32 set) +{ + sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set, + enum_index); +} + +static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) +{ + return mdp->reg_offset[enum_index]; +} + +static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, + int enum_index) +{ + u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return; + + iowrite32(data, mdp->tsu_addr + offset); +} + +static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) +{ + u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return ~0U; + + return ioread32(mdp->tsu_addr + offset); +} + +static void sh_eth_soft_swap(char *src, int len) +{ +#ifdef __LITTLE_ENDIAN + u32 *p = (u32 *)src; + u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32)); + + for (; p < maxp; p++) + *p = swab32(*p); +#endif +} + +static void sh_eth_select_mii(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 value; + + switch (mdp->phy_interface) { + case PHY_INTERFACE_MODE_RGMII ... PHY_INTERFACE_MODE_RGMII_TXID: + value = 0x3; + break; + case PHY_INTERFACE_MODE_GMII: + value = 0x2; + break; + case PHY_INTERFACE_MODE_MII: + value = 0x1; + break; + case PHY_INTERFACE_MODE_RMII: + value = 0x0; + break; + default: + netdev_warn(ndev, + "PHY interface mode was not setup. Set to MII.\n"); + value = 0x1; + break; + } + + sh_eth_write(ndev, value, RMII_MII); +} + +static void sh_eth_set_duplex(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? 
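+
+/* sh_eth_modify() above is a read-modify-write helper: bits in 'clear'
+ * are masked off, then bits in 'set' are ORed in. The ECMR_DM update
+ * here in sh_eth_set_duplex() is the typical single-bit idiom:
+ *
+ *   sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
+ *   // full duplex: ECMR_DM cleared, then set again -> bit ends up 1
+ *   // half duplex: ECMR_DM cleared, 'set' adds 0   -> bit ends up 0
+ *
+ * Passing the same mask as 'clear' and a conditional 'set' recurs
+ * throughout this driver wherever one flag is toggled.
+ */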
ECMR_DM : 0); +} + +static void sh_eth_chip_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* reset device */ + sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR); + mdelay(1); +} + +static int sh_eth_soft_reset(struct net_device *ndev) +{ + sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER); + mdelay(3); + sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0); + + return 0; +} + +static int sh_eth_check_soft_reset(struct net_device *ndev) +{ + int cnt; + + for (cnt = 100; cnt > 0; cnt--) { + if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER)) + return 0; + mdelay(1); + } + + netdev_err(ndev, "Device reset failed\n"); + return -ETIMEDOUT; +} + +static int sh_eth_soft_reset_gether(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + sh_eth_write(ndev, EDSR_ENALL, EDSR); + sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER); + + ret = sh_eth_check_soft_reset(ndev); + if (ret) + return ret; + + /* Table Init */ + sh_eth_write(ndev, 0, TDLAR); + sh_eth_write(ndev, 0, TDFAR); + sh_eth_write(ndev, 0, TDFXR); + sh_eth_write(ndev, 0, TDFFR); + sh_eth_write(ndev, 0, RDLAR); + sh_eth_write(ndev, 0, RDFAR); + sh_eth_write(ndev, 0, RDFXR); + sh_eth_write(ndev, 0, RDFFR); + + /* Reset HW CRC register */ + if (mdp->cd->csmr) + sh_eth_write(ndev, 0, CSMR); + + /* Select MII mode */ + if (mdp->cd->select_mii) + sh_eth_select_mii(ndev); + + return ret; +} + +static void sh_eth_set_rate_gether(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (WARN_ON(!mdp->cd->gecmr)) + return; + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, GECMR_10, GECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, GECMR_100, GECMR); + break; + case 1000: /* 1000BASE */ + sh_eth_write(ndev, GECMR_1000, GECMR); + break; + } +} + +#ifdef CONFIG_OF +/* R7S72100 */ +static struct sh_eth_cpu_data r7s72100_data = { + .soft_reset = sh_eth_soft_reset_gether, + + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + + .register_type = SH_ETH_REG_GIGABIT, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_ICD, + .ecsipr_value = ECSIPR_ICDIP, + .eesipr_value = EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP | + EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP | + EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE, + .fdr_value = 0x0000070f, + + .trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE, + + .no_psr = 1, + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rpadir = 1, + .no_trimd = 1, + .no_ade = 1, + .xdfar_rw = 1, + .csmr = 1, + .rx_csum = 1, + .tsu = 1, + .no_tx_cntrs = 1, +}; + +static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) +{ + sh_eth_chip_reset(ndev); + + sh_eth_select_mii(ndev); +} + +/* R8A7740 */ +static struct sh_eth_cpu_data r8a7740_data = { + .soft_reset = sh_eth_soft_reset_gether, + + .chip_reset = sh_eth_chip_reset_r8a7740, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + 
EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE, + .fdr_value = 0x0000070f, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .gecmr = 1, + .bculr = 1, + .hw_swap = 1, + .rpadir = 1, + .no_trimd = 1, + .no_ade = 1, + .xdfar_rw = 1, + .csmr = 1, + .rx_csum = 1, + .tsu = 1, + .select_mii = 1, + .magic = 1, + .cexcr = 1, +}; + +/* There is CPU dependent code */ +static void sh_eth_set_rate_rcar(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_modify(ndev, ECMR, ECMR_ELB, 0); + break; + case 100:/* 100BASE */ + sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB); + break; + } +} + +/* R-Car Gen1 */ +static struct sh_eth_cpu_data rcar_gen1_data = { + .soft_reset = sh_eth_soft_reset, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_rcar, + + .register_type = SH_ETH_REG_FAST_RCAR, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, + .fdr_value = 0x00000f0f, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .no_xdfar = 1, +}; + +/* R-Car Gen2 and RZ/G1 */ +static struct sh_eth_cpu_data rcar_gen2_data = { + .soft_reset = sh_eth_soft_reset, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_rcar, + + .register_type = SH_ETH_REG_FAST_RCAR, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP | + ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, + .fdr_value = 0x00000f0f, + + .trscer_err_mask = TRSCER_RMAFCE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .no_xdfar = 1, + .rmiimode = 1, + .magic = 1, +}; + +/* R8A77980 */ +static struct sh_eth_cpu_data r8a77980_data = { + .soft_reset = sh_eth_soft_reset_gether, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP | + ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | 
EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | + EESR_TFE | EESR_TDE | EESR_ECI, + .fdr_value = 0x0000070f, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .gecmr = 1, + .bculr = 1, + .hw_swap = 1, + .nbst = 1, + .rpadir = 1, + .no_trimd = 1, + .no_ade = 1, + .xdfar_rw = 1, + .csmr = 1, + .rx_csum = 1, + .select_mii = 1, + .magic = 1, + .cexcr = 1, +}; + +/* R7S9210 */ +static struct sh_eth_cpu_data r7s9210_data = { + .soft_reset = sh_eth_soft_reset, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_rcar, + + .register_type = SH_ETH_REG_FAST_SH4, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .ecsr_value = ECSR_ICD, + .ecsipr_value = ECSIPR_ICDIP, + .eesipr_value = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP | + EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP | + EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP | + EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP | + EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, + + .fdr_value = 0x0000070f, + + .trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rpadir = 1, + .no_ade = 1, + .xdfar_rw = 1, +}; +#endif /* CONFIG_OF */ + +static void sh_eth_set_rate_sh7724(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_modify(ndev, ECMR, ECMR_RTM, 0); + break; + case 100:/* 100BASE */ + sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM); + break; + } +} + +/* SH7724 */ +static struct sh_eth_cpu_data sh7724_data = { + .soft_reset = sh_eth_soft_reset, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_sh7724, + + .register_type = SH_ETH_REG_FAST_SH4, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rpadir = 1, +}; + +static void sh_eth_set_rate_sh7757(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, 0, RTRATE); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, 1, RTRATE); + break; + } +} + +/* SH7757 */ +static struct sh_eth_cpu_data sh7757_data = { + .soft_reset = sh_eth_soft_reset, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_sh7757, + + .register_type = SH_ETH_REG_FAST_SH4, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | 
EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, + + .irq_flags = IRQF_SHARED, + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .no_ade = 1, + .rpadir = 1, + .rtrate = 1, + .dual_port = 1, +}; + +#define SH_GIGA_ETH_BASE 0xfee00000UL +#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) +#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) +static void sh_eth_chip_reset_giga(struct net_device *ndev) +{ + u32 mahr[2], malr[2]; + int i; + + /* save MAHR and MALR */ + for (i = 0; i < 2; i++) { + malr[i] = ioread32((void *)GIGA_MALR(i)); + mahr[i] = ioread32((void *)GIGA_MAHR(i)); + } + + sh_eth_chip_reset(ndev); + + /* restore MAHR and MALR */ + for (i = 0; i < 2; i++) { + iowrite32(malr[i], (void *)GIGA_MALR(i)); + iowrite32(mahr[i], (void *)GIGA_MAHR(i)); + } +} + +static void sh_eth_set_rate_giga(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (WARN_ON(!mdp->cd->gecmr)) + return; + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, 0x00000000, GECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, 0x00000010, GECMR); + break; + case 1000: /* 1000BASE */ + sh_eth_write(ndev, 0x00000020, GECMR); + break; + } +} + +/* SH7757(GETHERC) */ +static struct sh_eth_cpu_data sh7757_data_giga = { + .soft_reset = sh_eth_soft_reset_gether, + + .chip_reset = sh_eth_chip_reset_giga, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_giga, + + .register_type = SH_ETH_REG_GIGABIT, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE, + .fdr_value = 0x0000072f, + + .irq_flags = IRQF_SHARED, + .apr = 1, + .mpr = 1, + .tpauser = 1, + .gecmr = 1, + .bculr = 1, + .hw_swap = 1, + .rpadir = 1, + .no_trimd = 1, + .no_ade = 1, + .xdfar_rw = 1, + .tsu = 1, + .cexcr = 1, + .dual_port = 1, +}; + +/* SH7734 */ +static struct sh_eth_cpu_data sh7734_data = { + .soft_reset = sh_eth_soft_reset_gether, + + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE 
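+
+/* sh_eth_chip_reset_giga() above saves and restores both ports' MAHR
+ * and MALR, apparently because the ARSTR reset issued by
+ * sh_eth_chip_reset() resets the whole controller and there is no ROM
+ * holding the MAC address. The two registers pack the six address
+ * bytes the same way update_mac_address() does later in this file
+ * (sketch, for a hypothetical address 02:11:22:33:44:55):
+ *
+ *   MAHR = (0x02 << 24) | (0x11 << 16) | (0x22 << 8) | 0x33
+ *        = 0x02112233;
+ *   MALR = (0x44 << 8) | 0x55 = 0x00004455;
+ */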
| EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .gecmr = 1, + .bculr = 1, + .hw_swap = 1, + .no_trimd = 1, + .no_ade = 1, + .xdfar_rw = 1, + .tsu = 1, + .csmr = 1, + .rx_csum = 1, + .select_mii = 1, + .magic = 1, + .cexcr = 1, +}; + +/* SH7763 */ +static struct sh_eth_cpu_data sh7763_data = { + .soft_reset = sh_eth_soft_reset_gether, + + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .gecmr = 1, + .bculr = 1, + .hw_swap = 1, + .no_trimd = 1, + .no_ade = 1, + .xdfar_rw = 1, + .tsu = 1, + .irq_flags = IRQF_SHARED, + .magic = 1, + .cexcr = 1, + .rx_csum = 1, + .dual_port = 1, +}; + +static struct sh_eth_cpu_data sh7619_data = { + .soft_reset = sh_eth_soft_reset, + + .register_type = SH_ETH_REG_FAST_SH3_SH2, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, +}; + +static struct sh_eth_cpu_data sh771x_data = { + .soft_reset = sh_eth_soft_reset, + + .register_type = SH_ETH_REG_FAST_SH3_SH2, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .trscer_err_mask = TRSCER_RMAFCE, + + .tsu = 1, + .dual_port = 1, +}; + +static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) +{ + if (!cd->ecsr_value) + cd->ecsr_value = DEFAULT_ECSR_INIT; + + if (!cd->ecsipr_value) + cd->ecsipr_value = DEFAULT_ECSIPR_INIT; + + if (!cd->fcftr_value) + cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | + DEFAULT_FIFO_F_D_RFD; + + if (!cd->fdr_value) + cd->fdr_value = DEFAULT_FDR_INIT; + + if (!cd->tx_check) + cd->tx_check = DEFAULT_TX_CHECK; + + if (!cd->eesr_err_check) + cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; + + if (!cd->trscer_err_mask) + cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK; +} + +static void sh_eth_set_receive_align(struct sk_buff *skb) +{ + uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1); + + if (reserve) + skb_reserve(skb, SH_ETH_RX_ALIGN - reserve); +} + +/* Program the hardware MAC address from dev->dev_addr. 
 */
+static void update_mac_address(struct net_device *ndev)
+{
+ sh_eth_write(ndev,
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ sh_eth_write(ndev,
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+}
+
+/* Get MAC address from the SuperH MAC address registers
+ *
+ * The SuperH Ethernet device has no ROM holding its MAC address.
+ * The driver picks up the MAC address that the bootloader (U-Boot or
+ * sh-ipl+g) programmed, so a MAC address must be set in the bootloader
+ * before this device is used.
+ */
+static void read_mac_address(struct net_device *ndev, unsigned char *mac)
+{
+ if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
+ eth_hw_addr_set(ndev, mac);
+ } else {
+ u32 mahr = sh_eth_read(ndev, MAHR);
+ u32 malr = sh_eth_read(ndev, MALR);
+ u8 addr[ETH_ALEN];
+
+ addr[0] = (mahr >> 24) & 0xFF;
+ addr[1] = (mahr >> 16) & 0xFF;
+ addr[2] = (mahr >> 8) & 0xFF;
+ addr[3] = (mahr >> 0) & 0xFF;
+ addr[4] = (malr >> 8) & 0xFF;
+ addr[5] = (malr >> 0) & 0xFF;
+ eth_hw_addr_set(ndev, addr);
+ }
+}
+
+struct bb_info {
+ void (*set_gate)(void *addr);
+ struct mdiobb_ctrl ctrl;
+ void *addr;
+};
+
+static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+ u32 pir;
+
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
+ pir = ioread32(bitbang->addr);
+ if (set)
+ pir |= mask;
+ else
+ pir &= ~mask;
+ iowrite32(pir, bitbang->addr);
+}
+
+/* Data I/O pin control */
+static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
+{
+ sh_mdio_ctrl(ctrl, PIR_MMD, bit);
+}
+
+/* Set bit data */
+static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
+{
+ sh_mdio_ctrl(ctrl, PIR_MDO, bit);
+}
+
+/* Get bit data */
+static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
+ return (ioread32(bitbang->addr) & PIR_MDI) != 0;
+}
+
+/* MDC pin control */
+static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
+{
+ sh_mdio_ctrl(ctrl, PIR_MDC, bit);
+}
+
+/* mdio bus control struct */
+static const struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = sh_mdc_ctrl,
+ .set_mdio_dir = sh_mmd_ctrl,
+ .set_mdio_data = sh_set_mdio,
+ .get_mdio_data = sh_get_mdio,
+};
+
+/* free Tx skb function */
+static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_txdesc *txdesc;
+ int free_num = 0;
+ int entry;
+ bool sent;
+
+ for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+ entry = mdp->dirty_tx % mdp->num_tx_ring;
+ txdesc = &mdp->tx_ring[entry];
+ sent = !(txdesc->status & cpu_to_le32(TD_TACT));
+ if (sent_only && !sent)
+ break;
+ /* TACT bit must be checked before all the following reads */
+ dma_rmb();
+ netif_info(mdp, tx_done, ndev,
+ "tx entry %d status 0x%08x\n",
+ entry, le32_to_cpu(txdesc->status));
+ /* Free the original skb.
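+ * The DMA-mapped length is kept in the upper 16 bits of the
+ * descriptor's len field, so that value (not skb->len) is what
+ * gets handed back to dma_unmap_single().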
*/ + if (mdp->tx_skbuff[entry]) { + dma_unmap_single(&mdp->pdev->dev, + le32_to_cpu(txdesc->addr), + le32_to_cpu(txdesc->len) >> 16, + DMA_TO_DEVICE); + dev_kfree_skb_irq(mdp->tx_skbuff[entry]); + mdp->tx_skbuff[entry] = NULL; + free_num++; + } + txdesc->status = cpu_to_le32(TD_TFP); + if (entry >= mdp->num_tx_ring - 1) + txdesc->status |= cpu_to_le32(TD_TDLE); + + if (sent) { + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16; + } + } + return free_num; +} + +/* free skb and descriptor buffer */ +static void sh_eth_ring_free(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ringsize, i; + + if (mdp->rx_ring) { + for (i = 0; i < mdp->num_rx_ring; i++) { + if (mdp->rx_skbuff[i]) { + struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; + + dma_unmap_single(&mdp->pdev->dev, + le32_to_cpu(rxdesc->addr), + ALIGN(mdp->rx_buf_sz, 32), + DMA_FROM_DEVICE); + } + } + ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; + dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring, + mdp->rx_desc_dma); + mdp->rx_ring = NULL; + } + + /* Free Rx skb ringbuffer */ + if (mdp->rx_skbuff) { + for (i = 0; i < mdp->num_rx_ring; i++) + dev_kfree_skb(mdp->rx_skbuff[i]); + } + kfree(mdp->rx_skbuff); + mdp->rx_skbuff = NULL; + + if (mdp->tx_ring) { + sh_eth_tx_free(ndev, false); + + ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; + dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring, + mdp->tx_desc_dma); + mdp->tx_ring = NULL; + } + + /* Free Tx skb ringbuffer */ + kfree(mdp->tx_skbuff); + mdp->tx_skbuff = NULL; +} + +/* format skb and descriptor buffer */ +static void sh_eth_ring_format(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i; + struct sk_buff *skb; + struct sh_eth_rxdesc *rxdesc = NULL; + struct sh_eth_txdesc *txdesc = NULL; + int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; + int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; + dma_addr_t dma_addr; + u32 buf_len; + + mdp->cur_rx = 0; + mdp->cur_tx = 0; + mdp->dirty_rx = 0; + mdp->dirty_tx = 0; + + memset(mdp->rx_ring, 0, rx_ringsize); + + /* build Rx ring buffer */ + for (i = 0; i < mdp->num_rx_ring; i++) { + /* skb */ + mdp->rx_skbuff[i] = NULL; + skb = netdev_alloc_skb(ndev, skbuff_size); + if (skb == NULL) + break; + sh_eth_set_receive_align(skb); + + /* The size of the buffer is a multiple of 32 bytes. */ + buf_len = ALIGN(mdp->rx_buf_sz, 32); + dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { + kfree_skb(skb); + break; + } + mdp->rx_skbuff[i] = skb; + + /* RX descriptor */ + rxdesc = &mdp->rx_ring[i]; + rxdesc->len = cpu_to_le32(buf_len << 16); + rxdesc->addr = cpu_to_le32(dma_addr); + rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); + + /* Rx descriptor address set */ + if (i == 0) { + sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); + if (mdp->cd->xdfar_rw) + sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); + } + } + + mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); + + /* Mark the last entry as wrapping the ring. 
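+ * RD_RDLE makes the DMAC return to the base of the ring after
+ * processing this descriptor.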
*/ + if (rxdesc) + rxdesc->status |= cpu_to_le32(RD_RDLE); + + memset(mdp->tx_ring, 0, tx_ringsize); + + /* build Tx ring buffer */ + for (i = 0; i < mdp->num_tx_ring; i++) { + mdp->tx_skbuff[i] = NULL; + txdesc = &mdp->tx_ring[i]; + txdesc->status = cpu_to_le32(TD_TFP); + txdesc->len = cpu_to_le32(0); + if (i == 0) { + /* Tx descriptor address set */ + sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); + if (mdp->cd->xdfar_rw) + sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); + } + } + + txdesc->status |= cpu_to_le32(TD_TDLE); +} + +/* Get skb and descriptor buffer */ +static int sh_eth_ring_init(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int rx_ringsize, tx_ringsize; + + /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the + * card needs room to do 8 byte alignment, +2 so we can reserve + * the first 2 bytes, and +16 gets room for the status word from the + * card. + */ + mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : + (((ndev->mtu + 26 + 7) & ~7) + 2 + 16)); + if (mdp->cd->rpadir) + mdp->rx_buf_sz += NET_IP_ALIGN; + + /* Allocate RX and TX skb rings */ + mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff), + GFP_KERNEL); + if (!mdp->rx_skbuff) + return -ENOMEM; + + mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff), + GFP_KERNEL); + if (!mdp->tx_skbuff) + goto ring_free; + + /* Allocate all Rx descriptors. */ + rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; + mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize, + &mdp->rx_desc_dma, GFP_KERNEL); + if (!mdp->rx_ring) + goto ring_free; + + mdp->dirty_rx = 0; + + /* Allocate all Tx descriptors. */ + tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; + mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize, + &mdp->tx_desc_dma, GFP_KERNEL); + if (!mdp->tx_ring) + goto ring_free; + return 0; + +ring_free: + /* Free Rx and Tx skb ring buffer and DMA buffer */ + sh_eth_ring_free(ndev); + + return -ENOMEM; +} + +static int sh_eth_dev_init(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + /* Soft Reset */ + ret = mdp->cd->soft_reset(ndev); + if (ret) + return ret; + + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + + /* Descriptor format */ + sh_eth_ring_format(ndev); + if (mdp->cd->rpadir) + sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR); + + /* all sh_eth int mask */ + sh_eth_write(ndev, 0, EESIPR); + +#if defined(__LITTLE_ENDIAN) + if (mdp->cd->hw_swap) + sh_eth_write(ndev, EDMR_EL, EDMR); + else +#endif + sh_eth_write(ndev, 0, EDMR); + + /* FIFO size set */ + sh_eth_write(ndev, mdp->cd->fdr_value, FDR); + sh_eth_write(ndev, 0, TFTR); + + /* Frame recv control (enable multiple-packets per rx irq) */ + sh_eth_write(ndev, RMCR_RNC, RMCR); + + sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); + + /* DMA transfer burst mode */ + if (mdp->cd->nbst) + sh_eth_modify(ndev, EDMR, EDMR_NBST, EDMR_NBST); + + /* Burst cycle count upper-limit */ + if (mdp->cd->bculr) + sh_eth_write(ndev, 0x800, BCULR); + + sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); + + if (!mdp->cd->no_trimd) + sh_eth_write(ndev, 0, TRIMD); + + /* Recv frame limit set register */ + sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, + RFLR); + + sh_eth_modify(ndev, EESR, 0, 0); + mdp->irq_enabled = true; + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); + + /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */ + sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? 
ECMR_DM : 0) | + (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) | + ECMR_TE | ECMR_RE, ECMR); + + if (mdp->cd->set_rate) + mdp->cd->set_rate(ndev); + + /* E-MAC Status Register clear */ + sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); + + /* E-MAC Interrupt Enable register */ + sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); + + /* Set MAC address */ + update_mac_address(ndev); + + /* mask reset */ + if (mdp->cd->apr) + sh_eth_write(ndev, 1, APR); + if (mdp->cd->mpr) + sh_eth_write(ndev, 1, MPR); + if (mdp->cd->tpauser) + sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); + + /* Setting the Rx mode will start the Rx process. */ + sh_eth_write(ndev, EDRRR_R, EDRRR); + + return ret; +} + +static void sh_eth_dev_exit(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i; + + /* Deactivate all TX descriptors, so DMA should stop at next + * packet boundary if it's currently running + */ + for (i = 0; i < mdp->num_tx_ring; i++) + mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT); + + /* Disable TX FIFO egress to MAC */ + sh_eth_rcv_snd_disable(ndev); + + /* Stop RX DMA at next packet boundary */ + sh_eth_write(ndev, 0, EDRRR); + + /* Aside from TX DMA, we can't tell when the hardware is + * really stopped, so we need to reset to make sure. + * Before doing that, wait for long enough to *probably* + * finish transmitting the last packet and poll stats. + */ + msleep(2); /* max frame time at 10 Mbps < 1250 us */ + sh_eth_get_stats(ndev); + mdp->cd->soft_reset(ndev); + + /* Set the RMII mode again if required */ + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + + /* Set MAC address again */ + update_mac_address(ndev); +} + +static void sh_eth_rx_csum(struct sk_buff *skb) +{ + u8 *hw_csum; + + /* The hardware checksum is 2 bytes appended to packet data */ + if (unlikely(skb->len < sizeof(__sum16))) + return; + hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); + skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + skb_trim(skb, skb->len - sizeof(__sum16)); +} + +/* Packet receive function */ +static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_rxdesc *rxdesc; + + int entry = mdp->cur_rx % mdp->num_rx_ring; + int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; + int limit; + struct sk_buff *skb; + u32 desc_status; + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; + dma_addr_t dma_addr; + u16 pkt_len; + u32 buf_len; + + boguscnt = min(boguscnt, *quota); + limit = boguscnt; + rxdesc = &mdp->rx_ring[entry]; + while (!(rxdesc->status & cpu_to_le32(RD_RACT))) { + /* RACT bit must be checked before all the following reads */ + dma_rmb(); + desc_status = le32_to_cpu(rxdesc->status); + pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL; + + if (--boguscnt < 0) + break; + + netif_info(mdp, rx_status, ndev, + "rx entry %d status 0x%08x len %d\n", + entry, desc_status, pkt_len); + + if (!(desc_status & RDFEND)) + ndev->stats.rx_length_errors++; + + /* In case of almost all GETHER/ETHERs, the Receive Frame State + * (RFS) bits in the Receive Descriptor 0 are from bit 9 to + * bit 0. However, in case of the R8A7740 and R7S72100 + * the RFS bits are from bit 25 to bit 16. So, the + * driver needs right shifting by 16. 
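+ * The per-SoC csmr capability flag identifies the chips that
+ * need this shift.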
+ */ + if (mdp->cd->csmr) + desc_status >>= 16; + + skb = mdp->rx_skbuff[entry]; + if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | + RD_RFS5 | RD_RFS6 | RD_RFS10)) { + ndev->stats.rx_errors++; + if (desc_status & RD_RFS1) + ndev->stats.rx_crc_errors++; + if (desc_status & RD_RFS2) + ndev->stats.rx_frame_errors++; + if (desc_status & RD_RFS3) + ndev->stats.rx_length_errors++; + if (desc_status & RD_RFS4) + ndev->stats.rx_length_errors++; + if (desc_status & RD_RFS6) + ndev->stats.rx_missed_errors++; + if (desc_status & RD_RFS10) + ndev->stats.rx_over_errors++; + } else if (skb) { + dma_addr = le32_to_cpu(rxdesc->addr); + if (!mdp->cd->hw_swap) + sh_eth_soft_swap( + phys_to_virt(ALIGN(dma_addr, 4)), + pkt_len + 2); + mdp->rx_skbuff[entry] = NULL; + if (mdp->cd->rpadir) + skb_reserve(skb, NET_IP_ALIGN); + dma_unmap_single(&mdp->pdev->dev, dma_addr, + ALIGN(mdp->rx_buf_sz, 32), + DMA_FROM_DEVICE); + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + if (ndev->features & NETIF_F_RXCSUM) + sh_eth_rx_csum(skb); + netif_receive_skb(skb); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += pkt_len; + if (desc_status & RD_RFS8) + ndev->stats.multicast++; + } + entry = (++mdp->cur_rx) % mdp->num_rx_ring; + rxdesc = &mdp->rx_ring[entry]; + } + + /* Refill the Rx ring buffers. */ + for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { + entry = mdp->dirty_rx % mdp->num_rx_ring; + rxdesc = &mdp->rx_ring[entry]; + /* The size of the buffer is 32 byte boundary. */ + buf_len = ALIGN(mdp->rx_buf_sz, 32); + rxdesc->len = cpu_to_le32(buf_len << 16); + + if (mdp->rx_skbuff[entry] == NULL) { + skb = netdev_alloc_skb(ndev, skbuff_size); + if (skb == NULL) + break; /* Better luck next round. */ + sh_eth_set_receive_align(skb); + dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, + buf_len, DMA_FROM_DEVICE); + if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { + kfree_skb(skb); + break; + } + mdp->rx_skbuff[entry] = skb; + + skb_checksum_none_assert(skb); + rxdesc->addr = cpu_to_le32(dma_addr); + } + dma_wmb(); /* RACT bit must be set after all the above writes */ + if (entry >= mdp->num_rx_ring - 1) + rxdesc->status |= + cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE); + else + rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP); + } + + /* Restart Rx engine if stopped. */ + /* If we don't need to check status, don't. 
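+ * (The hardware clears EDRRR.R when reception stops, e.g. on a
+ * receive descriptor empty condition, so testing it here suffices.)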
-KDU */ + if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { + /* fix the values for the next receiving if RDE is set */ + if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) { + u32 count = (sh_eth_read(ndev, RDFAR) - + sh_eth_read(ndev, RDLAR)) >> 4; + + mdp->cur_rx = count; + mdp->dirty_rx = count; + } + sh_eth_write(ndev, EDRRR_R, EDRRR); + } + + *quota -= limit - boguscnt - 1; + + return *quota <= 0; +} + +static void sh_eth_rcv_snd_disable(struct net_device *ndev) +{ + /* disable tx and rx */ + sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0); +} + +static void sh_eth_rcv_snd_enable(struct net_device *ndev) +{ + /* enable tx and rx */ + sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); +} + +/* E-MAC interrupt handler */ +static void sh_eth_emac_interrupt(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 felic_stat; + u32 link_stat; + + felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR); + sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ + if (felic_stat & ECSR_ICD) + ndev->stats.tx_carrier_errors++; + if (felic_stat & ECSR_MPD) + pm_wakeup_event(&mdp->pdev->dev, 0); + if (felic_stat & ECSR_LCHNG) { + /* Link Changed */ + if (mdp->cd->no_psr || mdp->no_ether_link) + return; + link_stat = sh_eth_read(ndev, PSR); + if (mdp->ether_link_active_low) + link_stat = ~link_stat; + if (!(link_stat & PSR_LMON)) { + sh_eth_rcv_snd_disable(ndev); + } else { + /* Link Up */ + sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0); + /* clear int */ + sh_eth_modify(ndev, ECSR, 0, 0); + sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP); + /* enable tx and rx */ + sh_eth_rcv_snd_enable(ndev); + } + } +} + +/* error control function */ +static void sh_eth_error(struct net_device *ndev, u32 intr_status) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 mask; + + if (intr_status & EESR_TWB) { + /* Unused write back interrupt */ + if (intr_status & EESR_TABT) { /* Transmit Abort int */ + ndev->stats.tx_aborted_errors++; + netif_err(mdp, tx_err, ndev, "Transmit Abort\n"); + } + } + + if (intr_status & EESR_RABT) { + /* Receive Abort int */ + if (intr_status & EESR_RFRMER) { + /* Receive Frame Overflow int */ + ndev->stats.rx_frame_errors++; + } + } + + if (intr_status & EESR_TDE) { + /* Transmit Descriptor Empty int */ + ndev->stats.tx_fifo_errors++; + netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n"); + } + + if (intr_status & EESR_TFE) { + /* FIFO under flow */ + ndev->stats.tx_fifo_errors++; + netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n"); + } + + if (intr_status & EESR_RDE) { + /* Receive Descriptor Empty int */ + ndev->stats.rx_over_errors++; + } + + if (intr_status & EESR_RFE) { + /* Receive FIFO Overflow int */ + ndev->stats.rx_fifo_errors++; + } + + if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { + /* Address Error */ + ndev->stats.tx_fifo_errors++; + netif_err(mdp, tx_err, ndev, "Address Error\n"); + } + + mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; + if (mdp->cd->no_ade) + mask &= ~EESR_ADE; + if (intr_status & mask) { + /* Tx error */ + u32 edtrr = sh_eth_read(ndev, EDTRR); + + /* dmesg */ + netdev_err(ndev, "TX error. 
status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", + intr_status, mdp->cur_tx, mdp->dirty_tx, + (u32)ndev->state, edtrr); + /* dirty buffer free */ + sh_eth_tx_free(ndev, true); + + /* SH7712 BUG */ + if (edtrr ^ mdp->cd->edtrr_trns) { + /* tx dma start */ + sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); + } + /* wakeup */ + netif_wake_queue(ndev); + } +} + +static irqreturn_t sh_eth_interrupt(int irq, void *netdev) +{ + struct net_device *ndev = netdev; + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_cpu_data *cd = mdp->cd; + irqreturn_t ret = IRQ_NONE; + u32 intr_status, intr_enable; + + spin_lock(&mdp->lock); + + /* Get interrupt status */ + intr_status = sh_eth_read(ndev, EESR); + /* Mask it with the interrupt mask, forcing ECI interrupt to be always + * enabled since it's the one that comes thru regardless of the mask, + * and we need to fully handle it in sh_eth_emac_interrupt() in order + * to quench it as it doesn't get cleared by just writing 1 to the ECI + * bit... + */ + intr_enable = sh_eth_read(ndev, EESIPR); + intr_status &= intr_enable | EESIPR_ECIIP; + if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI | + cd->eesr_err_check)) + ret = IRQ_HANDLED; + else + goto out; + + if (unlikely(!mdp->irq_enabled)) { + sh_eth_write(ndev, 0, EESIPR); + goto out; + } + + if (intr_status & EESR_RX_CHECK) { + if (napi_schedule_prep(&mdp->napi)) { + /* Mask Rx interrupts */ + sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK, + EESIPR); + __napi_schedule(&mdp->napi); + } else { + netdev_warn(ndev, + "ignoring interrupt, status 0x%08x, mask 0x%08x.\n", + intr_status, intr_enable); + } + } + + /* Tx Check */ + if (intr_status & cd->tx_check) { + /* Clear Tx interrupts */ + sh_eth_write(ndev, intr_status & cd->tx_check, EESR); + + sh_eth_tx_free(ndev, true); + netif_wake_queue(ndev); + } + + /* E-MAC interrupt */ + if (intr_status & EESR_ECI) + sh_eth_emac_interrupt(ndev); + + if (intr_status & cd->eesr_err_check) { + /* Clear error interrupts */ + sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR); + + sh_eth_error(ndev, intr_status); + } + +out: + spin_unlock(&mdp->lock); + + return ret; +} + +static int sh_eth_poll(struct napi_struct *napi, int budget) +{ + struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private, + napi); + struct net_device *ndev = napi->dev; + int quota = budget; + u32 intr_status; + + for (;;) { + intr_status = sh_eth_read(ndev, EESR); + if (!(intr_status & EESR_RX_CHECK)) + break; + /* Clear Rx interrupts */ + sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR); + + if (sh_eth_rx(ndev, intr_status, "a)) + goto out; + } + + napi_complete(napi); + + /* Reenable Rx interrupts */ + if (mdp->irq_enabled) + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); +out: + return budget - quota; +} + +/* PHY state control function */ +static void sh_eth_adjust_link(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + unsigned long flags; + int new_state = 0; + + spin_lock_irqsave(&mdp->lock, flags); + + /* Disable TX and RX right over here, if E-MAC change is ignored */ + if (mdp->cd->no_psr || mdp->no_ether_link) + sh_eth_rcv_snd_disable(ndev); + + if (phydev->link) { + if (phydev->duplex != mdp->duplex) { + new_state = 1; + mdp->duplex = phydev->duplex; + if (mdp->cd->set_duplex) + mdp->cd->set_duplex(ndev); + } + + if (phydev->speed != mdp->speed) { + new_state = 1; + mdp->speed = phydev->speed; + if (mdp->cd->set_rate) + 
mdp->cd->set_rate(ndev); + } + if (!mdp->link) { + sh_eth_modify(ndev, ECMR, ECMR_TXF, 0); + new_state = 1; + mdp->link = phydev->link; + } + } else if (mdp->link) { + new_state = 1; + mdp->link = 0; + mdp->speed = 0; + mdp->duplex = -1; + } + + /* Enable TX and RX right over here, if E-MAC change is ignored */ + if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link) + sh_eth_rcv_snd_enable(ndev); + + spin_unlock_irqrestore(&mdp->lock, flags); + + if (new_state && netif_msg_link(mdp)) + phy_print_status(phydev); +} + +/* PHY init function */ +static int sh_eth_phy_init(struct net_device *ndev) +{ + struct device_node *np = ndev->dev.parent->of_node; + struct sh_eth_private *mdp = netdev_priv(ndev); + struct phy_device *phydev; + + mdp->link = 0; + mdp->speed = 0; + mdp->duplex = -1; + + /* Try connect to PHY */ + if (np) { + struct device_node *pn; + + pn = of_parse_phandle(np, "phy-handle", 0); + phydev = of_phy_connect(ndev, pn, + sh_eth_adjust_link, 0, + mdp->phy_interface); + + of_node_put(pn); + if (!phydev) + phydev = ERR_PTR(-ENOENT); + } else { + char phy_id[MII_BUS_ID_SIZE + 3]; + + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, + mdp->mii_bus->id, mdp->phy_id); + + phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, + mdp->phy_interface); + } + + if (IS_ERR(phydev)) { + netdev_err(ndev, "failed to connect PHY\n"); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) + phy_set_max_speed(phydev, SPEED_100); + + phy_attached_info(phydev); + + return 0; +} + +/* PHY control start function */ +static int sh_eth_phy_start(struct net_device *ndev) +{ + int ret; + + ret = sh_eth_phy_init(ndev); + if (ret) + return ret; + + phy_start(ndev->phydev); + + return 0; +} + +/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the + * version must be bumped as well. Just adding registers up to that + * limit is fine, as long as the existing register indices don't + * change. + */ +#define SH_ETH_REG_DUMP_VERSION 1 +#define SH_ETH_REG_DUMP_MAX_REGS 256 + +static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_cpu_data *cd = mdp->cd; + u32 *valid_map; + size_t len; + + BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS); + + /* Dump starts with a bitmap that tells ethtool which + * registers are defined for this chip. + */ + len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32); + if (buf) { + valid_map = buf; + buf += len; + } else { + valid_map = NULL; + } + + /* Add a register to the dump, if it has a defined offset. + * This automatically skips most undefined registers, but for + * some it is also necessary to check a capability flag in + * struct sh_eth_cpu_data. 
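+ * Note that len counts 32-bit words throughout; it is only
+ * converted to bytes at the final return.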
+ */ +#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32) +#define add_reg_from(reg, read_expr) do { \ + if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \ + if (buf) { \ + mark_reg_valid(reg); \ + *buf++ = read_expr; \ + } \ + ++len; \ + } \ + } while (0) +#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg)) +#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg)) + + add_reg(EDSR); + add_reg(EDMR); + add_reg(EDTRR); + add_reg(EDRRR); + add_reg(EESR); + add_reg(EESIPR); + add_reg(TDLAR); + if (!cd->no_xdfar) + add_reg(TDFAR); + add_reg(TDFXR); + add_reg(TDFFR); + add_reg(RDLAR); + if (!cd->no_xdfar) + add_reg(RDFAR); + add_reg(RDFXR); + add_reg(RDFFR); + add_reg(TRSCER); + add_reg(RMFCR); + add_reg(TFTR); + add_reg(FDR); + add_reg(RMCR); + add_reg(TFUCR); + add_reg(RFOCR); + if (cd->rmiimode) + add_reg(RMIIMODE); + add_reg(FCFTR); + if (cd->rpadir) + add_reg(RPADIR); + if (!cd->no_trimd) + add_reg(TRIMD); + add_reg(ECMR); + add_reg(ECSR); + add_reg(ECSIPR); + add_reg(PIR); + if (!cd->no_psr) + add_reg(PSR); + add_reg(RDMLR); + add_reg(RFLR); + add_reg(IPGR); + if (cd->apr) + add_reg(APR); + if (cd->mpr) + add_reg(MPR); + add_reg(RFCR); + add_reg(RFCF); + if (cd->tpauser) + add_reg(TPAUSER); + add_reg(TPAUSECR); + if (cd->gecmr) + add_reg(GECMR); + if (cd->bculr) + add_reg(BCULR); + add_reg(MAHR); + add_reg(MALR); + if (!cd->no_tx_cntrs) { + add_reg(TROCR); + add_reg(CDCR); + add_reg(LCCR); + add_reg(CNDCR); + } + add_reg(CEFCR); + add_reg(FRECR); + add_reg(TSFRCR); + add_reg(TLFRCR); + if (cd->cexcr) { + add_reg(CERCR); + add_reg(CEECR); + } + add_reg(MAFCR); + if (cd->rtrate) + add_reg(RTRATE); + if (cd->csmr) + add_reg(CSMR); + if (cd->select_mii) + add_reg(RMII_MII); + if (cd->tsu) { + add_tsu_reg(ARSTR); + add_tsu_reg(TSU_CTRST); + if (cd->dual_port) { + add_tsu_reg(TSU_FWEN0); + add_tsu_reg(TSU_FWEN1); + add_tsu_reg(TSU_FCM); + add_tsu_reg(TSU_BSYSL0); + add_tsu_reg(TSU_BSYSL1); + add_tsu_reg(TSU_PRISL0); + add_tsu_reg(TSU_PRISL1); + add_tsu_reg(TSU_FWSL0); + add_tsu_reg(TSU_FWSL1); + } + add_tsu_reg(TSU_FWSLC); + if (cd->dual_port) { + add_tsu_reg(TSU_QTAGM0); + add_tsu_reg(TSU_QTAGM1); + add_tsu_reg(TSU_FWSR); + add_tsu_reg(TSU_FWINMK); + add_tsu_reg(TSU_ADQT0); + add_tsu_reg(TSU_ADQT1); + add_tsu_reg(TSU_VTAG0); + add_tsu_reg(TSU_VTAG1); + } + add_tsu_reg(TSU_ADSBSY); + add_tsu_reg(TSU_TEN); + add_tsu_reg(TSU_POST1); + add_tsu_reg(TSU_POST2); + add_tsu_reg(TSU_POST3); + add_tsu_reg(TSU_POST4); + /* This is the start of a table, not just a single register. 
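+ * TSU_ADRH0 is followed by SH_ETH_TSU_CAM_ENTRIES CAM entries of
+ * two words (address high/low) each, dumped in one pass below.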
*/ + if (buf) { + unsigned int i; + + mark_reg_valid(TSU_ADRH0); + for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++) + *buf++ = ioread32(mdp->tsu_addr + + mdp->reg_offset[TSU_ADRH0] + + i * 4); + } + len += SH_ETH_TSU_CAM_ENTRIES * 2; + } + +#undef mark_reg_valid +#undef add_reg_from +#undef add_reg +#undef add_tsu_reg + + return len * 4; +} + +static int sh_eth_get_regs_len(struct net_device *ndev) +{ + return __sh_eth_get_regs(ndev, NULL); +} + +static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs, + void *buf) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + regs->version = SH_ETH_REG_DUMP_VERSION; + + pm_runtime_get_sync(&mdp->pdev->dev); + __sh_eth_get_regs(ndev, buf); + pm_runtime_put_sync(&mdp->pdev->dev); +} + +static u32 sh_eth_get_msglevel(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + return mdp->msg_enable; +} + +static void sh_eth_set_msglevel(struct net_device *ndev, u32 value) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + mdp->msg_enable = value; +} + +static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_current", "tx_current", + "rx_dirty", "tx_dirty", +}; +#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats) + +static int sh_eth_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return SH_ETH_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void sh_eth_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i = 0; + + /* device-specific stats */ + data[i++] = mdp->cur_rx; + data[i++] = mdp->cur_tx; + data[i++] = mdp->dirty_rx; + data[i++] = mdp->dirty_tx; +} + +static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, sh_eth_gstrings_stats, + sizeof(sh_eth_gstrings_stats)); + break; + } +} + +static void sh_eth_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + ring->rx_max_pending = RX_RING_MAX; + ring->tx_max_pending = TX_RING_MAX; + ring->rx_pending = mdp->num_rx_ring; + ring->tx_pending = mdp->num_tx_ring; +} + +static int sh_eth_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + if (ring->tx_pending > TX_RING_MAX || + ring->rx_pending > RX_RING_MAX || + ring->tx_pending < TX_RING_MIN || + ring->rx_pending < RX_RING_MIN) + return -EINVAL; + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + if (netif_running(ndev)) { + netif_device_detach(ndev); + netif_tx_disable(ndev); + + /* Serialise with the interrupt handler and NAPI, then + * disable interrupts. We have to clear the + * irq_enabled flag first to ensure that interrupts + * won't be re-enabled. + */ + mdp->irq_enabled = false; + synchronize_irq(ndev->irq); + napi_synchronize(&mdp->napi); + sh_eth_write(ndev, 0x0000, EESIPR); + + sh_eth_dev_exit(ndev); + + /* Free all the skbuffs in the Rx queue and the DMA buffers. 
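+ * They are reallocated with the new ring sizes further down.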
*/ + sh_eth_ring_free(ndev); + } + + /* Set new parameters */ + mdp->num_rx_ring = ring->rx_pending; + mdp->num_tx_ring = ring->tx_pending; + + if (netif_running(ndev)) { + ret = sh_eth_ring_init(ndev); + if (ret < 0) { + netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", + __func__); + return ret; + } + ret = sh_eth_dev_init(ndev); + if (ret < 0) { + netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", + __func__); + return ret; + } + + netif_device_attach(ndev); + } + + return 0; +} + +static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + wol->supported = 0; + wol->wolopts = 0; + + if (mdp->cd->magic) { + wol->supported = WAKE_MAGIC; + wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; + } +} + +static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); + + device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled); + + return 0; +} + +static const struct ethtool_ops sh_eth_ethtool_ops = { + .get_regs_len = sh_eth_get_regs_len, + .get_regs = sh_eth_get_regs, + .nway_reset = phy_ethtool_nway_reset, + .get_msglevel = sh_eth_get_msglevel, + .set_msglevel = sh_eth_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = sh_eth_get_strings, + .get_ethtool_stats = sh_eth_get_ethtool_stats, + .get_sset_count = sh_eth_get_sset_count, + .get_ringparam = sh_eth_get_ringparam, + .set_ringparam = sh_eth_set_ringparam, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_wol = sh_eth_get_wol, + .set_wol = sh_eth_set_wol, +}; + +/* network device open function */ +static int sh_eth_open(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + pm_runtime_get_sync(&mdp->pdev->dev); + + napi_enable(&mdp->napi); + + ret = request_irq(ndev->irq, sh_eth_interrupt, + mdp->cd->irq_flags, ndev->name, ndev); + if (ret) { + netdev_err(ndev, "Can not assign IRQ number\n"); + goto out_napi_off; + } + + /* Descriptor set */ + ret = sh_eth_ring_init(ndev); + if (ret) + goto out_free_irq; + + /* device init */ + ret = sh_eth_dev_init(ndev); + if (ret) + goto out_free_irq; + + /* PHY control start*/ + ret = sh_eth_phy_start(ndev); + if (ret) + goto out_free_irq; + + netif_start_queue(ndev); + + mdp->is_opened = 1; + + return ret; + +out_free_irq: + free_irq(ndev->irq, ndev); +out_napi_off: + napi_disable(&mdp->napi); + pm_runtime_put_sync(&mdp->pdev->dev); + return ret; +} + +/* Timeout function */ +static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_rxdesc *rxdesc; + int i; + + netif_stop_queue(ndev); + + netif_err(mdp, timer, ndev, + "transmit timed out, status %8.8x, resetting...\n", + sh_eth_read(ndev, EESR)); + + /* tx_errors count up */ + ndev->stats.tx_errors++; + + /* Free all the skbuffs in the Rx queue. 
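+ * The descriptor addresses are set to a poison pattern so any
+ * late DMA to a stale descriptor is easier to spot.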
*/ + for (i = 0; i < mdp->num_rx_ring; i++) { + rxdesc = &mdp->rx_ring[i]; + rxdesc->status = cpu_to_le32(0); + rxdesc->addr = cpu_to_le32(0xBADF00D0); + dev_kfree_skb(mdp->rx_skbuff[i]); + mdp->rx_skbuff[i] = NULL; + } + for (i = 0; i < mdp->num_tx_ring; i++) { + dev_kfree_skb(mdp->tx_skbuff[i]); + mdp->tx_skbuff[i] = NULL; + } + + /* device init */ + sh_eth_dev_init(ndev); + + netif_start_queue(ndev); +} + +/* Packet transmit function */ +static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_txdesc *txdesc; + dma_addr_t dma_addr; + u32 entry; + unsigned long flags; + + spin_lock_irqsave(&mdp->lock, flags); + if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { + if (!sh_eth_tx_free(ndev, true)) { + netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); + netif_stop_queue(ndev); + spin_unlock_irqrestore(&mdp->lock, flags); + return NETDEV_TX_BUSY; + } + } + spin_unlock_irqrestore(&mdp->lock, flags); + + if (skb_put_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + entry = mdp->cur_tx % mdp->num_tx_ring; + mdp->tx_skbuff[entry] = skb; + txdesc = &mdp->tx_ring[entry]; + /* soft swap. */ + if (!mdp->cd->hw_swap) + sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); + dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { + kfree_skb(skb); + return NETDEV_TX_OK; + } + txdesc->addr = cpu_to_le32(dma_addr); + txdesc->len = cpu_to_le32(skb->len << 16); + + dma_wmb(); /* TACT bit must be set after all the above writes */ + if (entry >= mdp->num_tx_ring - 1) + txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE); + else + txdesc->status |= cpu_to_le32(TD_TACT); + + wmb(); /* cur_tx must be incremented after TACT bit was set */ + mdp->cur_tx++; + + if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns)) + sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); + + return NETDEV_TX_OK; +} + +/* The statistics registers have write-clear behaviour, which means we + * will lose any increment between the read and write. We mitigate + * this by only clearing when we read a non-zero value, so we will + * never falsely report a total of zero. + */ +static void +sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg) +{ + u32 delta = sh_eth_read(ndev, reg); + + if (delta) { + *stat += delta; + sh_eth_write(ndev, 0, reg); + } +} + +static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (mdp->cd->no_tx_cntrs) + return &ndev->stats; + + if (!mdp->is_opened) + return &ndev->stats; + + sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR); + sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR); + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR); + + if (mdp->cd->cexcr) { + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CERCR); + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CEECR); + } else { + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CNDCR); + } + + return &ndev->stats; +} + +/* device close function */ +static int sh_eth_close(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + netif_stop_queue(ndev); + + /* Serialise with the interrupt handler and NAPI, then disable + * interrupts. We have to clear the irq_enabled flag first to + * ensure that interrupts won't be re-enabled. 
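+ * (sh_eth_set_ringparam() uses the same shutdown ordering.)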
+ */ + mdp->irq_enabled = false; + synchronize_irq(ndev->irq); + napi_disable(&mdp->napi); + sh_eth_write(ndev, 0x0000, EESIPR); + + sh_eth_dev_exit(ndev); + + /* PHY Disconnect */ + if (ndev->phydev) { + phy_stop(ndev->phydev); + phy_disconnect(ndev->phydev); + } + + free_irq(ndev->irq, ndev); + + /* Free all the skbuffs in the Rx queue and the DMA buffer. */ + sh_eth_ring_free(ndev); + + mdp->is_opened = 0; + + pm_runtime_put(&mdp->pdev->dev); + + return 0; +} + +static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu) +{ + if (netif_running(ndev)) + return -EBUSY; + + ndev->mtu = new_mtu; + netdev_update_features(ndev); + + return 0; +} + +/* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */ +static u32 sh_eth_tsu_get_post_mask(int entry) +{ + return 0x0f << (28 - ((entry % 8) * 4)); +} + +static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry) +{ + return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); +} + +static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev, + int entry) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int reg = TSU_POST1 + entry / 8; + u32 tmp; + + tmp = sh_eth_tsu_read(mdp, reg); + sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg); +} + +static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev, + int entry) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int reg = TSU_POST1 + entry / 8; + u32 post_mask, ref_mask, tmp; + + post_mask = sh_eth_tsu_get_post_mask(entry); + ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; + + tmp = sh_eth_tsu_read(mdp, reg); + sh_eth_tsu_write(mdp, tmp & ~post_mask, reg); + + /* If other port enables, the function returns "true" */ + return tmp & ref_mask; +} + +static int sh_eth_tsu_busy(struct net_device *ndev) +{ + int timeout = SH_ETH_TSU_TIMEOUT_MS * 100; + struct sh_eth_private *mdp = netdev_priv(ndev); + + while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) { + udelay(10); + timeout--; + if (timeout <= 0) { + netdev_err(ndev, "%s: timeout\n", __func__); + return -ETIMEDOUT; + } + } + + return 0; +} + +static int sh_eth_tsu_write_entry(struct net_device *ndev, u16 offset, + const u8 *addr) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 val; + + val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3]; + iowrite32(val, mdp->tsu_addr + offset); + if (sh_eth_tsu_busy(ndev) < 0) + return -EBUSY; + + val = addr[4] << 8 | addr[5]; + iowrite32(val, mdp->tsu_addr + offset + 4); + if (sh_eth_tsu_busy(ndev) < 0) + return -EBUSY; + + return 0; +} + +static void sh_eth_tsu_read_entry(struct net_device *ndev, u16 offset, u8 *addr) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 val; + + val = ioread32(mdp->tsu_addr + offset); + addr[0] = (val >> 24) & 0xff; + addr[1] = (val >> 16) & 0xff; + addr[2] = (val >> 8) & 0xff; + addr[3] = val & 0xff; + val = ioread32(mdp->tsu_addr + offset + 4); + addr[4] = (val >> 8) & 0xff; + addr[5] = val & 0xff; +} + + +static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + int i; + u8 c_addr[ETH_ALEN]; + + for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { + sh_eth_tsu_read_entry(ndev, reg_offset, c_addr); + if (ether_addr_equal(addr, c_addr)) + return i; + } + + return -ENOENT; +} + +static int sh_eth_tsu_find_empty(struct net_device *ndev) +{ + u8 blank[ETH_ALEN]; + int entry; + + memset(blank, 0, 
sizeof(blank)); + entry = sh_eth_tsu_find_entry(ndev, blank); + return (entry < 0) ? -ENOMEM : entry; +} + +static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev, + int entry) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + int ret; + u8 blank[ETH_ALEN]; + + sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) & + ~(1 << (31 - entry)), TSU_TEN); + + memset(blank, 0, sizeof(blank)); + ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank); + if (ret < 0) + return ret; + return 0; +} + +static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + int i, ret; + + if (!mdp->cd->tsu) + return 0; + + i = sh_eth_tsu_find_entry(ndev, addr); + if (i < 0) { + /* No entry found, create one */ + i = sh_eth_tsu_find_empty(ndev); + if (i < 0) + return -ENOMEM; + ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr); + if (ret < 0) + return ret; + + /* Enable the entry */ + sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) | + (1 << (31 - i)), TSU_TEN); + } + + /* Entry found or created, enable POST */ + sh_eth_tsu_enable_cam_entry_post(ndev, i); + + return 0; +} + +static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i, ret; + + if (!mdp->cd->tsu) + return 0; + + i = sh_eth_tsu_find_entry(ndev, addr); + if (i) { + /* Entry found */ + if (sh_eth_tsu_disable_cam_entry_post(ndev, i)) + goto done; + + /* Disable the entry if both ports was disabled */ + ret = sh_eth_tsu_disable_cam_entry_table(ndev, i); + if (ret < 0) + return ret; + } +done: + return 0; +} + +static int sh_eth_tsu_purge_all(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i, ret; + + if (!mdp->cd->tsu) + return 0; + + for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) { + if (sh_eth_tsu_disable_cam_entry_post(ndev, i)) + continue; + + /* Disable the entry if both ports was disabled */ + ret = sh_eth_tsu_disable_cam_entry_table(ndev, i); + if (ret < 0) + return ret; + } + + return 0; +} + +static void sh_eth_tsu_purge_mcast(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + u8 addr[ETH_ALEN]; + int i; + + if (!mdp->cd->tsu) + return; + + for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { + sh_eth_tsu_read_entry(ndev, reg_offset, addr); + if (is_multicast_ether_addr(addr)) + sh_eth_tsu_del_entry(ndev, addr); + } +} + +/* Update promiscuous flag and multicast filter */ +static void sh_eth_set_rx_mode(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 ecmr_bits; + int mcast_all = 0; + unsigned long flags; + + spin_lock_irqsave(&mdp->lock, flags); + /* Initial condition is MCT = 1, PRM = 0. 
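+ * (MCT selects CAM-based multicast filtering via the TSU; PRM is
+ * promiscuous mode.)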
+ * Depending on ndev->flags, set PRM or clear MCT + */ + ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM; + if (mdp->cd->tsu) + ecmr_bits |= ECMR_MCT; + + if (!(ndev->flags & IFF_MULTICAST)) { + sh_eth_tsu_purge_mcast(ndev); + mcast_all = 1; + } + if (ndev->flags & IFF_ALLMULTI) { + sh_eth_tsu_purge_mcast(ndev); + ecmr_bits &= ~ECMR_MCT; + mcast_all = 1; + } + + if (ndev->flags & IFF_PROMISC) { + sh_eth_tsu_purge_all(ndev); + ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM; + } else if (mdp->cd->tsu) { + struct netdev_hw_addr *ha; + netdev_for_each_mc_addr(ha, ndev) { + if (mcast_all && is_multicast_ether_addr(ha->addr)) + continue; + + if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) { + if (!mcast_all) { + sh_eth_tsu_purge_mcast(ndev); + ecmr_bits &= ~ECMR_MCT; + mcast_all = 1; + } + } + } + } + + /* update the ethernet mode */ + sh_eth_write(ndev, ecmr_bits, ECMR); + + spin_unlock_irqrestore(&mdp->lock, flags); +} + +static void sh_eth_set_rx_csum(struct net_device *ndev, bool enable) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned long flags; + + spin_lock_irqsave(&mdp->lock, flags); + + /* Disable TX and RX */ + sh_eth_rcv_snd_disable(ndev); + + /* Modify RX Checksum setting */ + sh_eth_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0); + + /* Enable TX and RX */ + sh_eth_rcv_snd_enable(ndev); + + spin_unlock_irqrestore(&mdp->lock, flags); +} + +static int sh_eth_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum) + sh_eth_set_rx_csum(ndev, features & NETIF_F_RXCSUM); + + ndev->features = features; + + return 0; +} + +static int sh_eth_get_vtag_index(struct sh_eth_private *mdp) +{ + if (!mdp->port) + return TSU_VTAG0; + else + return TSU_VTAG1; +} + +static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int vtag_reg_index = sh_eth_get_vtag_index(mdp); + + if (unlikely(!mdp->cd->tsu)) + return -EPERM; + + /* No filtering if vid = 0 */ + if (!vid) + return 0; + + mdp->vlan_num_ids++; + + /* The controller has one VLAN tag HW filter. 
So, if the filter is + * already enabled, the driver disables it and the filte + */ + if (mdp->vlan_num_ids > 1) { + /* disable VLAN filter */ + sh_eth_tsu_write(mdp, 0, vtag_reg_index); + return 0; + } + + sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK), + vtag_reg_index); + + return 0; +} + +static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int vtag_reg_index = sh_eth_get_vtag_index(mdp); + + if (unlikely(!mdp->cd->tsu)) + return -EPERM; + + /* No filtering if vid = 0 */ + if (!vid) + return 0; + + mdp->vlan_num_ids--; + sh_eth_tsu_write(mdp, 0, vtag_reg_index); + + return 0; +} + +/* SuperH's TSU register init function */ +static void sh_eth_tsu_init(struct sh_eth_private *mdp) +{ + if (!mdp->cd->dual_port) { + sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ + sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, + TSU_FWSLC); /* Enable POST registers */ + return; + } + + sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ + sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ + sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ + sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0); + sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1); + sh_eth_tsu_write(mdp, 0, TSU_PRISL0); + sh_eth_tsu_write(mdp, 0, TSU_PRISL1); + sh_eth_tsu_write(mdp, 0, TSU_FWSL0); + sh_eth_tsu_write(mdp, 0, TSU_FWSL1); + sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); + sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ + sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ + sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ + sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ + sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ + sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ + sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ + sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ + sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ +} + +/* MDIO bus release function */ +static int sh_mdio_release(struct sh_eth_private *mdp) +{ + /* unregister mdio bus */ + mdiobus_unregister(mdp->mii_bus); + + /* free bitbang info */ + free_mdio_bitbang(mdp->mii_bus); + + return 0; +} + +static int sh_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg) +{ + int res; + + pm_runtime_get_sync(bus->parent); + res = mdiobb_read_c22(bus, phy, reg); + pm_runtime_put(bus->parent); + + return res; +} + +static int sh_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val) +{ + int res; + + pm_runtime_get_sync(bus->parent); + res = mdiobb_write_c22(bus, phy, reg, val); + pm_runtime_put(bus->parent); + + return res; +} + +static int sh_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, int reg) +{ + int res; + + pm_runtime_get_sync(bus->parent); + res = mdiobb_read_c45(bus, phy, devad, reg); + pm_runtime_put(bus->parent); + + return res; +} + +static int sh_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad, + int reg, u16 val) +{ + int res; + + pm_runtime_get_sync(bus->parent); + res = mdiobb_write_c45(bus, phy, devad, reg, val); + pm_runtime_put(bus->parent); + + return res; +} + +/* MDIO bus init function */ +static int sh_mdio_init(struct sh_eth_private *mdp, + struct sh_eth_plat_data *pd) +{ + int ret; + struct bb_info *bitbang; + struct platform_device *pdev = mdp->pdev; + struct device *dev = &mdp->pdev->dev; 
+ struct phy_device *phydev; + struct device_node *pn; + + /* create bit control struct for PHY */ + bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL); + if (!bitbang) + return -ENOMEM; + + /* bitbang init */ + bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; + bitbang->set_gate = pd->set_mdio_gate; + bitbang->ctrl.ops = &bb_ops; + + /* MII controller setting */ + mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); + if (!mdp->mii_bus) + return -ENOMEM; + + /* Wrap accessors with Runtime PM-aware ops */ + mdp->mii_bus->read = sh_mdiobb_read_c22; + mdp->mii_bus->write = sh_mdiobb_write_c22; + mdp->mii_bus->read_c45 = sh_mdiobb_read_c45; + mdp->mii_bus->write_c45 = sh_mdiobb_write_c45; + + /* Hook up MII support for ethtool */ + mdp->mii_bus->name = "sh_mii"; + mdp->mii_bus->parent = dev; + snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* register MDIO bus */ + if (pd->phy_irq > 0) + mdp->mii_bus->irq[pd->phy] = pd->phy_irq; + + ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); + if (ret) + goto out_free_bus; + + pn = of_parse_phandle(dev->of_node, "phy-handle", 0); + phydev = of_phy_find_device(pn); + if (phydev) { + phydev->mac_managed_pm = true; + put_device(&phydev->mdio.dev); + } + of_node_put(pn); + + return 0; + +out_free_bus: + free_mdio_bitbang(mdp->mii_bus); + return ret; +} + +static const u16 *sh_eth_get_register_offset(int register_type) +{ + const u16 *reg_offset = NULL; + + switch (register_type) { + case SH_ETH_REG_GIGABIT: + reg_offset = sh_eth_offset_gigabit; + break; + case SH_ETH_REG_FAST_RCAR: + reg_offset = sh_eth_offset_fast_rcar; + break; + case SH_ETH_REG_FAST_SH4: + reg_offset = sh_eth_offset_fast_sh4; + break; + case SH_ETH_REG_FAST_SH3_SH2: + reg_offset = sh_eth_offset_fast_sh3_sh2; + break; + } + + return reg_offset; +} + +static const struct net_device_ops sh_eth_netdev_ops = { + .ndo_open = sh_eth_open, + .ndo_stop = sh_eth_close, + .ndo_start_xmit = sh_eth_start_xmit, + .ndo_get_stats = sh_eth_get_stats, + .ndo_set_rx_mode = sh_eth_set_rx_mode, + .ndo_tx_timeout = sh_eth_tx_timeout, + .ndo_eth_ioctl = phy_do_ioctl_running, + .ndo_change_mtu = sh_eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_features = sh_eth_set_features, +}; + +static const struct net_device_ops sh_eth_netdev_ops_tsu = { + .ndo_open = sh_eth_open, + .ndo_stop = sh_eth_close, + .ndo_start_xmit = sh_eth_start_xmit, + .ndo_get_stats = sh_eth_get_stats, + .ndo_set_rx_mode = sh_eth_set_rx_mode, + .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, + .ndo_tx_timeout = sh_eth_tx_timeout, + .ndo_eth_ioctl = phy_do_ioctl_running, + .ndo_change_mtu = sh_eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_features = sh_eth_set_features, +}; + +#ifdef CONFIG_OF +static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct sh_eth_plat_data *pdata; + phy_interface_t interface; + int ret; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + ret = of_get_phy_mode(np, &interface); + if (ret) + return NULL; + pdata->phy_interface = interface; + + of_get_mac_address(np, pdata->mac_addr); + + pdata->no_ether_link = + of_property_read_bool(np, "renesas,no-ether-link"); + pdata->ether_link_active_low = + of_property_read_bool(np, "renesas,ether-link-active-low"); + + return pdata; +} + +static const 
struct of_device_id sh_eth_match_table[] = { + { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data }, + { .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data }, + { .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data }, + { .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, + { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data }, + { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, + { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data }, + { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, + { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, + { } +}; +MODULE_DEVICE_TABLE(of, sh_eth_match_table); +#else +static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) +{ + return NULL; +} +#endif + +static int sh_eth_drv_probe(struct platform_device *pdev) +{ + struct resource *res; + struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev); + const struct platform_device_id *id = platform_get_device_id(pdev); + struct sh_eth_private *mdp; + struct net_device *ndev; + int ret; + + ndev = alloc_etherdev(sizeof(struct sh_eth_private)); + if (!ndev) + return -ENOMEM; + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto out_release; + ndev->irq = ret; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + mdp = netdev_priv(ndev); + mdp->num_tx_ring = TX_RING_SIZE; + mdp->num_rx_ring = RX_RING_SIZE; + mdp->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(mdp->addr)) { + ret = PTR_ERR(mdp->addr); + goto out_release; + } + + ndev->base_addr = res->start; + + spin_lock_init(&mdp->lock); + mdp->pdev = pdev; + + if (pdev->dev.of_node) + pd = sh_eth_parse_dt(&pdev->dev); + if (!pd) { + dev_err(&pdev->dev, "no platform data\n"); + ret = -EINVAL; + goto out_release; + } + + /* get PHY ID */ + mdp->phy_id = pd->phy; + mdp->phy_interface = pd->phy_interface; + mdp->no_ether_link = pd->no_ether_link; + mdp->ether_link_active_low = pd->ether_link_active_low; + + /* set cpu data */ + if (id) + mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; + else + mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev); + + mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); + if (!mdp->reg_offset) { + dev_err(&pdev->dev, "Unknown register type (%d)\n", + mdp->cd->register_type); + ret = -EINVAL; + goto out_release; + } + sh_eth_set_default_cpu_data(mdp->cd); + + /* User's manual states max MTU should be 2048 but due to the + * alignment calculations in sh_eth_ring_init() the practical + * MTU is a bit less. Maybe this can be optimized some more. 
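+ * (Hence the 2000 byte budget below rather than the full 2048.)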
+ */ + ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + ndev->min_mtu = ETH_MIN_MTU; + + if (mdp->cd->rx_csum) { + ndev->features = NETIF_F_RXCSUM; + ndev->hw_features = NETIF_F_RXCSUM; + } + + /* set function */ + if (mdp->cd->tsu) + ndev->netdev_ops = &sh_eth_netdev_ops_tsu; + else + ndev->netdev_ops = &sh_eth_netdev_ops; + ndev->ethtool_ops = &sh_eth_ethtool_ops; + ndev->watchdog_timeo = TX_TIMEOUT; + + /* debug message level */ + mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; + + /* read and set MAC address */ + read_mac_address(ndev, pd->mac_addr); + if (!is_valid_ether_addr(ndev->dev_addr)) { + dev_warn(&pdev->dev, + "no valid MAC address supplied, using a random one.\n"); + eth_hw_addr_random(ndev); + } + + if (mdp->cd->tsu) { + int port = pdev->id < 0 ? 0 : pdev->id % 2; + struct resource *rtsu; + + rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!rtsu) { + dev_err(&pdev->dev, "no TSU resource\n"); + ret = -ENODEV; + goto out_release; + } + /* We can only request the TSU region for the first port + * of the two sharing this TSU for the probe to succeed... + */ + if (port == 0 && + !devm_request_mem_region(&pdev->dev, rtsu->start, + resource_size(rtsu), + dev_name(&pdev->dev))) { + dev_err(&pdev->dev, "can't request TSU resource.\n"); + ret = -EBUSY; + goto out_release; + } + /* ioremap the TSU registers */ + mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, + resource_size(rtsu)); + if (!mdp->tsu_addr) { + dev_err(&pdev->dev, "TSU region ioremap() failed.\n"); + ret = -ENOMEM; + goto out_release; + } + mdp->port = port; + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + /* Need to init only the first port of the two sharing a TSU */ + if (port == 0) { + if (mdp->cd->chip_reset) + mdp->cd->chip_reset(ndev); + + /* TSU init (Init only)*/ + sh_eth_tsu_init(mdp); + } + } + + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + + /* MDIO bus init */ + ret = sh_mdio_init(mdp, pd); + if (ret) { + dev_err_probe(&pdev->dev, ret, "MDIO init failed\n"); + goto out_release; + } + + netif_napi_add(ndev, &mdp->napi, sh_eth_poll); + + /* network device register */ + ret = register_netdev(ndev); + if (ret) + goto out_napi_del; + + if (mdp->cd->magic) + device_set_wakeup_capable(&pdev->dev, 1); + + /* print device information */ + netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n", + (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); + + pm_runtime_put(&pdev->dev); + platform_set_drvdata(pdev, ndev); + + return ret; + +out_napi_del: + netif_napi_del(&mdp->napi); + sh_mdio_release(mdp); + +out_release: + /* net_dev free */ + free_netdev(ndev); + + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return ret; +} + +static int sh_eth_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct sh_eth_private *mdp = netdev_priv(ndev); + + unregister_netdev(ndev); + netif_napi_del(&mdp->napi); + sh_mdio_release(mdp); + pm_runtime_disable(&pdev->dev); + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP +static int sh_eth_wol_setup(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* Only allow ECI interrupts */ + synchronize_irq(ndev->irq); + napi_disable(&mdp->napi); + sh_eth_write(ndev, EESIPR_ECIIP, EESIPR); + + /* Enable MagicPacket */ + sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); + + return enable_irq_wake(ndev->irq); +} + +static int sh_eth_wol_restore(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + 
+	int ret;
+
+	napi_enable(&mdp->napi);
+
+	/* Disable MagicPacket */
+	sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0);
+
+	/* The device needs to be reset to restore MagicPacket logic
+	 * for next wakeup. If we close and open the device it will
+	 * both be reset and all registers restored. This is what
+	 * happens during suspend and resume without WoL enabled.
+	 */
+	sh_eth_close(ndev);
+	ret = sh_eth_open(ndev);
+	if (ret < 0)
+		return ret;
+
+	return disable_irq_wake(ndev->irq);
+}
+
+static int sh_eth_suspend(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int ret;
+
+	if (!netif_running(ndev))
+		return 0;
+
+	netif_device_detach(ndev);
+
+	if (mdp->wol_enabled)
+		ret = sh_eth_wol_setup(ndev);
+	else
+		ret = sh_eth_close(ndev);
+
+	return ret;
+}
+
+static int sh_eth_resume(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int ret;
+
+	if (!netif_running(ndev))
+		return 0;
+
+	if (mdp->wol_enabled)
+		ret = sh_eth_wol_restore(ndev);
+	else
+		ret = sh_eth_open(ndev);
+
+	if (ret < 0)
+		return ret;
+
+	netif_device_attach(ndev);
+
+	return ret;
+}
+#endif
+
+static int sh_eth_runtime_nop(struct device *dev)
+{
+	/* Runtime PM callback shared between ->runtime_suspend()
+	 * and ->runtime_resume(). Simply returns success.
+	 *
+	 * This driver re-initializes all registers after
+	 * pm_runtime_get_sync() anyway, so there is no need
+	 * to save and restore registers here.
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops sh_eth_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
+	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
+};
+#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
+#else
+#define SH_ETH_PM_OPS NULL
+#endif
+
+static const struct platform_device_id sh_eth_id_table[] = {
+	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
+	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
+	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
+	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
+	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
+	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
+	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
+
+static struct platform_driver sh_eth_driver = {
+	.probe = sh_eth_drv_probe,
+	.remove = sh_eth_drv_remove,
+	.id_table = sh_eth_id_table,
+	.driver = {
+		   .name = CARDNAME,
+		   .pm = SH_ETH_PM_OPS,
+		   .of_match_table = of_match_ptr(sh_eth_match_table),
+	},
+};
+
+module_platform_driver(sh_eth_driver);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
+MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
new file mode 100644
index 0000000000..a5c07c6ff4
--- /dev/null
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* SuperH Ethernet device driver
+ *
+ * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
+ * Copyright (C) 2008-2012 Renesas Solutions Corp.
+ */
+
+#ifndef __SH_ETH_H__
+#define __SH_ETH_H__
+
+#define CARDNAME	"sh-eth"
+#define TX_TIMEOUT	(5*HZ)
+#define TX_RING_SIZE	64	/* Tx ring size */
+#define RX_RING_SIZE	64	/* Rx ring size */
+#define TX_RING_MIN	64
+#define RX_RING_MIN	64
+#define TX_RING_MAX	1024
+#define RX_RING_MAX	1024
+#define PKT_BUF_SZ	1538
+#define SH_ETH_TSU_TIMEOUT_MS	500
+#define SH_ETH_TSU_CAM_ENTRIES	32
+
+enum {
+	/* IMPORTANT: To keep ethtool register dump working, add new
+	 * register names immediately before SH_ETH_MAX_REGISTER_OFFSET.
+	 */
+
+	/* E-DMAC registers */
+	EDSR = 0,
+	EDMR,
+	EDTRR,
+	EDRRR,
+	EESR,
+	EESIPR,
+	TDLAR,
+	TDFAR,
+	TDFXR,
+	TDFFR,
+	RDLAR,
+	RDFAR,
+	RDFXR,
+	RDFFR,
+	TRSCER,
+	RMFCR,
+	TFTR,
+	FDR,
+	RMCR,
+	EDOCR,
+	TFUCR,
+	RFOCR,
+	RMIIMODE,
+	FCFTR,
+	RPADIR,
+	TRIMD,
+	RBWAR,
+	TBRAR,
+
+	/* Ether registers */
+	ECMR,
+	ECSR,
+	ECSIPR,
+	PIR,
+	PSR,
+	RDMLR,
+	PIPR,
+	RFLR,
+	IPGR,
+	APR,
+	MPR,
+	PFTCR,
+	PFRCR,
+	RFCR,
+	RFCF,
+	TPAUSER,
+	TPAUSECR,
+	BCFR,
+	BCFRR,
+	GECMR,
+	BCULR,
+	MAHR,
+	MALR,
+	TROCR,
+	CDCR,
+	LCCR,
+	CNDCR,
+	CEFCR,
+	FRECR,
+	TSFRCR,
+	TLFRCR,
+	CERCR,
+	CEECR,
+	MAFCR,
+	RTRATE,
+	CSMR,
+	RMII_MII,
+
+	/* TSU Absolute address */
+	ARSTR,
+	TSU_CTRST,
+	TSU_FWEN0,
+	TSU_FWEN1,
+	TSU_FCM,
+	TSU_BSYSL0,
+	TSU_BSYSL1,
+	TSU_PRISL0,
+	TSU_PRISL1,
+	TSU_FWSL0,
+	TSU_FWSL1,
+	TSU_FWSLC,
+	TSU_QTAG0,	/* Same as TSU_QTAGM0 */
+	TSU_QTAG1,	/* Same as TSU_QTAGM1 */
+	TSU_QTAGM0,
+	TSU_QTAGM1,
+	TSU_FWSR,
+	TSU_FWINMK,
+	TSU_ADQT0,
+	TSU_ADQT1,
+	TSU_VTAG0,
+	TSU_VTAG1,
+	TSU_ADSBSY,
+	TSU_TEN,
+	TSU_POST1,
+	TSU_POST2,
+	TSU_POST3,
+	TSU_POST4,
+	TSU_ADRH0,
+	/* TSU_ADR{H,L}{0..31} are assumed to be contiguous */
+
+	TXNLCR0,
+	TXALCR0,
+	RXNLCR0,
+	RXALCR0,
+	FWNLCR0,
+	FWALCR0,
+	TXNLCR1,
+	TXALCR1,
+	RXNLCR1,
+	RXALCR1,
+	FWNLCR1,
+	FWALCR1,
+
+	/* This value must come last. */
+	SH_ETH_MAX_REGISTER_OFFSET,
+};
+
+enum {
+	SH_ETH_REG_GIGABIT,
+	SH_ETH_REG_FAST_RCAR,
+	SH_ETH_REG_FAST_SH4,
+	SH_ETH_REG_FAST_SH3_SH2
+};
+
+/* Driver's parameters */
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_RENESAS)
+#define SH_ETH_RX_ALIGN		32
+#else
+#define SH_ETH_RX_ALIGN		2
+#endif
+
+/* Register bits */
+
+/* EDSR : sh7734, sh7757, sh7763, r8a7740, and r7s72100 only */
+enum EDSR_BIT {
+	EDSR_ENT = 0x01, EDSR_ENR = 0x02,
+};
+#define EDSR_ENALL (EDSR_ENT|EDSR_ENR)
+
+/* GECMR : sh7734, sh7763 and r8a7740 only */
+enum GECMR_BIT {
+	GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01,
+};
+
+/* EDMR */
+enum EDMR_BIT {
+	EDMR_NBST = 0x80,
+	EDMR_EL = 0x40, /* Little endian */
+	EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
+	EDMR_SRST_GETHER = 0x03,
+	EDMR_SRST_ETHER = 0x01,
+};
+
+/* EDTRR */
+enum EDTRR_BIT {
+	EDTRR_TRNS_GETHER = 0x03,
+	EDTRR_TRNS_ETHER = 0x01,
+};
+
+/* EDRRR */
+enum EDRRR_BIT {
+	EDRRR_R = 0x01,
+};
+
+/* TPAUSER */
+enum TPAUSER_BIT {
+	TPAUSER_TPAUSE = 0x0000ffff,
+	TPAUSER_UNLIMITED = 0,
+};
+
+/* BCFR */
+enum BCFR_BIT {
+	BCFR_RPAUSE = 0x0000ffff,
+	BCFR_UNLIMITED = 0,
+};
+
+/* PIR */
+enum PIR_BIT {
+	PIR_MDI = 0x08, PIR_MDO = 0x04, PIR_MMD = 0x02, PIR_MDC = 0x01,
+};
+
+/* PSR */
+enum PSR_BIT { PSR_LMON = 0x01, };
+
+/* EESR */
+enum EESR_BIT {
+	EESR_TWB1	= 0x80000000,
+	EESR_TWB	= 0x40000000,	/* same as TWB0 */
+	EESR_TC1	= 0x20000000,
+	EESR_TUC	= 0x10000000,
+	EESR_ROC	= 0x08000000,
+	EESR_TABT	= 0x04000000,
+	EESR_RABT	= 0x02000000,
+	EESR_RFRMER	= 0x01000000,	/* same as RFCOF */
+	EESR_ADE	= 0x00800000,
+	EESR_ECI	= 0x00400000,
+	EESR_FTC	= 0x00200000,	/* same as TC or TC0 */
+	EESR_TDE	= 0x00100000,
+	EESR_TFE	= 0x00080000,	/* same as TFUF */
+	EESR_FRC	= 0x00040000,	/* same as FR */
+	EESR_RDE	= 0x00020000,
+	EESR_RFE	= 0x00010000,
+	EESR_CND	= 0x00000800,
+	EESR_DLC	= 0x00000400,
+	EESR_CD		= 0x00000200,
+	EESR_TRO	= 0x00000100,
+	EESR_RMAF	= 0x00000080,
+	EESR_CEEF	= 0x00000040,
+	EESR_CELF	= 0x00000020,
+	EESR_RRF	= 0x00000010,
+	EESR_RTLF	= 0x00000008,
+	EESR_RTSF	= 0x00000004,
+	EESR_PRE	= 0x00000002,
+	EESR_CERF	= 0x00000001,
+};
+
+#define EESR_RX_CHECK		(EESR_FRC  | /* Frame recv */		\
+				 EESR_RMAF | /* Multicast address recv */ \
+				 EESR_RRF  | /* Residual-bit frame recv */ \
+				 EESR_RTLF | /* Long frame recv */	\
+				 EESR_RTSF | /* Short frame recv */	\
+				 EESR_PRE  | /* PHY-LSI recv error */	\
+				 EESR_CERF)  /* Recv frame CRC error */
+
+#define DEFAULT_TX_CHECK	(EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
+				 EESR_TRO)
+#define DEFAULT_EESR_ERR_CHECK	(EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
+				 EESR_RDE | EESR_RFRMER | EESR_ADE | \
+				 EESR_TFE | EESR_TDE)
+
+/* EESIPR */
+enum EESIPR_BIT {
+	EESIPR_TWB1IP	= 0x80000000,
+	EESIPR_TWBIP	= 0x40000000,	/* same as TWB0IP */
+	EESIPR_TC1IP	= 0x20000000,
+	EESIPR_TUCIP	= 0x10000000,
+	EESIPR_ROCIP	= 0x08000000,
+	EESIPR_TABTIP	= 0x04000000,
+	EESIPR_RABTIP	= 0x02000000,
+	EESIPR_RFCOFIP	= 0x01000000,
+	EESIPR_ADEIP	= 0x00800000,
+	EESIPR_ECIIP	= 0x00400000,
+	EESIPR_FTCIP	= 0x00200000,	/* same as TC0IP */
+	EESIPR_TDEIP	= 0x00100000,
+	EESIPR_TFUFIP	= 0x00080000,
+	EESIPR_FRIP	= 0x00040000,
+	EESIPR_RDEIP	= 0x00020000,
+	EESIPR_RFOFIP	= 0x00010000,
+	EESIPR_CNDIP	= 0x00000800,
+	EESIPR_DLCIP	= 0x00000400,
+	EESIPR_CDIP	= 0x00000200,
+	EESIPR_TROIP	= 0x00000100,
+	EESIPR_RMAFIP	= 0x00000080,
+	EESIPR_CEEFIP	= 0x00000040,
+	EESIPR_CELFIP	= 0x00000020,
+	EESIPR_RRFIP	= 0x00000010,
+	EESIPR_RTLFIP	= 0x00000008,
+	EESIPR_RTSFIP	= 0x00000004,
+	EESIPR_PREIP	= 0x00000002,
+	EESIPR_CERFIP	= 0x00000001,
+};
+
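+/* Note (illustrative): the EESR status bits and the EESIPR mask bits
+ * above share the same bit layout, which is why the per-SoC
+ * eesipr_value in struct sh_eth_cpu_data (below) can be programmed
+ * into EESIPR verbatim.  A minimal sketch of the idiom used at
+ * device-init time in sh_eth.c:
+ *
+ *	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ */
+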
+/* FCFTR */
+enum FCFTR_BIT {
+	FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000,
+	FCFTR_RFF0 = 0x00010000, FCFTR_RFD2 = 0x00000004,
+	FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001,
+};
+#define DEFAULT_FIFO_F_D_RFF	(FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
+#define DEFAULT_FIFO_F_D_RFD	(FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)
+
+/* RMCR */
+enum RMCR_BIT {
+	RMCR_RNC = 0x00000001,
+};
+
+/* ECMR */
+enum ECMR_BIT {
+	ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000,
+	ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
+	ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
+	ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
+	ECMR_MPDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
+	ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004,
+	ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001,
+};
+
+/* ECSR */
+enum ECSR_BIT {
+	ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10,
+	ECSR_LCHNG = 0x04,
+	ECSR_MPD = 0x02, ECSR_ICD = 0x01,
+};
+
+#define DEFAULT_ECSR_INIT	(ECSR_BRCRX | ECSR_PSRTO | ECSR_LCHNG | \
+				 ECSR_ICD | ECSIPR_MPDIP)
+
+/* ECSIPR */
+enum ECSIPR_BIT {
+	ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10,
+	ECSIPR_LCHNGIP = 0x04,
+	ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01,
+};
+
+#define DEFAULT_ECSIPR_INIT	(ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | \
+				 ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP)
+
+/* APR */
+enum APR_BIT {
+	APR_AP = 0x0000ffff,
+};
+
+/* MPR */
+enum MPR_BIT {
+	MPR_MP = 0x0000ffff,
+};
+
+/* TRSCER */
+enum TRSCER_BIT {
+	TRSCER_CNDCE	= 0x00000800,
+	TRSCER_DLCCE	= 0x00000400,
+	TRSCER_CDCE	= 0x00000200,
+	TRSCER_TROCE	= 0x00000100,
+	TRSCER_RMAFCE	= 0x00000080,
+	TRSCER_RRFCE	= 0x00000010,
+	TRSCER_RTLFCE	= 0x00000008,
+	TRSCER_RTSFCE	= 0x00000004,
+	TRSCER_PRECE	= 0x00000002,
+	TRSCER_CERFCE	= 0x00000001,
+};
+
+#define DEFAULT_TRSCER_ERR_MASK (TRSCER_RMAFCE | TRSCER_RRFCE | TRSCER_CDCE)
+
+/* RPADIR */
+enum RPADIR_BIT {
+	RPADIR_PADS = 0x1f0000, RPADIR_PADR = 0xffff,
+};
+
+/* FDR */
+#define DEFAULT_FDR_INIT	0x00000707
+
+/* ARSTR */
+enum ARSTR_BIT { ARSTR_ARST = 0x00000001, };
+
+/* TSU_FWEN0 */
+enum TSU_FWEN0_BIT {
+	TSU_FWEN0_0 = 0x00000001,
+};
+
+/* TSU_ADSBSY */
+enum TSU_ADSBSY_BIT {
+	TSU_ADSBSY_0 = 0x00000001,
+};
+
+/* TSU_TEN */
+enum TSU_TEN_BIT {
+	TSU_TEN_0 = 0x80000000,
+};
+
+/* TSU_FWSL0 */
+enum TSU_FWSL0_BIT {
+	TSU_FWSL0_FW50 = 0x1000, TSU_FWSL0_FW40 = 0x0800,
+	TSU_FWSL0_FW30 = 0x0400, TSU_FWSL0_FW20 = 0x0200,
+	TSU_FWSL0_FW10 = 0x0100, TSU_FWSL0_RMSA0 = 0x0010,
+};
+
+/* TSU_FWSLC */
+enum TSU_FWSLC_BIT {
+	TSU_FWSLC_POSTENU = 0x2000, TSU_FWSLC_POSTENL = 0x1000,
+	TSU_FWSLC_CAMSEL03 = 0x0080, TSU_FWSLC_CAMSEL02 = 0x0040,
+	TSU_FWSLC_CAMSEL01 = 0x0020, TSU_FWSLC_CAMSEL00 = 0x0010,
+	TSU_FWSLC_CAMSEL13 = 0x0008, TSU_FWSLC_CAMSEL12 = 0x0004,
+	TSU_FWSLC_CAMSEL11 = 0x0002, TSU_FWSLC_CAMSEL10 = 0x0001,
+};
+
+/* TSU_VTAGn */
+#define TSU_VTAG_ENABLE		0x80000000
+#define TSU_VTAG_VID_MASK	0x00000fff
+
+/* The sh ether Tx buffer descriptors.
+ * This structure should be 16 bytes.
+ */
+struct sh_eth_txdesc {
+	u32 status;	/* TD0 */
+	u32 len;	/* TD1 */
+	u32 addr;	/* TD2 */
+	u32 pad0;	/* padding data */
+} __aligned(2) __packed;
+
+/* Transmit descriptor 0 bits */
+enum TD_STS_BIT {
+	TD_TACT = 0x80000000,
+	TD_TDLE = 0x40000000,
+	TD_TFP1 = 0x20000000,
+	TD_TFP0 = 0x10000000,
+	TD_TFE	= 0x08000000,
+	TD_TWBI	= 0x04000000,
+};
+#define TDF1ST	TD_TFP1
+#define TDFEND	TD_TFP0
+#define TD_TFP	(TD_TFP1 | TD_TFP0)
+
+/* Transmit descriptor 1 bits */
+enum TD_LEN_BIT {
+	TD_TBL	= 0xffff0000,	/* transmit buffer length */
+};
+
+/* The sh ether Rx buffer descriptors.
+ * This structure should be 16 bytes.
+ */
+struct sh_eth_rxdesc {
+	u32 status;	/* RD0 */
+	u32 len;	/* RD1 */
+	u32 addr;	/* RD2 */
+	u32 pad0;	/* padding data */
+} __aligned(2) __packed;
+
+/* Receive descriptor 0 bits */
+enum RD_STS_BIT {
+	RD_RACT = 0x80000000,
+	RD_RDLE = 0x40000000,
+	RD_RFP1 = 0x20000000,
+	RD_RFP0 = 0x10000000,
+	RD_RFE	= 0x08000000,
+	RD_RFS10 = 0x00000200,
+	RD_RFS9 = 0x00000100,
+	RD_RFS8 = 0x00000080,
+	RD_RFS7 = 0x00000040,
+	RD_RFS6 = 0x00000020,
+	RD_RFS5 = 0x00000010,
+	RD_RFS4 = 0x00000008,
+	RD_RFS3 = 0x00000004,
+	RD_RFS2 = 0x00000002,
+	RD_RFS1 = 0x00000001,
+};
+#define RDF1ST	RD_RFP1
+#define RDFEND	RD_RFP0
+#define RD_RFP	(RD_RFP1 | RD_RFP0)
+
+/* Receive descriptor 1 bits */
+enum RD_LEN_BIT {
+	RD_RFL	= 0x0000ffff,	/* receive frame length */
+	RD_RBL	= 0xffff0000,	/* receive buffer length */
+};
+
+/* This structure holds the CPU/SoC-dependent configuration and
+ * feature flags for one device variant.
+ */
+struct sh_eth_cpu_data {
+	/* mandatory functions */
+	int (*soft_reset)(struct net_device *ndev);
+
+	/* optional functions */
+	void (*chip_reset)(struct net_device *ndev);
+	void (*set_duplex)(struct net_device *ndev);
+	void (*set_rate)(struct net_device *ndev);
+
+	/* mandatory initialize value */
+	int register_type;
+	u32 edtrr_trns;
+	u32 eesipr_value;
+
+	/* optional initialize value */
+	u32 ecsr_value;
+	u32 ecsipr_value;
+	u32 fdr_value;
+	u32 fcftr_value;
+
+	/* interrupt checking mask */
+	u32 tx_check;
+	u32 eesr_err_check;
+
+	/* Error mask */
+	u32 trscer_err_mask;
+
+	/* hardware features */
+	unsigned long irq_flags; /* IRQ configuration flags */
+	unsigned no_psr:1;	/* EtherC DOES NOT have PSR */
+	unsigned apr:1;		/* EtherC has APR */
+	unsigned mpr:1;		/* EtherC has MPR */
+	unsigned tpauser:1;	/* EtherC has TPAUSER */
+	unsigned gecmr:1;	/* EtherC has GECMR */
+	unsigned bculr:1;	/* EtherC has BCULR */
+	unsigned tsu:1;		/* EtherC has TSU */
+	unsigned hw_swap:1;	/* E-DMAC has DE bit in EDMR */
+	unsigned nbst:1;	/* E-DMAC has NBST bit in EDMR */
+	unsigned rpadir:1;	/* E-DMAC has RPADIR */
+	unsigned no_trimd:1;	/* E-DMAC DOES NOT have TRIMD */
+	unsigned no_ade:1;	/* E-DMAC DOES NOT have ADE bit in EESR */
+	unsigned no_xdfar:1;	/* E-DMAC DOES NOT have RDFAR/TDFAR */
+	unsigned xdfar_rw:1;	/* E-DMAC has writeable RDFAR/TDFAR */
+	unsigned csmr:1;	/* E-DMAC has CSMR */
+	unsigned rx_csum:1;	/* EtherC has ECMR.RCSC */
+	unsigned select_mii:1;	/* EtherC has RMII_MII (MII select register) */
+	unsigned rmiimode:1;	/* EtherC has RMIIMODE register */
+	unsigned rtrate:1;	/* EtherC has RTRATE register */
+	unsigned magic:1;	/* EtherC has ECMR.MPDE and ECSR.MPD */
+	unsigned no_tx_cntrs:1;	/* EtherC DOES NOT have TX error counters */
+	unsigned cexcr:1;	/* EtherC has CERCR/CEECR */
+	unsigned dual_port:1;	/* Dual EtherC/E-DMAC */
+};
+
+struct sh_eth_private {
+	struct platform_device *pdev;
+	struct sh_eth_cpu_data *cd;
+	const u16 *reg_offset;
+	void __iomem *addr;
+	void __iomem *tsu_addr;
+	struct clk *clk;
+	u32 num_rx_ring;
+	u32 num_tx_ring;
+	dma_addr_t rx_desc_dma;
+	dma_addr_t tx_desc_dma;
+	struct sh_eth_rxdesc *rx_ring;
+	struct sh_eth_txdesc *tx_ring;
+	struct sk_buff **rx_skbuff;
+	struct sk_buff **tx_skbuff;
+	spinlock_t lock;		/* Register access lock */
+	u32 cur_rx, dirty_rx;		/* Producer/consumer ring indices */
+	u32 cur_tx, dirty_tx;
+	u32 rx_buf_sz;			/* Based on MTU+slack. */
+	struct napi_struct napi;
+	bool irq_enabled;
+	/* MII transceiver section. */
+	u32 phy_id;			/* PHY ID */
+	struct mii_bus *mii_bus;	/* MDIO bus control */
+	int link;
+	phy_interface_t phy_interface;
+	int msg_enable;
+	int speed;
+	int duplex;
+	int port;			/* for TSU */
+	int vlan_num_ids;		/* for VLAN tag filter */
+
+	unsigned no_ether_link:1;
+	unsigned ether_link_active_low:1;
+	unsigned is_opened:1;
+	unsigned wol_enabled:1;
+};
+
+#endif	/* #ifndef __SH_ETH_H__ */
--
cgit v1.2.3
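
As a usage illustration: on non-DT platforms the probe path above binds
through sh_eth_id_table, so a board file registers a platform device whose
name matches one of the table entries and passes a struct sh_eth_plat_data
(declared in include/linux/sh_eth.h). A minimal sketch follows; the base
address, IRQ number, and PHY address are placeholders, not values from this
patch.

	#include <linux/kernel.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>
	#include <linux/phy.h>
	#include <linux/sh_eth.h>

	/* Placeholder resources: real values are SoC- and board-specific. */
	static struct resource board_eth_resources[] = {
		DEFINE_RES_MEM(0xfb000000, 0x200),	/* EtherC/E-DMAC base (placeholder) */
		DEFINE_RES_IRQ(57),			/* EtherC interrupt (placeholder) */
	};

	static struct sh_eth_plat_data board_eth_pdata = {
		.phy		= 0x1f,			/* PHY address (placeholder) */
		.phy_interface	= PHY_INTERFACE_MODE_MII,
	};

	static struct platform_device board_eth_device = {
		.name		= "sh771x-ether",	/* matches sh_eth_id_table above */
		.id		= 0,
		.resource	= board_eth_resources,
		.num_resources	= ARRAY_SIZE(board_eth_resources),
		.dev		= {
			.platform_data = &board_eth_pdata,
		},
	};

	/* Board init code would then call
	 * platform_device_register(&board_eth_device);
	 */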