Diffstat (limited to 'drivers/net/ethernet/renesas')
-rw-r--r--   drivers/net/ethernet/renesas/Kconfig         45
-rw-r--r--   drivers/net/ethernet/renesas/Makefile        10
-rw-r--r--   drivers/net/ethernet/renesas/ravb.h        1065
-rw-r--r--   drivers/net/ethernet/renesas/ravb_main.c   2350
-rw-r--r--   drivers/net/ethernet/renesas/ravb_ptp.c     348
-rw-r--r--   drivers/net/ethernet/renesas/sh_eth.c      3518
-rw-r--r--   drivers/net/ethernet/renesas/sh_eth.h       550
7 files changed, 7886 insertions, 0 deletions
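
Reading note: the central structure both new drivers are built around is the DMA
descriptor ring. As an aid before the patch body, here is a minimal sketch of the
write-ordering rule the driver observes when handing a descriptor to hardware.
The struct ravb_tx_desc fields, the TX_DS/TX_TAGL/TX_TAGH masks and the
DT_FSINGLE type are all defined in ravb.h below; the helper itself is purely
illustrative and not code from the patch.

    /* Illustrative sketch, not part of the patch: fill one single-frame
     * TX descriptor.  Per the driver's own comments (see ravb_rx() and
     * ravb_ring_format() in ravb_main.c below), die_dt is written last,
     * behind dma_wmb(), because the descriptor type field is what passes
     * ownership of the entry to the AVB-DMAC.
     */
    static void example_fill_tx_desc(struct ravb_tx_desc *desc,
                                     dma_addr_t dma, u16 len, u16 tag)
    {
            desc->ds_tagl = cpu_to_le16((len & TX_DS) |
                                        ((tag << 12) & TX_TAGL));
            desc->tagh_tsr = (tag >> 4) & TX_TAGH;  /* no TX_TSR: no timestamp */
            desc->dptr = cpu_to_le32(dma);
            dma_wmb();                      /* descriptor body before the type */
            desc->die_dt = DT_FSINGLE;      /* hardware owns the entry from here */
    }
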
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig new file mode 100644 index 000000000..bb0ebdfd4 --- /dev/null +++ b/drivers/net/ethernet/renesas/Kconfig @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Renesas device configuration +# + +config NET_VENDOR_RENESAS + bool "Renesas devices" + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Renesas devices. If you say Y, you will be asked + for your specific device in the following questions. + +if NET_VENDOR_RENESAS + +config SH_ETH + tristate "Renesas SuperH Ethernet support" + depends on ARCH_RENESAS || SUPERH || COMPILE_TEST + select CRC32 + select MII + select MDIO_BITBANG + select PHYLIB + ---help--- + Renesas SuperH Ethernet device driver. + This driver supporting CPUs are: + - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, + R8A7740, R8A774x, R8A777x and R8A779x. + +config RAVB + tristate "Renesas Ethernet AVB support" + depends on ARCH_RENESAS || COMPILE_TEST + select CRC32 + select MII + select MDIO_BITBANG + select PHYLIB + imply PTP_1588_CLOCK + help + Renesas Ethernet AVB device driver. + This driver supports the following SoCs: + - R8A779x. + +endif # NET_VENDOR_RENESAS diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile new file mode 100644 index 000000000..f21ab8c02 --- /dev/null +++ b/drivers/net/ethernet/renesas/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Renesas device drivers. +# + +obj-$(CONFIG_SH_ETH) += sh_eth.o + +ravb-objs := ravb_main.o ravb_ptp.o + +obj-$(CONFIG_RAVB) += ravb.o diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h new file mode 100644 index 000000000..e04af9546 --- /dev/null +++ b/drivers/net/ethernet/renesas/ravb.h @@ -0,0 +1,1065 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Renesas Ethernet AVB device driver + * + * Copyright (C) 2014-2015 Renesas Electronics Corporation + * Copyright (C) 2015 Renesas Solutions Corp. + * Copyright (C) 2015-2016 Cogent Embedded, Inc. 
<source@cogentembedded.com> + * + * Based on the SuperH Ethernet driver + */ + +#ifndef __RAVB_H__ +#define __RAVB_H__ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mdio-bitbang.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/ptp_clock_kernel.h> + +#define BE_TX_RING_SIZE 64 /* TX ring size for Best Effort */ +#define BE_RX_RING_SIZE 1024 /* RX ring size for Best Effort */ +#define NC_TX_RING_SIZE 64 /* TX ring size for Network Control */ +#define NC_RX_RING_SIZE 64 /* RX ring size for Network Control */ +#define BE_TX_RING_MIN 64 +#define BE_RX_RING_MIN 64 +#define BE_TX_RING_MAX 1024 +#define BE_RX_RING_MAX 2048 + +#define PKT_BUF_SZ 1538 + +/* Driver's parameters */ +#define RAVB_ALIGN 128 + +/* Hardware time stamp */ +#define RAVB_TXTSTAMP_VALID 0x00000001 /* TX timestamp valid */ +#define RAVB_TXTSTAMP_ENABLED 0x00000010 /* Enable TX timestamping */ + +#define RAVB_RXTSTAMP_VALID 0x00000001 /* RX timestamp valid */ +#define RAVB_RXTSTAMP_TYPE 0x00000006 /* RX type mask */ +#define RAVB_RXTSTAMP_TYPE_V2_L2_EVENT 0x00000002 +#define RAVB_RXTSTAMP_TYPE_ALL 0x00000006 +#define RAVB_RXTSTAMP_ENABLED 0x00000010 /* Enable RX timestamping */ + +enum ravb_reg { + /* AVB-DMAC registers */ + CCC = 0x0000, + DBAT = 0x0004, + DLR = 0x0008, + CSR = 0x000C, + CDAR0 = 0x0010, + CDAR1 = 0x0014, + CDAR2 = 0x0018, + CDAR3 = 0x001C, + CDAR4 = 0x0020, + CDAR5 = 0x0024, + CDAR6 = 0x0028, + CDAR7 = 0x002C, + CDAR8 = 0x0030, + CDAR9 = 0x0034, + CDAR10 = 0x0038, + CDAR11 = 0x003C, + CDAR12 = 0x0040, + CDAR13 = 0x0044, + CDAR14 = 0x0048, + CDAR15 = 0x004C, + CDAR16 = 0x0050, + CDAR17 = 0x0054, + CDAR18 = 0x0058, + CDAR19 = 0x005C, + CDAR20 = 0x0060, + CDAR21 = 0x0064, + ESR = 0x0088, + APSR = 0x008C, /* R-Car Gen3 only */ + RCR = 0x0090, + RQC0 = 0x0094, + RQC1 = 0x0098, + RQC2 = 0x009C, + RQC3 = 0x00A0, + RQC4 = 0x00A4, + RPC = 0x00B0, + UFCW = 0x00BC, + UFCS = 0x00C0, + UFCV0 = 0x00C4, + UFCV1 = 0x00C8, + UFCV2 = 0x00CC, + UFCV3 = 0x00D0, + UFCV4 = 0x00D4, + UFCD0 = 0x00E0, + UFCD1 = 0x00E4, + UFCD2 = 0x00E8, + UFCD3 = 0x00EC, + UFCD4 = 0x00F0, + SFO = 0x00FC, + SFP0 = 0x0100, + SFP1 = 0x0104, + SFP2 = 0x0108, + SFP3 = 0x010C, + SFP4 = 0x0110, + SFP5 = 0x0114, + SFP6 = 0x0118, + SFP7 = 0x011C, + SFP8 = 0x0120, + SFP9 = 0x0124, + SFP10 = 0x0128, + SFP11 = 0x012C, + SFP12 = 0x0130, + SFP13 = 0x0134, + SFP14 = 0x0138, + SFP15 = 0x013C, + SFP16 = 0x0140, + SFP17 = 0x0144, + SFP18 = 0x0148, + SFP19 = 0x014C, + SFP20 = 0x0150, + SFP21 = 0x0154, + SFP22 = 0x0158, + SFP23 = 0x015C, + SFP24 = 0x0160, + SFP25 = 0x0164, + SFP26 = 0x0168, + SFP27 = 0x016C, + SFP28 = 0x0170, + SFP29 = 0x0174, + SFP30 = 0x0178, + SFP31 = 0x017C, + SFM0 = 0x01C0, + SFM1 = 0x01C4, + TGC = 0x0300, + TCCR = 0x0304, + TSR = 0x0308, + TFA0 = 0x0310, + TFA1 = 0x0314, + TFA2 = 0x0318, + CIVR0 = 0x0320, + CIVR1 = 0x0324, + CDVR0 = 0x0328, + CDVR1 = 0x032C, + CUL0 = 0x0330, + CUL1 = 0x0334, + CLL0 = 0x0338, + CLL1 = 0x033C, + DIC = 0x0350, + DIS = 0x0354, + EIC = 0x0358, + EIS = 0x035C, + RIC0 = 0x0360, + RIS0 = 0x0364, + RIC1 = 0x0368, + RIS1 = 0x036C, + RIC2 = 0x0370, + RIS2 = 0x0374, + TIC = 0x0378, + TIS = 0x037C, + ISS = 0x0380, + CIE = 0x0384, /* R-Car Gen3 only */ + GCCR = 0x0390, + GMTT = 0x0394, + GPTC = 0x0398, + GTI = 0x039C, + GTO0 = 0x03A0, + GTO1 = 0x03A4, + GTO2 = 0x03A8, + GIC = 0x03AC, + GIS = 0x03B0, + GCPT = 0x03B4, /* Undocumented? 
*/ + GCT0 = 0x03B8, + GCT1 = 0x03BC, + GCT2 = 0x03C0, + GIE = 0x03CC, /* R-Car Gen3 only */ + GID = 0x03D0, /* R-Car Gen3 only */ + DIL = 0x0440, /* R-Car Gen3 only */ + RIE0 = 0x0460, /* R-Car Gen3 only */ + RID0 = 0x0464, /* R-Car Gen3 only */ + RIE2 = 0x0470, /* R-Car Gen3 only */ + RID2 = 0x0474, /* R-Car Gen3 only */ + TIE = 0x0478, /* R-Car Gen3 only */ + TID = 0x047c, /* R-Car Gen3 only */ + + /* E-MAC registers */ + ECMR = 0x0500, + RFLR = 0x0508, + ECSR = 0x0510, + ECSIPR = 0x0518, + PIR = 0x0520, + PSR = 0x0528, + PIPR = 0x052c, + MPR = 0x0558, + PFTCR = 0x055c, + PFRCR = 0x0560, + GECMR = 0x05b0, + MAHR = 0x05c0, + MALR = 0x05c8, + TROCR = 0x0700, /* Undocumented? */ + CDCR = 0x0708, /* Undocumented? */ + LCCR = 0x0710, /* Undocumented? */ + CEFCR = 0x0740, + FRECR = 0x0748, + TSFRCR = 0x0750, + TLFRCR = 0x0758, + RFCR = 0x0760, + CERCR = 0x0768, /* Undocumented? */ + CEECR = 0x0770, /* Undocumented? */ + MAFCR = 0x0778, +}; + + +/* Register bits of the Ethernet AVB */ +/* CCC */ +enum CCC_BIT { + CCC_OPC = 0x00000003, + CCC_OPC_RESET = 0x00000000, + CCC_OPC_CONFIG = 0x00000001, + CCC_OPC_OPERATION = 0x00000002, + CCC_GAC = 0x00000080, + CCC_DTSR = 0x00000100, + CCC_CSEL = 0x00030000, + CCC_CSEL_HPB = 0x00010000, + CCC_CSEL_ETH_TX = 0x00020000, + CCC_CSEL_GMII_REF = 0x00030000, + CCC_BOC = 0x00100000, /* Undocumented? */ + CCC_LBME = 0x01000000, +}; + +/* CSR */ +enum CSR_BIT { + CSR_OPS = 0x0000000F, + CSR_OPS_RESET = 0x00000001, + CSR_OPS_CONFIG = 0x00000002, + CSR_OPS_OPERATION = 0x00000004, + CSR_OPS_STANDBY = 0x00000008, /* Undocumented? */ + CSR_DTS = 0x00000100, + CSR_TPO0 = 0x00010000, + CSR_TPO1 = 0x00020000, + CSR_TPO2 = 0x00040000, + CSR_TPO3 = 0x00080000, + CSR_RPO = 0x00100000, +}; + +/* ESR */ +enum ESR_BIT { + ESR_EQN = 0x0000001F, + ESR_ET = 0x00000F00, + ESR_EIL = 0x00001000, +}; + +/* APSR */ +enum APSR_BIT { + APSR_MEMS = 0x00000002, + APSR_CMSW = 0x00000010, + APSR_DM = 0x00006000, /* Undocumented? 
*/ + APSR_DM_RDM = 0x00002000, + APSR_DM_TDM = 0x00004000, +}; + +/* RCR */ +enum RCR_BIT { + RCR_EFFS = 0x00000001, + RCR_ENCF = 0x00000002, + RCR_ESF = 0x0000000C, + RCR_ETS0 = 0x00000010, + RCR_ETS2 = 0x00000020, + RCR_RFCL = 0x1FFF0000, +}; + +/* RQC0/1/2/3/4 */ +enum RQC_BIT { + RQC_RSM0 = 0x00000003, + RQC_UFCC0 = 0x00000030, + RQC_RSM1 = 0x00000300, + RQC_UFCC1 = 0x00003000, + RQC_RSM2 = 0x00030000, + RQC_UFCC2 = 0x00300000, + RQC_RSM3 = 0x03000000, + RQC_UFCC3 = 0x30000000, +}; + +/* RPC */ +enum RPC_BIT { + RPC_PCNT = 0x00000700, + RPC_DCNT = 0x00FF0000, +}; + +/* UFCW */ +enum UFCW_BIT { + UFCW_WL0 = 0x0000003F, + UFCW_WL1 = 0x00003F00, + UFCW_WL2 = 0x003F0000, + UFCW_WL3 = 0x3F000000, +}; + +/* UFCS */ +enum UFCS_BIT { + UFCS_SL0 = 0x0000003F, + UFCS_SL1 = 0x00003F00, + UFCS_SL2 = 0x003F0000, + UFCS_SL3 = 0x3F000000, +}; + +/* UFCV0/1/2/3/4 */ +enum UFCV_BIT { + UFCV_CV0 = 0x0000003F, + UFCV_CV1 = 0x00003F00, + UFCV_CV2 = 0x003F0000, + UFCV_CV3 = 0x3F000000, +}; + +/* UFCD0/1/2/3/4 */ +enum UFCD_BIT { + UFCD_DV0 = 0x0000003F, + UFCD_DV1 = 0x00003F00, + UFCD_DV2 = 0x003F0000, + UFCD_DV3 = 0x3F000000, +}; + +/* SFO */ +enum SFO_BIT { + SFO_FPB = 0x0000003F, +}; + +/* RTC */ +enum RTC_BIT { + RTC_MFL0 = 0x00000FFF, + RTC_MFL1 = 0x0FFF0000, +}; + +/* TGC */ +enum TGC_BIT { + TGC_TSM0 = 0x00000001, + TGC_TSM1 = 0x00000002, + TGC_TSM2 = 0x00000004, + TGC_TSM3 = 0x00000008, + TGC_TQP = 0x00000030, + TGC_TQP_NONAVB = 0x00000000, + TGC_TQP_AVBMODE1 = 0x00000010, + TGC_TQP_AVBMODE2 = 0x00000030, + TGC_TBD0 = 0x00000300, + TGC_TBD1 = 0x00003000, + TGC_TBD2 = 0x00030000, + TGC_TBD3 = 0x00300000, +}; + +/* TCCR */ +enum TCCR_BIT { + TCCR_TSRQ0 = 0x00000001, + TCCR_TSRQ1 = 0x00000002, + TCCR_TSRQ2 = 0x00000004, + TCCR_TSRQ3 = 0x00000008, + TCCR_TFEN = 0x00000100, + TCCR_TFR = 0x00000200, +}; + +/* TSR */ +enum TSR_BIT { + TSR_CCS0 = 0x00000003, + TSR_CCS1 = 0x0000000C, + TSR_TFFL = 0x00000700, +}; + +/* TFA2 */ +enum TFA2_BIT { + TFA2_TSV = 0x0000FFFF, + TFA2_TST = 0x03FF0000, +}; + +/* DIC */ +enum DIC_BIT { + DIC_DPE1 = 0x00000002, + DIC_DPE2 = 0x00000004, + DIC_DPE3 = 0x00000008, + DIC_DPE4 = 0x00000010, + DIC_DPE5 = 0x00000020, + DIC_DPE6 = 0x00000040, + DIC_DPE7 = 0x00000080, + DIC_DPE8 = 0x00000100, + DIC_DPE9 = 0x00000200, + DIC_DPE10 = 0x00000400, + DIC_DPE11 = 0x00000800, + DIC_DPE12 = 0x00001000, + DIC_DPE13 = 0x00002000, + DIC_DPE14 = 0x00004000, + DIC_DPE15 = 0x00008000, +}; + +/* DIS */ +enum DIS_BIT { + DIS_DPF1 = 0x00000002, + DIS_DPF2 = 0x00000004, + DIS_DPF3 = 0x00000008, + DIS_DPF4 = 0x00000010, + DIS_DPF5 = 0x00000020, + DIS_DPF6 = 0x00000040, + DIS_DPF7 = 0x00000080, + DIS_DPF8 = 0x00000100, + DIS_DPF9 = 0x00000200, + DIS_DPF10 = 0x00000400, + DIS_DPF11 = 0x00000800, + DIS_DPF12 = 0x00001000, + DIS_DPF13 = 0x00002000, + DIS_DPF14 = 0x00004000, + DIS_DPF15 = 0x00008000, +}; + +/* EIC */ +enum EIC_BIT { + EIC_MREE = 0x00000001, + EIC_MTEE = 0x00000002, + EIC_QEE = 0x00000004, + EIC_SEE = 0x00000008, + EIC_CLLE0 = 0x00000010, + EIC_CLLE1 = 0x00000020, + EIC_CULE0 = 0x00000040, + EIC_CULE1 = 0x00000080, + EIC_TFFE = 0x00000100, +}; + +/* EIS */ +enum EIS_BIT { + EIS_MREF = 0x00000001, + EIS_MTEF = 0x00000002, + EIS_QEF = 0x00000004, + EIS_SEF = 0x00000008, + EIS_CLLF0 = 0x00000010, + EIS_CLLF1 = 0x00000020, + EIS_CULF0 = 0x00000040, + EIS_CULF1 = 0x00000080, + EIS_TFFF = 0x00000100, + EIS_QFS = 0x00010000, + EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)), +}; + +/* RIC0 */ +enum RIC0_BIT { + RIC0_FRE0 = 0x00000001, + RIC0_FRE1 = 0x00000002, + RIC0_FRE2 = 0x00000004, + 
RIC0_FRE3 = 0x00000008, + RIC0_FRE4 = 0x00000010, + RIC0_FRE5 = 0x00000020, + RIC0_FRE6 = 0x00000040, + RIC0_FRE7 = 0x00000080, + RIC0_FRE8 = 0x00000100, + RIC0_FRE9 = 0x00000200, + RIC0_FRE10 = 0x00000400, + RIC0_FRE11 = 0x00000800, + RIC0_FRE12 = 0x00001000, + RIC0_FRE13 = 0x00002000, + RIC0_FRE14 = 0x00004000, + RIC0_FRE15 = 0x00008000, + RIC0_FRE16 = 0x00010000, + RIC0_FRE17 = 0x00020000, +}; + +/* RIC0 */ +enum RIS0_BIT { + RIS0_FRF0 = 0x00000001, + RIS0_FRF1 = 0x00000002, + RIS0_FRF2 = 0x00000004, + RIS0_FRF3 = 0x00000008, + RIS0_FRF4 = 0x00000010, + RIS0_FRF5 = 0x00000020, + RIS0_FRF6 = 0x00000040, + RIS0_FRF7 = 0x00000080, + RIS0_FRF8 = 0x00000100, + RIS0_FRF9 = 0x00000200, + RIS0_FRF10 = 0x00000400, + RIS0_FRF11 = 0x00000800, + RIS0_FRF12 = 0x00001000, + RIS0_FRF13 = 0x00002000, + RIS0_FRF14 = 0x00004000, + RIS0_FRF15 = 0x00008000, + RIS0_FRF16 = 0x00010000, + RIS0_FRF17 = 0x00020000, + RIS0_RESERVED = GENMASK(31, 18), +}; + +/* RIC1 */ +enum RIC1_BIT { + RIC1_RFWE = 0x80000000, +}; + +/* RIS1 */ +enum RIS1_BIT { + RIS1_RFWF = 0x80000000, +}; + +/* RIC2 */ +enum RIC2_BIT { + RIC2_QFE0 = 0x00000001, + RIC2_QFE1 = 0x00000002, + RIC2_QFE2 = 0x00000004, + RIC2_QFE3 = 0x00000008, + RIC2_QFE4 = 0x00000010, + RIC2_QFE5 = 0x00000020, + RIC2_QFE6 = 0x00000040, + RIC2_QFE7 = 0x00000080, + RIC2_QFE8 = 0x00000100, + RIC2_QFE9 = 0x00000200, + RIC2_QFE10 = 0x00000400, + RIC2_QFE11 = 0x00000800, + RIC2_QFE12 = 0x00001000, + RIC2_QFE13 = 0x00002000, + RIC2_QFE14 = 0x00004000, + RIC2_QFE15 = 0x00008000, + RIC2_QFE16 = 0x00010000, + RIC2_QFE17 = 0x00020000, + RIC2_RFFE = 0x80000000, +}; + +/* RIS2 */ +enum RIS2_BIT { + RIS2_QFF0 = 0x00000001, + RIS2_QFF1 = 0x00000002, + RIS2_QFF2 = 0x00000004, + RIS2_QFF3 = 0x00000008, + RIS2_QFF4 = 0x00000010, + RIS2_QFF5 = 0x00000020, + RIS2_QFF6 = 0x00000040, + RIS2_QFF7 = 0x00000080, + RIS2_QFF8 = 0x00000100, + RIS2_QFF9 = 0x00000200, + RIS2_QFF10 = 0x00000400, + RIS2_QFF11 = 0x00000800, + RIS2_QFF12 = 0x00001000, + RIS2_QFF13 = 0x00002000, + RIS2_QFF14 = 0x00004000, + RIS2_QFF15 = 0x00008000, + RIS2_QFF16 = 0x00010000, + RIS2_QFF17 = 0x00020000, + RIS2_RFFF = 0x80000000, + RIS2_RESERVED = GENMASK(30, 18), +}; + +/* TIC */ +enum TIC_BIT { + TIC_FTE0 = 0x00000001, /* Undocumented? */ + TIC_FTE1 = 0x00000002, /* Undocumented? */ + TIC_TFUE = 0x00000100, + TIC_TFWE = 0x00000200, +}; + +/* TIS */ +enum TIS_BIT { + TIS_FTF0 = 0x00000001, /* Undocumented? */ + TIS_FTF1 = 0x00000002, /* Undocumented? */ + TIS_TFUF = 0x00000100, + TIS_TFWF = 0x00000200, + TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4)) +}; + +/* ISS */ +enum ISS_BIT { + ISS_FRS = 0x00000001, /* Undocumented? */ + ISS_FTS = 0x00000004, /* Undocumented? 
*/ + ISS_ES = 0x00000040, + ISS_MS = 0x00000080, + ISS_TFUS = 0x00000100, + ISS_TFWS = 0x00000200, + ISS_RFWS = 0x00001000, + ISS_CGIS = 0x00002000, + ISS_DPS1 = 0x00020000, + ISS_DPS2 = 0x00040000, + ISS_DPS3 = 0x00080000, + ISS_DPS4 = 0x00100000, + ISS_DPS5 = 0x00200000, + ISS_DPS6 = 0x00400000, + ISS_DPS7 = 0x00800000, + ISS_DPS8 = 0x01000000, + ISS_DPS9 = 0x02000000, + ISS_DPS10 = 0x04000000, + ISS_DPS11 = 0x08000000, + ISS_DPS12 = 0x10000000, + ISS_DPS13 = 0x20000000, + ISS_DPS14 = 0x40000000, + ISS_DPS15 = 0x80000000, +}; + +/* CIE (R-Car Gen3 only) */ +enum CIE_BIT { + CIE_CRIE = 0x00000001, + CIE_CTIE = 0x00000100, + CIE_RQFM = 0x00010000, + CIE_CL0M = 0x00020000, + CIE_RFWL = 0x00040000, + CIE_RFFL = 0x00080000, +}; + +/* GCCR */ +enum GCCR_BIT { + GCCR_TCR = 0x00000003, + GCCR_TCR_NOREQ = 0x00000000, /* No request */ + GCCR_TCR_RESET = 0x00000001, /* gPTP/AVTP presentation timer reset */ + GCCR_TCR_CAPTURE = 0x00000003, /* Capture value set in GCCR.TCSS */ + GCCR_LTO = 0x00000004, + GCCR_LTI = 0x00000008, + GCCR_LPTC = 0x00000010, + GCCR_LMTT = 0x00000020, + GCCR_TCSS = 0x00000300, + GCCR_TCSS_GPTP = 0x00000000, /* gPTP timer value */ + GCCR_TCSS_ADJGPTP = 0x00000100, /* Adjusted gPTP timer value */ + GCCR_TCSS_AVTP = 0x00000200, /* AVTP presentation time value */ +}; + +/* GTI */ +enum GTI_BIT { + GTI_TIV = 0x0FFFFFFF, +}; + +#define GTI_TIV_MAX GTI_TIV +#define GTI_TIV_MIN 0x20 + +/* GIC */ +enum GIC_BIT { + GIC_PTCE = 0x00000001, /* Undocumented? */ + GIC_PTME = 0x00000004, +}; + +/* GIS */ +enum GIS_BIT { + GIS_PTCF = 0x00000001, /* Undocumented? */ + GIS_PTMF = 0x00000004, + GIS_RESERVED = GENMASK(15, 10), +}; + +/* GIE (R-Car Gen3 only) */ +enum GIE_BIT { + GIE_PTCS = 0x00000001, + GIE_PTOS = 0x00000002, + GIE_PTMS0 = 0x00000004, + GIE_PTMS1 = 0x00000008, + GIE_PTMS2 = 0x00000010, + GIE_PTMS3 = 0x00000020, + GIE_PTMS4 = 0x00000040, + GIE_PTMS5 = 0x00000080, + GIE_PTMS6 = 0x00000100, + GIE_PTMS7 = 0x00000200, + GIE_ATCS0 = 0x00010000, + GIE_ATCS1 = 0x00020000, + GIE_ATCS2 = 0x00040000, + GIE_ATCS3 = 0x00080000, + GIE_ATCS4 = 0x00100000, + GIE_ATCS5 = 0x00200000, + GIE_ATCS6 = 0x00400000, + GIE_ATCS7 = 0x00800000, + GIE_ATCS8 = 0x01000000, + GIE_ATCS9 = 0x02000000, + GIE_ATCS10 = 0x04000000, + GIE_ATCS11 = 0x08000000, + GIE_ATCS12 = 0x10000000, + GIE_ATCS13 = 0x20000000, + GIE_ATCS14 = 0x40000000, + GIE_ATCS15 = 0x80000000, +}; + +/* GID (R-Car Gen3 only) */ +enum GID_BIT { + GID_PTCD = 0x00000001, + GID_PTOD = 0x00000002, + GID_PTMD0 = 0x00000004, + GID_PTMD1 = 0x00000008, + GID_PTMD2 = 0x00000010, + GID_PTMD3 = 0x00000020, + GID_PTMD4 = 0x00000040, + GID_PTMD5 = 0x00000080, + GID_PTMD6 = 0x00000100, + GID_PTMD7 = 0x00000200, + GID_ATCD0 = 0x00010000, + GID_ATCD1 = 0x00020000, + GID_ATCD2 = 0x00040000, + GID_ATCD3 = 0x00080000, + GID_ATCD4 = 0x00100000, + GID_ATCD5 = 0x00200000, + GID_ATCD6 = 0x00400000, + GID_ATCD7 = 0x00800000, + GID_ATCD8 = 0x01000000, + GID_ATCD9 = 0x02000000, + GID_ATCD10 = 0x04000000, + GID_ATCD11 = 0x08000000, + GID_ATCD12 = 0x10000000, + GID_ATCD13 = 0x20000000, + GID_ATCD14 = 0x40000000, + GID_ATCD15 = 0x80000000, +}; + +/* RIE0 (R-Car Gen3 only) */ +enum RIE0_BIT { + RIE0_FRS0 = 0x00000001, + RIE0_FRS1 = 0x00000002, + RIE0_FRS2 = 0x00000004, + RIE0_FRS3 = 0x00000008, + RIE0_FRS4 = 0x00000010, + RIE0_FRS5 = 0x00000020, + RIE0_FRS6 = 0x00000040, + RIE0_FRS7 = 0x00000080, + RIE0_FRS8 = 0x00000100, + RIE0_FRS9 = 0x00000200, + RIE0_FRS10 = 0x00000400, + RIE0_FRS11 = 0x00000800, + RIE0_FRS12 = 0x00001000, + RIE0_FRS13 = 0x00002000, + RIE0_FRS14 = 
0x00004000, + RIE0_FRS15 = 0x00008000, + RIE0_FRS16 = 0x00010000, + RIE0_FRS17 = 0x00020000, +}; + +/* RID0 (R-Car Gen3 only) */ +enum RID0_BIT { + RID0_FRD0 = 0x00000001, + RID0_FRD1 = 0x00000002, + RID0_FRD2 = 0x00000004, + RID0_FRD3 = 0x00000008, + RID0_FRD4 = 0x00000010, + RID0_FRD5 = 0x00000020, + RID0_FRD6 = 0x00000040, + RID0_FRD7 = 0x00000080, + RID0_FRD8 = 0x00000100, + RID0_FRD9 = 0x00000200, + RID0_FRD10 = 0x00000400, + RID0_FRD11 = 0x00000800, + RID0_FRD12 = 0x00001000, + RID0_FRD13 = 0x00002000, + RID0_FRD14 = 0x00004000, + RID0_FRD15 = 0x00008000, + RID0_FRD16 = 0x00010000, + RID0_FRD17 = 0x00020000, +}; + +/* RIE2 (R-Car Gen3 only) */ +enum RIE2_BIT { + RIE2_QFS0 = 0x00000001, + RIE2_QFS1 = 0x00000002, + RIE2_QFS2 = 0x00000004, + RIE2_QFS3 = 0x00000008, + RIE2_QFS4 = 0x00000010, + RIE2_QFS5 = 0x00000020, + RIE2_QFS6 = 0x00000040, + RIE2_QFS7 = 0x00000080, + RIE2_QFS8 = 0x00000100, + RIE2_QFS9 = 0x00000200, + RIE2_QFS10 = 0x00000400, + RIE2_QFS11 = 0x00000800, + RIE2_QFS12 = 0x00001000, + RIE2_QFS13 = 0x00002000, + RIE2_QFS14 = 0x00004000, + RIE2_QFS15 = 0x00008000, + RIE2_QFS16 = 0x00010000, + RIE2_QFS17 = 0x00020000, + RIE2_RFFS = 0x80000000, +}; + +/* RID2 (R-Car Gen3 only) */ +enum RID2_BIT { + RID2_QFD0 = 0x00000001, + RID2_QFD1 = 0x00000002, + RID2_QFD2 = 0x00000004, + RID2_QFD3 = 0x00000008, + RID2_QFD4 = 0x00000010, + RID2_QFD5 = 0x00000020, + RID2_QFD6 = 0x00000040, + RID2_QFD7 = 0x00000080, + RID2_QFD8 = 0x00000100, + RID2_QFD9 = 0x00000200, + RID2_QFD10 = 0x00000400, + RID2_QFD11 = 0x00000800, + RID2_QFD12 = 0x00001000, + RID2_QFD13 = 0x00002000, + RID2_QFD14 = 0x00004000, + RID2_QFD15 = 0x00008000, + RID2_QFD16 = 0x00010000, + RID2_QFD17 = 0x00020000, + RID2_RFFD = 0x80000000, +}; + +/* TIE (R-Car Gen3 only) */ +enum TIE_BIT { + TIE_FTS0 = 0x00000001, + TIE_FTS1 = 0x00000002, + TIE_FTS2 = 0x00000004, + TIE_FTS3 = 0x00000008, + TIE_TFUS = 0x00000100, + TIE_TFWS = 0x00000200, + TIE_MFUS = 0x00000400, + TIE_MFWS = 0x00000800, + TIE_TDPS0 = 0x00010000, + TIE_TDPS1 = 0x00020000, + TIE_TDPS2 = 0x00040000, + TIE_TDPS3 = 0x00080000, +}; + +/* TID (R-Car Gen3 only) */ +enum TID_BIT { + TID_FTD0 = 0x00000001, + TID_FTD1 = 0x00000002, + TID_FTD2 = 0x00000004, + TID_FTD3 = 0x00000008, + TID_TFUD = 0x00000100, + TID_TFWD = 0x00000200, + TID_MFUD = 0x00000400, + TID_MFWD = 0x00000800, + TID_TDPD0 = 0x00010000, + TID_TDPD1 = 0x00020000, + TID_TDPD2 = 0x00040000, + TID_TDPD3 = 0x00080000, +}; + +/* ECMR */ +enum ECMR_BIT { + ECMR_PRM = 0x00000001, + ECMR_DM = 0x00000002, + ECMR_TE = 0x00000020, + ECMR_RE = 0x00000040, + ECMR_MPDE = 0x00000200, + ECMR_TXF = 0x00010000, /* Undocumented? */ + ECMR_RXF = 0x00020000, + ECMR_PFR = 0x00040000, + ECMR_ZPF = 0x00080000, /* Undocumented? */ + ECMR_RZPF = 0x00100000, + ECMR_DPAD = 0x00200000, + ECMR_RCSC = 0x00800000, + ECMR_TRCCM = 0x04000000, +}; + +/* ECSR */ +enum ECSR_BIT { + ECSR_ICD = 0x00000001, + ECSR_MPD = 0x00000002, + ECSR_LCHNG = 0x00000004, + ECSR_PHYI = 0x00000008, +}; + +/* ECSIPR */ +enum ECSIPR_BIT { + ECSIPR_ICDIP = 0x00000001, + ECSIPR_MPDIP = 0x00000002, + ECSIPR_LCHNGIP = 0x00000004, /* Undocumented? 
*/ +}; + +/* PIR */ +enum PIR_BIT { + PIR_MDC = 0x00000001, + PIR_MMD = 0x00000002, + PIR_MDO = 0x00000004, + PIR_MDI = 0x00000008, +}; + +/* PSR */ +enum PSR_BIT { + PSR_LMON = 0x00000001, +}; + +/* PIPR */ +enum PIPR_BIT { + PIPR_PHYIP = 0x00000001, +}; + +/* MPR */ +enum MPR_BIT { + MPR_MP = 0x0000ffff, +}; + +/* GECMR */ +enum GECMR_BIT { + GECMR_SPEED = 0x00000001, + GECMR_SPEED_100 = 0x00000000, + GECMR_SPEED_1000 = 0x00000001, +}; + +/* The Ethernet AVB descriptor definitions. */ +struct ravb_desc { + __le16 ds; /* Descriptor size */ + u8 cc; /* Content control MSBs (reserved) */ + u8 die_dt; /* Descriptor interrupt enable and type */ + __le32 dptr; /* Descriptor pointer */ +}; + +#define DPTR_ALIGN 4 /* Required descriptor pointer alignment */ + +enum DIE_DT { + /* Frame data */ + DT_FMID = 0x40, + DT_FSTART = 0x50, + DT_FEND = 0x60, + DT_FSINGLE = 0x70, + /* Chain control */ + DT_LINK = 0x80, + DT_LINKFIX = 0x90, + DT_EOS = 0xa0, + /* HW/SW arbitration */ + DT_FEMPTY = 0xc0, + DT_FEMPTY_IS = 0xd0, + DT_FEMPTY_IC = 0xe0, + DT_FEMPTY_ND = 0xf0, + DT_LEMPTY = 0x20, + DT_EEMPTY = 0x30, +}; + +struct ravb_rx_desc { + __le16 ds_cc; /* Descriptor size and content control LSBs */ + u8 msc; /* MAC status code */ + u8 die_dt; /* Descriptor interrupt enable and type */ + __le32 dptr; /* Descpriptor pointer */ +}; + +struct ravb_ex_rx_desc { + __le16 ds_cc; /* Descriptor size and content control lower bits */ + u8 msc; /* MAC status code */ + u8 die_dt; /* Descriptor interrupt enable and type */ + __le32 dptr; /* Descpriptor pointer */ + __le32 ts_n; /* Timestampe nsec */ + __le32 ts_sl; /* Timestamp low */ + __le16 ts_sh; /* Timestamp high */ + __le16 res; /* Reserved bits */ +}; + +enum RX_DS_CC_BIT { + RX_DS = 0x0fff, /* Data size */ + RX_TR = 0x1000, /* Truncation indication */ + RX_EI = 0x2000, /* Error indication */ + RX_PS = 0xc000, /* Padding selection */ +}; + +/* E-MAC status code */ +enum MSC_BIT { + MSC_CRC = 0x01, /* Frame CRC error */ + MSC_RFE = 0x02, /* Frame reception error (flagged by PHY) */ + MSC_RTSF = 0x04, /* Frame length error (frame too short) */ + MSC_RTLF = 0x08, /* Frame length error (frame too long) */ + MSC_FRE = 0x10, /* Fraction error (not a multiple of 8 bits) */ + MSC_CRL = 0x20, /* Carrier lost */ + MSC_CEEF = 0x40, /* Carrier extension error */ + MSC_MC = 0x80, /* Multicast frame reception */ +}; + +struct ravb_tx_desc { + __le16 ds_tagl; /* Descriptor size and frame tag LSBs */ + u8 tagh_tsr; /* Frame tag MSBs and timestamp storage request bit */ + u8 die_dt; /* Descriptor interrupt enable and type */ + __le32 dptr; /* Descpriptor pointer */ +}; + +enum TX_DS_TAGL_BIT { + TX_DS = 0x0fff, /* Data size */ + TX_TAGL = 0xf000, /* Frame tag LSBs */ +}; + +enum TX_TAGH_TSR_BIT { + TX_TAGH = 0x3f, /* Frame tag MSBs */ + TX_TSR = 0x40, /* Timestamp storage request */ +}; +enum RAVB_QUEUE { + RAVB_BE = 0, /* Best Effort Queue */ + RAVB_NC, /* Network Control Queue */ +}; + +#define DBAT_ENTRY_NUM 22 +#define RX_QUEUE_OFFSET 4 +#define NUM_RX_QUEUE 2 +#define NUM_TX_QUEUE 2 +#define NUM_TX_DESC 2 /* TX descriptors per packet */ + +struct ravb_tstamp_skb { + struct list_head list; + struct sk_buff *skb; + u16 tag; +}; + +struct ravb_ptp_perout { + u32 target; + u32 period; +}; + +#define N_EXT_TS 1 +#define N_PER_OUT 1 + +struct ravb_ptp { + struct ptp_clock *clock; + struct ptp_clock_info info; + u32 default_addend; + u32 current_addend; + int extts[N_EXT_TS]; + struct ravb_ptp_perout perout[N_PER_OUT]; +}; + +enum ravb_chip_id { + RCAR_GEN2, + RCAR_GEN3, +}; + 
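
Reading note: the cur_rx/cur_tx and dirty_rx/dirty_tx members of struct
ravb_private, defined next, are free-running counters rather than ring offsets.
A minimal sketch of the indexing arithmetic used throughout ravb_main.c
(ravb_rx(), ravb_tx_free()); the helpers are illustrative and not part of the
patch.

    /* Illustration only: map a free-running index to its ring slot and
     * count the descriptors between producer and consumer.  This is the
     * "counter % ring size" pattern used by ravb_rx() and ravb_tx_free().
     */
    static inline u32 example_ring_slot(u32 counter, u32 num_ring)
    {
            return counter % num_ring;      /* e.g. 1030 % 1024 == slot 6 */
    }

    static inline u32 example_ring_in_flight(u32 cur, u32 dirty)
    {
            return cur - dirty;     /* unsigned math handles wraparound */
    }
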
+struct ravb_private { + struct net_device *ndev; + struct platform_device *pdev; + void __iomem *addr; + struct clk *clk; + struct mdiobb_ctrl mdiobb; + u32 num_rx_ring[NUM_RX_QUEUE]; + u32 num_tx_ring[NUM_TX_QUEUE]; + u32 desc_bat_size; + dma_addr_t desc_bat_dma; + struct ravb_desc *desc_bat; + dma_addr_t rx_desc_dma[NUM_RX_QUEUE]; + dma_addr_t tx_desc_dma[NUM_TX_QUEUE]; + struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE]; + struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE]; + void *tx_align[NUM_TX_QUEUE]; + struct sk_buff **rx_skb[NUM_RX_QUEUE]; + struct sk_buff **tx_skb[NUM_TX_QUEUE]; + u32 rx_over_errors; + u32 rx_fifo_errors; + struct net_device_stats stats[NUM_RX_QUEUE]; + u32 tstamp_tx_ctrl; + u32 tstamp_rx_ctrl; + struct list_head ts_skb_list; + u32 ts_skb_tag; + struct ravb_ptp ptp; + spinlock_t lock; /* Register access lock */ + u32 cur_rx[NUM_RX_QUEUE]; /* Consumer ring indices */ + u32 dirty_rx[NUM_RX_QUEUE]; /* Producer ring indices */ + u32 cur_tx[NUM_TX_QUEUE]; + u32 dirty_tx[NUM_TX_QUEUE]; + u32 rx_buf_sz; /* Based on MTU+slack. */ + struct napi_struct napi[NUM_RX_QUEUE]; + struct work_struct work; + /* MII transceiver section. */ + struct mii_bus *mii_bus; /* MDIO bus control */ + int link; + phy_interface_t phy_interface; + int msg_enable; + int speed; + int emac_irq; + enum ravb_chip_id chip_id; + int rx_irqs[NUM_RX_QUEUE]; + int tx_irqs[NUM_TX_QUEUE]; + + unsigned no_avb_link:1; + unsigned avb_link_active_low:1; + unsigned wol_enabled:1; +}; + +static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg) +{ + struct ravb_private *priv = netdev_priv(ndev); + + return ioread32(priv->addr + reg); +} + +static inline void ravb_write(struct net_device *ndev, u32 data, + enum ravb_reg reg) +{ + struct ravb_private *priv = netdev_priv(ndev); + + iowrite32(data, priv->addr + reg); +} + +void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, + u32 set); +int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value); + +void ravb_ptp_interrupt(struct net_device *ndev); +void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev); +void ravb_ptp_stop(struct net_device *ndev); + +#endif /* #ifndef __RAVB_H__ */ diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c new file mode 100644 index 000000000..c24b7ea37 --- /dev/null +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -0,0 +1,2350 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Renesas Ethernet AVB device driver + * + * Copyright (C) 2014-2019 Renesas Electronics Corporation + * Copyright (C) 2015 Renesas Solutions Corp. + * Copyright (C) 2015-2016 Cogent Embedded, Inc. 
<source@cogentembedded.com> + * + * Based on the SuperH Ethernet driver + */ + +#include <linux/cache.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/net_tstamp.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/sys_soc.h> + +#include <asm/div64.h> + +#include "ravb.h" + +#define RAVB_DEF_MSG_ENABLE \ + (NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +static const char *ravb_rx_irqs[NUM_RX_QUEUE] = { + "ch0", /* RAVB_BE */ + "ch1", /* RAVB_NC */ +}; + +static const char *ravb_tx_irqs[NUM_TX_QUEUE] = { + "ch18", /* RAVB_BE */ + "ch19", /* RAVB_NC */ +}; + +void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, + u32 set) +{ + ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg); +} + +int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value) +{ + int i; + + for (i = 0; i < 10000; i++) { + if ((ravb_read(ndev, reg) & mask) == value) + return 0; + udelay(10); + } + return -ETIMEDOUT; +} + +static int ravb_config(struct net_device *ndev) +{ + int error; + + /* Set config mode */ + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); + /* Check if the operating mode is changed to the config mode */ + error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG); + if (error) + netdev_err(ndev, "failed to switch device to config mode\n"); + + return error; +} + +static void ravb_set_rate(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + switch (priv->speed) { + case 100: /* 100BASE */ + ravb_write(ndev, GECMR_SPEED_100, GECMR); + break; + case 1000: /* 1000BASE */ + ravb_write(ndev, GECMR_SPEED_1000, GECMR); + break; + } +} + +static void ravb_set_buffer_align(struct sk_buff *skb) +{ + u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1); + + if (reserve) + skb_reserve(skb, RAVB_ALIGN - reserve); +} + +/* Get MAC address from the MAC address registers + * + * Ethernet AVB device doesn't have ROM for MAC address. + * This function gets the MAC address that was used by a bootloader. + */ +static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac) +{ + if (mac) { + ether_addr_copy(ndev->dev_addr, mac); + } else { + u32 mahr = ravb_read(ndev, MAHR); + u32 malr = ravb_read(ndev, MALR); + + ndev->dev_addr[0] = (mahr >> 24) & 0xFF; + ndev->dev_addr[1] = (mahr >> 16) & 0xFF; + ndev->dev_addr[2] = (mahr >> 8) & 0xFF; + ndev->dev_addr[3] = (mahr >> 0) & 0xFF; + ndev->dev_addr[4] = (malr >> 8) & 0xFF; + ndev->dev_addr[5] = (malr >> 0) & 0xFF; + } +} + +static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set) +{ + struct ravb_private *priv = container_of(ctrl, struct ravb_private, + mdiobb); + + ravb_modify(priv->ndev, PIR, mask, set ? 
mask : 0); +} + +/* MDC pin control */ +static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level) +{ + ravb_mdio_ctrl(ctrl, PIR_MDC, level); +} + +/* Data I/O pin control */ +static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output) +{ + ravb_mdio_ctrl(ctrl, PIR_MMD, output); +} + +/* Set data bit */ +static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value) +{ + ravb_mdio_ctrl(ctrl, PIR_MDO, value); +} + +/* Get data bit */ +static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl) +{ + struct ravb_private *priv = container_of(ctrl, struct ravb_private, + mdiobb); + + return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0; +} + +/* MDIO bus control struct */ +static struct mdiobb_ops bb_ops = { + .owner = THIS_MODULE, + .set_mdc = ravb_set_mdc, + .set_mdio_dir = ravb_set_mdio_dir, + .set_mdio_data = ravb_set_mdio_data, + .get_mdio_data = ravb_get_mdio_data, +}; + +/* Free TX skb function for AVB-IP */ +static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct net_device_stats *stats = &priv->stats[q]; + struct ravb_tx_desc *desc; + int free_num = 0; + int entry; + u32 size; + + for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { + bool txed; + + entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * + NUM_TX_DESC); + desc = &priv->tx_ring[q][entry]; + txed = desc->die_dt == DT_FEMPTY; + if (free_txed_only && !txed) + break; + /* Descriptor type must be checked before all other reads */ + dma_rmb(); + size = le16_to_cpu(desc->ds_tagl) & TX_DS; + /* Free the original skb. */ + if (priv->tx_skb[q][entry / NUM_TX_DESC]) { + dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), + size, DMA_TO_DEVICE); + /* Last packet descriptor? */ + if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { + entry /= NUM_TX_DESC; + dev_kfree_skb_any(priv->tx_skb[q][entry]); + priv->tx_skb[q][entry] = NULL; + if (txed) + stats->tx_packets++; + } + free_num++; + } + if (txed) + stats->tx_bytes += size; + desc->die_dt = DT_EEMPTY; + } + return free_num; +} + +/* Free skb's and DMA buffers for Ethernet AVB */ +static void ravb_ring_free(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + int ring_size; + int i; + + if (priv->rx_ring[q]) { + for (i = 0; i < priv->num_rx_ring[q]; i++) { + struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; + + if (!dma_mapping_error(ndev->dev.parent, + le32_to_cpu(desc->dptr))) + dma_unmap_single(ndev->dev.parent, + le32_to_cpu(desc->dptr), + priv->rx_buf_sz, + DMA_FROM_DEVICE); + } + ring_size = sizeof(struct ravb_ex_rx_desc) * + (priv->num_rx_ring[q] + 1); + dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], + priv->rx_desc_dma[q]); + priv->rx_ring[q] = NULL; + } + + if (priv->tx_ring[q]) { + ravb_tx_free(ndev, q, false); + + ring_size = sizeof(struct ravb_tx_desc) * + (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], + priv->tx_desc_dma[q]); + priv->tx_ring[q] = NULL; + } + + /* Free RX skb ringbuffer */ + if (priv->rx_skb[q]) { + for (i = 0; i < priv->num_rx_ring[q]; i++) + dev_kfree_skb(priv->rx_skb[q][i]); + } + kfree(priv->rx_skb[q]); + priv->rx_skb[q] = NULL; + + /* Free aligned TX buffers */ + kfree(priv->tx_align[q]); + priv->tx_align[q] = NULL; + + /* Free TX skb ringbuffer. + * SKBs are freed by ravb_tx_free() call above. 
+ */ + kfree(priv->tx_skb[q]); + priv->tx_skb[q] = NULL; +} + +/* Format skb and descriptor buffer for Ethernet AVB */ +static void ravb_ring_format(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct ravb_ex_rx_desc *rx_desc; + struct ravb_tx_desc *tx_desc; + struct ravb_desc *desc; + int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; + int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * + NUM_TX_DESC; + dma_addr_t dma_addr; + int i; + + priv->cur_rx[q] = 0; + priv->cur_tx[q] = 0; + priv->dirty_rx[q] = 0; + priv->dirty_tx[q] = 0; + + memset(priv->rx_ring[q], 0, rx_ring_size); + /* Build RX ring buffer */ + for (i = 0; i < priv->num_rx_ring[q]; i++) { + /* RX descriptor */ + rx_desc = &priv->rx_ring[q][i]; + rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz); + dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, + priv->rx_buf_sz, + DMA_FROM_DEVICE); + /* We just set the data size to 0 for a failed mapping which + * should prevent DMA from happening... + */ + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + rx_desc->ds_cc = cpu_to_le16(0); + rx_desc->dptr = cpu_to_le32(dma_addr); + rx_desc->die_dt = DT_FEMPTY; + } + rx_desc = &priv->rx_ring[q][i]; + rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); + rx_desc->die_dt = DT_LINKFIX; /* type */ + + memset(priv->tx_ring[q], 0, tx_ring_size); + /* Build TX ring buffer */ + for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; + i++, tx_desc++) { + tx_desc->die_dt = DT_EEMPTY; + tx_desc++; + tx_desc->die_dt = DT_EEMPTY; + } + tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); + tx_desc->die_dt = DT_LINKFIX; /* type */ + + /* RX descriptor base address for best effort */ + desc = &priv->desc_bat[RX_QUEUE_OFFSET + q]; + desc->die_dt = DT_LINKFIX; /* type */ + desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); + + /* TX descriptor base address for best effort */ + desc = &priv->desc_bat[q]; + desc->die_dt = DT_LINKFIX; /* type */ + desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); +} + +/* Init skb and descriptor buffer for Ethernet AVB */ +static int ravb_ring_init(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct sk_buff *skb; + int ring_size; + int i; + + priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) + + ETH_HLEN + VLAN_HLEN + sizeof(__sum16); + + /* Allocate RX and TX skb rings */ + priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], + sizeof(*priv->rx_skb[q]), GFP_KERNEL); + priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q], + sizeof(*priv->tx_skb[q]), GFP_KERNEL); + if (!priv->rx_skb[q] || !priv->tx_skb[q]) + goto error; + + for (i = 0; i < priv->num_rx_ring[q]; i++) { + skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1); + if (!skb) + goto error; + ravb_set_buffer_align(skb); + priv->rx_skb[q][i] = skb; + } + + /* Allocate rings for the aligned buffers */ + priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + + DPTR_ALIGN - 1, GFP_KERNEL); + if (!priv->tx_align[q]) + goto error; + + /* Allocate all RX descriptors. */ + ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); + priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, + &priv->rx_desc_dma[q], + GFP_KERNEL); + if (!priv->rx_ring[q]) + goto error; + + priv->dirty_rx[q] = 0; + + /* Allocate all TX descriptors. 
*/ + ring_size = sizeof(struct ravb_tx_desc) * + (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, + &priv->tx_desc_dma[q], + GFP_KERNEL); + if (!priv->tx_ring[q]) + goto error; + + return 0; + +error: + ravb_ring_free(ndev, q); + + return -ENOMEM; +} + +/* E-MAC init function */ +static void ravb_emac_init(struct net_device *ndev) +{ + /* Receive frame limit set register */ + ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); + + /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */ + ravb_write(ndev, ECMR_ZPF | ECMR_DM | + (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) | + ECMR_TE | ECMR_RE, ECMR); + + ravb_set_rate(ndev); + + /* Set MAC address */ + ravb_write(ndev, + (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | + (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); + ravb_write(ndev, + (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); + + /* E-MAC status register clear */ + ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR); + + /* E-MAC interrupt enable register */ + ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR); +} + +/* Device init function for Ethernet AVB */ +static int ravb_dmac_init(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + int error; + + /* Set CONFIG mode */ + error = ravb_config(ndev); + if (error) + return error; + + error = ravb_ring_init(ndev, RAVB_BE); + if (error) + return error; + error = ravb_ring_init(ndev, RAVB_NC); + if (error) { + ravb_ring_free(ndev, RAVB_BE); + return error; + } + + /* Descriptor format */ + ravb_ring_format(ndev, RAVB_BE); + ravb_ring_format(ndev, RAVB_NC); + +#if defined(__LITTLE_ENDIAN) + ravb_modify(ndev, CCC, CCC_BOC, 0); +#else + ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC); +#endif + + /* Set AVB RX */ + ravb_write(ndev, + RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR); + + /* Set FIFO size */ + ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC); + + /* Timestamp enable */ + ravb_write(ndev, TCCR_TFEN, TCCR); + + /* Interrupt init: */ + if (priv->chip_id == RCAR_GEN3) { + /* Clear DIL.DPLx */ + ravb_write(ndev, 0, DIL); + /* Set queue specific interrupt */ + ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE); + } + /* Frame receive */ + ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); + /* Disable FIFO full warning */ + ravb_write(ndev, 0, RIC1); + /* Receive FIFO full error, descriptor empty */ + ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); + /* Frame transmitted, timestamp FIFO updated */ + ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC); + + /* Setting the control will start the AVB-DMAC process. 
*/ + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION); + + return 0; +} + +static void ravb_get_tx_tstamp(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct ravb_tstamp_skb *ts_skb, *ts_skb2; + struct skb_shared_hwtstamps shhwtstamps; + struct sk_buff *skb; + struct timespec64 ts; + u16 tag, tfa_tag; + int count; + u32 tfa2; + + count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8; + while (count--) { + tfa2 = ravb_read(ndev, TFA2); + tfa_tag = (tfa2 & TFA2_TST) >> 16; + ts.tv_nsec = (u64)ravb_read(ndev, TFA0); + ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) | + ravb_read(ndev, TFA1); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = timespec64_to_ktime(ts); + list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, + list) { + skb = ts_skb->skb; + tag = ts_skb->tag; + list_del(&ts_skb->list); + kfree(ts_skb); + if (tag == tfa_tag) { + skb_tstamp_tx(skb, &shhwtstamps); + dev_consume_skb_any(skb); + break; + } else { + dev_kfree_skb_any(skb); + } + } + ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR); + } +} + +static void ravb_rx_csum(struct sk_buff *skb) +{ + u8 *hw_csum; + + /* The hardware checksum is contained in sizeof(__sum16) (2) bytes + * appended to packet data + */ + if (unlikely(skb->len < sizeof(__sum16))) + return; + hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); + skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + skb_trim(skb, skb->len - sizeof(__sum16)); +} + +/* Packet receive function for Ethernet AVB */ +static bool ravb_rx(struct net_device *ndev, int *quota, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + int entry = priv->cur_rx[q] % priv->num_rx_ring[q]; + int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) - + priv->cur_rx[q]; + struct net_device_stats *stats = &priv->stats[q]; + struct ravb_ex_rx_desc *desc; + struct sk_buff *skb; + dma_addr_t dma_addr; + struct timespec64 ts; + u8 desc_status; + u16 pkt_len; + int limit; + + boguscnt = min(boguscnt, *quota); + limit = boguscnt; + desc = &priv->rx_ring[q][entry]; + while (desc->die_dt != DT_FEMPTY) { + /* Descriptor type must be checked before all other reads */ + dma_rmb(); + desc_status = desc->msc; + pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; + + if (--boguscnt < 0) + break; + + /* We use 0-byte descriptors to mark the DMA mapping errors */ + if (!pkt_len) + continue; + + if (desc_status & MSC_MC) + stats->multicast++; + + if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | + MSC_CEEF)) { + stats->rx_errors++; + if (desc_status & MSC_CRC) + stats->rx_crc_errors++; + if (desc_status & MSC_RFE) + stats->rx_frame_errors++; + if (desc_status & (MSC_RTLF | MSC_RTSF)) + stats->rx_length_errors++; + if (desc_status & MSC_CEEF) + stats->rx_missed_errors++; + } else { + u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE; + + skb = priv->rx_skb[q][entry]; + priv->rx_skb[q][entry] = NULL; + dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), + priv->rx_buf_sz, + DMA_FROM_DEVICE); + get_ts &= (q == RAVB_NC) ? 
+ RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : + ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; + if (get_ts) { + struct skb_shared_hwtstamps *shhwtstamps; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) << + 32) | le32_to_cpu(desc->ts_sl); + ts.tv_nsec = le32_to_cpu(desc->ts_n); + shhwtstamps->hwtstamp = timespec64_to_ktime(ts); + } + + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + if (ndev->features & NETIF_F_RXCSUM) + ravb_rx_csum(skb); + napi_gro_receive(&priv->napi[q], skb); + stats->rx_packets++; + stats->rx_bytes += pkt_len; + } + + entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q]; + desc = &priv->rx_ring[q][entry]; + } + + /* Refill the RX ring buffers. */ + for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { + entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; + desc = &priv->rx_ring[q][entry]; + desc->ds_cc = cpu_to_le16(priv->rx_buf_sz); + + if (!priv->rx_skb[q][entry]) { + skb = netdev_alloc_skb(ndev, + priv->rx_buf_sz + + RAVB_ALIGN - 1); + if (!skb) + break; /* Better luck next round. */ + ravb_set_buffer_align(skb); + dma_addr = dma_map_single(ndev->dev.parent, skb->data, + le16_to_cpu(desc->ds_cc), + DMA_FROM_DEVICE); + skb_checksum_none_assert(skb); + /* We just set the data size to 0 for a failed mapping + * which should prevent DMA from happening... + */ + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + desc->ds_cc = cpu_to_le16(0); + desc->dptr = cpu_to_le32(dma_addr); + priv->rx_skb[q][entry] = skb; + } + /* Descriptor type must be set after all the above writes */ + dma_wmb(); + desc->die_dt = DT_FEMPTY; + } + + *quota -= limit - (++boguscnt); + + return boguscnt <= 0; +} + +static void ravb_rcv_snd_disable(struct net_device *ndev) +{ + /* Disable TX and RX */ + ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0); +} + +static void ravb_rcv_snd_enable(struct net_device *ndev) +{ + /* Enable TX and RX */ + ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); +} + +/* function for waiting dma process finished */ +static int ravb_stop_dma(struct net_device *ndev) +{ + int error; + + /* Wait for stopping the hardware TX process */ + error = ravb_wait(ndev, TCCR, + TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0); + if (error) + return error; + + error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3, + 0); + if (error) + return error; + + /* Stop the E-MAC's RX/TX processes. 
*/ + ravb_rcv_snd_disable(ndev); + + /* Wait for stopping the RX DMA process */ + error = ravb_wait(ndev, CSR, CSR_RPO, 0); + if (error) + return error; + + /* Stop AVB-DMAC process */ + return ravb_config(ndev); +} + +/* E-MAC interrupt handler */ +static void ravb_emac_interrupt_unlocked(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + u32 ecsr, psr; + + ecsr = ravb_read(ndev, ECSR); + ravb_write(ndev, ecsr, ECSR); /* clear interrupt */ + + if (ecsr & ECSR_MPD) + pm_wakeup_event(&priv->pdev->dev, 0); + if (ecsr & ECSR_ICD) + ndev->stats.tx_carrier_errors++; + if (ecsr & ECSR_LCHNG) { + /* Link changed */ + if (priv->no_avb_link) + return; + psr = ravb_read(ndev, PSR); + if (priv->avb_link_active_low) + psr ^= PSR_LMON; + if (!(psr & PSR_LMON)) { + /* DIsable RX and TX */ + ravb_rcv_snd_disable(ndev); + } else { + /* Enable RX and TX */ + ravb_rcv_snd_enable(ndev); + } + } +} + +static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + + spin_lock(&priv->lock); + ravb_emac_interrupt_unlocked(ndev); + mmiowb(); + spin_unlock(&priv->lock); + return IRQ_HANDLED; +} + +/* Error interrupt handler */ +static void ravb_error_interrupt(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + u32 eis, ris2; + + eis = ravb_read(ndev, EIS); + ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); + if (eis & EIS_QFS) { + ris2 = ravb_read(ndev, RIS2); + ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED), + RIS2); + + /* Receive Descriptor Empty int */ + if (ris2 & RIS2_QFF0) + priv->stats[RAVB_BE].rx_over_errors++; + + /* Receive Descriptor Empty int */ + if (ris2 & RIS2_QFF1) + priv->stats[RAVB_NC].rx_over_errors++; + + /* Receive FIFO Overflow int */ + if (ris2 & RIS2_RFFF) + priv->rx_fifo_errors++; + } +} + +static bool ravb_queue_interrupt(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + u32 ris0 = ravb_read(ndev, RIS0); + u32 ric0 = ravb_read(ndev, RIC0); + u32 tis = ravb_read(ndev, TIS); + u32 tic = ravb_read(ndev, TIC); + + if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) { + if (napi_schedule_prep(&priv->napi[q])) { + /* Mask RX and TX interrupts */ + if (priv->chip_id == RCAR_GEN2) { + ravb_write(ndev, ric0 & ~BIT(q), RIC0); + ravb_write(ndev, tic & ~BIT(q), TIC); + } else { + ravb_write(ndev, BIT(q), RID0); + ravb_write(ndev, BIT(q), TID); + } + __napi_schedule(&priv->napi[q]); + } else { + netdev_warn(ndev, + "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n", + ris0, ric0); + netdev_warn(ndev, + " tx status 0x%08x, tx mask 0x%08x.\n", + tis, tic); + } + return true; + } + return false; +} + +static bool ravb_timestamp_interrupt(struct net_device *ndev) +{ + u32 tis = ravb_read(ndev, TIS); + + if (tis & TIS_TFUF) { + ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS); + ravb_get_tx_tstamp(ndev); + return true; + } + return false; +} + +static irqreturn_t ravb_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + irqreturn_t result = IRQ_NONE; + u32 iss; + + spin_lock(&priv->lock); + /* Get interrupt status */ + iss = ravb_read(ndev, ISS); + + /* Received and transmitted interrupts */ + if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) { + int q; + + /* Timestamp updated */ + if (ravb_timestamp_interrupt(ndev)) + result = IRQ_HANDLED; + + /* Network control and best effort queue RX/TX */ + for (q = RAVB_NC; q >= RAVB_BE; q--) { + if 
(ravb_queue_interrupt(ndev, q)) + result = IRQ_HANDLED; + } + } + + /* E-MAC status summary */ + if (iss & ISS_MS) { + ravb_emac_interrupt_unlocked(ndev); + result = IRQ_HANDLED; + } + + /* Error status summary */ + if (iss & ISS_ES) { + ravb_error_interrupt(ndev); + result = IRQ_HANDLED; + } + + /* gPTP interrupt status summary */ + if (iss & ISS_CGIS) { + ravb_ptp_interrupt(ndev); + result = IRQ_HANDLED; + } + + mmiowb(); + spin_unlock(&priv->lock); + return result; +} + +/* Timestamp/Error/gPTP interrupt handler */ +static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + irqreturn_t result = IRQ_NONE; + u32 iss; + + spin_lock(&priv->lock); + /* Get interrupt status */ + iss = ravb_read(ndev, ISS); + + /* Timestamp updated */ + if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev)) + result = IRQ_HANDLED; + + /* Error status summary */ + if (iss & ISS_ES) { + ravb_error_interrupt(ndev); + result = IRQ_HANDLED; + } + + /* gPTP interrupt status summary */ + if (iss & ISS_CGIS) { + ravb_ptp_interrupt(ndev); + result = IRQ_HANDLED; + } + + mmiowb(); + spin_unlock(&priv->lock); + return result; +} + +static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + irqreturn_t result = IRQ_NONE; + + spin_lock(&priv->lock); + + /* Network control/Best effort queue RX/TX */ + if (ravb_queue_interrupt(ndev, q)) + result = IRQ_HANDLED; + + mmiowb(); + spin_unlock(&priv->lock); + return result; +} + +static irqreturn_t ravb_be_interrupt(int irq, void *dev_id) +{ + return ravb_dma_interrupt(irq, dev_id, RAVB_BE); +} + +static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id) +{ + return ravb_dma_interrupt(irq, dev_id, RAVB_NC); +} + +static int ravb_poll(struct napi_struct *napi, int budget) +{ + struct net_device *ndev = napi->dev; + struct ravb_private *priv = netdev_priv(ndev); + unsigned long flags; + int q = napi - priv->napi; + int mask = BIT(q); + int quota = budget; + u32 ris0, tis; + + for (;;) { + tis = ravb_read(ndev, TIS); + ris0 = ravb_read(ndev, RIS0); + if (!((ris0 & mask) || (tis & mask))) + break; + + /* Processing RX Descriptor Ring */ + if (ris0 & mask) { + /* Clear RX interrupt */ + ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); + if (ravb_rx(ndev, "a, q)) + goto out; + } + /* Processing TX Descriptor Ring */ + if (tis & mask) { + spin_lock_irqsave(&priv->lock, flags); + /* Clear TX interrupt */ + ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); + ravb_tx_free(ndev, q, true); + netif_wake_subqueue(ndev, q); + mmiowb(); + spin_unlock_irqrestore(&priv->lock, flags); + } + } + + napi_complete(napi); + + /* Re-enable RX/TX interrupts */ + spin_lock_irqsave(&priv->lock, flags); + if (priv->chip_id == RCAR_GEN2) { + ravb_modify(ndev, RIC0, mask, mask); + ravb_modify(ndev, TIC, mask, mask); + } else { + ravb_write(ndev, mask, RIE0); + ravb_write(ndev, mask, TIE); + } + mmiowb(); + spin_unlock_irqrestore(&priv->lock, flags); + + /* Receive error message handling */ + priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; + priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; + if (priv->rx_over_errors != ndev->stats.rx_over_errors) + ndev->stats.rx_over_errors = priv->rx_over_errors; + if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) + ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; +out: + return budget - quota; +} + +/* PHY state control function */ +static void 
ravb_adjust_link(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + bool new_state = false; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* Disable TX and RX right over here, if E-MAC change is ignored */ + if (priv->no_avb_link) + ravb_rcv_snd_disable(ndev); + + if (phydev->link) { + if (phydev->speed != priv->speed) { + new_state = true; + priv->speed = phydev->speed; + ravb_set_rate(ndev); + } + if (!priv->link) { + ravb_modify(ndev, ECMR, ECMR_TXF, 0); + new_state = true; + priv->link = phydev->link; + } + } else if (priv->link) { + new_state = true; + priv->link = 0; + priv->speed = 0; + } + + /* Enable TX and RX right over here, if E-MAC change is ignored */ + if (priv->no_avb_link && phydev->link) + ravb_rcv_snd_enable(ndev); + + mmiowb(); + spin_unlock_irqrestore(&priv->lock, flags); + + if (new_state && netif_msg_link(priv)) + phy_print_status(phydev); +} + +static const struct soc_device_attribute r8a7795es10[] = { + { .soc_id = "r8a7795", .revision = "ES1.0", }, + { /* sentinel */ } +}; + +/* PHY init function */ +static int ravb_phy_init(struct net_device *ndev) +{ + struct device_node *np = ndev->dev.parent->of_node; + struct ravb_private *priv = netdev_priv(ndev); + struct phy_device *phydev; + struct device_node *pn; + int err; + + priv->link = 0; + priv->speed = 0; + + /* Try connecting to PHY */ + pn = of_parse_phandle(np, "phy-handle", 0); + if (!pn) { + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (of_phy_is_fixed_link(np)) { + err = of_phy_register_fixed_link(np); + if (err) + return err; + } + pn = of_node_get(np); + } + phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, + priv->phy_interface); + of_node_put(pn); + if (!phydev) { + netdev_err(ndev, "failed to connect PHY\n"); + err = -ENOENT; + goto err_deregister_fixed_link; + } + + /* This driver only support 10/100Mbit speeds on R-Car H3 ES1.0 + * at this time. 
+ */ + if (soc_device_match(r8a7795es10)) { + err = phy_set_max_speed(phydev, SPEED_100); + if (err) { + netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n"); + goto err_phy_disconnect; + } + + netdev_info(ndev, "limited PHY to 100Mbit/s\n"); + } + + /* 10BASE is not supported */ + phydev->supported &= ~PHY_10BT_FEATURES; + + phy_attached_info(phydev); + + return 0; + +err_phy_disconnect: + phy_disconnect(phydev); +err_deregister_fixed_link: + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + + return err; +} + +/* PHY control start function */ +static int ravb_phy_start(struct net_device *ndev) +{ + int error; + + error = ravb_phy_init(ndev); + if (error) + return error; + + phy_start(ndev->phydev); + + return 0; +} + +static u32 ravb_get_msglevel(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + return priv->msg_enable; +} + +static void ravb_set_msglevel(struct net_device *ndev, u32 value) +{ + struct ravb_private *priv = netdev_priv(ndev); + + priv->msg_enable = value; +} + +static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_queue_0_current", + "tx_queue_0_current", + "rx_queue_0_dirty", + "tx_queue_0_dirty", + "rx_queue_0_packets", + "tx_queue_0_packets", + "rx_queue_0_bytes", + "tx_queue_0_bytes", + "rx_queue_0_mcast_packets", + "rx_queue_0_errors", + "rx_queue_0_crc_errors", + "rx_queue_0_frame_errors", + "rx_queue_0_length_errors", + "rx_queue_0_missed_errors", + "rx_queue_0_over_errors", + + "rx_queue_1_current", + "tx_queue_1_current", + "rx_queue_1_dirty", + "tx_queue_1_dirty", + "rx_queue_1_packets", + "tx_queue_1_packets", + "rx_queue_1_bytes", + "tx_queue_1_bytes", + "rx_queue_1_mcast_packets", + "rx_queue_1_errors", + "rx_queue_1_crc_errors", + "rx_queue_1_frame_errors", + "rx_queue_1_length_errors", + "rx_queue_1_missed_errors", + "rx_queue_1_over_errors", +}; + +#define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats) + +static int ravb_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return RAVB_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void ravb_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *estats, u64 *data) +{ + struct ravb_private *priv = netdev_priv(ndev); + int i = 0; + int q; + + /* Device-specific stats */ + for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) { + struct net_device_stats *stats = &priv->stats[q]; + + data[i++] = priv->cur_rx[q]; + data[i++] = priv->cur_tx[q]; + data[i++] = priv->dirty_rx[q]; + data[i++] = priv->dirty_tx[q]; + data[i++] = stats->rx_packets; + data[i++] = stats->tx_packets; + data[i++] = stats->rx_bytes; + data[i++] = stats->tx_bytes; + data[i++] = stats->multicast; + data[i++] = stats->rx_errors; + data[i++] = stats->rx_crc_errors; + data[i++] = stats->rx_frame_errors; + data[i++] = stats->rx_length_errors; + data[i++] = stats->rx_missed_errors; + data[i++] = stats->rx_over_errors; + } +} + +static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats)); + break; + } +} + +static void ravb_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct ravb_private *priv = netdev_priv(ndev); + + ring->rx_max_pending = BE_RX_RING_MAX; + ring->tx_max_pending = BE_TX_RING_MAX; + ring->rx_pending = priv->num_rx_ring[RAVB_BE]; + ring->tx_pending = priv->num_tx_ring[RAVB_BE]; +} + +static int ravb_set_ringparam(struct net_device *ndev, + struct 
ethtool_ringparam *ring) +{ + struct ravb_private *priv = netdev_priv(ndev); + int error; + + if (ring->tx_pending > BE_TX_RING_MAX || + ring->rx_pending > BE_RX_RING_MAX || + ring->tx_pending < BE_TX_RING_MIN || + ring->rx_pending < BE_RX_RING_MIN) + return -EINVAL; + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + if (netif_running(ndev)) { + netif_device_detach(ndev); + /* Stop PTP Clock driver */ + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_stop(ndev); + /* Wait for DMA stopping */ + error = ravb_stop_dma(ndev); + if (error) { + netdev_err(ndev, + "cannot set ringparam! Any AVB processes are still running?\n"); + return error; + } + synchronize_irq(ndev->irq); + + /* Free all the skb's in the RX queue and the DMA buffers. */ + ravb_ring_free(ndev, RAVB_BE); + ravb_ring_free(ndev, RAVB_NC); + } + + /* Set new parameters */ + priv->num_rx_ring[RAVB_BE] = ring->rx_pending; + priv->num_tx_ring[RAVB_BE] = ring->tx_pending; + + if (netif_running(ndev)) { + error = ravb_dmac_init(ndev); + if (error) { + netdev_err(ndev, + "%s: ravb_dmac_init() failed, error %d\n", + __func__, error); + return error; + } + + ravb_emac_init(ndev); + + /* Initialise PTP Clock driver */ + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_init(ndev, priv->pdev); + + netif_device_attach(ndev); + } + + return 0; +} + +static int ravb_get_ts_info(struct net_device *ndev, + struct ethtool_ts_info *info) +{ + struct ravb_private *priv = netdev_priv(ndev); + + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_ALL); + info->phc_index = ptp_clock_index(priv->ptp.clock); + + return 0; +} + +static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ravb_private *priv = netdev_priv(ndev); + + wol->supported = WAKE_MAGIC; + wol->wolopts = priv->wol_enabled ? 
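+ /* Illustrative note: only MagicPacket wake-up is advertised here, so
+ * the expected way to arm it from userspace (assuming the interface
+ * is named eth0) is:
+ *
+ *	ethtool -s eth0 wol g
+ *
+ * ravb_set_wol() below rejects any option other than WAKE_MAGIC.
+ */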
WAKE_MAGIC : 0; +} + +static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ravb_private *priv = netdev_priv(ndev); + + if (wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); + + device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled); + + return 0; +} + +static const struct ethtool_ops ravb_ethtool_ops = { + .nway_reset = phy_ethtool_nway_reset, + .get_msglevel = ravb_get_msglevel, + .set_msglevel = ravb_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = ravb_get_strings, + .get_ethtool_stats = ravb_get_ethtool_stats, + .get_sset_count = ravb_get_sset_count, + .get_ringparam = ravb_get_ringparam, + .set_ringparam = ravb_set_ringparam, + .get_ts_info = ravb_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_wol = ravb_get_wol, + .set_wol = ravb_set_wol, +}; + +static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, + struct net_device *ndev, struct device *dev, + const char *ch) +{ + char *name; + int error; + + name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch); + if (!name) + return -ENOMEM; + error = request_irq(irq, handler, 0, name, ndev); + if (error) + netdev_err(ndev, "cannot request IRQ %s\n", name); + + return error; +} + +/* Network device open function for Ethernet AVB */ +static int ravb_open(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + int error; + + napi_enable(&priv->napi[RAVB_BE]); + napi_enable(&priv->napi[RAVB_NC]); + + if (priv->chip_id == RCAR_GEN2) { + error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, + ndev->name, ndev); + if (error) { + netdev_err(ndev, "cannot request IRQ\n"); + goto out_napi_off; + } + } else { + error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev, + dev, "ch22:multi"); + if (error) + goto out_napi_off; + error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev, + dev, "ch24:emac"); + if (error) + goto out_free_irq; + error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt, + ndev, dev, "ch0:rx_be"); + if (error) + goto out_free_irq_emac; + error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt, + ndev, dev, "ch18:tx_be"); + if (error) + goto out_free_irq_be_rx; + error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt, + ndev, dev, "ch1:rx_nc"); + if (error) + goto out_free_irq_be_tx; + error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt, + ndev, dev, "ch19:tx_nc"); + if (error) + goto out_free_irq_nc_rx; + } + + /* Device init */ + error = ravb_dmac_init(ndev); + if (error) + goto out_free_irq_nc_tx; + ravb_emac_init(ndev); + + /* Initialise PTP Clock driver */ + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_init(ndev, priv->pdev); + + netif_tx_start_all_queues(ndev); + + /* PHY control start */ + error = ravb_phy_start(ndev); + if (error) + goto out_ptp_stop; + + return 0; + +out_ptp_stop: + /* Stop PTP Clock driver */ + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_stop(ndev); +out_free_irq_nc_tx: + if (priv->chip_id == RCAR_GEN2) + goto out_free_irq; + free_irq(priv->tx_irqs[RAVB_NC], ndev); +out_free_irq_nc_rx: + free_irq(priv->rx_irqs[RAVB_NC], ndev); +out_free_irq_be_tx: + free_irq(priv->tx_irqs[RAVB_BE], ndev); +out_free_irq_be_rx: + free_irq(priv->rx_irqs[RAVB_BE], ndev); +out_free_irq_emac: + free_irq(priv->emac_irq, ndev); +out_free_irq: + 
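+ /* The unwind order mirrors the request order above: R-Car Gen3 frees
+ * its per-queue and E-MAC IRQs one by one, while Gen2, which requested
+ * only the single shared IRQ, jumps directly to this label.
+ */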
free_irq(ndev->irq, ndev);
+out_napi_off:
+ napi_disable(&priv->napi[RAVB_NC]);
+ napi_disable(&priv->napi[RAVB_BE]);
+ return error;
+}
+
+/* Timeout function for Ethernet AVB */
+static void ravb_tx_timeout(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ netif_err(priv, tx_err, ndev,
+ "transmit timed out, status %08x, resetting...\n",
+ ravb_read(ndev, ISS));
+
+ /* Bump the tx_errors counter */
+ ndev->stats.tx_errors++;
+
+ schedule_work(&priv->work);
+}
+
+static void ravb_tx_timeout_work(struct work_struct *work)
+{
+ struct ravb_private *priv = container_of(work, struct ravb_private,
+ work);
+ struct net_device *ndev = priv->ndev;
+ int error;
+
+ netif_tx_stop_all_queues(ndev);
+
+ /* Stop PTP Clock driver */
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_ptp_stop(ndev);
+
+ /* Wait for DMA stopping */
+ if (ravb_stop_dma(ndev)) {
+ /* If ravb_stop_dma() fails, the hardware is still operating
+ * for TX and/or RX. So, this should not call the following
+ * functions because ravb_dmac_init() can fail too.
+ * Also, this should not retry ravb_stop_dma() again and again
+ * here because it's possible to wait forever. So, this just
+ * re-enables the TX and RX and skips the following
+ * re-initialization procedure.
+ */
+ ravb_rcv_snd_enable(ndev);
+ goto out;
+ }
+
+ ravb_ring_free(ndev, RAVB_BE);
+ ravb_ring_free(ndev, RAVB_NC);
+
+ /* Device init */
+ error = ravb_dmac_init(ndev);
+ if (error) {
+ /* If ravb_dmac_init() fails, descriptors are freed. So, this
+ * should return here to avoid re-enabling the TX and RX in
+ * ravb_emac_init().
+ */
+ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
+ __func__, error);
+ return;
+ }
+ ravb_emac_init(ndev);
+
+out:
+ /* Initialise PTP Clock driver */
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
+}
+
+/* Packet transmit function for Ethernet AVB */
+static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u16 q = skb_get_queue_mapping(skb);
+ struct ravb_tstamp_skb *ts_skb;
+ struct ravb_tx_desc *desc;
+ unsigned long flags;
+ u32 dma_addr;
+ void *buffer;
+ u32 entry;
+ u32 len;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
+ NUM_TX_DESC) {
+ netif_err(priv, tx_queued, ndev,
+ "still transmitting with the full ring!\n");
+ netif_stop_subqueue(ndev, q);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ goto exit;
+
+ entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+ priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
+
+ buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+ entry / NUM_TX_DESC * DPTR_ALIGN;
+ len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+ /* Zero length DMA descriptors are problematic as they seem to
+ * terminate DMA transfers. Avoid them by simply using a length of
+ * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
+ *
+ * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
+ * data by the call to skb_put_padto() above, this is safe with
+ * respect to both the length of the first DMA descriptor (len)
+ * overflowing the available data and the length of the second DMA
+ * descriptor (skb->len - len) being negative.
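+ *
+ * Worked example (illustrative): if skb->data happens to be 4-byte
+ * aligned already, PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data is 0,
+ * so the first descriptor instead carries DPTR_ALIGN (4) bytes copied
+ * into the aligned bounce buffer, and the second maps skb->data + 4
+ * with skb->len - 4 >= ETH_ZLEN - 4 = 56 bytes, which is always
+ * positive.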
+ */ + if (len == 0) + len = DPTR_ALIGN; + + memcpy(buffer, skb->data, len); + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto drop; + + desc = &priv->tx_ring[q][entry]; + desc->ds_tagl = cpu_to_le16(len); + desc->dptr = cpu_to_le32(dma_addr); + + buffer = skb->data + len; + len = skb->len - len; + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto unmap; + + desc++; + desc->ds_tagl = cpu_to_le16(len); + desc->dptr = cpu_to_le32(dma_addr); + + /* TX timestamp required */ + if (q == RAVB_NC) { + ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); + if (!ts_skb) { + desc--; + dma_unmap_single(ndev->dev.parent, dma_addr, len, + DMA_TO_DEVICE); + goto unmap; + } + ts_skb->skb = skb_get(skb); + ts_skb->tag = priv->ts_skb_tag++; + priv->ts_skb_tag &= 0x3ff; + list_add_tail(&ts_skb->list, &priv->ts_skb_list); + + /* TAG and timestamp required flag */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; + desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); + } + + skb_tx_timestamp(skb); + /* Descriptor type must be set after all the above writes */ + dma_wmb(); + desc->die_dt = DT_FEND; + desc--; + desc->die_dt = DT_FSTART; + + ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q); + + priv->cur_tx[q] += NUM_TX_DESC; + if (priv->cur_tx[q] - priv->dirty_tx[q] > + (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && + !ravb_tx_free(ndev, q, true)) + netif_stop_subqueue(ndev, q); + +exit: + mmiowb(); + spin_unlock_irqrestore(&priv->lock, flags); + return NETDEV_TX_OK; + +unmap: + dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), + le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); +drop: + dev_kfree_skb_any(skb); + priv->tx_skb[q][entry / NUM_TX_DESC] = NULL; + goto exit; +} + +static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +{ + /* If skb needs TX timestamp, it is handled in network control queue */ + return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? 
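+ /* For example, gPTP daemons such as ptp4l request hardware TX
+ * timestamps through SO_TIMESTAMPING, which sets SKBTX_HW_TSTAMP and
+ * steers those frames to the network-control queue, where
+ * ravb_start_xmit() tags them for timestamp collection.
+ */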
RAVB_NC : + RAVB_BE; + +} + +static struct net_device_stats *ravb_get_stats(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct net_device_stats *nstats, *stats0, *stats1; + + nstats = &ndev->stats; + stats0 = &priv->stats[RAVB_BE]; + stats1 = &priv->stats[RAVB_NC]; + + nstats->tx_dropped += ravb_read(ndev, TROCR); + ravb_write(ndev, 0, TROCR); /* (write clear) */ + nstats->collisions += ravb_read(ndev, CDCR); + ravb_write(ndev, 0, CDCR); /* (write clear) */ + nstats->tx_carrier_errors += ravb_read(ndev, LCCR); + ravb_write(ndev, 0, LCCR); /* (write clear) */ + + nstats->tx_carrier_errors += ravb_read(ndev, CERCR); + ravb_write(ndev, 0, CERCR); /* (write clear) */ + nstats->tx_carrier_errors += ravb_read(ndev, CEECR); + ravb_write(ndev, 0, CEECR); /* (write clear) */ + + nstats->rx_packets = stats0->rx_packets + stats1->rx_packets; + nstats->tx_packets = stats0->tx_packets + stats1->tx_packets; + nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes; + nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes; + nstats->multicast = stats0->multicast + stats1->multicast; + nstats->rx_errors = stats0->rx_errors + stats1->rx_errors; + nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors; + nstats->rx_frame_errors = + stats0->rx_frame_errors + stats1->rx_frame_errors; + nstats->rx_length_errors = + stats0->rx_length_errors + stats1->rx_length_errors; + nstats->rx_missed_errors = + stats0->rx_missed_errors + stats1->rx_missed_errors; + nstats->rx_over_errors = + stats0->rx_over_errors + stats1->rx_over_errors; + + return nstats; +} + +/* Update promiscuous bit */ +static void ravb_set_rx_mode(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + ravb_modify(ndev, ECMR, ECMR_PRM, + ndev->flags & IFF_PROMISC ? ECMR_PRM : 0); + mmiowb(); + spin_unlock_irqrestore(&priv->lock, flags); +} + +/* Device close function for Ethernet AVB */ +static int ravb_close(struct net_device *ndev) +{ + struct device_node *np = ndev->dev.parent->of_node; + struct ravb_private *priv = netdev_priv(ndev); + struct ravb_tstamp_skb *ts_skb, *ts_skb2; + + netif_tx_stop_all_queues(ndev); + + /* Disable interrupts by clearing the interrupt masks. */ + ravb_write(ndev, 0, RIC0); + ravb_write(ndev, 0, RIC2); + ravb_write(ndev, 0, TIC); + + /* Stop PTP Clock driver */ + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_stop(ndev); + + /* Set the config mode to stop the AVB-DMAC's processes */ + if (ravb_stop_dma(ndev) < 0) + netdev_err(ndev, + "device will be stopped after h/w processes are done.\n"); + + /* Clear the timestamp list */ + list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { + list_del(&ts_skb->list); + kfree_skb(ts_skb->skb); + kfree(ts_skb); + } + + /* PHY disconnect */ + if (ndev->phydev) { + phy_stop(ndev->phydev); + phy_disconnect(ndev->phydev); + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + } + + if (priv->chip_id != RCAR_GEN2) { + free_irq(priv->tx_irqs[RAVB_NC], ndev); + free_irq(priv->rx_irqs[RAVB_NC], ndev); + free_irq(priv->tx_irqs[RAVB_BE], ndev); + free_irq(priv->rx_irqs[RAVB_BE], ndev); + free_irq(priv->emac_irq, ndev); + } + free_irq(ndev->irq, ndev); + + napi_disable(&priv->napi[RAVB_NC]); + napi_disable(&priv->napi[RAVB_BE]); + + /* Free all the skb's in the RX queue and the DMA buffers. 
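+ * Both the best-effort and the network-control rings are torn down
+ * here by the ravb_ring_free() calls below.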
*/ + ravb_ring_free(ndev, RAVB_BE); + ravb_ring_free(ndev, RAVB_NC); + + return 0; +} + +static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct hwtstamp_config config; + + config.flags = 0; + config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : + HWTSTAMP_TX_OFF; + switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) { + case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT: + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + break; + case RAVB_RXTSTAMP_TYPE_ALL: + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + config.rx_filter = HWTSTAMP_FILTER_NONE; + } + + return copy_to_user(req->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/* Control hardware time stamping */ +static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct hwtstamp_config config; + u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED; + u32 tstamp_tx_ctrl; + + if (copy_from_user(&config, req->ifr_data, sizeof(config))) + return -EFAULT; + + /* Reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + tstamp_tx_ctrl = 0; + break; + case HWTSTAMP_TX_ON: + tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + tstamp_rx_ctrl = 0; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; + break; + default: + config.rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL; + } + + priv->tstamp_tx_ctrl = tstamp_tx_ctrl; + priv->tstamp_rx_ctrl = tstamp_rx_ctrl; + + return copy_to_user(req->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/* ioctl to device function */ +static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) +{ + struct phy_device *phydev = ndev->phydev; + + if (!netif_running(ndev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + switch (cmd) { + case SIOCGHWTSTAMP: + return ravb_hwtstamp_get(ndev, req); + case SIOCSHWTSTAMP: + return ravb_hwtstamp_set(ndev, req); + } + + return phy_mii_ioctl(phydev, req, cmd); +} + +static int ravb_change_mtu(struct net_device *ndev, int new_mtu) +{ + if (netif_running(ndev)) + return -EBUSY; + + ndev->mtu = new_mtu; + netdev_update_features(ndev); + + return 0; +} + +static void ravb_set_rx_csum(struct net_device *ndev, bool enable) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* Disable TX and RX */ + ravb_rcv_snd_disable(ndev); + + /* Modify RX Checksum setting */ + ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? 
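+ /* Illustrative note: this path is reached when userspace toggles the
+ * feature, e.g. (assuming the interface is named eth0):
+ *
+ *	ethtool -K eth0 rx on
+ *
+ * The MAC is quiesced around the ECMR.RCSC update, hence the
+ * disable/enable bracket around this modify.
+ */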
ECMR_RCSC : 0); + + /* Enable TX and RX */ + ravb_rcv_snd_enable(ndev); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static int ravb_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + + if (changed & NETIF_F_RXCSUM) + ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM); + + ndev->features = features; + + return 0; +} + +static const struct net_device_ops ravb_netdev_ops = { + .ndo_open = ravb_open, + .ndo_stop = ravb_close, + .ndo_start_xmit = ravb_start_xmit, + .ndo_select_queue = ravb_select_queue, + .ndo_get_stats = ravb_get_stats, + .ndo_set_rx_mode = ravb_set_rx_mode, + .ndo_tx_timeout = ravb_tx_timeout, + .ndo_do_ioctl = ravb_do_ioctl, + .ndo_change_mtu = ravb_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_features = ravb_set_features, +}; + +/* MDIO bus init function */ +static int ravb_mdio_init(struct ravb_private *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + int error; + + /* Bitbang init */ + priv->mdiobb.ops = &bb_ops; + + /* MII controller setting */ + priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); + if (!priv->mii_bus) + return -ENOMEM; + + /* Hook up MII support for ethtool */ + priv->mii_bus->name = "ravb_mii"; + priv->mii_bus->parent = dev; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* Register MDIO bus */ + error = of_mdiobus_register(priv->mii_bus, dev->of_node); + if (error) + goto out_free_bus; + + return 0; + +out_free_bus: + free_mdio_bitbang(priv->mii_bus); + return error; +} + +/* MDIO bus release function */ +static int ravb_mdio_release(struct ravb_private *priv) +{ + /* Unregister mdio bus */ + mdiobus_unregister(priv->mii_bus); + + /* Free bitbang info */ + free_mdio_bitbang(priv->mii_bus); + + return 0; +} + +static const struct of_device_id ravb_match_table[] = { + { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 }, + { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 }, + { .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 }, + { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 }, + { .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 }, + { } +}; +MODULE_DEVICE_TABLE(of, ravb_match_table); + +static int ravb_set_gti(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct device *dev = ndev->dev.parent; + unsigned long rate; + uint64_t inc; + + rate = clk_get_rate(priv->clk); + if (!rate) + return -EINVAL; + + inc = 1000000000ULL << 20; + do_div(inc, rate); + + if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) { + dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n", + inc, GTI_TIV_MIN, GTI_TIV_MAX); + return -EINVAL; + } + + ravb_write(ndev, inc, GTI); + + return 0; +} + +static void ravb_set_config_mode(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + if (priv->chip_id == RCAR_GEN2) { + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); + /* Set CSEL value */ + ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); + } else { + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG | + CCC_GAC | CCC_CSEL_HPB); + } +} + +/* Set tx and rx clock internal delay modes */ +static void ravb_set_delay_mode(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + int set = 0; + + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + 
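+ /* The mapping follows the usual RGMII conventions:
+ * "rgmii-id" enables both internal delays (APSR_DM_RDM | APSR_DM_TDM),
+ * "rgmii-rxid" only the RX delay (APSR_DM_RDM), and
+ * "rgmii-txid" only the TX delay (APSR_DM_TDM).
+ */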
priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) + set |= APSR_DM_RDM; + + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) + set |= APSR_DM_TDM; + + ravb_modify(ndev, APSR, APSR_DM, set); +} + +static int ravb_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct ravb_private *priv; + enum ravb_chip_id chip_id; + struct net_device *ndev; + int error, irq, q; + struct resource *res; + int i; + + if (!np) { + dev_err(&pdev->dev, + "this driver is required to be instantiated from device tree\n"); + return -EINVAL; + } + + /* Get base address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "invalid resource\n"); + return -EINVAL; + } + + ndev = alloc_etherdev_mqs(sizeof(struct ravb_private), + NUM_TX_QUEUE, NUM_RX_QUEUE); + if (!ndev) + return -ENOMEM; + + ndev->features = NETIF_F_RXCSUM; + ndev->hw_features = NETIF_F_RXCSUM; + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + /* The Ether-specific entries in the device structure. */ + ndev->base_addr = res->start; + + chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev); + + if (chip_id == RCAR_GEN3) + irq = platform_get_irq_byname(pdev, "ch22"); + else + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + error = irq; + goto out_release; + } + ndev->irq = irq; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + priv = netdev_priv(ndev); + priv->ndev = ndev; + priv->pdev = pdev; + priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; + priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE; + priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; + priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; + priv->addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->addr)) { + error = PTR_ERR(priv->addr); + goto out_release; + } + + spin_lock_init(&priv->lock); + INIT_WORK(&priv->work, ravb_tx_timeout_work); + + priv->phy_interface = of_get_phy_mode(np); + + priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link"); + priv->avb_link_active_low = + of_property_read_bool(np, "renesas,ether-link-active-low"); + + if (chip_id == RCAR_GEN3) { + irq = platform_get_irq_byname(pdev, "ch24"); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->emac_irq = irq; + for (i = 0; i < NUM_RX_QUEUE; i++) { + irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->rx_irqs[i] = irq; + } + for (i = 0; i < NUM_TX_QUEUE; i++) { + irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->tx_irqs[i] = irq; + } + } + + priv->chip_id = chip_id; + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + error = PTR_ERR(priv->clk); + goto out_release; + } + + ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + ndev->min_mtu = ETH_MIN_MTU; + + /* Set function */ + ndev->netdev_ops = &ravb_netdev_ops; + ndev->ethtool_ops = &ravb_ethtool_ops; + + /* Set AVB config mode */ + ravb_set_config_mode(ndev); + + /* Set GTI value */ + error = ravb_set_gti(ndev); + if (error) + goto out_release; + + /* Request GTI loading */ + ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + + if (priv->chip_id != RCAR_GEN2) + ravb_set_delay_mode(ndev); + + /* Allocate descriptor base address table */ + priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; + priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, + &priv->desc_bat_dma, 
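+ /* The descriptor base address table holds one descriptor per DMAC
+ * queue; each entry is initialized to DT_EOS below so unused queues
+ * read as empty, and the table's DMA address is then loaded into the
+ * DBAT register.
+ */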
GFP_KERNEL); + if (!priv->desc_bat) { + dev_err(&pdev->dev, + "Cannot allocate desc base address table (size %d bytes)\n", + priv->desc_bat_size); + error = -ENOMEM; + goto out_release; + } + for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++) + priv->desc_bat[q].die_dt = DT_EOS; + ravb_write(ndev, priv->desc_bat_dma, DBAT); + + /* Initialise HW timestamp list */ + INIT_LIST_HEAD(&priv->ts_skb_list); + + /* Initialise PTP Clock driver */ + if (chip_id != RCAR_GEN2) + ravb_ptp_init(ndev, pdev); + + /* Debug message level */ + priv->msg_enable = RAVB_DEF_MSG_ENABLE; + + /* Read and set MAC address */ + ravb_read_mac_address(ndev, of_get_mac_address(np)); + if (!is_valid_ether_addr(ndev->dev_addr)) { + dev_warn(&pdev->dev, + "no valid MAC address supplied, using a random one\n"); + eth_hw_addr_random(ndev); + } + + /* MDIO bus init */ + error = ravb_mdio_init(priv); + if (error) { + dev_err(&pdev->dev, "failed to initialize MDIO\n"); + goto out_dma_free; + } + + netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); + netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64); + + /* Network device register */ + error = register_netdev(ndev); + if (error) + goto out_napi_del; + + device_set_wakeup_capable(&pdev->dev, 1); + + /* Print device information */ + netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", + (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); + + platform_set_drvdata(pdev, ndev); + + return 0; + +out_napi_del: + netif_napi_del(&priv->napi[RAVB_NC]); + netif_napi_del(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); +out_dma_free: + dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, + priv->desc_bat_dma); + + /* Stop PTP Clock driver */ + if (chip_id != RCAR_GEN2) + ravb_ptp_stop(ndev); +out_release: + free_netdev(ndev); + + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return error; +} + +static int ravb_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ravb_private *priv = netdev_priv(ndev); + + /* Stop PTP Clock driver */ + if (priv->chip_id != RCAR_GEN2) + ravb_ptp_stop(ndev); + + dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, + priv->desc_bat_dma); + /* Set reset mode */ + ravb_write(ndev, CCC_OPC_RESET, CCC); + pm_runtime_put_sync(&pdev->dev); + unregister_netdev(ndev); + netif_napi_del(&priv->napi[RAVB_NC]); + netif_napi_del(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); + pm_runtime_disable(&pdev->dev); + free_netdev(ndev); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static int ravb_wol_setup(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + /* Disable interrupts by clearing the interrupt masks. 
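+ *
+ * Only the Magic Packet detection interrupt (ECSIPR_MPDIP) is left
+ * armed below, so a received MagicPacket becomes the sole event that
+ * can wake the system once enable_irq_wake() is called on the E-MAC
+ * IRQ.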
*/
+ ravb_write(ndev, 0, RIC0);
+ ravb_write(ndev, 0, RIC2);
+ ravb_write(ndev, 0, TIC);
+
+ /* Only allow ECI interrupts */
+ synchronize_irq(priv->emac_irq);
+ napi_disable(&priv->napi[RAVB_NC]);
+ napi_disable(&priv->napi[RAVB_BE]);
+ ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
+
+ /* Enable MagicPacket */
+ ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
+
+ return enable_irq_wake(priv->emac_irq);
+}
+
+static int ravb_wol_restore(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int ret;
+
+ napi_enable(&priv->napi[RAVB_NC]);
+ napi_enable(&priv->napi[RAVB_BE]);
+
+ /* Disable MagicPacket */
+ ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
+
+ ret = ravb_close(ndev);
+ if (ret < 0)
+ return ret;
+
+ return disable_irq_wake(priv->emac_irq);
+}
+
+static int __maybe_unused ravb_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+ int ret;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ netif_device_detach(ndev);
+
+ if (priv->wol_enabled)
+ ret = ravb_wol_setup(ndev);
+ else
+ ret = ravb_close(ndev);
+
+ return ret;
+}
+
+static int __maybe_unused ravb_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ /* If WoL is enabled, set reset mode to rearm the WoL logic */
+ if (priv->wol_enabled)
+ ravb_write(ndev, CCC_OPC_RESET, CCC);
+
+ /* All registers have been reset to their default values.
+ * Restore all registers that were set up at probe time and
+ * reopen the device if it was running before the system was
+ * suspended.
+ */
+
+ /* Set AVB config mode */
+ ravb_set_config_mode(ndev);
+
+ /* Set GTI value */
+ ret = ravb_set_gti(ndev);
+ if (ret)
+ return ret;
+
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+
+ if (priv->chip_id != RCAR_GEN2)
+ ravb_set_delay_mode(ndev);
+
+ /* Restore descriptor base address table */
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
+
+ if (netif_running(ndev)) {
+ if (priv->wol_enabled) {
+ ret = ravb_wol_restore(ndev);
+ if (ret)
+ return ret;
+ }
+ ret = ravb_open(ndev);
+ if (ret < 0)
+ return ret;
+ netif_device_attach(ndev);
+ }
+
+ return ret;
+}
+
+static int __maybe_unused ravb_runtime_nop(struct device *dev)
+{
+ /* Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * This driver re-initializes all registers after
+ * pm_runtime_get_sync() anyway so there is no need
+ * to save and restore registers here.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops ravb_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
+ SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
+};
+
+static struct platform_driver ravb_driver = {
+ .probe = ravb_probe,
+ .remove = ravb_remove,
+ .driver = {
+ .name = "ravb",
+ .pm = &ravb_dev_pm_ops,
+ .of_match_table = ravb_match_table,
+ },
+};
+
+module_platform_driver(ravb_driver);
+
+MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
+MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
new file mode 100644
index 000000000..dce2a40a3
--- /dev/null
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* PTP 1588 clock using the Renesas Ethernet AVB
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> + */ + +#include "ravb.h" + +static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request) +{ + struct net_device *ndev = priv->ndev; + int error; + + error = ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ); + if (error) + return error; + + ravb_modify(ndev, GCCR, request, request); + return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ); +} + +/* Caller must hold the lock */ +static int ravb_ptp_time_read(struct ravb_private *priv, struct timespec64 *ts) +{ + struct net_device *ndev = priv->ndev; + int error; + + error = ravb_ptp_tcr_request(priv, GCCR_TCR_CAPTURE); + if (error) + return error; + + ts->tv_nsec = ravb_read(ndev, GCT0); + ts->tv_sec = ravb_read(ndev, GCT1) | + ((s64)ravb_read(ndev, GCT2) << 32); + + return 0; +} + +/* Caller must hold the lock */ +static int ravb_ptp_time_write(struct ravb_private *priv, + const struct timespec64 *ts) +{ + struct net_device *ndev = priv->ndev; + int error; + u32 gccr; + + error = ravb_ptp_tcr_request(priv, GCCR_TCR_RESET); + if (error) + return error; + + gccr = ravb_read(ndev, GCCR); + if (gccr & GCCR_LTO) + return -EBUSY; + ravb_write(ndev, ts->tv_nsec, GTO0); + ravb_write(ndev, ts->tv_sec, GTO1); + ravb_write(ndev, (ts->tv_sec >> 32) & 0xffff, GTO2); + ravb_write(ndev, gccr | GCCR_LTO, GCCR); + + return 0; +} + +/* Caller must hold the lock */ +static int ravb_ptp_update_compare(struct ravb_private *priv, u32 ns) +{ + struct net_device *ndev = priv->ndev; + /* When the comparison value (GPTC.PTCV) is in range of + * [x-1 to x+1] (x is the configured increment value in + * GTI.TIV), it may happen that a comparison match is + * not detected when the timer wraps around. + */ + u32 gti_ns_plus_1 = (priv->ptp.current_addend >> 20) + 1; + u32 gccr; + + if (ns < gti_ns_plus_1) + ns = gti_ns_plus_1; + else if (ns > 0 - gti_ns_plus_1) + ns = 0 - gti_ns_plus_1; + + gccr = ravb_read(ndev, GCCR); + if (gccr & GCCR_LPTC) + return -EBUSY; + ravb_write(ndev, ns, GPTC); + ravb_write(ndev, gccr | GCCR_LPTC, GCCR); + + return 0; +} + +/* PTP clock operations */ +static int ravb_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct ravb_private *priv = container_of(ptp, struct ravb_private, + ptp.info); + struct net_device *ndev = priv->ndev; + unsigned long flags; + u32 diff, addend; + bool neg_adj = false; + u32 gccr; + + if (ppb < 0) { + neg_adj = true; + ppb = -ppb; + } + addend = priv->ptp.default_addend; + diff = div_u64((u64)addend * ppb, NSEC_PER_SEC); + + addend = neg_adj ? 
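+ /* Worked example (illustrative): the addend is scaled by ppb parts
+ * per billion, diff = addend * ppb / NSEC_PER_SEC. At the maximum
+ * adjustment of 50000000 ppb advertised in ravb_ptp_info, diff is
+ * addend / 20, i.e. the 32.20 fixed-point increment changes by 5%.
+ */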
addend - diff : addend + diff; + + spin_lock_irqsave(&priv->lock, flags); + + priv->ptp.current_addend = addend; + + gccr = ravb_read(ndev, GCCR); + if (gccr & GCCR_LTI) { + spin_unlock_irqrestore(&priv->lock, flags); + return -EBUSY; + } + ravb_write(ndev, addend & GTI_TIV, GTI); + ravb_write(ndev, gccr | GCCR_LTI, GCCR); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int ravb_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct ravb_private *priv = container_of(ptp, struct ravb_private, + ptp.info); + struct timespec64 ts; + unsigned long flags; + int error; + + spin_lock_irqsave(&priv->lock, flags); + error = ravb_ptp_time_read(priv, &ts); + if (!error) { + u64 now = ktime_to_ns(timespec64_to_ktime(ts)); + + ts = ns_to_timespec64(now + delta); + error = ravb_ptp_time_write(priv, &ts); + } + spin_unlock_irqrestore(&priv->lock, flags); + + return error; +} + +static int ravb_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct ravb_private *priv = container_of(ptp, struct ravb_private, + ptp.info); + unsigned long flags; + int error; + + spin_lock_irqsave(&priv->lock, flags); + error = ravb_ptp_time_read(priv, ts); + spin_unlock_irqrestore(&priv->lock, flags); + + return error; +} + +static int ravb_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ravb_private *priv = container_of(ptp, struct ravb_private, + ptp.info); + unsigned long flags; + int error; + + spin_lock_irqsave(&priv->lock, flags); + error = ravb_ptp_time_write(priv, ts); + spin_unlock_irqrestore(&priv->lock, flags); + + return error; +} + +static int ravb_ptp_extts(struct ptp_clock_info *ptp, + struct ptp_extts_request *req, int on) +{ + struct ravb_private *priv = container_of(ptp, struct ravb_private, + ptp.info); + struct net_device *ndev = priv->ndev; + unsigned long flags; + + if (req->index) + return -EINVAL; + + if (priv->ptp.extts[req->index] == on) + return 0; + priv->ptp.extts[req->index] = on; + + spin_lock_irqsave(&priv->lock, flags); + if (priv->chip_id == RCAR_GEN2) + ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0); + else if (on) + ravb_write(ndev, GIE_PTCS, GIE); + else + ravb_write(ndev, GID_PTCD, GID); + mmiowb(); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int ravb_ptp_perout(struct ptp_clock_info *ptp, + struct ptp_perout_request *req, int on) +{ + struct ravb_private *priv = container_of(ptp, struct ravb_private, + ptp.info); + struct net_device *ndev = priv->ndev; + struct ravb_ptp_perout *perout; + unsigned long flags; + int error = 0; + + if (req->index) + return -EINVAL; + + if (on) { + u64 start_ns; + u64 period_ns; + + start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec; + period_ns = req->period.sec * NSEC_PER_SEC + req->period.nsec; + + if (start_ns > U32_MAX) { + netdev_warn(ndev, + "ptp: start value (nsec) is over limit. Maximum size of start is only 32 bits\n"); + return -ERANGE; + } + + if (period_ns > U32_MAX) { + netdev_warn(ndev, + "ptp: period value (nsec) is over limit. 
Maximum size of period is only 32 bits\n"); + return -ERANGE; + } + + spin_lock_irqsave(&priv->lock, flags); + + perout = &priv->ptp.perout[req->index]; + perout->target = (u32)start_ns; + perout->period = (u32)period_ns; + error = ravb_ptp_update_compare(priv, (u32)start_ns); + if (!error) { + /* Unmask interrupt */ + if (priv->chip_id == RCAR_GEN2) + ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); + else + ravb_write(ndev, GIE_PTMS0, GIE); + } + } else { + spin_lock_irqsave(&priv->lock, flags); + + perout = &priv->ptp.perout[req->index]; + perout->period = 0; + + /* Mask interrupt */ + if (priv->chip_id == RCAR_GEN2) + ravb_modify(ndev, GIC, GIC_PTME, 0); + else + ravb_write(ndev, GID_PTMD0, GID); + } + mmiowb(); + spin_unlock_irqrestore(&priv->lock, flags); + + return error; +} + +static int ravb_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *req, int on) +{ + switch (req->type) { + case PTP_CLK_REQ_EXTTS: + return ravb_ptp_extts(ptp, &req->extts, on); + case PTP_CLK_REQ_PEROUT: + return ravb_ptp_perout(ptp, &req->perout, on); + default: + return -EOPNOTSUPP; + } +} + +static const struct ptp_clock_info ravb_ptp_info = { + .owner = THIS_MODULE, + .name = "ravb clock", + .max_adj = 50000000, + .n_ext_ts = N_EXT_TS, + .n_per_out = N_PER_OUT, + .adjfreq = ravb_ptp_adjfreq, + .adjtime = ravb_ptp_adjtime, + .gettime64 = ravb_ptp_gettime64, + .settime64 = ravb_ptp_settime64, + .enable = ravb_ptp_enable, +}; + +/* Caller must hold the lock */ +void ravb_ptp_interrupt(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + u32 gis = ravb_read(ndev, GIS); + + gis &= ravb_read(ndev, GIC); + if (gis & GIS_PTCF) { + struct ptp_clock_event event; + + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ravb_read(ndev, GCPT); + ptp_clock_event(priv->ptp.clock, &event); + } + if (gis & GIS_PTMF) { + struct ravb_ptp_perout *perout = priv->ptp.perout; + + if (perout->period) { + perout->target += perout->period; + ravb_ptp_update_compare(priv, perout->target); + } + } + + ravb_write(ndev, ~(gis | GIS_RESERVED), GIS); +} + +void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned long flags; + + priv->ptp.info = ravb_ptp_info; + + priv->ptp.default_addend = ravb_read(ndev, GTI); + priv->ptp.current_addend = priv->ptp.default_addend; + + spin_lock_irqsave(&priv->lock, flags); + ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ); + ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP); + mmiowb(); + spin_unlock_irqrestore(&priv->lock, flags); + + priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev); +} + +void ravb_ptp_stop(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + ravb_write(ndev, 0, GIC); + ravb_write(ndev, 0, GIS); + + ptp_clock_unregister(priv->ptp.clock); +} diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c new file mode 100644 index 000000000..c44aea47c --- /dev/null +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -0,0 +1,3518 @@ +// SPDX-License-Identifier: GPL-2.0 +/* SuperH Ethernet device driver + * + * Copyright (C) 2014 Renesas Electronics Corporation + * Copyright (C) 2006-2012 Nobuhiro Iwamatsu + * Copyright (C) 2008-2014 Renesas Solutions Corp. + * Copyright (C) 2013-2017 Cogent Embedded, Inc. 
+ * Copyright (C) 2014 Codethink Limited + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/mdio-bitbang.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/of_net.h> +#include <linux/phy.h> +#include <linux/cache.h> +#include <linux/io.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/sh_eth.h> +#include <linux/of_mdio.h> + +#include "sh_eth.h" + +#define SH_ETH_DEF_MSG_ENABLE \ + (NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_RX_ERR| \ + NETIF_MSG_TX_ERR) + +#define SH_ETH_OFFSET_INVALID ((u16)~0) + +#define SH_ETH_OFFSET_DEFAULTS \ + [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID + +static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [EDSR] = 0x0000, + [EDMR] = 0x0400, + [EDTRR] = 0x0408, + [EDRRR] = 0x0410, + [EESR] = 0x0428, + [EESIPR] = 0x0430, + [TDLAR] = 0x0010, + [TDFAR] = 0x0014, + [TDFXR] = 0x0018, + [TDFFR] = 0x001c, + [RDLAR] = 0x0030, + [RDFAR] = 0x0034, + [RDFXR] = 0x0038, + [RDFFR] = 0x003c, + [TRSCER] = 0x0438, + [RMFCR] = 0x0440, + [TFTR] = 0x0448, + [FDR] = 0x0450, + [RMCR] = 0x0458, + [RPADIR] = 0x0460, + [FCFTR] = 0x0468, + [CSMR] = 0x04E4, + + [ECMR] = 0x0500, + [ECSR] = 0x0510, + [ECSIPR] = 0x0518, + [PIR] = 0x0520, + [PSR] = 0x0528, + [PIPR] = 0x052c, + [RFLR] = 0x0508, + [APR] = 0x0554, + [MPR] = 0x0558, + [PFTCR] = 0x055c, + [PFRCR] = 0x0560, + [TPAUSER] = 0x0564, + [GECMR] = 0x05b0, + [BCULR] = 0x05b4, + [MAHR] = 0x05c0, + [MALR] = 0x05c8, + [TROCR] = 0x0700, + [CDCR] = 0x0708, + [LCCR] = 0x0710, + [CEFCR] = 0x0740, + [FRECR] = 0x0748, + [TSFRCR] = 0x0750, + [TLFRCR] = 0x0758, + [RFCR] = 0x0760, + [CERCR] = 0x0768, + [CEECR] = 0x0770, + [MAFCR] = 0x0778, + [RMII_MII] = 0x0790, + + [ARSTR] = 0x0000, + [TSU_CTRST] = 0x0004, + [TSU_FWEN0] = 0x0010, + [TSU_FWEN1] = 0x0014, + [TSU_FCM] = 0x0018, + [TSU_BSYSL0] = 0x0020, + [TSU_BSYSL1] = 0x0024, + [TSU_PRISL0] = 0x0028, + [TSU_PRISL1] = 0x002c, + [TSU_FWSL0] = 0x0030, + [TSU_FWSL1] = 0x0034, + [TSU_FWSLC] = 0x0038, + [TSU_QTAGM0] = 0x0040, + [TSU_QTAGM1] = 0x0044, + [TSU_FWSR] = 0x0050, + [TSU_FWINMK] = 0x0054, + [TSU_ADQT0] = 0x0048, + [TSU_ADQT1] = 0x004c, + [TSU_VTAG0] = 0x0058, + [TSU_VTAG1] = 0x005c, + [TSU_ADSBSY] = 0x0060, + [TSU_TEN] = 0x0064, + [TSU_POST1] = 0x0070, + [TSU_POST2] = 0x0074, + [TSU_POST3] = 0x0078, + [TSU_POST4] = 0x007c, + [TSU_ADRH0] = 0x0100, + + [TXNLCR0] = 0x0080, + [TXALCR0] = 0x0084, + [RXNLCR0] = 0x0088, + [RXALCR0] = 0x008c, + [FWNLCR0] = 0x0090, + [FWALCR0] = 0x0094, + [TXNLCR1] = 0x00a0, + [TXALCR1] = 0x00a4, + [RXNLCR1] = 0x00a8, + [RXALCR1] = 0x00ac, + [FWNLCR1] = 0x00b0, + [FWALCR1] = 0x00b4, +}; + +static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [EDSR] = 0x0000, + [EDMR] = 0x0400, + [EDTRR] = 0x0408, + [EDRRR] = 0x0410, + [EESR] = 0x0428, + [EESIPR] = 0x0430, + [TDLAR] = 0x0010, + [TDFAR] = 0x0014, + [TDFXR] = 0x0018, + [TDFFR] = 0x001c, + [RDLAR] = 0x0030, + [RDFAR] = 0x0034, + [RDFXR] = 0x0038, + [RDFFR] = 0x003c, + [TRSCER] = 0x0438, + [RMFCR] = 0x0440, + [TFTR] = 0x0448, + [FDR] = 0x0450, + [RMCR] = 0x0458, + [RPADIR] = 0x0460, + [FCFTR] = 0x0468, + [CSMR] = 0x04E4, + + [ECMR] = 0x0500, + 
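+ /* Each per-SoC table starts from SH_ETH_OFFSET_DEFAULTS, so any
+ * register not listed explicitly keeps SH_ETH_OFFSET_INVALID, and the
+ * sh_eth_read()/sh_eth_write() accessors further down WARN and bail
+ * out instead of touching a bogus offset.
+ */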
[RFLR] = 0x0508, + [ECSR] = 0x0510, + [ECSIPR] = 0x0518, + [PIR] = 0x0520, + [APR] = 0x0554, + [MPR] = 0x0558, + [PFTCR] = 0x055c, + [PFRCR] = 0x0560, + [TPAUSER] = 0x0564, + [MAHR] = 0x05c0, + [MALR] = 0x05c8, + [CEFCR] = 0x0740, + [FRECR] = 0x0748, + [TSFRCR] = 0x0750, + [TLFRCR] = 0x0758, + [RFCR] = 0x0760, + [MAFCR] = 0x0778, + + [ARSTR] = 0x0000, + [TSU_CTRST] = 0x0004, + [TSU_FWSLC] = 0x0038, + [TSU_VTAG0] = 0x0058, + [TSU_ADSBSY] = 0x0060, + [TSU_TEN] = 0x0064, + [TSU_POST1] = 0x0070, + [TSU_POST2] = 0x0074, + [TSU_POST3] = 0x0078, + [TSU_POST4] = 0x007c, + [TSU_ADRH0] = 0x0100, + + [TXNLCR0] = 0x0080, + [TXALCR0] = 0x0084, + [RXNLCR0] = 0x0088, + [RXALCR0] = 0x008C, +}; + +static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [ECMR] = 0x0300, + [RFLR] = 0x0308, + [ECSR] = 0x0310, + [ECSIPR] = 0x0318, + [PIR] = 0x0320, + [PSR] = 0x0328, + [RDMLR] = 0x0340, + [IPGR] = 0x0350, + [APR] = 0x0354, + [MPR] = 0x0358, + [RFCF] = 0x0360, + [TPAUSER] = 0x0364, + [TPAUSECR] = 0x0368, + [MAHR] = 0x03c0, + [MALR] = 0x03c8, + [TROCR] = 0x03d0, + [CDCR] = 0x03d4, + [LCCR] = 0x03d8, + [CNDCR] = 0x03dc, + [CEFCR] = 0x03e4, + [FRECR] = 0x03e8, + [TSFRCR] = 0x03ec, + [TLFRCR] = 0x03f0, + [RFCR] = 0x03f4, + [MAFCR] = 0x03f8, + + [EDMR] = 0x0200, + [EDTRR] = 0x0208, + [EDRRR] = 0x0210, + [TDLAR] = 0x0218, + [RDLAR] = 0x0220, + [EESR] = 0x0228, + [EESIPR] = 0x0230, + [TRSCER] = 0x0238, + [RMFCR] = 0x0240, + [TFTR] = 0x0248, + [FDR] = 0x0250, + [RMCR] = 0x0258, + [TFUCR] = 0x0264, + [RFOCR] = 0x0268, + [RMIIMODE] = 0x026c, + [FCFTR] = 0x0270, + [TRIMD] = 0x027c, +}; + +static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [ECMR] = 0x0100, + [RFLR] = 0x0108, + [ECSR] = 0x0110, + [ECSIPR] = 0x0118, + [PIR] = 0x0120, + [PSR] = 0x0128, + [RDMLR] = 0x0140, + [IPGR] = 0x0150, + [APR] = 0x0154, + [MPR] = 0x0158, + [TPAUSER] = 0x0164, + [RFCF] = 0x0160, + [TPAUSECR] = 0x0168, + [BCFRR] = 0x016c, + [MAHR] = 0x01c0, + [MALR] = 0x01c8, + [TROCR] = 0x01d0, + [CDCR] = 0x01d4, + [LCCR] = 0x01d8, + [CNDCR] = 0x01dc, + [CEFCR] = 0x01e4, + [FRECR] = 0x01e8, + [TSFRCR] = 0x01ec, + [TLFRCR] = 0x01f0, + [RFCR] = 0x01f4, + [MAFCR] = 0x01f8, + [RTRATE] = 0x01fc, + + [EDMR] = 0x0000, + [EDTRR] = 0x0008, + [EDRRR] = 0x0010, + [TDLAR] = 0x0018, + [RDLAR] = 0x0020, + [EESR] = 0x0028, + [EESIPR] = 0x0030, + [TRSCER] = 0x0038, + [RMFCR] = 0x0040, + [TFTR] = 0x0048, + [FDR] = 0x0050, + [RMCR] = 0x0058, + [TFUCR] = 0x0064, + [RFOCR] = 0x0068, + [FCFTR] = 0x0070, + [RPADIR] = 0x0078, + [TRIMD] = 0x007c, + [RBWAR] = 0x00c8, + [RDFAR] = 0x00cc, + [TBRAR] = 0x00d4, + [TDFAR] = 0x00d8, +}; + +static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [EDMR] = 0x0000, + [EDTRR] = 0x0004, + [EDRRR] = 0x0008, + [TDLAR] = 0x000c, + [RDLAR] = 0x0010, + [EESR] = 0x0014, + [EESIPR] = 0x0018, + [TRSCER] = 0x001c, + [RMFCR] = 0x0020, + [TFTR] = 0x0024, + [FDR] = 0x0028, + [RMCR] = 0x002c, + [EDOCR] = 0x0030, + [FCFTR] = 0x0034, + [RPADIR] = 0x0038, + [TRIMD] = 0x003c, + [RBWAR] = 0x0040, + [RDFAR] = 0x0044, + [TBRAR] = 0x004c, + [TDFAR] = 0x0050, + + [ECMR] = 0x0160, + [ECSR] = 0x0164, + [ECSIPR] = 0x0168, + [PIR] = 0x016c, + [MAHR] = 0x0170, + [MALR] = 0x0174, + [RFLR] = 0x0178, + [PSR] = 0x017c, + [TROCR] = 0x0180, + [CDCR] = 0x0184, + [LCCR] = 0x0188, + [CNDCR] = 0x018c, + [CEFCR] = 0x0194, + [FRECR] = 0x0198, + [TSFRCR] = 0x019c, + [TLFRCR] = 0x01a0, + [RFCR] = 0x01a4, + [MAFCR] = 0x01a8, + [IPGR] = 
0x01b4, + [APR] = 0x01b8, + [MPR] = 0x01bc, + [TPAUSER] = 0x01c4, + [BCFR] = 0x01cc, + + [ARSTR] = 0x0000, + [TSU_CTRST] = 0x0004, + [TSU_FWEN0] = 0x0010, + [TSU_FWEN1] = 0x0014, + [TSU_FCM] = 0x0018, + [TSU_BSYSL0] = 0x0020, + [TSU_BSYSL1] = 0x0024, + [TSU_PRISL0] = 0x0028, + [TSU_PRISL1] = 0x002c, + [TSU_FWSL0] = 0x0030, + [TSU_FWSL1] = 0x0034, + [TSU_FWSLC] = 0x0038, + [TSU_QTAGM0] = 0x0040, + [TSU_QTAGM1] = 0x0044, + [TSU_ADQT0] = 0x0048, + [TSU_ADQT1] = 0x004c, + [TSU_FWSR] = 0x0050, + [TSU_FWINMK] = 0x0054, + [TSU_ADSBSY] = 0x0060, + [TSU_TEN] = 0x0064, + [TSU_POST1] = 0x0070, + [TSU_POST2] = 0x0074, + [TSU_POST3] = 0x0078, + [TSU_POST4] = 0x007c, + + [TXNLCR0] = 0x0080, + [TXALCR0] = 0x0084, + [RXNLCR0] = 0x0088, + [RXALCR0] = 0x008c, + [FWNLCR0] = 0x0090, + [FWALCR0] = 0x0094, + [TXNLCR1] = 0x00a0, + [TXALCR1] = 0x00a4, + [RXNLCR1] = 0x00a8, + [RXALCR1] = 0x00ac, + [FWNLCR1] = 0x00b0, + [FWALCR1] = 0x00b4, + + [TSU_ADRH0] = 0x0100, +}; + +static void sh_eth_rcv_snd_disable(struct net_device *ndev); +static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); + +static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return; + + iowrite32(data, mdp->addr + offset); +} + +static u32 sh_eth_read(struct net_device *ndev, int enum_index) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return ~0U; + + return ioread32(mdp->addr + offset); +} + +static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear, + u32 set) +{ + sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set, + enum_index); +} + +static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) +{ + return mdp->reg_offset[enum_index]; +} + +static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, + int enum_index) +{ + u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return; + + iowrite32(data, mdp->tsu_addr + offset); +} + +static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) +{ + u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return ~0U; + + return ioread32(mdp->tsu_addr + offset); +} + +static void sh_eth_soft_swap(char *src, int len) +{ +#ifdef __LITTLE_ENDIAN + u32 *p = (u32 *)src; + u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32)); + + for (; p < maxp; p++) + *p = swab32(*p); +#endif +} + +static void sh_eth_select_mii(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 value; + + switch (mdp->phy_interface) { + case PHY_INTERFACE_MODE_RGMII ... PHY_INTERFACE_MODE_RGMII_TXID: + value = 0x3; + break; + case PHY_INTERFACE_MODE_GMII: + value = 0x2; + break; + case PHY_INTERFACE_MODE_MII: + value = 0x1; + break; + case PHY_INTERFACE_MODE_RMII: + value = 0x0; + break; + default: + netdev_warn(ndev, + "PHY interface mode was not setup. Set to MII.\n"); + value = 0x1; + break; + } + + sh_eth_write(ndev, value, RMII_MII); +} + +static void sh_eth_set_duplex(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? 
ECMR_DM : 0); +} + +static void sh_eth_chip_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* reset device */ + sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR); + mdelay(1); +} + +static int sh_eth_soft_reset(struct net_device *ndev) +{ + sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER); + mdelay(3); + sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0); + + return 0; +} + +static int sh_eth_check_soft_reset(struct net_device *ndev) +{ + int cnt; + + for (cnt = 100; cnt > 0; cnt--) { + if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER)) + return 0; + mdelay(1); + } + + netdev_err(ndev, "Device reset failed\n"); + return -ETIMEDOUT; +} + +static int sh_eth_soft_reset_gether(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + sh_eth_write(ndev, EDSR_ENALL, EDSR); + sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER); + + ret = sh_eth_check_soft_reset(ndev); + if (ret) + return ret; + + /* Table Init */ + sh_eth_write(ndev, 0, TDLAR); + sh_eth_write(ndev, 0, TDFAR); + sh_eth_write(ndev, 0, TDFXR); + sh_eth_write(ndev, 0, TDFFR); + sh_eth_write(ndev, 0, RDLAR); + sh_eth_write(ndev, 0, RDFAR); + sh_eth_write(ndev, 0, RDFXR); + sh_eth_write(ndev, 0, RDFFR); + + /* Reset HW CRC register */ + if (mdp->cd->hw_checksum) + sh_eth_write(ndev, 0, CSMR); + + /* Select MII mode */ + if (mdp->cd->select_mii) + sh_eth_select_mii(ndev); + + return ret; +} + +static void sh_eth_set_rate_gether(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, GECMR_10, GECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, GECMR_100, GECMR); + break; + case 1000: /* 1000BASE */ + sh_eth_write(ndev, GECMR_1000, GECMR); + break; + } +} + +#ifdef CONFIG_OF +/* R7S72100 */ +static struct sh_eth_cpu_data r7s72100_data = { + .soft_reset = sh_eth_soft_reset_gether, + + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + + .register_type = SH_ETH_REG_FAST_RZ, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_ICD, + .ecsipr_value = ECSIPR_ICDIP, + .eesipr_value = EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP | + EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP | + EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE, + .fdr_value = 0x0000070f, + + .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5, + + .no_psr = 1, + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rpadir = 1, + .no_trimd = 1, + .no_ade = 1, + .xdfar_rw = 1, + .hw_checksum = 1, + .tsu = 1, + .no_tx_cntrs = 1, +}; + +static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) +{ + sh_eth_chip_reset(ndev); + + sh_eth_select_mii(ndev); +} + +/* R8A7740 */ +static struct sh_eth_cpu_data r8a7740_data = { + .soft_reset = sh_eth_soft_reset_gether, + + .chip_reset = sh_eth_chip_reset_r8a7740, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + 
EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE, + .fdr_value = 0x0000070f, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .rpadir = 1, + .no_trimd = 1, + .no_ade = 1, + .xdfar_rw = 1, + .hw_checksum = 1, + .tsu = 1, + .select_mii = 1, + .magic = 1, + .cexcr = 1, +}; + +/* There is CPU dependent code */ +static void sh_eth_set_rate_rcar(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_modify(ndev, ECMR, ECMR_ELB, 0); + break; + case 100:/* 100BASE */ + sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB); + break; + } +} + +/* R-Car Gen1 */ +static struct sh_eth_cpu_data rcar_gen1_data = { + .soft_reset = sh_eth_soft_reset, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_rcar, + + .register_type = SH_ETH_REG_FAST_RCAR, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, + .fdr_value = 0x00000f0f, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .no_xdfar = 1, +}; + +/* R-Car Gen2 and RZ/G1 */ +static struct sh_eth_cpu_data rcar_gen2_data = { + .soft_reset = sh_eth_soft_reset, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_rcar, + + .register_type = SH_ETH_REG_FAST_RCAR, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP | + ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, + .fdr_value = 0x00000f0f, + + .trscer_err_mask = DESC_I_RINT8, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .no_xdfar = 1, + .rmiimode = 1, + .magic = 1, +}; + +/* R8A77980 */ +static struct sh_eth_cpu_data r8a77980_data = { + .soft_reset = sh_eth_soft_reset_gether, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP | + ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + 
EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | + EESR_TFE | EESR_TDE | EESR_ECI, + .fdr_value = 0x0000070f, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .nbst = 1, + .rpadir = 1, + .no_trimd = 1, + .no_ade = 1, + .xdfar_rw = 1, + .hw_checksum = 1, + .select_mii = 1, + .magic = 1, + .cexcr = 1, +}; + +/* R7S9210 */ +static struct sh_eth_cpu_data r7s9210_data = { + .soft_reset = sh_eth_soft_reset, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_rcar, + + .register_type = SH_ETH_REG_FAST_SH4, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .ecsr_value = ECSR_ICD, + .ecsipr_value = ECSIPR_ICDIP, + .eesipr_value = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP | + EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP | + EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP | + EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP | + EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, + + .fdr_value = 0x0000070f, + + .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rpadir = 1, + .no_ade = 1, + .xdfar_rw = 1, +}; +#endif /* CONFIG_OF */ + +static void sh_eth_set_rate_sh7724(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_modify(ndev, ECMR, ECMR_RTM, 0); + break; + case 100:/* 100BASE */ + sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM); + break; + } +} + +/* SH7724 */ +static struct sh_eth_cpu_data sh7724_data = { + .soft_reset = sh_eth_soft_reset, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_sh7724, + + .register_type = SH_ETH_REG_FAST_SH4, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rpadir = 1, +}; + +static void sh_eth_set_rate_sh7757(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, 0, RTRATE); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, 1, RTRATE); + break; + } +} + +/* SH7757 */ +static struct sh_eth_cpu_data sh7757_data = { + .soft_reset = sh_eth_soft_reset, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_sh7757, + + .register_type = SH_ETH_REG_FAST_SH4, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | 
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, + + .irq_flags = IRQF_SHARED, + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .no_ade = 1, + .rpadir = 1, + .rtrate = 1, + .dual_port = 1, +}; + +#define SH_GIGA_ETH_BASE 0xfee00000UL +#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) +#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) +static void sh_eth_chip_reset_giga(struct net_device *ndev) +{ + u32 mahr[2], malr[2]; + int i; + + /* save MAHR and MALR */ + for (i = 0; i < 2; i++) { + malr[i] = ioread32((void *)GIGA_MALR(i)); + mahr[i] = ioread32((void *)GIGA_MAHR(i)); + } + + sh_eth_chip_reset(ndev); + + /* restore MAHR and MALR */ + for (i = 0; i < 2; i++) { + iowrite32(malr[i], (void *)GIGA_MALR(i)); + iowrite32(mahr[i], (void *)GIGA_MAHR(i)); + } +} + +static void sh_eth_set_rate_giga(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, 0x00000000, GECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, 0x00000010, GECMR); + break; + case 1000: /* 1000BASE */ + sh_eth_write(ndev, 0x00000020, GECMR); + break; + } +} + +/* SH7757(GETHERC) */ +static struct sh_eth_cpu_data sh7757_data_giga = { + .soft_reset = sh_eth_soft_reset_gether, + + .chip_reset = sh_eth_chip_reset_giga, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_giga, + + .register_type = SH_ETH_REG_GIGABIT, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE, + .fdr_value = 0x0000072f, + + .irq_flags = IRQF_SHARED, + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .rpadir = 1, + .no_trimd = 1, + .no_ade = 1, + .xdfar_rw = 1, + .tsu = 1, + .cexcr = 1, + .dual_port = 1, +}; + +/* SH7734 */ +static struct sh_eth_cpu_data sh7734_data = { + .soft_reset = sh_eth_soft_reset_gether, + + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .no_trimd = 1, + .no_ade = 1, 
+ .xdfar_rw = 1, + .tsu = 1, + .hw_checksum = 1, + .select_mii = 1, + .magic = 1, + .cexcr = 1, +}; + +/* SH7763 */ +static struct sh_eth_cpu_data sh7763_data = { + .soft_reset = sh_eth_soft_reset_gether, + + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .no_trimd = 1, + .no_ade = 1, + .xdfar_rw = 1, + .tsu = 1, + .irq_flags = IRQF_SHARED, + .magic = 1, + .cexcr = 1, + .dual_port = 1, +}; + +static struct sh_eth_cpu_data sh7619_data = { + .soft_reset = sh_eth_soft_reset, + + .register_type = SH_ETH_REG_FAST_SH3_SH2, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, +}; + +static struct sh_eth_cpu_data sh771x_data = { + .soft_reset = sh_eth_soft_reset, + + .register_type = SH_ETH_REG_FAST_SH3_SH2, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .trscer_err_mask = DESC_I_RINT8, + + .tsu = 1, + .dual_port = 1, +}; + +static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) +{ + if (!cd->ecsr_value) + cd->ecsr_value = DEFAULT_ECSR_INIT; + + if (!cd->ecsipr_value) + cd->ecsipr_value = DEFAULT_ECSIPR_INIT; + + if (!cd->fcftr_value) + cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | + DEFAULT_FIFO_F_D_RFD; + + if (!cd->fdr_value) + cd->fdr_value = DEFAULT_FDR_INIT; + + if (!cd->tx_check) + cd->tx_check = DEFAULT_TX_CHECK; + + if (!cd->eesr_err_check) + cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; + + if (!cd->trscer_err_mask) + cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK; +} + +static void sh_eth_set_receive_align(struct sk_buff *skb) +{ + uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1); + + if (reserve) + skb_reserve(skb, SH_ETH_RX_ALIGN - reserve); +} + +/* Program the hardware MAC address from dev->dev_addr. 
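+ * MAHR takes dev_addr[0..3] (dev_addr[0] in the top byte) and MALR takes
+ * dev_addr[4..5]; e.g. the illustrative address 02:00:5e:00:53:01 ends up
+ * as MAHR = 0x02005e00 and MALR = 0x5301.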
*/
+static void update_mac_address(struct net_device *ndev)
+{
+	sh_eth_write(ndev,
+		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+	sh_eth_write(ndev,
+		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+}
+
+/* Get the MAC address from the SuperH MAC address registers
+ *
+ * The SuperH Ethernet controller has no ROM for the MAC address, so this
+ * driver reads back whatever the bootloader (U-Boot or sh-ipl+g) programmed
+ * into MAHR/MALR. A MAC address must therefore be set in the bootloader
+ * before this device is used.
+ */
+static void read_mac_address(struct net_device *ndev, unsigned char *mac)
+{
+	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
+		memcpy(ndev->dev_addr, mac, ETH_ALEN);
+	} else {
+		u32 mahr = sh_eth_read(ndev, MAHR);
+		u32 malr = sh_eth_read(ndev, MALR);
+
+		ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
+		ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
+		ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
+		ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
+		ndev->dev_addr[4] = (malr >> 8) & 0xFF;
+		ndev->dev_addr[5] = (malr >> 0) & 0xFF;
+	}
+}
+
+struct bb_info {
+	void (*set_gate)(void *addr);
+	struct mdiobb_ctrl ctrl;
+	void *addr;
+};
+
+static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
+{
+	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+	u32 pir;
+
+	if (bitbang->set_gate)
+		bitbang->set_gate(bitbang->addr);
+
+	pir = ioread32(bitbang->addr);
+	if (set)
+		pir |= mask;
+	else
+		pir &= ~mask;
+	iowrite32(pir, bitbang->addr);
+}
+
+/* Data I/O pin control */
+static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
+{
+	sh_mdio_ctrl(ctrl, PIR_MMD, bit);
+}
+
+/* Set bit data */
+static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
+{
+	sh_mdio_ctrl(ctrl, PIR_MDO, bit);
+}
+
+/* Get bit data */
+static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
+{
+	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+	if (bitbang->set_gate)
+		bitbang->set_gate(bitbang->addr);
+
+	return (ioread32(bitbang->addr) & PIR_MDI) != 0;
+}
+
+/* MDC pin control */
+static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
+{
+	sh_mdio_ctrl(ctrl, PIR_MDC, bit);
+}
+
+/* mdio bus control struct */
+static struct mdiobb_ops bb_ops = {
+	.owner = THIS_MODULE,
+	.set_mdc = sh_mdc_ctrl,
+	.set_mdio_dir = sh_mmd_ctrl,
+	.set_mdio_data = sh_set_mdio,
+	.get_mdio_data = sh_get_mdio,
+};
+
+/* free Tx skb function */
+static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	struct sh_eth_txdesc *txdesc;
+	int free_num = 0;
+	int entry;
+	bool sent;
+
+	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+		entry = mdp->dirty_tx % mdp->num_tx_ring;
+		txdesc = &mdp->tx_ring[entry];
+		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
+		if (sent_only && !sent)
+			break;
+		/* TACT bit must be checked before all the following reads */
+		dma_rmb();
+		netif_info(mdp, tx_done, ndev,
+			   "tx entry %d status 0x%08x\n",
+			   entry, le32_to_cpu(txdesc->status));
+		/* Free the original skb.
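+		 * The descriptor len field keeps the buffer length in its
+		 * upper 16 bits, hence the ">> 16" when unmapping and when
+		 * accounting tx_bytes below.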
*/ + if (mdp->tx_skbuff[entry]) { + dma_unmap_single(&mdp->pdev->dev, + le32_to_cpu(txdesc->addr), + le32_to_cpu(txdesc->len) >> 16, + DMA_TO_DEVICE); + dev_kfree_skb_irq(mdp->tx_skbuff[entry]); + mdp->tx_skbuff[entry] = NULL; + free_num++; + } + txdesc->status = cpu_to_le32(TD_TFP); + if (entry >= mdp->num_tx_ring - 1) + txdesc->status |= cpu_to_le32(TD_TDLE); + + if (sent) { + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16; + } + } + return free_num; +} + +/* free skb and descriptor buffer */ +static void sh_eth_ring_free(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ringsize, i; + + if (mdp->rx_ring) { + for (i = 0; i < mdp->num_rx_ring; i++) { + if (mdp->rx_skbuff[i]) { + struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; + + dma_unmap_single(&mdp->pdev->dev, + le32_to_cpu(rxdesc->addr), + ALIGN(mdp->rx_buf_sz, 32), + DMA_FROM_DEVICE); + } + } + ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; + dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring, + mdp->rx_desc_dma); + mdp->rx_ring = NULL; + } + + /* Free Rx skb ringbuffer */ + if (mdp->rx_skbuff) { + for (i = 0; i < mdp->num_rx_ring; i++) + dev_kfree_skb(mdp->rx_skbuff[i]); + } + kfree(mdp->rx_skbuff); + mdp->rx_skbuff = NULL; + + if (mdp->tx_ring) { + sh_eth_tx_free(ndev, false); + + ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; + dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring, + mdp->tx_desc_dma); + mdp->tx_ring = NULL; + } + + /* Free Tx skb ringbuffer */ + kfree(mdp->tx_skbuff); + mdp->tx_skbuff = NULL; +} + +/* format skb and descriptor buffer */ +static void sh_eth_ring_format(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i; + struct sk_buff *skb; + struct sh_eth_rxdesc *rxdesc = NULL; + struct sh_eth_txdesc *txdesc = NULL; + int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; + int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; + dma_addr_t dma_addr; + u32 buf_len; + + mdp->cur_rx = 0; + mdp->cur_tx = 0; + mdp->dirty_rx = 0; + mdp->dirty_tx = 0; + + memset(mdp->rx_ring, 0, rx_ringsize); + + /* build Rx ring buffer */ + for (i = 0; i < mdp->num_rx_ring; i++) { + /* skb */ + mdp->rx_skbuff[i] = NULL; + skb = netdev_alloc_skb(ndev, skbuff_size); + if (skb == NULL) + break; + sh_eth_set_receive_align(skb); + + /* The size of the buffer is a multiple of 32 bytes. */ + buf_len = ALIGN(mdp->rx_buf_sz, 32); + dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { + kfree_skb(skb); + break; + } + mdp->rx_skbuff[i] = skb; + + /* RX descriptor */ + rxdesc = &mdp->rx_ring[i]; + rxdesc->len = cpu_to_le32(buf_len << 16); + rxdesc->addr = cpu_to_le32(dma_addr); + rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); + + /* Rx descriptor address set */ + if (i == 0) { + sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); + if (mdp->cd->xdfar_rw) + sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); + } + } + + mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); + + /* Mark the last entry as wrapping the ring. 
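+	 * RD_RDLE makes the DMAC wrap back to the descriptor at RDLAR after
+	 * this one; TD_TDLE below does the same for the Tx ring.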
*/ + if (rxdesc) + rxdesc->status |= cpu_to_le32(RD_RDLE); + + memset(mdp->tx_ring, 0, tx_ringsize); + + /* build Tx ring buffer */ + for (i = 0; i < mdp->num_tx_ring; i++) { + mdp->tx_skbuff[i] = NULL; + txdesc = &mdp->tx_ring[i]; + txdesc->status = cpu_to_le32(TD_TFP); + txdesc->len = cpu_to_le32(0); + if (i == 0) { + /* Tx descriptor address set */ + sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); + if (mdp->cd->xdfar_rw) + sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); + } + } + + txdesc->status |= cpu_to_le32(TD_TDLE); +} + +/* Get skb and descriptor buffer */ +static int sh_eth_ring_init(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int rx_ringsize, tx_ringsize; + + /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the + * card needs room to do 8 byte alignment, +2 so we can reserve + * the first 2 bytes, and +16 gets room for the status word from the + * card. + */ + mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : + (((ndev->mtu + 26 + 7) & ~7) + 2 + 16)); + if (mdp->cd->rpadir) + mdp->rx_buf_sz += NET_IP_ALIGN; + + /* Allocate RX and TX skb rings */ + mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff), + GFP_KERNEL); + if (!mdp->rx_skbuff) + return -ENOMEM; + + mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff), + GFP_KERNEL); + if (!mdp->tx_skbuff) + goto ring_free; + + /* Allocate all Rx descriptors. */ + rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; + mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize, + &mdp->rx_desc_dma, GFP_KERNEL); + if (!mdp->rx_ring) + goto ring_free; + + mdp->dirty_rx = 0; + + /* Allocate all Tx descriptors. */ + tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; + mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize, + &mdp->tx_desc_dma, GFP_KERNEL); + if (!mdp->tx_ring) + goto ring_free; + return 0; + +ring_free: + /* Free Rx and Tx skb ring buffer and DMA buffer */ + sh_eth_ring_free(ndev); + + return -ENOMEM; +} + +static int sh_eth_dev_init(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + /* Soft Reset */ + ret = mdp->cd->soft_reset(ndev); + if (ret) + return ret; + + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + + /* Descriptor format */ + sh_eth_ring_format(ndev); + if (mdp->cd->rpadir) + sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR); + + /* all sh_eth int mask */ + sh_eth_write(ndev, 0, EESIPR); + +#if defined(__LITTLE_ENDIAN) + if (mdp->cd->hw_swap) + sh_eth_write(ndev, EDMR_EL, EDMR); + else +#endif + sh_eth_write(ndev, 0, EDMR); + + /* FIFO size set */ + sh_eth_write(ndev, mdp->cd->fdr_value, FDR); + sh_eth_write(ndev, 0, TFTR); + + /* Frame recv control (enable multiple-packets per rx irq) */ + sh_eth_write(ndev, RMCR_RNC, RMCR); + + sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); + + /* DMA transfer burst mode */ + if (mdp->cd->nbst) + sh_eth_modify(ndev, EDMR, EDMR_NBST, EDMR_NBST); + + /* Burst cycle count upper-limit */ + if (mdp->cd->bculr) + sh_eth_write(ndev, 0x800, BCULR); + + sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); + + if (!mdp->cd->no_trimd) + sh_eth_write(ndev, 0, TRIMD); + + /* Recv frame limit set register */ + sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, + RFLR); + + sh_eth_modify(ndev, EESR, 0, 0); + mdp->irq_enabled = true; + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); + + /* PAUSE Prohibition */ + sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? 
ECMR_DM : 0) | + ECMR_TE | ECMR_RE, ECMR); + + if (mdp->cd->set_rate) + mdp->cd->set_rate(ndev); + + /* E-MAC Status Register clear */ + sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); + + /* E-MAC Interrupt Enable register */ + sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); + + /* Set MAC address */ + update_mac_address(ndev); + + /* mask reset */ + if (mdp->cd->apr) + sh_eth_write(ndev, 1, APR); + if (mdp->cd->mpr) + sh_eth_write(ndev, 1, MPR); + if (mdp->cd->tpauser) + sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); + + /* Setting the Rx mode will start the Rx process. */ + sh_eth_write(ndev, EDRRR_R, EDRRR); + + return ret; +} + +static void sh_eth_dev_exit(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i; + + /* Deactivate all TX descriptors, so DMA should stop at next + * packet boundary if it's currently running + */ + for (i = 0; i < mdp->num_tx_ring; i++) + mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT); + + /* Disable TX FIFO egress to MAC */ + sh_eth_rcv_snd_disable(ndev); + + /* Stop RX DMA at next packet boundary */ + sh_eth_write(ndev, 0, EDRRR); + + /* Aside from TX DMA, we can't tell when the hardware is + * really stopped, so we need to reset to make sure. + * Before doing that, wait for long enough to *probably* + * finish transmitting the last packet and poll stats. + */ + msleep(2); /* max frame time at 10 Mbps < 1250 us */ + sh_eth_get_stats(ndev); + mdp->cd->soft_reset(ndev); + + /* Set the RMII mode again if required */ + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + + /* Set MAC address again */ + update_mac_address(ndev); +} + +/* Packet receive function */ +static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_rxdesc *rxdesc; + + int entry = mdp->cur_rx % mdp->num_rx_ring; + int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; + int limit; + struct sk_buff *skb; + u32 desc_status; + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; + dma_addr_t dma_addr; + u16 pkt_len; + u32 buf_len; + + boguscnt = min(boguscnt, *quota); + limit = boguscnt; + rxdesc = &mdp->rx_ring[entry]; + while (!(rxdesc->status & cpu_to_le32(RD_RACT))) { + /* RACT bit must be checked before all the following reads */ + dma_rmb(); + desc_status = le32_to_cpu(rxdesc->status); + pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL; + + if (--boguscnt < 0) + break; + + netif_info(mdp, rx_status, ndev, + "rx entry %d status 0x%08x len %d\n", + entry, desc_status, pkt_len); + + if (!(desc_status & RDFEND)) + ndev->stats.rx_length_errors++; + + /* In case of almost all GETHER/ETHERs, the Receive Frame State + * (RFS) bits in the Receive Descriptor 0 are from bit 9 to + * bit 0. However, in case of the R8A7740 and R7S72100 + * the RFS bits are from bit 25 to bit 16. So, the + * driver needs right shifting by 16. 
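+	 * (The driver keys this shift off the hw_checksum flag: the SoCs
+	 * with the shifted RFS layout are also the ones that provide CSMR.)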
+ */ + if (mdp->cd->hw_checksum) + desc_status >>= 16; + + skb = mdp->rx_skbuff[entry]; + if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | + RD_RFS5 | RD_RFS6 | RD_RFS10)) { + ndev->stats.rx_errors++; + if (desc_status & RD_RFS1) + ndev->stats.rx_crc_errors++; + if (desc_status & RD_RFS2) + ndev->stats.rx_frame_errors++; + if (desc_status & RD_RFS3) + ndev->stats.rx_length_errors++; + if (desc_status & RD_RFS4) + ndev->stats.rx_length_errors++; + if (desc_status & RD_RFS6) + ndev->stats.rx_missed_errors++; + if (desc_status & RD_RFS10) + ndev->stats.rx_over_errors++; + } else if (skb) { + dma_addr = le32_to_cpu(rxdesc->addr); + if (!mdp->cd->hw_swap) + sh_eth_soft_swap( + phys_to_virt(ALIGN(dma_addr, 4)), + pkt_len + 2); + mdp->rx_skbuff[entry] = NULL; + if (mdp->cd->rpadir) + skb_reserve(skb, NET_IP_ALIGN); + dma_unmap_single(&mdp->pdev->dev, dma_addr, + ALIGN(mdp->rx_buf_sz, 32), + DMA_FROM_DEVICE); + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + netif_receive_skb(skb); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += pkt_len; + if (desc_status & RD_RFS8) + ndev->stats.multicast++; + } + entry = (++mdp->cur_rx) % mdp->num_rx_ring; + rxdesc = &mdp->rx_ring[entry]; + } + + /* Refill the Rx ring buffers. */ + for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { + entry = mdp->dirty_rx % mdp->num_rx_ring; + rxdesc = &mdp->rx_ring[entry]; + /* The size of the buffer is 32 byte boundary. */ + buf_len = ALIGN(mdp->rx_buf_sz, 32); + rxdesc->len = cpu_to_le32(buf_len << 16); + + if (mdp->rx_skbuff[entry] == NULL) { + skb = netdev_alloc_skb(ndev, skbuff_size); + if (skb == NULL) + break; /* Better luck next round. */ + sh_eth_set_receive_align(skb); + dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, + buf_len, DMA_FROM_DEVICE); + if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { + kfree_skb(skb); + break; + } + mdp->rx_skbuff[entry] = skb; + + skb_checksum_none_assert(skb); + rxdesc->addr = cpu_to_le32(dma_addr); + } + dma_wmb(); /* RACT bit must be set after all the above writes */ + if (entry >= mdp->num_rx_ring - 1) + rxdesc->status |= + cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE); + else + rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP); + } + + /* Restart Rx engine if stopped. */ + /* If we don't need to check status, don't. 
-KDU */ + if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { + /* fix the values for the next receiving if RDE is set */ + if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) { + u32 count = (sh_eth_read(ndev, RDFAR) - + sh_eth_read(ndev, RDLAR)) >> 4; + + mdp->cur_rx = count; + mdp->dirty_rx = count; + } + sh_eth_write(ndev, EDRRR_R, EDRRR); + } + + *quota -= limit - boguscnt - 1; + + return *quota <= 0; +} + +static void sh_eth_rcv_snd_disable(struct net_device *ndev) +{ + /* disable tx and rx */ + sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0); +} + +static void sh_eth_rcv_snd_enable(struct net_device *ndev) +{ + /* enable tx and rx */ + sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); +} + +/* E-MAC interrupt handler */ +static void sh_eth_emac_interrupt(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 felic_stat; + u32 link_stat; + + felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR); + sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ + if (felic_stat & ECSR_ICD) + ndev->stats.tx_carrier_errors++; + if (felic_stat & ECSR_MPD) + pm_wakeup_event(&mdp->pdev->dev, 0); + if (felic_stat & ECSR_LCHNG) { + /* Link Changed */ + if (mdp->cd->no_psr || mdp->no_ether_link) + return; + link_stat = sh_eth_read(ndev, PSR); + if (mdp->ether_link_active_low) + link_stat = ~link_stat; + if (!(link_stat & PHY_ST_LINK)) { + sh_eth_rcv_snd_disable(ndev); + } else { + /* Link Up */ + sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0); + /* clear int */ + sh_eth_modify(ndev, ECSR, 0, 0); + sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP); + /* enable tx and rx */ + sh_eth_rcv_snd_enable(ndev); + } + } +} + +/* error control function */ +static void sh_eth_error(struct net_device *ndev, u32 intr_status) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 mask; + + if (intr_status & EESR_TWB) { + /* Unused write back interrupt */ + if (intr_status & EESR_TABT) { /* Transmit Abort int */ + ndev->stats.tx_aborted_errors++; + netif_err(mdp, tx_err, ndev, "Transmit Abort\n"); + } + } + + if (intr_status & EESR_RABT) { + /* Receive Abort int */ + if (intr_status & EESR_RFRMER) { + /* Receive Frame Overflow int */ + ndev->stats.rx_frame_errors++; + } + } + + if (intr_status & EESR_TDE) { + /* Transmit Descriptor Empty int */ + ndev->stats.tx_fifo_errors++; + netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n"); + } + + if (intr_status & EESR_TFE) { + /* FIFO under flow */ + ndev->stats.tx_fifo_errors++; + netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n"); + } + + if (intr_status & EESR_RDE) { + /* Receive Descriptor Empty int */ + ndev->stats.rx_over_errors++; + } + + if (intr_status & EESR_RFE) { + /* Receive FIFO Overflow int */ + ndev->stats.rx_fifo_errors++; + } + + if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { + /* Address Error */ + ndev->stats.tx_fifo_errors++; + netif_err(mdp, tx_err, ndev, "Address Error\n"); + } + + mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; + if (mdp->cd->no_ade) + mask &= ~EESR_ADE; + if (intr_status & mask) { + /* Tx error */ + u32 edtrr = sh_eth_read(ndev, EDTRR); + + /* dmesg */ + netdev_err(ndev, "TX error. 
status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", + intr_status, mdp->cur_tx, mdp->dirty_tx, + (u32)ndev->state, edtrr); + /* dirty buffer free */ + sh_eth_tx_free(ndev, true); + + /* SH7712 BUG */ + if (edtrr ^ mdp->cd->edtrr_trns) { + /* tx dma start */ + sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); + } + /* wakeup */ + netif_wake_queue(ndev); + } +} + +static irqreturn_t sh_eth_interrupt(int irq, void *netdev) +{ + struct net_device *ndev = netdev; + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_cpu_data *cd = mdp->cd; + irqreturn_t ret = IRQ_NONE; + u32 intr_status, intr_enable; + + spin_lock(&mdp->lock); + + /* Get interrupt status */ + intr_status = sh_eth_read(ndev, EESR); + /* Mask it with the interrupt mask, forcing ECI interrupt to be always + * enabled since it's the one that comes thru regardless of the mask, + * and we need to fully handle it in sh_eth_emac_interrupt() in order + * to quench it as it doesn't get cleared by just writing 1 to the ECI + * bit... + */ + intr_enable = sh_eth_read(ndev, EESIPR); + intr_status &= intr_enable | EESIPR_ECIIP; + if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI | + cd->eesr_err_check)) + ret = IRQ_HANDLED; + else + goto out; + + if (unlikely(!mdp->irq_enabled)) { + sh_eth_write(ndev, 0, EESIPR); + goto out; + } + + if (intr_status & EESR_RX_CHECK) { + if (napi_schedule_prep(&mdp->napi)) { + /* Mask Rx interrupts */ + sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK, + EESIPR); + __napi_schedule(&mdp->napi); + } else { + netdev_warn(ndev, + "ignoring interrupt, status 0x%08x, mask 0x%08x.\n", + intr_status, intr_enable); + } + } + + /* Tx Check */ + if (intr_status & cd->tx_check) { + /* Clear Tx interrupts */ + sh_eth_write(ndev, intr_status & cd->tx_check, EESR); + + sh_eth_tx_free(ndev, true); + netif_wake_queue(ndev); + } + + /* E-MAC interrupt */ + if (intr_status & EESR_ECI) + sh_eth_emac_interrupt(ndev); + + if (intr_status & cd->eesr_err_check) { + /* Clear error interrupts */ + sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR); + + sh_eth_error(ndev, intr_status); + } + +out: + spin_unlock(&mdp->lock); + + return ret; +} + +static int sh_eth_poll(struct napi_struct *napi, int budget) +{ + struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private, + napi); + struct net_device *ndev = napi->dev; + int quota = budget; + u32 intr_status; + + for (;;) { + intr_status = sh_eth_read(ndev, EESR); + if (!(intr_status & EESR_RX_CHECK)) + break; + /* Clear Rx interrupts */ + sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR); + + if (sh_eth_rx(ndev, intr_status, "a)) + goto out; + } + + napi_complete(napi); + + /* Reenable Rx interrupts */ + if (mdp->irq_enabled) + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); +out: + return budget - quota; +} + +/* PHY state control function */ +static void sh_eth_adjust_link(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + unsigned long flags; + int new_state = 0; + + spin_lock_irqsave(&mdp->lock, flags); + + /* Disable TX and RX right over here, if E-MAC change is ignored */ + if (mdp->cd->no_psr || mdp->no_ether_link) + sh_eth_rcv_snd_disable(ndev); + + if (phydev->link) { + if (phydev->duplex != mdp->duplex) { + new_state = 1; + mdp->duplex = phydev->duplex; + if (mdp->cd->set_duplex) + mdp->cd->set_duplex(ndev); + } + + if (phydev->speed != mdp->speed) { + new_state = 1; + mdp->speed = phydev->speed; + if (mdp->cd->set_rate) + 
mdp->cd->set_rate(ndev); + } + if (!mdp->link) { + sh_eth_modify(ndev, ECMR, ECMR_TXF, 0); + new_state = 1; + mdp->link = phydev->link; + } + } else if (mdp->link) { + new_state = 1; + mdp->link = 0; + mdp->speed = 0; + mdp->duplex = -1; + } + + /* Enable TX and RX right over here, if E-MAC change is ignored */ + if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link) + sh_eth_rcv_snd_enable(ndev); + + mmiowb(); + spin_unlock_irqrestore(&mdp->lock, flags); + + if (new_state && netif_msg_link(mdp)) + phy_print_status(phydev); +} + +/* PHY init function */ +static int sh_eth_phy_init(struct net_device *ndev) +{ + struct device_node *np = ndev->dev.parent->of_node; + struct sh_eth_private *mdp = netdev_priv(ndev); + struct phy_device *phydev; + + mdp->link = 0; + mdp->speed = 0; + mdp->duplex = -1; + + /* Try connect to PHY */ + if (np) { + struct device_node *pn; + + pn = of_parse_phandle(np, "phy-handle", 0); + phydev = of_phy_connect(ndev, pn, + sh_eth_adjust_link, 0, + mdp->phy_interface); + + of_node_put(pn); + if (!phydev) + phydev = ERR_PTR(-ENOENT); + } else { + char phy_id[MII_BUS_ID_SIZE + 3]; + + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, + mdp->mii_bus->id, mdp->phy_id); + + phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, + mdp->phy_interface); + } + + if (IS_ERR(phydev)) { + netdev_err(ndev, "failed to connect PHY\n"); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) { + int err = phy_set_max_speed(phydev, SPEED_100); + if (err) { + netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n"); + phy_disconnect(phydev); + return err; + } + } + + phy_attached_info(phydev); + + return 0; +} + +/* PHY control start function */ +static int sh_eth_phy_start(struct net_device *ndev) +{ + int ret; + + ret = sh_eth_phy_init(ndev); + if (ret) + return ret; + + phy_start(ndev->phydev); + + return 0; +} + +/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the + * version must be bumped as well. Just adding registers up to that + * limit is fine, as long as the existing register indices don't + * change. + */ +#define SH_ETH_REG_DUMP_VERSION 1 +#define SH_ETH_REG_DUMP_MAX_REGS 256 + +static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_cpu_data *cd = mdp->cd; + u32 *valid_map; + size_t len; + + BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS); + + /* Dump starts with a bitmap that tells ethtool which + * registers are defined for this chip. + */ + len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32); + if (buf) { + valid_map = buf; + buf += len; + } else { + valid_map = NULL; + } + + /* Add a register to the dump, if it has a defined offset. + * This automatically skips most undefined registers, but for + * some it is also necessary to check a capability flag in + * struct sh_eth_cpu_data. 
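+	 * Each add_reg() below bumps len by one 32-bit word; the buffer
+	 * starts with the DIV_ROUND_UP(256, 32) = 8 bitmap words, and the
+	 * function finally returns len * 4 bytes.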
+ */ +#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32) +#define add_reg_from(reg, read_expr) do { \ + if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \ + if (buf) { \ + mark_reg_valid(reg); \ + *buf++ = read_expr; \ + } \ + ++len; \ + } \ + } while (0) +#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg)) +#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg)) + + add_reg(EDSR); + add_reg(EDMR); + add_reg(EDTRR); + add_reg(EDRRR); + add_reg(EESR); + add_reg(EESIPR); + add_reg(TDLAR); + add_reg(TDFAR); + add_reg(TDFXR); + add_reg(TDFFR); + add_reg(RDLAR); + add_reg(RDFAR); + add_reg(RDFXR); + add_reg(RDFFR); + add_reg(TRSCER); + add_reg(RMFCR); + add_reg(TFTR); + add_reg(FDR); + add_reg(RMCR); + add_reg(TFUCR); + add_reg(RFOCR); + if (cd->rmiimode) + add_reg(RMIIMODE); + add_reg(FCFTR); + if (cd->rpadir) + add_reg(RPADIR); + if (!cd->no_trimd) + add_reg(TRIMD); + add_reg(ECMR); + add_reg(ECSR); + add_reg(ECSIPR); + add_reg(PIR); + if (!cd->no_psr) + add_reg(PSR); + add_reg(RDMLR); + add_reg(RFLR); + add_reg(IPGR); + if (cd->apr) + add_reg(APR); + if (cd->mpr) + add_reg(MPR); + add_reg(RFCR); + add_reg(RFCF); + if (cd->tpauser) + add_reg(TPAUSER); + add_reg(TPAUSECR); + add_reg(GECMR); + if (cd->bculr) + add_reg(BCULR); + add_reg(MAHR); + add_reg(MALR); + add_reg(TROCR); + add_reg(CDCR); + add_reg(LCCR); + add_reg(CNDCR); + add_reg(CEFCR); + add_reg(FRECR); + add_reg(TSFRCR); + add_reg(TLFRCR); + add_reg(CERCR); + add_reg(CEECR); + add_reg(MAFCR); + if (cd->rtrate) + add_reg(RTRATE); + if (cd->hw_checksum) + add_reg(CSMR); + if (cd->select_mii) + add_reg(RMII_MII); + if (cd->tsu) { + add_tsu_reg(ARSTR); + add_tsu_reg(TSU_CTRST); + if (cd->dual_port) { + add_tsu_reg(TSU_FWEN0); + add_tsu_reg(TSU_FWEN1); + add_tsu_reg(TSU_FCM); + add_tsu_reg(TSU_BSYSL0); + add_tsu_reg(TSU_BSYSL1); + add_tsu_reg(TSU_PRISL0); + add_tsu_reg(TSU_PRISL1); + add_tsu_reg(TSU_FWSL0); + add_tsu_reg(TSU_FWSL1); + } + add_tsu_reg(TSU_FWSLC); + if (cd->dual_port) { + add_tsu_reg(TSU_QTAGM0); + add_tsu_reg(TSU_QTAGM1); + add_tsu_reg(TSU_FWSR); + add_tsu_reg(TSU_FWINMK); + add_tsu_reg(TSU_ADQT0); + add_tsu_reg(TSU_ADQT1); + add_tsu_reg(TSU_VTAG0); + add_tsu_reg(TSU_VTAG1); + } + add_tsu_reg(TSU_ADSBSY); + add_tsu_reg(TSU_TEN); + add_tsu_reg(TSU_POST1); + add_tsu_reg(TSU_POST2); + add_tsu_reg(TSU_POST3); + add_tsu_reg(TSU_POST4); + /* This is the start of a table, not just a single register. 
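+		 * TSU_ADRH0 heads SH_ETH_TSU_CAM_ENTRIES pairs of high/low
+		 * address words, dumped as one run under a single valid-map
+		 * bit.
+		 */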
*/ + if (buf) { + unsigned int i; + + mark_reg_valid(TSU_ADRH0); + for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++) + *buf++ = ioread32(mdp->tsu_addr + + mdp->reg_offset[TSU_ADRH0] + + i * 4); + } + len += SH_ETH_TSU_CAM_ENTRIES * 2; + } + +#undef mark_reg_valid +#undef add_reg_from +#undef add_reg +#undef add_tsu_reg + + return len * 4; +} + +static int sh_eth_get_regs_len(struct net_device *ndev) +{ + return __sh_eth_get_regs(ndev, NULL); +} + +static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs, + void *buf) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + regs->version = SH_ETH_REG_DUMP_VERSION; + + pm_runtime_get_sync(&mdp->pdev->dev); + __sh_eth_get_regs(ndev, buf); + pm_runtime_put_sync(&mdp->pdev->dev); +} + +static u32 sh_eth_get_msglevel(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + return mdp->msg_enable; +} + +static void sh_eth_set_msglevel(struct net_device *ndev, u32 value) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + mdp->msg_enable = value; +} + +static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_current", "tx_current", + "rx_dirty", "tx_dirty", +}; +#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats) + +static int sh_eth_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return SH_ETH_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void sh_eth_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i = 0; + + /* device-specific stats */ + data[i++] = mdp->cur_rx; + data[i++] = mdp->cur_tx; + data[i++] = mdp->dirty_rx; + data[i++] = mdp->dirty_tx; +} + +static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, sh_eth_gstrings_stats, + sizeof(sh_eth_gstrings_stats)); + break; + } +} + +static void sh_eth_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + ring->rx_max_pending = RX_RING_MAX; + ring->tx_max_pending = TX_RING_MAX; + ring->rx_pending = mdp->num_rx_ring; + ring->tx_pending = mdp->num_tx_ring; +} + +static int sh_eth_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + if (ring->tx_pending > TX_RING_MAX || + ring->rx_pending > RX_RING_MAX || + ring->tx_pending < TX_RING_MIN || + ring->rx_pending < RX_RING_MIN) + return -EINVAL; + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + if (netif_running(ndev)) { + netif_device_detach(ndev); + netif_tx_disable(ndev); + + /* Serialise with the interrupt handler and NAPI, then + * disable interrupts. We have to clear the + * irq_enabled flag first to ensure that interrupts + * won't be re-enabled. + */ + mdp->irq_enabled = false; + synchronize_irq(ndev->irq); + napi_synchronize(&mdp->napi); + sh_eth_write(ndev, 0x0000, EESIPR); + + sh_eth_dev_exit(ndev); + + /* Free all the skbuffs in the Rx queue and the DMA buffers. 
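+		 * They are reallocated below by sh_eth_ring_init() with the
+		 * new ring sizes.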
*/ + sh_eth_ring_free(ndev); + } + + /* Set new parameters */ + mdp->num_rx_ring = ring->rx_pending; + mdp->num_tx_ring = ring->tx_pending; + + if (netif_running(ndev)) { + ret = sh_eth_ring_init(ndev); + if (ret < 0) { + netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", + __func__); + return ret; + } + ret = sh_eth_dev_init(ndev); + if (ret < 0) { + netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", + __func__); + return ret; + } + + netif_device_attach(ndev); + } + + return 0; +} + +static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + wol->supported = 0; + wol->wolopts = 0; + + if (mdp->cd->magic) { + wol->supported = WAKE_MAGIC; + wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; + } +} + +static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); + + device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled); + + return 0; +} + +static const struct ethtool_ops sh_eth_ethtool_ops = { + .get_regs_len = sh_eth_get_regs_len, + .get_regs = sh_eth_get_regs, + .nway_reset = phy_ethtool_nway_reset, + .get_msglevel = sh_eth_get_msglevel, + .set_msglevel = sh_eth_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = sh_eth_get_strings, + .get_ethtool_stats = sh_eth_get_ethtool_stats, + .get_sset_count = sh_eth_get_sset_count, + .get_ringparam = sh_eth_get_ringparam, + .set_ringparam = sh_eth_set_ringparam, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_wol = sh_eth_get_wol, + .set_wol = sh_eth_set_wol, +}; + +/* network device open function */ +static int sh_eth_open(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + pm_runtime_get_sync(&mdp->pdev->dev); + + napi_enable(&mdp->napi); + + ret = request_irq(ndev->irq, sh_eth_interrupt, + mdp->cd->irq_flags, ndev->name, ndev); + if (ret) { + netdev_err(ndev, "Can not assign IRQ number\n"); + goto out_napi_off; + } + + /* Descriptor set */ + ret = sh_eth_ring_init(ndev); + if (ret) + goto out_free_irq; + + /* device init */ + ret = sh_eth_dev_init(ndev); + if (ret) + goto out_free_irq; + + /* PHY control start*/ + ret = sh_eth_phy_start(ndev); + if (ret) + goto out_free_irq; + + netif_start_queue(ndev); + + mdp->is_opened = 1; + + return ret; + +out_free_irq: + free_irq(ndev->irq, ndev); +out_napi_off: + napi_disable(&mdp->napi); + pm_runtime_put_sync(&mdp->pdev->dev); + return ret; +} + +/* Timeout function */ +static void sh_eth_tx_timeout(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_rxdesc *rxdesc; + int i; + + netif_stop_queue(ndev); + + netif_err(mdp, timer, ndev, + "transmit timed out, status %8.8x, resetting...\n", + sh_eth_read(ndev, EESR)); + + /* tx_errors count up */ + ndev->stats.tx_errors++; + + /* Free all the skbuffs in the Rx queue. 
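+	 * The poison address (0xBADF00D0) written below makes stray DMA
+	 * into a freed buffer easy to spot.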
*/ + for (i = 0; i < mdp->num_rx_ring; i++) { + rxdesc = &mdp->rx_ring[i]; + rxdesc->status = cpu_to_le32(0); + rxdesc->addr = cpu_to_le32(0xBADF00D0); + dev_kfree_skb(mdp->rx_skbuff[i]); + mdp->rx_skbuff[i] = NULL; + } + for (i = 0; i < mdp->num_tx_ring; i++) { + dev_kfree_skb(mdp->tx_skbuff[i]); + mdp->tx_skbuff[i] = NULL; + } + + /* device init */ + sh_eth_dev_init(ndev); + + netif_start_queue(ndev); +} + +/* Packet transmit function */ +static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_txdesc *txdesc; + dma_addr_t dma_addr; + u32 entry; + unsigned long flags; + + spin_lock_irqsave(&mdp->lock, flags); + if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { + if (!sh_eth_tx_free(ndev, true)) { + netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); + netif_stop_queue(ndev); + spin_unlock_irqrestore(&mdp->lock, flags); + return NETDEV_TX_BUSY; + } + } + spin_unlock_irqrestore(&mdp->lock, flags); + + if (skb_put_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + entry = mdp->cur_tx % mdp->num_tx_ring; + mdp->tx_skbuff[entry] = skb; + txdesc = &mdp->tx_ring[entry]; + /* soft swap. */ + if (!mdp->cd->hw_swap) + sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); + dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { + kfree_skb(skb); + return NETDEV_TX_OK; + } + txdesc->addr = cpu_to_le32(dma_addr); + txdesc->len = cpu_to_le32(skb->len << 16); + + dma_wmb(); /* TACT bit must be set after all the above writes */ + if (entry >= mdp->num_tx_ring - 1) + txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE); + else + txdesc->status |= cpu_to_le32(TD_TACT); + + wmb(); /* cur_tx must be incremented after TACT bit was set */ + mdp->cur_tx++; + + if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns)) + sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); + + return NETDEV_TX_OK; +} + +/* The statistics registers have write-clear behaviour, which means we + * will lose any increment between the read and write. We mitigate + * this by only clearing when we read a non-zero value, so we will + * never falsely report a total of zero. + */ +static void +sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg) +{ + u32 delta = sh_eth_read(ndev, reg); + + if (delta) { + *stat += delta; + sh_eth_write(ndev, 0, reg); + } +} + +static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (mdp->cd->no_tx_cntrs) + return &ndev->stats; + + if (!mdp->is_opened) + return &ndev->stats; + + sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR); + sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR); + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR); + + if (mdp->cd->cexcr) { + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CERCR); + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CEECR); + } else { + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CNDCR); + } + + return &ndev->stats; +} + +/* device close function */ +static int sh_eth_close(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + netif_stop_queue(ndev); + + /* Serialise with the interrupt handler and NAPI, then disable + * interrupts. We have to clear the irq_enabled flag first to + * ensure that interrupts won't be re-enabled. 
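+	 * This mirrors the shutdown ordering used in sh_eth_set_ringparam().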
+	 */
+	mdp->irq_enabled = false;
+	synchronize_irq(ndev->irq);
+	napi_disable(&mdp->napi);
+	sh_eth_write(ndev, 0x0000, EESIPR);
+
+	sh_eth_dev_exit(ndev);
+
+	/* PHY Disconnect */
+	if (ndev->phydev) {
+		phy_stop(ndev->phydev);
+		phy_disconnect(ndev->phydev);
+	}
+
+	free_irq(ndev->irq, ndev);
+
+	/* Free all the skbuffs in the Rx queue and the DMA buffer. */
+	sh_eth_ring_free(ndev);
+
+	mdp->is_opened = 0;
+
+	pm_runtime_put(&mdp->pdev->dev);
+
+	return 0;
+}
+
+/* ioctl to device function */
+static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+	struct phy_device *phydev = ndev->phydev;
+
+	if (!netif_running(ndev))
+		return -EINVAL;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_mii_ioctl(phydev, rq, cmd);
+}
+
+static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	ndev->mtu = new_mtu;
+	netdev_update_features(ndev);
+
+	return 0;
+}
+
+/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
+static u32 sh_eth_tsu_get_post_mask(int entry)
+{
+	return 0x0f << (28 - ((entry % 8) * 4));
+}
+
+static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
+{
+	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
+}
+
+static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
+					     int entry)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int reg = TSU_POST1 + entry / 8;
+	u32 tmp;
+
+	tmp = sh_eth_tsu_read(mdp, reg);
+	sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg);
+}
+
+static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
+					      int entry)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int reg = TSU_POST1 + entry / 8;
+	u32 post_mask, ref_mask, tmp;
+
+	post_mask = sh_eth_tsu_get_post_mask(entry);
+	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
+
+	tmp = sh_eth_tsu_read(mdp, reg);
+	sh_eth_tsu_write(mdp, tmp & ~post_mask, reg);
+
+	/* Return "true" if the other port still has this entry enabled */
+	return tmp & ref_mask;
+}
+
+static int sh_eth_tsu_busy(struct net_device *ndev)
+{
+	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+
+	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
+		udelay(10);
+		timeout--;
+		if (timeout <= 0) {
+			netdev_err(ndev, "%s: timeout\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
+static int sh_eth_tsu_write_entry(struct net_device *ndev, u16 offset,
+				  const u8 *addr)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	u32 val;
+
+	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
+	iowrite32(val, mdp->tsu_addr + offset);
+	if (sh_eth_tsu_busy(ndev) < 0)
+		return -EBUSY;
+
+	val = addr[4] << 8 | addr[5];
+	iowrite32(val, mdp->tsu_addr + offset + 4);
+	if (sh_eth_tsu_busy(ndev) < 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static void sh_eth_tsu_read_entry(struct net_device *ndev, u16 offset, u8 *addr)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	u32 val;
+
+	val = ioread32(mdp->tsu_addr + offset);
+	addr[0] = (val >> 24) & 0xff;
+	addr[1] = (val >> 16) & 0xff;
+	addr[2] = (val >> 8) & 0xff;
+	addr[3] = val & 0xff;
+	val = ioread32(mdp->tsu_addr + offset + 4);
+	addr[4] = (val >> 8) & 0xff;
+	addr[5] = val & 0xff;
+}
+
+static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+	int i;
+	u8 c_addr[ETH_ALEN];
+
+	for (i = 0; i <
SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
+		sh_eth_tsu_read_entry(ndev, reg_offset, c_addr);
+		if (ether_addr_equal(addr, c_addr))
+			return i;
+	}
+
+	return -ENOENT;
+}
+
+static int sh_eth_tsu_find_empty(struct net_device *ndev)
+{
+	u8 blank[ETH_ALEN];
+	int entry;
+
+	memset(blank, 0, sizeof(blank));
+	entry = sh_eth_tsu_find_entry(ndev, blank);
+	return (entry < 0) ? -ENOMEM : entry;
+}
+
+static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
+					      int entry)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+	int ret;
+	u8 blank[ETH_ALEN];
+
+	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
+			 ~(1 << (31 - entry)), TSU_TEN);
+
+	memset(blank, 0, sizeof(blank));
+	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+	int i, ret;
+
+	if (!mdp->cd->tsu)
+		return 0;
+
+	i = sh_eth_tsu_find_entry(ndev, addr);
+	if (i < 0) {
+		/* No entry found, create one */
+		i = sh_eth_tsu_find_empty(ndev);
+		if (i < 0)
+			return -ENOMEM;
+		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
+		if (ret < 0)
+			return ret;
+
+		/* Enable the entry */
+		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
+				 (1 << (31 - i)), TSU_TEN);
+	}
+
+	/* Entry found or created, enable POST */
+	sh_eth_tsu_enable_cam_entry_post(ndev, i);
+
+	return 0;
+}
+
+static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int i, ret;
+
+	if (!mdp->cd->tsu)
+		return 0;
+
+	i = sh_eth_tsu_find_entry(ndev, addr);
+	if (i >= 0) {
+		/* Entry found */
+		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
+			goto done;
+
+		/* Disable the entry if both ports were disabled */
+		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
+		if (ret < 0)
+			return ret;
+	}
+done:
+	return 0;
+}
+
+static int sh_eth_tsu_purge_all(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int i, ret;
+
+	if (!mdp->cd->tsu)
+		return 0;
+
+	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
+		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
+			continue;
+
+		/* Disable the entry if both ports were disabled */
+		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+	u8 addr[ETH_ALEN];
+	int i;
+
+	if (!mdp->cd->tsu)
+		return;
+
+	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
+		sh_eth_tsu_read_entry(ndev, reg_offset, addr);
+		if (is_multicast_ether_addr(addr))
+			sh_eth_tsu_del_entry(ndev, addr);
+	}
+}
+
+/* Update promiscuous flag and multicast filter */
+static void sh_eth_set_rx_mode(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	u32 ecmr_bits;
+	int mcast_all = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdp->lock, flags);
+	/* Initial condition is MCT = 1, PRM = 0.
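+	 * (MCT enables CAM-based multicast filtering; PRM is promiscuous
+	 * mode.)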
+	 * Depending on ndev->flags, set PRM or clear MCT
+	 */
+	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
+	if (mdp->cd->tsu)
+		ecmr_bits |= ECMR_MCT;
+
+	if (!(ndev->flags & IFF_MULTICAST)) {
+		sh_eth_tsu_purge_mcast(ndev);
+		mcast_all = 1;
+	}
+	if (ndev->flags & IFF_ALLMULTI) {
+		sh_eth_tsu_purge_mcast(ndev);
+		ecmr_bits &= ~ECMR_MCT;
+		mcast_all = 1;
+	}
+
+	if (ndev->flags & IFF_PROMISC) {
+		sh_eth_tsu_purge_all(ndev);
+		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
+	} else if (mdp->cd->tsu) {
+		struct netdev_hw_addr *ha;
+		netdev_for_each_mc_addr(ha, ndev) {
+			if (mcast_all && is_multicast_ether_addr(ha->addr))
+				continue;
+
+			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
+				if (!mcast_all) {
+					sh_eth_tsu_purge_mcast(ndev);
+					ecmr_bits &= ~ECMR_MCT;
+					mcast_all = 1;
+				}
+			}
+		}
+	}
+
+	/* update the ethernet mode */
+	sh_eth_write(ndev, ecmr_bits, ECMR);
+
+	spin_unlock_irqrestore(&mdp->lock, flags);
+}
+
+static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
+{
+	if (!mdp->port)
+		return TSU_VTAG0;
+	else
+		return TSU_VTAG1;
+}
+
+static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
+				  __be16 proto, u16 vid)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
+
+	if (unlikely(!mdp->cd->tsu))
+		return -EPERM;
+
+	/* No filtering if vid = 0 */
+	if (!vid)
+		return 0;
+
+	mdp->vlan_num_ids++;
+
+	/* The controller has one VLAN tag HW filter. So, if the filter is
+	 * already enabled, the driver disables it again when a second VID
+	 * is added, and all VLAN IDs then pass through unfiltered.
+	 */
+	if (mdp->vlan_num_ids > 1) {
+		/* disable VLAN filter */
+		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
+		return 0;
+	}
+
+	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
+			 vtag_reg_index);
+
+	return 0;
+}
+
+static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
+				   __be16 proto, u16 vid)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
+
+	if (unlikely(!mdp->cd->tsu))
+		return -EPERM;
+
+	/* No filtering if vid = 0 */
+	if (!vid)
+		return 0;
+
+	mdp->vlan_num_ids--;
+	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
+
+	return 0;
+}
+
+/* SuperH's TSU register init function */
+static void sh_eth_tsu_init(struct sh_eth_private *mdp)
+{
+	if (!mdp->cd->dual_port) {
+		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+		sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
+				 TSU_FWSLC);	/* Enable POST registers */
+		return;
+	}
+
+	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
+	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
+	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
+	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
+	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
+	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
+	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
+	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
+	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
+	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
+	sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
+	sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
+	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
+	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
+	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
+	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
+	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
+	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
+	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/*
Disable CAM entry [24-31] */ +} + +/* MDIO bus release function */ +static int sh_mdio_release(struct sh_eth_private *mdp) +{ + /* unregister mdio bus */ + mdiobus_unregister(mdp->mii_bus); + + /* free bitbang info */ + free_mdio_bitbang(mdp->mii_bus); + + return 0; +} + +/* MDIO bus init function */ +static int sh_mdio_init(struct sh_eth_private *mdp, + struct sh_eth_plat_data *pd) +{ + int ret; + struct bb_info *bitbang; + struct platform_device *pdev = mdp->pdev; + struct device *dev = &mdp->pdev->dev; + + /* create bit control struct for PHY */ + bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL); + if (!bitbang) + return -ENOMEM; + + /* bitbang init */ + bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; + bitbang->set_gate = pd->set_mdio_gate; + bitbang->ctrl.ops = &bb_ops; + + /* MII controller setting */ + mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); + if (!mdp->mii_bus) + return -ENOMEM; + + /* Hook up MII support for ethtool */ + mdp->mii_bus->name = "sh_mii"; + mdp->mii_bus->parent = dev; + snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* register MDIO bus */ + if (pd->phy_irq > 0) + mdp->mii_bus->irq[pd->phy] = pd->phy_irq; + + ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); + if (ret) + goto out_free_bus; + + return 0; + +out_free_bus: + free_mdio_bitbang(mdp->mii_bus); + return ret; +} + +static const u16 *sh_eth_get_register_offset(int register_type) +{ + const u16 *reg_offset = NULL; + + switch (register_type) { + case SH_ETH_REG_GIGABIT: + reg_offset = sh_eth_offset_gigabit; + break; + case SH_ETH_REG_FAST_RZ: + reg_offset = sh_eth_offset_fast_rz; + break; + case SH_ETH_REG_FAST_RCAR: + reg_offset = sh_eth_offset_fast_rcar; + break; + case SH_ETH_REG_FAST_SH4: + reg_offset = sh_eth_offset_fast_sh4; + break; + case SH_ETH_REG_FAST_SH3_SH2: + reg_offset = sh_eth_offset_fast_sh3_sh2; + break; + } + + return reg_offset; +} + +static const struct net_device_ops sh_eth_netdev_ops = { + .ndo_open = sh_eth_open, + .ndo_stop = sh_eth_close, + .ndo_start_xmit = sh_eth_start_xmit, + .ndo_get_stats = sh_eth_get_stats, + .ndo_set_rx_mode = sh_eth_set_rx_mode, + .ndo_tx_timeout = sh_eth_tx_timeout, + .ndo_do_ioctl = sh_eth_do_ioctl, + .ndo_change_mtu = sh_eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +}; + +static const struct net_device_ops sh_eth_netdev_ops_tsu = { + .ndo_open = sh_eth_open, + .ndo_stop = sh_eth_close, + .ndo_start_xmit = sh_eth_start_xmit, + .ndo_get_stats = sh_eth_get_stats, + .ndo_set_rx_mode = sh_eth_set_rx_mode, + .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, + .ndo_tx_timeout = sh_eth_tx_timeout, + .ndo_do_ioctl = sh_eth_do_ioctl, + .ndo_change_mtu = sh_eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +}; + +#ifdef CONFIG_OF +static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct sh_eth_plat_data *pdata; + const char *mac_addr; + int ret; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + ret = of_get_phy_mode(np); + if (ret < 0) + return NULL; + pdata->phy_interface = ret; + + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(pdata->mac_addr, mac_addr, ETH_ALEN); + + pdata->no_ether_link = + of_property_read_bool(np, "renesas,no-ether-link"); + pdata->ether_link_active_low = + of_property_read_bool(np, "renesas,ether-link-active-low"); + 
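+ /* Editor's sketch (an assumption, not part of the original patch):
+ * an illustrative DT node this parser would consume. The compatible
+ * string comes from the match table below; the unit address and the
+ * "rmii" phy-mode are assumed values for illustration only:
+ *
+ * ethernet@ee700000 {
+ * compatible = "renesas,ether-r8a7791";
+ * phy-mode = "rmii";
+ * renesas,ether-link-active-low;
+ * };
+ */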
+ return pdata; +} + +static const struct of_device_id sh_eth_match_table[] = { + { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data }, + { .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data }, + { .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data }, + { .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, + { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data }, + { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, + { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data }, + { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, + { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, + { } +}; +MODULE_DEVICE_TABLE(of, sh_eth_match_table); +#else +static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) +{ + return NULL; +} +#endif + +static int sh_eth_drv_probe(struct platform_device *pdev) +{ + struct resource *res; + struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev); + const struct platform_device_id *id = platform_get_device_id(pdev); + struct sh_eth_private *mdp; + struct net_device *ndev; + int ret; + + /* get base addr */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + ndev = alloc_etherdev(sizeof(struct sh_eth_private)); + if (!ndev) + return -ENOMEM; + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto out_release; + ndev->irq = ret; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + mdp = netdev_priv(ndev); + mdp->num_tx_ring = TX_RING_SIZE; + mdp->num_rx_ring = RX_RING_SIZE; + mdp->addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mdp->addr)) { + ret = PTR_ERR(mdp->addr); + goto out_release; + } + + ndev->base_addr = res->start; + + spin_lock_init(&mdp->lock); + mdp->pdev = pdev; + + if (pdev->dev.of_node) + pd = sh_eth_parse_dt(&pdev->dev); + if (!pd) { + dev_err(&pdev->dev, "no platform data\n"); + ret = -EINVAL; + goto out_release; + } + + /* get PHY ID */ + mdp->phy_id = pd->phy; + mdp->phy_interface = pd->phy_interface; + mdp->no_ether_link = pd->no_ether_link; + mdp->ether_link_active_low = pd->ether_link_active_low; + + /* set cpu data */ + if (id) + mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; + else + mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev); + + mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); + if (!mdp->reg_offset) { + dev_err(&pdev->dev, "Unknown register type (%d)\n", + mdp->cd->register_type); + ret = -EINVAL; + goto out_release; + } + sh_eth_set_default_cpu_data(mdp->cd); + + /* User's manual states max MTU should be 2048 but due to the + * alignment calculations in sh_eth_ring_init() the practical + * MTU is a bit less. Maybe this can be optimized some more. 
+ */ + ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + ndev->min_mtu = ETH_MIN_MTU; + + /* set function */ + if (mdp->cd->tsu) + ndev->netdev_ops = &sh_eth_netdev_ops_tsu; + else + ndev->netdev_ops = &sh_eth_netdev_ops; + ndev->ethtool_ops = &sh_eth_ethtool_ops; + ndev->watchdog_timeo = TX_TIMEOUT; + + /* debug message level */ + mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; + + /* read and set MAC address */ + read_mac_address(ndev, pd->mac_addr); + if (!is_valid_ether_addr(ndev->dev_addr)) { + dev_warn(&pdev->dev, + "no valid MAC address supplied, using a random one.\n"); + eth_hw_addr_random(ndev); + } + + if (mdp->cd->tsu) { + int port = pdev->id < 0 ? 0 : pdev->id % 2; + struct resource *rtsu; + + rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!rtsu) { + dev_err(&pdev->dev, "no TSU resource\n"); + ret = -ENODEV; + goto out_release; + } + /* We can only request the TSU region for the first port + * of the two sharing this TSU for the probe to succeed... + */ + if (port == 0 && + !devm_request_mem_region(&pdev->dev, rtsu->start, + resource_size(rtsu), + dev_name(&pdev->dev))) { + dev_err(&pdev->dev, "can't request TSU resource.\n"); + ret = -EBUSY; + goto out_release; + } + /* ioremap the TSU registers */ + mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, + resource_size(rtsu)); + if (!mdp->tsu_addr) { + dev_err(&pdev->dev, "TSU region ioremap() failed.\n"); + ret = -ENOMEM; + goto out_release; + } + mdp->port = port; + ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; + + /* Need to init only the first port of the two sharing a TSU */ + if (port == 0) { + if (mdp->cd->chip_reset) + mdp->cd->chip_reset(ndev); + + /* TSU init (Init only)*/ + sh_eth_tsu_init(mdp); + } + } + + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + + /* MDIO bus init */ + ret = sh_mdio_init(mdp, pd); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "MDIO init failed: %d\n", ret); + goto out_release; + } + + netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64); + + /* network device register */ + ret = register_netdev(ndev); + if (ret) + goto out_napi_del; + + if (mdp->cd->magic) + device_set_wakeup_capable(&pdev->dev, 1); + + /* print device information */ + netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n", + (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); + + pm_runtime_put(&pdev->dev); + platform_set_drvdata(pdev, ndev); + + return ret; + +out_napi_del: + netif_napi_del(&mdp->napi); + sh_mdio_release(mdp); + +out_release: + /* net_dev free */ + free_netdev(ndev); + + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return ret; +} + +static int sh_eth_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct sh_eth_private *mdp = netdev_priv(ndev); + + unregister_netdev(ndev); + netif_napi_del(&mdp->napi); + sh_mdio_release(mdp); + pm_runtime_disable(&pdev->dev); + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP +static int sh_eth_wol_setup(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* Only allow ECI interrupts */ + synchronize_irq(ndev->irq); + napi_disable(&mdp->napi); + sh_eth_write(ndev, EESIPR_ECIIP, EESIPR); + + /* Enable MagicPacket */ + sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); + + return enable_irq_wake(ndev->irq); +} + +static int sh_eth_wol_restore(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + napi_enable(&mdp->napi); + + /* Disable MagicPacket */ + 
sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0); + + /* The device needs to be reset to restore MagicPacket logic + * for next wakeup. If we close and open the device it will + * both be reset and all registers restored. This is what + * happens during suspend and resume without WoL enabled. + */ + ret = sh_eth_close(ndev); + if (ret < 0) + return ret; + ret = sh_eth_open(ndev); + if (ret < 0) + return ret; + + return disable_irq_wake(ndev->irq); +} + +static int sh_eth_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret = 0; + + if (!netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + + if (mdp->wol_enabled) + ret = sh_eth_wol_setup(ndev); + else + ret = sh_eth_close(ndev); + + return ret; +} + +static int sh_eth_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret = 0; + + if (!netif_running(ndev)) + return 0; + + if (mdp->wol_enabled) + ret = sh_eth_wol_restore(ndev); + else + ret = sh_eth_open(ndev); + + if (ret < 0) + return ret; + + netif_device_attach(ndev); + + return ret; +} +#endif + +static int sh_eth_runtime_nop(struct device *dev) +{ + /* Runtime PM callback shared between ->runtime_suspend() + * and ->runtime_resume(). Simply returns success. + * + * This driver re-initializes all registers after + * pm_runtime_get_sync() anyway so there is no need + * to save and restore registers here. + */ + return 0; +} + +static const struct dev_pm_ops sh_eth_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume) + SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL) +}; +#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops) +#else +#define SH_ETH_PM_OPS NULL +#endif + +static const struct platform_device_id sh_eth_id_table[] = { + { "sh7619-ether", (kernel_ulong_t)&sh7619_data }, + { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, + { "sh7724-ether", (kernel_ulong_t)&sh7724_data }, + { "sh7734-gether", (kernel_ulong_t)&sh7734_data }, + { "sh7757-ether", (kernel_ulong_t)&sh7757_data }, + { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga }, + { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, + { } +}; +MODULE_DEVICE_TABLE(platform, sh_eth_id_table); + +static struct platform_driver sh_eth_driver = { + .probe = sh_eth_drv_probe, + .remove = sh_eth_drv_remove, + .id_table = sh_eth_id_table, + .driver = { + .name = CARDNAME, + .pm = SH_ETH_PM_OPS, + .of_match_table = of_match_ptr(sh_eth_match_table), + }, +}; + +module_platform_driver(sh_eth_driver); + +MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda"); +MODULE_DESCRIPTION("Renesas SuperH Ethernet driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h new file mode 100644 index 000000000..0c18650bb --- /dev/null +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -0,0 +1,550 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* SuperH Ethernet device driver + * + * Copyright (C) 2006-2012 Nobuhiro Iwamatsu + * Copyright (C) 2008-2012 Renesas Solutions Corp. 
+ */ + +#ifndef __SH_ETH_H__ +#define __SH_ETH_H__ + +#define CARDNAME "sh-eth" +#define TX_TIMEOUT (5*HZ) +#define TX_RING_SIZE 64 /* Tx ring size */ +#define RX_RING_SIZE 64 /* Rx ring size */ +#define TX_RING_MIN 64 +#define RX_RING_MIN 64 +#define TX_RING_MAX 1024 +#define RX_RING_MAX 1024 +#define PKT_BUF_SZ 1538 +#define SH_ETH_TSU_TIMEOUT_MS 500 +#define SH_ETH_TSU_CAM_ENTRIES 32 + +enum { + /* IMPORTANT: To keep ethtool register dump working, add new + * register names immediately before SH_ETH_MAX_REGISTER_OFFSET. + */ + + /* E-DMAC registers */ + EDSR = 0, + EDMR, + EDTRR, + EDRRR, + EESR, + EESIPR, + TDLAR, + TDFAR, + TDFXR, + TDFFR, + RDLAR, + RDFAR, + RDFXR, + RDFFR, + TRSCER, + RMFCR, + TFTR, + FDR, + RMCR, + EDOCR, + TFUCR, + RFOCR, + RMIIMODE, + FCFTR, + RPADIR, + TRIMD, + RBWAR, + TBRAR, + + /* Ether registers */ + ECMR, + ECSR, + ECSIPR, + PIR, + PSR, + RDMLR, + PIPR, + RFLR, + IPGR, + APR, + MPR, + PFTCR, + PFRCR, + RFCR, + RFCF, + TPAUSER, + TPAUSECR, + BCFR, + BCFRR, + GECMR, + BCULR, + MAHR, + MALR, + TROCR, + CDCR, + LCCR, + CNDCR, + CEFCR, + FRECR, + TSFRCR, + TLFRCR, + CERCR, + CEECR, + MAFCR, + RTRATE, + CSMR, + RMII_MII, + + /* TSU Absolute address */ + ARSTR, + TSU_CTRST, + TSU_FWEN0, + TSU_FWEN1, + TSU_FCM, + TSU_BSYSL0, + TSU_BSYSL1, + TSU_PRISL0, + TSU_PRISL1, + TSU_FWSL0, + TSU_FWSL1, + TSU_FWSLC, + TSU_QTAG0, /* Same as TSU_QTAGM0 */ + TSU_QTAG1, /* Same as TSU_QTAGM1 */ + TSU_QTAGM0, + TSU_QTAGM1, + TSU_FWSR, + TSU_FWINMK, + TSU_ADQT0, + TSU_ADQT1, + TSU_VTAG0, + TSU_VTAG1, + TSU_ADSBSY, + TSU_TEN, + TSU_POST1, + TSU_POST2, + TSU_POST3, + TSU_POST4, + TSU_ADRH0, + /* TSU_ADR{H,L}{0..31} are assumed to be contiguous */ + + TXNLCR0, + TXALCR0, + RXNLCR0, + RXALCR0, + FWNLCR0, + FWALCR0, + TXNLCR1, + TXALCR1, + RXNLCR1, + RXALCR1, + FWNLCR1, + FWALCR1, + + /* This value must be written at last. 
*/
+ SH_ETH_MAX_REGISTER_OFFSET,
+};
+
+enum {
+ SH_ETH_REG_GIGABIT,
+ SH_ETH_REG_FAST_RZ,
+ SH_ETH_REG_FAST_RCAR,
+ SH_ETH_REG_FAST_SH4,
+ SH_ETH_REG_FAST_SH3_SH2
+};
+
+/* Driver's parameters */
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_RENESAS)
+#define SH_ETH_RX_ALIGN 32
+#else
+#define SH_ETH_RX_ALIGN 2
+#endif
+
+/* Register's bits
+ */
+/* EDSR : sh7734, sh7757, sh7763, r8a7740, and r7s72100 only */
+enum EDSR_BIT {
+ EDSR_ENT = 0x01, EDSR_ENR = 0x02,
+};
+#define EDSR_ENALL (EDSR_ENT|EDSR_ENR)
+
+/* GECMR : sh7734, sh7763 and r8a7740 only */
+enum GECMR_BIT {
+ GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01,
+};
+
+/* EDMR */
+enum DMAC_M_BIT {
+ EDMR_NBST = 0x80,
+ EDMR_EL = 0x40, /* Little endian */
+ EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
+ EDMR_SRST_GETHER = 0x03,
+ EDMR_SRST_ETHER = 0x01,
+};
+
+/* EDTRR */
+enum DMAC_T_BIT {
+ EDTRR_TRNS_GETHER = 0x03,
+ EDTRR_TRNS_ETHER = 0x01,
+};
+
+/* EDRRR */
+enum EDRRR_R_BIT {
+ EDRRR_R = 0x01,
+};
+
+/* TPAUSER */
+enum TPAUSER_BIT {
+ TPAUSER_TPAUSE = 0x0000ffff,
+ TPAUSER_UNLIMITED = 0,
+};
+
+/* BCFR */
+enum BCFR_BIT {
+ BCFR_RPAUSE = 0x0000ffff,
+ BCFR_UNLIMITED = 0,
+};
+
+/* PIR */
+enum PIR_BIT {
+ PIR_MDI = 0x08, PIR_MDO = 0x04, PIR_MMD = 0x02, PIR_MDC = 0x01,
+};
+
+/* PSR */
+enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, };
+
+/* EESR */
+enum EESR_BIT {
+ EESR_TWB1 = 0x80000000,
+ EESR_TWB = 0x40000000, /* same as TWB0 */
+ EESR_TC1 = 0x20000000,
+ EESR_TUC = 0x10000000,
+ EESR_ROC = 0x08000000,
+ EESR_TABT = 0x04000000,
+ EESR_RABT = 0x02000000,
+ EESR_RFRMER = 0x01000000, /* same as RFCOF */
+ EESR_ADE = 0x00800000,
+ EESR_ECI = 0x00400000,
+ EESR_FTC = 0x00200000, /* same as TC or TC0 */
+ EESR_TDE = 0x00100000,
+ EESR_TFE = 0x00080000, /* same as TFUF */
+ EESR_FRC = 0x00040000, /* same as FR */
+ EESR_RDE = 0x00020000,
+ EESR_RFE = 0x00010000,
+ EESR_CND = 0x00000800,
+ EESR_DLC = 0x00000400,
+ EESR_CD = 0x00000200,
+ EESR_TRO = 0x00000100,
+ EESR_RMAF = 0x00000080,
+ EESR_CEEF = 0x00000040,
+ EESR_CELF = 0x00000020,
+ EESR_RRF = 0x00000010,
+ EESR_RTLF = 0x00000008,
+ EESR_RTSF = 0x00000004,
+ EESR_PRE = 0x00000002,
+ EESR_CERF = 0x00000001,
+};
+
+#define EESR_RX_CHECK (EESR_FRC | /* Frame recv */ \
+ EESR_RMAF | /* Multicast address recv */ \
+ EESR_RRF | /* Residual-bit frame recv */ \
+ EESR_RTLF | /* Long frame recv */ \
+ EESR_RTSF | /* Short frame recv */ \
+ EESR_PRE | /* PHY-LSI recv error */ \
+ EESR_CERF) /* Recv frame CRC error */
+
+#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
+ EESR_TRO)
+#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
+ EESR_RDE | EESR_RFRMER | EESR_ADE | \
+ EESR_TFE | EESR_TDE)
+
+/* EESIPR */
+enum EESIPR_BIT {
+ EESIPR_TWB1IP = 0x80000000,
+ EESIPR_TWBIP = 0x40000000, /* same as TWB0IP */
+ EESIPR_TC1IP = 0x20000000,
+ EESIPR_TUCIP = 0x10000000,
+ EESIPR_ROCIP = 0x08000000,
+ EESIPR_TABTIP = 0x04000000,
+ EESIPR_RABTIP = 0x02000000,
+ EESIPR_RFCOFIP = 0x01000000,
+ EESIPR_ADEIP = 0x00800000,
+ EESIPR_ECIIP = 0x00400000,
+ EESIPR_FTCIP = 0x00200000, /* same as TC0IP */
+ EESIPR_TDEIP = 0x00100000,
+ EESIPR_TFUFIP = 0x00080000,
+ EESIPR_FRIP = 0x00040000,
+ EESIPR_RDEIP = 0x00020000,
+ EESIPR_RFOFIP = 0x00010000,
+ EESIPR_CNDIP = 0x00000800,
+ EESIPR_DLCIP = 0x00000400,
+ EESIPR_CDIP = 0x00000200,
+ EESIPR_TROIP = 0x00000100,
+ EESIPR_RMAFIP = 0x00000080,
+ EESIPR_CEEFIP = 0x00000040,
+ EESIPR_CELFIP = 0x00000020,
+ EESIPR_RRFIP = 0x00000010,
+ EESIPR_RTLFIP = 0x00000008,
+ EESIPR_RTSFIP = 0x00000004,
+ EESIPR_PREIP = 0x00000002, 
+ EESIPR_CERFIP = 0x00000001, +}; + +/* Receive descriptor 0 bits */ +enum RD_STS_BIT { + RD_RACT = 0x80000000, RD_RDLE = 0x40000000, + RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000, + RD_RFE = 0x08000000, RD_RFS10 = 0x00000200, + RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080, + RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020, + RD_RFS5 = 0x00000010, RD_RFS4 = 0x00000008, + RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002, + RD_RFS1 = 0x00000001, +}; +#define RDF1ST RD_RFP1 +#define RDFEND RD_RFP0 +#define RD_RFP (RD_RFP1|RD_RFP0) + +/* Receive descriptor 1 bits */ +enum RD_LEN_BIT { + RD_RFL = 0x0000ffff, /* receive frame length */ + RD_RBL = 0xffff0000, /* receive buffer length */ +}; + +/* FCFTR */ +enum FCFTR_BIT { + FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000, + FCFTR_RFF0 = 0x00010000, FCFTR_RFD2 = 0x00000004, + FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001, +}; +#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0) +#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0) + +/* Transmit descriptor 0 bits */ +enum TD_STS_BIT { + TD_TACT = 0x80000000, TD_TDLE = 0x40000000, + TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000, + TD_TFE = 0x08000000, TD_TWBI = 0x04000000, +}; +#define TDF1ST TD_TFP1 +#define TDFEND TD_TFP0 +#define TD_TFP (TD_TFP1|TD_TFP0) + +/* Transmit descriptor 1 bits */ +enum TD_LEN_BIT { + TD_TBL = 0xffff0000, /* transmit buffer length */ +}; + +/* RMCR */ +enum RMCR_BIT { + RMCR_RNC = 0x00000001, +}; + +/* ECMR */ +enum FELIC_MODE_BIT { + ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000, + ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000, + ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000, + ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000, + ECMR_MPDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020, + ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004, + ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001, +}; + +/* ECSR */ +enum ECSR_STATUS_BIT { + ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10, + ECSR_LCHNG = 0x04, + ECSR_MPD = 0x02, ECSR_ICD = 0x01, +}; + +#define DEFAULT_ECSR_INIT (ECSR_BRCRX | ECSR_PSRTO | ECSR_LCHNG | \ + ECSR_ICD | ECSIPR_MPDIP) + +/* ECSIPR */ +enum ECSIPR_STATUS_MASK_BIT { + ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10, + ECSIPR_LCHNGIP = 0x04, + ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01, +}; + +#define DEFAULT_ECSIPR_INIT (ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | \ + ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP) + +/* APR */ +enum APR_BIT { + APR_AP = 0x0000ffff, +}; + +/* MPR */ +enum MPR_BIT { + MPR_MP = 0x0000ffff, +}; + +/* TRSCER */ +enum DESC_I_BIT { + DESC_I_TINT4 = 0x0800, DESC_I_TINT3 = 0x0400, DESC_I_TINT2 = 0x0200, + DESC_I_TINT1 = 0x0100, DESC_I_RINT8 = 0x0080, DESC_I_RINT5 = 0x0010, + DESC_I_RINT4 = 0x0008, DESC_I_RINT3 = 0x0004, DESC_I_RINT2 = 0x0002, + DESC_I_RINT1 = 0x0001, +}; + +#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2) + +/* RPADIR */ +enum RPADIR_BIT { + RPADIR_PADS = 0x1f0000, RPADIR_PADR = 0xffff, +}; + +/* FDR */ +#define DEFAULT_FDR_INIT 0x00000707 + +/* ARSTR */ +enum ARSTR_BIT { ARSTR_ARST = 0x00000001, }; + +/* TSU_FWEN0 */ +enum TSU_FWEN0_BIT { + TSU_FWEN0_0 = 0x00000001, +}; + +/* TSU_ADSBSY */ +enum TSU_ADSBSY_BIT { + TSU_ADSBSY_0 = 0x00000001, +}; + +/* TSU_TEN */ +enum TSU_TEN_BIT { + TSU_TEN_0 = 0x80000000, +}; + +/* TSU_FWSL0 */ +enum TSU_FWSL0_BIT { + TSU_FWSL0_FW50 = 0x1000, TSU_FWSL0_FW40 = 0x0800, + TSU_FWSL0_FW30 = 0x0400, TSU_FWSL0_FW20 = 0x0200, + TSU_FWSL0_FW10 = 0x0100, TSU_FWSL0_RMSA0 = 0x0010, +}; + +/* TSU_FWSLC 
*/
+enum TSU_FWSLC_BIT {
+ TSU_FWSLC_POSTENU = 0x2000, TSU_FWSLC_POSTENL = 0x1000,
+ TSU_FWSLC_CAMSEL03 = 0x0080, TSU_FWSLC_CAMSEL02 = 0x0040,
+ TSU_FWSLC_CAMSEL01 = 0x0020, TSU_FWSLC_CAMSEL00 = 0x0010,
+ TSU_FWSLC_CAMSEL13 = 0x0008, TSU_FWSLC_CAMSEL12 = 0x0004,
+ TSU_FWSLC_CAMSEL11 = 0x0002, TSU_FWSLC_CAMSEL10 = 0x0001,
+};
+
+/* TSU_VTAGn */
+#define TSU_VTAG_ENABLE 0x80000000
+#define TSU_VTAG_VID_MASK 0x00000fff
+
+/* The sh ether Tx buffer descriptors.
+ * This structure is 16 bytes (four u32 words).
+ */
+struct sh_eth_txdesc {
+ u32 status; /* TD0 */
+ u32 len; /* TD1 */
+ u32 addr; /* TD2 */
+ u32 pad0; /* padding data */
+} __aligned(2) __packed;
+
+/* The sh ether Rx buffer descriptors.
+ * This structure is 16 bytes (four u32 words).
+ */
+struct sh_eth_rxdesc {
+ u32 status; /* RD0 */
+ u32 len; /* RD1 */
+ u32 addr; /* RD2 */
+ u32 pad0; /* padding data */
+} __aligned(2) __packed;
+
+/* This structure describes the CPU-dependent (per-SoC) configuration. */
+struct sh_eth_cpu_data {
+ /* mandatory functions */
+ int (*soft_reset)(struct net_device *ndev);
+
+ /* optional functions */
+ void (*chip_reset)(struct net_device *ndev);
+ void (*set_duplex)(struct net_device *ndev);
+ void (*set_rate)(struct net_device *ndev);
+
+ /* mandatory initialize value */
+ int register_type;
+ u32 edtrr_trns;
+ u32 eesipr_value;
+
+ /* optional initialize value */
+ u32 ecsr_value;
+ u32 ecsipr_value;
+ u32 fdr_value;
+ u32 fcftr_value;
+
+ /* interrupt checking mask */
+ u32 tx_check;
+ u32 eesr_err_check;
+
+ /* Error mask */
+ u32 trscer_err_mask;
+
+ /* hardware features */
+ unsigned long irq_flags; /* IRQ configuration flags */
+ unsigned no_psr:1; /* EtherC DOES NOT have PSR */
+ unsigned apr:1; /* EtherC has APR */
+ unsigned mpr:1; /* EtherC has MPR */
+ unsigned tpauser:1; /* EtherC has TPAUSER */
+ unsigned bculr:1; /* EtherC has BCULR */
+ unsigned tsu:1; /* EtherC has TSU */
+ unsigned hw_swap:1; /* E-DMAC has DE bit in EDMR */
+ unsigned nbst:1; /* E-DMAC has NBST bit in EDMR */
+ unsigned rpadir:1; /* E-DMAC has RPADIR */
+ unsigned no_trimd:1; /* E-DMAC DOES NOT have TRIMD */
+ unsigned no_ade:1; /* E-DMAC DOES NOT have ADE bit in EESR */
+ unsigned no_xdfar:1; /* E-DMAC DOES NOT have RDFAR/TDFAR */
+ unsigned xdfar_rw:1; /* E-DMAC has writeable RDFAR/TDFAR */
+ unsigned hw_checksum:1; /* E-DMAC has CSMR */
+ unsigned select_mii:1; /* EtherC has RMII_MII (MII select register) */
+ unsigned rmiimode:1; /* EtherC has RMIIMODE register */
+ unsigned rtrate:1; /* EtherC has RTRATE register */
+ unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */
+ unsigned no_tx_cntrs:1; /* EtherC DOES NOT have TX error counters */
+ unsigned cexcr:1; /* EtherC has CERCR/CEECR */
+ unsigned dual_port:1; /* Dual EtherC/E-DMAC */
+};
+
+struct sh_eth_private {
+ struct platform_device *pdev;
+ struct sh_eth_cpu_data *cd;
+ const u16 *reg_offset;
+ void __iomem *addr;
+ void __iomem *tsu_addr;
+ struct clk *clk;
+ u32 num_rx_ring;
+ u32 num_tx_ring;
+ dma_addr_t rx_desc_dma;
+ dma_addr_t tx_desc_dma;
+ struct sh_eth_rxdesc *rx_ring;
+ struct sh_eth_txdesc *tx_ring;
+ struct sk_buff **rx_skbuff;
+ struct sk_buff **tx_skbuff;
+ spinlock_t lock; /* Register access lock */
+ u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ u32 cur_tx, dirty_tx;
+ u32 rx_buf_sz; /* Based on MTU+slack. */
+ struct napi_struct napi;
+ bool irq_enabled;
+ /* MII transceiver section. 
*/
+ u32 phy_id; /* PHY ID */
+ struct mii_bus *mii_bus; /* MDIO bus control */
+ int link;
+ phy_interface_t phy_interface;
+ int msg_enable;
+ int speed;
+ int duplex;
+ int port; /* for TSU */
+ int vlan_num_ids; /* for VLAN tag filter */
+
+ unsigned no_ether_link:1;
+ unsigned ether_link_active_low:1;
+ unsigned is_opened:1;
+ unsigned wol_enabled:1;
+};
+
+#endif /* #ifndef __SH_ETH_H__ */
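
Editor's sketch: every register access in sh_eth.c above resolves a symbolic name from this header through the per-SoC offset table picked by sh_eth_get_register_offset(); that is also why probe bails out with -EINVAL when no table matches cd->register_type. A minimal rendering of that accessor pattern, assuming plain ioread32()/iowrite32() MMIO (the real sh_eth_read()/sh_eth_write() helpers live earlier in sh_eth.c; this sketch omits their handling of registers a given SoC lacks):

	static u32 sh_eth_read(struct net_device *ndev, int enum_index)
	{
		struct sh_eth_private *mdp = netdev_priv(ndev);

		/* Translate the symbolic register name (ECMR, EESR, ...) into
		 * a byte offset from the ioremapped base for this SoC family.
		 */
		return ioread32(mdp->addr + mdp->reg_offset[enum_index]);
	}

	static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
	{
		struct sh_eth_private *mdp = netdev_priv(ndev);

		iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]);
	}

This indirection is what lets a single driver body serve the SH_ETH_REG_GIGABIT, FAST_RZ, FAST_RCAR, FAST_SH4 and FAST_SH3_SH2 register layouts with one register namespace.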