author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/ptp
parent     Initial commit. (diff)
Adding upstream version 5.10.209. (upstream/5.10.209, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/ptp')
-rw-r--r--  drivers/ptp/Kconfig                 157
-rw-r--r--  drivers/ptp/Makefile                 17
-rw-r--r--  drivers/ptp/idt8a340_reg.h          709
-rw-r--r--  drivers/ptp/ptp_chardev.c           506
-rw-r--r--  drivers/ptp/ptp_clock.c             439
-rw-r--r--  drivers/ptp/ptp_clockmatrix.c      2230
-rw-r--r--  drivers/ptp/ptp_clockmatrix.h       149
-rw-r--r--  drivers/ptp/ptp_dte.c               347
-rw-r--r--  drivers/ptp/ptp_idt82p33.c         1008
-rw-r--r--  drivers/ptp/ptp_idt82p33.h          171
-rw-r--r--  drivers/ptp/ptp_ines.c              807
-rw-r--r--  drivers/ptp/ptp_kvm.c               197
-rw-r--r--  drivers/ptp/ptp_pch.c               691
-rw-r--r--  drivers/ptp/ptp_private.h            96
-rw-r--r--  drivers/ptp/ptp_qoriq.c             645
-rw-r--r--  drivers/ptp/ptp_qoriq_debugfs.c     101
-rw-r--r--  drivers/ptp/ptp_sysfs.c             303
-rw-r--r--  drivers/ptp/ptp_vmw.c               144
18 files changed, 8717 insertions, 0 deletions
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
new file mode 100644
index 000000000..3e377f3c6
--- /dev/null
+++ b/drivers/ptp/Kconfig
@@ -0,0 +1,157 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# PTP clock support configuration
+#
+
+menu "PTP clock support"
+
+config PTP_1588_CLOCK
+ tristate "PTP clock support"
+ depends on NET && POSIX_TIMERS
+ select PPS
+ select NET_PTP_CLASSIFY
+ help
+ The IEEE 1588 standard defines a method to precisely
+ synchronize distributed clocks over Ethernet networks. The
+ standard defines a Precision Time Protocol (PTP), which can
+ be used to achieve synchronization within a few dozen
+ microseconds. In addition, with the help of special hardware
+ time stamping units, it can be possible to achieve
+ synchronization to within a few hundred nanoseconds.
+
+ This driver adds support for PTP clocks as character
+ devices. If you want to use a PTP clock, then you should
+ also enable at least one clock driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp.
+
+config PTP_1588_CLOCK_DTE
+ tristate "Broadcom DTE as PTP clock"
+ depends on PTP_1588_CLOCK
+ depends on NET && HAS_IOMEM
+ depends on ARCH_BCM_MOBILE || (ARCH_BCM_IPROC && !(ARCH_BCM_NSP || ARCH_BCM_5301X)) || COMPILE_TEST
+ default y
+ help
+ This driver adds support for using the digital timing engine
+ (DTE) in Broadcom SoCs as a PTP clock.
+
+ The clock can be used in both wired and wireless networks
+ for PTP purposes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_dte.
+
+config PTP_1588_CLOCK_QORIQ
+ tristate "Freescale QorIQ 1588 timer as PTP clock"
+ depends on GIANFAR || FSL_DPAA_ETH || FSL_DPAA2_ETH || FSL_ENETC || FSL_ENETC_VF || COMPILE_TEST
+ depends on PTP_1588_CLOCK
+ default y
+ help
+ This driver adds support for using the Freescale QorIQ 1588
+ timer as a PTP clock. This clock is only useful if your PTP
+ programs are getting hardware time stamps on the PTP Ethernet
+ packets using the SO_TIMESTAMPING API.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp-qoriq.
+
+comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks."
+ depends on PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n
+
+config DP83640_PHY
+ tristate "Driver for the National Semiconductor DP83640 PHYTER"
+ depends on NETWORK_PHY_TIMESTAMPING
+ depends on PHYLIB
+ depends on PTP_1588_CLOCK
+ select CRC32
+ help
+ Supports the DP83640 PHYTER with IEEE 1588 features.
+
+ This driver adds support for using the DP83640 as a PTP
+ clock. This clock is only useful if your PTP programs are
+ getting hardware time stamps on the PTP Ethernet packets
+ using the SO_TIMESTAMPING API.
+
+ In order for this to work, your MAC driver must also
+ implement the skb_tx_timestamp() function.
+
+config PTP_1588_CLOCK_INES
+ tristate "ZHAW InES PTP time stamping IP core"
+ depends on NETWORK_PHY_TIMESTAMPING
+ depends on HAS_IOMEM
+ depends on PHYLIB
+ depends on PTP_1588_CLOCK
+ help
+ This driver adds support for using the ZHAW InES 1588 IP
+ core. This clock is only useful if the MII bus of your MAC
+ is wired up to the core.
+
+config PTP_1588_CLOCK_PCH
+ tristate "Intel PCH EG20T as PTP clock"
+ depends on X86_32 || COMPILE_TEST
+ depends on HAS_IOMEM && PCI
+ depends on NET
+ imply PTP_1588_CLOCK
+ help
+ This driver adds support for using the PCH EG20T as a PTP
+ clock. The hardware supports time stamping of PTP packets
+ when using the end-to-end delay (E2E) mechanism. The peer
+ delay mechanism (P2P) is not supported.
+
+ This clock is only useful if your PTP programs are getting
+ hardware time stamps on the PTP Ethernet packets using the
+ SO_TIMESTAMPING API.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_pch.
+
+config PTP_1588_CLOCK_KVM
+ tristate "KVM virtual PTP clock"
+ depends on PTP_1588_CLOCK
+ depends on KVM_GUEST && X86
+ default y
+ help
+ This driver adds support for using kvm infrastructure as a PTP
+ clock. This clock is only useful if you are using KVM guests.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_kvm.
+
+config PTP_1588_CLOCK_IDT82P33
+ tristate "IDT 82P33xxx PTP clock"
+ depends on PTP_1588_CLOCK && I2C
+ default n
+ help
+ This driver adds support for using the IDT 82P33xxx as a PTP
+ clock. This clock is only useful if your time stamping MAC
+ is connected to the IDT chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_idt82p33.
+
+config PTP_1588_CLOCK_IDTCM
+ tristate "IDT CLOCKMATRIX as PTP clock"
+ depends on PTP_1588_CLOCK && I2C
+ default n
+ help
+ This driver adds support for using IDT CLOCKMATRIX(TM) as a PTP
+ clock. This clock is only useful if your time stamping MAC
+ is connected to the IDT chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_clockmatrix.
+
+config PTP_1588_CLOCK_VMW
+ tristate "VMware virtual PTP clock"
+ depends on ACPI && HYPERVISOR_GUEST && X86
+ depends on PTP_1588_CLOCK
+ help
+ This driver adds support for using the VMware virtual precision
+ clock device as a PTP clock. This is only useful in virtual
+ machines running on VMware virtual infrastructure.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_vmw.
+
+endmenu
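
The help texts above repeatedly point at the SO_TIMESTAMPING socket API as the way PTP programs obtain hardware time stamps on PTP Ethernet packets. For reference only (this is not part of the patch), a minimal user-space sketch that asks a NIC driver for hardware time stamping and requests delivery of the stamps on a socket; the interface name "eth0" is an arbitrary example and error handling is abbreviated:

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

/* Enable HW time stamping on the given interface and report the stamps on sock. */
static int enable_hw_timestamping(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int flags;

	/* Ask the MAC/PHY driver to time stamp all RX packets and all TX packets. */
	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;
	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return -1;
	}

	/* Have the raw hardware time stamps delivered on this socket. */
	flags = SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	if (setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)) < 0) {
		perror("SO_TIMESTAMPING");
		return -1;
	}

	return 0;
}

A typical caller passes a UDP socket bound to the PTP event port (319) as "sock", e.g. enable_hw_timestamping(sock, "eth0").
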
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
new file mode 100644
index 000000000..7aff75f74
--- /dev/null
+++ b/drivers/ptp/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for PTP 1588 clock support.
+#
+
+ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o
+obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o
+obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o
+obj-$(CONFIG_PTP_1588_CLOCK_INES) += ptp_ines.o
+obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o
+obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
+obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
+ptp-qoriq-y += ptp_qoriq.o
+ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
+obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
+obj-$(CONFIG_PTP_1588_CLOCK_IDT82P33) += ptp_idt82p33.o
+obj-$(CONFIG_PTP_1588_CLOCK_VMW) += ptp_vmw.o
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
new file mode 100644
index 000000000..b297c4aba
--- /dev/null
+++ b/drivers/ptp/idt8a340_reg.h
@@ -0,0 +1,709 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* idt8a340_reg.h
+ *
+ * Originally generated by regen.tcl on Thu Feb 14 19:23:44 PST 2019
+ * https://github.com/richardcochran/regen
+ *
+ * Hand modified to include some HW registers.
+ * Based on 4.8.0, SCSR rev C commit a03c7ae5
+ */
+#ifndef HAVE_IDT8A340_REG
+#define HAVE_IDT8A340_REG
+
+#define PAGE_ADDR_BASE 0x0000
+#define PAGE_ADDR 0x00fc
+
+#define HW_REVISION 0x8180
+#define REV_ID 0x007a
+
+#define HW_DPLL_0 (0x8a00)
+#define HW_DPLL_1 (0x8b00)
+#define HW_DPLL_2 (0x8c00)
+#define HW_DPLL_3 (0x8d00)
+#define HW_DPLL_4 (0x8e00)
+#define HW_DPLL_5 (0x8f00)
+#define HW_DPLL_6 (0x9000)
+#define HW_DPLL_7 (0x9100)
+
+#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
+#define HW_DPLL_TOD_CTRL_1 (0x089)
+#define HW_DPLL_TOD_CTRL_2 (0x08A)
+#define HW_DPLL_TOD_OVR__0 (0x098)
+#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
+
+#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
+#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
+#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
+#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
+#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
+#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
+#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
+#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
+#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
+#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
+#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
+#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
+#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
+#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
+#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
+#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
+
+#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
+#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
+#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
+#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
+
+#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
+#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
+#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
+#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
+#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
+#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
+#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
+
+#define HW_Q8_CTRL_SPARE (0xa7d4)
+#define HW_Q11_CTRL_SPARE (0xa7ec)
+
+/**
+ * Select FOD5 as sync_trigger for Q8 divider.
+ * Transition from logic zero to one
+ * sets trigger to sync Q8 divider.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_SYNC_TRIG BIT(1)
+
+/**
+ * Enable FOD5 as driver for clock and sync for Q8 divider.
+ * Enable fanout buffer for FOD5.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+/**
+ * Select FOD6 as sync_trigger for Q11 divider.
+ * Transition from logic zero to one
+ * sets trigger to sync Q11 divider.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_SYNC_TRIG BIT(1)
+
+/**
+ * Enable FOD6 as driver for clock and sync for Q11 divider.
+ * Enable fanout buffer for FOD6.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+#define RESET_CTRL 0xc000
+#define SM_RESET 0x0012
+#define SM_RESET_CMD 0x5A
+
+#define GENERAL_STATUS 0xc014
+#define HW_REV_ID 0x000A
+#define BOND_ID 0x000B
+#define HW_CSR_ID 0x000C
+#define HW_IRQ_ID 0x000E
+
+#define MAJ_REL 0x0010
+#define MIN_REL 0x0011
+#define HOTFIX_REL 0x0012
+
+#define PIPELINE_ID 0x0014
+#define BUILD_ID 0x0018
+
+#define JTAG_DEVICE_ID 0x001c
+#define PRODUCT_ID 0x001e
+
+#define OTP_SCSR_CONFIG_SELECT 0x0022
+
+#define STATUS 0xc03c
+#define USER_GPIO0_TO_7_STATUS 0x008a
+#define USER_GPIO8_TO_15_STATUS 0x008b
+
+#define GPIO_USER_CONTROL 0xc160
+#define GPIO0_TO_7_OUT 0x0000
+#define GPIO8_TO_15_OUT 0x0001
+
+#define STICKY_STATUS_CLEAR 0xc164
+
+#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
+
+#define ALERT_CFG 0xc188
+
+#define SYS_DPLL_XO 0xc194
+
+#define SYS_APLL 0xc19c
+
+#define INPUT_0 0xc1b0
+
+#define INPUT_1 0xc1c0
+
+#define INPUT_2 0xc1d0
+
+#define INPUT_3 0xc200
+
+#define INPUT_4 0xc210
+
+#define INPUT_5 0xc220
+
+#define INPUT_6 0xc230
+
+#define INPUT_7 0xc240
+
+#define INPUT_8 0xc250
+
+#define INPUT_9 0xc260
+
+#define INPUT_10 0xc280
+
+#define INPUT_11 0xc290
+
+#define INPUT_12 0xc2a0
+
+#define INPUT_13 0xc2b0
+
+#define INPUT_14 0xc2c0
+
+#define INPUT_15 0xc2d0
+
+#define REF_MON_0 0xc2e0
+
+#define REF_MON_1 0xc2ec
+
+#define REF_MON_2 0xc300
+
+#define REF_MON_3 0xc30c
+
+#define REF_MON_4 0xc318
+
+#define REF_MON_5 0xc324
+
+#define REF_MON_6 0xc330
+
+#define REF_MON_7 0xc33c
+
+#define REF_MON_8 0xc348
+
+#define REF_MON_9 0xc354
+
+#define REF_MON_10 0xc360
+
+#define REF_MON_11 0xc36c
+
+#define REF_MON_12 0xc380
+
+#define REF_MON_13 0xc38c
+
+#define REF_MON_14 0xc398
+
+#define REF_MON_15 0xc3a4
+
+#define DPLL_0 0xc3b0
+#define DPLL_CTRL_REG_0 0x0002
+#define DPLL_CTRL_REG_1 0x0003
+#define DPLL_CTRL_REG_2 0x0004
+#define DPLL_TOD_SYNC_CFG 0x0031
+#define DPLL_COMBO_SLAVE_CFG_0 0x0032
+#define DPLL_COMBO_SLAVE_CFG_1 0x0033
+#define DPLL_SLAVE_REF_CFG 0x0034
+#define DPLL_REF_MODE 0x0035
+#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
+#define DPLL_MODE 0x0037
+
+#define DPLL_1 0xc400
+
+#define DPLL_2 0xc438
+
+#define DPLL_3 0xc480
+
+#define DPLL_4 0xc4b8
+
+#define DPLL_5 0xc500
+
+#define DPLL_6 0xc538
+
+#define DPLL_7 0xc580
+
+#define SYS_DPLL 0xc5b8
+
+#define DPLL_CTRL_0 0xc600
+#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
+#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a
+
+#define DPLL_CTRL_1 0xc63c
+
+#define DPLL_CTRL_2 0xc680
+
+#define DPLL_CTRL_3 0xc6bc
+
+#define DPLL_CTRL_4 0xc700
+
+#define DPLL_CTRL_5 0xc73c
+
+#define DPLL_CTRL_6 0xc780
+
+#define DPLL_CTRL_7 0xc7bc
+
+#define SYS_DPLL_CTRL 0xc800
+
+#define DPLL_PHASE_0 0xc818
+
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_PHASE 0x0000
+
+#define DPLL_PHASE_1 0xc81c
+
+#define DPLL_PHASE_2 0xc820
+
+#define DPLL_PHASE_3 0xc824
+
+#define DPLL_PHASE_4 0xc828
+
+#define DPLL_PHASE_5 0xc82c
+
+#define DPLL_PHASE_6 0xc830
+
+#define DPLL_PHASE_7 0xc834
+
+#define DPLL_FREQ_0 0xc838
+
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_FREQ 0x0000
+
+#define DPLL_FREQ_1 0xc840
+
+#define DPLL_FREQ_2 0xc848
+
+#define DPLL_FREQ_3 0xc850
+
+#define DPLL_FREQ_4 0xc858
+
+#define DPLL_FREQ_5 0xc860
+
+#define DPLL_FREQ_6 0xc868
+
+#define DPLL_FREQ_7 0xc870
+
+#define DPLL_PHASE_PULL_IN_0 0xc880
+#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
+#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
+#define PULL_IN_CTRL 0x0007
+
+#define DPLL_PHASE_PULL_IN_1 0xc888
+
+#define DPLL_PHASE_PULL_IN_2 0xc890
+
+#define DPLL_PHASE_PULL_IN_3 0xc898
+
+#define DPLL_PHASE_PULL_IN_4 0xc8a0
+
+#define DPLL_PHASE_PULL_IN_5 0xc8a8
+
+#define DPLL_PHASE_PULL_IN_6 0xc8b0
+
+#define DPLL_PHASE_PULL_IN_7 0xc8b8
+
+#define GPIO_CFG 0xc8c0
+#define GPIO_CFG_GBL 0x0000
+
+#define GPIO_0 0xc8c2
+#define GPIO_DCO_INC_DEC 0x0000
+#define GPIO_OUT_CTRL_0 0x0001
+#define GPIO_OUT_CTRL_1 0x0002
+#define GPIO_TOD_TRIG 0x0003
+#define GPIO_DPLL_INDICATOR 0x0004
+#define GPIO_LOS_INDICATOR 0x0005
+#define GPIO_REF_INPUT_DSQ_0 0x0006
+#define GPIO_REF_INPUT_DSQ_1 0x0007
+#define GPIO_REF_INPUT_DSQ_2 0x0008
+#define GPIO_REF_INPUT_DSQ_3 0x0009
+#define GPIO_MAN_CLK_SEL_0 0x000a
+#define GPIO_MAN_CLK_SEL_1 0x000b
+#define GPIO_MAN_CLK_SEL_2 0x000c
+#define GPIO_SLAVE 0x000d
+#define GPIO_ALERT_OUT_CFG 0x000e
+#define GPIO_TOD_NOTIFICATION_CFG 0x000f
+#define GPIO_CTRL 0x0010
+
+#define GPIO_1 0xc8d4
+
+#define GPIO_2 0xc8e6
+
+#define GPIO_3 0xc900
+
+#define GPIO_4 0xc912
+
+#define GPIO_5 0xc924
+
+#define GPIO_6 0xc936
+
+#define GPIO_7 0xc948
+
+#define GPIO_8 0xc95a
+
+#define GPIO_9 0xc980
+
+#define GPIO_10 0xc992
+
+#define GPIO_11 0xc9a4
+
+#define GPIO_12 0xc9b6
+
+#define GPIO_13 0xc9c8
+
+#define GPIO_14 0xc9da
+
+#define GPIO_15 0xca00
+
+#define OUT_DIV_MUX 0xca12
+
+#define OUTPUT_0 0xca14
+/* FOD frequency output divider value */
+#define OUT_DIV 0x0000
+#define OUT_DUTY_CYCLE_HIGH 0x0004
+#define OUT_CTRL_0 0x0008
+#define OUT_CTRL_1 0x0009
+/* Phase adjustment in FOD cycles */
+#define OUT_PHASE_ADJ 0x000c
+
+#define OUTPUT_1 0xca24
+
+#define OUTPUT_2 0xca34
+
+#define OUTPUT_3 0xca44
+
+#define OUTPUT_4 0xca54
+
+#define OUTPUT_5 0xca64
+
+#define OUTPUT_6 0xca80
+
+#define OUTPUT_7 0xca90
+
+#define OUTPUT_8 0xcaa0
+
+#define OUTPUT_9 0xcab0
+
+#define OUTPUT_10 0xcac0
+
+#define OUTPUT_11 0xcad0
+
+#define SERIAL 0xcae0
+
+#define PWM_ENCODER_0 0xcb00
+
+#define PWM_ENCODER_1 0xcb08
+
+#define PWM_ENCODER_2 0xcb10
+
+#define PWM_ENCODER_3 0xcb18
+
+#define PWM_ENCODER_4 0xcb20
+
+#define PWM_ENCODER_5 0xcb28
+
+#define PWM_ENCODER_6 0xcb30
+
+#define PWM_ENCODER_7 0xcb38
+
+#define PWM_DECODER_0 0xcb40
+
+#define PWM_DECODER_1 0xcb48
+
+#define PWM_DECODER_2 0xcb50
+
+#define PWM_DECODER_3 0xcb58
+
+#define PWM_DECODER_4 0xcb60
+
+#define PWM_DECODER_5 0xcb68
+
+#define PWM_DECODER_6 0xcb70
+
+#define PWM_DECODER_7 0xcb80
+
+#define PWM_DECODER_8 0xcb88
+
+#define PWM_DECODER_9 0xcb90
+
+#define PWM_DECODER_10 0xcb98
+
+#define PWM_DECODER_11 0xcba0
+
+#define PWM_DECODER_12 0xcba8
+
+#define PWM_DECODER_13 0xcbb0
+
+#define PWM_DECODER_14 0xcbb8
+
+#define PWM_DECODER_15 0xcbc0
+
+#define PWM_USER_DATA 0xcbc8
+
+#define TOD_0 0xcbcc
+
+/* Enable TOD counter, output channel sync and even-PPS mode */
+#define TOD_CFG 0x0000
+
+#define TOD_1 0xcbce
+
+#define TOD_2 0xcbd0
+
+#define TOD_3 0xcbd2
+
+
+#define TOD_WRITE_0 0xcc00
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_WRITE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_WRITE_COUNTER 0x000c
+/* TOD write trigger configuration */
+#define TOD_WRITE_SELECT_CFG_0 0x000d
+/* TOD write trigger selection */
+#define TOD_WRITE_CMD 0x000f
+
+#define TOD_WRITE_1 0xcc10
+
+#define TOD_WRITE_2 0xcc20
+
+#define TOD_WRITE_3 0xcc30
+
+#define TOD_READ_PRIMARY_0 0xcc40
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_PRIMARY 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_READ_PRIMARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_PRIMARY_CMD 0x000e
+
+#define TOD_READ_PRIMARY_1 0xcc50
+
+#define TOD_READ_PRIMARY_2 0xcc60
+
+#define TOD_READ_PRIMARY_3 0xcc80
+
+#define TOD_READ_SECONDARY_0 0xcc90
+
+#define TOD_READ_SECONDARY_1 0xcca0
+
+#define TOD_READ_SECONDARY_2 0xccb0
+
+#define TOD_READ_SECONDARY_3 0xccc0
+
+#define OUTPUT_TDC_CFG 0xccd0
+
+#define OUTPUT_TDC_0 0xcd00
+
+#define OUTPUT_TDC_1 0xcd08
+
+#define OUTPUT_TDC_2 0xcd10
+
+#define OUTPUT_TDC_3 0xcd18
+
+#define INPUT_TDC 0xcd20
+
+#define SCRATCH 0xcf50
+
+#define EEPROM 0xcf68
+
+#define OTP 0xcf70
+
+#define BYTE 0xcf80
+
+/* Bit definitions for the MAJ_REL register */
+#define MAJOR_SHIFT (1)
+#define MAJOR_MASK (0x7f)
+#define PR_BUILD BIT(0)
+
+/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
+#define GPIO0_LEVEL BIT(0)
+#define GPIO1_LEVEL BIT(1)
+#define GPIO2_LEVEL BIT(2)
+#define GPIO3_LEVEL BIT(3)
+#define GPIO4_LEVEL BIT(4)
+#define GPIO5_LEVEL BIT(5)
+#define GPIO6_LEVEL BIT(6)
+#define GPIO7_LEVEL BIT(7)
+
+/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
+#define GPIO8_LEVEL BIT(0)
+#define GPIO9_LEVEL BIT(1)
+#define GPIO10_LEVEL BIT(2)
+#define GPIO11_LEVEL BIT(3)
+#define GPIO12_LEVEL BIT(4)
+#define GPIO13_LEVEL BIT(5)
+#define GPIO14_LEVEL BIT(6)
+#define GPIO15_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO0_TO_7_OUT register */
+#define GPIO0_DRIVE_LEVEL BIT(0)
+#define GPIO1_DRIVE_LEVEL BIT(1)
+#define GPIO2_DRIVE_LEVEL BIT(2)
+#define GPIO3_DRIVE_LEVEL BIT(3)
+#define GPIO4_DRIVE_LEVEL BIT(4)
+#define GPIO5_DRIVE_LEVEL BIT(5)
+#define GPIO6_DRIVE_LEVEL BIT(6)
+#define GPIO7_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO8_TO_15_OUT register */
+#define GPIO8_DRIVE_LEVEL BIT(0)
+#define GPIO9_DRIVE_LEVEL BIT(1)
+#define GPIO10_DRIVE_LEVEL BIT(2)
+#define GPIO11_DRIVE_LEVEL BIT(3)
+#define GPIO12_DRIVE_LEVEL BIT(4)
+#define GPIO13_DRIVE_LEVEL BIT(5)
+#define GPIO14_DRIVE_LEVEL BIT(6)
+#define GPIO15_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
+#define TOD_SYNC_SOURCE_SHIFT (1)
+#define TOD_SYNC_SOURCE_MASK (0x3)
+#define TOD_SYNC_EN BIT(0)
+
+/* Bit definitions for the DPLL_MODE register */
+#define WRITE_TIMER_MODE BIT(6)
+#define PLL_MODE_SHIFT (3)
+#define PLL_MODE_MASK (0x7)
+#define STATE_MODE_SHIFT (0)
+#define STATE_MODE_MASK (0x7)
+
+/* Bit definitions for the GPIO_CFG_GBL register */
+#define SUPPLY_MODE_SHIFT (0)
+#define SUPPLY_MODE_MASK (0x3)
+
+/* Bit definitions for the GPIO_DCO_INC_DEC register */
+#define INCDEC_DPLL_INDEX_SHIFT (0)
+#define INCDEC_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_0 register */
+#define CTRL_OUT_0 BIT(0)
+#define CTRL_OUT_1 BIT(1)
+#define CTRL_OUT_2 BIT(2)
+#define CTRL_OUT_3 BIT(3)
+#define CTRL_OUT_4 BIT(4)
+#define CTRL_OUT_5 BIT(5)
+#define CTRL_OUT_6 BIT(6)
+#define CTRL_OUT_7 BIT(7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_1 register */
+#define CTRL_OUT_8 BIT(0)
+#define CTRL_OUT_9 BIT(1)
+#define CTRL_OUT_10 BIT(2)
+#define CTRL_OUT_11 BIT(3)
+#define CTRL_OUT_12 BIT(4)
+#define CTRL_OUT_13 BIT(5)
+#define CTRL_OUT_14 BIT(6)
+#define CTRL_OUT_15 BIT(7)
+
+/* Bit definitions for the GPIO_TOD_TRIG register */
+#define TOD_TRIG_0 BIT(0)
+#define TOD_TRIG_1 BIT(1)
+#define TOD_TRIG_2 BIT(2)
+#define TOD_TRIG_3 BIT(3)
+
+/* Bit definitions for the GPIO_DPLL_INDICATOR register */
+#define IND_DPLL_INDEX_SHIFT (0)
+#define IND_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_LOS_INDICATOR register */
+#define REFMON_INDEX_SHIFT (0)
+#define REFMON_INDEX_MASK (0xf)
+/* Active level of LOS indicator, 0=low 1=high */
+#define ACTIVE_LEVEL BIT(4)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
+#define DSQ_INP_0 BIT(0)
+#define DSQ_INP_1 BIT(1)
+#define DSQ_INP_2 BIT(2)
+#define DSQ_INP_3 BIT(3)
+#define DSQ_INP_4 BIT(4)
+#define DSQ_INP_5 BIT(5)
+#define DSQ_INP_6 BIT(6)
+#define DSQ_INP_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
+#define DSQ_INP_8 BIT(0)
+#define DSQ_INP_9 BIT(1)
+#define DSQ_INP_10 BIT(2)
+#define DSQ_INP_11 BIT(3)
+#define DSQ_INP_12 BIT(4)
+#define DSQ_INP_13 BIT(5)
+#define DSQ_INP_14 BIT(6)
+#define DSQ_INP_15 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
+#define DSQ_DPLL_0 BIT(0)
+#define DSQ_DPLL_1 BIT(1)
+#define DSQ_DPLL_2 BIT(2)
+#define DSQ_DPLL_3 BIT(3)
+#define DSQ_DPLL_4 BIT(4)
+#define DSQ_DPLL_5 BIT(5)
+#define DSQ_DPLL_6 BIT(6)
+#define DSQ_DPLL_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
+#define DSQ_DPLL_SYS BIT(0)
+#define GPIO_DSQ_LEVEL BIT(1)
+
+/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
+#define DPLL_TOD_SHIFT (0)
+#define DPLL_TOD_MASK (0x3)
+#define TOD_READ_SECONDARY BIT(2)
+#define GPIO_ASSERT_LEVEL BIT(3)
+
+/* Bit definitions for the GPIO_CTRL register */
+#define GPIO_FUNCTION_EN BIT(0)
+#define GPIO_CMOS_OD_MODE BIT(1)
+#define GPIO_CONTROL_DIR BIT(2)
+#define GPIO_PU_PD_MODE BIT(3)
+#define GPIO_FUNCTION_SHIFT (4)
+#define GPIO_FUNCTION_MASK (0xf)
+
+/* Bit definitions for the OUT_CTRL_1 register */
+#define OUT_SYNC_DISABLE BIT(7)
+#define SQUELCH_VALUE BIT(6)
+#define SQUELCH_DISABLE BIT(5)
+#define PAD_VDDO_SHIFT (2)
+#define PAD_VDDO_MASK (0x7)
+#define PAD_CMOSDRV_SHIFT (0)
+#define PAD_CMOSDRV_MASK (0x3)
+
+/* Bit definitions for the TOD_CFG register */
+#define TOD_EVEN_PPS_MODE BIT(2)
+#define TOD_OUT_SYNC_ENABLE BIT(1)
+#define TOD_ENABLE BIT(0)
+
+/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
+#define WR_PWM_DECODER_INDEX_SHIFT (4)
+#define WR_PWM_DECODER_INDEX_MASK (0xf)
+#define WR_REF_INDEX_SHIFT (0)
+#define WR_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_WRITE_CMD register */
+#define TOD_WRITE_SELECTION_SHIFT (0)
+#define TOD_WRITE_SELECTION_MASK (0xf)
+/* 4.8.7 */
+#define TOD_WRITE_TYPE_SHIFT (4)
+#define TOD_WRITE_TYPE_MASK (0x3)
+
+/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
+#define RD_PWM_DECODER_INDEX_SHIFT (4)
+#define RD_PWM_DECODER_INDEX_MASK (0xf)
+#define RD_REF_INDEX_SHIFT (0)
+#define RD_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
+#define TOD_READ_TRIGGER_MODE BIT(4)
+#define TOD_READ_TRIGGER_SHIFT (0)
+#define TOD_READ_TRIGGER_MASK (0xf)
+
+/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */
+#define COMBO_MASTER_HOLD BIT(0)
+
+#endif
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
new file mode 100644
index 000000000..9311f3d09
--- /dev/null
+++ b/drivers/ptp/ptp_chardev.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PTP 1588 clock support - character device implementation.
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ */
+#include <linux/module.h>
+#include <linux/posix-clock.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+
+#include <linux/nospec.h>
+
+#include "ptp_private.h"
+
+static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct ptp_clock_request rq;
+ int err = 0;
+
+ memset(&rq, 0, sizeof(rq));
+
+ switch (func) {
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_EXTTS:
+ rq.type = PTP_CLK_REQ_EXTTS;
+ rq.extts.index = chan;
+ err = ops->enable(ops, &rq, 0);
+ break;
+ case PTP_PF_PEROUT:
+ rq.type = PTP_CLK_REQ_PEROUT;
+ rq.perout.index = chan;
+ err = ops->enable(ops, &rq, 0);
+ break;
+ case PTP_PF_PHYSYNC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct ptp_clock_info *info = ptp->info;
+ struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
+ unsigned int i;
+
+ /* Check to see if any other pin previously had this function. */
+ for (i = 0; i < info->n_pins; i++) {
+ if (info->pin_config[i].func == func &&
+ info->pin_config[i].chan == chan) {
+ pin1 = &info->pin_config[i];
+ break;
+ }
+ }
+ if (pin1 && i == pin)
+ return 0;
+
+ /* Check the desired function and channel. */
+ switch (func) {
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_EXTTS:
+ if (chan >= info->n_ext_ts)
+ return -EINVAL;
+ break;
+ case PTP_PF_PEROUT:
+ if (chan >= info->n_per_out)
+ return -EINVAL;
+ break;
+ case PTP_PF_PHYSYNC:
+ if (chan != 0)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (info->verify(info, pin, func, chan)) {
+ pr_err("driver cannot use function %u on pin %u\n", func, chan);
+ return -EOPNOTSUPP;
+ }
+
+ /* Disable whatever function was previously assigned. */
+ if (pin1) {
+ ptp_disable_pinfunc(info, func, chan);
+ pin1->func = PTP_PF_NONE;
+ pin1->chan = 0;
+ }
+ ptp_disable_pinfunc(info, pin2->func, pin2->chan);
+ pin2->func = func;
+ pin2->chan = chan;
+
+ return 0;
+}
+
+int ptp_open(struct posix_clock *pc, fmode_t fmode)
+{
+ return 0;
+}
+
+long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
+{
+ struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ struct ptp_sys_offset_extended *extoff = NULL;
+ struct ptp_sys_offset_precise precise_offset;
+ struct system_device_crosststamp xtstamp;
+ struct ptp_clock_info *ops = ptp->info;
+ struct ptp_sys_offset *sysoff = NULL;
+ struct ptp_system_timestamp sts;
+ struct ptp_clock_request req;
+ struct ptp_clock_caps caps;
+ struct ptp_clock_time *pct;
+ unsigned int i, pin_index;
+ struct ptp_pin_desc pd;
+ struct timespec64 ts;
+ int enable, err = 0;
+
+ switch (cmd) {
+
+ case PTP_CLOCK_GETCAPS:
+ case PTP_CLOCK_GETCAPS2:
+ memset(&caps, 0, sizeof(caps));
+
+ caps.max_adj = ptp->info->max_adj;
+ caps.n_alarm = ptp->info->n_alarm;
+ caps.n_ext_ts = ptp->info->n_ext_ts;
+ caps.n_per_out = ptp->info->n_per_out;
+ caps.pps = ptp->info->pps;
+ caps.n_pins = ptp->info->n_pins;
+ caps.cross_timestamping = ptp->info->getcrosststamp != NULL;
+ caps.adjust_phase = ptp->info->adjphase != NULL;
+ if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
+ err = -EFAULT;
+ break;
+
+ case PTP_EXTTS_REQUEST:
+ case PTP_EXTTS_REQUEST2:
+ memset(&req, 0, sizeof(req));
+
+ if (copy_from_user(&req.extts, (void __user *)arg,
+ sizeof(req.extts))) {
+ err = -EFAULT;
+ break;
+ }
+ if (cmd == PTP_EXTTS_REQUEST2) {
+ /* Tell the drivers to check the flags carefully. */
+ req.extts.flags |= PTP_STRICT_FLAGS;
+ /* Make sure no reserved bit is set. */
+ if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
+ req.extts.rsv[0] || req.extts.rsv[1]) {
+ err = -EINVAL;
+ break;
+ }
+ /* Ensure one of the rising/falling edge bits is set. */
+ if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
+ (req.extts.flags & PTP_EXTTS_EDGES) == 0) {
+ err = -EINVAL;
+ break;
+ }
+ } else if (cmd == PTP_EXTTS_REQUEST) {
+ req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
+ req.extts.rsv[0] = 0;
+ req.extts.rsv[1] = 0;
+ }
+ if (req.extts.index >= ops->n_ext_ts) {
+ err = -EINVAL;
+ break;
+ }
+ req.type = PTP_CLK_REQ_EXTTS;
+ enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+ err = ops->enable(ops, &req, enable);
+ mutex_unlock(&ptp->pincfg_mux);
+ break;
+
+ case PTP_PEROUT_REQUEST:
+ case PTP_PEROUT_REQUEST2:
+ memset(&req, 0, sizeof(req));
+
+ if (copy_from_user(&req.perout, (void __user *)arg,
+ sizeof(req.perout))) {
+ err = -EFAULT;
+ break;
+ }
+ if (cmd == PTP_PEROUT_REQUEST2) {
+ struct ptp_perout_request *perout = &req.perout;
+
+ if (perout->flags & ~PTP_PEROUT_VALID_FLAGS) {
+ err = -EINVAL;
+ break;
+ }
+ /*
+ * The "on" field has undefined meaning if
+ * PTP_PEROUT_DUTY_CYCLE isn't set, we must still treat
+ * it as reserved, which must be set to zero.
+ */
+ if (!(perout->flags & PTP_PEROUT_DUTY_CYCLE) &&
+ (perout->rsv[0] || perout->rsv[1] ||
+ perout->rsv[2] || perout->rsv[3])) {
+ err = -EINVAL;
+ break;
+ }
+ if (perout->flags & PTP_PEROUT_DUTY_CYCLE) {
+ /* The duty cycle must be subunitary. */
+ if (perout->on.sec > perout->period.sec ||
+ (perout->on.sec == perout->period.sec &&
+ perout->on.nsec > perout->period.nsec)) {
+ err = -ERANGE;
+ break;
+ }
+ }
+ if (perout->flags & PTP_PEROUT_PHASE) {
+ /*
+ * The phase should be specified modulo the
+ * period, therefore anything equal or larger
+ * than 1 period is invalid.
+ */
+ if (perout->phase.sec > perout->period.sec ||
+ (perout->phase.sec == perout->period.sec &&
+ perout->phase.nsec >= perout->period.nsec)) {
+ err = -ERANGE;
+ break;
+ }
+ }
+ } else if (cmd == PTP_PEROUT_REQUEST) {
+ req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS;
+ req.perout.rsv[0] = 0;
+ req.perout.rsv[1] = 0;
+ req.perout.rsv[2] = 0;
+ req.perout.rsv[3] = 0;
+ }
+ if (req.perout.index >= ops->n_per_out) {
+ err = -EINVAL;
+ break;
+ }
+ req.type = PTP_CLK_REQ_PEROUT;
+ enable = req.perout.period.sec || req.perout.period.nsec;
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+ err = ops->enable(ops, &req, enable);
+ mutex_unlock(&ptp->pincfg_mux);
+ break;
+
+ case PTP_ENABLE_PPS:
+ case PTP_ENABLE_PPS2:
+ memset(&req, 0, sizeof(req));
+
+ if (!capable(CAP_SYS_TIME))
+ return -EPERM;
+ req.type = PTP_CLK_REQ_PPS;
+ enable = arg ? 1 : 0;
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+ err = ops->enable(ops, &req, enable);
+ mutex_unlock(&ptp->pincfg_mux);
+ break;
+
+ case PTP_SYS_OFFSET_PRECISE:
+ case PTP_SYS_OFFSET_PRECISE2:
+ if (!ptp->info->getcrosststamp) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+ err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
+ if (err)
+ break;
+
+ memset(&precise_offset, 0, sizeof(precise_offset));
+ ts = ktime_to_timespec64(xtstamp.device);
+ precise_offset.device.sec = ts.tv_sec;
+ precise_offset.device.nsec = ts.tv_nsec;
+ ts = ktime_to_timespec64(xtstamp.sys_realtime);
+ precise_offset.sys_realtime.sec = ts.tv_sec;
+ precise_offset.sys_realtime.nsec = ts.tv_nsec;
+ ts = ktime_to_timespec64(xtstamp.sys_monoraw);
+ precise_offset.sys_monoraw.sec = ts.tv_sec;
+ precise_offset.sys_monoraw.nsec = ts.tv_nsec;
+ if (copy_to_user((void __user *)arg, &precise_offset,
+ sizeof(precise_offset)))
+ err = -EFAULT;
+ break;
+
+ case PTP_SYS_OFFSET_EXTENDED:
+ case PTP_SYS_OFFSET_EXTENDED2:
+ if (!ptp->info->gettimex64) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+ extoff = memdup_user((void __user *)arg, sizeof(*extoff));
+ if (IS_ERR(extoff)) {
+ err = PTR_ERR(extoff);
+ extoff = NULL;
+ break;
+ }
+ if (extoff->n_samples > PTP_MAX_SAMPLES
+ || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) {
+ err = -EINVAL;
+ break;
+ }
+ for (i = 0; i < extoff->n_samples; i++) {
+ err = ptp->info->gettimex64(ptp->info, &ts, &sts);
+ if (err)
+ goto out;
+ extoff->ts[i][0].sec = sts.pre_ts.tv_sec;
+ extoff->ts[i][0].nsec = sts.pre_ts.tv_nsec;
+ extoff->ts[i][1].sec = ts.tv_sec;
+ extoff->ts[i][1].nsec = ts.tv_nsec;
+ extoff->ts[i][2].sec = sts.post_ts.tv_sec;
+ extoff->ts[i][2].nsec = sts.post_ts.tv_nsec;
+ }
+ if (copy_to_user((void __user *)arg, extoff, sizeof(*extoff)))
+ err = -EFAULT;
+ break;
+
+ case PTP_SYS_OFFSET:
+ case PTP_SYS_OFFSET2:
+ sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
+ if (IS_ERR(sysoff)) {
+ err = PTR_ERR(sysoff);
+ sysoff = NULL;
+ break;
+ }
+ if (sysoff->n_samples > PTP_MAX_SAMPLES) {
+ err = -EINVAL;
+ break;
+ }
+ pct = &sysoff->ts[0];
+ for (i = 0; i < sysoff->n_samples; i++) {
+ ktime_get_real_ts64(&ts);
+ pct->sec = ts.tv_sec;
+ pct->nsec = ts.tv_nsec;
+ pct++;
+ if (ops->gettimex64)
+ err = ops->gettimex64(ops, &ts, NULL);
+ else
+ err = ops->gettime64(ops, &ts);
+ if (err)
+ goto out;
+ pct->sec = ts.tv_sec;
+ pct->nsec = ts.tv_nsec;
+ pct++;
+ }
+ ktime_get_real_ts64(&ts);
+ pct->sec = ts.tv_sec;
+ pct->nsec = ts.tv_nsec;
+ if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
+ err = -EFAULT;
+ break;
+
+ case PTP_PIN_GETFUNC:
+ case PTP_PIN_GETFUNC2:
+ if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
+ err = -EFAULT;
+ break;
+ }
+ if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2]
+ || pd.rsv[3] || pd.rsv[4])
+ && cmd == PTP_PIN_GETFUNC2) {
+ err = -EINVAL;
+ break;
+ } else if (cmd == PTP_PIN_GETFUNC) {
+ pd.rsv[0] = 0;
+ pd.rsv[1] = 0;
+ pd.rsv[2] = 0;
+ pd.rsv[3] = 0;
+ pd.rsv[4] = 0;
+ }
+ pin_index = pd.index;
+ if (pin_index >= ops->n_pins) {
+ err = -EINVAL;
+ break;
+ }
+ pin_index = array_index_nospec(pin_index, ops->n_pins);
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+ pd = ops->pin_config[pin_index];
+ mutex_unlock(&ptp->pincfg_mux);
+ if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd)))
+ err = -EFAULT;
+ break;
+
+ case PTP_PIN_SETFUNC:
+ case PTP_PIN_SETFUNC2:
+ if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
+ err = -EFAULT;
+ break;
+ }
+ if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2]
+ || pd.rsv[3] || pd.rsv[4])
+ && cmd == PTP_PIN_SETFUNC2) {
+ err = -EINVAL;
+ break;
+ } else if (cmd == PTP_PIN_SETFUNC) {
+ pd.rsv[0] = 0;
+ pd.rsv[1] = 0;
+ pd.rsv[2] = 0;
+ pd.rsv[3] = 0;
+ pd.rsv[4] = 0;
+ }
+ pin_index = pd.index;
+ if (pin_index >= ops->n_pins) {
+ err = -EINVAL;
+ break;
+ }
+ pin_index = array_index_nospec(pin_index, ops->n_pins);
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+ err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
+ mutex_unlock(&ptp->pincfg_mux);
+ break;
+
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+out:
+ kfree(extoff);
+ kfree(sysoff);
+ return err;
+}
+
+__poll_t ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
+{
+ struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+
+ poll_wait(fp, &ptp->tsev_wq, wait);
+
+ return queue_cnt(&ptp->tsevq) ? EPOLLIN : 0;
+}
+
+#define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))
+
+ssize_t ptp_read(struct posix_clock *pc,
+ uint rdflags, char __user *buf, size_t cnt)
+{
+ struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ struct timestamp_event_queue *queue = &ptp->tsevq;
+ struct ptp_extts_event *event;
+ unsigned long flags;
+ size_t qcnt, i;
+ int result;
+
+ if (cnt % sizeof(struct ptp_extts_event) != 0)
+ return -EINVAL;
+
+ if (cnt > EXTTS_BUFSIZE)
+ cnt = EXTTS_BUFSIZE;
+
+ cnt = cnt / sizeof(struct ptp_extts_event);
+
+ if (mutex_lock_interruptible(&ptp->tsevq_mux))
+ return -ERESTARTSYS;
+
+ if (wait_event_interruptible(ptp->tsev_wq,
+ ptp->defunct || queue_cnt(queue))) {
+ mutex_unlock(&ptp->tsevq_mux);
+ return -ERESTARTSYS;
+ }
+
+ if (ptp->defunct) {
+ mutex_unlock(&ptp->tsevq_mux);
+ return -ENODEV;
+ }
+
+ event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
+ if (!event) {
+ mutex_unlock(&ptp->tsevq_mux);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&queue->lock, flags);
+
+ qcnt = queue_cnt(queue);
+
+ if (cnt > qcnt)
+ cnt = qcnt;
+
+ for (i = 0; i < cnt; i++) {
+ event[i] = queue->buf[queue->head];
+ /* Paired with READ_ONCE() in queue_cnt() */
+ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ }
+
+ spin_unlock_irqrestore(&queue->lock, flags);
+
+ cnt = cnt * sizeof(struct ptp_extts_event);
+
+ mutex_unlock(&ptp->tsevq_mux);
+
+ result = cnt;
+ if (copy_to_user(buf, event, cnt))
+ result = -EFAULT;
+
+ kfree(event);
+ return result;
+}
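
The PTP_* ioctls handled above, together with the dynamic POSIX clock interface, are what user space normally uses against the resulting /dev/ptpN character device. A minimal sketch (not part of this patch; "/dev/ptp0" is just an example node, and FD_TO_CLOCKID() uses the same clock-id encoding as the kernel's testptp tool):

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

/* Turn an open /dev/ptpN file descriptor into a dynamic POSIX clock id. */
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | 3)

int main(void)
{
	struct ptp_clock_caps caps;
	struct timespec ts;
	int fd;

	fd = open("/dev/ptp0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Dispatched to the PTP_CLOCK_GETCAPS case of ptp_ioctl() above. */
	if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps) == 0)
		printf("max_adj=%d n_pins=%d pps=%d\n",
		       caps.max_adj, caps.n_pins, caps.pps);

	/* Reads the PTP hardware clock through the posix clock layer. */
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("ptp time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);

	close(fd);
	return 0;
}
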
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
new file mode 100644
index 000000000..ed766943a
--- /dev/null
+++ b/drivers/ptp/ptp_clock.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PTP 1588 clock support
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ */
+#include <linux/idr.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/posix-clock.h>
+#include <linux/pps_kernel.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/sched/types.h>
+
+#include "ptp_private.h"
+
+#define PTP_MAX_ALARMS 4
+#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
+#define PTP_PPS_EVENT PPS_CAPTUREASSERT
+#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
+
+/* private globals */
+
+static dev_t ptp_devt;
+static struct class *ptp_class;
+
+static DEFINE_IDA(ptp_clocks_map);
+
+/* time stamp event queue operations */
+
+static inline int queue_free(struct timestamp_event_queue *q)
+{
+ return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
+}
+
+static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
+ struct ptp_clock_event *src)
+{
+ struct ptp_extts_event *dst;
+ unsigned long flags;
+ s64 seconds;
+ u32 remainder;
+
+ seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
+
+ spin_lock_irqsave(&queue->lock, flags);
+
+ dst = &queue->buf[queue->tail];
+ dst->index = src->index;
+ dst->t.sec = seconds;
+ dst->t.nsec = remainder;
+
+ /* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
+ if (!queue_free(queue))
+ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+
+ WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);
+
+ spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+long scaled_ppm_to_ppb(long ppm)
+{
+ /*
+ * The 'freq' field in the 'struct timex' is in parts per
+ * million, but with a 16 bit binary fractional field.
+ *
+ * We want to calculate
+ *
+ * ppb = scaled_ppm * 1000 / 2^16
+ *
+ * which simplifies to
+ *
+ * ppb = scaled_ppm * 125 / 2^13
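+ *
+ * For example, scaled_ppm = 3 << 16 (i.e. 3.0 ppm) gives
+ *
+ *     ppb = 196608 * 125 / 2^13 = 3000
+ *
+ * which is the expected 3000 ppb.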
+ */
+ s64 ppb = 1 + ppm;
+ ppb *= 125;
+ ppb >>= 13;
+ return (long) ppb;
+}
+EXPORT_SYMBOL(scaled_ppm_to_ppb);
+
+/* posix clock implementation */
+
+static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
+{
+ tp->tv_sec = 0;
+ tp->tv_nsec = 1;
+ return 0;
+}
+
+static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
+{
+ struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+
+ return ptp->info->settime64(ptp->info, tp);
+}
+
+static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
+{
+ struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ int err;
+
+ if (ptp->info->gettimex64)
+ err = ptp->info->gettimex64(ptp->info, tp, NULL);
+ else
+ err = ptp->info->gettime64(ptp->info, tp);
+ return err;
+}
+
+static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
+{
+ struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ struct ptp_clock_info *ops;
+ int err = -EOPNOTSUPP;
+
+ ops = ptp->info;
+
+ if (tx->modes & ADJ_SETOFFSET) {
+ struct timespec64 ts;
+ ktime_t kt;
+ s64 delta;
+
+ ts.tv_sec = tx->time.tv_sec;
+ ts.tv_nsec = tx->time.tv_usec;
+
+ if (!(tx->modes & ADJ_NANO))
+ ts.tv_nsec *= 1000;
+
+ if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ kt = timespec64_to_ktime(ts);
+ delta = ktime_to_ns(kt);
+ err = ops->adjtime(ops, delta);
+ } else if (tx->modes & ADJ_FREQUENCY) {
+ long ppb = scaled_ppm_to_ppb(tx->freq);
+ if (ppb > ops->max_adj || ppb < -ops->max_adj)
+ return -ERANGE;
+ if (ops->adjfine)
+ err = ops->adjfine(ops, tx->freq);
+ else
+ err = ops->adjfreq(ops, ppb);
+ ptp->dialed_frequency = tx->freq;
+ } else if (tx->modes & ADJ_OFFSET) {
+ if (ops->adjphase) {
+ s32 offset = tx->offset;
+
+ if (!(tx->modes & ADJ_NANO))
+ offset *= NSEC_PER_USEC;
+
+ err = ops->adjphase(ops, offset);
+ }
+ } else if (tx->modes == 0) {
+ tx->freq = ptp->dialed_frequency;
+ err = 0;
+ }
+
+ return err;
+}
+
+static struct posix_clock_operations ptp_clock_ops = {
+ .owner = THIS_MODULE,
+ .clock_adjtime = ptp_clock_adjtime,
+ .clock_gettime = ptp_clock_gettime,
+ .clock_getres = ptp_clock_getres,
+ .clock_settime = ptp_clock_settime,
+ .ioctl = ptp_ioctl,
+ .open = ptp_open,
+ .poll = ptp_poll,
+ .read = ptp_read,
+};
+
+static void ptp_clock_release(struct device *dev)
+{
+ struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
+
+ ptp_cleanup_pin_groups(ptp);
+ mutex_destroy(&ptp->tsevq_mux);
+ mutex_destroy(&ptp->pincfg_mux);
+ ida_simple_remove(&ptp_clocks_map, ptp->index);
+ kfree(ptp);
+}
+
+static void ptp_aux_kworker(struct kthread_work *work)
+{
+ struct ptp_clock *ptp = container_of(work, struct ptp_clock,
+ aux_work.work);
+ struct ptp_clock_info *info = ptp->info;
+ long delay;
+
+ delay = info->do_aux_work(info);
+
+ if (delay >= 0)
+ kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+
+/* public interface */
+
+struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+ struct device *parent)
+{
+ struct ptp_clock *ptp;
+ int err = 0, index, major = MAJOR(ptp_devt);
+
+ if (info->n_alarm > PTP_MAX_ALARMS)
+ return ERR_PTR(-EINVAL);
+
+ /* Initialize a clock structure. */
+ err = -ENOMEM;
+ ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
+ if (ptp == NULL)
+ goto no_memory;
+
+ index = ida_simple_get(&ptp_clocks_map, 0, MINORMASK + 1, GFP_KERNEL);
+ if (index < 0) {
+ err = index;
+ goto no_slot;
+ }
+
+ ptp->clock.ops = ptp_clock_ops;
+ ptp->info = info;
+ ptp->devid = MKDEV(major, index);
+ ptp->index = index;
+ spin_lock_init(&ptp->tsevq.lock);
+ mutex_init(&ptp->tsevq_mux);
+ mutex_init(&ptp->pincfg_mux);
+ init_waitqueue_head(&ptp->tsev_wq);
+
+ if (ptp->info->do_aux_work) {
+ kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
+ ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
+ if (IS_ERR(ptp->kworker)) {
+ err = PTR_ERR(ptp->kworker);
+ pr_err("failed to create ptp aux_worker %d\n", err);
+ goto kworker_err;
+ }
+ }
+
+ err = ptp_populate_pin_groups(ptp);
+ if (err)
+ goto no_pin_groups;
+
+ /* Register a new PPS source. */
+ if (info->pps) {
+ struct pps_source_info pps;
+ memset(&pps, 0, sizeof(pps));
+ snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
+ pps.mode = PTP_PPS_MODE;
+ pps.owner = info->owner;
+ ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
+ if (IS_ERR(ptp->pps_source)) {
+ err = PTR_ERR(ptp->pps_source);
+ pr_err("failed to register pps source\n");
+ goto no_pps;
+ }
+ }
+
+ /* Initialize a new device of our class in our clock structure. */
+ device_initialize(&ptp->dev);
+ ptp->dev.devt = ptp->devid;
+ ptp->dev.class = ptp_class;
+ ptp->dev.parent = parent;
+ ptp->dev.groups = ptp->pin_attr_groups;
+ ptp->dev.release = ptp_clock_release;
+ dev_set_drvdata(&ptp->dev, ptp);
+ dev_set_name(&ptp->dev, "ptp%d", ptp->index);
+
+ /* Create a posix clock and link it to the device. */
+ err = posix_clock_register(&ptp->clock, &ptp->dev);
+ if (err) {
+ pr_err("failed to create posix clock\n");
+ goto no_clock;
+ }
+
+ return ptp;
+
+no_clock:
+ if (ptp->pps_source)
+ pps_unregister_source(ptp->pps_source);
+no_pps:
+ ptp_cleanup_pin_groups(ptp);
+no_pin_groups:
+ if (ptp->kworker)
+ kthread_destroy_worker(ptp->kworker);
+kworker_err:
+ mutex_destroy(&ptp->tsevq_mux);
+ mutex_destroy(&ptp->pincfg_mux);
+ ida_simple_remove(&ptp_clocks_map, index);
+no_slot:
+ kfree(ptp);
+no_memory:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(ptp_clock_register);
+
+int ptp_clock_unregister(struct ptp_clock *ptp)
+{
+ ptp->defunct = 1;
+ wake_up_interruptible(&ptp->tsev_wq);
+
+ if (ptp->kworker) {
+ kthread_cancel_delayed_work_sync(&ptp->aux_work);
+ kthread_destroy_worker(ptp->kworker);
+ }
+
+ /* Release the clock's resources. */
+ if (ptp->pps_source)
+ pps_unregister_source(ptp->pps_source);
+
+ posix_clock_unregister(&ptp->clock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ptp_clock_unregister);
+
+void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
+{
+ struct pps_event_time evt;
+
+ switch (event->type) {
+
+ case PTP_CLOCK_ALARM:
+ break;
+
+ case PTP_CLOCK_EXTTS:
+ enqueue_external_timestamp(&ptp->tsevq, event);
+ wake_up_interruptible(&ptp->tsev_wq);
+ break;
+
+ case PTP_CLOCK_PPS:
+ pps_get_ts(&evt);
+ pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
+ break;
+
+ case PTP_CLOCK_PPSUSR:
+ pps_event(ptp->pps_source, &event->pps_times,
+ PTP_PPS_EVENT, NULL);
+ break;
+ }
+}
+EXPORT_SYMBOL(ptp_clock_event);
+
+int ptp_clock_index(struct ptp_clock *ptp)
+{
+ return ptp->index;
+}
+EXPORT_SYMBOL(ptp_clock_index);
+
+int ptp_find_pin(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct ptp_pin_desc *pin = NULL;
+ int i;
+
+ for (i = 0; i < ptp->info->n_pins; i++) {
+ if (ptp->info->pin_config[i].func == func &&
+ ptp->info->pin_config[i].chan == chan) {
+ pin = &ptp->info->pin_config[i];
+ break;
+ }
+ }
+
+ return pin ? i : -1;
+}
+EXPORT_SYMBOL(ptp_find_pin);
+
+int ptp_find_pin_unlocked(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ int result;
+
+ mutex_lock(&ptp->pincfg_mux);
+
+ result = ptp_find_pin(ptp, func, chan);
+
+ mutex_unlock(&ptp->pincfg_mux);
+
+ return result;
+}
+EXPORT_SYMBOL(ptp_find_pin_unlocked);
+
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
+{
+ return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+EXPORT_SYMBOL(ptp_schedule_worker);
+
+void ptp_cancel_worker_sync(struct ptp_clock *ptp)
+{
+ kthread_cancel_delayed_work_sync(&ptp->aux_work);
+}
+EXPORT_SYMBOL(ptp_cancel_worker_sync);
+
+/* module operations */
+
+static void __exit ptp_exit(void)
+{
+ class_destroy(ptp_class);
+ unregister_chrdev_region(ptp_devt, MINORMASK + 1);
+ ida_destroy(&ptp_clocks_map);
+}
+
+static int __init ptp_init(void)
+{
+ int err;
+
+ ptp_class = class_create(THIS_MODULE, "ptp");
+ if (IS_ERR(ptp_class)) {
+ pr_err("ptp: failed to allocate class\n");
+ return PTR_ERR(ptp_class);
+ }
+
+ err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
+ if (err < 0) {
+ pr_err("ptp: failed to allocate device region\n");
+ goto no_region;
+ }
+
+ ptp_class->dev_groups = ptp_groups;
+ pr_info("PTP clock support registered\n");
+ return 0;
+
+no_region:
+ class_destroy(ptp_class);
+ return err;
+}
+
+subsys_initcall(ptp_init);
+module_exit(ptp_exit);
+
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
+MODULE_DESCRIPTION("PTP clocks support");
+MODULE_LICENSE("GPL");
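
The ADJ_FREQUENCY and ADJ_SETOFFSET branches of ptp_clock_adjtime() above are reached from user space through clock_adjtime(2) on the dynamic clock id. A minimal sketch (not part of this patch; it reuses the FD_TO_CLOCKID() helper and the example "/dev/ptp0" node from the previous sketch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/timex.h>

#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | 3)

int main(void)
{
	struct timex tx;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;

	/* Frequency: +5.0 ppm expressed as scaled ppm (16-bit binary fraction). */
	memset(&tx, 0, sizeof(tx));
	tx.modes = ADJ_FREQUENCY;
	tx.freq = 5 << 16;	/* handled by the ADJ_FREQUENCY branch (adjfine/adjfreq) */
	if (clock_adjtime(FD_TO_CLOCKID(fd), &tx) < 0)
		perror("ADJ_FREQUENCY");

	/* Step the clock forward by 1.5 s via the ADJ_SETOFFSET branch. */
	memset(&tx, 0, sizeof(tx));
	tx.modes = ADJ_SETOFFSET | ADJ_NANO;
	tx.time.tv_sec = 1;
	tx.time.tv_usec = 500000000;	/* interpreted as nanoseconds because ADJ_NANO is set */
	if (clock_adjtime(FD_TO_CLOCKID(fd), &tx) < 0)
		perror("ADJ_SETOFFSET");

	close(fd);
	return 0;
}
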
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
new file mode 100644
index 000000000..663255774
--- /dev/null
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -0,0 +1,2230 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PTP hardware clock driver for the IDT ClockMatrix(TM) family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/timekeeping.h>
+#include <linux/string.h>
+
+#include "ptp_private.h"
+#include "ptp_clockmatrix.h"
+
+MODULE_DESCRIPTION("Driver for IDT ClockMatrix(TM) family");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
+MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+/*
+ * The name of the firmware file to be loaded
+ * overrides any automatic selection
+ */
+static char *firmware;
+module_param(firmware, charp, 0);
+
+#define SETTIME_CORRECTION (0)
+
+static long set_write_phase_ready(struct ptp_clock_info *ptp)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+
+ channel->write_phase_ready = 1;
+
+ return 0;
+}
+
+static int char_array_to_timespec(u8 *buf,
+ u8 count,
+ struct timespec64 *ts)
+{
+ u8 i;
+ u64 nsec;
+ time64_t sec;
+
+ if (count < TOD_BYTE_COUNT)
+ return 1;
+
+ /* Sub-nanoseconds are in buf[0]. */
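+ /* Remaining layout is little endian: buf[1..4] hold the 32-bit ns value, buf[5..10] the 48-bit seconds. */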
+ nsec = buf[4];
+ for (i = 0; i < 3; i++) {
+ nsec <<= 8;
+ nsec |= buf[3 - i];
+ }
+
+ sec = buf[10];
+ for (i = 0; i < 5; i++) {
+ sec <<= 8;
+ sec |= buf[9 - i];
+ }
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+
+ return 0;
+}
+
+static int timespec_to_char_array(struct timespec64 const *ts,
+ u8 *buf,
+ u8 count)
+{
+ u8 i;
+ s32 nsec;
+ time64_t sec;
+
+ if (count < TOD_BYTE_COUNT)
+ return 1;
+
+ nsec = ts->tv_nsec;
+ sec = ts->tv_sec;
+
+ /* Sub-nanoseconds are in buf[0]. */
+ buf[0] = 0;
+ for (i = 1; i < 5; i++) {
+ buf[i] = nsec & 0xff;
+ nsec >>= 8;
+ }
+
+ for (i = 5; i < TOD_BYTE_COUNT; i++) {
+
+ buf[i] = sec & 0xff;
+ sec >>= 8;
+ }
+
+ return 0;
+}
+
+static int idtcm_strverscmp(const char *version1, const char *version2)
+{
+ u8 ver1[3], ver2[3];
+ int i;
+
+ if (sscanf(version1, "%hhu.%hhu.%hhu",
+ &ver1[0], &ver1[1], &ver1[2]) != 3)
+ return -1;
+ if (sscanf(version2, "%hhu.%hhu.%hhu",
+ &ver2[0], &ver2[1], &ver2[2]) != 3)
+ return -1;
+
+ for (i = 0; i < 3; i++) {
+ if (ver1[i] > ver2[i])
+ return 1;
+ if (ver1[i] < ver2[i])
+ return -1;
+ }
+
+ return 0;
+}
+
+static int idtcm_xfer_read(struct idtcm *idtcm,
+ u8 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ struct i2c_client *client = idtcm->client;
+ struct i2c_msg msg[2];
+ int cnt;
+ char *fmt = "i2c_transfer failed at %d in %s, at addr: %04X!\n";
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &regaddr;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = count;
+ msg[1].buf = buf;
+
+ cnt = i2c_transfer(client->adapter, msg, 2);
+
+ if (cnt < 0) {
+ dev_err(&client->dev,
+ fmt,
+ __LINE__,
+ __func__,
+ regaddr);
+ return cnt;
+ } else if (cnt != 2) {
+ dev_err(&client->dev,
+ "i2c_transfer sent only %d of %d messages\n", cnt, 2);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int idtcm_xfer_write(struct idtcm *idtcm,
+ u8 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ struct i2c_client *client = idtcm->client;
+ /* we add 1 byte for device register */
+ u8 msg[IDTCM_MAX_WRITE_COUNT + 1];
+ int cnt;
+ char *fmt = "i2c_master_send failed at %d in %s, at addr: %04X!\n";
+
+ if (count > IDTCM_MAX_WRITE_COUNT)
+ return -EINVAL;
+
+ msg[0] = regaddr;
+ memcpy(&msg[1], buf, count);
+
+ cnt = i2c_master_send(client, msg, count + 1);
+
+ if (cnt < 0) {
+ dev_err(&client->dev,
+ fmt,
+ __LINE__,
+ __func__,
+ regaddr);
+ return cnt;
+ }
+
+ return 0;
+}
+
+static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
+{
+ u8 buf[4];
+ int err;
+
+ if (idtcm->page_offset == val)
+ return 0;
+
+ buf[0] = 0x0;
+ buf[1] = val;
+ buf[2] = 0x10;
+ buf[3] = 0x20;
+
+ err = idtcm_xfer_write(idtcm, PAGE_ADDR, buf, sizeof(buf));
+
+ if (err) {
+ idtcm->page_offset = 0xff;
+ dev_err(&idtcm->client->dev, "failed to set page offset\n");
+ } else {
+ idtcm->page_offset = val;
+ }
+
+ return err;
+}
+
+static int _idtcm_rdwr(struct idtcm *idtcm,
+ u16 regaddr,
+ u8 *buf,
+ u16 count,
+ bool write)
+{
+ u8 hi;
+ u8 lo;
+ int err;
+
+ hi = (regaddr >> 8) & 0xff;
+ lo = regaddr & 0xff;
+
+ err = idtcm_page_offset(idtcm, hi);
+
+ if (err)
+ return err;
+
+ if (write)
+ return idtcm_xfer_write(idtcm, lo, buf, count);
+
+ return idtcm_xfer_read(idtcm, lo, buf, count);
+}
+
+static int idtcm_read(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return _idtcm_rdwr(idtcm, module + regaddr, buf, count, false);
+}
+
+static int idtcm_write(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return _idtcm_rdwr(idtcm, module + regaddr, buf, count, true);
+}
+
+static int _idtcm_gettime(struct idtcm_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 buf[TOD_BYTE_COUNT];
+ u8 timeout = 10;
+ u8 trigger;
+ int err;
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
+ if (err)
+ return err;
+
+ trigger &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT);
+ trigger |= (1 << TOD_READ_TRIGGER_SHIFT);
+ trigger &= ~TOD_READ_TRIGGER_MODE; /* single shot */
+
+ err = idtcm_write(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
+ if (err)
+ return err;
+
+ /* wait trigger to be 0 */
+ while (trigger & TOD_READ_TRIGGER_MASK) {
+
+ if (idtcm->calculate_overhead_flag)
+ idtcm->start_time = ktime_get_raw();
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_CMD, &trigger,
+ sizeof(trigger));
+
+ if (err)
+ return err;
+
+ if (--timeout == 0)
+ return -EIO;
+ }
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = char_array_to_timespec(buf, sizeof(buf), ts);
+
+ return err;
+}
+
+static int _sync_pll_output(struct idtcm *idtcm,
+ u8 pll,
+ u8 sync_src,
+ u8 qn,
+ u8 qn_plus_1)
+{
+ int err;
+ u8 val;
+ u16 sync_ctrl0;
+ u16 sync_ctrl1;
+ u8 temp;
+
+ if ((qn == 0) && (qn_plus_1 == 0))
+ return 0;
+
+ switch (pll) {
+ case 0:
+ sync_ctrl0 = HW_Q0_Q1_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q0_Q1_CH_SYNC_CTRL_1;
+ break;
+ case 1:
+ sync_ctrl0 = HW_Q2_Q3_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q2_Q3_CH_SYNC_CTRL_1;
+ break;
+ case 2:
+ sync_ctrl0 = HW_Q4_Q5_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q4_Q5_CH_SYNC_CTRL_1;
+ break;
+ case 3:
+ sync_ctrl0 = HW_Q6_Q7_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q6_Q7_CH_SYNC_CTRL_1;
+ break;
+ case 4:
+ sync_ctrl0 = HW_Q8_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q8_CH_SYNC_CTRL_1;
+ break;
+ case 5:
+ sync_ctrl0 = HW_Q9_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q9_CH_SYNC_CTRL_1;
+ break;
+ case 6:
+ sync_ctrl0 = HW_Q10_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q10_CH_SYNC_CTRL_1;
+ break;
+ case 7:
+ sync_ctrl0 = HW_Q11_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q11_CH_SYNC_CTRL_1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = SYNCTRL1_MASTER_SYNC_RST;
+
+ /* Place master sync in reset */
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, 0, sync_ctrl0, &sync_src, sizeof(sync_src));
+ if (err)
+ return err;
+
+ /* Set sync trigger mask */
+ val |= SYNCTRL1_FBDIV_FRAME_SYNC_TRIG | SYNCTRL1_FBDIV_SYNC_TRIG;
+
+ if (qn)
+ val |= SYNCTRL1_Q0_DIV_SYNC_TRIG;
+
+ if (qn_plus_1)
+ val |= SYNCTRL1_Q1_DIV_SYNC_TRIG;
+
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+ if (err)
+ return err;
+
+ /* PLL5 can have OUT8 as second additional output. */
+ if ((pll == 5) && (qn_plus_1 != 0)) {
+ err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ temp &= ~(Q9_TO_Q8_SYNC_TRIG);
+
+ err = idtcm_write(idtcm, 0, HW_Q8_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ temp |= Q9_TO_Q8_SYNC_TRIG;
+
+ err = idtcm_write(idtcm, 0, HW_Q8_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+ }
+
+ /* PLL6 can have OUT11 as second additional output. */
+ if ((pll == 6) && (qn_plus_1 != 0)) {
+ err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ temp &= ~(Q10_TO_Q11_SYNC_TRIG);
+
+ err = idtcm_write(idtcm, 0, HW_Q11_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ temp |= Q10_TO_Q11_SYNC_TRIG;
+
+ err = idtcm_write(idtcm, 0, HW_Q11_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+ }
+
+ /* Place master sync out of reset */
+ val &= ~(SYNCTRL1_MASTER_SYNC_RST);
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+
+ return err;
+}
+
+static int sync_source_dpll_tod_pps(u16 tod_addr, u8 *sync_src)
+{
+ int err = 0;
+
+ switch (tod_addr) {
+ case TOD_0:
+ *sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
+ break;
+ case TOD_1:
+ *sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
+ break;
+ case TOD_2:
+ *sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
+ break;
+ case TOD_3:
+ *sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int idtcm_sync_pps_output(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 pll;
+ u8 sync_src;
+ u8 qn;
+ u8 qn_plus_1;
+ int err = 0;
+ u8 out8_mux = 0;
+ u8 out11_mux = 0;
+ u8 temp;
+
+ u16 output_mask = channel->output_mask;
+
+ err = sync_source_dpll_tod_pps(channel->tod_n, &sync_src);
+ if (err)
+ return err;
+
+ err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ if ((temp & Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
+ Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
+ out8_mux = 1;
+
+ err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ if ((temp & Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
+ Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
+ out11_mux = 1;
+
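+ /*
+ * Walk the output mask: bit n of output_mask corresponds to OUTn.
+ * Each of the first four PLLs drives two outputs; OUT8 is driven by
+ * PLL4 or PLL5 and OUT11 by PLL6 or PLL7, depending on the fanout
+ * muxes read above.
+ */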
+ for (pll = 0; pll < 8; pll++) {
+ qn = 0;
+ qn_plus_1 = 0;
+
+ if (pll < 4) {
+ /* The first 4 PLLs have 2 outputs each */
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ } else if (pll == 4) {
+ if (out8_mux == 0) {
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ } else if (pll == 5) {
+ if (out8_mux) {
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ } else if (pll == 6) {
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ if (out11_mux) {
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ } else if (pll == 7) {
+ if (out11_mux == 0) {
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ }
+
+ if ((qn != 0) || (qn_plus_1 != 0))
+ err = _sync_pll_output(idtcm, pll, sync_src, qn,
+ qn_plus_1);
+
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum hw_tod_write_trig_sel wr_trig)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[TOD_BYTE_COUNT];
+ u8 cmd;
+ int err;
+ struct timespec64 local_ts = *ts;
+ s64 total_overhead_ns;
+
+ /* Configure HW TOD write trigger. */
+ err = idtcm_read(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (err)
+ return err;
+
+ cmd &= ~(0x0f);
+ cmd |= wr_trig | 0x08;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (err)
+ return err;
+
+ if (wr_trig != HW_TOD_WR_TRIG_SEL_MSB) {
+
+ err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+
+ if (err)
+ return err;
+ }
+
+ /* Arm the HW TOD write trigger. */
+ cmd &= ~(0x08);
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (wr_trig == HW_TOD_WR_TRIG_SEL_MSB) {
+
+ if (idtcm->calculate_overhead_flag) {
+ /* Assumption: I2C @ 400 kHz */
+ total_overhead_ns = ktime_to_ns(ktime_get_raw()
+ - idtcm->start_time)
+ + idtcm->tod_write_overhead_ns
+ + SETTIME_CORRECTION;
+
+ timespec64_add_ns(&local_ts, total_overhead_ns);
+
+ idtcm->calculate_overhead_flag = 0;
+ }
+
+ err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+ }
+
+ return err;
+}
+
+static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum scsr_tod_write_trig_sel wr_trig,
+ enum scsr_tod_write_type_sel wr_type)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ unsigned char buf[TOD_BYTE_COUNT], cmd;
+ struct timespec64 local_ts = *ts;
+ int err, count = 0;
+
+ timespec64_add_ns(&local_ts, SETTIME_CORRECTION);
+
+ err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, channel->tod_write, TOD_WRITE,
+ buf, sizeof(buf));
+ if (err)
+ return err;
+
+ /* Trigger the write operation. */
+ err = idtcm_read(idtcm, channel->tod_write, TOD_WRITE_CMD,
+ &cmd, sizeof(cmd));
+ if (err)
+ return err;
+
+ cmd &= ~(TOD_WRITE_SELECTION_MASK << TOD_WRITE_SELECTION_SHIFT);
+ cmd &= ~(TOD_WRITE_TYPE_MASK << TOD_WRITE_TYPE_SHIFT);
+ cmd |= (wr_trig << TOD_WRITE_SELECTION_SHIFT);
+ cmd |= (wr_type << TOD_WRITE_TYPE_SHIFT);
+
+ err = idtcm_write(idtcm, channel->tod_write, TOD_WRITE_CMD,
+ &cmd, sizeof(cmd));
+ if (err)
+ return err;
+
+ /* Wait for the operation to complete. */
+ while (1) {
+ /* pps trigger takes up to 1 sec to complete */
+ if (wr_trig == SCSR_TOD_WR_TRIG_SEL_TODPPS)
+ msleep(50);
+
+ err = idtcm_read(idtcm, channel->tod_write, TOD_WRITE_CMD,
+ &cmd, sizeof(cmd));
+ if (err)
+ return err;
+
+ if (cmd == 0)
+ break;
+
+ if (++count > 20) {
+ dev_err(&idtcm->client->dev,
+ "Timed out waiting for the write counter\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int _idtcm_settime(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum hw_tod_write_trig_sel wr_trig)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+ int i;
+ u8 trig_sel;
+
+ err = _idtcm_set_dpll_hw_tod(channel, ts, wr_trig);
+
+ if (err)
+ return err;
+
+ /* Wait for the operation to complete. */
+ for (i = 0; i < 10000; i++) {
+ err = idtcm_read(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_CTRL_1, &trig_sel,
+ sizeof(trig_sel));
+
+ if (err)
+ return err;
+
+ if (trig_sel == 0x4a)
+ break;
+
+ err = 1;
+ }
+
+ if (err) {
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+ return err;
+ }
+
+ return idtcm_sync_pps_output(channel);
+}
+
+static int _idtcm_settime_v487(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum scsr_tod_write_type_sel wr_type)
+{
+ return _idtcm_set_dpll_scsr_tod(channel, ts,
+ SCSR_TOD_WR_TRIG_SEL_IMMEDIATE,
+ wr_type);
+}
+
+static int idtcm_set_phase_pull_in_offset(struct idtcm_channel *channel,
+ s32 offset_ns)
+{
+ int err;
+ int i;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[4];
+
+ for (i = 0; i < 4; i++) {
+ buf[i] = 0xff & (offset_ns);
+ offset_ns >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in, PULL_IN_OFFSET,
+ buf, sizeof(buf));
+
+ return err;
+}
+
+static int idtcm_set_phase_pull_in_slope_limit(struct idtcm_channel *channel,
+ u32 max_ffo_ppb)
+{
+ int err;
+ u8 i;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[3];
+
+ if (max_ffo_ppb & 0xff000000)
+ max_ffo_ppb = 0;
+
+ for (i = 0; i < 3; i++) {
+ buf[i] = 0xff & (max_ffo_ppb);
+ max_ffo_ppb >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in,
+ PULL_IN_SLOPE_LIMIT, buf, sizeof(buf));
+
+ return err;
+}
+
+static int idtcm_start_phase_pull_in(struct idtcm_channel *channel)
+{
+ int err;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf;
+
+ err = idtcm_read(idtcm, channel->dpll_phase_pull_in, PULL_IN_CTRL,
+ &buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ if (buf == 0) {
+ buf = 0x01;
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in,
+ PULL_IN_CTRL, &buf, sizeof(buf));
+ } else {
+ err = -EBUSY;
+ }
+
+ return err;
+}
+
+static int idtcm_do_phase_pull_in(struct idtcm_channel *channel,
+ s32 offset_ns,
+ u32 max_ffo_ppb)
+{
+ int err;
+
+ err = idtcm_set_phase_pull_in_offset(channel, -offset_ns);
+
+ if (err)
+ return err;
+
+ err = idtcm_set_phase_pull_in_slope_limit(channel, max_ffo_ppb);
+
+ if (err)
+ return err;
+
+ err = idtcm_start_phase_pull_in(channel);
+
+ return err;
+}
+
+static int set_tod_write_overhead(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ s64 current_ns = 0;
+ s64 lowest_ns = 0;
+ int err;
+ u8 i;
+
+ ktime_t start;
+ ktime_t stop;
+
+ char buf[TOD_BYTE_COUNT] = {0};
+
+ /* Set page offset */
+ idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_OVR__0,
+ buf, sizeof(buf));
+
+ for (i = 0; i < TOD_WRITE_OVERHEAD_COUNT_MAX; i++) {
+
+ start = ktime_get_raw();
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ stop = ktime_get_raw();
+
+ current_ns = ktime_to_ns(stop - start);
+
+ if (i == 0) {
+ lowest_ns = current_ns;
+ } else {
+ if (current_ns < lowest_ns)
+ lowest_ns = current_ns;
+ }
+ }
+
+ idtcm->tod_write_overhead_ns = lowest_ns;
+
+ return err;
+}
+
+static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta)
+{
+ int err;
+ struct idtcm *idtcm = channel->idtcm;
+ struct timespec64 ts;
+ s64 now;
+
+ if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
+ err = idtcm_do_phase_pull_in(channel, delta, 0);
+ } else {
+ idtcm->calculate_overhead_flag = 1;
+
+ err = set_tod_write_overhead(channel);
+
+ if (err)
+ return err;
+
+ err = _idtcm_gettime(channel, &ts);
+
+ if (err)
+ return err;
+
+ now = timespec64_to_ns(&ts);
+ now += delta;
+
+ ts = ns_to_timespec64(now);
+
+ err = _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB);
+ }
+
+ return err;
+}
+
+static int idtcm_state_machine_reset(struct idtcm *idtcm)
+{
+ int err;
+ u8 byte = SM_RESET_CMD;
+
+ err = idtcm_write(idtcm, RESET_CTRL, SM_RESET, &byte, sizeof(byte));
+
+ if (!err)
+ msleep_interruptible(POST_SM_RESET_DELAY_MS);
+
+ return err;
+}
+
+static int idtcm_read_hw_rev_id(struct idtcm *idtcm, u8 *hw_rev_id)
+{
+ return idtcm_read(idtcm, HW_REVISION, REV_ID, hw_rev_id, sizeof(u8));
+}
+
+static int idtcm_read_product_id(struct idtcm *idtcm, u16 *product_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, PRODUCT_ID, buf, sizeof(buf));
+
+ *product_id = (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int idtcm_read_major_release(struct idtcm *idtcm, u8 *major)
+{
+ int err;
+ u8 buf = 0;
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, MAJ_REL, &buf, sizeof(buf));
+
+ *major = buf >> 1;
+
+ return err;
+}
+
+static int idtcm_read_minor_release(struct idtcm *idtcm, u8 *minor)
+{
+ return idtcm_read(idtcm, GENERAL_STATUS, MIN_REL, minor, sizeof(u8));
+}
+
+static int idtcm_read_hotfix_release(struct idtcm *idtcm, u8 *hotfix)
+{
+ return idtcm_read(idtcm,
+ GENERAL_STATUS,
+ HOTFIX_REL,
+ hotfix,
+ sizeof(u8));
+}
+
+static int idtcm_read_otp_scsr_config_select(struct idtcm *idtcm,
+ u8 *config_select)
+{
+ return idtcm_read(idtcm, GENERAL_STATUS, OTP_SCSR_CONFIG_SELECT,
+ config_select, sizeof(u8));
+}
+
+static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val)
+{
+ int err = 0;
+
+ switch (addr) {
+ case TOD0_OUT_ALIGN_MASK_ADDR:
+ SET_U16_LSB(idtcm->channel[0].output_mask, val);
+ break;
+ case TOD0_OUT_ALIGN_MASK_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[0].output_mask, val);
+ break;
+ case TOD1_OUT_ALIGN_MASK_ADDR:
+ SET_U16_LSB(idtcm->channel[1].output_mask, val);
+ break;
+ case TOD1_OUT_ALIGN_MASK_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[1].output_mask, val);
+ break;
+ case TOD2_OUT_ALIGN_MASK_ADDR:
+ SET_U16_LSB(idtcm->channel[2].output_mask, val);
+ break;
+ case TOD2_OUT_ALIGN_MASK_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[2].output_mask, val);
+ break;
+ case TOD3_OUT_ALIGN_MASK_ADDR:
+ SET_U16_LSB(idtcm->channel[3].output_mask, val);
+ break;
+ case TOD3_OUT_ALIGN_MASK_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[3].output_mask, val);
+ break;
+ default:
+ err = -EFAULT; /* Bad address */
+ break;
+ }
+
+ return err;
+}
+
+static int set_tod_ptp_pll(struct idtcm *idtcm, u8 index, u8 pll)
+{
+ if (index >= MAX_TOD) {
+ dev_err(&idtcm->client->dev, "ToD%d not supported\n", index);
+ return -EINVAL;
+ }
+
+ if (pll >= MAX_PLL) {
+ dev_err(&idtcm->client->dev, "Pll%d not supported\n", pll);
+ return -EINVAL;
+ }
+
+ idtcm->channel[index].pll = pll;
+
+ return 0;
+}
+
+static int check_and_set_masks(struct idtcm *idtcm,
+ u16 regaddr,
+ u8 val)
+{
+ int err = 0;
+
+ switch (regaddr) {
+ case TOD_MASK_ADDR:
+ if ((val & 0xf0) || !(val & 0x0f)) {
+ dev_err(&idtcm->client->dev,
+ "Invalid TOD mask 0x%hhx\n", val);
+ err = -EINVAL;
+ } else {
+ idtcm->tod_mask = val;
+ }
+ break;
+ case TOD0_PTP_PLL_ADDR:
+ err = set_tod_ptp_pll(idtcm, 0, val);
+ break;
+ case TOD1_PTP_PLL_ADDR:
+ err = set_tod_ptp_pll(idtcm, 1, val);
+ break;
+ case TOD2_PTP_PLL_ADDR:
+ err = set_tod_ptp_pll(idtcm, 2, val);
+ break;
+ case TOD3_PTP_PLL_ADDR:
+ err = set_tod_ptp_pll(idtcm, 3, val);
+ break;
+ default:
+ err = set_pll_output_mask(idtcm, regaddr, val);
+ break;
+ }
+
+ return err;
+}
+
+static void display_pll_and_masks(struct idtcm *idtcm)
+{
+ u8 i;
+ u8 mask;
+
+ dev_dbg(&idtcm->client->dev, "tod_mask = 0x%02x\n", idtcm->tod_mask);
+
+ for (i = 0; i < MAX_TOD; i++) {
+ mask = 1 << i;
+
+ if (mask & idtcm->tod_mask)
+ dev_dbg(&idtcm->client->dev,
+ "TOD%d pll = %d output_mask = 0x%04x\n",
+ i, idtcm->channel[i].pll,
+ idtcm->channel[i].output_mask);
+ }
+}
+
+static int idtcm_load_firmware(struct idtcm *idtcm,
+ struct device *dev)
+{
+ char fname[128] = FW_FILENAME;
+ const struct firmware *fw;
+ struct idtcm_fwrc *rec;
+ u32 regaddr;
+ int err;
+ s32 len;
+ u8 val;
+ u8 loaddr;
+
+ if (firmware) /* module parameter */
+ snprintf(fname, sizeof(fname), "%s", firmware);
+
+ dev_dbg(&idtcm->client->dev, "requesting firmware '%s'\n", fname);
+
+ err = request_firmware(&fw, fname, dev);
+
+ if (err) {
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+ return err;
+ }
+
+ dev_dbg(&idtcm->client->dev, "firmware size %zu bytes\n", fw->size);
+
+ rec = (struct idtcm_fwrc *) fw->data;
+
+ if (fw->size > 0)
+ idtcm_state_machine_reset(idtcm);
+
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+
+ if (rec->reserved) {
+ dev_err(&idtcm->client->dev,
+ "bad firmware, reserved field non-zero\n");
+ err = -EINVAL;
+ } else {
+ regaddr = rec->hiaddr << 8;
+ regaddr |= rec->loaddr;
+
+ val = rec->value;
+ loaddr = rec->loaddr;
+
+ rec++;
+
+ err = check_and_set_masks(idtcm, regaddr, val);
+ }
+
+ if (err != -EINVAL) {
+ err = 0;
+
+ /* Top (status registers) and bottom are read-only */
+ if ((regaddr < GPIO_USER_CONTROL)
+ || (regaddr >= SCRATCH))
+ continue;
+
+ /* Page size 128, last 4 bytes of page skipped */
+ if (((loaddr > 0x7b) && (loaddr <= 0x7f))
+ || loaddr > 0xfb)
+ continue;
+
+ err = idtcm_write(idtcm, regaddr, 0, &val, sizeof(val));
+ }
+
+ if (err)
+ goto out;
+ }
+
+ display_pll_and_masks(idtcm);
+
+out:
+ release_firmware(fw);
+ return err;
+}
+
+static int idtcm_output_enable(struct idtcm_channel *channel,
+ bool enable, unsigned int outn)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+ u8 val;
+
+ err = idtcm_read(idtcm, OUTPUT_MODULE_FROM_INDEX(outn),
+ OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+
+ if (enable)
+ val |= SQUELCH_DISABLE;
+ else
+ val &= ~SQUELCH_DISABLE;
+
+ return idtcm_write(idtcm, OUTPUT_MODULE_FROM_INDEX(outn),
+ OUT_CTRL_1, &val, sizeof(val));
+}
+
+static int idtcm_output_mask_enable(struct idtcm_channel *channel,
+ bool enable)
+{
+ u16 mask;
+ int err;
+ u8 outn;
+
+ mask = channel->output_mask;
+ outn = 0;
+
+ while (mask) {
+
+ if (mask & 0x1) {
+
+ err = idtcm_output_enable(channel, enable, outn);
+
+ if (err)
+ return err;
+ }
+
+ mask >>= 0x1;
+ outn++;
+ }
+
+ return 0;
+}
+
+static int idtcm_perout_enable(struct idtcm_channel *channel,
+ bool enable,
+ struct ptp_perout_request *perout)
+{
+ unsigned int flags = perout->flags;
+
+ if (flags == PEROUT_ENABLE_OUTPUT_MASK)
+ return idtcm_output_mask_enable(channel, enable);
+
+ /* Enable/disable individual output instead */
+ return idtcm_output_enable(channel, enable, perout->index);
+}
+
+static int idtcm_set_pll_mode(struct idtcm_channel *channel,
+ enum pll_mode pll_mode)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+ u8 dpll_mode;
+
+ err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
+
+ dpll_mode |= (pll_mode << PLL_MODE_SHIFT);
+
+ channel->pll_mode = pll_mode;
+
+ err = idtcm_write(idtcm, channel->dpll_n, DPLL_MODE,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* PTP Hardware Clock interface */
+
+/*
+ * Maximum absolute value for write phase offset in picoseconds
+ *
+ * The destination is a signed 32-bit register with a resolution of 50 ps:
+ * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350
+ */
+static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ int err;
+ u8 i;
+ u8 buf[4] = {0};
+ s32 phase_50ps;
+ s64 offset_ps;
+
+ if (channel->pll_mode != PLL_MODE_WRITE_PHASE) {
+
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE);
+
+ if (err)
+ return err;
+
+ channel->write_phase_ready = 0;
+
+ ptp_schedule_worker(channel->ptp_clock,
+ msecs_to_jiffies(WR_PHASE_SETUP_MS));
+ }
+
+ if (!channel->write_phase_ready)
+ delta_ns = 0;
+
+ offset_ps = (s64)delta_ns * 1000;
+
+ /*
+ * Check for 32-bit signed max * 50:
+ *
+ * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350
+ */
+ if (offset_ps > MAX_ABS_WRITE_PHASE_PICOSECONDS)
+ offset_ps = MAX_ABS_WRITE_PHASE_PICOSECONDS;
+ else if (offset_ps < -MAX_ABS_WRITE_PHASE_PICOSECONDS)
+ offset_ps = -MAX_ABS_WRITE_PHASE_PICOSECONDS;
+
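+ /* Convert the clamped offset to the 50 ps units of the write-phase register */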
+ phase_50ps = div64_s64(offset_ps, 50);
+
+ for (i = 0; i < 4; i++) {
+ buf[i] = phase_50ps & 0xff;
+ phase_50ps >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_phase, DPLL_WR_PHASE,
+ buf, sizeof(buf));
+
+ return err;
+}
+
+static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 i;
+ bool neg_adj = false;
+ int err;
+ u8 buf[6] = {0};
+ s64 fcw;
+
+ if (channel->pll_mode != PLL_MODE_WRITE_FREQUENCY) {
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Frequency Control Word unit is: 1.11 * 10^-10 ppm
+ *
+ * adjfreq:
+ * ppb * 10^9
+ * FCW = ----------
+ * 111
+ *
+ * adjfine:
+ * ppm_16 * 5^12
+ * FCW = -------------
+ * 111 * 2^4
+ */
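+ /*
+ * E.g. scaled_ppm = 65536 (1 ppm) gives
+ * FCW = 65536 * 244140625 / 1776 ~= 9009009009.
+ */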
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* From the formula above: 5^12 = 244140625 and 111 * 2^4 = 1776 */
+ fcw = scaled_ppm * 244140625ULL;
+
+ fcw = div_u64(fcw, 1776);
+
+ if (neg_adj)
+ fcw = -fcw;
+
+ for (i = 0; i < 6; i++) {
+ buf[i] = fcw & 0xff;
+ fcw >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_freq, DPLL_WR_FREQ,
+ buf, sizeof(buf));
+
+ return err;
+}
+
+static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_gettime(channel, ts);
+
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_settime(channel, ts, HW_TOD_WR_TRIG_SEL_MSB);
+
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_settime_v487(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_settime_v487(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE);
+
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_adjtime(channel, delta);
+
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_adjtime_v487(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ struct timespec64 ts;
+ enum scsr_tod_write_type_sel type;
+ int err;
+
+ if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS_V487) {
+ err = idtcm_do_phase_pull_in(channel, delta, 0);
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+ return err;
+ }
+
+ if (delta >= 0) {
+ ts = ns_to_timespec64(delta);
+ type = SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS;
+ } else {
+ ts = ns_to_timespec64(-delta);
+ type = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS;
+ }
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_settime_v487(channel, &ts, type);
+
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+
+ struct idtcm *idtcm = channel->idtcm;
+
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_adjphase(channel, delta);
+
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+
+ struct idtcm *idtcm = channel->idtcm;
+
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_adjfine(channel, scaled_ppm);
+
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ int err;
+
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (!on) {
+ err = idtcm_perout_enable(channel, false, &rq->perout);
+ if (err)
+ dev_err(&channel->idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+ return err;
+ }
+
+ /* Only accept a 1-PPS aligned to the second. */
+ if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec)
+ return -ERANGE;
+
+ err = idtcm_perout_enable(channel, true, &rq->perout);
+ if (err)
+ dev_err(&channel->idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+ return err;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int _enable_pll_tod_sync(struct idtcm *idtcm,
+ u8 pll,
+ u8 sync_src,
+ u8 qn,
+ u8 qn_plus_1)
+{
+ int err;
+ u8 val;
+ u16 dpll;
+ u16 out0 = 0, out1 = 0;
+
+ if ((qn == 0) && (qn_plus_1 == 0))
+ return 0;
+
+ switch (pll) {
+ case 0:
+ dpll = DPLL_0;
+ if (qn)
+ out0 = OUTPUT_0;
+ if (qn_plus_1)
+ out1 = OUTPUT_1;
+ break;
+ case 1:
+ dpll = DPLL_1;
+ if (qn)
+ out0 = OUTPUT_2;
+ if (qn_plus_1)
+ out1 = OUTPUT_3;
+ break;
+ case 2:
+ dpll = DPLL_2;
+ if (qn)
+ out0 = OUTPUT_4;
+ if (qn_plus_1)
+ out1 = OUTPUT_5;
+ break;
+ case 3:
+ dpll = DPLL_3;
+ if (qn)
+ out0 = OUTPUT_6;
+ if (qn_plus_1)
+ out1 = OUTPUT_7;
+ break;
+ case 4:
+ dpll = DPLL_4;
+ if (qn)
+ out0 = OUTPUT_8;
+ break;
+ case 5:
+ dpll = DPLL_5;
+ if (qn)
+ out0 = OUTPUT_9;
+ if (qn_plus_1)
+ out1 = OUTPUT_8;
+ break;
+ case 6:
+ dpll = DPLL_6;
+ if (qn)
+ out0 = OUTPUT_10;
+ if (qn_plus_1)
+ out1 = OUTPUT_11;
+ break;
+ case 7:
+ dpll = DPLL_7;
+ if (qn)
+ out0 = OUTPUT_11;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Enable OUTPUT OUT_SYNC.
+ */
+ if (out0) {
+ err = idtcm_read(idtcm, out0, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+
+ val &= ~OUT_SYNC_DISABLE;
+
+ err = idtcm_write(idtcm, out0, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+ }
+
+ if (out1) {
+ err = idtcm_read(idtcm, out1, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+
+ val &= ~OUT_SYNC_DISABLE;
+
+ err = idtcm_write(idtcm, out1, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+ }
+
+ /* enable dpll sync tod pps, must be set before dpll_mode */
+ err = idtcm_read(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val));
+ if (err)
+ return err;
+
+ val &= ~(TOD_SYNC_SOURCE_MASK << TOD_SYNC_SOURCE_SHIFT);
+ val |= (sync_src << TOD_SYNC_SOURCE_SHIFT);
+ val |= TOD_SYNC_EN;
+
+ return idtcm_write(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val));
+}
+
+static int idtcm_enable_tod_sync(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 pll;
+ u8 sync_src;
+ u8 qn;
+ u8 qn_plus_1;
+ u8 cfg;
+ int err = 0;
+ u16 output_mask = channel->output_mask;
+ u8 out8_mux = 0;
+ u8 out11_mux = 0;
+ u8 temp;
+
+ /*
+ * set tod_out_sync_enable to 0.
+ */
+ err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ cfg &= ~TOD_OUT_SYNC_ENABLE;
+
+ err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ switch (channel->tod_n) {
+ case TOD_0:
+ sync_src = 0;
+ break;
+ case TOD_1:
+ sync_src = 1;
+ break;
+ case TOD_2:
+ sync_src = 2;
+ break;
+ case TOD_3:
+ sync_src = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ if ((temp & Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
+ Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
+ out8_mux = 1;
+
+ err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ if ((temp & Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
+ Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
+ out11_mux = 1;
+
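+ /* Same output_mask walk as in idtcm_sync_pps_output(): bit n maps to OUTn */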
+ for (pll = 0; pll < 8; pll++) {
+ qn = 0;
+ qn_plus_1 = 0;
+
+ if (pll < 4) {
+ /* The first 4 PLLs have 2 outputs each */
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ } else if (pll == 4) {
+ if (out8_mux == 0) {
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ } else if (pll == 5) {
+ if (out8_mux) {
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ } else if (pll == 6) {
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ if (out11_mux) {
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ } else if (pll == 7) {
+ if (out11_mux == 0) {
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ }
+
+ if ((qn != 0) || (qn_plus_1 != 0))
+ err = _enable_pll_tod_sync(idtcm, pll, sync_src, qn,
+ qn_plus_1);
+
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int idtcm_enable_tod(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ struct timespec64 ts = {0, 0};
+ u8 cfg;
+ int err;
+
+ /*
+ * Start the TOD clock ticking.
+ */
+ err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ cfg |= TOD_ENABLE;
+
+ err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ return _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB);
+}
+
+static void idtcm_display_version_info(struct idtcm *idtcm)
+{
+ u8 major;
+ u8 minor;
+ u8 hotfix;
+ u16 product_id;
+ u8 hw_rev_id;
+ u8 config_select;
+ char *fmt = "%d.%d.%d, Id: 0x%04x HW Rev: %d OTP Config Select: %d\n";
+
+ idtcm_read_major_release(idtcm, &major);
+ idtcm_read_minor_release(idtcm, &minor);
+ idtcm_read_hotfix_release(idtcm, &hotfix);
+
+ idtcm_read_product_id(idtcm, &product_id);
+ idtcm_read_hw_rev_id(idtcm, &hw_rev_id);
+
+ idtcm_read_otp_scsr_config_select(idtcm, &config_select);
+
+ snprintf(idtcm->version, sizeof(idtcm->version), "%u.%u.%u",
+ major, minor, hotfix);
+
+ dev_info(&idtcm->client->dev, fmt, major, minor, hotfix,
+ product_id, hw_rev_id, config_select);
+}
+
+static const struct ptp_clock_info idtcm_caps_v487 = {
+ .owner = THIS_MODULE,
+ .max_adj = 244000,
+ .n_per_out = 12,
+ .adjphase = &idtcm_adjphase,
+ .adjfine = &idtcm_adjfine,
+ .adjtime = &idtcm_adjtime_v487,
+ .gettime64 = &idtcm_gettime,
+ .settime64 = &idtcm_settime_v487,
+ .enable = &idtcm_enable,
+ .do_aux_work = &set_write_phase_ready,
+};
+
+static const struct ptp_clock_info idtcm_caps = {
+ .owner = THIS_MODULE,
+ .max_adj = 244000,
+ .n_per_out = 12,
+ .adjphase = &idtcm_adjphase,
+ .adjfine = &idtcm_adjfine,
+ .adjtime = &idtcm_adjtime,
+ .gettime64 = &idtcm_gettime,
+ .settime64 = &idtcm_settime,
+ .enable = &idtcm_enable,
+ .do_aux_work = &set_write_phase_ready,
+};
+
+static int configure_channel_pll(struct idtcm_channel *channel)
+{
+ int err = 0;
+
+ switch (channel->pll) {
+ case 0:
+ channel->dpll_freq = DPLL_FREQ_0;
+ channel->dpll_n = DPLL_0;
+ channel->hw_dpll_n = HW_DPLL_0;
+ channel->dpll_phase = DPLL_PHASE_0;
+ channel->dpll_ctrl_n = DPLL_CTRL_0;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_0;
+ break;
+ case 1:
+ channel->dpll_freq = DPLL_FREQ_1;
+ channel->dpll_n = DPLL_1;
+ channel->hw_dpll_n = HW_DPLL_1;
+ channel->dpll_phase = DPLL_PHASE_1;
+ channel->dpll_ctrl_n = DPLL_CTRL_1;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_1;
+ break;
+ case 2:
+ channel->dpll_freq = DPLL_FREQ_2;
+ channel->dpll_n = DPLL_2;
+ channel->hw_dpll_n = HW_DPLL_2;
+ channel->dpll_phase = DPLL_PHASE_2;
+ channel->dpll_ctrl_n = DPLL_CTRL_2;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_2;
+ break;
+ case 3:
+ channel->dpll_freq = DPLL_FREQ_3;
+ channel->dpll_n = DPLL_3;
+ channel->hw_dpll_n = HW_DPLL_3;
+ channel->dpll_phase = DPLL_PHASE_3;
+ channel->dpll_ctrl_n = DPLL_CTRL_3;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_3;
+ break;
+ case 4:
+ channel->dpll_freq = DPLL_FREQ_4;
+ channel->dpll_n = DPLL_4;
+ channel->hw_dpll_n = HW_DPLL_4;
+ channel->dpll_phase = DPLL_PHASE_4;
+ channel->dpll_ctrl_n = DPLL_CTRL_4;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_4;
+ break;
+ case 5:
+ channel->dpll_freq = DPLL_FREQ_5;
+ channel->dpll_n = DPLL_5;
+ channel->hw_dpll_n = HW_DPLL_5;
+ channel->dpll_phase = DPLL_PHASE_5;
+ channel->dpll_ctrl_n = DPLL_CTRL_5;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_5;
+ break;
+ case 6:
+ channel->dpll_freq = DPLL_FREQ_6;
+ channel->dpll_n = DPLL_6;
+ channel->hw_dpll_n = HW_DPLL_6;
+ channel->dpll_phase = DPLL_PHASE_6;
+ channel->dpll_ctrl_n = DPLL_CTRL_6;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_6;
+ break;
+ case 7:
+ channel->dpll_freq = DPLL_FREQ_7;
+ channel->dpll_n = DPLL_7;
+ channel->hw_dpll_n = HW_DPLL_7;
+ channel->dpll_phase = DPLL_PHASE_7;
+ channel->dpll_ctrl_n = DPLL_CTRL_7;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_7;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
+{
+ struct idtcm_channel *channel;
+ int err;
+
+ if (index >= MAX_TOD)
+ return -EINVAL;
+
+ channel = &idtcm->channel[index];
+
+ /* Set pll addresses */
+ err = configure_channel_pll(channel);
+ if (err)
+ return err;
+
+ /* Set tod addresses */
+ switch (index) {
+ case 0:
+ channel->tod_read_primary = TOD_READ_PRIMARY_0;
+ channel->tod_write = TOD_WRITE_0;
+ channel->tod_n = TOD_0;
+ break;
+ case 1:
+ channel->tod_read_primary = TOD_READ_PRIMARY_1;
+ channel->tod_write = TOD_WRITE_1;
+ channel->tod_n = TOD_1;
+ break;
+ case 2:
+ channel->tod_read_primary = TOD_READ_PRIMARY_2;
+ channel->tod_write = TOD_WRITE_2;
+ channel->tod_n = TOD_2;
+ break;
+ case 3:
+ channel->tod_read_primary = TOD_READ_PRIMARY_3;
+ channel->tod_write = TOD_WRITE_3;
+ channel->tod_n = TOD_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ channel->idtcm = idtcm;
+
+ if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0)
+ channel->caps = idtcm_caps_v487;
+ else
+ channel->caps = idtcm_caps;
+
+ snprintf(channel->caps.name, sizeof(channel->caps.name),
+ "IDT CM TOD%u", index);
+
+ if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0) {
+ err = idtcm_enable_tod_sync(channel);
+ if (err) {
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+ return err;
+ }
+ }
+
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+ if (err) {
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+ return err;
+ }
+
+ err = idtcm_enable_tod(channel);
+ if (err) {
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+ return err;
+ }
+
+ channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
+
+ if (IS_ERR(channel->ptp_clock)) {
+ err = PTR_ERR(channel->ptp_clock);
+ channel->ptp_clock = NULL;
+ return err;
+ }
+
+ if (!channel->ptp_clock)
+ return -ENOTSUPP;
+
+ channel->write_phase_ready = 0;
+
+ dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d\n",
+ index, channel->ptp_clock->index);
+
+ return 0;
+}
+
+static void ptp_clock_unregister_all(struct idtcm *idtcm)
+{
+ u8 i;
+ struct idtcm_channel *channel;
+
+ for (i = 0; i < MAX_TOD; i++) {
+
+ channel = &idtcm->channel[i];
+
+ if (channel->ptp_clock)
+ ptp_clock_unregister(channel->ptp_clock);
+ }
+}
+
+static void set_default_masks(struct idtcm *idtcm)
+{
+ idtcm->tod_mask = DEFAULT_TOD_MASK;
+
+ idtcm->channel[0].pll = DEFAULT_TOD0_PTP_PLL;
+ idtcm->channel[1].pll = DEFAULT_TOD1_PTP_PLL;
+ idtcm->channel[2].pll = DEFAULT_TOD2_PTP_PLL;
+ idtcm->channel[3].pll = DEFAULT_TOD3_PTP_PLL;
+
+ idtcm->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
+ idtcm->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
+ idtcm->channel[2].output_mask = DEFAULT_OUTPUT_MASK_PLL2;
+ idtcm->channel[3].output_mask = DEFAULT_OUTPUT_MASK_PLL3;
+}
+
+static int idtcm_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct idtcm *idtcm;
+ int err;
+ u8 i;
+ char *fmt = "Failed at %d in line %s with channel output %d!\n";
+
+ /* Unused for now */
+ (void)id;
+
+ idtcm = devm_kzalloc(&client->dev, sizeof(struct idtcm), GFP_KERNEL);
+
+ if (!idtcm)
+ return -ENOMEM;
+
+ idtcm->client = client;
+ idtcm->page_offset = 0xff;
+ idtcm->calculate_overhead_flag = 0;
+
+ set_default_masks(idtcm);
+
+ mutex_init(&idtcm->reg_lock);
+ mutex_lock(&idtcm->reg_lock);
+
+ idtcm_display_version_info(idtcm);
+
+ err = idtcm_load_firmware(idtcm, &client->dev);
+
+ if (err)
+ dev_warn(&idtcm->client->dev,
+ "loading firmware failed with %d\n", err);
+
+ if (idtcm->tod_mask) {
+ for (i = 0; i < MAX_TOD; i++) {
+ if (idtcm->tod_mask & (1 << i)) {
+ err = idtcm_enable_channel(idtcm, i);
+ if (err) {
+ dev_err(&idtcm->client->dev,
+ fmt,
+ __LINE__,
+ __func__,
+ i);
+ break;
+ }
+ }
+ }
+ } else {
+ dev_err(&idtcm->client->dev,
+ "no PLLs flagged as PHCs, nothing to do\n");
+ err = -ENODEV;
+ }
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ if (err) {
+ ptp_clock_unregister_all(idtcm);
+ return err;
+ }
+
+ i2c_set_clientdata(client, idtcm);
+
+ return 0;
+}
+
+static int idtcm_remove(struct i2c_client *client)
+{
+ struct idtcm *idtcm = i2c_get_clientdata(client);
+
+ ptp_clock_unregister_all(idtcm);
+
+ mutex_destroy(&idtcm->reg_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id idtcm_dt_id[] = {
+ { .compatible = "idt,8a34000" },
+ { .compatible = "idt,8a34001" },
+ { .compatible = "idt,8a34002" },
+ { .compatible = "idt,8a34003" },
+ { .compatible = "idt,8a34004" },
+ { .compatible = "idt,8a34005" },
+ { .compatible = "idt,8a34006" },
+ { .compatible = "idt,8a34007" },
+ { .compatible = "idt,8a34008" },
+ { .compatible = "idt,8a34009" },
+ { .compatible = "idt,8a34010" },
+ { .compatible = "idt,8a34011" },
+ { .compatible = "idt,8a34012" },
+ { .compatible = "idt,8a34013" },
+ { .compatible = "idt,8a34014" },
+ { .compatible = "idt,8a34015" },
+ { .compatible = "idt,8a34016" },
+ { .compatible = "idt,8a34017" },
+ { .compatible = "idt,8a34018" },
+ { .compatible = "idt,8a34019" },
+ { .compatible = "idt,8a34040" },
+ { .compatible = "idt,8a34041" },
+ { .compatible = "idt,8a34042" },
+ { .compatible = "idt,8a34043" },
+ { .compatible = "idt,8a34044" },
+ { .compatible = "idt,8a34045" },
+ { .compatible = "idt,8a34046" },
+ { .compatible = "idt,8a34047" },
+ { .compatible = "idt,8a34048" },
+ { .compatible = "idt,8a34049" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, idtcm_dt_id);
+#endif
+
+static const struct i2c_device_id idtcm_i2c_id[] = {
+ { "8a34000" },
+ { "8a34001" },
+ { "8a34002" },
+ { "8a34003" },
+ { "8a34004" },
+ { "8a34005" },
+ { "8a34006" },
+ { "8a34007" },
+ { "8a34008" },
+ { "8a34009" },
+ { "8a34010" },
+ { "8a34011" },
+ { "8a34012" },
+ { "8a34013" },
+ { "8a34014" },
+ { "8a34015" },
+ { "8a34016" },
+ { "8a34017" },
+ { "8a34018" },
+ { "8a34019" },
+ { "8a34040" },
+ { "8a34041" },
+ { "8a34042" },
+ { "8a34043" },
+ { "8a34044" },
+ { "8a34045" },
+ { "8a34046" },
+ { "8a34047" },
+ { "8a34048" },
+ { "8a34049" },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, idtcm_i2c_id);
+
+static struct i2c_driver idtcm_driver = {
+ .driver = {
+ .of_match_table = of_match_ptr(idtcm_dt_id),
+ .name = "idtcm",
+ },
+ .probe = idtcm_probe,
+ .remove = idtcm_remove,
+ .id_table = idtcm_i2c_id,
+};
+
+module_i2c_driver(idtcm_driver);
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
new file mode 100644
index 000000000..82840d723
--- /dev/null
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PTP hardware clock driver for the IDT ClockMatrix(TM) family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef PTP_IDTCLOCKMATRIX_H
+#define PTP_IDTCLOCKMATRIX_H
+
+#include <linux/ktime.h>
+
+#include "idt8a340_reg.h"
+
+#define FW_FILENAME "idtcm.bin"
+#define MAX_TOD (4)
+#define MAX_PLL (8)
+
+#define MAX_ABS_WRITE_PHASE_PICOSECONDS (107374182350LL)
+
+#define TOD_MASK_ADDR (0xFFA5)
+#define DEFAULT_TOD_MASK (0x04)
+
+#define SET_U16_LSB(orig, val8) (orig = (0xff00 & (orig)) | (val8))
+#define SET_U16_MSB(orig, val8) (orig = (0x00ff & (orig)) | (val8 << 8))
+
+#define TOD0_PTP_PLL_ADDR (0xFFA8)
+#define TOD1_PTP_PLL_ADDR (0xFFA9)
+#define TOD2_PTP_PLL_ADDR (0xFFAA)
+#define TOD3_PTP_PLL_ADDR (0xFFAB)
+
+#define TOD0_OUT_ALIGN_MASK_ADDR (0xFFB0)
+#define TOD1_OUT_ALIGN_MASK_ADDR (0xFFB2)
+#define TOD2_OUT_ALIGN_MASK_ADDR (0xFFB4)
+#define TOD3_OUT_ALIGN_MASK_ADDR (0xFFB6)
+
+#define DEFAULT_OUTPUT_MASK_PLL0 (0x003)
+#define DEFAULT_OUTPUT_MASK_PLL1 (0x00c)
+#define DEFAULT_OUTPUT_MASK_PLL2 (0x030)
+#define DEFAULT_OUTPUT_MASK_PLL3 (0x0c0)
+
+#define DEFAULT_TOD0_PTP_PLL (0)
+#define DEFAULT_TOD1_PTP_PLL (1)
+#define DEFAULT_TOD2_PTP_PLL (2)
+#define DEFAULT_TOD3_PTP_PLL (3)
+
+#define POST_SM_RESET_DELAY_MS (3000)
+#define PHASE_PULL_IN_THRESHOLD_NS (150000)
+#define PHASE_PULL_IN_THRESHOLD_NS_V487 (15000)
+#define TOD_WRITE_OVERHEAD_COUNT_MAX (2)
+#define TOD_BYTE_COUNT (11)
+#define WR_PHASE_SETUP_MS (5000)
+
+#define OUTPUT_MODULE_FROM_INDEX(index) (OUTPUT_0 + (index) * 0x10)
+
+#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
+
+#define IDTCM_MAX_WRITE_COUNT (512)
+
+/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_NORMAL = PLL_MODE_MIN,
+ PLL_MODE_WRITE_PHASE = 1,
+ PLL_MODE_WRITE_FREQUENCY = 2,
+ PLL_MODE_GPIO_INC_DEC = 3,
+ PLL_MODE_SYNTHESIS = 4,
+ PLL_MODE_PHASE_MEASUREMENT = 5,
+ PLL_MODE_DISABLED = 6,
+ PLL_MODE_MAX = PLL_MODE_DISABLED,
+};
+
+enum hw_tod_write_trig_sel {
+ HW_TOD_WR_TRIG_SEL_MIN = 0,
+ HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
+ HW_TOD_WR_TRIG_SEL_RESERVED = 1,
+ HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
+ HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
+ HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
+ HW_TOD_WR_TRIG_SEL_GPIO = 5,
+ HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_trig_sel {
+ SCSR_TOD_WR_TRIG_SEL_DISABLE = 0,
+ SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1,
+ SCSR_TOD_WR_TRIG_SEL_REFCLK = 2,
+ SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3,
+ SCSR_TOD_WR_TRIG_SEL_TODPPS = 4,
+ SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5,
+ SCSR_TOD_WR_TRIG_SEL_GPIO = 6,
+ SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_type_sel {
+ SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2,
+ SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS,
+};
+
+struct idtcm;
+
+struct idtcm_channel {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ struct idtcm *idtcm;
+ u16 dpll_phase;
+ u16 dpll_freq;
+ u16 dpll_n;
+ u16 dpll_ctrl_n;
+ u16 dpll_phase_pull_in;
+ u16 tod_read_primary;
+ u16 tod_write;
+ u16 tod_n;
+ u16 hw_dpll_n;
+ enum pll_mode pll_mode;
+ u8 pll;
+ u16 output_mask;
+ int write_phase_ready;
+};
+
+struct idtcm {
+ struct idtcm_channel channel[MAX_TOD];
+ struct i2c_client *client;
+ u8 page_offset;
+ u8 tod_mask;
+ char version[16];
+
+ /* Overhead calculation for adjtime */
+ u8 calculate_overhead_flag;
+ s64 tod_write_overhead_ns;
+ ktime_t start_time;
+
+ /* Protects I2C read/modify/write registers from concurrent access */
+ struct mutex reg_lock;
+};
+
+struct idtcm_fwrc {
+ u8 hiaddr;
+ u8 loaddr;
+ u8 value;
+ u8 reserved;
+} __packed;
+
+#endif /* PTP_IDTCLOCKMATRIX_H */
diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c
new file mode 100644
index 000000000..82d31ba32
--- /dev/null
+++ b/drivers/ptp/ptp_dte.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/types.h>
+
+#define DTE_NCO_LOW_TIME_REG 0x00
+#define DTE_NCO_TIME_REG 0x04
+#define DTE_NCO_OVERFLOW_REG 0x08
+#define DTE_NCO_INC_REG 0x0c
+
+#define DTE_NCO_SUM2_MASK 0xffffffff
+#define DTE_NCO_SUM2_SHIFT 4ULL
+
+#define DTE_NCO_SUM3_MASK 0xff
+#define DTE_NCO_SUM3_SHIFT 36ULL
+#define DTE_NCO_SUM3_WR_SHIFT 8
+
+#define DTE_NCO_TS_WRAP_MASK 0xfff
+#define DTE_NCO_TS_WRAP_LSHIFT 32
+
+#define DTE_NCO_INC_DEFAULT 0x80000000
+#define DTE_NUM_REGS_TO_RESTORE 4
+
+/* Full wrap around is 44 bits in ns (~4.887 hrs) */
+#define DTE_WRAP_AROUND_NSEC_SHIFT 44
+
+/* 44 bits NCO */
+#define DTE_NCO_MAX_NS 0xFFFFFFFFFFFLL
+
+/* 125MHz with 3.29 reg cfg */
+#define DTE_PPB_ADJ(ppb) (u32)(div64_u64((((u64)abs(ppb) * BIT(28)) +\
+ 62500000ULL), 125000000ULL))
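+/*
+ * DTE_PPB_ADJ(ppb) evaluates to |ppb| * 2^28 / 125000000, which equals
+ * (|ppb| / 10^9) * DTE_NCO_INC_DEFAULT; adding 62500000 (half the divisor)
+ * rounds to the nearest count. E.g. ppb = 1000 (1 ppm) adjusts the NCO
+ * increment by ~2147 counts.
+ */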
+
+/* ptp dte priv structure */
+struct ptp_dte {
+ void __iomem *regs;
+ struct ptp_clock *ptp_clk;
+ struct ptp_clock_info caps;
+ struct device *dev;
+ u32 ts_ovf_last;
+ u32 ts_wrap_cnt;
+ spinlock_t lock;
+ u32 reg_val[DTE_NUM_REGS_TO_RESTORE];
+};
+
+static void dte_write_nco(void __iomem *regs, s64 ns)
+{
+ u32 sum2, sum3;
+
+ sum2 = (u32)((ns >> DTE_NCO_SUM2_SHIFT) & DTE_NCO_SUM2_MASK);
+ /* compensate for ignoring sum1 */
+ if (sum2 != DTE_NCO_SUM2_MASK)
+ sum2++;
+
+ /* to write sum3, bits [15:8] need to be written */
+ sum3 = (u32)(((ns >> DTE_NCO_SUM3_SHIFT) & DTE_NCO_SUM3_MASK) <<
+ DTE_NCO_SUM3_WR_SHIFT);
+
+ writel(0, (regs + DTE_NCO_LOW_TIME_REG));
+ writel(sum2, (regs + DTE_NCO_TIME_REG));
+ writel(sum3, (regs + DTE_NCO_OVERFLOW_REG));
+}
+
+static s64 dte_read_nco(void __iomem *regs)
+{
+ u32 sum2, sum3;
+ s64 ns;
+
+ /*
+ * ignoring sum1 (4 bits) gives a 16ns resolution, which
+ * works due to the async register read.
+ */
+ sum3 = readl(regs + DTE_NCO_OVERFLOW_REG) & DTE_NCO_SUM3_MASK;
+ sum2 = readl(regs + DTE_NCO_TIME_REG);
+ ns = ((s64)sum3 << DTE_NCO_SUM3_SHIFT) |
+ ((s64)sum2 << DTE_NCO_SUM2_SHIFT);
+
+ return ns;
+}
+
+static void dte_write_nco_delta(struct ptp_dte *ptp_dte, s64 delta)
+{
+ s64 ns;
+
+ ns = dte_read_nco(ptp_dte->regs);
+
+ /* handle wraparound conditions */
+ if ((delta < 0) && (abs(delta) > ns)) {
+ if (ptp_dte->ts_wrap_cnt) {
+ ns += DTE_NCO_MAX_NS + delta;
+ ptp_dte->ts_wrap_cnt--;
+ } else {
+ ns = 0;
+ }
+ } else {
+ ns += delta;
+ if (ns > DTE_NCO_MAX_NS) {
+ ptp_dte->ts_wrap_cnt++;
+ ns -= DTE_NCO_MAX_NS;
+ }
+ }
+
+ dte_write_nco(ptp_dte->regs, ns);
+
+ ptp_dte->ts_ovf_last = (ns >> DTE_NCO_TS_WRAP_LSHIFT) &
+ DTE_NCO_TS_WRAP_MASK;
+}
+
+static s64 dte_read_nco_with_ovf(struct ptp_dte *ptp_dte)
+{
+ u32 ts_ovf;
+ s64 ns = 0;
+
+ ns = dte_read_nco(ptp_dte->regs);
+
+ /* Timestamp overflow: 8 LSB bits of sum3, 4 MSB bits of sum2 */
+ ts_ovf = (ns >> DTE_NCO_TS_WRAP_LSHIFT) & DTE_NCO_TS_WRAP_MASK;
+
+ /* Check for wrap around */
+ if (ts_ovf < ptp_dte->ts_ovf_last)
+ ptp_dte->ts_wrap_cnt++;
+
+ ptp_dte->ts_ovf_last = ts_ovf;
+
+ /* adjust for wraparounds */
+ ns += (s64)(BIT_ULL(DTE_WRAP_AROUND_NSEC_SHIFT) * ptp_dte->ts_wrap_cnt);
+
+ return ns;
+}
+
+static int ptp_dte_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u32 nco_incr;
+ unsigned long flags;
+ struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);
+
+ if (abs(ppb) > ptp_dte->caps.max_adj) {
+ dev_err(ptp_dte->dev, "ppb adj too big\n");
+ return -EINVAL;
+ }
+
+ if (ppb < 0)
+ nco_incr = DTE_NCO_INC_DEFAULT - DTE_PPB_ADJ(ppb);
+ else
+ nco_incr = DTE_NCO_INC_DEFAULT + DTE_PPB_ADJ(ppb);
+
+ spin_lock_irqsave(&ptp_dte->lock, flags);
+ writel(nco_incr, ptp_dte->regs + DTE_NCO_INC_REG);
+ spin_unlock_irqrestore(&ptp_dte->lock, flags);
+
+ return 0;
+}
+
+static int ptp_dte_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ unsigned long flags;
+ struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);
+
+ spin_lock_irqsave(&ptp_dte->lock, flags);
+ dte_write_nco_delta(ptp_dte, delta);
+ spin_unlock_irqrestore(&ptp_dte->lock, flags);
+
+ return 0;
+}
+
+static int ptp_dte_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ unsigned long flags;
+ struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);
+
+ spin_lock_irqsave(&ptp_dte->lock, flags);
+ *ts = ns_to_timespec64(dte_read_nco_with_ovf(ptp_dte));
+ spin_unlock_irqrestore(&ptp_dte->lock, flags);
+
+ return 0;
+}
+
+static int ptp_dte_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ unsigned long flags;
+ struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);
+
+ spin_lock_irqsave(&ptp_dte->lock, flags);
+
+ /* Disable nco increment */
+ writel(0, ptp_dte->regs + DTE_NCO_INC_REG);
+
+ dte_write_nco(ptp_dte->regs, timespec64_to_ns(ts));
+
+ /* reset overflow and wrap counter */
+ ptp_dte->ts_ovf_last = 0;
+ ptp_dte->ts_wrap_cnt = 0;
+
+ /* Enable nco increment */
+ writel(DTE_NCO_INC_DEFAULT, ptp_dte->regs + DTE_NCO_INC_REG);
+
+ spin_unlock_irqrestore(&ptp_dte->lock, flags);
+
+ return 0;
+}
+
+static int ptp_dte_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info ptp_dte_caps = {
+ .owner = THIS_MODULE,
+ .name = "DTE PTP timer",
+ .max_adj = 50000000,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = ptp_dte_adjfreq,
+ .adjtime = ptp_dte_adjtime,
+ .gettime64 = ptp_dte_gettime,
+ .settime64 = ptp_dte_settime,
+ .enable = ptp_dte_enable,
+};
+
+static int ptp_dte_probe(struct platform_device *pdev)
+{
+ struct ptp_dte *ptp_dte;
+ struct device *dev = &pdev->dev;
+
+ ptp_dte = devm_kzalloc(dev, sizeof(struct ptp_dte), GFP_KERNEL);
+ if (!ptp_dte)
+ return -ENOMEM;
+
+ ptp_dte->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ptp_dte->regs))
+ return PTR_ERR(ptp_dte->regs);
+
+ spin_lock_init(&ptp_dte->lock);
+
+ ptp_dte->dev = dev;
+ ptp_dte->caps = ptp_dte_caps;
+ ptp_dte->ptp_clk = ptp_clock_register(&ptp_dte->caps, &pdev->dev);
+ if (IS_ERR(ptp_dte->ptp_clk)) {
+ dev_err(dev,
+ "%s: Failed to register ptp clock\n", __func__);
+ return PTR_ERR(ptp_dte->ptp_clk);
+ }
+
+ platform_set_drvdata(pdev, ptp_dte);
+
+ dev_info(dev, "ptp clk probe done\n");
+
+ return 0;
+}
+
+static int ptp_dte_remove(struct platform_device *pdev)
+{
+ struct ptp_dte *ptp_dte = platform_get_drvdata(pdev);
+ u8 i;
+
+ ptp_clock_unregister(ptp_dte->ptp_clk);
+
+ for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++)
+ writel(0, ptp_dte->regs + (i * sizeof(u32)));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ptp_dte_suspend(struct device *dev)
+{
+ struct ptp_dte *ptp_dte = dev_get_drvdata(dev);
+ u8 i;
+
+ for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++) {
+ ptp_dte->reg_val[i] =
+ readl(ptp_dte->regs + (i * sizeof(u32)));
+ }
+
+ /* disable the nco */
+ writel(0, ptp_dte->regs + DTE_NCO_INC_REG);
+
+ return 0;
+}
+
+static int ptp_dte_resume(struct device *dev)
+{
+ struct ptp_dte *ptp_dte = dev_get_drvdata(dev);
+ u8 i;
+
+ for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++) {
+ if ((i * sizeof(u32)) != DTE_NCO_OVERFLOW_REG)
+ writel(ptp_dte->reg_val[i],
+ (ptp_dte->regs + (i * sizeof(u32))));
+ else
+ writel(((ptp_dte->reg_val[i] &
+ DTE_NCO_SUM3_MASK) << DTE_NCO_SUM3_WR_SHIFT),
+ (ptp_dte->regs + (i * sizeof(u32))));
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops ptp_dte_pm_ops = {
+ .suspend = ptp_dte_suspend,
+ .resume = ptp_dte_resume
+};
+
+#define PTP_DTE_PM_OPS (&ptp_dte_pm_ops)
+#else
+#define PTP_DTE_PM_OPS NULL
+#endif
+
+static const struct of_device_id ptp_dte_of_match[] = {
+ { .compatible = "brcm,ptp-dte", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ptp_dte_of_match);
+
+static struct platform_driver ptp_dte_driver = {
+ .driver = {
+ .name = "ptp-dte",
+ .pm = PTP_DTE_PM_OPS,
+ .of_match_table = ptp_dte_of_match,
+ },
+ .probe = ptp_dte_probe,
+ .remove = ptp_dte_remove,
+};
+module_platform_driver(ptp_dte_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom DTE PTP Clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c
new file mode 100644
index 000000000..179f6c472
--- /dev/null
+++ b/drivers/ptp/ptp_idt82p33.c
@@ -0,0 +1,1008 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 Integrated Device Technology, Inc
+//
+
+#define pr_fmt(fmt) "IDT_82p33xxx: " fmt
+
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/timekeeping.h>
+#include <linux/bitops.h>
+
+#include "ptp_private.h"
+#include "ptp_idt82p33.h"
+
+MODULE_DESCRIPTION("Driver for IDT 82p33xxx clock devices");
+MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+/* Module Parameters */
+static u32 sync_tod_timeout = SYNC_TOD_TIMEOUT_SEC;
+module_param(sync_tod_timeout, uint, 0);
+MODULE_PARM_DESC(sync_tod_timeout,
+"duration in second to keep SYNC_TOD on (set to 0 to keep it always on)");
+
+static u32 phase_snap_threshold = SNAP_THRESHOLD_NS;
+module_param(phase_snap_threshold, uint, 0);
+MODULE_PARM_DESC(phase_snap_threshold,
+"threshold (150000ns by default) below which adjtime would ignore");
+
+static void idt82p33_byte_array_to_timespec(struct timespec64 *ts,
+ u8 buf[TOD_BYTE_COUNT])
+{
+ time64_t sec;
+ s32 nsec;
+ u8 i;
+
+ nsec = buf[3];
+ for (i = 0; i < 3; i++) {
+ nsec <<= 8;
+ nsec |= buf[2 - i];
+ }
+
+ sec = buf[9];
+ for (i = 0; i < 5; i++) {
+ sec <<= 8;
+ sec |= buf[8 - i];
+ }
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+static void idt82p33_timespec_to_byte_array(struct timespec64 const *ts,
+ u8 buf[TOD_BYTE_COUNT])
+{
+ time64_t sec;
+ s32 nsec;
+ u8 i;
+
+ nsec = ts->tv_nsec;
+ sec = ts->tv_sec;
+
+ for (i = 0; i < 4; i++) {
+ buf[i] = nsec & 0xff;
+ nsec >>= 8;
+ }
+
+ for (i = 4; i < TOD_BYTE_COUNT; i++) {
+ buf[i] = sec & 0xff;
+ sec >>= 8;
+ }
+}
+
+static int idt82p33_xfer(struct idt82p33 *idt82p33,
+ unsigned char regaddr,
+ unsigned char *buf,
+ unsigned int count,
+ int write)
+{
+ struct i2c_client *client = idt82p33->client;
+ struct i2c_msg msg[2];
+ int cnt;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &regaddr;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = write ? 0 : I2C_M_RD;
+ msg[1].len = count;
+ msg[1].buf = buf;
+
+ cnt = i2c_transfer(client->adapter, msg, 2);
+ if (cnt < 0) {
+ dev_err(&client->dev, "i2c_transfer returned %d\n", cnt);
+ return cnt;
+ } else if (cnt != 2) {
+ dev_err(&client->dev,
+ "i2c_transfer sent only %d of %d messages\n", cnt, 2);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int idt82p33_page_offset(struct idt82p33 *idt82p33, unsigned char val)
+{
+ int err;
+
+ if (idt82p33->page_offset == val)
+ return 0;
+
+ err = idt82p33_xfer(idt82p33, PAGE_ADDR, &val, sizeof(val), 1);
+ if (err)
+ dev_err(&idt82p33->client->dev,
+ "failed to set page offset %d\n", val);
+ else
+ idt82p33->page_offset = val;
+
+ return err;
+}
+
+static int idt82p33_rdwr(struct idt82p33 *idt82p33, unsigned int regaddr,
+ unsigned char *buf, unsigned int count, bool write)
+{
+ u8 offset, page;
+ int err;
+
+ page = _PAGE(regaddr);
+ offset = _OFFSET(regaddr);
+
+ err = idt82p33_page_offset(idt82p33, page);
+ if (err)
+ goto out;
+
+ err = idt82p33_xfer(idt82p33, offset, buf, count, write);
+out:
+ return err;
+}
+
+static int idt82p33_read(struct idt82p33 *idt82p33, unsigned int regaddr,
+ unsigned char *buf, unsigned int count)
+{
+ return idt82p33_rdwr(idt82p33, regaddr, buf, count, false);
+}
+
+static int idt82p33_write(struct idt82p33 *idt82p33, unsigned int regaddr,
+ unsigned char *buf, unsigned int count)
+{
+ return idt82p33_rdwr(idt82p33, regaddr, buf, count, true);
+}
+
+static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel,
+ enum pll_mode mode)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 dpll_mode;
+ int err;
+
+ if (channel->pll_mode == mode)
+ return 0;
+
+ err = idt82p33_read(idt82p33, channel->dpll_mode_cnfg,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
+
+ dpll_mode |= (mode << PLL_MODE_SHIFT);
+
+ err = idt82p33_write(idt82p33, channel->dpll_mode_cnfg,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ channel->pll_mode = mode;
+
+ return 0;
+}
+
+static int _idt82p33_gettime(struct idt82p33_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 buf[TOD_BYTE_COUNT];
+ u8 trigger;
+ int err;
+
+ trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
+
+ err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
+ &trigger, sizeof(trigger));
+
+ if (err)
+ return err;
+
+ if (idt82p33->calculate_overhead_flag)
+ idt82p33->start_time = ktime_get_raw();
+
+ err = idt82p33_read(idt82p33, channel->dpll_tod_sts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ idt82p33_byte_array_to_timespec(ts, buf);
+
+ return 0;
+}
+
+/*
+ * TOD Trigger:
+ * Bits[7:4] Write 0x9, MSB write
+ * Bits[3:0] Read 0x9, LSB read
+ */
+
+static int _idt82p33_settime(struct idt82p33_channel *channel,
+ struct timespec64 const *ts)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ struct timespec64 local_ts = *ts;
+ char buf[TOD_BYTE_COUNT];
+ s64 dynamic_overhead_ns;
+ unsigned char trigger;
+ int err;
+ u8 i;
+
+ trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
+
+ err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
+ &trigger, sizeof(trigger));
+
+ if (err)
+ return err;
+
+ if (idt82p33->calculate_overhead_flag) {
+ dynamic_overhead_ns = ktime_to_ns(ktime_get_raw())
+ - ktime_to_ns(idt82p33->start_time);
+
+ timespec64_add_ns(&local_ts, dynamic_overhead_ns);
+
+ idt82p33->calculate_overhead_flag = 0;
+ }
+
+ idt82p33_timespec_to_byte_array(&local_ts, buf);
+
+ /*
+ * Store the new time value.
+ */
+ for (i = 0; i < TOD_BYTE_COUNT; i++) {
+ err = idt82p33_write(idt82p33, channel->dpll_tod_cnfg + i,
+ &buf[i], sizeof(buf[i]));
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int _idt82p33_adjtime(struct idt82p33_channel *channel, s64 delta_ns)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ struct timespec64 ts;
+ s64 now_ns;
+ int err;
+
+ idt82p33->calculate_overhead_flag = 1;
+
+ err = _idt82p33_gettime(channel, &ts);
+
+ if (err)
+ return err;
+
+ now_ns = timespec64_to_ns(&ts);
+ now_ns += delta_ns + idt82p33->tod_write_overhead_ns;
+
+ ts = ns_to_timespec64(now_ns);
+
+ err = _idt82p33_settime(channel, &ts);
+
+ return err;
+}
+
+static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ unsigned char buf[5] = {0};
+ int neg_adj = 0;
+ int err, i;
+ s64 fcw;
+
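+ /*
+ * Despite the _ppb suffix, current_freq_ppb caches the last scaled_ppm
+ * value that was written.
+ */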
+ if (scaled_ppm == channel->current_freq_ppb)
+ return 0;
+
+ /*
+ * The Frequency Control Word unit is 1.68 * 10^-10 ppm.
+ *
+ * adjfreq:
+ *        ppb * 10^9
+ * FCW = ------------
+ *           168
+ *
+ * adjfine (scaled_ppm carries 16 fractional bits, so
+ * ppb = scaled_ppm * 1000 / 2^16), hence equivalently:
+ *
+ *        scaled_ppm * 5^12    scaled_ppm * 244140625
+ * FCW = ------------------- = ------------------------
+ *            168 * 2^4                 2688
+ */
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ fcw = scaled_ppm * 244140625ULL;
+ fcw = div_u64(fcw, 2688);
+
+ if (neg_adj)
+ fcw = -fcw;
+
+ for (i = 0; i < 5; i++) {
+ buf[i] = fcw & 0xff;
+ fcw >>= 8;
+ }
+
+ err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
+
+ if (err)
+ return err;
+
+ err = idt82p33_write(idt82p33, channel->dpll_freq_cnfg,
+ buf, sizeof(buf));
+
+ if (err == 0)
+ channel->current_freq_ppb = neg_adj ? -scaled_ppm : scaled_ppm;
+
+ return err;
+}
+
+static int idt82p33_measure_one_byte_write_overhead(
+ struct idt82p33_channel *channel, s64 *overhead_ns)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ ktime_t start, stop;
+ s64 total_ns;
+ u8 trigger;
+ int err;
+ u8 i;
+
+ total_ns = 0;
+ *overhead_ns = 0;
+ trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
+
+ for (i = 0; i < MAX_MEASURMENT_COUNT; i++) {
+
+ start = ktime_get_raw();
+
+ err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
+ &trigger, sizeof(trigger));
+
+ stop = ktime_get_raw();
+
+ if (err)
+ return err;
+
+ total_ns += ktime_to_ns(stop) - ktime_to_ns(start);
+ }
+
+ *overhead_ns = div_s64(total_ns, MAX_MEASURMENT_COUNT);
+
+ return err;
+}
+
+static int idt82p33_measure_tod_write_9_byte_overhead(
+ struct idt82p33_channel *channel)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 buf[TOD_BYTE_COUNT];
+ ktime_t start, stop;
+ s64 total_ns;
+ int err = 0;
+ u8 i, j;
+
+ total_ns = 0;
+ idt82p33->tod_write_overhead_ns = 0;
+
+ for (i = 0; i < MAX_MEASURMENT_COUNT; i++) {
+
+ start = ktime_get_raw();
+
+ /* Need one less byte for applicable overhead */
+ for (j = 0; j < (TOD_BYTE_COUNT - 1); j++) {
+ err = idt82p33_write(idt82p33,
+ channel->dpll_tod_cnfg + j,
+ &buf[j], sizeof(buf[j]));
+ if (err)
+ return err;
+ }
+
+ stop = ktime_get_raw();
+
+ total_ns += ktime_to_ns(stop) - ktime_to_ns(start);
+ }
+
+ idt82p33->tod_write_overhead_ns = div_s64(total_ns,
+ MAX_MEASURMENT_COUNT);
+
+ return err;
+}
+
+static int idt82p33_measure_settime_gettime_gap_overhead(
+ struct idt82p33_channel *channel, s64 *overhead_ns)
+{
+ struct timespec64 ts1 = {0, 0};
+ struct timespec64 ts2;
+ int err;
+
+ *overhead_ns = 0;
+
+ err = _idt82p33_settime(channel, &ts1);
+
+ if (err)
+ return err;
+
+ err = _idt82p33_gettime(channel, &ts2);
+
+ if (!err)
+ *overhead_ns = timespec64_to_ns(&ts2) - timespec64_to_ns(&ts1);
+
+ return err;
+}
+
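+/*
+ * Estimate the I2C cost of a full TOD write so adjtime() can compensate for
+ * it. The settime()->gettime() gap, a single register write and a 9-byte
+ * TOD write are timed; the gap minus two single-byte writes is then
+ * subtracted from the 9-byte figure to arrive at tod_write_overhead_ns.
+ */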
+static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel)
+{
+ s64 trailing_overhead_ns, one_byte_write_ns, gap_ns;
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ idt82p33->tod_write_overhead_ns = 0;
+
+ err = idt82p33_measure_settime_gettime_gap_overhead(channel, &gap_ns);
+
+ if (err)
+ return err;
+
+ err = idt82p33_measure_one_byte_write_overhead(channel,
+ &one_byte_write_ns);
+
+ if (err)
+ return err;
+
+ err = idt82p33_measure_tod_write_9_byte_overhead(channel);
+
+ if (err)
+ return err;
+
+ trailing_overhead_ns = gap_ns - (2 * one_byte_write_ns);
+
+ idt82p33->tod_write_overhead_ns -= trailing_overhead_ns;
+
+ return err;
+}
+
+static int idt82p33_check_and_set_masks(struct idt82p33 *idt82p33,
+ u8 page,
+ u8 offset,
+ u8 val)
+{
+ int err = 0;
+
+ if (page == PLLMASK_ADDR_HI && offset == PLLMASK_ADDR_LO) {
+ if ((val & 0xfc) || !(val & 0x3)) {
+ dev_err(&idt82p33->client->dev,
+ "Invalid PLL mask 0x%hhx\n", val);
+ err = -EINVAL;
+ } else {
+ idt82p33->pll_mask = val;
+ }
+ } else if (page == PLL0_OUTMASK_ADDR_HI &&
+ offset == PLL0_OUTMASK_ADDR_LO) {
+ idt82p33->channel[0].output_mask = val;
+ } else if (page == PLL1_OUTMASK_ADDR_HI &&
+ offset == PLL1_OUTMASK_ADDR_LO) {
+ idt82p33->channel[1].output_mask = val;
+ }
+
+ return err;
+}
+
+static void idt82p33_display_masks(struct idt82p33 *idt82p33)
+{
+ u8 mask, i;
+
+ dev_info(&idt82p33->client->dev,
+ "pllmask = 0x%02x\n", idt82p33->pll_mask);
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ mask = 1 << i;
+
+ if (mask & idt82p33->pll_mask)
+ dev_info(&idt82p33->client->dev,
+ "PLL%d output_mask = 0x%04x\n",
+ i, idt82p33->channel[i].output_mask);
+ }
+}
+
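+/*
+ * Set or clear the SYNC_TOD bit in the DPLL sync configuration register.
+ * When enabling, and sync_tod_timeout is non-zero, the delayed work that
+ * clears the bit again is (re)armed to fire after sync_tod_timeout seconds.
+ */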
+static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 sync_cnfg;
+ int err;
+
+ if (enable == channel->sync_tod_on) {
+ if (enable && sync_tod_timeout) {
+ mod_delayed_work(system_wq, &channel->sync_tod_work,
+ sync_tod_timeout * HZ);
+ }
+ return 0;
+ }
+
+ err = idt82p33_read(idt82p33, channel->dpll_sync_cnfg,
+ &sync_cnfg, sizeof(sync_cnfg));
+ if (err)
+ return err;
+
+ sync_cnfg &= ~SYNC_TOD;
+
+ if (enable)
+ sync_cnfg |= SYNC_TOD;
+
+ err = idt82p33_write(idt82p33, channel->dpll_sync_cnfg,
+ &sync_cnfg, sizeof(sync_cnfg));
+ if (err)
+ return err;
+
+ channel->sync_tod_on = enable;
+
+ if (enable && sync_tod_timeout) {
+ mod_delayed_work(system_wq, &channel->sync_tod_work,
+ sync_tod_timeout * HZ);
+ }
+
+ return 0;
+}
+
+static void idt82p33_sync_tod_work_handler(struct work_struct *work)
+{
+ struct idt82p33_channel *channel =
+ container_of(work, struct idt82p33_channel, sync_tod_work.work);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ (void)idt82p33_sync_tod(channel, false);
+
+ mutex_unlock(&idt82p33->reg_lock);
+}
+
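+/*
+ * Walk the channel's output mask and clear (enable) or set (disable) the
+ * SQUELCH_ENABLE bit in each selected output's mux configuration register.
+ */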
+static int idt82p33_pps_enable(struct idt82p33_channel *channel, bool enable)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 mask, outn, val;
+ int err;
+
+ mask = channel->output_mask;
+ outn = 0;
+
+ while (mask) {
+ if (mask & 0x1) {
+ err = idt82p33_read(idt82p33, OUT_MUX_CNFG(outn),
+ &val, sizeof(val));
+ if (err)
+ return err;
+
+ if (enable)
+ val &= ~SQUELCH_ENABLE;
+ else
+ val |= SQUELCH_ENABLE;
+
+ err = idt82p33_write(idt82p33, OUT_MUX_CNFG(outn),
+ &val, sizeof(val));
+
+ if (err)
+ return err;
+ }
+ mask >>= 0x1;
+ outn++;
+ }
+
+ return 0;
+}
+
+static int idt82p33_enable_tod(struct idt82p33_channel *channel)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ struct timespec64 ts = {0, 0};
+ int err;
+ u8 val;
+
+ val = 0;
+ err = idt82p33_write(idt82p33, channel->dpll_input_mode_cnfg,
+ &val, sizeof(val));
+ if (err)
+ return err;
+
+ err = idt82p33_pps_enable(channel, false);
+
+ if (err)
+ return err;
+
+ err = idt82p33_measure_tod_write_overhead(channel);
+
+ if (err)
+ return err;
+
+ err = _idt82p33_settime(channel, &ts);
+
+ if (err)
+ return err;
+
+ return idt82p33_sync_tod(channel, true);
+}
+
+static void idt82p33_ptp_clock_unregister_all(struct idt82p33 *idt82p33)
+{
+ struct idt82p33_channel *channel;
+ u8 i;
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+
+ channel = &idt82p33->channel[i];
+
+ if (channel->ptp_clock) {
+ ptp_clock_unregister(channel->ptp_clock);
+ cancel_delayed_work_sync(&channel->sync_tod_work);
+ }
+ }
+}
+
+static int idt82p33_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ err = -EOPNOTSUPP;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ if (rq->type == PTP_CLK_REQ_PEROUT) {
+ /* Only accept a 1-PPS aligned to the second. */
+ if (!on) {
+ err = idt82p33_pps_enable(channel, false);
+ } else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec) {
+ err = -ERANGE;
+ } else {
+ err = idt82p33_pps_enable(channel, true);
+ }
+ }
+
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ mutex_lock(&idt82p33->reg_lock);
+ err = _idt82p33_adjfine(channel, scaled_ppm);
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_adjtime(struct ptp_clock_info *ptp, s64 delta_ns)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ if (abs(delta_ns) < phase_snap_threshold) {
+ mutex_unlock(&idt82p33->reg_lock);
+ return 0;
+ }
+
+ err = _idt82p33_adjtime(channel, delta_ns);
+
+ if (err) {
+ mutex_unlock(&idt82p33->reg_lock);
+ return err;
+ }
+
+ err = idt82p33_sync_tod(channel, true);
+
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ mutex_lock(&idt82p33->reg_lock);
+ err = _idt82p33_gettime(channel, ts);
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ mutex_lock(&idt82p33->reg_lock);
+ err = _idt82p33_settime(channel, ts);
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_channel_init(struct idt82p33_channel *channel, int index)
+{
+ switch (index) {
+ case 0:
+ channel->dpll_tod_cnfg = DPLL1_TOD_CNFG;
+ channel->dpll_tod_trigger = DPLL1_TOD_TRIGGER;
+ channel->dpll_tod_sts = DPLL1_TOD_STS;
+ channel->dpll_mode_cnfg = DPLL1_OPERATING_MODE_CNFG;
+ channel->dpll_freq_cnfg = DPLL1_HOLDOVER_FREQ_CNFG;
+ channel->dpll_phase_cnfg = DPLL1_PHASE_OFFSET_CNFG;
+ channel->dpll_sync_cnfg = DPLL1_SYNC_EDGE_CNFG;
+ channel->dpll_input_mode_cnfg = DPLL1_INPUT_MODE_CNFG;
+ break;
+ case 1:
+ channel->dpll_tod_cnfg = DPLL2_TOD_CNFG;
+ channel->dpll_tod_trigger = DPLL2_TOD_TRIGGER;
+ channel->dpll_tod_sts = DPLL2_TOD_STS;
+ channel->dpll_mode_cnfg = DPLL2_OPERATING_MODE_CNFG;
+ channel->dpll_freq_cnfg = DPLL2_HOLDOVER_FREQ_CNFG;
+ channel->dpll_phase_cnfg = DPLL2_PHASE_OFFSET_CNFG;
+ channel->dpll_sync_cnfg = DPLL2_SYNC_EDGE_CNFG;
+ channel->dpll_input_mode_cnfg = DPLL2_INPUT_MODE_CNFG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ INIT_DELAYED_WORK(&channel->sync_tod_work,
+ idt82p33_sync_tod_work_handler);
+ channel->sync_tod_on = false;
+ channel->current_freq_ppb = 0;
+
+ return 0;
+}
+
+static void idt82p33_caps_init(struct ptp_clock_info *caps)
+{
+ caps->owner = THIS_MODULE;
+ caps->max_adj = 92000;
+ caps->adjfine = idt82p33_adjfine;
+ caps->adjtime = idt82p33_adjtime;
+ caps->gettime64 = idt82p33_gettime;
+ caps->settime64 = idt82p33_settime;
+ caps->enable = idt82p33_enable;
+}
+
+static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
+{
+ struct idt82p33_channel *channel;
+ int err;
+
+ if (!(index < MAX_PHC_PLL))
+ return -EINVAL;
+
+ channel = &idt82p33->channel[index];
+
+ err = idt82p33_channel_init(channel, index);
+ if (err)
+ return err;
+
+ channel->idt82p33 = idt82p33;
+
+ idt82p33_caps_init(&channel->caps);
+ snprintf(channel->caps.name, sizeof(channel->caps.name),
+ "IDT 82P33 PLL%u", index);
+ channel->caps.n_per_out = hweight8(channel->output_mask);
+
+ err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
+ if (err)
+ return err;
+
+ err = idt82p33_enable_tod(channel);
+ if (err)
+ return err;
+
+ channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
+
+ if (IS_ERR(channel->ptp_clock)) {
+ err = PTR_ERR(channel->ptp_clock);
+ channel->ptp_clock = NULL;
+ return err;
+ }
+
+ if (!channel->ptp_clock)
+ return -ENOTSUPP;
+
+ dev_info(&idt82p33->client->dev, "PLL%d registered as ptp%d\n",
+ index, channel->ptp_clock->index);
+
+ return 0;
+}
+
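+/*
+ * The firmware image is a sequence of 4-byte records (struct idt82p33_fwrc:
+ * hiaddr/page, loaddr/offset, value, reserved). Records addressing the PLL
+ * and output mask locations are captured by idt82p33_check_and_set_masks();
+ * everything else is written straight to the chip, skipping out-of-range
+ * pages and the reserved bytes at the end of each page.
+ */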
+static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
+{
+ const struct firmware *fw;
+ struct idt82p33_fwrc *rec;
+ u8 loaddr, page, val;
+ int err;
+ s32 len;
+
+ dev_dbg(&idt82p33->client->dev,
+ "requesting firmware '%s'\n", FW_FILENAME);
+
+ err = request_firmware(&fw, FW_FILENAME, &idt82p33->client->dev);
+
+ if (err)
+ return err;
+
+ dev_dbg(&idt82p33->client->dev, "firmware size %zu bytes\n", fw->size);
+
+ rec = (struct idt82p33_fwrc *) fw->data;
+
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+
+ if (rec->reserved) {
+ dev_err(&idt82p33->client->dev,
+ "bad firmware, reserved field non-zero\n");
+ err = -EINVAL;
+ } else {
+ val = rec->value;
+ loaddr = rec->loaddr;
+ page = rec->hiaddr;
+
+ rec++;
+
+ err = idt82p33_check_and_set_masks(idt82p33, page,
+ loaddr, val);
+ }
+
+ if (err == 0) {
+ /* maximum 8 pages */
+ if (page >= PAGE_NUM)
+ continue;
+
+ /* Page size 128, last 4 bytes of page skipped */
+ if (((loaddr > 0x7b) && (loaddr <= 0x7f))
+ || loaddr > 0xfb)
+ continue;
+
+ err = idt82p33_write(idt82p33, _ADDR(page, loaddr),
+ &val, sizeof(val));
+ }
+
+ if (err)
+ goto out;
+ }
+
+ idt82p33_display_masks(idt82p33);
+out:
+ release_firmware(fw);
+ return err;
+}
+
+static int idt82p33_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct idt82p33 *idt82p33;
+ int err;
+ u8 i;
+
+ (void)id;
+
+ idt82p33 = devm_kzalloc(&client->dev,
+ sizeof(struct idt82p33), GFP_KERNEL);
+ if (!idt82p33)
+ return -ENOMEM;
+
+ mutex_init(&idt82p33->reg_lock);
+
+ idt82p33->client = client;
+ idt82p33->page_offset = 0xff;
+ idt82p33->tod_write_overhead_ns = 0;
+ idt82p33->calculate_overhead_flag = 0;
+ idt82p33->pll_mask = DEFAULT_PLL_MASK;
+ idt82p33->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
+ idt82p33->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ err = idt82p33_load_firmware(idt82p33);
+
+ if (err)
+ dev_warn(&idt82p33->client->dev,
+ "loading firmware failed with %d\n", err);
+
+ if (idt82p33->pll_mask) {
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ if (idt82p33->pll_mask & (1 << i)) {
+ err = idt82p33_enable_channel(idt82p33, i);
+ if (err)
+ break;
+ }
+ }
+ } else {
+ dev_err(&idt82p33->client->dev,
+ "no PLLs flagged as PHCs, nothing to do\n");
+ err = -ENODEV;
+ }
+
+ mutex_unlock(&idt82p33->reg_lock);
+
+ if (err) {
+ idt82p33_ptp_clock_unregister_all(idt82p33);
+ return err;
+ }
+
+ i2c_set_clientdata(client, idt82p33);
+
+ return 0;
+}
+
+static int idt82p33_remove(struct i2c_client *client)
+{
+ struct idt82p33 *idt82p33 = i2c_get_clientdata(client);
+
+ idt82p33_ptp_clock_unregister_all(idt82p33);
+ mutex_destroy(&idt82p33->reg_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id idt82p33_dt_id[] = {
+ { .compatible = "idt,82p33810" },
+ { .compatible = "idt,82p33813" },
+ { .compatible = "idt,82p33814" },
+ { .compatible = "idt,82p33831" },
+ { .compatible = "idt,82p33910" },
+ { .compatible = "idt,82p33913" },
+ { .compatible = "idt,82p33914" },
+ { .compatible = "idt,82p33931" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, idt82p33_dt_id);
+#endif
+
+static const struct i2c_device_id idt82p33_i2c_id[] = {
+ { "idt82p33810", },
+ { "idt82p33813", },
+ { "idt82p33814", },
+ { "idt82p33831", },
+ { "idt82p33910", },
+ { "idt82p33913", },
+ { "idt82p33914", },
+ { "idt82p33931", },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, idt82p33_i2c_id);
+
+static struct i2c_driver idt82p33_driver = {
+ .driver = {
+ .of_match_table = of_match_ptr(idt82p33_dt_id),
+ .name = "idt82p33",
+ },
+ .probe = idt82p33_probe,
+ .remove = idt82p33_remove,
+ .id_table = idt82p33_i2c_id,
+};
+
+module_i2c_driver(idt82p33_driver);
diff --git a/drivers/ptp/ptp_idt82p33.h b/drivers/ptp/ptp_idt82p33.h
new file mode 100644
index 000000000..9d46966d2
--- /dev/null
+++ b/drivers/ptp/ptp_idt82p33.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PTP hardware clock driver for the IDT 82P33XXX family of clocks.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef PTP_IDT82P33_H
+#define PTP_IDT82P33_H
+
+#include <linux/ktime.h>
+#include <linux/workqueue.h>
+
+
+/* Register Map - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf */
+#define PAGE_NUM (8)
+#define _ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f))
+#define _PAGE(addr) (((addr) >> 0x7) & 0x7)
+#define _OFFSET(addr) ((addr) & 0x7f)
+
+#define DPLL1_TOD_CNFG 0x134
+#define DPLL2_TOD_CNFG 0x1B4
+
+#define DPLL1_TOD_STS 0x10B
+#define DPLL2_TOD_STS 0x18B
+
+#define DPLL1_TOD_TRIGGER 0x115
+#define DPLL2_TOD_TRIGGER 0x195
+
+#define DPLL1_OPERATING_MODE_CNFG 0x120
+#define DPLL2_OPERATING_MODE_CNFG 0x1A0
+
+#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C
+#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC
+
+#define DPLL1_PHASE_OFFSET_CNFG 0x143
+#define DPLL2_PHASE_OFFSET_CNFG 0x1C3
+
+#define DPLL1_SYNC_EDGE_CNFG 0x140
+#define DPLL2_SYNC_EDGE_CNFG 0x1C0
+
+#define DPLL1_INPUT_MODE_CNFG 0x116
+#define DPLL2_INPUT_MODE_CNFG 0x196
+
+#define OUT_MUX_CNFG(outn) _ADDR(0x6, (0xC * (outn)))
+
+#define PAGE_ADDR 0x7F
+/* Register Map end */
+
+/* Register definitions - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf*/
+#define TOD_TRIGGER(wr_trig, rd_trig) (((wr_trig) & 0xf) << 4 | ((rd_trig) & 0xf))
+#define SYNC_TOD BIT(1)
+#define PH_OFFSET_EN BIT(7)
+#define SQUELCH_ENABLE BIT(5)
+
+/* Bit definitions for the DPLL_MODE register */
+#define PLL_MODE_SHIFT (0)
+#define PLL_MODE_MASK (0x1F)
+
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_AUTOMATIC = PLL_MODE_MIN,
+ PLL_MODE_FORCE_FREERUN = 1,
+ PLL_MODE_FORCE_HOLDOVER = 2,
+ PLL_MODE_FORCE_LOCKED = 4,
+ PLL_MODE_FORCE_PRE_LOCKED2 = 5,
+ PLL_MODE_FORCE_PRE_LOCKED = 6,
+ PLL_MODE_FORCE_LOST_PHASE = 7,
+ PLL_MODE_DCO = 10,
+ PLL_MODE_WPH = 18,
+ PLL_MODE_MAX = PLL_MODE_WPH,
+};
+
+enum hw_tod_trig_sel {
+ HW_TOD_TRIG_SEL_MIN = 0,
+ HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_SYNC_SEL = 1,
+ HW_TOD_TRIG_SEL_IN12 = 2,
+ HW_TOD_TRIG_SEL_IN13 = 3,
+ HW_TOD_TRIG_SEL_IN14 = 4,
+ HW_TOD_TRIG_SEL_TOD_PPS = 5,
+ HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6,
+ HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7,
+ HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8,
+ HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+};
+
+/* Register bit definitions end */
+#define FW_FILENAME "idt82p33xxx.bin"
+#define MAX_PHC_PLL (2)
+#define TOD_BYTE_COUNT (10)
+#define MAX_MEASURMENT_COUNT (5)
+#define SNAP_THRESHOLD_NS (150000)
+#define SYNC_TOD_TIMEOUT_SEC (5)
+
+#define PLLMASK_ADDR_HI 0xFF
+#define PLLMASK_ADDR_LO 0xA5
+
+#define PLL0_OUTMASK_ADDR_HI 0xFF
+#define PLL0_OUTMASK_ADDR_LO 0xB0
+
+#define PLL1_OUTMASK_ADDR_HI 0xFF
+#define PLL1_OUTMASK_ADDR_LO 0xB2
+
+#define PLL2_OUTMASK_ADDR_HI 0xFF
+#define PLL2_OUTMASK_ADDR_LO 0xB4
+
+#define PLL3_OUTMASK_ADDR_HI 0xFF
+#define PLL3_OUTMASK_ADDR_LO 0xB6
+
+#define DEFAULT_PLL_MASK (0x01)
+#define DEFAULT_OUTPUT_MASK_PLL0 (0xc0)
+#define DEFAULT_OUTPUT_MASK_PLL1 DEFAULT_OUTPUT_MASK_PLL0
+
+/* PTP Hardware Clock interface */
+struct idt82p33_channel {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ struct idt82p33 *idt82p33;
+ enum pll_mode pll_mode;
+ /* task to turn off SYNC_TOD bit after pps sync */
+ struct delayed_work sync_tod_work;
+ bool sync_tod_on;
+ s32 current_freq_ppb;
+ u8 output_mask;
+ u16 dpll_tod_cnfg;
+ u16 dpll_tod_trigger;
+ u16 dpll_tod_sts;
+ u16 dpll_mode_cnfg;
+ u16 dpll_freq_cnfg;
+ u16 dpll_phase_cnfg;
+ u16 dpll_sync_cnfg;
+ u16 dpll_input_mode_cnfg;
+};
+
+struct idt82p33 {
+ struct idt82p33_channel channel[MAX_PHC_PLL];
+ struct i2c_client *client;
+ u8 page_offset;
+ u8 pll_mask;
+ ktime_t start_time;
+ int calculate_overhead_flag;
+ s64 tod_write_overhead_ns;
+ /* Protects I2C read/modify/write registers from concurrent access */
+ struct mutex reg_lock;
+};
+
+/* firmware interface */
+struct idt82p33_fwrc {
+ u8 hiaddr;
+ u8 loaddr;
+ u8 value;
+ u8 reserved;
+} __packed;
+
+/**
+ * @brief Maximum absolute value for write phase offset in femtoseconds
+ */
+#define WRITE_PHASE_OFFSET_LIMIT (20000052084ll)
+
+/** @brief Phase offset resolution
+ *
+ * DPLL phase offset = 10^15 fs / (System Clock * 2^13)
+ *                   = 10^15 fs / (1638400000 * 2^13)
+ *                   = 74.5058059692382 fs
+ */
+#define IDT_T0DPLL_PHASE_RESOL 74506
+
+
+#endif /* PTP_IDT82P33_H */
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
new file mode 100644
index 000000000..4700ffbdf
--- /dev/null
+++ b/drivers/ptp/ptp_ines.c
@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 MOSER-BAER AG
+//
+
+#define pr_fmt(fmt) "InES_PTP: " fmt
+
+#include <linux/ethtool.h>
+#include <linux/export.h>
+#include <linux/if_vlan.h>
+#include <linux/mii_timestamper.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/stddef.h>
+
+MODULE_DESCRIPTION("Driver for the ZHAW InES PTP time stamping IP core");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+/* GLOBAL register */
+#define MCAST_MAC_SELECT_SHIFT 2
+#define MCAST_MAC_SELECT_MASK 0x3
+#define IO_RESET BIT(1)
+#define PTP_RESET BIT(0)
+
+/* VERSION register */
+#define IF_MAJOR_VER_SHIFT 12
+#define IF_MAJOR_VER_MASK 0xf
+#define IF_MINOR_VER_SHIFT 8
+#define IF_MINOR_VER_MASK 0xf
+#define FPGA_MAJOR_VER_SHIFT 4
+#define FPGA_MAJOR_VER_MASK 0xf
+#define FPGA_MINOR_VER_SHIFT 0
+#define FPGA_MINOR_VER_MASK 0xf
+
+/* INT_STAT register */
+#define RX_INTR_STATUS_3 BIT(5)
+#define RX_INTR_STATUS_2 BIT(4)
+#define RX_INTR_STATUS_1 BIT(3)
+#define TX_INTR_STATUS_3 BIT(2)
+#define TX_INTR_STATUS_2 BIT(1)
+#define TX_INTR_STATUS_1 BIT(0)
+
+/* INT_MSK register */
+#define RX_INTR_MASK_3 BIT(5)
+#define RX_INTR_MASK_2 BIT(4)
+#define RX_INTR_MASK_1 BIT(3)
+#define TX_INTR_MASK_3 BIT(2)
+#define TX_INTR_MASK_2 BIT(1)
+#define TX_INTR_MASK_1 BIT(0)
+
+/* BUF_STAT register */
+#define RX_FIFO_NE_3 BIT(5)
+#define RX_FIFO_NE_2 BIT(4)
+#define RX_FIFO_NE_1 BIT(3)
+#define TX_FIFO_NE_3 BIT(2)
+#define TX_FIFO_NE_2 BIT(1)
+#define TX_FIFO_NE_1 BIT(0)
+
+/* PORT_CONF register */
+#define CM_ONE_STEP BIT(6)
+#define PHY_SPEED_SHIFT 4
+#define PHY_SPEED_MASK 0x3
+#define P2P_DELAY_WR_POS_SHIFT 2
+#define P2P_DELAY_WR_POS_MASK 0x3
+#define PTP_MODE_SHIFT 0
+#define PTP_MODE_MASK 0x3
+
+/* TS_STAT_TX register */
+#define TS_ENABLE BIT(15)
+#define DATA_READ_POS_SHIFT 8
+#define DATA_READ_POS_MASK 0x1f
+#define DISCARDED_EVENTS_SHIFT 4
+#define DISCARDED_EVENTS_MASK 0xf
+
+#define INES_N_PORTS 3
+#define INES_REGISTER_SIZE 0x80
+#define INES_PORT_OFFSET 0x20
+#define INES_PORT_SIZE 0x20
+#define INES_FIFO_DEPTH 90
+#define INES_MAX_EVENTS 100
+
+#define BC_PTP_V1 0
+#define BC_PTP_V2 1
+#define TC_E2E_PTP_V2 2
+#define TC_P2P_PTP_V2 3
+
+#define PHY_SPEED_10 0
+#define PHY_SPEED_100 1
+#define PHY_SPEED_1000 2
+
+#define PORT_CONF \
+ ((PHY_SPEED_1000 << PHY_SPEED_SHIFT) | (BC_PTP_V2 << PTP_MODE_SHIFT))
+
+#define ines_read32(s, r) __raw_readl((void __iomem *)&s->regs->r)
+#define ines_write32(s, v, r) __raw_writel(v, (void __iomem *)&s->regs->r)
+
+#define MESSAGE_TYPE_SYNC 1
+#define MESSAGE_TYPE_P_DELAY_REQ 2
+#define MESSAGE_TYPE_P_DELAY_RESP 3
+#define MESSAGE_TYPE_DELAY_REQ 4
+
+#define SYNC 0x0
+#define DELAY_REQ 0x1
+#define PDELAY_REQ 0x2
+#define PDELAY_RESP 0x3
+
+static LIST_HEAD(ines_clocks);
+static DEFINE_MUTEX(ines_clocks_lock);
+
+struct ines_global_regs {
+ u32 id;
+ u32 test;
+ u32 global;
+ u32 version;
+ u32 test2;
+ u32 int_stat;
+ u32 int_msk;
+ u32 buf_stat;
+};
+
+struct ines_port_registers {
+ u32 port_conf;
+ u32 p_delay;
+ u32 ts_stat_tx;
+ u32 ts_stat_rx;
+ u32 ts_tx;
+ u32 ts_rx;
+};
+
+struct ines_timestamp {
+ struct list_head list;
+ unsigned long tmo;
+ u16 tag;
+ u64 sec;
+ u64 nsec;
+ u64 clkid;
+ u16 portnum;
+ u16 seqid;
+};
+
+struct ines_port {
+ struct ines_port_registers *regs;
+ struct mii_timestamper mii_ts;
+ struct ines_clock *clock;
+ bool rxts_enabled;
+ bool txts_enabled;
+ unsigned int index;
+ struct delayed_work ts_work;
+ /* lock protects event list and tx_skb */
+ spinlock_t lock;
+ struct sk_buff *tx_skb;
+ struct list_head events;
+ struct list_head pool;
+ struct ines_timestamp pool_data[INES_MAX_EVENTS];
+};
+
+struct ines_clock {
+ struct ines_port port[INES_N_PORTS];
+ struct ines_global_regs __iomem *regs;
+ void __iomem *base;
+ struct device_node *node;
+ struct device *dev;
+ struct list_head list;
+};
+
+static bool ines_match(struct sk_buff *skb, unsigned int ptp_class,
+ struct ines_timestamp *ts, struct device *dev);
+static int ines_rxfifo_read(struct ines_port *port);
+static u64 ines_rxts64(struct ines_port *port, unsigned int words);
+static bool ines_timestamp_expired(struct ines_timestamp *ts);
+static u64 ines_txts64(struct ines_port *port, unsigned int words);
+static void ines_txtstamp_work(struct work_struct *work);
+static bool is_sync_pdelay_resp(struct sk_buff *skb, int type);
+static u8 tag_to_msgtype(u8 tag);
+
+static void ines_clock_cleanup(struct ines_clock *clock)
+{
+ struct ines_port *port;
+ int i;
+
+ for (i = 0; i < INES_N_PORTS; i++) {
+ port = &clock->port[i];
+ cancel_delayed_work_sync(&port->ts_work);
+ }
+}
+
+static int ines_clock_init(struct ines_clock *clock, struct device *device,
+ void __iomem *addr)
+{
+ struct device_node *node = device->of_node;
+ unsigned long port_addr;
+ struct ines_port *port;
+ int i, j;
+
+ INIT_LIST_HEAD(&clock->list);
+ clock->node = node;
+ clock->dev = device;
+ clock->base = addr;
+ clock->regs = clock->base;
+
+ for (i = 0; i < INES_N_PORTS; i++) {
+ port = &clock->port[i];
+ port_addr = (unsigned long) clock->base +
+ INES_PORT_OFFSET + i * INES_PORT_SIZE;
+ port->regs = (struct ines_port_registers *) port_addr;
+ port->clock = clock;
+ port->index = i;
+ INIT_DELAYED_WORK(&port->ts_work, ines_txtstamp_work);
+ spin_lock_init(&port->lock);
+ INIT_LIST_HEAD(&port->events);
+ INIT_LIST_HEAD(&port->pool);
+ for (j = 0; j < INES_MAX_EVENTS; j++)
+ list_add(&port->pool_data[j].list, &port->pool);
+ }
+
+ ines_write32(clock, 0xBEEF, test);
+ ines_write32(clock, 0xBEEF, test2);
+
+ dev_dbg(device, "ID 0x%x\n", ines_read32(clock, id));
+ dev_dbg(device, "TEST 0x%x\n", ines_read32(clock, test));
+ dev_dbg(device, "VERSION 0x%x\n", ines_read32(clock, version));
+ dev_dbg(device, "TEST2 0x%x\n", ines_read32(clock, test2));
+
+ for (i = 0; i < INES_N_PORTS; i++) {
+ port = &clock->port[i];
+ ines_write32(port, PORT_CONF, port_conf);
+ }
+
+ return 0;
+}
+
+static struct ines_port *ines_find_port(struct device_node *node, u32 index)
+{
+ struct ines_port *port = NULL;
+ struct ines_clock *clock;
+ struct list_head *this;
+
+ mutex_lock(&ines_clocks_lock);
+ list_for_each(this, &ines_clocks) {
+ clock = list_entry(this, struct ines_clock, list);
+ if (clock->node == node) {
+ port = &clock->port[index];
+ break;
+ }
+ }
+ mutex_unlock(&ines_clocks_lock);
+ return port;
+}
+
+static u64 ines_find_rxts(struct ines_port *port, struct sk_buff *skb, int type)
+{
+ struct list_head *this, *next;
+ struct ines_timestamp *ts;
+ unsigned long flags;
+ u64 ns = 0;
+
+ if (type == PTP_CLASS_NONE)
+ return 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+ ines_rxfifo_read(port);
+ list_for_each_safe(this, next, &port->events) {
+ ts = list_entry(this, struct ines_timestamp, list);
+ if (ines_timestamp_expired(ts)) {
+ list_del_init(&ts->list);
+ list_add(&ts->list, &port->pool);
+ continue;
+ }
+ if (ines_match(skb, type, ts, port->clock->dev)) {
+ ns = ts->sec * 1000000000ULL + ts->nsec;
+ list_del_init(&ts->list);
+ list_add(&ts->list, &port->pool);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return ns;
+}
+
+static u64 ines_find_txts(struct ines_port *port, struct sk_buff *skb)
+{
+ unsigned int class = ptp_classify_raw(skb), i;
+ u32 data_rd_pos, buf_stat, mask, ts_stat_tx;
+ struct ines_timestamp ts;
+ unsigned long flags;
+ u64 ns = 0;
+
+ mask = TX_FIFO_NE_1 << port->index;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ for (i = 0; i < INES_FIFO_DEPTH; i++) {
+
+ buf_stat = ines_read32(port->clock, buf_stat);
+ if (!(buf_stat & mask)) {
+ dev_dbg(port->clock->dev,
+ "Tx timestamp FIFO unexpectedly empty\n");
+ break;
+ }
+ ts_stat_tx = ines_read32(port, ts_stat_tx);
+ data_rd_pos = (ts_stat_tx >> DATA_READ_POS_SHIFT) &
+ DATA_READ_POS_MASK;
+ if (data_rd_pos) {
+ dev_err(port->clock->dev,
+ "unexpected Tx read pos %u\n", data_rd_pos);
+ break;
+ }
+
+ ts.tag = ines_read32(port, ts_tx);
+ ts.sec = ines_txts64(port, 3);
+ ts.nsec = ines_txts64(port, 2);
+ ts.clkid = ines_txts64(port, 4);
+ ts.portnum = ines_read32(port, ts_tx);
+ ts.seqid = ines_read32(port, ts_tx);
+
+ if (ines_match(skb, class, &ts, port->clock->dev)) {
+ ns = ts.sec * 1000000000ULL + ts.nsec;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ns;
+}
+
+static int ines_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ u32 cm_one_step = 0, port_conf, ts_stat_rx, ts_stat_tx;
+ struct hwtstamp_config cfg;
+ unsigned long flags;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ ts_stat_tx = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ ts_stat_tx = TS_ENABLE;
+ break;
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ ts_stat_tx = TS_ENABLE;
+ cm_one_step = CM_ONE_STEP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ts_stat_rx = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ return -ERANGE;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ ts_stat_rx = TS_ENABLE;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ port_conf = ines_read32(port, port_conf);
+ port_conf &= ~CM_ONE_STEP;
+ port_conf |= cm_one_step;
+
+ ines_write32(port, port_conf, port_conf);
+ ines_write32(port, ts_stat_rx, ts_stat_rx);
+ ines_write32(port, ts_stat_tx, ts_stat_tx);
+
+ port->rxts_enabled = ts_stat_rx == TS_ENABLE;
+ port->txts_enabled = ts_stat_tx == TS_ENABLE;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static void ines_link_state(struct mii_timestamper *mii_ts,
+ struct phy_device *phydev)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ u32 port_conf, speed_conf;
+ unsigned long flags;
+
+ switch (phydev->speed) {
+ case SPEED_10:
+ speed_conf = PHY_SPEED_10 << PHY_SPEED_SHIFT;
+ break;
+ case SPEED_100:
+ speed_conf = PHY_SPEED_100 << PHY_SPEED_SHIFT;
+ break;
+ case SPEED_1000:
+ speed_conf = PHY_SPEED_1000 << PHY_SPEED_SHIFT;
+ break;
+ default:
+ dev_err(port->clock->dev, "bad speed: %d\n", phydev->speed);
+ return;
+ }
+ spin_lock_irqsave(&port->lock, flags);
+
+ port_conf = ines_read32(port, port_conf);
+ port_conf &= ~(0x3 << PHY_SPEED_SHIFT);
+ port_conf |= speed_conf;
+
+ ines_write32(port, port_conf, port_conf);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static bool ines_match(struct sk_buff *skb, unsigned int ptp_class,
+ struct ines_timestamp *ts, struct device *dev)
+{
+ struct ptp_header *hdr;
+ u16 portn, seqid;
+ u8 msgtype;
+ u64 clkid;
+
+ if (unlikely(ptp_class & PTP_CLASS_V1))
+ return false;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return false;
+
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ clkid = be64_to_cpup((__be64 *)&hdr->source_port_identity.clock_identity.id[0]);
+ portn = be16_to_cpu(hdr->source_port_identity.port_number);
+ seqid = be16_to_cpu(hdr->sequence_id);
+
+ if (tag_to_msgtype(ts->tag & 0x7) != msgtype) {
+ dev_dbg(dev, "msgtype mismatch ts %hhu != skb %hhu\n",
+ tag_to_msgtype(ts->tag & 0x7), msgtype);
+ return false;
+ }
+ if (ts->clkid != clkid) {
+ dev_dbg(dev, "clkid mismatch ts %llx != skb %llx\n",
+ ts->clkid, clkid);
+ return false;
+ }
+ if (ts->portnum != portn) {
+ dev_dbg(dev, "portn mismatch ts %hu != skb %hu\n",
+ ts->portnum, portn);
+ return false;
+ }
+ if (ts->seqid != seqid) {
+ dev_dbg(dev, "seqid mismatch ts %hu != skb %hu\n",
+ ts->seqid, seqid);
+ return false;
+ }
+
+ return true;
+}
+
+static bool ines_rxtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ if (!port->rxts_enabled)
+ return false;
+
+ ns = ines_find_rxts(port, skb, type);
+ if (!ns)
+ return false;
+
+ ssh = skb_hwtstamps(skb);
+ ssh->hwtstamp = ns_to_ktime(ns);
+ netif_rx(skb);
+
+ return true;
+}
+
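+/*
+ * Drain the Rx timestamp FIFO into the per-port event list. Each entry is
+ * given a one second expiry (jiffies + HZ) so that unmatched timestamps can
+ * be recycled by ines_find_rxts().
+ */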
+static int ines_rxfifo_read(struct ines_port *port)
+{
+ u32 data_rd_pos, buf_stat, mask, ts_stat_rx;
+ struct ines_timestamp *ts;
+ unsigned int i;
+
+ mask = RX_FIFO_NE_1 << port->index;
+
+ for (i = 0; i < INES_FIFO_DEPTH; i++) {
+ if (list_empty(&port->pool)) {
+ dev_err(port->clock->dev, "event pool is empty\n");
+ return -1;
+ }
+ buf_stat = ines_read32(port->clock, buf_stat);
+ if (!(buf_stat & mask))
+ break;
+
+ ts_stat_rx = ines_read32(port, ts_stat_rx);
+ data_rd_pos = (ts_stat_rx >> DATA_READ_POS_SHIFT) &
+ DATA_READ_POS_MASK;
+ if (data_rd_pos) {
+ dev_err(port->clock->dev, "unexpected Rx read pos %u\n",
+ data_rd_pos);
+ break;
+ }
+
+ ts = list_first_entry(&port->pool, struct ines_timestamp, list);
+ ts->tmo = jiffies + HZ;
+ ts->tag = ines_read32(port, ts_rx);
+ ts->sec = ines_rxts64(port, 3);
+ ts->nsec = ines_rxts64(port, 2);
+ ts->clkid = ines_rxts64(port, 4);
+ ts->portnum = ines_read32(port, ts_rx);
+ ts->seqid = ines_read32(port, ts_rx);
+
+ list_del_init(&ts->list);
+ list_add_tail(&ts->list, &port->events);
+ }
+
+ return 0;
+}
+
+static u64 ines_rxts64(struct ines_port *port, unsigned int words)
+{
+ unsigned int i;
+ u64 result;
+ u16 word;
+
+ word = ines_read32(port, ts_rx);
+ result = word;
+ words--;
+ for (i = 0; i < words; i++) {
+ word = ines_read32(port, ts_rx);
+ result <<= 16;
+ result |= word;
+ }
+ return result;
+}
+
+static bool ines_timestamp_expired(struct ines_timestamp *ts)
+{
+ return time_after(jiffies, ts->tmo);
+}
+
+static int ines_ts_info(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = -1;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_P2P);
+
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+static u64 ines_txts64(struct ines_port *port, unsigned int words)
+{
+ unsigned int i;
+ u64 result;
+ u16 word;
+
+ word = ines_read32(port, ts_tx);
+ result = word;
+ words--;
+ for (i = 0; i < words; i++) {
+ word = ines_read32(port, ts_tx);
+ result <<= 16;
+ result |= word;
+ }
+ return result;
+}
+
+static bool ines_txts_onestep(struct ines_port *port, struct sk_buff *skb, int type)
+{
+ unsigned long flags;
+ u32 port_conf;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port_conf = ines_read32(port, port_conf);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (port_conf & CM_ONE_STEP)
+ return is_sync_pdelay_resp(skb, type);
+
+ return false;
+}
+
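+/*
+ * At most one Tx skb is held per port; a newer skb replaces (and frees) any
+ * previous one, and the FIFO lookup itself is deferred to
+ * ines_txtstamp_work().
+ */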
+static void ines_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ struct sk_buff *old_skb = NULL;
+ unsigned long flags;
+
+ if (!port->txts_enabled || ines_txts_onestep(port, skb, type)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (port->tx_skb)
+ old_skb = port->tx_skb;
+
+ port->tx_skb = skb;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ kfree_skb(old_skb);
+
+ schedule_delayed_work(&port->ts_work, 1);
+}
+
+static void ines_txtstamp_work(struct work_struct *work)
+{
+ struct ines_port *port =
+ container_of(work, struct ines_port, ts_work.work);
+ struct skb_shared_hwtstamps ssh;
+ struct sk_buff *skb;
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&port->lock, flags);
+ skb = port->tx_skb;
+ port->tx_skb = NULL;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ ns = ines_find_txts(port, skb);
+ if (!ns) {
+ kfree_skb(skb);
+ return;
+ }
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_complete_tx_timestamp(skb, &ssh);
+}
+
+static bool is_sync_pdelay_resp(struct sk_buff *skb, int type)
+{
+ struct ptp_header *hdr;
+ u8 msgtype;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return false;
+
+ msgtype = ptp_get_msgtype(hdr, type);
+
+ switch ((msgtype & 0xf)) {
+ case SYNC:
+ case PDELAY_RESP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u8 tag_to_msgtype(u8 tag)
+{
+ switch (tag) {
+ case MESSAGE_TYPE_SYNC:
+ return SYNC;
+ case MESSAGE_TYPE_P_DELAY_REQ:
+ return PDELAY_REQ;
+ case MESSAGE_TYPE_P_DELAY_RESP:
+ return PDELAY_RESP;
+ case MESSAGE_TYPE_DELAY_REQ:
+ return DELAY_REQ;
+ }
+ return 0xf;
+}
+
+static struct mii_timestamper *ines_ptp_probe_channel(struct device *device,
+ unsigned int index)
+{
+ struct device_node *node = device->of_node;
+ struct ines_port *port;
+
+ if (index > INES_N_PORTS - 1) {
+ dev_err(device, "bad port index %u\n", index);
+ return ERR_PTR(-EINVAL);
+ }
+ port = ines_find_port(node, index);
+ if (!port) {
+ dev_err(device, "missing port index %u\n", index);
+ return ERR_PTR(-ENODEV);
+ }
+ port->mii_ts.rxtstamp = ines_rxtstamp;
+ port->mii_ts.txtstamp = ines_txtstamp;
+ port->mii_ts.hwtstamp = ines_hwtstamp;
+ port->mii_ts.link_state = ines_link_state;
+ port->mii_ts.ts_info = ines_ts_info;
+
+ return &port->mii_ts;
+}
+
+static void ines_ptp_release_channel(struct device *device,
+ struct mii_timestamper *mii_ts)
+{
+}
+
+static struct mii_timestamping_ctrl ines_ctrl = {
+ .probe_channel = ines_ptp_probe_channel,
+ .release_channel = ines_ptp_release_channel,
+};
+
+static int ines_ptp_ctrl_probe(struct platform_device *pld)
+{
+ struct ines_clock *clock;
+ void __iomem *addr;
+ int err = 0;
+
+ addr = devm_platform_ioremap_resource(pld, 0);
+ if (IS_ERR(addr)) {
+ err = PTR_ERR(addr);
+ goto out;
+ }
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ err = -ENOMEM;
+ goto out;
+ }
+ if (ines_clock_init(clock, &pld->dev, addr)) {
+ kfree(clock);
+ err = -ENOMEM;
+ goto out;
+ }
+ err = register_mii_tstamp_controller(&pld->dev, &ines_ctrl);
+ if (err) {
+ kfree(clock);
+ goto out;
+ }
+ mutex_lock(&ines_clocks_lock);
+ list_add_tail(&clock->list, &ines_clocks);
+ mutex_unlock(&ines_clocks_lock);
+
+ dev_set_drvdata(&pld->dev, clock);
+out:
+ return err;
+}
+
+static int ines_ptp_ctrl_remove(struct platform_device *pld)
+{
+ struct ines_clock *clock = dev_get_drvdata(&pld->dev);
+
+ unregister_mii_tstamp_controller(&pld->dev);
+ mutex_lock(&ines_clocks_lock);
+ list_del(&clock->list);
+ mutex_unlock(&ines_clocks_lock);
+ ines_clock_cleanup(clock);
+ kfree(clock);
+ return 0;
+}
+
+static const struct of_device_id ines_ptp_ctrl_of_match[] = {
+ { .compatible = "ines,ptp-ctrl" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ines_ptp_ctrl_of_match);
+
+static struct platform_driver ines_ptp_ctrl_driver = {
+ .probe = ines_ptp_ctrl_probe,
+ .remove = ines_ptp_ctrl_remove,
+ .driver = {
+ .name = "ines_ptp_ctrl",
+ .of_match_table = of_match_ptr(ines_ptp_ctrl_of_match),
+ },
+};
+module_platform_driver(ines_ptp_ctrl_driver);
diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c
new file mode 100644
index 000000000..658d33fc3
--- /dev/null
+++ b/drivers/ptp/ptp_kvm.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtual PTP 1588 clock for use with KVM guests
+ *
+ * Copyright (C) 2017 Red Hat Inc.
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <uapi/linux/kvm_para.h>
+#include <asm/kvm_para.h>
+#include <asm/pvclock.h>
+#include <asm/kvmclock.h>
+#include <uapi/asm/kvm_para.h>
+
+#include <linux/ptp_clock_kernel.h>
+
+struct kvm_ptp_clock {
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info caps;
+};
+
+static DEFINE_SPINLOCK(kvm_ptp_lock);
+
+static struct pvclock_vsyscall_time_info *hv_clock;
+
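+/*
+ * Scratch buffer that the host fills in via the KVM_HC_CLOCK_PAIRING
+ * hypercall; clock_pair_gpa holds its guest-physical address.
+ */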
+static struct kvm_clock_pairing clock_pair;
+static phys_addr_t clock_pair_gpa;
+
+static int ptp_kvm_get_time_fn(ktime_t *device_time,
+ struct system_counterval_t *system_counter,
+ void *ctx)
+{
+ unsigned long ret;
+ struct timespec64 tspec;
+ unsigned version;
+ int cpu;
+ struct pvclock_vcpu_time_info *src;
+
+ spin_lock(&kvm_ptp_lock);
+
+ preempt_disable_notrace();
+ cpu = smp_processor_id();
+ src = &hv_clock[cpu].pvti;
+
+ do {
+ /*
+ * We are using a TSC value read in the host's
+ * kvm_hc_clock_pairing handling, so any changes to
+ * tsc_to_system_mul, tsc_shift or any other pvclock
+ * data invalidate that measurement.
+ */
+ version = pvclock_read_begin(src);
+
+ ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
+ clock_pair_gpa,
+ KVM_CLOCK_PAIRING_WALLCLOCK);
+ if (ret != 0) {
+ pr_err_ratelimited("clock pairing hypercall ret %lu\n", ret);
+ spin_unlock(&kvm_ptp_lock);
+ preempt_enable_notrace();
+ return -EOPNOTSUPP;
+ }
+
+ tspec.tv_sec = clock_pair.sec;
+ tspec.tv_nsec = clock_pair.nsec;
+ ret = __pvclock_read_cycles(src, clock_pair.tsc);
+ } while (pvclock_read_retry(src, version));
+
+ preempt_enable_notrace();
+
+ system_counter->cycles = ret;
+ system_counter->cs = &kvm_clock;
+
+ *device_time = timespec64_to_ktime(tspec);
+
+ spin_unlock(&kvm_ptp_lock);
+
+ return 0;
+}
+
+static int ptp_kvm_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *xtstamp)
+{
+ return get_device_system_crosststamp(ptp_kvm_get_time_fn, NULL,
+ NULL, xtstamp);
+}
+
+/*
+ * PTP clock operations
+ */
+
+static int ptp_kvm_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_kvm_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_kvm_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_kvm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ unsigned long ret;
+ struct timespec64 tspec;
+
+ spin_lock(&kvm_ptp_lock);
+
+ ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
+ clock_pair_gpa,
+ KVM_CLOCK_PAIRING_WALLCLOCK);
+ if (ret != 0) {
+ pr_err_ratelimited("clock offset hypercall ret %lu\n", ret);
+ spin_unlock(&kvm_ptp_lock);
+ return -EOPNOTSUPP;
+ }
+
+ tspec.tv_sec = clock_pair.sec;
+ tspec.tv_nsec = clock_pair.nsec;
+ spin_unlock(&kvm_ptp_lock);
+
+ memcpy(ts, &tspec, sizeof(struct timespec64));
+
+ return 0;
+}
+
+static int ptp_kvm_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info ptp_kvm_caps = {
+ .owner = THIS_MODULE,
+ .name = "KVM virtual PTP",
+ .max_adj = 0,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = ptp_kvm_adjfreq,
+ .adjtime = ptp_kvm_adjtime,
+ .gettime64 = ptp_kvm_gettime,
+ .settime64 = ptp_kvm_settime,
+ .enable = ptp_kvm_enable,
+ .getcrosststamp = ptp_kvm_getcrosststamp,
+};
+
+/* module operations */
+
+static struct kvm_ptp_clock kvm_ptp_clock;
+
+static void __exit ptp_kvm_exit(void)
+{
+ ptp_clock_unregister(kvm_ptp_clock.ptp_clock);
+}
+
+static int __init ptp_kvm_init(void)
+{
+ long ret;
+
+ if (!kvm_para_available())
+ return -ENODEV;
+
+ clock_pair_gpa = slow_virt_to_phys(&clock_pair);
+ hv_clock = pvclock_get_pvti_cpu0_va();
+
+ if (!hv_clock)
+ return -ENODEV;
+
+ ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
+ KVM_CLOCK_PAIRING_WALLCLOCK);
+ if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
+ return -ENODEV;
+
+ kvm_ptp_clock.caps = ptp_kvm_caps;
+
+ kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL);
+
+ return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock);
+}
+
+module_init(ptp_kvm_init);
+module_exit(ptp_kvm_exit);
+
+MODULE_AUTHOR("Marcelo Tosatti <mtosatti@redhat.com>");
+MODULE_DESCRIPTION("PTP clock using KVMCLOCK");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
new file mode 100644
index 000000000..9492ed095
--- /dev/null
+++ b/drivers/ptp/ptp_pch.c
@@ -0,0 +1,691 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PTP 1588 clock using the EG20T PCH
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ * Copyright (C) 2011-2012 LAPIS SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the IXP46X driver.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/slab.h>
+
+#define STATION_ADDR_LEN 20
+#define PCI_DEVICE_ID_PCH_1588 0x8819
+#define IO_MEM_BAR 1
+
+#define DEFAULT_ADDEND 0xA0000000
+#define TICKS_NS_SHIFT 5
+#define N_EXT_TS 2
+
+enum pch_status {
+ PCH_SUCCESS,
+ PCH_INVALIDPARAM,
+ PCH_NOTIMESTAMP,
+ PCH_INTERRUPTMODEINUSE,
+ PCH_FAILED,
+ PCH_UNSUPPORTED,
+};
+/**
+ * struct pch_ts_regs - IEEE 1588 registers
+ */
+struct pch_ts_regs {
+ u32 control;
+ u32 event;
+ u32 addend;
+ u32 accum;
+ u32 test;
+ u32 ts_compare;
+ u32 rsystime_lo;
+ u32 rsystime_hi;
+ u32 systime_lo;
+ u32 systime_hi;
+ u32 trgt_lo;
+ u32 trgt_hi;
+ u32 asms_lo;
+ u32 asms_hi;
+ u32 amms_lo;
+ u32 amms_hi;
+ u32 ch_control;
+ u32 ch_event;
+ u32 tx_snap_lo;
+ u32 tx_snap_hi;
+ u32 rx_snap_lo;
+ u32 rx_snap_hi;
+ u32 src_uuid_lo;
+ u32 src_uuid_hi;
+ u32 can_status;
+ u32 can_snap_lo;
+ u32 can_snap_hi;
+ u32 ts_sel;
+ u32 ts_st[6];
+ u32 reserve1[14];
+ u32 stl_max_set_en;
+ u32 stl_max_set;
+ u32 reserve2[13];
+ u32 srst;
+};
+
+#define PCH_TSC_RESET (1 << 0)
+#define PCH_TSC_TTM_MASK (1 << 1)
+#define PCH_TSC_ASMS_MASK (1 << 2)
+#define PCH_TSC_AMMS_MASK (1 << 3)
+#define PCH_TSC_PPSM_MASK (1 << 4)
+#define PCH_TSE_TTIPEND (1 << 1)
+#define PCH_TSE_SNS (1 << 2)
+#define PCH_TSE_SNM (1 << 3)
+#define PCH_TSE_PPS (1 << 4)
+#define PCH_CC_MM (1 << 0)
+#define PCH_CC_TA (1 << 1)
+
+#define PCH_CC_MODE_SHIFT 16
+#define PCH_CC_MODE_MASK 0x001F0000
+#define PCH_CC_VERSION (1 << 31)
+#define PCH_CE_TXS (1 << 0)
+#define PCH_CE_RXS (1 << 1)
+#define PCH_CE_OVR (1 << 0)
+#define PCH_CE_VAL (1 << 1)
+#define PCH_ECS_ETH (1 << 0)
+
+#define PCH_ECS_CAN (1 << 1)
+#define PCH_STATION_BYTES 6
+
+#define PCH_IEEE1588_ETH (1 << 0)
+#define PCH_IEEE1588_CAN (1 << 1)
+/**
+ * struct pch_dev - Driver private data
+ */
+struct pch_dev {
+ struct pch_ts_regs __iomem *regs;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info caps;
+ int exts0_enabled;
+ int exts1_enabled;
+
+ u32 mem_base;
+ u32 mem_size;
+ u32 irq;
+ struct pci_dev *pdev;
+ spinlock_t register_lock;
+};
+
+/**
+ * struct pch_params - 1588 module parameter
+ */
+struct pch_params {
+ u8 station[STATION_ADDR_LEN];
+};
+
+/* structure to hold the module parameters */
+static struct pch_params pch_param = {
+ "00:00:00:00:00:00"
+};
+
+/*
+ * Register access functions
+ */
+static inline void pch_eth_enable_set(struct pch_dev *chip)
+{
+ u32 val;
+ /* SET the eth_enable bit */
+ val = ioread32(&chip->regs->ts_sel) | (PCH_ECS_ETH);
+ iowrite32(val, (&chip->regs->ts_sel));
+}
+
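+/*
+ * The driver treats one system-time count as 2^TICKS_NS_SHIFT (32) ns;
+ * these helpers convert between the raw counter value and nanoseconds.
+ */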
+static u64 pch_systime_read(struct pch_ts_regs __iomem *regs)
+{
+ u64 ns;
+ u32 lo, hi;
+
+ lo = ioread32(&regs->systime_lo);
+ hi = ioread32(&regs->systime_hi);
+
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+ ns <<= TICKS_NS_SHIFT;
+
+ return ns;
+}
+
+static void pch_systime_write(struct pch_ts_regs __iomem *regs, u64 ns)
+{
+ u32 hi, lo;
+
+ ns >>= TICKS_NS_SHIFT;
+ hi = ns >> 32;
+ lo = ns & 0xffffffff;
+
+ iowrite32(lo, &regs->systime_lo);
+ iowrite32(hi, &regs->systime_hi);
+}
+
+static inline void pch_block_reset(struct pch_dev *chip)
+{
+ u32 val;
+ /* Reset Hardware Assist block */
+ val = ioread32(&chip->regs->control) | PCH_TSC_RESET;
+ iowrite32(val, (&chip->regs->control));
+ val = val & ~PCH_TSC_RESET;
+ iowrite32(val, (&chip->regs->control));
+}
+
+u32 pch_ch_control_read(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+ u32 val;
+
+ val = ioread32(&chip->regs->ch_control);
+
+ return val;
+}
+EXPORT_SYMBOL(pch_ch_control_read);
+
+void pch_ch_control_write(struct pci_dev *pdev, u32 val)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+
+ iowrite32(val, (&chip->regs->ch_control));
+}
+EXPORT_SYMBOL(pch_ch_control_write);
+
+u32 pch_ch_event_read(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+ u32 val;
+
+ val = ioread32(&chip->regs->ch_event);
+
+ return val;
+}
+EXPORT_SYMBOL(pch_ch_event_read);
+
+void pch_ch_event_write(struct pci_dev *pdev, u32 val)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+
+ iowrite32(val, (&chip->regs->ch_event));
+}
+EXPORT_SYMBOL(pch_ch_event_write);
+
+u32 pch_src_uuid_lo_read(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+ u32 val;
+
+ val = ioread32(&chip->regs->src_uuid_lo);
+
+ return val;
+}
+EXPORT_SYMBOL(pch_src_uuid_lo_read);
+
+u32 pch_src_uuid_hi_read(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+ u32 val;
+
+ val = ioread32(&chip->regs->src_uuid_hi);
+
+ return val;
+}
+EXPORT_SYMBOL(pch_src_uuid_hi_read);
+
+u64 pch_rx_snap_read(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+ u64 ns;
+ u32 lo, hi;
+
+ lo = ioread32(&chip->regs->rx_snap_lo);
+ hi = ioread32(&chip->regs->rx_snap_hi);
+
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+ ns <<= TICKS_NS_SHIFT;
+
+ return ns;
+}
+EXPORT_SYMBOL(pch_rx_snap_read);
+
+u64 pch_tx_snap_read(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+ u64 ns;
+ u32 lo, hi;
+
+ lo = ioread32(&chip->regs->tx_snap_lo);
+ hi = ioread32(&chip->regs->tx_snap_hi);
+
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+ ns <<= TICKS_NS_SHIFT;
+
+ return ns;
+}
+EXPORT_SYMBOL(pch_tx_snap_read);
+
+/*
+ * This function enables all 64 bits in the system time registers [high & low].
+ * This is a workaround for the non-continuous value in the SystemTime register.
+ */
+static void pch_set_system_time_count(struct pch_dev *chip)
+{
+ iowrite32(0x01, &chip->regs->stl_max_set_en);
+ iowrite32(0xFFFFFFFF, &chip->regs->stl_max_set);
+ iowrite32(0x00, &chip->regs->stl_max_set_en);
+}
+
+static void pch_reset(struct pch_dev *chip)
+{
+ /* Reset Hardware Assist */
+ pch_block_reset(chip);
+
+ /* enable all 64 bits in the system time registers */
+ pch_set_system_time_count(chip);
+}
+
+/**
+ * pch_set_station_address() - Set the station address used by the IEEE 1588
+ * hardware when looking at PTP traffic on the
+ * ethernet interface
+ * @addr: colon-separated MAC address string to be used (e.g. "00:11:22:33:44:55")
+ * @pdev: PCI device providing the IEEE 1588 registers
+ */
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
+{
+ s32 i;
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+
+ /* Verify the parameter */
+ if ((chip->regs == NULL) || addr == (u8 *)NULL) {
+ dev_err(&pdev->dev,
+ "invalid params returning PCH_INVALIDPARAM\n");
+ return PCH_INVALIDPARAM;
+ }
+ /* For all station address bytes */
+ for (i = 0; i < PCH_STATION_BYTES; i++) {
+ u32 val;
+ s32 tmp;
+
+ tmp = hex_to_bin(addr[i * 3]);
+ if (tmp < 0) {
+ dev_err(&pdev->dev,
+ "invalid params returning PCH_INVALIDPARAM\n");
+ return PCH_INVALIDPARAM;
+ }
+ val = tmp * 16;
+ tmp = hex_to_bin(addr[(i * 3) + 1]);
+ if (tmp < 0) {
+ dev_err(&pdev->dev,
+ "invalid params returning PCH_INVALIDPARAM\n");
+ return PCH_INVALIDPARAM;
+ }
+ val += tmp;
+ /* Expects ':' separated addresses */
+ if ((i < 5) && (addr[(i * 3) + 2] != ':')) {
+ dev_err(&pdev->dev,
+ "invalid params returning PCH_INVALIDPARAM\n");
+ return PCH_INVALIDPARAM;
+ }
+
+ /*
+ * Ideally we should set the address only after validating the
+ * entire string.
+ */
+ dev_dbg(&pdev->dev, "invoking pch_station_set\n");
+ iowrite32(val, &chip->regs->ts_st[i]);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(pch_set_station_address);
+
+/*
+ * Interrupt service routine
+ */
+static irqreturn_t isr(int irq, void *priv)
+{
+ struct pch_dev *pch_dev = priv;
+ struct pch_ts_regs __iomem *regs = pch_dev->regs;
+ struct ptp_clock_event event;
+ u32 ack = 0, lo, hi, val;
+
+ val = ioread32(&regs->event);
+
+ if (val & PCH_TSE_SNS) {
+ ack |= PCH_TSE_SNS;
+ if (pch_dev->exts0_enabled) {
+ hi = ioread32(&regs->asms_hi);
+ lo = ioread32(&regs->asms_lo);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ((u64) hi) << 32;
+ event.timestamp |= lo;
+ event.timestamp <<= TICKS_NS_SHIFT;
+ ptp_clock_event(pch_dev->ptp_clock, &event);
+ }
+ }
+
+ if (val & PCH_TSE_SNM) {
+ ack |= PCH_TSE_SNM;
+ if (pch_dev->exts1_enabled) {
+ hi = ioread32(&regs->amms_hi);
+ lo = ioread32(&regs->amms_lo);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 1;
+ event.timestamp = ((u64) hi) << 32;
+ event.timestamp |= lo;
+ event.timestamp <<= TICKS_NS_SHIFT;
+ ptp_clock_event(pch_dev->ptp_clock, &event);
+ }
+ }
+
+ if (val & PCH_TSE_TTIPEND)
+ ack |= PCH_TSE_TTIPEND; /* this bit seems to be always set */
+
+ if (ack) {
+ iowrite32(ack, &regs->event);
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+/*
+ * PTP clock operations
+ */
+
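+/*
+ * Frequency is adjusted by scaling the addend register: the new value is
+ * DEFAULT_ADDEND +/- DEFAULT_ADDEND * |ppb| / 10^9, which makes the
+ * hardware accumulator run proportionally faster or slower.
+ */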
+static int ptp_pch_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u64 adj;
+ u32 diff, addend;
+ int neg_adj = 0;
+ struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
+ struct pch_ts_regs __iomem *regs = pch_dev->regs;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+ addend = DEFAULT_ADDEND;
+ adj = addend;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+
+ addend = neg_adj ? addend - diff : addend + diff;
+
+ iowrite32(addend, &regs->addend);
+
+ return 0;
+}
+
+static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ s64 now;
+ unsigned long flags;
+ struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
+ struct pch_ts_regs __iomem *regs = pch_dev->regs;
+
+ spin_lock_irqsave(&pch_dev->register_lock, flags);
+ now = pch_systime_read(regs);
+ now += delta;
+ pch_systime_write(regs, now);
+ spin_unlock_irqrestore(&pch_dev->register_lock, flags);
+
+ return 0;
+}
+
+static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
+ struct pch_ts_regs __iomem *regs = pch_dev->regs;
+
+ spin_lock_irqsave(&pch_dev->register_lock, flags);
+ ns = pch_systime_read(regs);
+ spin_unlock_irqrestore(&pch_dev->register_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+ return 0;
+}
+
+static int ptp_pch_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
+ struct pch_ts_regs __iomem *regs = pch_dev->regs;
+
+ ns = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&pch_dev->register_lock, flags);
+ pch_systime_write(regs, ns);
+ spin_unlock_irqrestore(&pch_dev->register_lock, flags);
+
+ return 0;
+}
+
+static int ptp_pch_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ switch (rq->extts.index) {
+ case 0:
+ pch_dev->exts0_enabled = on ? 1 : 0;
+ break;
+ case 1:
+ pch_dev->exts1_enabled = on ? 1 : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info ptp_pch_caps = {
+ .owner = THIS_MODULE,
+ .name = "PCH timer",
+ .max_adj = 50000000,
+ .n_ext_ts = N_EXT_TS,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = ptp_pch_adjfreq,
+ .adjtime = ptp_pch_adjtime,
+ .gettime64 = ptp_pch_gettime,
+ .settime64 = ptp_pch_settime,
+ .enable = ptp_pch_enable,
+};
+
+#define pch_suspend NULL
+#define pch_resume NULL
+
+static void pch_remove(struct pci_dev *pdev)
+{
+ struct pch_dev *chip = pci_get_drvdata(pdev);
+
+ ptp_clock_unregister(chip->ptp_clock);
+ /* free the interrupt */
+ if (pdev->irq != 0)
+ free_irq(pdev->irq, chip);
+
+ /* unmap the virtual IO memory space */
+ if (chip->regs != NULL) {
+ iounmap(chip->regs);
+ chip->regs = NULL;
+ }
+ /* release the reserved IO memory space */
+ if (chip->mem_base != 0) {
+ release_mem_region(chip->mem_base, chip->mem_size);
+ chip->mem_base = 0;
+ }
+ pci_disable_device(pdev);
+ kfree(chip);
+ dev_info(&pdev->dev, "complete\n");
+}
+
+static s32
+pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ s32 ret;
+ unsigned long flags;
+ struct pch_dev *chip;
+
+ chip = kzalloc(sizeof(struct pch_dev), GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+
+ /* enable the 1588 pci device */
+ ret = pci_enable_device(pdev);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "could not enable the pci device\n");
+ goto err_pci_en;
+ }
+
+ chip->mem_base = pci_resource_start(pdev, IO_MEM_BAR);
+ if (!chip->mem_base) {
+ dev_err(&pdev->dev, "could not locate IO memory address\n");
+ ret = -ENODEV;
+ goto err_pci_start;
+ }
+
+ /* retrieve the available length of the IO memory space */
+ chip->mem_size = pci_resource_len(pdev, IO_MEM_BAR);
+
+ /* allocate the memory for the device registers */
+ if (!request_mem_region(chip->mem_base, chip->mem_size, "1588_regs")) {
+ dev_err(&pdev->dev,
+ "could not allocate register memory space\n");
+ ret = -EBUSY;
+ goto err_req_mem_region;
+ }
+
+ /* get the virtual address to the 1588 registers */
+ chip->regs = ioremap(chip->mem_base, chip->mem_size);
+
+ if (!chip->regs) {
+ dev_err(&pdev->dev, "Could not get virtual address\n");
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ chip->caps = ptp_pch_caps;
+ chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev);
+ if (IS_ERR(chip->ptp_clock)) {
+ ret = PTR_ERR(chip->ptp_clock);
+ goto err_ptp_clock_reg;
+ }
+
+ spin_lock_init(&chip->register_lock);
+
+ ret = request_irq(pdev->irq, &isr, IRQF_SHARED, KBUILD_MODNAME, chip);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "failed to get irq %d\n", pdev->irq);
+ goto err_req_irq;
+ }
+
+ /* save the irq and pci device, and publish the driver data */
+ chip->irq = pdev->irq;
+ chip->pdev = pdev;
+ pci_set_drvdata(pdev, chip);
+
+ spin_lock_irqsave(&chip->register_lock, flags);
+ /* reset the ieee1588 h/w */
+ pch_reset(chip);
+
+ iowrite32(DEFAULT_ADDEND, &chip->regs->addend);
+ iowrite32(1, &chip->regs->trgt_lo);
+ iowrite32(0, &chip->regs->trgt_hi);
+ iowrite32(PCH_TSE_TTIPEND, &chip->regs->event);
+
+ pch_eth_enable_set(chip);
+
+ if (strcmp(pch_param.station, "00:00:00:00:00:00") != 0) {
+ if (pch_set_station_address(pch_param.station, pdev) != 0) {
+ dev_err(&pdev->dev,
+ "Invalid station address parameter\n"
+ "Module loaded but station address not set correctly\n"
+ );
+ }
+ }
+ spin_unlock_irqrestore(&chip->register_lock, flags);
+ return 0;
+
+err_req_irq:
+ ptp_clock_unregister(chip->ptp_clock);
+err_ptp_clock_reg:
+ iounmap(chip->regs);
+ chip->regs = NULL;
+
+err_ioremap:
+ release_mem_region(chip->mem_base, chip->mem_size);
+
+err_req_mem_region:
+ chip->mem_base = 0;
+
+err_pci_start:
+ pci_disable_device(pdev);
+
+err_pci_en:
+ kfree(chip);
+ dev_err(&pdev->dev, "probe failed(ret=0x%x)\n", ret);
+
+ return ret;
+}
+
+static const struct pci_device_id pch_ieee1588_pcidev_id[] = {
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_PCH_1588
+ },
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, pch_ieee1588_pcidev_id);
+
+static SIMPLE_DEV_PM_OPS(pch_pm_ops, pch_suspend, pch_resume);
+
+static struct pci_driver pch_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pch_ieee1588_pcidev_id,
+ .probe = pch_probe,
+ .remove = pch_remove,
+ .driver.pm = &pch_pm_ops,
+};
+
+static void __exit ptp_pch_exit(void)
+{
+ pci_unregister_driver(&pch_driver);
+}
+
+static s32 __init ptp_pch_init(void)
+{
+ s32 ret;
+
+ /* register the driver with the pci core */
+ ret = pci_register_driver(&pch_driver);
+
+ return ret;
+}
+
+module_init(ptp_pch_init);
+module_exit(ptp_pch_exit);
+
+module_param_string(station,
+ pch_param.station, sizeof(pch_param.station), 0444);
+MODULE_PARM_DESC(station,
+ "IEEE 1588 station address to use - colon separated hex values");
+
+MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
+MODULE_DESCRIPTION("PTP clock using the EG20T timer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
new file mode 100644
index 000000000..d2cb95670
--- /dev/null
+++ b/drivers/ptp/ptp_private.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PTP 1588 clock support - private declarations for the core module.
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ */
+#ifndef _PTP_PRIVATE_H_
+#define _PTP_PRIVATE_H_
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/posix-clock.h>
+#include <linux/ptp_clock.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/time.h>
+
+#define PTP_MAX_TIMESTAMPS 128
+#define PTP_BUF_TIMESTAMPS 30
+
+struct timestamp_event_queue {
+ struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS];
+ int head;
+ int tail;
+ spinlock_t lock;
+};
+
+struct ptp_clock {
+ struct posix_clock clock;
+ struct device dev;
+ struct ptp_clock_info *info;
+ dev_t devid;
+ int index; /* index into clocks.map */
+ struct pps_device *pps_source;
+ long dialed_frequency; /* remembers the frequency adjustment */
+ struct timestamp_event_queue tsevq; /* simple fifo for time stamps */
+ struct mutex tsevq_mux; /* one process at a time reading the fifo */
+ struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
+ wait_queue_head_t tsev_wq;
+ int defunct; /* tells readers to go away when clock is being removed */
+ struct device_attribute *pin_dev_attr;
+ struct attribute **pin_attr;
+ struct attribute_group pin_attr_group;
+ /* 1st entry is a pointer to the real group, 2nd is NULL terminator */
+ const struct attribute_group *pin_attr_groups[2];
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work aux_work;
+};
+
+/*
+ * The function queue_cnt() is safe for readers to call without
+ * holding q->lock. Readers use this function to verify that the queue
+ * is nonempty before proceeding with a dequeue operation. The fact
+ * that a writer might concurrently increment the tail does not
+ * matter, since the queue remains nonempty nonetheless.
+ */
+static inline int queue_cnt(const struct timestamp_event_queue *q)
+{
+ /*
+ * Paired with WRITE_ONCE() in enqueue_external_timestamp(),
+ * ptp_read(), extts_fifo_show().
+ */
+ int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);
+ return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
+}
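+/*
+ * Wraparound example with PTP_MAX_TIMESTAMPS = 128: head = 126 and
+ * tail = 2 give cnt = 2 - 126 = -124, corrected to -124 + 128 = 4,
+ * i.e. the four entries at indices 126, 127, 0 and 1.
+ */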
+
+/*
+ * see ptp_chardev.c
+ */
+
+/* caller must hold pincfg_mux */
+int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan);
+
+long ptp_ioctl(struct posix_clock *pc,
+ unsigned int cmd, unsigned long arg);
+
+int ptp_open(struct posix_clock *pc, fmode_t fmode);
+
+ssize_t ptp_read(struct posix_clock *pc,
+ uint flags, char __user *buf, size_t cnt);
+
+__poll_t ptp_poll(struct posix_clock *pc,
+ struct file *fp, poll_table *wait);
+
+/*
+ * see ptp_sysfs.c
+ */
+
+extern const struct attribute_group *ptp_groups[];
+
+int ptp_populate_pin_groups(struct ptp_clock *ptp);
+void ptp_cleanup_pin_groups(struct ptp_clock *ptp);
+
+#endif
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
new file mode 100644
index 000000000..8fa9772ac
--- /dev/null
+++ b/drivers/ptp/ptp_qoriq.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PTP 1588 clock for Freescale QorIQ 1588 timer
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/timex.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include <linux/fsl/ptp_qoriq.h>
+
+/*
+ * Register access functions
+ */
+
+/* Caller must hold ptp_qoriq->lock. */
+static u64 tmr_cnt_read(struct ptp_qoriq *ptp_qoriq)
+{
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u64 ns;
+ u32 lo, hi;
+
+ lo = ptp_qoriq->read(&regs->ctrl_regs->tmr_cnt_l);
+ hi = ptp_qoriq->read(&regs->ctrl_regs->tmr_cnt_h);
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+ return ns;
+}
+
+/* Caller must hold ptp_qoriq->lock. */
+static void tmr_cnt_write(struct ptp_qoriq *ptp_qoriq, u64 ns)
+{
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 hi = ns >> 32;
+ u32 lo = ns & 0xffffffff;
+
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_cnt_l, lo);
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_cnt_h, hi);
+}
+
+/* Caller must hold ptp_qoriq->lock. */
+static void set_alarm(struct ptp_qoriq *ptp_qoriq)
+{
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u64 ns;
+ u32 lo, hi;
+
+ ns = tmr_cnt_read(ptp_qoriq) + 1500000000ULL;
+ ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
+ ns -= ptp_qoriq->tclk_period;
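+ /*
+ * e.g. with the counter at 10.3 s: 10.3 + 1.5 = 11.8 s, floored to
+ * 11.0 s, minus one tclk_period; the alarm fires one timer tick
+ * before the next full second that is at least 0.5 s away.
+ */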
+ hi = ns >> 32;
+ lo = ns & 0xffffffff;
+ ptp_qoriq->write(&regs->alarm_regs->tmr_alarm1_l, lo);
+ ptp_qoriq->write(&regs->alarm_regs->tmr_alarm1_h, hi);
+}
+
+/* Caller must hold ptp_qoriq->lock. */
+static void set_fipers(struct ptp_qoriq *ptp_qoriq)
+{
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+
+ set_alarm(ptp_qoriq);
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1);
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
+
+ if (ptp_qoriq->fiper3_support)
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper3,
+ ptp_qoriq->tmr_fiper3);
+}
+
+int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event)
+{
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ struct ptp_clock_event event;
+ void __iomem *reg_etts_l;
+ void __iomem *reg_etts_h;
+ u32 valid, lo, hi;
+
+ switch (index) {
+ case 0:
+ valid = ETS1_VLD;
+ reg_etts_l = &regs->etts_regs->tmr_etts1_l;
+ reg_etts_h = &regs->etts_regs->tmr_etts1_h;
+ break;
+ case 1:
+ valid = ETS2_VLD;
+ reg_etts_l = &regs->etts_regs->tmr_etts2_l;
+ reg_etts_h = &regs->etts_regs->tmr_etts2_h;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = index;
+
+ if (ptp_qoriq->extts_fifo_support)
+ if (!(ptp_qoriq->read(&regs->ctrl_regs->tmr_stat) & valid))
+ return 0;
+
+ do {
+ lo = ptp_qoriq->read(reg_etts_l);
+ hi = ptp_qoriq->read(reg_etts_h);
+
+ if (update_event) {
+ event.timestamp = ((u64) hi) << 32;
+ event.timestamp |= lo;
+ ptp_clock_event(ptp_qoriq->clock, &event);
+ }
+
+ if (!ptp_qoriq->extts_fifo_support)
+ break;
+ } while (ptp_qoriq->read(&regs->ctrl_regs->tmr_stat) & valid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(extts_clean_up);
+
+/*
+ * Interrupt service routine
+ */
+
+irqreturn_t ptp_qoriq_isr(int irq, void *priv)
+{
+ struct ptp_qoriq *ptp_qoriq = priv;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ struct ptp_clock_event event;
+ u32 ack = 0, mask, val, irqs;
+
+ spin_lock(&ptp_qoriq->lock);
+
+ val = ptp_qoriq->read(&regs->ctrl_regs->tmr_tevent);
+ mask = ptp_qoriq->read(&regs->ctrl_regs->tmr_temask);
+
+ spin_unlock(&ptp_qoriq->lock);
+
+ irqs = val & mask;
+
+ if (irqs & ETS1) {
+ ack |= ETS1;
+ extts_clean_up(ptp_qoriq, 0, true);
+ }
+
+ if (irqs & ETS2) {
+ ack |= ETS2;
+ extts_clean_up(ptp_qoriq, 1, true);
+ }
+
+ if (irqs & PP1) {
+ ack |= PP1;
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(ptp_qoriq->clock, &event);
+ }
+
+ if (ack) {
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_tevent, ack);
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+EXPORT_SYMBOL_GPL(ptp_qoriq_isr);
+
+/*
+ * PTP clock operations
+ */
+
+int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ u64 adj, diff;
+ u32 tmr_add;
+ int neg_adj = 0;
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+ tmr_add = ptp_qoriq->tmr_add;
+ adj = tmr_add;
+
+ /*
+ * Calculate diff and round() to the nearest integer
+ *
+ * diff = adj * (ppb / 1000000000)
+ * = adj * scaled_ppm / 65536000000
+ */
+ diff = mul_u64_u64_div_u64(adj, scaled_ppm, 32768000000);
+ diff = DIV64_U64_ROUND_UP(diff, 2);
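+ /*
+ * e.g. scaled_ppm = 65536 (exactly 1 ppm, since scaled_ppm carries
+ * 16 fractional bits): diff = adj * 65536 / 32768000000 = adj / 500000,
+ * which is then halved with rounding up, roughly adj / 1000000.
+ */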
+
+ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_add, tmr_add);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ptp_qoriq_adjfine);
+
+int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ s64 now;
+ unsigned long flags;
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+
+ spin_lock_irqsave(&ptp_qoriq->lock, flags);
+
+ now = tmr_cnt_read(ptp_qoriq);
+ now += delta;
+ tmr_cnt_write(ptp_qoriq, now);
+ set_fipers(ptp_qoriq);
+
+ spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ptp_qoriq_adjtime);
+
+int ptp_qoriq_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+
+ spin_lock_irqsave(&ptp_qoriq->lock, flags);
+
+ ns = tmr_cnt_read(ptp_qoriq);
+
+ spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ptp_qoriq_gettime);
+
+int ptp_qoriq_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+
+ ns = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&ptp_qoriq->lock, flags);
+
+ tmr_cnt_write(ptp_qoriq, ns);
+ set_fipers(ptp_qoriq);
+
+ spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ptp_qoriq_settime);
+
+int ptp_qoriq_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ unsigned long flags;
+ u32 bit, mask = 0;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ switch (rq->extts.index) {
+ case 0:
+ bit = ETS1EN;
+ break;
+ case 1:
+ bit = ETS2EN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (on)
+ extts_clean_up(ptp_qoriq, rq->extts.index, false);
+
+ break;
+ case PTP_CLK_REQ_PPS:
+ bit = PP1EN;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irqsave(&ptp_qoriq->lock, flags);
+
+ mask = ptp_qoriq->read(&regs->ctrl_regs->tmr_temask);
+ if (on) {
+ mask |= bit;
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_tevent, bit);
+ } else {
+ mask &= ~bit;
+ }
+
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_temask, mask);
+
+ spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ptp_qoriq_enable);
+
+static const struct ptp_clock_info ptp_qoriq_caps = {
+ .owner = THIS_MODULE,
+ .name = "qoriq ptp clock",
+ .max_adj = 512000,
+ .n_alarm = 0,
+ .n_ext_ts = N_EXT_TS,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 1,
+ .adjfine = ptp_qoriq_adjfine,
+ .adjtime = ptp_qoriq_adjtime,
+ .gettime64 = ptp_qoriq_gettime,
+ .settime64 = ptp_qoriq_settime,
+ .enable = ptp_qoriq_enable,
+};
+
+/**
+ * ptp_qoriq_nominal_freq - calculate nominal frequency according to
+ * reference clock frequency
+ *
+ * @clk_src: reference clock frequency
+ *
+ * The nominal frequency is the desired clock frequency.
+ * It should be less than the reference clock frequency.
+ * It should be a factor of 1000 MHz, i.e. divide it evenly.
+ *
+ * Return the nominal frequency
+ */
+static u32 ptp_qoriq_nominal_freq(u32 clk_src)
+{
+ u32 remainder = 0;
+
+ clk_src /= 1000000;
+ remainder = clk_src % 100;
+ if (remainder) {
+ clk_src -= remainder;
+ clk_src += 100;
+ }
+
+ do {
+ clk_src -= 100;
+
+ } while (1000 % clk_src);
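+ /*
+ * e.g. clk_src = 600 MHz: 600 has no remainder mod 100, the loop
+ * first tries 500, and since 1000 % 500 == 0 the function returns
+ * 500 MHz.
+ */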
+
+ return clk_src * 1000000;
+}
+
+/**
+ * ptp_qoriq_auto_config - calculate a set of default configurations
+ *
+ * @ptp_qoriq: pointer to ptp_qoriq
+ * @node: pointer to device_node
+ *
+ * If the dts properties listed below are not provided, this function
+ * is called to calculate a set of default configurations for them.
+ * "fsl,tclk-period"
+ * "fsl,tmr-prsc"
+ * "fsl,tmr-add"
+ * "fsl,tmr-fiper1"
+ * "fsl,tmr-fiper2"
+ * "fsl,tmr-fiper3" (required only for DPAA2 and ENETC hardware)
+ * "fsl,max-adj"
+ *
+ * Return 0 on success
+ */
+static int ptp_qoriq_auto_config(struct ptp_qoriq *ptp_qoriq,
+ struct device_node *node)
+{
+ struct clk *clk;
+ u64 freq_comp;
+ u64 max_adj;
+ u32 nominal_freq;
+ u32 remainder = 0;
+ u32 clk_src = 0;
+
+ ptp_qoriq->cksel = DEFAULT_CKSEL;
+
+ clk = of_clk_get(node, 0);
+ if (!IS_ERR(clk)) {
+ clk_src = clk_get_rate(clk);
+ clk_put(clk);
+ }
+
+ if (clk_src <= 100000000UL) {
+ pr_err("error reference clock value, or lower than 100MHz\n");
+ return -EINVAL;
+ }
+
+ nominal_freq = ptp_qoriq_nominal_freq(clk_src);
+ if (!nominal_freq)
+ return -EINVAL;
+
+ ptp_qoriq->tclk_period = 1000000000UL / nominal_freq;
+ ptp_qoriq->tmr_prsc = DEFAULT_TMR_PRSC;
+
+ /* Calculate initial frequency compensation value for TMR_ADD register.
+ * freq_comp = ceil(2^32 / freq_ratio)
+ * freq_ratio = reference_clock_freq / nominal_freq
+ */
+ freq_comp = ((u64)1 << 32) * nominal_freq;
+ freq_comp = div_u64_rem(freq_comp, clk_src, &remainder);
+ if (remainder)
+ freq_comp++;
+
+ ptp_qoriq->tmr_add = freq_comp;
+ ptp_qoriq->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - ptp_qoriq->tclk_period;
+ ptp_qoriq->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - ptp_qoriq->tclk_period;
+ ptp_qoriq->tmr_fiper3 = DEFAULT_FIPER3_PERIOD - ptp_qoriq->tclk_period;
+
+ /* max_adj = 1000000000 * (freq_ratio - 1.0) - 1
+ * freq_ratio = reference_clock_freq / nominal_freq
+ */
+ max_adj = 1000000000ULL * (clk_src - nominal_freq);
+ max_adj = div_u64(max_adj, nominal_freq) - 1;
+ ptp_qoriq->caps.max_adj = max_adj;
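+ /*
+ * Worked example: clk_src = 600 MHz gives nominal_freq = 500 MHz,
+ * tclk_period = 2 ns, freq_comp = ceil(2^32 * 500 / 600) = 0xd5555556
+ * and max_adj = 1000000000 * (600 - 500) / 500 - 1 = 199999999 ppb.
+ */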
+
+ return 0;
+}
+
+int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
+ const struct ptp_clock_info *caps)
+{
+ struct device_node *node = ptp_qoriq->dev->of_node;
+ struct ptp_qoriq_registers *regs;
+ struct timespec64 now;
+ unsigned long flags;
+ u32 tmr_ctrl;
+
+ if (!node)
+ return -ENODEV;
+
+ ptp_qoriq->base = base;
+ ptp_qoriq->caps = *caps;
+
+ if (of_property_read_u32(node, "fsl,cksel", &ptp_qoriq->cksel))
+ ptp_qoriq->cksel = DEFAULT_CKSEL;
+
+ if (of_property_read_bool(node, "fsl,extts-fifo"))
+ ptp_qoriq->extts_fifo_support = true;
+ else
+ ptp_qoriq->extts_fifo_support = false;
+
+ if (of_device_is_compatible(node, "fsl,dpaa2-ptp") ||
+ of_device_is_compatible(node, "fsl,enetc-ptp"))
+ ptp_qoriq->fiper3_support = true;
+
+ if (of_property_read_u32(node,
+ "fsl,tclk-period", &ptp_qoriq->tclk_period) ||
+ of_property_read_u32(node,
+ "fsl,tmr-prsc", &ptp_qoriq->tmr_prsc) ||
+ of_property_read_u32(node,
+ "fsl,tmr-add", &ptp_qoriq->tmr_add) ||
+ of_property_read_u32(node,
+ "fsl,tmr-fiper1", &ptp_qoriq->tmr_fiper1) ||
+ of_property_read_u32(node,
+ "fsl,tmr-fiper2", &ptp_qoriq->tmr_fiper2) ||
+ of_property_read_u32(node,
+ "fsl,max-adj", &ptp_qoriq->caps.max_adj) ||
+ (ptp_qoriq->fiper3_support &&
+ of_property_read_u32(node, "fsl,tmr-fiper3",
+ &ptp_qoriq->tmr_fiper3))) {
+ pr_warn("device tree node missing required elements, try automatic configuration\n");
+
+ if (ptp_qoriq_auto_config(ptp_qoriq, node))
+ return -ENODEV;
+ }
+
+ if (of_property_read_bool(node, "little-endian")) {
+ ptp_qoriq->read = qoriq_read_le;
+ ptp_qoriq->write = qoriq_write_le;
+ } else {
+ ptp_qoriq->read = qoriq_read_be;
+ ptp_qoriq->write = qoriq_write_be;
+ }
+
+ /* The eTSEC uses a different memory map than DPAA/ENETC */
+ if (of_device_is_compatible(node, "fsl,etsec-ptp")) {
+ ptp_qoriq->regs.ctrl_regs = base + ETSEC_CTRL_REGS_OFFSET;
+ ptp_qoriq->regs.alarm_regs = base + ETSEC_ALARM_REGS_OFFSET;
+ ptp_qoriq->regs.fiper_regs = base + ETSEC_FIPER_REGS_OFFSET;
+ ptp_qoriq->regs.etts_regs = base + ETSEC_ETTS_REGS_OFFSET;
+ } else {
+ ptp_qoriq->regs.ctrl_regs = base + CTRL_REGS_OFFSET;
+ ptp_qoriq->regs.alarm_regs = base + ALARM_REGS_OFFSET;
+ ptp_qoriq->regs.fiper_regs = base + FIPER_REGS_OFFSET;
+ ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET;
+ }
+
+ spin_lock_init(&ptp_qoriq->lock);
+
+ ktime_get_real_ts64(&now);
+ ptp_qoriq_settime(&ptp_qoriq->caps, &now);
+
+ tmr_ctrl =
+ (ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
+ (ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT;
+
+ spin_lock_irqsave(&ptp_qoriq->lock, flags);
+
+ regs = &ptp_qoriq->regs;
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl);
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_add, ptp_qoriq->tmr_add);
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_prsc, ptp_qoriq->tmr_prsc);
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1);
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
+
+ if (ptp_qoriq->fiper3_support)
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper3,
+ ptp_qoriq->tmr_fiper3);
+
+ set_alarm(ptp_qoriq);
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl,
+ tmr_ctrl|FIPERST|RTPE|TE|FRD);
+
+ spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
+
+ ptp_qoriq->clock = ptp_clock_register(&ptp_qoriq->caps, ptp_qoriq->dev);
+ if (IS_ERR(ptp_qoriq->clock))
+ return PTR_ERR(ptp_qoriq->clock);
+
+ ptp_qoriq->phc_index = ptp_clock_index(ptp_qoriq->clock);
+ ptp_qoriq_create_debugfs(ptp_qoriq);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ptp_qoriq_init);
+
+void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq)
+{
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_temask, 0);
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, 0);
+
+ ptp_qoriq_remove_debugfs(ptp_qoriq);
+ ptp_clock_unregister(ptp_qoriq->clock);
+ iounmap(ptp_qoriq->base);
+ free_irq(ptp_qoriq->irq, ptp_qoriq);
+}
+EXPORT_SYMBOL_GPL(ptp_qoriq_free);
+
+static int ptp_qoriq_probe(struct platform_device *dev)
+{
+ struct ptp_qoriq *ptp_qoriq;
+ int err = -ENOMEM;
+ void __iomem *base;
+
+ ptp_qoriq = kzalloc(sizeof(*ptp_qoriq), GFP_KERNEL);
+ if (!ptp_qoriq)
+ goto no_memory;
+
+ ptp_qoriq->dev = &dev->dev;
+
+ err = -ENODEV;
+
+ ptp_qoriq->irq = platform_get_irq(dev, 0);
+ if (ptp_qoriq->irq < 0) {
+ pr_err("irq not in device tree\n");
+ goto no_node;
+ }
+ if (request_irq(ptp_qoriq->irq, ptp_qoriq_isr, IRQF_SHARED,
+ DRIVER, ptp_qoriq)) {
+ pr_err("request_irq failed\n");
+ goto no_node;
+ }
+
+ ptp_qoriq->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!ptp_qoriq->rsrc) {
+ pr_err("no resource\n");
+ goto no_resource;
+ }
+ if (request_resource(&iomem_resource, ptp_qoriq->rsrc)) {
+ pr_err("resource busy\n");
+ goto no_resource;
+ }
+
+ base = ioremap(ptp_qoriq->rsrc->start,
+ resource_size(ptp_qoriq->rsrc));
+ if (!base) {
+ pr_err("ioremap ptp registers failed\n");
+ goto no_ioremap;
+ }
+
+ err = ptp_qoriq_init(ptp_qoriq, base, &ptp_qoriq_caps);
+ if (err)
+ goto no_clock;
+
+ platform_set_drvdata(dev, ptp_qoriq);
+ return 0;
+
+no_clock:
+ iounmap(base);
+no_ioremap:
+ release_resource(ptp_qoriq->rsrc);
+no_resource:
+ free_irq(ptp_qoriq->irq, ptp_qoriq);
+no_node:
+ kfree(ptp_qoriq);
+no_memory:
+ return err;
+}
+
+static int ptp_qoriq_remove(struct platform_device *dev)
+{
+ struct ptp_qoriq *ptp_qoriq = platform_get_drvdata(dev);
+
+ ptp_qoriq_free(ptp_qoriq);
+ release_resource(ptp_qoriq->rsrc);
+ kfree(ptp_qoriq);
+ return 0;
+}
+
+static const struct of_device_id match_table[] = {
+ { .compatible = "fsl,etsec-ptp" },
+ { .compatible = "fsl,fman-ptp-timer" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, match_table);
+
+static struct platform_driver ptp_qoriq_driver = {
+ .driver = {
+ .name = "ptp_qoriq",
+ .of_match_table = match_table,
+ },
+ .probe = ptp_qoriq_probe,
+ .remove = ptp_qoriq_remove,
+};
+
+module_platform_driver(ptp_qoriq_driver);
+
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
+MODULE_DESCRIPTION("PTP clock for Freescale QorIQ 1588 timer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ptp/ptp_qoriq_debugfs.c b/drivers/ptp/ptp_qoriq_debugfs.c
new file mode 100644
index 000000000..e8dddcedf
--- /dev/null
+++ b/drivers/ptp/ptp_qoriq_debugfs.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright 2019 NXP
+ */
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/fsl/ptp_qoriq.h>
+
+static int ptp_qoriq_fiper1_lpbk_get(void *data, u64 *val)
+{
+ struct ptp_qoriq *ptp_qoriq = data;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 ctrl;
+
+ ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
+ *val = ctrl & PP1L ? 1 : 0;
+
+ return 0;
+}
+
+static int ptp_qoriq_fiper1_lpbk_set(void *data, u64 val)
+{
+ struct ptp_qoriq *ptp_qoriq = data;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 ctrl;
+
+ ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
+ if (val == 0)
+ ctrl &= ~PP1L;
+ else
+ ctrl |= PP1L;
+
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, ctrl);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper1_fops, ptp_qoriq_fiper1_lpbk_get,
+ ptp_qoriq_fiper1_lpbk_set, "%llu\n");
+
+static int ptp_qoriq_fiper2_lpbk_get(void *data, u64 *val)
+{
+ struct ptp_qoriq *ptp_qoriq = data;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 ctrl;
+
+ ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
+ *val = ctrl & PP2L ? 1 : 0;
+
+ return 0;
+}
+
+static int ptp_qoriq_fiper2_lpbk_set(void *data, u64 val)
+{
+ struct ptp_qoriq *ptp_qoriq = data;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 ctrl;
+
+ ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
+ if (val == 0)
+ ctrl &= ~PP2L;
+ else
+ ctrl |= PP2L;
+
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, ctrl);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper2_fops, ptp_qoriq_fiper2_lpbk_get,
+ ptp_qoriq_fiper2_lpbk_set, "%llu\n");
+
+void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir(dev_name(ptp_qoriq->dev), NULL);
+ if (IS_ERR(root))
+ return;
+ if (!root)
+ goto err_root;
+
+ ptp_qoriq->debugfs_root = root;
+
+ if (!debugfs_create_file_unsafe("fiper1-loopback", 0600, root,
+ ptp_qoriq, &ptp_qoriq_fiper1_fops))
+ goto err_node;
+ if (!debugfs_create_file_unsafe("fiper2-loopback", 0600, root,
+ ptp_qoriq, &ptp_qoriq_fiper2_fops))
+ goto err_node;
+ return;
+
+err_node:
+ debugfs_remove_recursive(root);
+ ptp_qoriq->debugfs_root = NULL;
+err_root:
+ dev_err(ptp_qoriq->dev, "failed to initialize debugfs\n");
+}
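+/*
+ * With debugfs mounted (typically at /sys/kernel/debug), this exposes
+ * <debugfs>/<dev name>/fiper1-loopback and fiper2-loopback; writing 1
+ * or 0 toggles the PP1L/PP2L loopback bits handled above.
+ */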
+
+void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq)
+{
+ debugfs_remove_recursive(ptp_qoriq->debugfs_root);
+ ptp_qoriq->debugfs_root = NULL;
+}
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
new file mode 100644
index 000000000..8d52815e0
--- /dev/null
+++ b/drivers/ptp/ptp_sysfs.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PTP 1588 clock support - sysfs interface.
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ */
+#include <linux/capability.h>
+#include <linux/slab.h>
+
+#include "ptp_private.h"
+
+static ssize_t clock_name_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ return sysfs_emit(page, "%s\n", ptp->info->name);
+}
+static DEVICE_ATTR_RO(clock_name);
+
+#define PTP_SHOW_INT(name, var) \
+static ssize_t var##_show(struct device *dev, \
+ struct device_attribute *attr, char *page) \
+{ \
+ struct ptp_clock *ptp = dev_get_drvdata(dev); \
+ return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->var); \
+} \
+static DEVICE_ATTR(name, 0444, var##_show, NULL);
+
+PTP_SHOW_INT(max_adjustment, max_adj);
+PTP_SHOW_INT(n_alarms, n_alarm);
+PTP_SHOW_INT(n_external_timestamps, n_ext_ts);
+PTP_SHOW_INT(n_periodic_outputs, n_per_out);
+PTP_SHOW_INT(n_programmable_pins, n_pins);
+PTP_SHOW_INT(pps_available, pps);
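+/*
+ * Each invocation above expands to a <var>_show() handler plus a
+ * read-only attribute, e.g. PTP_SHOW_INT(max_adjustment, max_adj)
+ * yields /sys/class/ptp/ptpN/max_adjustment reporting info->max_adj.
+ */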
+
+static ssize_t extts_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ struct ptp_clock_info *ops = ptp->info;
+ struct ptp_clock_request req = { .type = PTP_CLK_REQ_EXTTS };
+ int cnt, enable;
+ int err = -EINVAL;
+
+ cnt = sscanf(buf, "%u %d", &req.extts.index, &enable);
+ if (cnt != 2)
+ goto out;
+ if (req.extts.index >= ops->n_ext_ts)
+ goto out;
+
+ err = ops->enable(ops, &req, enable ? 1 : 0);
+ if (err)
+ goto out;
+
+ return count;
+out:
+ return err;
+}
+static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
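+/*
+ * Usage sketch (standard PTP sysfs path assumed):
+ *   echo "0 1" > /sys/class/ptp/ptp0/extts_enable    enables channel 0
+ *   echo "0 0" > /sys/class/ptp/ptp0/extts_enable    disables it again
+ */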
+
+static ssize_t extts_fifo_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ struct timestamp_event_queue *queue = &ptp->tsevq;
+ struct ptp_extts_event event;
+ unsigned long flags;
+ size_t qcnt;
+ int cnt = 0;
+
+ memset(&event, 0, sizeof(event));
+
+ if (mutex_lock_interruptible(&ptp->tsevq_mux))
+ return -ERESTARTSYS;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ qcnt = queue_cnt(queue);
+ if (qcnt) {
+ event = queue->buf[queue->head];
+ /* Paired with READ_ONCE() in queue_cnt() */
+ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ }
+ spin_unlock_irqrestore(&queue->lock, flags);
+
+ if (!qcnt)
+ goto out;
+
+ cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n",
+ event.index, event.t.sec, event.t.nsec);
+out:
+ mutex_unlock(&ptp->tsevq_mux);
+ return cnt;
+}
+static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL);
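+/*
+ * Each read of the "fifo" attribute dequeues at most one queued external
+ * timestamp and prints it as "<channel> <seconds> <nanoseconds>"; an
+ * empty queue yields a zero-length read.
+ */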
+
+static ssize_t period_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ struct ptp_clock_info *ops = ptp->info;
+ struct ptp_clock_request req = { .type = PTP_CLK_REQ_PEROUT };
+ int cnt, enable, err = -EINVAL;
+
+ cnt = sscanf(buf, "%u %lld %u %lld %u", &req.perout.index,
+ &req.perout.start.sec, &req.perout.start.nsec,
+ &req.perout.period.sec, &req.perout.period.nsec);
+ if (cnt != 5)
+ goto out;
+ if (req.perout.index >= ops->n_per_out)
+ goto out;
+
+ enable = req.perout.period.sec || req.perout.period.nsec;
+ err = ops->enable(ops, &req, enable);
+ if (err)
+ goto out;
+
+ return count;
+out:
+ return err;
+}
+static DEVICE_ATTR(period, 0220, NULL, period_store);
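+/*
+ * Usage sketch: "<channel> <start.sec> <start.nsec> <period.sec> <period.nsec>",
+ * e.g. echo "0 0 0 1 0" > /sys/class/ptp/ptp0/period requests a 1 s
+ * periodic output on channel 0; an all-zero period disables it.
+ */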
+
+static ssize_t pps_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ struct ptp_clock_info *ops = ptp->info;
+ struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS };
+ int cnt, enable;
+ int err = -EINVAL;
+
+ if (!capable(CAP_SYS_TIME))
+ return -EPERM;
+
+ cnt = sscanf(buf, "%d", &enable);
+ if (cnt != 1)
+ goto out;
+
+ err = ops->enable(ops, &req, enable ? 1 : 0);
+ if (err)
+ goto out;
+
+ return count;
+out:
+ return err;
+}
+static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
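+/*
+ * Usage sketch (requires CAP_SYS_TIME):
+ *   echo 1 > /sys/class/ptp/ptp0/pps_enable
+ */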
+
+static struct attribute *ptp_attrs[] = {
+ &dev_attr_clock_name.attr,
+
+ &dev_attr_max_adjustment.attr,
+ &dev_attr_n_alarms.attr,
+ &dev_attr_n_external_timestamps.attr,
+ &dev_attr_n_periodic_outputs.attr,
+ &dev_attr_n_programmable_pins.attr,
+ &dev_attr_pps_available.attr,
+
+ &dev_attr_extts_enable.attr,
+ &dev_attr_fifo.attr,
+ &dev_attr_period.attr,
+ &dev_attr_pps_enable.attr,
+ NULL
+};
+
+static umode_t ptp_is_attribute_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ struct ptp_clock_info *info = ptp->info;
+ umode_t mode = attr->mode;
+
+ if (attr == &dev_attr_extts_enable.attr ||
+ attr == &dev_attr_fifo.attr) {
+ if (!info->n_ext_ts)
+ mode = 0;
+ } else if (attr == &dev_attr_period.attr) {
+ if (!info->n_per_out)
+ mode = 0;
+ } else if (attr == &dev_attr_pps_enable.attr) {
+ if (!info->pps)
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static const struct attribute_group ptp_group = {
+ .is_visible = ptp_is_attribute_visible,
+ .attrs = ptp_attrs,
+};
+
+const struct attribute_group *ptp_groups[] = {
+ &ptp_group,
+ NULL
+};
+
+static int ptp_pin_name2index(struct ptp_clock *ptp, const char *name)
+{
+ int i;
+ for (i = 0; i < ptp->info->n_pins; i++) {
+ if (!strcmp(ptp->info->pin_config[i].name, name))
+ return i;
+ }
+ return -1;
+}
+
+static ssize_t ptp_pin_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ unsigned int func, chan;
+ int index;
+
+ index = ptp_pin_name2index(ptp, attr->attr.name);
+ if (index < 0)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+
+ func = ptp->info->pin_config[index].func;
+ chan = ptp->info->pin_config[index].chan;
+
+ mutex_unlock(&ptp->pincfg_mux);
+
+ return sysfs_emit(page, "%u %u\n", func, chan);
+}
+
+static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ unsigned int func, chan;
+ int cnt, err, index;
+
+ cnt = sscanf(buf, "%u %u", &func, &chan);
+ if (cnt != 2)
+ return -EINVAL;
+
+ index = ptp_pin_name2index(ptp, attr->attr.name);
+ if (index < 0)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+ err = ptp_set_pinfunc(ptp, index, func, chan);
+ mutex_unlock(&ptp->pincfg_mux);
+ if (err)
+ return err;
+
+ return count;
+}
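+/*
+ * Each file under the "pins" attribute group reads and writes a
+ * "<function> <channel>" pair, where <function> is one of the
+ * ptp_pin_function values (PTP_PF_NONE, PTP_PF_EXTTS, PTP_PF_PEROUT,
+ * PTP_PF_PHYSYNC); writes are validated by ptp_set_pinfunc().
+ */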
+
+int ptp_populate_pin_groups(struct ptp_clock *ptp)
+{
+ struct ptp_clock_info *info = ptp->info;
+ int err = -ENOMEM, i, n_pins = info->n_pins;
+
+ if (!n_pins)
+ return 0;
+
+ ptp->pin_dev_attr = kcalloc(n_pins, sizeof(*ptp->pin_dev_attr),
+ GFP_KERNEL);
+ if (!ptp->pin_dev_attr)
+ goto no_dev_attr;
+
+ ptp->pin_attr = kcalloc(1 + n_pins, sizeof(*ptp->pin_attr), GFP_KERNEL);
+ if (!ptp->pin_attr)
+ goto no_pin_attr;
+
+ for (i = 0; i < n_pins; i++) {
+ struct device_attribute *da = &ptp->pin_dev_attr[i];
+ sysfs_attr_init(&da->attr);
+ da->attr.name = info->pin_config[i].name;
+ da->attr.mode = 0644;
+ da->show = ptp_pin_show;
+ da->store = ptp_pin_store;
+ ptp->pin_attr[i] = &da->attr;
+ }
+
+ ptp->pin_attr_group.name = "pins";
+ ptp->pin_attr_group.attrs = ptp->pin_attr;
+
+ ptp->pin_attr_groups[0] = &ptp->pin_attr_group;
+
+ return 0;
+
+no_pin_attr:
+ kfree(ptp->pin_dev_attr);
+no_dev_attr:
+ return err;
+}
+
+void ptp_cleanup_pin_groups(struct ptp_clock *ptp)
+{
+ kfree(ptp->pin_attr);
+ kfree(ptp->pin_dev_attr);
+}
diff --git a/drivers/ptp/ptp_vmw.c b/drivers/ptp/ptp_vmw.c
new file mode 100644
index 000000000..5dca26e14
--- /dev/null
+++ b/drivers/ptp/ptp_vmw.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Copyright (C) 2020 VMware, Inc., Palo Alto, CA., USA
+ *
+ * PTP clock driver for VMware precision clock virtual device.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <asm/hypervisor.h>
+#include <asm/vmware.h>
+
+#define VMWARE_MAGIC 0x564D5868
+#define VMWARE_CMD_PCLK(nr) ((nr << 16) | 97)
+#define VMWARE_CMD_PCLK_GETTIME VMWARE_CMD_PCLK(0)
+
+static struct acpi_device *ptp_vmw_acpi_device;
+static struct ptp_clock *ptp_vmw_clock;
+
+
+static int ptp_vmw_pclk_read(u64 *ns)
+{
+ u32 ret, nsec_hi, nsec_lo, unused1, unused2, unused3;
+
+ asm volatile (VMWARE_HYPERCALL :
+ "=a"(ret), "=b"(nsec_hi), "=c"(nsec_lo), "=d"(unused1),
+ "=S"(unused2), "=D"(unused3) :
+ "a"(VMWARE_MAGIC), "b"(0),
+ "c"(VMWARE_CMD_PCLK_GETTIME), "d"(0) :
+ "memory");
+
+ if (ret == 0)
+ *ns = ((u64)nsec_hi << 32) | nsec_lo;
+ return ret;
+}
+
+/*
+ * PTP clock ops.
+ */
+
+static int ptp_vmw_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_vmw_adjfreq(struct ptp_clock_info *info, s32 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_vmw_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+ u64 ns;
+
+ if (ptp_vmw_pclk_read(&ns) != 0)
+ return -EIO;
+ *ts = ns_to_timespec64(ns);
+ return 0;
+}
+
+static int ptp_vmw_settime(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_vmw_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_vmw_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "ptp_vmw",
+ .max_adj = 0,
+ .adjtime = ptp_vmw_adjtime,
+ .adjfreq = ptp_vmw_adjfreq,
+ .gettime64 = ptp_vmw_gettime,
+ .settime64 = ptp_vmw_settime,
+ .enable = ptp_vmw_enable,
+};
+
+/*
+ * ACPI driver ops for VMware "precision clock" virtual device.
+ */
+
+static int ptp_vmw_acpi_add(struct acpi_device *device)
+{
+ ptp_vmw_clock = ptp_clock_register(&ptp_vmw_clock_info, NULL);
+ if (IS_ERR(ptp_vmw_clock)) {
+ pr_err("failed to register ptp clock\n");
+ return PTR_ERR(ptp_vmw_clock);
+ }
+
+ ptp_vmw_acpi_device = device;
+ return 0;
+}
+
+static int ptp_vmw_acpi_remove(struct acpi_device *device)
+{
+ ptp_clock_unregister(ptp_vmw_clock);
+ return 0;
+}
+
+static const struct acpi_device_id ptp_vmw_acpi_device_ids[] = {
+ { "VMW0005", 0 },
+ { "", 0 },
+};
+
+MODULE_DEVICE_TABLE(acpi, ptp_vmw_acpi_device_ids);
+
+static struct acpi_driver ptp_vmw_acpi_driver = {
+ .name = "ptp_vmw",
+ .ids = ptp_vmw_acpi_device_ids,
+ .ops = {
+ .add = ptp_vmw_acpi_add,
+ .remove = ptp_vmw_acpi_remove
+ },
+ .owner = THIS_MODULE
+};
+
+static int __init ptp_vmw_init(void)
+{
+ if (x86_hyper_type != X86_HYPER_VMWARE)
+ return -1;
+ return acpi_bus_register_driver(&ptp_vmw_acpi_driver);
+}
+
+static void __exit ptp_vmw_exit(void)
+{
+ acpi_bus_unregister_driver(&ptp_vmw_acpi_driver);
+}
+
+module_init(ptp_vmw_init);
+module_exit(ptp_vmw_exit);
+
+MODULE_DESCRIPTION("VMware virtual PTP clock driver");
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_LICENSE("Dual BSD/GPL");