Diffstat (limited to 'drivers/hte')
-rw-r--r--  drivers/hte/Kconfig             |  33
-rw-r--r--  drivers/hte/Makefile            |   3
-rw-r--r--  drivers/hte/hte-tegra194-test.c | 240
-rw-r--r--  drivers/hte/hte-tegra194.c      | 870
-rw-r--r--  drivers/hte/hte.c               | 947
5 files changed, 2093 insertions, 0 deletions
diff --git a/drivers/hte/Kconfig b/drivers/hte/Kconfig
new file mode 100644
index 0000000000..cf29e0218b
--- /dev/null
+++ b/drivers/hte/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig HTE
+ bool "Hardware Timestamping Engine (HTE) Support"
+ help
+ Hardware Timestamping Engine (HTE) Support.
+
+ Some devices provide a hardware timestamping engine which can
+ timestamp certain device lines/signals in realtime. This benefits
+ applications that need accurate event timestamps with low jitter.
+ This framework provides a generic interface to such HTE providers
+ and consumer devices.
+
+ If unsure, say no.
+
+if HTE
+
+config HTE_TEGRA194
+ tristate "NVIDIA Tegra194 HTE Support"
+ depends on ARCH_TEGRA_194_SOC
+ help
+ Enable this option for integrated hardware timestamping engine also
+ known as generic timestamping engine (GTE) support on NVIDIA Tegra194
+ systems-on-chip. The driver supports 352 LIC IRQ lines and 39 AON
+ GPIO lines for timestamping in realtime.
+
+config HTE_TEGRA194_TEST
+ tristate "NVIDIA Tegra194 HTE Test"
+ depends on HTE_TEGRA194
+ help
+ The NVIDIA Tegra194 GTE test driver demonstrates how to use the HTE
+ framework to timestamp GPIO and LIC IRQ lines.
+
+endif
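
For reference, a minimal kernel config fragment enabling everything added by this patch — the framework built in, the Tegra194 provider and the test driver as modules — could look like this (symbol names taken from the Kconfig above):

    CONFIG_HTE=y
    CONFIG_HTE_TEGRA194=m
    CONFIG_HTE_TEGRA194_TEST=m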
diff --git a/drivers/hte/Makefile b/drivers/hte/Makefile
new file mode 100644
index 0000000000..8cca124849
--- /dev/null
+++ b/drivers/hte/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_HTE) += hte.o
+obj-$(CONFIG_HTE_TEGRA194) += hte-tegra194.o
+obj-$(CONFIG_HTE_TEGRA194_TEST) += hte-tegra194-test.o
diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
new file mode 100644
index 0000000000..ab2edff018
--- /dev/null
+++ b/drivers/hte/hte-tegra194-test.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022 NVIDIA Corporation
+ *
+ * Author: Dipen Patel <dipenp@nvidia.com>
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/hte.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+/*
+ * This sample HTE test driver demonstrates HTE API usage by enabling
+ * hardware timestamp on gpio_in and specified LIC IRQ lines.
+ *
+ * Note: gpio_out and gpio_in need to be shorted externally for the GPIO
+ * monitoring part of this test to work. The test driver has been verified
+ * on the Jetson AGX Xavier platform by shorting pins 32 and 16 on the
+ * 40-pin header.
+ *
+ * Device tree snippet to activate this driver:
+ * tegra_hte_test {
+ * compatible = "nvidia,tegra194-hte-test";
+ * in-gpio = <&gpio_aon TEGRA194_AON_GPIO(BB, 1)>;
+ * out-gpio = <&gpio_aon TEGRA194_AON_GPIO(BB, 0)>;
+ * timestamps = <&tegra_hte_aon TEGRA194_AON_GPIO(BB, 1)>,
+ * <&tegra_hte_lic 0x19>;
+ * timestamp-names = "hte-gpio", "hte-i2c-irq";
+ * status = "okay";
+ * };
+ *
+ * How to run test driver:
+ * - Load test driver.
+ * - For the GPIO, the gpio_out pin toggles at a regular interval,
+ * triggering an HTE event for the rising edge on the gpio_in pin.
+ *
+ * - For the LIC IRQ line, it uses interrupt 0x19, which is I2C controller 1.
+ * - Run i2cdetect -y 1 1>/dev/null; this command generates I2C bus
+ * transactions which create timestamp data.
+ * - It prints the message below for both lines.
+ * HW timestamp(<line id>:<ts seq number>): <timestamp>, edge: <edge>.
+ * - Unloading the driver disables and deallocates the HTE resources.
+ */
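
Given the message format documented above, a single report on the console would look roughly like this (line id, sequence number and timestamp values are purely illustrative):

    HW timestamp(9: 1): 169709000448, edge: rising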
+
+static struct tegra_hte_test {
+ int gpio_in_irq;
+ struct device *pdev;
+ struct gpio_desc *gpio_in;
+ struct gpio_desc *gpio_out;
+ struct hte_ts_desc *desc;
+ struct timer_list timer;
+ struct kobject *kobj;
+} hte;
+
+static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
+{
+ char *edge;
+ struct hte_ts_desc *desc = p;
+
+ if (!ts || !p)
+ return HTE_CB_HANDLED;
+
+ if (ts->raw_level < 0)
+ edge = "Unknown";
+
+ pr_info("HW timestamp(%u: %llu): %llu, edge: %s\n",
+ desc->attr.line_id, ts->seq, ts->tsc,
+ (ts->raw_level >= 0) ? ((ts->raw_level == 0) ?
+ "falling" : "rising") : edge);
+
+ return HTE_CB_HANDLED;
+}
+
+static void gpio_timer_cb(struct timer_list *t)
+{
+ (void)t;
+
+ gpiod_set_value(hte.gpio_out, !gpiod_get_value(hte.gpio_out));
+ mod_timer(&hte.timer, jiffies + msecs_to_jiffies(8000));
+}
+
+static irqreturn_t tegra_hte_test_gpio_isr(int irq, void *data)
+{
+ (void)irq;
+ (void)data;
+
+ return IRQ_HANDLED;
+}
+
+static const struct of_device_id tegra_hte_test_of_match[] = {
+ { .compatible = "nvidia,tegra194-hte-test"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_hte_test_of_match);
+
+static int tegra_hte_test_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i, cnt;
+
+ dev_set_drvdata(&pdev->dev, &hte);
+ hte.pdev = &pdev->dev;
+
+ hte.gpio_out = gpiod_get(&pdev->dev, "out", 0);
+ if (IS_ERR(hte.gpio_out)) {
+ dev_err(&pdev->dev, "failed to get gpio out\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hte.gpio_in = gpiod_get(&pdev->dev, "in", 0);
+ if (IS_ERR(hte.gpio_in)) {
+ dev_err(&pdev->dev, "failed to get gpio in\n");
+ ret = -EINVAL;
+ goto free_gpio_out;
+ }
+
+ ret = gpiod_direction_output(hte.gpio_out, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set output\n");
+ ret = -EINVAL;
+ goto free_gpio_in;
+ }
+
+ ret = gpiod_direction_input(hte.gpio_in);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set input\n");
+ ret = -EINVAL;
+ goto free_gpio_in;
+ }
+
+ ret = gpiod_to_irq(hte.gpio_in);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to map GPIO to IRQ: %d\n", ret);
+ ret = -ENXIO;
+ goto free_gpio_in;
+ }
+
+ hte.gpio_in_irq = ret;
+ ret = request_irq(ret, tegra_hte_test_gpio_isr,
+ IRQF_TRIGGER_RISING,
+ "tegra_hte_gpio_test_isr", &hte);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to acquire IRQ\n");
+ ret = -ENXIO;
+ goto free_gpio_in;
+ }
+
+ cnt = of_hte_req_count(hte.pdev);
+ if (cnt < 0) {
+ ret = cnt;
+ goto free_irq;
+ }
+
+ dev_info(&pdev->dev, "Total requested lines:%d\n", cnt);
+
+ hte.desc = devm_kzalloc(hte.pdev, sizeof(*hte.desc) * cnt, GFP_KERNEL);
+ if (!hte.desc) {
+ ret = -ENOMEM;
+ goto free_irq;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (i == 0)
+ /*
+ * GPIO HTE init: line_id and name will be parsed from
+ * the device tree node. The edge flags are implicitly
+ * set by the request_irq call above. Only line_data
+ * needs to be set.
+ */
+ hte_init_line_attr(&hte.desc[i], 0, 0, NULL,
+ hte.gpio_in);
+ else
+ /*
+ * Same as above, except that the IRQ line does not need
+ * line data.
+ */
+ hte_init_line_attr(&hte.desc[i], 0, 0, NULL, NULL);
+
+ ret = hte_ts_get(hte.pdev, &hte.desc[i], i);
+ if (ret)
+ goto ts_put;
+
+ ret = devm_hte_request_ts_ns(hte.pdev, &hte.desc[i],
+ process_hw_ts, NULL,
+ &hte.desc[i]);
+ if (ret) /* no need to ts_put, request API takes care */
+ goto free_irq;
+ }
+
+ timer_setup(&hte.timer, gpio_timer_cb, 0);
+ mod_timer(&hte.timer, jiffies + msecs_to_jiffies(5000));
+
+ return 0;
+
+ts_put:
+ cnt = i;
+ for (i = 0; i < cnt; i++)
+ hte_ts_put(&hte.desc[i]);
+free_irq:
+ free_irq(hte.gpio_in_irq, &hte);
+free_gpio_in:
+ gpiod_put(hte.gpio_in);
+free_gpio_out:
+ gpiod_put(hte.gpio_out);
+out:
+
+ return ret;
+}
+
+static int tegra_hte_test_remove(struct platform_device *pdev)
+{
+ (void)pdev;
+
+ free_irq(hte.gpio_in_irq, &hte);
+ gpiod_put(hte.gpio_in);
+ gpiod_put(hte.gpio_out);
+ del_timer_sync(&hte.timer);
+
+ return 0;
+}
+
+static struct platform_driver tegra_hte_test_driver = {
+ .probe = tegra_hte_test_probe,
+ .remove = tegra_hte_test_remove,
+ .driver = {
+ .name = "tegra_hte_test",
+ .of_match_table = tegra_hte_test_of_match,
+ },
+};
+module_platform_driver(tegra_hte_test_driver);
+
+MODULE_AUTHOR("Dipen Patel <dipenp@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hte/hte-tegra194.c b/drivers/hte/hte-tegra194.c
new file mode 100644
index 0000000000..6fe6897047
--- /dev/null
+++ b/drivers/hte/hte-tegra194.c
@@ -0,0 +1,870 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022 NVIDIA Corporation
+ *
+ * Author: Dipen Patel <dipenp@nvidia.com>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/hte.h>
+#include <linux/uaccess.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
+
+#define HTE_SUSPEND 0
+
+/* HTE source clock TSC is 31.25MHz */
+#define HTE_TS_CLK_RATE_HZ 31250000ULL
+#define HTE_CLK_RATE_NS 32
+#define HTE_TS_NS_SHIFT __builtin_ctz(HTE_CLK_RATE_NS)
+
+#define NV_AON_SLICE_INVALID -1
+#define NV_LINES_IN_SLICE 32
+
+/* AON HTE line map For slice 1 */
+#define NV_AON_HTE_SLICE1_IRQ_GPIO_28 12
+#define NV_AON_HTE_SLICE1_IRQ_GPIO_29 13
+
+/* AON HTE line map For slice 2 */
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_0 0
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_1 1
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_2 2
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_3 3
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_4 4
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_5 5
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_6 6
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_7 7
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_8 8
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_9 9
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_10 10
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_11 11
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_12 12
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_13 13
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_14 14
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_15 15
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_16 16
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_17 17
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_18 18
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_19 19
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_20 20
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_21 21
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_22 22
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_23 23
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_24 24
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_25 25
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_26 26
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_27 27
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_28 28
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_29 29
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_30 30
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_31 31
+
+#define HTE_TECTRL 0x0
+#define HTE_TETSCH 0x4
+#define HTE_TETSCL 0x8
+#define HTE_TESRC 0xC
+#define HTE_TECCV 0x10
+#define HTE_TEPCV 0x14
+#define HTE_TECMD 0x1C
+#define HTE_TESTATUS 0x20
+#define HTE_SLICE0_TETEN 0x40
+#define HTE_SLICE1_TETEN 0x60
+
+#define HTE_SLICE_SIZE (HTE_SLICE1_TETEN - HTE_SLICE0_TETEN)
+
+#define HTE_TECTRL_ENABLE_ENABLE 0x1
+
+#define HTE_TECTRL_OCCU_SHIFT 0x8
+#define HTE_TECTRL_INTR_SHIFT 0x1
+#define HTE_TECTRL_INTR_ENABLE 0x1
+
+#define HTE_TESRC_SLICE_SHIFT 16
+#define HTE_TESRC_SLICE_DEFAULT_MASK 0xFF
+
+#define HTE_TECMD_CMD_POP 0x1
+
+#define HTE_TESTATUS_OCCUPANCY_SHIFT 8
+#define HTE_TESTATUS_OCCUPANCY_MASK 0xFF
+
+enum tegra_hte_type {
+ HTE_TEGRA_TYPE_GPIO = 1U << 0,
+ HTE_TEGRA_TYPE_LIC = 1U << 1,
+};
+
+struct hte_slices {
+ u32 r_val;
+ unsigned long flags;
+ /* to prevent lines mapped to the same slice from updating its register concurrently */
+ spinlock_t s_lock;
+};
+
+struct tegra_hte_line_mapped {
+ int slice;
+ u32 bit_index;
+};
+
+struct tegra_hte_line_data {
+ unsigned long flags;
+ void *data;
+};
+
+struct tegra_hte_data {
+ enum tegra_hte_type type;
+ u32 slices;
+ u32 map_sz;
+ u32 sec_map_sz;
+ const struct tegra_hte_line_mapped *map;
+ const struct tegra_hte_line_mapped *sec_map;
+};
+
+struct tegra_hte_soc {
+ int hte_irq;
+ u32 itr_thrshld;
+ u32 conf_rval;
+ struct hte_slices *sl;
+ const struct tegra_hte_data *prov_data;
+ struct tegra_hte_line_data *line_data;
+ struct hte_chip *chip;
+ struct gpio_chip *c;
+ void __iomem *regs;
+};
+
+static const struct tegra_hte_line_mapped tegra194_aon_gpio_map[] = {
+ /* gpio, slice, bit_index */
+ /* AA port */
+ [0] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_11},
+ [1] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_10},
+ [2] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_9},
+ [3] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_8},
+ [4] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_7},
+ [5] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_6},
+ [6] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_5},
+ [7] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_4},
+ /* BB port */
+ [8] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_3},
+ [9] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_2},
+ [10] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_1},
+ [11] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_0},
+ /* CC port */
+ [12] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_22},
+ [13] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_21},
+ [14] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_20},
+ [15] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_19},
+ [16] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_18},
+ [17] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_17},
+ [18] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_16},
+ [19] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_15},
+ /* DD port */
+ [20] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_14},
+ [21] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_13},
+ [22] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_12},
+ /* EE port */
+ [23] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_29},
+ [24] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_28},
+ [25] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_27},
+ [26] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_26},
+ [27] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_25},
+ [28] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_24},
+ [29] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_23},
+};
+
+static const struct tegra_hte_line_mapped tegra194_aon_gpio_sec_map[] = {
+ /* gpio, slice, bit_index */
+ /* AA port */
+ [0] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_11},
+ [1] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_10},
+ [2] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_9},
+ [3] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_8},
+ [4] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_7},
+ [5] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_6},
+ [6] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_5},
+ [7] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_4},
+ /* BB port */
+ [8] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_3},
+ [9] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_2},
+ [10] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_1},
+ [11] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_0},
+ [12] = {NV_AON_SLICE_INVALID, 0},
+ [13] = {NV_AON_SLICE_INVALID, 0},
+ [14] = {NV_AON_SLICE_INVALID, 0},
+ [15] = {NV_AON_SLICE_INVALID, 0},
+ /* CC port */
+ [16] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_22},
+ [17] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_21},
+ [18] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_20},
+ [19] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_19},
+ [20] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_18},
+ [21] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_17},
+ [22] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_16},
+ [23] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_15},
+ /* DD port */
+ [24] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_14},
+ [25] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_13},
+ [26] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_12},
+ [27] = {NV_AON_SLICE_INVALID, 0},
+ [28] = {NV_AON_SLICE_INVALID, 0},
+ [29] = {NV_AON_SLICE_INVALID, 0},
+ [30] = {NV_AON_SLICE_INVALID, 0},
+ [31] = {NV_AON_SLICE_INVALID, 0},
+ /* EE port */
+ [32] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_29},
+ [33] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_28},
+ [34] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_27},
+ [35] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_26},
+ [36] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_25},
+ [37] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_24},
+ [38] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_23},
+ [39] = {NV_AON_SLICE_INVALID, 0},
+};
+
+static const struct tegra_hte_line_mapped tegra234_aon_gpio_map[] = {
+ /* gpio, slice, bit_index */
+ /* AA port */
+ [0] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_11},
+ [1] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_10},
+ [2] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_9},
+ [3] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_8},
+ [4] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_7},
+ [5] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_6},
+ [6] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_5},
+ [7] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_4},
+ /* BB port */
+ [8] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_3},
+ [9] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_2},
+ [10] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_1},
+ [11] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_0},
+ /* CC port */
+ [12] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_22},
+ [13] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_21},
+ [14] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_20},
+ [15] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_19},
+ [16] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_18},
+ [17] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_17},
+ [18] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_16},
+ [19] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_15},
+ /* DD port */
+ [20] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_14},
+ [21] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_13},
+ [22] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_12},
+ /* EE port */
+ [23] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_31},
+ [24] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_30},
+ [25] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_29},
+ [26] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_28},
+ [27] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_27},
+ [28] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_26},
+ [29] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_25},
+ [30] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_24},
+ /* GG port */
+ [31] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_23},
+};
+
+static const struct tegra_hte_line_mapped tegra234_aon_gpio_sec_map[] = {
+ /* gpio, slice, bit_index */
+ /* AA port */
+ [0] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_11},
+ [1] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_10},
+ [2] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_9},
+ [3] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_8},
+ [4] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_7},
+ [5] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_6},
+ [6] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_5},
+ [7] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_4},
+ /* BB port */
+ [8] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_3},
+ [9] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_2},
+ [10] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_1},
+ [11] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_0},
+ [12] = {NV_AON_SLICE_INVALID, 0},
+ [13] = {NV_AON_SLICE_INVALID, 0},
+ [14] = {NV_AON_SLICE_INVALID, 0},
+ [15] = {NV_AON_SLICE_INVALID, 0},
+ /* CC port */
+ [16] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_22},
+ [17] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_21},
+ [18] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_20},
+ [19] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_19},
+ [20] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_18},
+ [21] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_17},
+ [22] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_16},
+ [23] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_15},
+ /* DD port */
+ [24] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_14},
+ [25] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_13},
+ [26] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_12},
+ [27] = {NV_AON_SLICE_INVALID, 0},
+ [28] = {NV_AON_SLICE_INVALID, 0},
+ [29] = {NV_AON_SLICE_INVALID, 0},
+ [30] = {NV_AON_SLICE_INVALID, 0},
+ [31] = {NV_AON_SLICE_INVALID, 0},
+ /* EE port */
+ [32] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_31},
+ [33] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_30},
+ [34] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_29},
+ [35] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_28},
+ [36] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_27},
+ [37] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_26},
+ [38] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_25},
+ [39] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_24},
+ /* GG port */
+ [40] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_23},
+};
+
+static const struct tegra_hte_data t194_aon_hte = {
+ .map_sz = ARRAY_SIZE(tegra194_aon_gpio_map),
+ .map = tegra194_aon_gpio_map,
+ .sec_map_sz = ARRAY_SIZE(tegra194_aon_gpio_sec_map),
+ .sec_map = tegra194_aon_gpio_sec_map,
+ .type = HTE_TEGRA_TYPE_GPIO,
+ .slices = 3,
+};
+
+static const struct tegra_hte_data t234_aon_hte = {
+ .map_sz = ARRAY_SIZE(tegra234_aon_gpio_map),
+ .map = tegra234_aon_gpio_map,
+ .sec_map_sz = ARRAY_SIZE(tegra234_aon_gpio_sec_map),
+ .sec_map = tegra234_aon_gpio_sec_map,
+ .type = HTE_TEGRA_TYPE_GPIO,
+ .slices = 3,
+};
+
+static const struct tegra_hte_data t194_lic_hte = {
+ .map_sz = 0,
+ .map = NULL,
+ .type = HTE_TEGRA_TYPE_LIC,
+ .slices = 11,
+};
+
+static const struct tegra_hte_data t234_lic_hte = {
+ .map_sz = 0,
+ .map = NULL,
+ .type = HTE_TEGRA_TYPE_LIC,
+ .slices = 17,
+};
+
+static inline u32 tegra_hte_readl(struct tegra_hte_soc *hte, u32 reg)
+{
+ return readl(hte->regs + reg);
+}
+
+static inline void tegra_hte_writel(struct tegra_hte_soc *hte, u32 reg,
+ u32 val)
+{
+ writel(val, hte->regs + reg);
+}
+
+static int tegra_hte_map_to_line_id(u32 eid,
+ const struct tegra_hte_line_mapped *m,
+ u32 map_sz, u32 *mapped)
+{
+
+ if (m) {
+ if (eid >= map_sz)
+ return -EINVAL;
+ if (m[eid].slice == NV_AON_SLICE_INVALID)
+ return -EINVAL;
+
+ *mapped = (m[eid].slice << 5) + m[eid].bit_index;
+ } else {
+ *mapped = eid;
+ }
+
+ return 0;
+}
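
As a worked example of this translation, entry [9] of the AON GPIO maps above is {2, NV_AON_HTE_SLICE2_IRQ_GPIO_2}, i.e. slice 2, bit 2, so the translated value comes out as:

    *mapped = (2 << 5) + 2;	/* GTE line id 66 */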
+
+static int tegra_hte_line_xlate(struct hte_chip *gc,
+ const struct of_phandle_args *args,
+ struct hte_ts_desc *desc, u32 *xlated_id)
+{
+ int ret = 0;
+ u32 line_id;
+ struct tegra_hte_soc *gs;
+ const struct tegra_hte_line_mapped *map = NULL;
+ u32 map_sz = 0;
+
+ if (!gc || !desc || !xlated_id)
+ return -EINVAL;
+
+ if (args) {
+ if (gc->of_hte_n_cells < 1)
+ return -EINVAL;
+
+ if (args->args_count != gc->of_hte_n_cells)
+ return -EINVAL;
+
+ desc->attr.line_id = args->args[0];
+ }
+
+ gs = gc->data;
+ if (!gs || !gs->prov_data)
+ return -EINVAL;
+
+ /*
+ * There are two paths a GPIO consumer can take:
+ * 1) The consumer (gpiolib-cdev for example) uses the GPIO global
+ * number, which is assigned at run time.
+ * 2) The consumer passes a GPIO from the DT, which is assigned
+ * statically, for example by using the TEGRA194_AON_GPIO DT binding.
+ *
+ * The code below handles both consumer use cases and maps them into
+ * the HTE/GTE namespace.
+ */
+ if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO && !args) {
+ line_id = desc->attr.line_id - gs->c->base;
+ map = gs->prov_data->map;
+ map_sz = gs->prov_data->map_sz;
+ } else if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO && args) {
+ line_id = desc->attr.line_id;
+ map = gs->prov_data->sec_map;
+ map_sz = gs->prov_data->sec_map_sz;
+ } else {
+ line_id = desc->attr.line_id;
+ }
+
+ ret = tegra_hte_map_to_line_id(line_id, map, map_sz, xlated_id);
+ if (ret < 0) {
+ dev_err(gc->dev, "line_id:%u mapping failed\n",
+ desc->attr.line_id);
+ return ret;
+ }
+
+ if (*xlated_id > gc->nlines)
+ return -EINVAL;
+
+ dev_dbg(gc->dev, "requested id:%u, xlated id:%u\n",
+ desc->attr.line_id, *xlated_id);
+
+ return 0;
+}
+
+static int tegra_hte_line_xlate_plat(struct hte_chip *gc,
+ struct hte_ts_desc *desc, u32 *xlated_id)
+{
+ return tegra_hte_line_xlate(gc, NULL, desc, xlated_id);
+}
+
+static int tegra_hte_en_dis_common(struct hte_chip *chip, u32 line_id, bool en)
+{
+ u32 slice, sl_bit_shift, line_bit, val, reg;
+ struct tegra_hte_soc *gs;
+
+ sl_bit_shift = __builtin_ctz(HTE_SLICE_SIZE);
+
+ if (!chip)
+ return -EINVAL;
+
+ gs = chip->data;
+
+ if (line_id > chip->nlines) {
+ dev_err(chip->dev,
+ "line id: %u is not supported by this controller\n",
+ line_id);
+ return -EINVAL;
+ }
+
+ slice = line_id >> sl_bit_shift;
+ line_bit = line_id & (HTE_SLICE_SIZE - 1);
+ reg = (slice << sl_bit_shift) + HTE_SLICE0_TETEN;
+
+ spin_lock(&gs->sl[slice].s_lock);
+
+ if (test_bit(HTE_SUSPEND, &gs->sl[slice].flags)) {
+ spin_unlock(&gs->sl[slice].s_lock);
+ dev_dbg(chip->dev, "device suspended");
+ return -EBUSY;
+ }
+
+ val = tegra_hte_readl(gs, reg);
+ if (en)
+ val = val | (1 << line_bit);
+ else
+ val = val & (~(1 << line_bit));
+ tegra_hte_writel(gs, reg, val);
+
+ spin_unlock(&gs->sl[slice].s_lock);
+
+ dev_dbg(chip->dev, "line: %u, slice %u, line_bit %u, reg:0x%x\n",
+ line_id, slice, line_bit, reg);
+
+ return 0;
+}
+
+static int tegra_hte_enable(struct hte_chip *chip, u32 line_id)
+{
+ if (!chip)
+ return -EINVAL;
+
+ return tegra_hte_en_dis_common(chip, line_id, true);
+}
+
+static int tegra_hte_disable(struct hte_chip *chip, u32 line_id)
+{
+ if (!chip)
+ return -EINVAL;
+
+ return tegra_hte_en_dis_common(chip, line_id, false);
+}
+
+static int tegra_hte_request(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 line_id)
+{
+ int ret;
+ struct tegra_hte_soc *gs;
+ struct hte_line_attr *attr;
+
+ if (!chip || !chip->data || !desc)
+ return -EINVAL;
+
+ gs = chip->data;
+ attr = &desc->attr;
+
+ if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+ if (!attr->line_data)
+ return -EINVAL;
+
+ ret = gpiod_enable_hw_timestamp_ns(attr->line_data,
+ attr->edge_flags);
+ if (ret)
+ return ret;
+
+ gs->line_data[line_id].data = attr->line_data;
+ gs->line_data[line_id].flags = attr->edge_flags;
+ }
+
+ return tegra_hte_en_dis_common(chip, line_id, true);
+}
+
+static int tegra_hte_release(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 line_id)
+{
+ struct tegra_hte_soc *gs;
+ struct hte_line_attr *attr;
+ int ret;
+
+ if (!chip || !chip->data || !desc)
+ return -EINVAL;
+
+ gs = chip->data;
+ attr = &desc->attr;
+
+ if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+ ret = gpiod_disable_hw_timestamp_ns(attr->line_data,
+ gs->line_data[line_id].flags);
+ if (ret)
+ return ret;
+
+ gs->line_data[line_id].data = NULL;
+ gs->line_data[line_id].flags = 0;
+ }
+
+ return tegra_hte_en_dis_common(chip, line_id, false);
+}
+
+static int tegra_hte_clk_src_info(struct hte_chip *chip,
+ struct hte_clk_info *ci)
+{
+ (void)chip;
+
+ if (!ci)
+ return -EINVAL;
+
+ ci->hz = HTE_TS_CLK_RATE_HZ;
+ ci->type = CLOCK_MONOTONIC;
+
+ return 0;
+}
+
+static int tegra_hte_get_level(struct tegra_hte_soc *gs, u32 line_id)
+{
+ struct gpio_desc *desc;
+
+ if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+ desc = gs->line_data[line_id].data;
+ if (desc)
+ return gpiod_get_raw_value(desc);
+ }
+
+ return -1;
+}
+
+static void tegra_hte_read_fifo(struct tegra_hte_soc *gs)
+{
+ u32 tsh, tsl, src, pv, cv, acv, slice, bit_index, line_id;
+ u64 tsc;
+ struct hte_ts_data el;
+
+ while ((tegra_hte_readl(gs, HTE_TESTATUS) >>
+ HTE_TESTATUS_OCCUPANCY_SHIFT) &
+ HTE_TESTATUS_OCCUPANCY_MASK) {
+ tsh = tegra_hte_readl(gs, HTE_TETSCH);
+ tsl = tegra_hte_readl(gs, HTE_TETSCL);
+ tsc = (((u64)tsh << 32) | tsl);
+
+ src = tegra_hte_readl(gs, HTE_TESRC);
+ slice = (src >> HTE_TESRC_SLICE_SHIFT) &
+ HTE_TESRC_SLICE_DEFAULT_MASK;
+
+ pv = tegra_hte_readl(gs, HTE_TEPCV);
+ cv = tegra_hte_readl(gs, HTE_TECCV);
+ acv = pv ^ cv;
+ while (acv) {
+ bit_index = __builtin_ctz(acv);
+ line_id = bit_index + (slice << 5);
+ el.tsc = tsc << HTE_TS_NS_SHIFT;
+ el.raw_level = tegra_hte_get_level(gs, line_id);
+ hte_push_ts_ns(gs->chip, line_id, &el);
+ acv &= ~BIT(bit_index);
+ }
+ tegra_hte_writel(gs, HTE_TECMD, HTE_TECMD_CMD_POP);
+ }
+}
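
As a sanity check on the timestamp conversion in the loop above: the TSC ticks at 31.25 MHz, so one tick is 32 ns, HTE_TS_NS_SHIFT is ctz(32) = 5, and the left shift multiplies the raw counter by 32. For instance (values illustrative only):

    u64 raw = 1000;				/* raw TSC ticks */
    u64 ns = raw << HTE_TS_NS_SHIFT;		/* 1000 * 32 = 32000 ns */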
+
+static irqreturn_t tegra_hte_isr(int irq, void *dev_id)
+{
+ struct tegra_hte_soc *gs = dev_id;
+ (void)irq;
+
+ tegra_hte_read_fifo(gs);
+
+ return IRQ_HANDLED;
+}
+
+static bool tegra_hte_match_from_linedata(const struct hte_chip *chip,
+ const struct hte_ts_desc *hdesc)
+{
+ struct tegra_hte_soc *hte_dev = chip->data;
+
+ if (!hte_dev || (hte_dev->prov_data->type != HTE_TEGRA_TYPE_GPIO))
+ return false;
+
+ return hte_dev->c == gpiod_to_chip(hdesc->attr.line_data);
+}
+
+static const struct of_device_id tegra_hte_of_match[] = {
+ { .compatible = "nvidia,tegra194-gte-lic", .data = &t194_lic_hte},
+ { .compatible = "nvidia,tegra194-gte-aon", .data = &t194_aon_hte},
+ { .compatible = "nvidia,tegra234-gte-lic", .data = &t234_lic_hte},
+ { .compatible = "nvidia,tegra234-gte-aon", .data = &t234_aon_hte},
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_hte_of_match);
+
+static const struct hte_ops g_ops = {
+ .request = tegra_hte_request,
+ .release = tegra_hte_release,
+ .enable = tegra_hte_enable,
+ .disable = tegra_hte_disable,
+ .get_clk_src_info = tegra_hte_clk_src_info,
+};
+
+static void tegra_gte_disable(void *data)
+{
+ struct platform_device *pdev = data;
+ struct tegra_hte_soc *gs = dev_get_drvdata(&pdev->dev);
+
+ tegra_hte_writel(gs, HTE_TECTRL, 0);
+}
+
+static int tegra_get_gpiochip_from_name(struct gpio_chip *chip, void *data)
+{
+ return !strcmp(chip->label, data);
+}
+
+static int tegra_gpiochip_match(struct gpio_chip *chip, void *data)
+{
+ return chip->fwnode == of_node_to_fwnode(data);
+}
+
+static int tegra_hte_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 i, slices, val = 0;
+ u32 nlines;
+ struct device *dev;
+ struct tegra_hte_soc *hte_dev;
+ struct hte_chip *gc;
+ struct device_node *gpio_ctrl;
+
+ dev = &pdev->dev;
+
+ hte_dev = devm_kzalloc(dev, sizeof(*hte_dev), GFP_KERNEL);
+ if (!hte_dev)
+ return -ENOMEM;
+
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, hte_dev);
+ hte_dev->prov_data = of_device_get_match_data(&pdev->dev);
+
+ ret = of_property_read_u32(dev->of_node, "nvidia,slices", &slices);
+ if (ret != 0)
+ slices = hte_dev->prov_data->slices;
+
+ dev_dbg(dev, "slices:%d\n", slices);
+ nlines = slices << 5;
+
+ hte_dev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hte_dev->regs))
+ return PTR_ERR(hte_dev->regs);
+
+ ret = of_property_read_u32(dev->of_node, "nvidia,int-threshold",
+ &hte_dev->itr_thrshld);
+ if (ret != 0)
+ hte_dev->itr_thrshld = 1;
+
+ hte_dev->sl = devm_kcalloc(dev, slices, sizeof(*hte_dev->sl),
+ GFP_KERNEL);
+ if (!hte_dev->sl)
+ return -ENOMEM;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "failed to get irq\n");
+ return ret;
+ }
+ hte_dev->hte_irq = ret;
+ ret = devm_request_irq(dev, hte_dev->hte_irq, tegra_hte_isr, 0,
+ dev_name(dev), hte_dev);
+ if (ret < 0) {
+ dev_err(dev, "request irq failed.\n");
+ return ret;
+ }
+
+ gc->nlines = nlines;
+ gc->ops = &g_ops;
+ gc->dev = dev;
+ gc->data = hte_dev;
+ gc->xlate_of = tegra_hte_line_xlate;
+ gc->xlate_plat = tegra_hte_line_xlate_plat;
+ gc->of_hte_n_cells = 1;
+
+ if (hte_dev->prov_data &&
+ hte_dev->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+ hte_dev->line_data = devm_kcalloc(dev, nlines,
+ sizeof(*hte_dev->line_data),
+ GFP_KERNEL);
+ if (!hte_dev->line_data)
+ return -ENOMEM;
+
+ gc->match_from_linedata = tegra_hte_match_from_linedata;
+
+ if (of_device_is_compatible(dev->of_node,
+ "nvidia,tegra194-gte-aon")) {
+ hte_dev->c = gpiochip_find("tegra194-gpio-aon",
+ tegra_get_gpiochip_from_name);
+ } else {
+ gpio_ctrl = of_parse_phandle(dev->of_node,
+ "nvidia,gpio-controller",
+ 0);
+ if (!gpio_ctrl) {
+ dev_err(dev,
+ "gpio controller node not found\n");
+ return -ENODEV;
+ }
+
+ hte_dev->c = gpiochip_find(gpio_ctrl,
+ tegra_gpiochip_match);
+ of_node_put(gpio_ctrl);
+ }
+
+ if (!hte_dev->c)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "wait for gpio controller\n");
+ }
+
+ hte_dev->chip = gc;
+
+ ret = devm_hte_register_chip(hte_dev->chip);
+ if (ret) {
+ dev_err(gc->dev, "hte chip register failed");
+ return ret;
+ }
+
+ for (i = 0; i < slices; i++) {
+ hte_dev->sl[i].flags = 0;
+ spin_lock_init(&hte_dev->sl[i].s_lock);
+ }
+
+ val = HTE_TECTRL_ENABLE_ENABLE |
+ (HTE_TECTRL_INTR_ENABLE << HTE_TECTRL_INTR_SHIFT) |
+ (hte_dev->itr_thrshld << HTE_TECTRL_OCCU_SHIFT);
+ tegra_hte_writel(hte_dev, HTE_TECTRL, val);
+
+ ret = devm_add_action_or_reset(&pdev->dev, tegra_gte_disable, pdev);
+ if (ret)
+ return ret;
+
+ dev_dbg(gc->dev, "lines: %d, slices:%d", gc->nlines, slices);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_hte_resume_early(struct device *dev)
+{
+ u32 i;
+ struct tegra_hte_soc *gs = dev_get_drvdata(dev);
+ u32 slices = gs->chip->nlines / NV_LINES_IN_SLICE;
+ u32 sl_bit_shift = __builtin_ctz(HTE_SLICE_SIZE);
+
+ tegra_hte_writel(gs, HTE_TECTRL, gs->conf_rval);
+
+ for (i = 0; i < slices; i++) {
+ spin_lock(&gs->sl[i].s_lock);
+ tegra_hte_writel(gs,
+ ((i << sl_bit_shift) + HTE_SLICE0_TETEN),
+ gs->sl[i].r_val);
+ clear_bit(HTE_SUSPEND, &gs->sl[i].flags);
+ spin_unlock(&gs->sl[i].s_lock);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_hte_suspend_late(struct device *dev)
+{
+ u32 i;
+ struct tegra_hte_soc *gs = dev_get_drvdata(dev);
+ u32 slices = gs->chip->nlines / NV_LINES_IN_SLICE;
+ u32 sl_bit_shift = __builtin_ctz(HTE_SLICE_SIZE);
+
+ gs->conf_rval = tegra_hte_readl(gs, HTE_TECTRL);
+ for (i = 0; i < slices; i++) {
+ spin_lock(&gs->sl[i].s_lock);
+ gs->sl[i].r_val = tegra_hte_readl(gs,
+ ((i << sl_bit_shift) + HTE_SLICE0_TETEN));
+ set_bit(HTE_SUSPEND, &gs->sl[i].flags);
+ spin_unlock(&gs->sl[i].s_lock);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_hte_pm = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(tegra_hte_suspend_late,
+ tegra_hte_resume_early)
+};
+
+static struct platform_driver tegra_hte_driver = {
+ .probe = tegra_hte_probe,
+ .driver = {
+ .name = "tegra_hte",
+ .pm = &tegra_hte_pm,
+ .of_match_table = tegra_hte_of_match,
+ },
+};
+
+module_platform_driver(tegra_hte_driver);
+
+MODULE_AUTHOR("Dipen Patel <dipenp@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra HTE (Hardware Timestamping Engine) driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hte/hte.c b/drivers/hte/hte.c
new file mode 100644
index 0000000000..598a716b73
--- /dev/null
+++ b/drivers/hte/hte.c
@@ -0,0 +1,947 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022 NVIDIA Corporation
+ *
+ * Author: Dipen Patel <dipenp@nvidia.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/hte.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+
+#define HTE_TS_NAME_LEN 10
+
+/* Global list of the HTE devices */
+static DEFINE_SPINLOCK(hte_lock);
+static LIST_HEAD(hte_devices);
+
+enum {
+ HTE_TS_REGISTERED,
+ HTE_TS_REQ,
+ HTE_TS_DISABLE,
+ HTE_TS_QUEUE_WK,
+};
+
+/**
+ * struct hte_ts_info - Information related to requested timestamp.
+ *
+ * @xlated_id: Timestamp ID as understood between HTE subsys and HTE provider,
+ * See xlate callback API.
+ * @flags: Flags holding state information.
+ * @hte_cb_flags: Callback related flags.
+ * @seq: Timestamp sequence counter.
+ * @line_name: HTE allocated line name.
+ * @free_attr_name: If set, free the attr name.
+ * @cb: A nonsleeping callback function provided by clients.
+ * @tcb: A secondary sleeping callback function provided by clients.
+ * @dropped_ts: Dropped timestamps.
+ * @slock: Spin lock to synchronize between disable/enable,
+ * request/release APIs.
+ * @cb_work: callback workqueue, used when tcb is specified.
+ * @req_mlock: Lock during timestamp request/release APIs.
+ * @ts_dbg_root: Root for the debug fs.
+ * @gdev: HTE abstract device that this timestamp information belongs to.
+ * @cl_data: Client specific data.
+ */
+struct hte_ts_info {
+ u32 xlated_id;
+ unsigned long flags;
+ unsigned long hte_cb_flags;
+ u64 seq;
+ char *line_name;
+ bool free_attr_name;
+ hte_ts_cb_t cb;
+ hte_ts_sec_cb_t tcb;
+ atomic_t dropped_ts;
+ spinlock_t slock;
+ struct work_struct cb_work;
+ struct mutex req_mlock;
+ struct dentry *ts_dbg_root;
+ struct hte_device *gdev;
+ void *cl_data;
+};
+
+/**
+ * struct hte_device - HTE abstract device
+ * @nlines: Number of entities this device supports.
+ * @ts_req: Total number of entities requested.
+ * @sdev: Device used at various debug prints.
+ * @dbg_root: Root directory for debug fs.
+ * @list: List node to store hte_device for each provider.
+ * @chip: HTE chip providing this HTE device.
+ * @owner: helps prevent removal of modules when in use.
+ * @ei: Timestamp information.
+ */
+struct hte_device {
+ u32 nlines;
+ atomic_t ts_req;
+ struct device *sdev;
+ struct dentry *dbg_root;
+ struct list_head list;
+ struct hte_chip *chip;
+ struct module *owner;
+ struct hte_ts_info ei[];
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *hte_root;
+
+static int __init hte_subsys_dbgfs_init(void)
+{
+ /* creates /sys/kernel/debug/hte/ */
+ hte_root = debugfs_create_dir("hte", NULL);
+
+ return 0;
+}
+subsys_initcall(hte_subsys_dbgfs_init);
+
+static void hte_chip_dbgfs_init(struct hte_device *gdev)
+{
+ const struct hte_chip *chip = gdev->chip;
+ const char *name = chip->name ? chip->name : dev_name(chip->dev);
+
+ gdev->dbg_root = debugfs_create_dir(name, hte_root);
+
+ debugfs_create_atomic_t("ts_requested", 0444, gdev->dbg_root,
+ &gdev->ts_req);
+ debugfs_create_u32("total_ts", 0444, gdev->dbg_root,
+ &gdev->nlines);
+}
+
+static void hte_ts_dbgfs_init(const char *name, struct hte_ts_info *ei)
+{
+ if (!ei->gdev->dbg_root || !name)
+ return;
+
+ ei->ts_dbg_root = debugfs_create_dir(name, ei->gdev->dbg_root);
+
+ debugfs_create_atomic_t("dropped_timestamps", 0444, ei->ts_dbg_root,
+ &ei->dropped_ts);
+}
+
+#else
+
+static void hte_chip_dbgfs_init(struct hte_device *gdev)
+{
+}
+
+static void hte_ts_dbgfs_init(const char *name, struct hte_ts_info *ei)
+{
+}
+
+#endif
+
+/**
+ * hte_ts_put() - Release and disable timestamp for the given desc.
+ *
+ * @desc: timestamp descriptor.
+ *
+ * Context: debugfs_remove_recursive() function call may use sleeping locks,
+ * not suitable from atomic context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_ts_put(struct hte_ts_desc *desc)
+{
+ int ret = 0;
+ unsigned long flag;
+ struct hte_device *gdev;
+ struct hte_ts_info *ei;
+
+ if (!desc)
+ return -EINVAL;
+
+ ei = desc->hte_data;
+
+ if (!ei || !ei->gdev)
+ return -EINVAL;
+
+ gdev = ei->gdev;
+
+ mutex_lock(&ei->req_mlock);
+
+ if (unlikely(!test_bit(HTE_TS_REQ, &ei->flags) &&
+ !test_bit(HTE_TS_REGISTERED, &ei->flags))) {
+ dev_info(gdev->sdev, "id:%d is not requested\n",
+ desc->attr.line_id);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (unlikely(!test_bit(HTE_TS_REQ, &ei->flags) &&
+ test_bit(HTE_TS_REGISTERED, &ei->flags))) {
+ dev_info(gdev->sdev, "id:%d is registered but not requested\n",
+ desc->attr.line_id);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (test_bit(HTE_TS_REQ, &ei->flags) &&
+ !test_bit(HTE_TS_REGISTERED, &ei->flags)) {
+ clear_bit(HTE_TS_REQ, &ei->flags);
+ desc->hte_data = NULL;
+ ret = 0;
+ goto mod_put;
+ }
+
+ ret = gdev->chip->ops->release(gdev->chip, desc, ei->xlated_id);
+ if (ret) {
+ dev_err(gdev->sdev, "id: %d free failed\n",
+ desc->attr.line_id);
+ goto unlock;
+ }
+
+ kfree(ei->line_name);
+ if (ei->free_attr_name)
+ kfree_const(desc->attr.name);
+
+ debugfs_remove_recursive(ei->ts_dbg_root);
+
+ spin_lock_irqsave(&ei->slock, flag);
+
+ if (test_bit(HTE_TS_QUEUE_WK, &ei->flags)) {
+ spin_unlock_irqrestore(&ei->slock, flag);
+ flush_work(&ei->cb_work);
+ spin_lock_irqsave(&ei->slock, flag);
+ }
+
+ atomic_dec(&gdev->ts_req);
+ atomic_set(&ei->dropped_ts, 0);
+
+ ei->seq = 1;
+ ei->flags = 0;
+ desc->hte_data = NULL;
+
+ spin_unlock_irqrestore(&ei->slock, flag);
+
+ ei->cb = NULL;
+ ei->tcb = NULL;
+ ei->cl_data = NULL;
+
+mod_put:
+ module_put(gdev->owner);
+unlock:
+ mutex_unlock(&ei->req_mlock);
+ dev_dbg(gdev->sdev, "release id: %d\n", desc->attr.line_id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hte_ts_put);
+
+static int hte_ts_dis_en_common(struct hte_ts_desc *desc, bool en)
+{
+ u32 ts_id;
+ struct hte_device *gdev;
+ struct hte_ts_info *ei;
+ int ret;
+ unsigned long flag;
+
+ if (!desc)
+ return -EINVAL;
+
+ ei = desc->hte_data;
+
+ if (!ei || !ei->gdev)
+ return -EINVAL;
+
+ gdev = ei->gdev;
+ ts_id = desc->attr.line_id;
+
+ mutex_lock(&ei->req_mlock);
+
+ if (!test_bit(HTE_TS_REGISTERED, &ei->flags)) {
+ dev_dbg(gdev->sdev, "id:%d is not registered", ts_id);
+ ret = -EUSERS;
+ goto out;
+ }
+
+ spin_lock_irqsave(&ei->slock, flag);
+
+ if (en) {
+ if (!test_bit(HTE_TS_DISABLE, &ei->flags)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ spin_unlock_irqrestore(&ei->slock, flag);
+ ret = gdev->chip->ops->enable(gdev->chip, ei->xlated_id);
+ if (ret) {
+ dev_warn(gdev->sdev, "id: %d enable failed\n",
+ ts_id);
+ goto out;
+ }
+
+ spin_lock_irqsave(&ei->slock, flag);
+ clear_bit(HTE_TS_DISABLE, &ei->flags);
+ } else {
+ if (test_bit(HTE_TS_DISABLE, &ei->flags)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ spin_unlock_irqrestore(&ei->slock, flag);
+ ret = gdev->chip->ops->disable(gdev->chip, ei->xlated_id);
+ if (ret) {
+ dev_warn(gdev->sdev, "id: %d disable failed\n",
+ ts_id);
+ goto out;
+ }
+
+ spin_lock_irqsave(&ei->slock, flag);
+ set_bit(HTE_TS_DISABLE, &ei->flags);
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&ei->slock, flag);
+out:
+ mutex_unlock(&ei->req_mlock);
+ return ret;
+}
+
+/**
+ * hte_disable_ts() - Disable timestamp on given descriptor.
+ *
+ * The API does not release any resources associated with desc.
+ *
+ * @desc: ts descriptor, this is the same as returned by the request API.
+ *
+ * Context: Holds mutex lock, not suitable from atomic context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_disable_ts(struct hte_ts_desc *desc)
+{
+ return hte_ts_dis_en_common(desc, false);
+}
+EXPORT_SYMBOL_GPL(hte_disable_ts);
+
+/**
+ * hte_enable_ts() - Enable timestamp on given descriptor.
+ *
+ * @desc: ts descriptor, this is the same as returned by the request API.
+ *
+ * Context: Holds mutex lock, not suitable from atomic context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_enable_ts(struct hte_ts_desc *desc)
+{
+ return hte_ts_dis_en_common(desc, true);
+}
+EXPORT_SYMBOL_GPL(hte_enable_ts);
+
+static void hte_do_cb_work(struct work_struct *w)
+{
+ unsigned long flag;
+ struct hte_ts_info *ei = container_of(w, struct hte_ts_info, cb_work);
+
+ if (unlikely(!ei->tcb))
+ return;
+
+ ei->tcb(ei->cl_data);
+
+ spin_lock_irqsave(&ei->slock, flag);
+ clear_bit(HTE_TS_QUEUE_WK, &ei->flags);
+ spin_unlock_irqrestore(&ei->slock, flag);
+}
+
+static int __hte_req_ts(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data)
+{
+ int ret;
+ struct hte_device *gdev;
+ struct hte_ts_info *ei = desc->hte_data;
+
+ gdev = ei->gdev;
+ /*
+ * Multiple consumers may request the same entity at the same time,
+ * so take the lock here.
+ */
+ mutex_lock(&ei->req_mlock);
+
+ if (test_bit(HTE_TS_REGISTERED, &ei->flags) ||
+ !test_bit(HTE_TS_REQ, &ei->flags)) {
+ dev_dbg(gdev->chip->dev, "id:%u req failed\n",
+ desc->attr.line_id);
+ ret = -EUSERS;
+ goto unlock;
+ }
+
+ ei->cb = cb;
+ ei->tcb = tcb;
+ if (tcb)
+ INIT_WORK(&ei->cb_work, hte_do_cb_work);
+
+ ret = gdev->chip->ops->request(gdev->chip, desc, ei->xlated_id);
+ if (ret < 0) {
+ dev_err(gdev->chip->dev, "ts request failed\n");
+ goto unlock;
+ }
+
+ ei->cl_data = data;
+ ei->seq = 1;
+
+ atomic_inc(&gdev->ts_req);
+
+ ei->line_name = NULL;
+ if (!desc->attr.name) {
+ ei->line_name = kzalloc(HTE_TS_NAME_LEN, GFP_KERNEL);
+ if (ei->line_name)
+ scnprintf(ei->line_name, HTE_TS_NAME_LEN, "ts_%u",
+ desc->attr.line_id);
+ }
+
+ hte_ts_dbgfs_init(desc->attr.name == NULL ?
+ ei->line_name : desc->attr.name, ei);
+ set_bit(HTE_TS_REGISTERED, &ei->flags);
+
+ dev_dbg(gdev->chip->dev, "id: %u, xlated id:%u",
+ desc->attr.line_id, ei->xlated_id);
+
+ ret = 0;
+
+unlock:
+ mutex_unlock(&ei->req_mlock);
+
+ return ret;
+}
+
+static int hte_bind_ts_info_locked(struct hte_ts_info *ei,
+ struct hte_ts_desc *desc, u32 x_id)
+{
+ int ret = 0;
+
+ mutex_lock(&ei->req_mlock);
+
+ if (test_bit(HTE_TS_REQ, &ei->flags)) {
+ dev_dbg(ei->gdev->chip->dev, "id:%u is already requested\n",
+ desc->attr.line_id);
+ ret = -EUSERS;
+ goto out;
+ }
+
+ set_bit(HTE_TS_REQ, &ei->flags);
+ desc->hte_data = ei;
+ ei->xlated_id = x_id;
+
+out:
+ mutex_unlock(&ei->req_mlock);
+
+ return ret;
+}
+
+static struct hte_device *of_node_to_htedevice(struct device_node *np)
+{
+ struct hte_device *gdev;
+
+ spin_lock(&hte_lock);
+
+ list_for_each_entry(gdev, &hte_devices, list)
+ if (gdev->chip && gdev->chip->dev &&
+ device_match_of_node(gdev->chip->dev, np)) {
+ spin_unlock(&hte_lock);
+ return gdev;
+ }
+
+ spin_unlock(&hte_lock);
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct hte_device *hte_find_dev_from_linedata(struct hte_ts_desc *desc)
+{
+ struct hte_device *gdev;
+
+ spin_lock(&hte_lock);
+
+ list_for_each_entry(gdev, &hte_devices, list)
+ if (gdev->chip && gdev->chip->match_from_linedata) {
+ if (!gdev->chip->match_from_linedata(gdev->chip, desc))
+ continue;
+ spin_unlock(&hte_lock);
+ return gdev;
+ }
+
+ spin_unlock(&hte_lock);
+
+ return ERR_PTR(-ENODEV);
+}
+
+/**
+ * of_hte_req_count - Return the number of entities to timestamp.
+ *
+ * The function returns the total count of the requested entities to timestamp
+ * by parsing device tree.
+ *
+ * @dev: The HTE consumer.
+ *
+ * Returns: Positive number on success, -ENOENT if no entries,
+ * -EINVAL for other errors.
+ */
+int of_hte_req_count(struct device *dev)
+{
+ int count;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ count = of_count_phandle_with_args(dev->of_node, "timestamps",
+ "#timestamp-cells");
+
+ return count ? count : -ENOENT;
+}
+EXPORT_SYMBOL_GPL(of_hte_req_count);
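
As an example of the consumer side, with the two-entry "timestamps" property from the test driver's device tree snippet, a probe function would count its lines like this (a sketch, not part of the patch; pdev is the consumer's platform device):

    int cnt = of_hte_req_count(&pdev->dev);
    if (cnt < 0)
            return cnt;
    /* cnt == 2 for the hte-gpio + hte-i2c-irq example node */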
+
+static inline struct hte_device *hte_get_dev(struct hte_ts_desc *desc)
+{
+ return hte_find_dev_from_linedata(desc);
+}
+
+static struct hte_device *hte_of_get_dev(struct device *dev,
+ struct hte_ts_desc *desc,
+ int index,
+ struct of_phandle_args *args,
+ bool *free_name)
+{
+ int ret;
+ struct device_node *np;
+ char *temp;
+
+ if (!dev->of_node)
+ return ERR_PTR(-EINVAL);
+
+ np = dev->of_node;
+
+ if (!of_property_present(np, "timestamp-names")) {
+ /* Let hte core construct it during request time */
+ desc->attr.name = NULL;
+ } else {
+ ret = of_property_read_string_index(np, "timestamp-names",
+ index, &desc->attr.name);
+ if (ret) {
+ pr_err("can't parse \"timestamp-names\" property\n");
+ return ERR_PTR(ret);
+ }
+ *free_name = false;
+ if (desc->attr.name) {
+ temp = skip_spaces(desc->attr.name);
+ if (!*temp)
+ desc->attr.name = NULL;
+ }
+ }
+
+ ret = of_parse_phandle_with_args(np, "timestamps", "#timestamp-cells",
+ index, args);
+ if (ret) {
+ pr_err("%s(): can't parse \"timestamps\" property\n",
+ __func__);
+ return ERR_PTR(ret);
+ }
+
+ of_node_put(args->np);
+
+ return of_node_to_htedevice(args->np);
+}
+
+/**
+ * hte_ts_get() - The function to initialize and obtain HTE desc.
+ *
+ * The function initializes the consumer-provided HTE descriptor. If the
+ * consumer has a device tree node, the index is used to parse the line id
+ * and other details.
+ * The function needs to be called before using any request APIs.
+ *
+ * @dev: HTE consumer/client device, used in case of parsing device tree node.
+ * @desc: Pre-allocated timestamp descriptor.
+ * @index: The index will be used as an index to parse line_id from the
+ * device tree node if node is present.
+ *
+ * Context: Holds mutex lock.
+ * Returns: Returns 0 on success or negative error code on failure.
+ */
+int hte_ts_get(struct device *dev, struct hte_ts_desc *desc, int index)
+{
+ struct hte_device *gdev;
+ struct hte_ts_info *ei;
+ const struct fwnode_handle *fwnode;
+ struct of_phandle_args args;
+ u32 xlated_id;
+ int ret;
+ bool free_name = false;
+
+ if (!desc)
+ return -EINVAL;
+
+ fwnode = dev ? dev_fwnode(dev) : NULL;
+
+ if (is_of_node(fwnode))
+ gdev = hte_of_get_dev(dev, desc, index, &args, &free_name);
+ else
+ gdev = hte_get_dev(desc);
+
+ if (IS_ERR(gdev)) {
+ pr_err("%s() no hte dev found\n", __func__);
+ return PTR_ERR(gdev);
+ }
+
+ if (!try_module_get(gdev->owner))
+ return -ENODEV;
+
+ if (!gdev->chip) {
+ pr_err("%s(): requested id does not have provider\n",
+ __func__);
+ ret = -ENODEV;
+ goto put;
+ }
+
+ if (is_of_node(fwnode)) {
+ if (!gdev->chip->xlate_of)
+ ret = -EINVAL;
+ else
+ ret = gdev->chip->xlate_of(gdev->chip, &args,
+ desc, &xlated_id);
+ } else {
+ if (!gdev->chip->xlate_plat)
+ ret = -EINVAL;
+ else
+ ret = gdev->chip->xlate_plat(gdev->chip, desc,
+ &xlated_id);
+ }
+
+ if (ret < 0)
+ goto put;
+
+ ei = &gdev->ei[xlated_id];
+
+ ret = hte_bind_ts_info_locked(ei, desc, xlated_id);
+ if (ret)
+ goto put;
+
+ ei->free_attr_name = free_name;
+
+ return 0;
+
+put:
+ module_put(gdev->owner);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hte_ts_get);
+
+static void __devm_hte_release_ts(void *res)
+{
+ hte_ts_put(res);
+}
+
+/**
+ * hte_request_ts_ns() - The API to request and enable hardware timestamp in
+ * nanoseconds.
+ *
+ * The entity is provider specific, for example GPIO lines, signals, buses,
+ * etc. The API allocates the necessary resources and enables the timestamp.
+ *
+ * @desc: Pre-allocated and initialized timestamp descriptor.
+ * @cb: Callback to push the timestamp data to consumer.
+ * @tcb: Optional callback. If it is provided, the subsystem initializes a
+ * workqueue. It is called when cb returns HTE_RUN_SECOND_CB.
+ * @data: Client data, used during cb and tcb callbacks.
+ *
+ * Context: Holds mutex lock.
+ * Returns: Returns 0 on success or negative error code on failure.
+ */
+int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data)
+{
+ int ret;
+ struct hte_ts_info *ei;
+
+ if (!desc || !desc->hte_data || !cb)
+ return -EINVAL;
+
+ ei = desc->hte_data;
+ if (!ei || !ei->gdev)
+ return -EINVAL;
+
+ ret = __hte_req_ts(desc, cb, tcb, data);
+ if (ret < 0) {
+ dev_err(ei->gdev->chip->dev,
+ "failed to request id: %d\n", desc->attr.line_id);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hte_request_ts_ns);
+
+/**
+ * devm_hte_request_ts_ns() - Resource managed API to request and enable
+ * hardware timestamp in nanoseconds.
+ *
+ * The entity is provider specific, for example GPIO lines, signals, buses,
+ * etc. The API allocates the necessary resources and enables the timestamp.
+ * They are deallocated and disabled automatically when the consumer exits.
+ *
+ * @dev: HTE consumer/client device.
+ * @desc: Pre-allocated and initialized timestamp descriptor.
+ * @cb: Callback to push the timestamp data to consumer.
+ * @tcb: Optional callback. If it is provided, the subsystem initializes a
+ * workqueue. It is called when cb returns HTE_RUN_SECOND_CB.
+ * @data: Client data, used during cb and tcb callbacks.
+ *
+ * Context: Holds mutex lock.
+ * Returns: Returns 0 on success or negative error code on failure.
+ */
+int devm_hte_request_ts_ns(struct device *dev, struct hte_ts_desc *desc,
+ hte_ts_cb_t cb, hte_ts_sec_cb_t tcb,
+ void *data)
+{
+ int err;
+
+ if (!dev)
+ return -EINVAL;
+
+ err = hte_request_ts_ns(desc, cb, tcb, data);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(dev, __devm_hte_release_ts, desc);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_hte_request_ts_ns);
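
A minimal consumer flow, modeled on the test driver earlier in this patch; my_ts_cb, gpio_in and dev are placeholders supplied by the consumer:

    struct hte_ts_desc *desc;
    int ret;

    desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
    if (!desc)
            return -ENOMEM;

    hte_init_line_attr(desc, 0, 0, NULL, gpio_in);	/* line data only, as in the GPIO test case */

    ret = hte_ts_get(dev, desc, 0);			/* index 0 of the "timestamps" property */
    if (ret)
            return ret;

    ret = devm_hte_request_ts_ns(dev, desc, my_ts_cb, NULL, desc);
    if (ret)		/* no hte_ts_put() needed, the request API cleans up */
            return ret;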
+
+/**
+ * hte_init_line_attr() - Initialize line attributes.
+ *
+ * Zeroes out line attributes and initializes with provided arguments.
+ * The function needs to be called before calling any consumer facing
+ * functions.
+ *
+ * @desc: Pre-allocated timestamp descriptor.
+ * @line_id: line id.
+ * @edge_flags: edge flags related to line_id.
+ * @name: name of the line.
+ * @data: line data related to line_id.
+ *
+ * Context: Any.
+ * Returns: 0 on success or negative error code for the failure.
+ */
+int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags, const char *name, void *data)
+{
+ if (!desc)
+ return -EINVAL;
+
+ memset(&desc->attr, 0, sizeof(desc->attr));
+
+ desc->attr.edge_flags = edge_flags;
+ desc->attr.line_id = line_id;
+ desc->attr.line_data = data;
+ if (name) {
+ name = kstrdup_const(name, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ }
+
+ desc->attr.name = name;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hte_init_line_attr);
+
+/**
+ * hte_get_clk_src_info() - Get the clock source information for a ts
+ * descriptor.
+ *
+ * @desc: ts descriptor, same as returned from request API.
+ * @ci: The API fills this structure with the clock information data.
+ *
+ * Context: Any context.
+ * Returns: 0 on success else negative error code on failure.
+ */
+int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci)
+{
+ struct hte_chip *chip;
+ struct hte_ts_info *ei;
+
+ if (!desc || !desc->hte_data || !ci) {
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ ei = desc->hte_data;
+ if (!ei->gdev || !ei->gdev->chip)
+ return -EINVAL;
+
+ chip = ei->gdev->chip;
+ if (!chip->ops->get_clk_src_info)
+ return -EOPNOTSUPP;
+
+ return chip->ops->get_clk_src_info(chip, ci);
+}
+EXPORT_SYMBOL_GPL(hte_get_clk_src_info);
+
+/**
+ * hte_push_ts_ns() - Push timestamp data in nanoseconds.
+ *
+ * It is used by the provider to push timestamp data.
+ *
+ * @chip: The HTE chip, used during the registration.
+ * @xlated_id: entity id understood by both subsystem and provider, this is
+ * obtained from xlate callback during request API.
+ * @data: timestamp data.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_push_ts_ns(const struct hte_chip *chip, u32 xlated_id,
+ struct hte_ts_data *data)
+{
+ enum hte_return ret;
+ int st = 0;
+ struct hte_ts_info *ei;
+ unsigned long flag;
+
+ if (!chip || !data || !chip->gdev)
+ return -EINVAL;
+
+ if (xlated_id >= chip->nlines)
+ return -EINVAL;
+
+ ei = &chip->gdev->ei[xlated_id];
+
+ spin_lock_irqsave(&ei->slock, flag);
+
+ /* timestamp sequence counter */
+ data->seq = ei->seq++;
+
+ if (!test_bit(HTE_TS_REGISTERED, &ei->flags) ||
+ test_bit(HTE_TS_DISABLE, &ei->flags)) {
+ dev_dbg(chip->dev, "Unknown timestamp push\n");
+ atomic_inc(&ei->dropped_ts);
+ st = -EINVAL;
+ goto unlock;
+ }
+
+ ret = ei->cb(data, ei->cl_data);
+ if (ret == HTE_RUN_SECOND_CB && ei->tcb) {
+ queue_work(system_unbound_wq, &ei->cb_work);
+ set_bit(HTE_TS_QUEUE_WK, &ei->flags);
+ }
+
+unlock:
+ spin_unlock_irqrestore(&ei->slock, flag);
+
+ return st;
+}
+EXPORT_SYMBOL_GPL(hte_push_ts_ns);
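
On the provider side a push typically happens from the FIFO drain path, as in tegra_hte_read_fifo() in this patch; a stripped-down sketch (chip, xlated_id and ticks are placeholders, and the tick-to-ns shift is the Tegra driver's conversion, not part of the core):

    struct hte_ts_data el = {
            .tsc = ticks << HTE_TS_NS_SHIFT,	/* timestamp in ns */
            .raw_level = -1,			/* line level unknown */
    };

    hte_push_ts_ns(chip, xlated_id, &el);	/* the core fills in el.seq */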
+
+static int hte_register_chip(struct hte_chip *chip)
+{
+ struct hte_device *gdev;
+ u32 i;
+
+ if (!chip || !chip->dev || !chip->dev->of_node)
+ return -EINVAL;
+
+ if (!chip->ops || !chip->ops->request || !chip->ops->release) {
+ dev_err(chip->dev, "Driver needs to provide ops\n");
+ return -EINVAL;
+ }
+
+ gdev = kzalloc(struct_size(gdev, ei, chip->nlines), GFP_KERNEL);
+ if (!gdev)
+ return -ENOMEM;
+
+ gdev->chip = chip;
+ chip->gdev = gdev;
+ gdev->nlines = chip->nlines;
+ gdev->sdev = chip->dev;
+
+ for (i = 0; i < chip->nlines; i++) {
+ gdev->ei[i].gdev = gdev;
+ mutex_init(&gdev->ei[i].req_mlock);
+ spin_lock_init(&gdev->ei[i].slock);
+ }
+
+ if (chip->dev->driver)
+ gdev->owner = chip->dev->driver->owner;
+ else
+ gdev->owner = THIS_MODULE;
+
+ of_node_get(chip->dev->of_node);
+
+ INIT_LIST_HEAD(&gdev->list);
+
+ spin_lock(&hte_lock);
+ list_add_tail(&gdev->list, &hte_devices);
+ spin_unlock(&hte_lock);
+
+ hte_chip_dbgfs_init(gdev);
+
+ dev_dbg(chip->dev, "Added hte chip\n");
+
+ return 0;
+}
+
+static int hte_unregister_chip(struct hte_chip *chip)
+{
+ struct hte_device *gdev;
+
+ if (!chip)
+ return -EINVAL;
+
+ gdev = chip->gdev;
+
+ spin_lock(&hte_lock);
+ list_del(&gdev->list);
+ spin_unlock(&hte_lock);
+
+ gdev->chip = NULL;
+
+ of_node_put(chip->dev->of_node);
+ debugfs_remove_recursive(gdev->dbg_root);
+ kfree(gdev);
+
+ dev_dbg(chip->dev, "Removed hte chip\n");
+
+ return 0;
+}
+
+static void _hte_devm_unregister_chip(void *chip)
+{
+ hte_unregister_chip(chip);
+}
+
+/**
+ * devm_hte_register_chip() - Resource managed API to register HTE chip.
+ *
+ * It is used by the provider to register itself with the HTE subsystem.
+ * The unregistration is done automatically when the provider exits.
+ *
+ * @chip: the HTE chip to add to subsystem.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int devm_hte_register_chip(struct hte_chip *chip)
+{
+ int err;
+
+ err = hte_register_chip(chip);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(chip->dev, _hte_devm_unregister_chip,
+ chip);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_hte_register_chip);
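
For completeness, a provider registers with the subsystem roughly the way tegra_hte_probe() does; a condensed sketch with placeholder ops, xlate callback, private data and line count:

    struct hte_chip *gc;

    gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
    if (!gc)
            return -ENOMEM;

    gc->nlines = nlines;		/* number of timestampable entities */
    gc->ops = &my_hte_ops;		/* must provide at least .request and .release */
    gc->dev = dev;
    gc->data = priv;
    gc->xlate_of = my_xlate_of;
    gc->of_hte_n_cells = 1;

    return devm_hte_register_chip(gc);	/* unregistered automatically when the provider detaches */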