path: root/arch/mips/kernel/csrc-r4k.c
author     Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /arch/mips/kernel/csrc-r4k.c
parent     Initial commit. (diff)
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/mips/kernel/csrc-r4k.c')
-rw-r--r--	arch/mips/kernel/csrc-r4k.c	130
1 file changed, 130 insertions, 0 deletions
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
new file mode 100644
index 000000000..edc4afc08
--- /dev/null
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -0,0 +1,130 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ */
+#include <linux/clocksource.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/sched_clock.h>
+
+#include <asm/time.h>
+
+static u64 c0_hpt_read(struct clocksource *cs)
+{
+ return read_c0_count();
+}
+
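+/* A clocksource backed by the 32-bit CP0 Count register, hence the 32-bit mask */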
+static struct clocksource clocksource_mips = {
+ .name = "MIPS",
+ .read = c0_hpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u64 __maybe_unused notrace r4k_read_sched_clock(void)
+{
+ return read_c0_count();
+}
+
+static inline unsigned int rdhwr_count(void)
+{
+ unsigned int count;
+
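+ /* RDHWR $2 reads the CC hardware register, the user-mode view of CP0 Count */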
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set mips32r2\n"
+ " rdhwr %0, $2\n"
+ " .set pop\n"
+ : "=r" (count));
+
+ return count;
+}
+
+static bool rdhwr_count_usable(void)
+{
+ unsigned int prev, curr, i;
+
+ /*
+ * Older QEMUs have a broken implementation of RDHWR for the CP0 count
+ * which always returns a constant value. Try to identify this and don't
+ * use it in the VDSO if it is broken. This workaround can be removed
+ * once the fix has been in QEMU stable for a reasonable amount of time.
+ */
+ for (i = 0, prev = rdhwr_count(); i < 100; i++) {
+ curr = rdhwr_count();
+
+ if (curr != prev)
+ return true;
+
+ prev = curr;
+ }
+
+ pr_warn("Not using R4K clocksource in VDSO due to broken RDHWR\n");
+ return false;
+}
+
+#ifdef CONFIG_CPU_FREQ
+
+static bool __read_mostly r4k_clock_unstable;
+
+static void r4k_clocksource_unstable(char *reason)
+{
+ if (r4k_clock_unstable)
+ return;
+
+ r4k_clock_unstable = true;
+
+ pr_info("R4K timer is unstable due to %s\n", reason);
+
+ clocksource_mark_unstable(&clocksource_mips);
+}
+
+static int r4k_cpufreq_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ if (val == CPUFREQ_POSTCHANGE)
+ r4k_clocksource_unstable("CPU frequency change");
+
+ return 0;
+}
+
+static struct notifier_block r4k_cpufreq_notifier = {
+ .notifier_call = r4k_cpufreq_callback,
+};
+
+static int __init r4k_register_cpufreq_notifier(void)
+{
+ return cpufreq_register_notifier(&r4k_cpufreq_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(r4k_register_cpufreq_notifier);
+
+#endif /* CONFIG_CPU_FREQ */
+
+int __init init_r4k_clocksource(void)
+{
+ if (!cpu_has_counter || !mips_hpt_frequency)
+ return -ENXIO;
+
+ /* Calculate a somewhat reasonable rating value */
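+ /* e.g. a 100 MHz counter gives 200 + 100000000 / 10000000 = 210 */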
+ clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
+
+ /*
+ * R2 onwards makes the count accessible to user mode so it can be used
+ * by the VDSO (HWREna is configured by configure_hwrena()).
+ */
+ if (cpu_has_mips_r2_r6 && rdhwr_count_usable())
+ clocksource_mips.vdso_clock_mode = VDSO_CLOCKMODE_R4K;
+
+ clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
+
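+ /*
+ * With cpufreq the count rate can change at run time and sched_clock
+ * cannot be marked unstable, so only register it when cpufreq is
+ * disabled.
+ */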
+#ifndef CONFIG_CPU_FREQ
+ sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
+#endif
+
+ return 0;
+}