author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
commit     638a9e433ecd61e64761352dbec1fa4f5874c941 (patch)
tree       fdbff74a238d7a5a7d1cef071b7230bc064b9f25 /include/vdso
parent     Releasing progress-linux version 6.9.12-1~progress7.99u1. (diff)
Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include/vdso')
-rw-r--r--  include/vdso/datapage.h |  4
-rw-r--r--  include/vdso/math64.h   | 38
2 files changed, 42 insertions(+), 0 deletions(-)
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index c71ddb6d46..d04d394db0 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -61,6 +61,7 @@ struct vdso_timestamp {
* @seq: timebase sequence counter
* @clock_mode: clock mode
* @cycle_last: timebase at clocksource init
+ * @max_cycles: maximum cycles which won't overflow 64bit multiplication
* @mask: clocksource mask
* @mult: clocksource multiplier
* @shift: clocksource shift
@@ -92,6 +93,9 @@ struct vdso_data {
s32 clock_mode;
u64 cycle_last;
+#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
+ u64 max_cycles;
+#endif
u64 mask;
u32 mult;
u32 shift;
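
The new max_cycles field is compiled in only when CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT is set; it caches the largest cycle delta that can be multiplied by the clocksource mult without overflowing 64 bits. A minimal sketch of how the vDSO time-read path might consult it follows; the helper name vdso_delta_ok() and its #else branch are illustrative assumptions, not part of this patch.

/*
 * Sketch (not from this patch): gate the fast cycles->ns path on the
 * cached overflow limit.  Field names match struct vdso_data above;
 * vdso_delta_ok() itself is a hypothetical helper for illustration.
 */
static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
{
#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
	/* Deltas at or above max_cycles could overflow delta * vd->mult in 64 bits. */
	return delta < vd->max_cycles;
#else
	return true;
#endif
}

A caller would fall back to a slower, overflow-safe conversion when such a check fails, instead of the plain 64-bit multiply-and-shift.
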
diff --git a/include/vdso/math64.h b/include/vdso/math64.h
index 7da703ee55..22ae212f8b 100644
--- a/include/vdso/math64.h
+++ b/include/vdso/math64.h
@@ -21,4 +21,42 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
return ret;
}
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_add_u64_shr
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+ return (u64)((((unsigned __int128)a * mul) + b) >> shift);
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#else
+
+#ifndef mul_u64_u32_add_u64_shr
+#ifndef mul_u32_u32
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+ return (u64)a * b;
+}
+#define mul_u32_u32 mul_u32_u32
+#endif
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+ u32 ah = a >> 32, al = a;
+ bool ovf;
+ u64 ret;
+
+ ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
+ ret >>= shift;
+ if (ovf && shift)
+ ret += 1ULL << (64 - shift);
+ if (ah)
+ ret += mul_u32_u32(ah, mul) << (32 - shift);
+
+ return ret;
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#endif
+
#endif /* __VDSO_MATH64_H */
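
The new helper computes (a * mul + b) >> shift. With 128-bit support this is a single widening multiply; the fallback splits a into 32-bit halves (a = ah * 2^32 + al), so a * mul + b = ah * mul * 2^32 + (al * mul + b). The low term may carry out of 64 bits, and because the sum is shifted right by shift, a lost carry of 2^64 reappears as 2^(64 - shift); the high term stays exact as long as shift <= 32 and the final result fits in 64 bits. The stand-alone user-space check below re-implements the fallback and compares it against a 128-bit reference; it assumes a GCC/Clang 64-bit host with __int128 and is only a sketch, not kernel code.

/* Assumption: built in user space with GCC or Clang on a 64-bit host. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;
}

/* Fallback from the patch, with types renamed to <stdint.h> equivalents. */
static uint64_t mul_u64_u32_add_u64_shr(uint64_t a, uint32_t mul, uint64_t b,
					unsigned int shift)
{
	uint32_t ah = a >> 32, al = a;
	uint64_t ret;
	bool ovf;

	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
	ret >>= shift;
	if (ovf && shift)
		ret += 1ULL << (64 - shift);	/* carry of 2^64, shifted right */
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}

static void check(uint64_t a, uint32_t mul, uint64_t b, unsigned int shift)
{
	uint64_t ref = (uint64_t)(((unsigned __int128)a * mul + b) >> shift);

	assert(mul_u64_u32_add_u64_shr(a, mul, b, shift) == ref);
}

int main(void)
{
	/* Shaped like a cycles->ns conversion: delta, mult, base nanoseconds, shift. */
	check(0x0123456789abcdefULL, 20000000, 0x0fedcba987654321ULL, 24);
	/* Forces the low-half addition to carry so the ovf correction runs. */
	check(0x00000000ffffffffULL, 0xffffffff, 0xffffffffffffff00ULL, 4);
	printf("fallback matches the 128-bit reference\n");
	return 0;
}

This is roughly the shape of the vDSO cycles-to-nanoseconds conversion (a cycle delta times mult, plus an accumulated base, then a right shift), which is why the add-then-shift variant is introduced alongside the max_cycles field above.
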