/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VDSO_HELPERS_H
#define __VDSO_HELPERS_H
#ifndef __ASSEMBLY__
#include <vdso/datapage.h>
/*
 * Seqcount read-side begin: spin until the sequence counter is even
 * (i.e. no update is in progress), then order all subsequent data
 * reads after the counter load.  Returns the counter value to be
 * checked later with vdso_read_retry().
 */
static __always_inline u32 vdso_read_begin(const struct vdso_data *vd)
{
	u32 seq;

	for (;;) {
		seq = READ_ONCE(vd->seq);
		if (likely(!(seq & 1)))
			break;
		cpu_relax();
	}
	smp_rmb();
	return seq;
}
/*
 * Seqcount read-side retry check: order all preceding data reads
 * before re-reading the sequence counter.  Returns non-zero when the
 * counter changed since vdso_read_begin() returned @start, meaning a
 * concurrent update raced with the reader and the read section must
 * be retried.
 */
static __always_inline u32 vdso_read_retry(const struct vdso_data *vd,
					   u32 start)
{
	smp_rmb();
	return READ_ONCE(vd->seq) != start;
}
/*
 * Seqcount write-side begin: bump the sequence counters of both the
 * CS_HRES_COARSE and CS_RAW data pages to an odd value, signalling to
 * readers (which spin in vdso_read_begin() while seq is odd) that an
 * update is in progress, then order the counter stores before the
 * subsequent data updates.
 */
static __always_inline void vdso_write_begin(struct vdso_data *vd)
{
	/*
	 * WRITE_ONCE() is required, otherwise the compiler could validly
	 * tear the updates to vd[x].seq and a reader could observe an
	 * inconsistent value.
	 */
	WRITE_ONCE(vd[CS_HRES_COARSE].seq, vd[CS_HRES_COARSE].seq + 1);
	WRITE_ONCE(vd[CS_RAW].seq, vd[CS_RAW].seq + 1);
	smp_wmb();
}
/*
 * Seqcount write-side end: order all preceding data updates before the
 * counter stores, then bump both sequence counters back to an even
 * value so readers know the update is complete.
 */
static __always_inline void vdso_write_end(struct vdso_data *vd)
{
	smp_wmb();
	/*
	 * WRITE_ONCE() is required, otherwise the compiler could validly
	 * tear the updates to vd[x].seq and a reader could observe an
	 * inconsistent value.
	 */
	WRITE_ONCE(vd[CS_HRES_COARSE].seq, vd[CS_HRES_COARSE].seq + 1);
	WRITE_ONCE(vd[CS_RAW].seq, vd[CS_RAW].seq + 1);
}
#endif /* !__ASSEMBLY__ */
#endif /* __VDSO_HELPERS_H */