/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_QSPINLOCK_H
#define _ASM_POWERPC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>

#define _Q_PENDING_LOOPS	(1 << 9) /* not tuned */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_unlock(struct qspinlock *lock);
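
/*
 * On a shared-processor guest, take the paravirt slow path so a waiter
 * can give its time slice back to the hypervisor rather than spinning
 * against a preempted lock holder; otherwise use the native slow path.
 */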
static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (!is_shared_processor())
		native_queued_spin_lock_slowpath(lock, val);
	else
		__pv_queued_spin_lock_slowpath(lock, val);
}
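
/*
 * Native unlock is a single release store of 0 to the locked byte; the
 * paravirt unlock must also kick the CPU (if any) parked in pv_wait().
 */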
#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	if (!is_shared_processor())
		smp_store_release(&lock->locked, 0);
	else
		__pv_queued_spin_unlock(lock);
}

#else
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
#endif
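
/*
 * Fast path: when the whole lock word is 0 (unlocked, no pending bit,
 * empty MCS queue), a single cmpxchg from 0 to _Q_LOCKED_VAL acquires
 * the lock. On failure, val is updated with the value that was
 * observed, which the slow path takes as its starting point.
 */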
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#define queued_spin_lock queued_spin_lock

#define smp_mb__after_spinlock() smp_mb()

static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * This barrier was added to simple spinlocks by commit 51d7d5205d338,
	 * but it should now be possible to remove it, as arm64 has done with
	 * commit c6f5d02b6a0f.
	 */
	smp_mb();
	return atomic_read(&lock->val);
}
#define queued_spin_is_locked queued_spin_is_locked

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define SPIN_THRESHOLD (1<<15) /* not tuned */
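
/*
 * pv_wait(): called by the generic pv qspinlock code after a waiter
 * has spun SPIN_THRESHOLD times. If *ptr still holds the expected
 * value, yield this vCPU's time slice back to the hypervisor; a stale
 * value means the waker has already run, so return and spin again.
 */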
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	if (*ptr != val)
		return;
	yield_to_any();
	/*
	 * We could pass in a CPU here if waiting in the queue and yield to
	 * the previous CPU in the queue.
	 */
}
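
/* pv_kick(): ask the hypervisor to wake a vCPU parked in pv_wait(). */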
static __always_inline void pv_kick(int cpu)
{
	prod_cpu(cpu);
}
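
/*
 * The generic pv slow path keeps a hash from lock address to waiting
 * CPU so the unlocker knows whom to kick; allocate it once at boot.
 */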
extern void __pv_init_lock_hash(void);

static inline void pv_spinlocks_init(void)
{
	__pv_init_lock_hash();
}

#endif

#include <asm-generic/qspinlock.h>
#endif /* _ASM_POWERPC_QSPINLOCK_H */