Diffstat (limited to 'arch/x86/kernel/paravirt-spinlocks.c')
-rw-r--r-- | arch/x86/kernel/paravirt-spinlocks.c | 43
1 file changed, 43 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
new file mode 100644
index 000000000..9e1ea99ad
--- /dev/null
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Split spinlock implementation out into its own file, so it can be
+ * compiled in a FTRACE-compatible way.
+ */
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/jump_label.h>
+
+#include <asm/paravirt.h>
+
+__visible void __native_queued_spin_unlock(struct qspinlock *lock)
+{
+	native_queued_spin_unlock(lock);
+}
+PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
+
+bool pv_is_native_spin_unlock(void)
+{
+	return pv_ops.lock.queued_spin_unlock.func ==
+		__raw_callee_save___native_queued_spin_unlock;
+}
+
+__visible bool __native_vcpu_is_preempted(long cpu)
+{
+	return false;
+}
+PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
+
+bool pv_is_native_vcpu_is_preempted(void)
+{
+	return pv_ops.lock.vcpu_is_preempted.func ==
+		__raw_callee_save___native_vcpu_is_preempted;
+}
+
+void __init paravirt_set_cap(void)
+{
+	if (!pv_is_native_spin_unlock())
+		setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);
+
+	if (!pv_is_native_vcpu_is_preempted())
+		setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
+}
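For context on what the two pv_is_native_*() checks are testing: at boot, pv_ops.lock.queued_spin_unlock and pv_ops.lock.vcpu_is_preempted point at the native thunks defined in this file, so both checks return true and paravirt_set_cap() leaves the feature bits clear. A hypervisor guest flips the result by installing its own callee-saved handlers. The sketch below is illustrative only, not part of this commit; it loosely follows the KVM guest setup in arch/x86/kernel/kvm.c, and the helper names (__pv_queued_spin_unlock, __kvm_vcpu_is_preempted) are assumed from there.

/*
 * Illustrative sketch (not part of this diff): how a guest such as KVM
 * repoints the lock ops. PV_CALLEE_SAVE(fn) wraps a function that has a
 * matching __raw_callee_save_<fn> thunk, so after this runs,
 * pv_ops.lock.queued_spin_unlock.func no longer equals
 * __raw_callee_save___native_queued_spin_unlock and
 * pv_is_native_spin_unlock() returns false.
 */
void __init kvm_spinlock_init(void)
{
	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);

	/* Assumed helper: reports whether a vCPU lost its physical CPU. */
	pv_ops.lock.vcpu_is_preempted =
		PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
}

Once the slots are patched this way, paravirt_set_cap() force-enables X86_FEATURE_PVUNLOCK and X86_FEATURE_VCPUPREEMPT, and the alternatives patching machinery uses those feature bits to switch the lock call sites over to the paravirt variants.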