summaryrefslogtreecommitdiffstats
path: root/drivers/ptp/ptp_kvm_x86.c
blob: 617c8d6706d3d00f7167fbf7e5b624ced29a206d (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtual PTP 1588 clock for use with KVM guests
 *
 * Copyright (C) 2017 Red Hat Inc.
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <asm/pvclock.h>
#include <asm/kvmclock.h>
#include <linux/module.h>
#include <uapi/asm/kvm_para.h>
#include <uapi/linux/kvm_para.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_kvm.h>
#include <linux/set_memory.h>

/* Guest-physical address of the pairing buffer handed to the host. */
static phys_addr_t clock_pair_gpa;
/* Static backing storage, used when guest memory is not encrypted. */
static struct kvm_clock_pairing clock_pair_glbl;
/*
 * Active pairing buffer the host writes into: either &clock_pair_glbl,
 * or a dedicated page marked decrypted on memory-encrypted guests.
 */
static struct kvm_clock_pairing *clock_pair;

/*
 * Set up the shared guest/host clock-pairing buffer and probe for the
 * KVM_HC_CLOCK_PAIRING hypercall.
 *
 * On memory-encrypted (confidential) guests the buffer must live in a
 * page explicitly marked decrypted so the host can write to it;
 * otherwise the static clock_pair_glbl is used directly.
 *
 * Returns 0 on success, -ENODEV when KVM paravirt support, the pvclock
 * mapping, or the hypercall is unavailable, -ENOMEM on allocation
 * failure, or a negative value propagated from set_memory_decrypted()
 * or the hypercall itself.
 */
int kvm_arch_ptp_init(void)
{
	struct page *p;
	long ret;

	if (!kvm_para_available())
		return -ENODEV;

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		/* Host must be able to write the buffer: use a decrypted page. */
		p = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!p)
			return -ENOMEM;

		clock_pair = page_address(p);
		ret = set_memory_decrypted((unsigned long)clock_pair, 1);
		if (ret) {
			/*
			 * Decryption failed, so the page is still encrypted:
			 * free it directly and bypass kvm_arch_ptp_exit(),
			 * which would try to re-encrypt it.
			 */
			__free_page(p);
			clock_pair = NULL;
			goto nofree;
		}
	} else {
		clock_pair = &clock_pair_glbl;
	}

	clock_pair_gpa = slow_virt_to_phys(clock_pair);
	if (!pvclock_get_pvti_cpu0_va()) {
		/* No pvclock page for CPU0: cross-timestamping cannot work. */
		ret = -ENODEV;
		goto err;
	}

	/* Probe: ask the host to fill in a wallclock/TSC pairing. */
	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
			     KVM_CLOCK_PAIRING_WALLCLOCK);
	if (ret == -KVM_ENOSYS) {
		ret = -ENODEV;
		goto err;
	}

	return ret;

err:
	/* Undo the decrypted-page setup (re-encrypt and free). */
	kvm_arch_ptp_exit();
nofree:
	return ret;
}

/*
 * Tear down the clock-pairing buffer created by kvm_arch_ptp_init().
 *
 * Only memory-encrypted guests own a dedicated page; for them the page
 * is flipped back to encrypted (warning on failure) and released.
 * Non-encrypted guests use static storage, so there is nothing to do.
 */
void kvm_arch_ptp_exit(void)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	WARN_ON(set_memory_encrypted((unsigned long)clock_pair, 1));
	free_page((unsigned long)clock_pair);
	clock_pair = NULL;
}

/*
 * kvm_arch_ptp_get_clock() - read the host's wallclock time.
 * @ts: filled with the host's realtime second/nanosecond pair on success.
 *
 * Issues the KVM_HC_CLOCK_PAIRING hypercall so the host writes the
 * current wallclock into the shared clock_pair buffer, then copies it
 * out for the caller.
 *
 * Returns 0 on success, -EOPNOTSUPP if the hypercall fails.
 */
int kvm_arch_ptp_get_clock(struct timespec64 *ts)
{
	long ret;

	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
			     clock_pair_gpa,
			     KVM_CLOCK_PAIRING_WALLCLOCK);
	if (ret != 0) {
		/*
		 * ret is a signed long carrying negative KVM error codes;
		 * print with %ld (not %lu) so failures are readable.
		 */
		pr_err_ratelimited("clock offset hypercall ret %ld\n", ret);
		return -EOPNOTSUPP;
	}

	ts->tv_sec = clock_pair->sec;
	ts->tv_nsec = clock_pair->nsec;

	return 0;
}

/*
 * kvm_arch_ptp_get_crosststamp() - get a host-wallclock / guest-cycle pair.
 * @cycle: filled with the guest clocksource cycle value matching @tspec.
 * @tspec: filled with the host's realtime second/nanosecond pair.
 * @cs_id: set to CSID_X86_KVM_CLK, the clocksource the cycle belongs to.
 *
 * The host records both its wallclock and the TSC at the same instant in
 * its KVM_HC_CLOCK_PAIRING handler; converting that TSC with this CPU's
 * pvclock parameters yields a consistent cross-timestamp.  The hypercall
 * is wrapped in the pvclock seqcount so a concurrent update of the
 * conversion parameters invalidates and retries the measurement.
 *
 * Returns 0 on success, -EOPNOTSUPP if the hypercall fails.
 */
int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
			      enum clocksource_ids *cs_id)
{
	struct pvclock_vcpu_time_info *src;
	unsigned int version;
	long ret;

	src = this_cpu_pvti();

	do {
		/*
		 * We are using a TSC value read in the hosts
		 * kvm_hc_clock_pairing handling.
		 * So any changes to tsc_to_system_mul
		 * and tsc_shift or any other pvclock
		 * data invalidate that measurement.
		 */
		version = pvclock_read_begin(src);

		ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
				     clock_pair_gpa,
				     KVM_CLOCK_PAIRING_WALLCLOCK);
		if (ret != 0) {
			/*
			 * ret is a signed long carrying negative KVM error
			 * codes; print with %ld (not %lu) so failures are
			 * readable.
			 */
			pr_err_ratelimited("clock pairing hypercall ret %ld\n", ret);
			return -EOPNOTSUPP;
		}
		tspec->tv_sec = clock_pair->sec;
		tspec->tv_nsec = clock_pair->nsec;
		*cycle = __pvclock_read_cycles(src, clock_pair->tsc);
	} while (pvclock_read_retry(src, version));

	*cs_id = CSID_X86_KVM_CLK;

	return 0;
}