path: root/virt/kvm/arm/pvtime.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.
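//
// ARM paravirtualized time support: expose per-vcpu stolen time to the
// guest through the SMCCC PV_TIME interface and a shared memory structure.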

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>

#include <asm/kvm_mmu.h>
#include <asm/pvclock-abi.h>

#include <kvm/arm_hypercalls.h>

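/*
 * Fold the time this vcpu's task spent waiting on the host runqueue
 * (current->sched_info.run_delay) into the accumulated stolen time, then
 * publish the new total, little-endian, to the guest's shared stolen-time
 * structure, if the guest has registered one.
 */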
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 steal;
	__le64 steal_le;
	u64 offset;
	int idx;
	u64 base = vcpu->arch.steal.base;

	if (base == GPA_INVALID)
		return;

	/* Let's do the local bookkeeping */
	steal = vcpu->arch.steal.steal;
	steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
	vcpu->arch.steal.steal = steal;

	steal_le = cpu_to_le64(steal);
	idx = srcu_read_lock(&kvm->srcu);
	offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
	kvm_put_guest(kvm, base + offset, steal_le, u64);
	srcu_read_unlock(&kvm->srcu, idx);
}

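/*
 * Handle the PV_TIME_FEATURES hypercall: report whether the queried
 * paravirtualized time feature is supported by this host.
 */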
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
{
	u32 feature = smccc_get_arg1(vcpu);
	long val = SMCCC_RET_NOT_SUPPORTED;

	switch (feature) {
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
	case ARM_SMCCC_HV_PV_TIME_ST:
		val = SMCCC_RET_SUCCESS;
		break;
	}

	return val;
}

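/*
 * Handle the PV_TIME_ST hypercall: restart stolen time accounting, zero the
 * shared structure and return its guest physical address (or GPA_INVALID if
 * userspace has not configured a base address for this vcpu).
 */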
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
{
	struct pvclock_vcpu_stolen_time init_values = {};
	struct kvm *kvm = vcpu->kvm;
	u64 base = vcpu->arch.steal.base;
	int idx;

	if (base == GPA_INVALID)
		return base;

	/*
	 * Start counting stolen time from the time the guest requests
	 * the feature to be enabled.
	 */
	vcpu->arch.steal.steal = 0;
	vcpu->arch.steal.last_steal = current->sched_info.run_delay;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
	srcu_read_unlock(&kvm->srcu, idx);

	return base;
}

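/*
 * Userspace sets the guest physical address of the per-vcpu stolen-time
 * structure. The address must be 64-byte aligned, backed by an existing
 * memslot, and may only be set once.
 */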
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr)
{
	u64 __user *user = (u64 __user *)attr->addr;
	struct kvm *kvm = vcpu->kvm;
	u64 ipa;
	int ret = 0;
	int idx;

	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
		return -ENXIO;

	if (get_user(ipa, user))
		return -EFAULT;
	if (!IS_ALIGNED(ipa, 64))
		return -EINVAL;
	if (vcpu->arch.steal.base != GPA_INVALID)
		return -EEXIST;

	/* Check the address is in a valid memslot */
	idx = srcu_read_lock(&kvm->srcu);
	if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
		ret = -EINVAL;
	srcu_read_unlock(&kvm->srcu, idx);

	if (!ret)
		vcpu->arch.steal.base = ipa;

	return ret;
}

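/* Return the currently configured stolen-time address to userspace. */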
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr)
{
	u64 __user *user = (u64 __user *)attr->addr;
	u64 ipa;

	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
		return -ENXIO;

	ipa = vcpu->arch.steal.base;

	if (put_user(ipa, user))
		return -EFAULT;
	return 0;
}

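/* Only the stolen-time address attribute is supported. */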
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PVTIME_IPA:
		return 0;
	}
	return -ENXIO;
}