Diffstat (limited to 'arch/riscv/kvm/vcpu.c')
-rw-r--r--  arch/riscv/kvm/vcpu.c  |  625
1 file changed, 233 insertions(+), 392 deletions(-)
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index f3455dc013fa..b5ca9f2e98ac 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -7,18 +7,19 @@
*/
#include <linux/bitops.h>
+#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
-#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
-#include <asm/hwcap.h>
+#include <asm/cacheflush.h>
+#include <asm/kvm_vcpu_vector.h>
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
@@ -26,6 +27,9 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
STATS_DESC_COUNTER(VCPU, mmio_exit_user),
STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
+ STATS_DESC_COUNTER(VCPU, csr_exit_user),
+ STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
STATS_DESC_COUNTER(VCPU, exits)
};
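
[Annotation] The three counters added above become visible to userspace through KVM's binary stats interface. A hedged sketch of reading the stats header for a VCPU, assuming the KVM_GET_STATS_FD ioctl and struct kvm_stats_header from <linux/kvm.h> (error handling elided, illustrative only):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static void dump_stats_header(int vcpu_fd)
    {
            /* Per-VCPU stats fd; advertised via KVM_CAP_BINARY_STATS_FD. */
            int sfd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
            struct kvm_stats_header hdr;

            /* The header says where descriptors and data live in the file. */
            pread(sfd, &hdr, sizeof(hdr), 0);
            /* hdr.num_desc now also counts csr_exit_* and signal_exits. */
    }
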
@@ -38,17 +42,6 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
-#define KVM_RISCV_ISA_DISABLE_ALLOWED (riscv_isa_extension_mask(d) | \
- riscv_isa_extension_mask(f))
-
-#define KVM_RISCV_ISA_DISABLE_NOT_ALLOWED (riscv_isa_extension_mask(a) | \
- riscv_isa_extension_mask(c) | \
- riscv_isa_extension_mask(i) | \
- riscv_isa_extension_mask(m))
-
-#define KVM_RISCV_ISA_ALLOWED (KVM_RISCV_ISA_DISABLE_ALLOWED | \
- KVM_RISCV_ISA_DISABLE_NOT_ALLOWED)
-
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
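
[Annotation] The removed KVM_RISCV_ISA_* masks were compile-time policy; kvm_riscv_vcpu_setup_isa(), introduced later in this diff, now populates the per-VCPU ISA bitmap, and per-extension control lives behind the ONE_REG interface. A hedged userspace sketch, assuming a 64-bit host and the uapi constants of this era (KVM_RISCV_ISA_EXT_SVPBMT is just an example extension ID):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int disable_svpbmt(int vcpu_fd)
    {
            unsigned long val = 0; /* 0 = disable, 1 = enable */
            struct kvm_one_reg reg = {
                    .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                          KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
                    .addr = (unsigned long)&val,
            };

            /* Rejected once the VCPU has run at least once. */
            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }
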
@@ -75,15 +68,23 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
kvm_riscv_vcpu_fp_reset(vcpu);
+ kvm_riscv_vcpu_vector_reset(vcpu);
+
kvm_riscv_vcpu_timer_reset(vcpu);
- WRITE_ONCE(vcpu->arch.irqs_pending, 0);
- WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
+ kvm_riscv_vcpu_aia_reset(vcpu);
+
+ bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
+ bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
+
+ kvm_riscv_vcpu_pmu_reset(vcpu);
vcpu->arch.hfence_head = 0;
vcpu->arch.hfence_tail = 0;
memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));
+ kvm_riscv_vcpu_sbi_sta_reset(vcpu);
+
/* Reset the guest CSRs for hotplug usecase */
if (loaded)
kvm_arch_vcpu_load(vcpu, smp_processor_id());
@@ -97,15 +98,22 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
+ int rc;
struct kvm_cpu_context *cntx;
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
+ bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
/* Setup ISA features available to VCPU */
- vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;
+ kvm_riscv_vcpu_setup_isa(vcpu);
+
+ /* Setup vendor, arch, and implementation details */
+ vcpu->arch.mvendorid = sbi_get_mvendorid();
+ vcpu->arch.marchid = sbi_get_marchid();
+ vcpu->arch.mimpid = sbi_get_mimpid();
/* Setup VCPU hfence queue */
spin_lock_init(&vcpu->arch.hfence_lock);
@@ -118,12 +126,29 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
cntx->hstatus |= HSTATUS_SPVP;
cntx->hstatus |= HSTATUS_SPV;
+ if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
+ return -ENOMEM;
+
/* By default, make CY, TM, and IR counters accessible in VU mode */
reset_csr->scounteren = 0x7;
/* Setup VCPU timer */
kvm_riscv_vcpu_timer_init(vcpu);
+ /* setup performance monitoring */
+ kvm_riscv_vcpu_pmu_init(vcpu);
+
+ /* Setup VCPU AIA */
+ rc = kvm_riscv_vcpu_aia_init(vcpu);
+ if (rc)
+ return rc;
+
+ /*
+ * Setup SBI extensions
+ * NOTE: This must be the last thing to be initialized.
+ */
+ kvm_riscv_vcpu_sbi_init(vcpu);
+
/* Reset VCPU */
kvm_riscv_reset_vcpu(vcpu);
@@ -143,24 +168,34 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
+ /* Cleanup VCPU AIA context */
+ kvm_riscv_vcpu_aia_deinit(vcpu);
+
/* Cleanup VCPU timer */
kvm_riscv_vcpu_timer_deinit(vcpu);
+ kvm_riscv_vcpu_pmu_deinit(vcpu);
+
/* Free unused pages pre-allocated for G-stage page table mappings */
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+
+ /* Free vector context space for host and guest kernel */
+ kvm_riscv_vcpu_free_vector_context(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER);
+ return kvm_riscv_vcpu_timer_pending(vcpu);
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
+ kvm_riscv_aia_wakeon_hgei(vcpu, true);
}
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
+ kvm_riscv_aia_wakeon_hgei(vcpu, false);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
@@ -184,337 +219,6 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_CONFIG);
- unsigned long reg_val;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
-
- switch (reg_num) {
- case KVM_REG_RISCV_CONFIG_REG(isa):
- reg_val = vcpu->arch.isa;
- break;
- default:
- return -EINVAL;
- }
-
- if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_CONFIG);
- unsigned long reg_val;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
-
- if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- switch (reg_num) {
- case KVM_REG_RISCV_CONFIG_REG(isa):
- if (!vcpu->arch.ran_atleast_once) {
- /* Ignore the disable request for these extensions */
- vcpu->arch.isa = reg_val | KVM_RISCV_ISA_DISABLE_NOT_ALLOWED;
- vcpu->arch.isa &= riscv_isa_extension_base(NULL);
- vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
- kvm_riscv_vcpu_fp_reset(vcpu);
- } else {
- return -EOPNOTSUPP;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_CORE);
- unsigned long reg_val;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
- if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
- return -EINVAL;
-
- if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
- reg_val = cntx->sepc;
- else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
- reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
- reg_val = ((unsigned long *)cntx)[reg_num];
- else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
- reg_val = (cntx->sstatus & SR_SPP) ?
- KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
- else
- return -EINVAL;
-
- if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_CORE);
- unsigned long reg_val;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
- if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
- return -EINVAL;
-
- if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
- cntx->sepc = reg_val;
- else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
- reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
- ((unsigned long *)cntx)[reg_num] = reg_val;
- else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
- if (reg_val == KVM_RISCV_MODE_S)
- cntx->sstatus |= SR_SPP;
- else
- cntx->sstatus &= ~SR_SPP;
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_CSR);
- unsigned long reg_val;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
- if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
- return -EINVAL;
-
- if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
- kvm_riscv_vcpu_flush_interrupts(vcpu);
- reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
- } else
- reg_val = ((unsigned long *)csr)[reg_num];
-
- if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_CSR);
- unsigned long reg_val;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
- if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
- return -EINVAL;
-
- if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
- reg_val &= VSIP_VALID_MASK;
- reg_val <<= VSIP_TO_HVIP_SHIFT;
- }
-
- ((unsigned long *)csr)[reg_num] = reg_val;
-
- if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
- WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
-
- return 0;
-}
-
-/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
-static unsigned long kvm_isa_ext_arr[] = {
- RISCV_ISA_EXT_a,
- RISCV_ISA_EXT_c,
- RISCV_ISA_EXT_d,
- RISCV_ISA_EXT_f,
- RISCV_ISA_EXT_h,
- RISCV_ISA_EXT_i,
- RISCV_ISA_EXT_m,
-};
-
-static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_ISA_EXT);
- unsigned long reg_val = 0;
- unsigned long host_isa_ext;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
-
- if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
- return -EINVAL;
-
- host_isa_ext = kvm_isa_ext_arr[reg_num];
- if (__riscv_isa_extension_available(&vcpu->arch.isa, host_isa_ext))
- reg_val = 1; /* Mark the given extension as available */
-
- if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_ISA_EXT);
- unsigned long reg_val;
- unsigned long host_isa_ext;
- unsigned long host_isa_ext_mask;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
-
- if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
- return -EINVAL;
-
- if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- host_isa_ext = kvm_isa_ext_arr[reg_num];
- if (!__riscv_isa_extension_available(NULL, host_isa_ext))
- return -EOPNOTSUPP;
-
- if (host_isa_ext >= RISCV_ISA_EXT_BASE &&
- host_isa_ext < RISCV_ISA_EXT_MAX) {
- /*
- * Multi-letter ISA extension. Currently there is no provision
- * to enable/disable the multi-letter ISA extensions for guests.
- * Return success if the request is to enable any ISA extension
- * that is available in the hardware.
- * Return -EOPNOTSUPP otherwise.
- */
- if (!reg_val)
- return -EOPNOTSUPP;
- else
- return 0;
- }
-
- /* Single letter base ISA extension */
- if (!vcpu->arch.ran_atleast_once) {
- host_isa_ext_mask = BIT_MASK(host_isa_ext);
- if (!reg_val && (host_isa_ext_mask & KVM_RISCV_ISA_DISABLE_ALLOWED))
- vcpu->arch.isa &= ~host_isa_ext_mask;
- else
- vcpu->arch.isa |= host_isa_ext_mask;
- vcpu->arch.isa &= riscv_isa_extension_base(NULL);
- vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
- kvm_riscv_vcpu_fp_reset(vcpu);
- } else {
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
- return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
- return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
- return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
- return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
- return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
- KVM_REG_RISCV_FP_F);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
- return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
- KVM_REG_RISCV_FP_D);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
- return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
-
- return -EINVAL;
-}
-
-static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
- return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
- return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
- return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
- return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
- return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
- KVM_REG_RISCV_FP_F);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
- return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
- KVM_REG_RISCV_FP_D);
- else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
- return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
-
- return -EINVAL;
-}
-
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -558,6 +262,24 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
break;
}
+ case KVM_GET_REG_LIST: {
+ struct kvm_reg_list __user *user_list = argp;
+ struct kvm_reg_list reg_list;
+ unsigned int n;
+
+ r = -EFAULT;
+ if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+ break;
+ n = reg_list.n;
+ reg_list.n = kvm_riscv_vcpu_num_regs(vcpu);
+ if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+ break;
+ r = -E2BIG;
+ if (n < reg_list.n)
+ break;
+ r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
+ break;
+ }
default:
break;
}
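
[Annotation] The KVM_GET_REG_LIST handler added above implements the usual two-call protocol. A hedged userspace sketch (struct kvm_reg_list comes from <linux/kvm.h>; the sizing logic mirrors the E2BIG path in the handler):

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    static struct kvm_reg_list *fetch_reg_list(int vcpu_fd)
    {
            struct kvm_reg_list probe = { .n = 0 }, *list;

            /* First call: kernel rewrites .n with the real count (E2BIG). */
            ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

            list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
            if (!list)
                    return NULL;
            list->n = probe.n;

            /* Second call: the array is now large enough for the reg IDs. */
            if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
                    free(list);
                    return NULL;
            }
            return list;
    }
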
@@ -608,13 +330,16 @@ void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
unsigned long mask, val;
- if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
- mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
- val = READ_ONCE(vcpu->arch.irqs_pending) & mask;
+ if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
+ mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
+ val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;
csr->hvip &= ~mask;
csr->hvip |= val;
}
+
+ /* Flush AIA high interrupts */
+ kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
}
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
@@ -631,26 +356,38 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
if (hvip & (1UL << IRQ_VS_SOFT)) {
if (!test_and_set_bit(IRQ_VS_SOFT,
- &v->irqs_pending_mask))
- set_bit(IRQ_VS_SOFT, &v->irqs_pending);
+ v->irqs_pending_mask))
+ set_bit(IRQ_VS_SOFT, v->irqs_pending);
} else {
if (!test_and_set_bit(IRQ_VS_SOFT,
- &v->irqs_pending_mask))
- clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
+ v->irqs_pending_mask))
+ clear_bit(IRQ_VS_SOFT, v->irqs_pending);
}
}
+
+ /* Sync-up AIA high interrupts */
+ kvm_riscv_vcpu_aia_sync_interrupts(vcpu);
+
+ /* Sync-up timer CSRs */
+ kvm_riscv_vcpu_timer_sync(vcpu);
}
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
- if (irq != IRQ_VS_SOFT &&
+ /*
+ * We only allow VS-mode software, timer, and external
+ * interrupts when irq is one of the local interrupts
+ * defined by RISC-V privilege specification.
+ */
+ if (irq < IRQ_LOCAL_MAX &&
+ irq != IRQ_VS_SOFT &&
irq != IRQ_VS_TIMER &&
irq != IRQ_VS_EXT)
return -EINVAL;
- set_bit(irq, &vcpu->arch.irqs_pending);
+ set_bit(irq, vcpu->arch.irqs_pending);
smp_mb__before_atomic();
- set_bit(irq, &vcpu->arch.irqs_pending_mask);
+ set_bit(irq, vcpu->arch.irqs_pending_mask);
kvm_vcpu_kick(vcpu);
@@ -659,24 +396,37 @@ int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
- if (irq != IRQ_VS_SOFT &&
+ /*
+ * We only allow VS-mode software, timer, and external
+ * interrupts when irq is one of the local interrupts
+ * defined by RISC-V privilege specification.
+ */
+ if (irq < IRQ_LOCAL_MAX &&
+ irq != IRQ_VS_SOFT &&
irq != IRQ_VS_TIMER &&
irq != IRQ_VS_EXT)
return -EINVAL;
- clear_bit(irq, &vcpu->arch.irqs_pending);
+ clear_bit(irq, vcpu->arch.irqs_pending);
smp_mb__before_atomic();
- set_bit(irq, &vcpu->arch.irqs_pending_mask);
+ set_bit(irq, vcpu->arch.irqs_pending_mask);
return 0;
}
-bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
+bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
- unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
- << VSIP_TO_HVIP_SHIFT) & mask;
+ unsigned long ie;
+
+ ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
+ << VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
+ ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
+ (unsigned long)mask;
+ if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
+ return true;
- return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
+ /* Check AIA high interrupts */
+ return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
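
[Annotation] Userspace reaches the set/unset helpers above through the KVM_INTERRUPT async ioctl, which maps KVM_INTERRUPT_SET/UNSET to IRQ_VS_EXT (assumed from this kernel's kvm_arch_vcpu_async_ioctl(), whose body is not shown in this diff). A hedged sketch:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int set_ext_irq_line(int vcpu_fd, int level)
    {
            struct kvm_interrupt irq = {
                    .irq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET,
            };

            /* Asserts or deasserts the VS-mode external interrupt line. */
            return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
    }
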
@@ -729,9 +479,38 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
+{
+ const unsigned long *isa = vcpu->arch.isa;
+ struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+ if (riscv_isa_extension_available(isa, SVPBMT))
+ cfg->henvcfg |= ENVCFG_PBMTE;
+
+ if (riscv_isa_extension_available(isa, SSTC))
+ cfg->henvcfg |= ENVCFG_STCE;
+
+ if (riscv_isa_extension_available(isa, ZICBOM))
+ cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
+
+ if (riscv_isa_extension_available(isa, ZICBOZ))
+ cfg->henvcfg |= ENVCFG_CBZE;
+
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+ cfg->hstateen0 |= SMSTATEEN0_HSENVCFG;
+ if (riscv_isa_extension_available(isa, SSAIA))
+ cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC |
+ SMSTATEEN0_AIA |
+ SMSTATEEN0_AIA_ISEL;
+ if (riscv_isa_extension_available(isa, SMSTATEEN))
+ cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
+ }
+}
+
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+ struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
csr_write(CSR_VSSTATUS, csr->vsstatus);
csr_write(CSR_VSIE, csr->vsie);
@@ -742,6 +521,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
csr_write(CSR_VSTVAL, csr->vstval);
csr_write(CSR_HVIP, csr->hvip);
csr_write(CSR_VSATP, csr->vsatp);
+ csr_write(CSR_HENVCFG, cfg->henvcfg);
+ if (IS_ENABLED(CONFIG_32BIT))
+ csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+ csr_write(CSR_HSTATEEN0, cfg->hstateen0);
+ if (IS_ENABLED(CONFIG_32BIT))
+ csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+ }
kvm_riscv_gstage_update_hgatp(vcpu);
@@ -750,6 +537,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
vcpu->arch.isa);
+ kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context);
+ kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
+ vcpu->arch.isa);
+
+ kvm_riscv_vcpu_aia_load(vcpu, cpu);
+
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
vcpu->cpu = cpu;
}
@@ -760,10 +554,17 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->cpu = -1;
+ kvm_riscv_vcpu_aia_put(vcpu);
+
kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
vcpu->arch.isa);
kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
+ kvm_riscv_vcpu_timer_save(vcpu);
+ kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,
+ vcpu->arch.isa);
+ kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);
+
csr->vsstatus = csr_read(CSR_VSSTATUS);
csr->vsie = csr_read(CSR_VSIE);
csr->vstvec = csr_read(CSR_VSTVEC);
@@ -817,6 +618,9 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
kvm_riscv_hfence_process(vcpu);
+
+ if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+ kvm_riscv_vcpu_record_steal_time(vcpu);
}
}
@@ -825,6 +629,33 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
csr_write(CSR_HVIP, csr->hvip);
+ kvm_riscv_vcpu_aia_update_hvip(vcpu);
+}
+
+static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
+ struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+ struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+ vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+ (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+ vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0,
+ smcsr->sstateen0);
+}
+
+static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
+ struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+ struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+ csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+ (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+ smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
+ vcpu->arch.host_sstateen0);
}
/*
@@ -836,10 +667,12 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
*/
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
+ kvm_riscv_vcpu_swap_in_guest_state(vcpu);
guest_state_enter_irqoff();
__kvm_riscv_switch_to(&vcpu->arch);
vcpu->arch.last_exit_cpu = vcpu->cpu;
guest_state_exit_irqoff();
+ kvm_riscv_vcpu_swap_in_host_state(vcpu);
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
@@ -848,27 +681,34 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
struct kvm_cpu_trap trap;
struct kvm_run *run = vcpu->run;
+ if (!vcpu->arch.ran_atleast_once)
+ kvm_riscv_vcpu_setup_config(vcpu);
+
/* Mark this VCPU ran at least once */
vcpu->arch.ran_atleast_once = true;
kvm_vcpu_srcu_read_lock(vcpu);
- /* Process MMIO value returned from user-space */
- if (run->exit_reason == KVM_EXIT_MMIO) {
+ switch (run->exit_reason) {
+ case KVM_EXIT_MMIO:
+ /* Process MMIO value returned from user-space */
ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
- if (ret) {
- kvm_vcpu_srcu_read_unlock(vcpu);
- return ret;
- }
- }
-
- /* Process SBI value returned from user-space */
- if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
+ break;
+ case KVM_EXIT_RISCV_SBI:
+ /* Process SBI value returned from user-space */
ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
- if (ret) {
- kvm_vcpu_srcu_read_unlock(vcpu);
- return ret;
- }
+ break;
+ case KVM_EXIT_RISCV_CSR:
+ /* Process CSR value returned from user-space */
+ ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ if (ret) {
+ kvm_vcpu_srcu_read_unlock(vcpu);
+ return ret;
}
if (run->immediate_exit) {
@@ -884,7 +724,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
run->exit_reason = KVM_EXIT_UNKNOWN;
while (ret > 0) {
/* Check conditions before entering the guest */
- cond_resched();
+ ret = xfer_to_guest_mode_handle_work(vcpu);
+ if (ret)
+ continue;
+ ret = 1;
kvm_riscv_gstage_vmid_update(vcpu);
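
[Annotation] With xfer_to_guest_mode_handle_work() from the generic entry code replacing the open-coded signal check below, a pending signal surfaces to userspace as an -EINTR return from KVM_RUN with exit_reason KVM_EXIT_INTR (and bumps the new signal_exits counter). A hedged userspace view, illustrative only:

    #include <errno.h>
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int run_once(int vcpu_fd, struct kvm_run *run)
    {
            if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
                    if (errno == EINTR && run->exit_reason == KVM_EXIT_INTR)
                            return 0; /* interrupted by a signal; retry */
                    return -1;
            }
            return run->exit_reason; /* e.g. KVM_EXIT_MMIO, _RISCV_SBI, _RISCV_CSR */
    }
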
@@ -892,17 +735,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
preempt_disable();
- local_irq_disable();
-
- /*
- * Exit if we have a signal pending so that we can deliver
- * the signal to user space.
- */
- if (signal_pending(current)) {
- ret = -EINTR;
- run->exit_reason = KVM_EXIT_INTR;
+ /* Update AIA HW state before entering guest */
+ ret = kvm_riscv_vcpu_aia_update(vcpu);
+ if (ret <= 0) {
+ preempt_enable();
+ continue;
}
+ local_irq_disable();
+
/*
* Ensure we set mode to IN_GUEST_MODE after we disable
* interrupts and before the final VCPU requests check.
@@ -923,9 +764,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
/* Update HVIP CSR for current CPU */
kvm_riscv_update_hvip(vcpu);
- if (ret <= 0 ||
- kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
- kvm_request_pending(vcpu)) {
+ if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
+ kvm_request_pending(vcpu) ||
+ xfer_to_guest_mode_work_pending()) {
vcpu->mode = OUTSIDE_GUEST_MODE;
local_irq_enable();
preempt_enable();