Diffstat:
-rw-r--r--  arch/powerpc/kvm/book3s_hv_nested.c | 172
1 file changed, 87 insertions, 85 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index ed8a2c9f5629..5a64a1341e6f 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -20,6 +20,7 @@
#include <asm/pte-walk.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
+#include <asm/firmware.h>
static struct patb_entry *pseries_partition_tb;
@@ -261,8 +262,7 @@ static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
/*
* Don't let L1 change LPCR bits for the L2 except these:
*/
- mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
- LPCR_LPES | LPCR_MER;
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | LPCR_MER;
/*
* Additional filtering is required depending on hardware
@@ -306,10 +306,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
/* copy parameters in */
hv_ptr = kvmppc_get_gpr(vcpu, 4);
regs_ptr = kvmppc_get_gpr(vcpu, 5);
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
hv_ptr, regs_ptr);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (err)
return H_PARAMETER;
@@ -358,11 +358,12 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
/* convert TB values/offsets to host (L0) values */
hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
vc->tb_offset += l2_hv.tb_offset;
+ vcpu->arch.dec_expires += l2_hv.tb_offset;
/* set L1 state to L2 state */
vcpu->arch.nested = l2;
vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
- l2->hfscr = l2_hv.hfscr;
+ vcpu->arch.nested_hfscr = l2_hv.hfscr;
vcpu->arch.regs = l2_regs;
/* Guest must always run with ME enabled, HV disabled. */
@@ -374,11 +375,6 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
do {
- if (mftb() >= hdec_exp) {
- vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
- r = RESUME_HOST;
- break;
- }
r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
} while (is_kvmppc_resume_guest(r));
@@ -399,6 +395,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (l2_regs.msr & MSR_TS_MASK)
vcpu->arch.shregs.msr |= MSR_TS_S;
vc->tb_offset = saved_l1_hv.tb_offset;
+ /* XXX: is this always the same delta as saved_l1_hv.tb_offset? */
+ vcpu->arch.dec_expires -= l2_hv.tb_offset;
restore_hv_regs(vcpu, &saved_l1_hv);
vcpu->arch.purr += delta_purr;
vcpu->arch.spurr += delta_spurr;
@@ -412,10 +410,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
byteswap_hv_regs(&l2_hv);
byteswap_pt_regs(&l2_regs);
}
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
hv_ptr, regs_ptr);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (err)
return H_AUTHORITY;
@@ -441,10 +439,11 @@ long kvmhv_nested_init(void)
if (!radix_enabled())
return -ENODEV;
- /* find log base 2 of KVMPPC_NR_LPIDS, rounding up */
- ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1;
- if (ptb_order < 8)
- ptb_order = 8;
+ /* Partition table entry is 1<<4 bytes in size, hence the 4. */
+ ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4;
+ /* Minimum partition table size is 1<<12 bytes */
+ if (ptb_order < 12)
+ ptb_order = 12;
pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
GFP_KERNEL);
if (!pseries_partition_tb) {
@@ -452,7 +451,7 @@ long kvmhv_nested_init(void)
return -ENOMEM;
}
- ptcr = __pa(pseries_partition_tb) | (ptb_order - 8);
+ ptcr = __pa(pseries_partition_tb) | (ptb_order - 12);
rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
if (rc != H_SUCCESS) {
pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
@@ -523,11 +522,6 @@ static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
}
-void kvmhv_vm_nested_init(struct kvm *kvm)
-{
- kvm->arch.max_nested_lpid = -1;
-}
-
/*
* Handle the H_SET_PARTITION_TABLE hcall.
* r4 = guest real address of partition table + log_2(size) - 12
@@ -541,16 +535,14 @@ long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
long ret = H_SUCCESS;
srcu_idx = srcu_read_lock(&kvm->srcu);
- /*
- * Limit the partition table to 4096 entries (because that's what
- * hardware supports), and check the base address.
- */
- if ((ptcr & PRTS_MASK) > 12 - 8 ||
+ /* Check partition size and base address. */
+ if ((ptcr & PRTS_MASK) + 12 - 4 > KVM_MAX_NESTED_GUESTS_SHIFT ||
!kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
ret = H_PARAMETER;
srcu_read_unlock(&kvm->srcu, srcu_idx);
if (ret == H_SUCCESS)
kvm->arch.l1_ptcr = ptcr;
+
return ret;
}
@@ -582,7 +574,7 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
if (eaddr & (0xFFFUL << 52))
return H_PARAMETER;
- buf = kzalloc(n, GFP_KERNEL);
+ buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN);
if (!buf)
return H_NO_MEM;
@@ -602,16 +594,16 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
goto not_found;
/* Write what was loaded into our buffer back to the L1 guest */
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (rc)
goto not_found;
} else {
/* Load the data to be stored from the L1 guest into our buf */
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (rc)
goto not_found;
@@ -646,7 +638,7 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
ret = -EFAULT;
ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
- if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) {
+ if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) {
int srcu_idx = srcu_read_lock(&kvm->srcu);
ret = kvm_read_guest(kvm, ptbl_addr,
&ptbl_entry, sizeof(ptbl_entry));
@@ -662,6 +654,35 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
kvmhv_set_nested_ptbl(gp);
}
+void kvmhv_vm_nested_init(struct kvm *kvm)
+{
+ idr_init(&kvm->arch.kvm_nested_guest_idr);
+}
+
+static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid)
+{
+ return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
+}
+
+static bool __prealloc_nested(struct kvm *kvm, int lpid)
+{
+ if (idr_alloc(&kvm->arch.kvm_nested_guest_idr,
+ NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
+ return false;
+ return true;
+}
+
+static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
+{
+ if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
+ WARN_ON(1);
+}
+
+static void __remove_nested(struct kvm *kvm, int lpid)
+{
+ idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid);
+}
+
static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
{
struct kvm_nested_guest *gp;
@@ -722,13 +743,8 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
long ref;
spin_lock(&kvm->mmu_lock);
- if (gp == kvm->arch.nested_guests[lpid]) {
- kvm->arch.nested_guests[lpid] = NULL;
- if (lpid == kvm->arch.max_nested_lpid) {
- while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
- ;
- kvm->arch.max_nested_lpid = lpid;
- }
+ if (gp == __find_nested(kvm, lpid)) {
+ __remove_nested(kvm, lpid);
--gp->refcnt;
}
ref = gp->refcnt;
@@ -745,24 +761,22 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
*/
void kvmhv_release_all_nested(struct kvm *kvm)
{
- int i;
+ int lpid;
struct kvm_nested_guest *gp;
struct kvm_nested_guest *freelist = NULL;
struct kvm_memory_slot *memslot;
- int srcu_idx;
+ int srcu_idx, bkt;
spin_lock(&kvm->mmu_lock);
- for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
- gp = kvm->arch.nested_guests[i];
- if (!gp)
- continue;
- kvm->arch.nested_guests[i] = NULL;
+ idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
+ __remove_nested(kvm, lpid);
if (--gp->refcnt == 0) {
gp->next = freelist;
freelist = gp;
}
}
- kvm->arch.max_nested_lpid = -1;
+ idr_destroy(&kvm->arch.kvm_nested_guest_idr);
+ /* idr is empty and may be reused at this point */
spin_unlock(&kvm->mmu_lock);
while ((gp = freelist) != NULL) {
freelist = gp->next;
@@ -770,7 +784,7 @@ void kvmhv_release_all_nested(struct kvm *kvm)
}
srcu_idx = srcu_read_lock(&kvm->srcu);
- kvm_for_each_memslot(memslot, kvm_memslots(kvm))
+ kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
kvmhv_free_memslot_nest_rmap(memslot);
srcu_read_unlock(&kvm->srcu, srcu_idx);
}
@@ -794,12 +808,11 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
{
struct kvm_nested_guest *gp, *newgp;
- if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
- l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
+ if (l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
return NULL;
spin_lock(&kvm->mmu_lock);
- gp = kvm->arch.nested_guests[l1_lpid];
+ gp = __find_nested(kvm, l1_lpid);
if (gp)
++gp->refcnt;
spin_unlock(&kvm->mmu_lock);
@@ -810,17 +823,19 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
newgp = kvmhv_alloc_nested(kvm, l1_lpid);
if (!newgp)
return NULL;
+
+ if (!__prealloc_nested(kvm, l1_lpid)) {
+ kvmhv_release_nested(newgp);
+ return NULL;
+ }
+
spin_lock(&kvm->mmu_lock);
- if (kvm->arch.nested_guests[l1_lpid]) {
- /* someone else beat us to it */
- gp = kvm->arch.nested_guests[l1_lpid];
- } else {
- kvm->arch.nested_guests[l1_lpid] = newgp;
+ gp = __find_nested(kvm, l1_lpid);
+ if (!gp) {
+ __add_nested(kvm, l1_lpid, newgp);
++newgp->refcnt;
gp = newgp;
newgp = NULL;
- if (l1_lpid > kvm->arch.max_nested_lpid)
- kvm->arch.max_nested_lpid = l1_lpid;
}
++gp->refcnt;
spin_unlock(&kvm->mmu_lock);
@@ -843,20 +858,13 @@ void kvmhv_put_nested(struct kvm_nested_guest *gp)
kvmhv_release_nested(gp);
}
-static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
-{
- if (lpid > kvm->arch.max_nested_lpid)
- return NULL;
- return kvm->arch.nested_guests[lpid];
-}
-
pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
unsigned long ea, unsigned *hshift)
{
struct kvm_nested_guest *gp;
pte_t *pte;
- gp = kvmhv_find_nested(kvm, lpid);
+ gp = __find_nested(kvm, lpid);
if (!gp)
return NULL;
@@ -962,7 +970,7 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
gpa = n_rmap & RMAP_NESTED_GPA_MASK;
lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
- gp = kvmhv_find_nested(kvm, lpid);
+ gp = __find_nested(kvm, lpid);
if (!gp)
return;
@@ -1154,16 +1162,13 @@ static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *gp;
- int i;
+ int lpid;
spin_lock(&kvm->mmu_lock);
- for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
- gp = kvm->arch.nested_guests[i];
- if (gp) {
- spin_unlock(&kvm->mmu_lock);
- kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
- spin_lock(&kvm->mmu_lock);
- }
+ idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
+ spin_unlock(&kvm->mmu_lock);
+ kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+ spin_lock(&kvm->mmu_lock);
}
spin_unlock(&kvm->mmu_lock);
}
@@ -1315,7 +1320,7 @@ long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
* H_ENTER_NESTED call. Since we can't differentiate this case from
* the invalid case, we ignore such flush requests and return success.
*/
- if (!kvmhv_find_nested(vcpu->kvm, lpid))
+ if (!__find_nested(vcpu->kvm, lpid))
return H_SUCCESS;
/*
@@ -1575,7 +1580,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
/* 2. Find the host pte for this L1 guest real address */
/* Used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
/* See if can find translation in our partition scoped tables for L1 */
@@ -1659,15 +1664,12 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{
- int ret = -1;
+ int ret = lpid + 1;
spin_lock(&kvm->mmu_lock);
- while (++lpid <= kvm->arch.max_nested_lpid) {
- if (kvm->arch.nested_guests[lpid]) {
- ret = lpid;
- break;
- }
- }
+ if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret))
+ ret = -1;
spin_unlock(&kvm->mmu_lock);
+
return ret;
}
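
Note (not part of the commit): the structural change above, replacing the fixed nested_guests[] array with an IDR, relies on a reserve-then-publish idiom: idr_alloc() with a NULL pointer reserves the ID in a context where sleeping is allowed, and idr_replace() later publishes the real pointer under a spinlock. Below is a minimal standalone sketch of that idiom; guest_idr, guest_lock, struct guest and get_guest() are hypothetical names used for illustration, not the KVM code itself.

#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_IDR(guest_idr);
static DEFINE_SPINLOCK(guest_lock);

struct guest {
	int lpid;
};

static struct guest *get_guest(int lpid)
{
	struct guest *g, *newg;

	/* Fast path: the guest for this lpid is already published. */
	spin_lock(&guest_lock);
	g = idr_find(&guest_idr, lpid);
	spin_unlock(&guest_lock);
	if (g)
		return g;

	newg = kzalloc(sizeof(*newg), GFP_KERNEL);
	if (!newg)
		return NULL;
	newg->lpid = lpid;

	/*
	 * Reserve the ID with a NULL pointer while sleeping is still
	 * allowed (GFP_KERNEL); idr_find() returns NULL for an ID that
	 * is reserved but not yet published.
	 */
	if (idr_alloc(&guest_idr, NULL, lpid, lpid + 1, GFP_KERNEL) != lpid) {
		kfree(newg);
		return NULL;
	}

	/*
	 * Re-check and publish under the spinlock, mirroring the patch's
	 * __find_nested()/__add_nested() sequence; idr_replace() only
	 * swaps the reserved slot and does not allocate memory.
	 */
	spin_lock(&guest_lock);
	g = idr_find(&guest_idr, lpid);
	if (!g) {
		idr_replace(&guest_idr, newg, lpid);
		g = newg;
		newg = NULL;
	}
	spin_unlock(&guest_lock);

	kfree(newg);		/* no-op if newg was published above */
	return g;
}

The split matters because idr_alloc(GFP_KERNEL) may sleep and therefore cannot run under kvm->mmu_lock, while idr_replace() is safe inside the critical section; this is how __prealloc_nested() and __add_nested() divide the work in the patch.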