Diffstat (limited to 'virt/kvm/arm/mmu.c')
-rw-r--r--  virt/kvm/arm/mmu.c  24
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 30251e288629..ffd7acdceac7 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -27,10 +27,10 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
+#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/virt.h>
-#include <asm/system_misc.h>
#include "trace.h"
@@ -908,6 +908,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
*/
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
+ phys_addr_t pgd_phys;
pgd_t *pgd;
if (kvm->arch.pgd != NULL) {
@@ -920,7 +921,12 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
if (!pgd)
return -ENOMEM;
+ pgd_phys = virt_to_phys(pgd);
+ if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
+ return -EINVAL;
+
kvm->arch.pgd = pgd;
+ kvm->arch.pgd_phys = pgd_phys;
return 0;
}
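
The new lines cache the physical address of the stage-2 PGD and sanity-check it against the VTTBR_EL2 BADDR field for this VM: with configurable IPA sizes, the PGD's required alignment depends on how many page-table levels are concatenated, and an address with bits outside the mask would be silently truncated when written into VTTBR. A sketch of the shape such a mask helper takes on arm64 (the helper and the alignment formula follow arm64 conventions but are assumptions, not part of this diff):

/* Sketch: which bits of VTTBR may carry the stage-2 PGD address. */
static inline u64 vttbr_baddr_mask_sketch(u32 ipa_shift, u32 levels)
{
	/* The entry-level table must be aligned to 2^x bytes. */
	unsigned int x = ipa_shift - levels * (PAGE_SHIFT - 3);

	return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x);
}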
@@ -1008,6 +1014,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
pgd = READ_ONCE(kvm->arch.pgd);
kvm->arch.pgd = NULL;
+ kvm->arch.pgd_phys = 0;
}
spin_unlock(&kvm->mmu_lock);
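
Zeroing the cached pgd_phys together with pgd keeps the physical address from dangling after the page tables are freed, and doing both under mmu_lock means no reader ever sees one updated without the other. The point of caching at all is that the VTTBR value can then be assembled without a virt_to_phys() on the hot path; a sketch of a consumer, with the field and macro names assumed rather than taken from this diff:

/* Sketch: building a VTTBR value from the cached PGD address. */
static inline u64 kvm_get_vttbr_sketch(struct kvm *kvm)
{
	u64 baddr = kvm->arch.pgd_phys;	/* cached by kvm_alloc_stage2_pgd() */
	u64 vmid_field = (u64)kvm->arch.vmid.vmid << VTTBR_VMID_SHIFT;

	return baddr | vmid_field;
}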
@@ -1396,14 +1403,6 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
return false;
}
-static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
-{
- if (kvm_vcpu_trap_is_iabt(vcpu))
- return false;
-
- return kvm_vcpu_dabt_iswrite(vcpu);
-}
-
/**
* stage2_wp_ptes - write protect PMD range
* @pmd: pointer to pmd entry
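
The kvm_is_write_fault() helper disappears from mmu.c because an identical predicate is provided as a static inline by the per-architecture headers (presumably <asm/kvm_emulate.h>; that location is an assumption here), making the local copy redundant:

/* Sketch of the shared-header equivalent this removal relies on. */
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	/* An instruction abort is a fetch, never a write. */
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}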
@@ -1598,14 +1597,13 @@ static void kvm_send_hwpoison_signal(unsigned long address,
static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
unsigned long hva)
{
- gpa_t gpa_start, gpa_end;
+ gpa_t gpa_start;
hva_t uaddr_start, uaddr_end;
size_t size;
size = memslot->npages * PAGE_SIZE;
gpa_start = memslot->base_gfn << PAGE_SHIFT;
- gpa_end = gpa_start + size;
uaddr_start = memslot->userspace_addr;
uaddr_end = uaddr_start + size;
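
Here gpa_end is dropped because nothing reads it anymore: the function only needs to know that the IPA and userspace addresses are congruent modulo the PMD size, and that the PMD-sized block around the faulting hva lies wholly inside the memslot; the guest-side end address falls out of that math. A self-contained sketch of the surviving checks (S2_PMD_SIZE/S2_PMD_MASK stand in for the stage-2 PMD granule macros and are assumptions of this illustration):

/* Sketch: may this fault be mapped with a stage-2 PMD (huge) entry? */
static bool pmd_mapping_ok_sketch(gpa_t gpa_start, hva_t uaddr_start,
				  hva_t uaddr_end, unsigned long hva)
{
	/* IPA and userspace VA must share the same offset within a PMD. */
	if ((gpa_start & (S2_PMD_SIZE - 1)) != (uaddr_start & (S2_PMD_SIZE - 1)))
		return false;

	/* The PMD-aligned block containing hva must sit inside the memslot. */
	return (hva & S2_PMD_MASK) >= uaddr_start &&
	       (hva & S2_PMD_MASK) + S2_PMD_SIZE <= uaddr_end;
}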
@@ -1906,7 +1904,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
* For RAS the host kernel may handle this abort.
* There is no need to pass the error into the guest.
*/
- if (!handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
+ if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
return 1;
if (unlikely(!is_iabt)) {
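
This call site simply follows the rename into <asm/kvm_ras.h>; the contract is unchanged: a zero return means a host RAS handler claimed the synchronous external abort, so the vcpu can resume without the guest ever seeing the error, while a non-zero return leaves the abort to be injected. A sketch of that caller-side contract (mirroring the surrounding code, with kvm_inject_vabt() as the injection path):

/* Sketch: how the SEA path consumes kvm_handle_guest_sea()'s result. */
static int handle_sea_sketch(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
	if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
		return 1;		/* host RAS consumed the error, resume */

	kvm_inject_vabt(vcpu);		/* otherwise reflect it as an SError */
	return 1;
}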
@@ -2353,7 +2351,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
return 0;
}
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}
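
The hook now receives the memslot generation counter instead of the memslots pointer. arm's body stays empty; the parameter exists for architectures that stamp cached translations with a generation and must drop stale entries when it moves. A hedged sketch of such a user (the invalidation helper named below is hypothetical):

/* Sketch: an arch that caches MMIO translations keyed by generation. */
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
	/*
	 * memslots->generation was just bumped; invalidate anything
	 * stamped with an older generation so stale mappings are not
	 * reused. arch_invalidate_mmio_cache() is a hypothetical helper.
	 */
	arch_invalidate_mmio_cache(kvm, gen);
}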