Diffstat (limited to 'virt/kvm/arm/mmu.c'):
 virt/kvm/arm/mmu.c | 128
 1 file changed, 69 insertions(+), 59 deletions(-)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index ed162a6c57c5..c23a1b323aad 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -45,7 +45,6 @@ static phys_addr_t hyp_idmap_vector;
static unsigned long io_map_base;
-#define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t))
#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
@@ -150,20 +149,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
- pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
- stage2_pgd_clear(pgd);
+ pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
+ stage2_pgd_clear(kvm, pgd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
- stage2_pud_free(pud_table);
+ stage2_pud_free(kvm, pud_table);
put_page(virt_to_page(pgd));
}
static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
- pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
- VM_BUG_ON(stage2_pud_huge(*pud));
- stage2_pud_clear(pud);
+ pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
+ VM_BUG_ON(stage2_pud_huge(kvm, *pud));
+ stage2_pud_clear(kvm, pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
- stage2_pmd_free(pmd_table);
+ stage2_pmd_free(kvm, pmd_table);
put_page(virt_to_page(pud));
}
@@ -252,7 +251,7 @@ static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
}
} while (pte++, addr += PAGE_SIZE, addr != end);
- if (stage2_pte_table_empty(start_pte))
+ if (stage2_pte_table_empty(kvm, start_pte))
clear_stage2_pmd_entry(kvm, pmd, start_addr);
}
@@ -262,9 +261,9 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
phys_addr_t next, start_addr = addr;
pmd_t *pmd, *start_pmd;
- start_pmd = pmd = stage2_pmd_offset(pud, addr);
+ start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
do {
- next = stage2_pmd_addr_end(addr, end);
+ next = stage2_pmd_addr_end(kvm, addr, end);
if (!pmd_none(*pmd)) {
if (pmd_thp_or_huge(*pmd)) {
pmd_t old_pmd = *pmd;
@@ -281,7 +280,7 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
}
} while (pmd++, addr = next, addr != end);
- if (stage2_pmd_table_empty(start_pmd))
+ if (stage2_pmd_table_empty(kvm, start_pmd))
clear_stage2_pud_entry(kvm, pud, start_addr);
}
@@ -291,14 +290,14 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
phys_addr_t next, start_addr = addr;
pud_t *pud, *start_pud;
- start_pud = pud = stage2_pud_offset(pgd, addr);
+ start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
- if (stage2_pud_huge(*pud)) {
+ next = stage2_pud_addr_end(kvm, addr, end);
+ if (!stage2_pud_none(kvm, *pud)) {
+ if (stage2_pud_huge(kvm, *pud)) {
pud_t old_pud = *pud;
- stage2_pud_clear(pud);
+ stage2_pud_clear(kvm, pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm_flush_dcache_pud(old_pud);
put_page(virt_to_page(pud));
@@ -308,7 +307,7 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
}
} while (pud++, addr = next, addr != end);
- if (stage2_pud_table_empty(start_pud))
+ if (stage2_pud_table_empty(kvm, start_pud))
clear_stage2_pgd_entry(kvm, pgd, start_addr);
}
@@ -332,7 +331,7 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
assert_spin_locked(&kvm->mmu_lock);
WARN_ON(size & ~PAGE_MASK);
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+ pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
do {
/*
* Make sure the page table is still active, as another thread
@@ -341,8 +340,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
*/
if (!READ_ONCE(kvm->arch.pgd))
break;
- next = stage2_pgd_addr_end(addr, end);
- if (!stage2_pgd_none(*pgd))
+ next = stage2_pgd_addr_end(kvm, addr, end);
+ if (!stage2_pgd_none(kvm, *pgd))
unmap_stage2_puds(kvm, pgd, addr, next);
/*
* If the range is too large, release the kvm->mmu_lock
@@ -371,9 +370,9 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
pmd_t *pmd;
phys_addr_t next;
- pmd = stage2_pmd_offset(pud, addr);
+ pmd = stage2_pmd_offset(kvm, pud, addr);
do {
- next = stage2_pmd_addr_end(addr, end);
+ next = stage2_pmd_addr_end(kvm, addr, end);
if (!pmd_none(*pmd)) {
if (pmd_thp_or_huge(*pmd))
kvm_flush_dcache_pmd(*pmd);
@@ -389,11 +388,11 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
pud_t *pud;
phys_addr_t next;
- pud = stage2_pud_offset(pgd, addr);
+ pud = stage2_pud_offset(kvm, pgd, addr);
do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
- if (stage2_pud_huge(*pud))
+ next = stage2_pud_addr_end(kvm, addr, end);
+ if (!stage2_pud_none(kvm, *pud)) {
+ if (stage2_pud_huge(kvm, *pud))
kvm_flush_dcache_pud(*pud);
else
stage2_flush_pmds(kvm, pud, addr, next);
@@ -409,10 +408,11 @@ static void stage2_flush_memslot(struct kvm *kvm,
phys_addr_t next;
pgd_t *pgd;
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+ pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
do {
- next = stage2_pgd_addr_end(addr, end);
- stage2_flush_puds(kvm, pgd, addr, next);
+ next = stage2_pgd_addr_end(kvm, addr, end);
+ if (!stage2_pgd_none(kvm, *pgd))
+ stage2_flush_puds(kvm, pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
@@ -897,7 +897,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
}
/* Allocate the HW PGD, making sure that each page gets its own refcount */
- pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
+ pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
if (!pgd)
return -ENOMEM;
@@ -986,7 +986,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
spin_lock(&kvm->mmu_lock);
if (kvm->arch.pgd) {
- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
pgd = READ_ONCE(kvm->arch.pgd);
kvm->arch.pgd = NULL;
}
@@ -994,7 +994,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
/* Free the HW pgd, one page at a time */
if (pgd)
- free_pages_exact(pgd, S2_PGD_SIZE);
+ free_pages_exact(pgd, stage2_pgd_size(kvm));
}
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1003,16 +1003,16 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
pgd_t *pgd;
pud_t *pud;
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
- if (WARN_ON(stage2_pgd_none(*pgd))) {
+ pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
+ if (stage2_pgd_none(kvm, *pgd)) {
if (!cache)
return NULL;
pud = mmu_memory_cache_alloc(cache);
- stage2_pgd_populate(pgd, pud);
+ stage2_pgd_populate(kvm, pgd, pud);
get_page(virt_to_page(pgd));
}
- return stage2_pud_offset(pgd, addr);
+ return stage2_pud_offset(kvm, pgd, addr);
}
static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1025,15 +1025,15 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
if (!pud)
return NULL;
- if (stage2_pud_none(*pud)) {
+ if (stage2_pud_none(kvm, *pud)) {
if (!cache)
return NULL;
pmd = mmu_memory_cache_alloc(cache);
- stage2_pud_populate(pud, pmd);
+ stage2_pud_populate(kvm, pud, pmd);
get_page(virt_to_page(pud));
}
- return stage2_pmd_offset(pud, addr);
+ return stage2_pmd_offset(kvm, pud, addr);
}
static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
@@ -1207,8 +1207,9 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
if (writable)
pte = kvm_s2pte_mkwrite(pte);
- ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
- KVM_NR_MEM_OBJS);
+ ret = mmu_topup_memory_cache(&cache,
+ kvm_mmu_cache_min_pages(kvm),
+ KVM_NR_MEM_OBJS);
if (ret)
goto out;
spin_lock(&kvm->mmu_lock);
@@ -1230,8 +1231,14 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
{
kvm_pfn_t pfn = *pfnp;
gfn_t gfn = *ipap >> PAGE_SHIFT;
+ struct page *page = pfn_to_page(pfn);
- if (PageTransCompoundMap(pfn_to_page(pfn))) {
+ /*
+ * PageTransCompoundMap() returns true for THP and
+ * hugetlbfs. Make sure the adjustment is done only for THP
+ * pages.
+ */
+ if (!PageHuge(page) && PageTransCompoundMap(page)) {
unsigned long mask;
/*
* The address we faulted on is backed by a transparent huge
@@ -1296,19 +1303,21 @@ static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
/**
* stage2_wp_pmds - write protect PUD range
+ * @kvm: kvm instance for the VM
* @pud: pointer to pud entry
* @addr: range start address
* @end: range end address
*/
-static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
+static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
+ phys_addr_t addr, phys_addr_t end)
{
pmd_t *pmd;
phys_addr_t next;
- pmd = stage2_pmd_offset(pud, addr);
+ pmd = stage2_pmd_offset(kvm, pud, addr);
do {
- next = stage2_pmd_addr_end(addr, end);
+ next = stage2_pmd_addr_end(kvm, addr, end);
if (!pmd_none(*pmd)) {
if (pmd_thp_or_huge(*pmd)) {
if (!kvm_s2pmd_readonly(pmd))
@@ -1328,18 +1337,19 @@ static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
*
* Process PUD entries, for a huge PUD we cause a panic.
*/
-static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
+static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
+ phys_addr_t addr, phys_addr_t end)
{
pud_t *pud;
phys_addr_t next;
- pud = stage2_pud_offset(pgd, addr);
+ pud = stage2_pud_offset(kvm, pgd, addr);
do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
+ next = stage2_pud_addr_end(kvm, addr, end);
+ if (!stage2_pud_none(kvm, *pud)) {
/* TODO:PUD not supported, revisit later if supported */
- BUG_ON(stage2_pud_huge(*pud));
- stage2_wp_pmds(pud, addr, next);
+ BUG_ON(stage2_pud_huge(kvm, *pud));
+ stage2_wp_pmds(kvm, pud, addr, next);
}
} while (pud++, addr = next, addr != end);
}
@@ -1355,7 +1365,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
pgd_t *pgd;
phys_addr_t next;
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+ pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
do {
/*
* Release kvm_mmu_lock periodically if the memory region is
@@ -1369,9 +1379,9 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
cond_resched_lock(&kvm->mmu_lock);
if (!READ_ONCE(kvm->arch.pgd))
break;
- next = stage2_pgd_addr_end(addr, end);
- if (stage2_pgd_present(*pgd))
- stage2_wp_puds(pgd, addr, next);
+ next = stage2_pgd_addr_end(kvm, addr, end);
+ if (stage2_pgd_present(kvm, *pgd))
+ stage2_wp_puds(kvm, pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
@@ -1520,7 +1530,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
up_read(&current->mm->mmap_sem);
/* We need minimum second+third level pages */
- ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
+ ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
KVM_NR_MEM_OBJS);
if (ret)
return ret;
@@ -1763,7 +1773,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
/* Userspace should not be able to register out-of-bounds IPAs */
- VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
+ VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
if (fault_status == FSC_ACCESS) {
handle_access_fault(vcpu, fault_ipa);
@@ -2062,7 +2072,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* space addressable by the KVM guest IPA space.
*/
if (memslot->base_gfn + memslot->npages >=
- (KVM_PHYS_SIZE >> PAGE_SHIFT))
+ (kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;
down_read(&current->mm->mmap_sem);
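For context, the new helpers used throughout this diff (kvm_phys_size(), stage2_pgd_size(), kvm_mmu_cache_min_pages()) replace the old compile-time constants KVM_PHYS_SIZE, S2_PGD_SIZE and KVM_MMU_CACHE_MIN_PAGES with values derived from the VM being acted on. The lines below are a minimal sketch, not taken from this patch, of how such per-VM helpers could be defined; kvm_phys_shift(), stage2_pgdir_shift() and kvm_stage2_levels() are assumed helpers introduced only for illustration.

/*
 * Hypothetical sketch only: kvm_phys_shift(), stage2_pgdir_shift() and
 * kvm_stage2_levels() are assumed per-VM helpers, not part of this diff.
 */
#define kvm_phys_size(kvm)	(1ULL << kvm_phys_shift(kvm))

/* The number of stage2 PGD entries now depends on the VM's IPA size. */
#define stage2_pgd_ptrs(kvm)	(1 << (kvm_phys_shift(kvm) - stage2_pgdir_shift(kvm)))
#define stage2_pgd_size(kvm)	(stage2_pgd_ptrs(kvm) * sizeof(pgd_t))

/* One pre-allocated table page per intermediate level of the stage2 walk. */
#define kvm_mmu_cache_min_pages(kvm)	(kvm_stage2_levels(kvm) - 1)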