Diffstat
-rw-r--r--  drivers/kvm/kvm.h          3
-rw-r--r--  drivers/kvm/kvm_main.c    12
-rw-r--r--  drivers/kvm/mmu.c         23
-rw-r--r--  drivers/kvm/paging_tmpl.h 12
-rw-r--r--  drivers/kvm/x86.c          2
5 files changed, 33 insertions(+), 19 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 52e80183e050..c2acd74389fa 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -393,7 +393,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
int user_alloc);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
-void kvm_release_page(struct page *page);
+void kvm_release_page_clean(struct page *page);
+void kvm_release_page_dirty(struct page *page);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 4e1bd9488470..729573b844e5 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -543,13 +543,19 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
EXPORT_SYMBOL_GPL(gfn_to_page);
-void kvm_release_page(struct page *page)
+void kvm_release_page_clean(struct page *page)
+{
+ put_page(page);
+}
+EXPORT_SYMBOL_GPL(kvm_release_page_clean);
+
+void kvm_release_page_dirty(struct page *page)
{
if (!PageReserved(page))
SetPageDirty(page);
put_page(page);
}
-EXPORT_SYMBOL_GPL(kvm_release_page);
+EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
static int next_segment(unsigned long len, int offset)
{
@@ -1055,7 +1061,7 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
/* current->mm->mmap_sem is already held so call lockless version */
page = __gfn_to_page(kvm, pgoff);
if (is_error_page(page)) {
- kvm_release_page(page);
+ kvm_release_page_clean(page);
return NOPAGE_SIGBUS;
}
if (type != NULL)
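
The two new helpers added to kvm_main.c above differ only in whether the page is marked dirty before its reference is dropped. The following stand-alone sketch mocks that split in user space; struct page, PageReserved(), SetPageDirty() and put_page() here are simplified stand-ins for the kernel primitives, not the real <linux/mm.h> definitions:

#include <stdbool.h>
#include <stdio.h>

struct page {
	int refcount;
	bool reserved;
	bool dirty;
};

/* Simplified stand-ins for the kernel's page helpers. */
static bool PageReserved(struct page *page) { return page->reserved; }
static void SetPageDirty(struct page *page) { page->dirty = true; }
static void put_page(struct page *page)     { page->refcount--; }

/* Mirrors kvm_release_page_clean(): just drop the reference. */
static void kvm_release_page_clean(struct page *page)
{
	put_page(page);
}

/* Mirrors kvm_release_page_dirty(): mark the page dirty first, unless reserved. */
static void kvm_release_page_dirty(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}

int main(void)
{
	struct page ro = { .refcount = 1 };
	struct page rw = { .refcount = 1 };

	kvm_release_page_clean(&ro);	/* page was only read: stays clean */
	kvm_release_page_dirty(&rw);	/* page may have been written: marked dirty */

	printf("ro: dirty=%d refcount=%d\n", ro.dirty, ro.refcount);
	printf("rw: dirty=%d refcount=%d\n", rw.dirty, rw.refcount);
	return 0;
}

Callers that may have let the guest write through a mapping take the dirty variant so the page is written back later; callers that only read take the clean variant and skip SetPageDirty().
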
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 8add4d5c6840..4624f3789b9a 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -420,14 +420,18 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
struct kvm_rmap_desc *desc;
struct kvm_rmap_desc *prev_desc;
struct kvm_mmu_page *page;
+ struct page *release_page;
unsigned long *rmapp;
int i;
if (!is_rmap_pte(*spte))
return;
page = page_header(__pa(spte));
- kvm_release_page(pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >>
- PAGE_SHIFT));
+ release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+ if (is_writeble_pte(*spte))
+ kvm_release_page_dirty(release_page);
+ else
+ kvm_release_page_clean(release_page);
rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
if (!*rmapp) {
printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -893,7 +897,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
int level = PT32E_ROOT_LEVEL;
hpa_t table_addr = vcpu->mmu.root_hpa;
+ struct page *page;
+ page = pfn_to_page(p >> PAGE_SHIFT);
for (; ; level--) {
u32 index = PT64_INDEX(v, level);
u64 *table;
@@ -908,7 +914,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
pte = table[index];
was_rmapped = is_rmap_pte(pte);
if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
- kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+ kvm_release_page_clean(page);
return 0;
}
mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
@@ -918,7 +924,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
if (!was_rmapped)
rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
else
- kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+ kvm_release_page_clean(page);
+
return 0;
}
@@ -933,7 +940,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
1, 3, &table[index]);
if (!new_table) {
pgprintk("nonpaging_map: ENOMEM\n");
- kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+ kvm_release_page_clean(page);
return -ENOMEM;
}
@@ -1049,8 +1056,8 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
if (is_error_hpa(paddr)) {
- kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
- >> PAGE_SHIFT));
+ kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+ >> PAGE_SHIFT));
return 1;
}
@@ -1580,7 +1587,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
" valid guest gva %lx\n", audit_msg, va);
page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
>> PAGE_SHIFT);
- kvm_release_page(page);
+ kvm_release_page_clean(page);
}
}
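
rmap_remove() above now chooses the release path from the writable bit of the shadow pte it is tearing down. A minimal user-space sketch of that decision follows; the PT_WRITABLE_MASK value is an assumed illustration and the release helpers are stubs, not the kernel definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed for illustration: bit 1 is the R/W bit of an x86 pte. */
#define PT_WRITABLE_MASK (1ULL << 1)

struct page { bool dirty; };

static bool is_writeble_pte(uint64_t pte) { return pte & PT_WRITABLE_MASK; }

/* Stubbed release helpers; the real ones are defined in kvm_main.c above. */
static void kvm_release_page_clean(struct page *page) { (void)page; }
static void kvm_release_page_dirty(struct page *page) { page->dirty = true; }

/* The decision rmap_remove() makes when dropping the page behind a shadow pte. */
static void release_spte_page(uint64_t spte, struct page *page)
{
	if (is_writeble_pte(spte))
		kvm_release_page_dirty(page);	/* guest may have written through it */
	else
		kvm_release_page_clean(page);	/* read-only mapping: page stays clean */
}

int main(void)
{
	struct page ro_page = { false };
	struct page rw_page = { false };

	release_spte_page(0, &ro_page);			/* non-writable spte */
	release_spte_page(PT_WRITABLE_MASK, &rw_page);	/* writable spte */

	printf("ro_page dirty=%d, rw_page dirty=%d\n", ro_page.dirty, rw_page.dirty);
	return 0;
}

A writable mapping could have been dirtied by the guest, so its page must be marked dirty before release; a read-only mapping cannot, which is what lets the clean path skip SetPageDirty().
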
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 77a2b22492bf..bf15d127a48f 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -212,8 +212,8 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
if (is_error_hpa(paddr)) {
set_shadow_pte(shadow_pte,
shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
- kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
- >> PAGE_SHIFT));
+ kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+ >> PAGE_SHIFT));
return;
}
@@ -259,12 +259,12 @@ unshadowed:
page = pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
>> PAGE_SHIFT);
- kvm_release_page(page);
+ kvm_release_page_clean(page);
}
}
else
- kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
- >> PAGE_SHIFT));
+ kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+ >> PAGE_SHIFT));
if (!ptwrite || !*ptwrite)
vcpu->last_pte_updated = shadow_pte;
}
@@ -503,7 +503,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
else
sp->spt[i] = shadow_notrap_nonpresent_pte;
kunmap_atomic(gpt, KM_USER0);
- kvm_release_page(page);
+ kvm_release_page_clean(page);
}
#undef pt_element_t
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 5a1b72fbaeaa..6212984a2e6c 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -1472,7 +1472,7 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
if (vcpu->pio.guest_pages[i]) {
- kvm_release_page(vcpu->pio.guest_pages[i]);
+ kvm_release_page_dirty(vcpu->pio.guest_pages[i]);
vcpu->pio.guest_pages[i] = NULL;
}
}