path: root/arch/powerpc/include/asm/kvm_book3s_64.h
author     Paul Mackerras <paulus@samba.org>    2011-12-12 12:32:27 +0000
committer  Avi Kivity <avi@redhat.com>          2012-03-05 14:52:37 +0200
commit     9d0ef5ea043d1242897d15c71bd1a15da79b4a5d (patch)
tree       2847b3bd444b999f3c2a32d4cbb220d0e788e93a /arch/powerpc/include/asm/kvm_book3s_64.h
parent     KVM: PPC: Allow use of small pages to back Book3S HV guests (diff)
KVM: PPC: Allow I/O mappings in memory slots
This provides for the case where userspace maps an I/O device into the
address range of a memory slot using a VM_PFNMAP mapping. In that case,
we work out the pfn from vma->vm_pgoff, and record the cache enable
bits from vma->vm_page_prot in two low-order bits in the slot_phys
array entries. Then, in kvmppc_h_enter() we check that the cache bits
in the HPTE that the guest wants to insert match the cache bits in the
slot_phys array entry.

However, we do allow the guest to create what it thinks is a
non-cacheable or write-through mapping to memory that is actually
cacheable, so that we can use normal system memory as part of an
emulated device later on. In that case the actual HPTE we insert is a
cacheable HPTE.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
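
For context, the memslot-population side of this change lives in the
book3s_hv code rather than in this header. The sketch below is
illustrative only, not the verbatim commit: the surrounding function
and the hva/psize/physp/i names are assumptions.

	/* Sketch: if a VM_PFNMAP vma backs the memslot, derive the pfn
	 * from vm_pgoff and record the cache-control bits from
	 * vm_page_prot in the low-order bits of the slot_phys entry. */
	struct vm_area_struct *vma = find_vma(current->mm, hva);
	unsigned long is_io = 0;

	if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
	    (vma->vm_flags & VM_PFNMAP)) {
		unsigned long pfn = vma->vm_pgoff +
			((hva - vma->vm_start) >> PAGE_SHIFT);

		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
		/* The page-aligned address leaves the low-order bits
		 * free to carry the cache flags alongside the pfn. */
		physp[i] = (pfn << PAGE_SHIFT) | is_io;
	}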
Diffstat (limited to 'arch/powerpc/include/asm/kvm_book3s_64.h')
-rw-r--r--    arch/powerpc/include/asm/kvm_book3s_64.h    26
1 file changed, 26 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 10920f7a2b8d..18b590d261ff 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -113,6 +113,32 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
return 0; /* error */
}
+static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
+{
+ unsigned int wimg = ptel & HPTE_R_WIMG;
+
+ /* Handle SAO */
+ if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
+ cpu_has_feature(CPU_FTR_ARCH_206))
+ wimg = HPTE_R_M;
+
+ if (!io_type)
+ return wimg == HPTE_R_M;
+
+ return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
+}
+
+/* Return HPTE cache control bits corresponding to Linux pte bits */
+static inline unsigned long hpte_cache_bits(unsigned long pte_val)
+{
+#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
+ return pte_val & (HPTE_R_W | HPTE_R_I);
+#else
+ return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
+ ((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
+#endif
+}
+
static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
unsigned long pagesize)
{
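
On the consuming side, kvmppc_h_enter() validates the guest's proposed
HPTE against the cache bits recorded in the slot_phys entry. A minimal
sketch of that check, with variable names assumed for illustration:

	/* io_type: cache bits recorded in the slot_phys entry;
	 * ptel: second doubleword of the HPTE the guest wants to insert. */
	unsigned long io_type = physp[i] & (HPTE_R_I | HPTE_R_W);

	if (!hpte_cache_flags_ok(ptel, io_type)) {
		if (io_type)
			return H_PARAMETER;	/* real I/O: bits must match */
		/* The guest asked for a non-cacheable or write-through
		 * mapping of memory that is actually cacheable RAM;
		 * allow it, but insert a cacheable HPTE instead. */
		ptel &= ~(HPTE_R_W | HPTE_R_I | HPTE_R_G);
		ptel |= HPTE_R_M;
	}

Note the special case at the top of hpte_cache_flags_ok(): on ISA 2.06
CPUs (CPU_FTR_ARCH_206), the W|I|M combination encodes Strong Access
Ordering (SAO) rather than a genuinely uncached mapping, so it is
treated as plain cacheable (HPTE_R_M) for the purposes of this check.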