From 88ec6b93c8e7d6d4ffaf6ad6395ceb3bf552de15 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Wed, 10 Apr 2019 19:04:33 +0200 Subject: powerpc/xive: add OPAL extensions for the XIVE native exploitation support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The support for XIVE native exploitation mode in Linux/KVM needs a couple more OPAL calls to get and set the state of the XIVE internal structures being used by a sPAPR guest. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/opal-api.h | 7 ++++--- arch/powerpc/include/asm/opal.h | 7 +++++++ arch/powerpc/include/asm/xive.h | 14 ++++++++++++++ 3 files changed, 25 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 870fb7b239ea..e1d118ac61dc 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -186,8 +186,8 @@ #define OPAL_XIVE_FREE_IRQ 140 #define OPAL_XIVE_SYNC 141 #define OPAL_XIVE_DUMP 142 -#define OPAL_XIVE_RESERVED3 143 -#define OPAL_XIVE_RESERVED4 144 +#define OPAL_XIVE_GET_QUEUE_STATE 143 +#define OPAL_XIVE_SET_QUEUE_STATE 144 #define OPAL_SIGNAL_SYSTEM_RESET 145 #define OPAL_NPU_INIT_CONTEXT 146 #define OPAL_NPU_DESTROY_CONTEXT 147 @@ -210,7 +210,8 @@ #define OPAL_PCI_GET_PBCQ_TUNNEL_BAR 164 #define OPAL_PCI_SET_PBCQ_TUNNEL_BAR 165 #define OPAL_NX_COPROC_INIT 167 -#define OPAL_LAST 167 +#define OPAL_XIVE_GET_VP_STATE 170 +#define OPAL_LAST 170 #define QUIESCE_HOLD 1 /* Spin all calls at entry */ #define QUIESCE_REJECT 2 /* Fail all calls with OPAL_BUSY */ diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index a55b01c90bb1..4e978d4dea5c 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -279,6 +279,13 @@ int64_t opal_xive_allocate_irq(uint32_t chip_id); int64_t opal_xive_free_irq(uint32_t girq); int64_t opal_xive_sync(uint32_t type, uint32_t id); int64_t opal_xive_dump(uint32_t type, uint32_t id); +int64_t opal_xive_get_queue_state(uint64_t vp, uint32_t prio, + __be32 *out_qtoggle, + __be32 *out_qindex); +int64_t opal_xive_set_queue_state(uint64_t vp, uint32_t prio, + uint32_t qtoggle, + uint32_t qindex); +int64_t opal_xive_get_vp_state(uint64_t vp, __be64 *out_w01); int64_t opal_pci_set_p2p(uint64_t phb_init, uint64_t phb_target, uint64_t desc, uint16_t pe_number); diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index 3c704f5dd3ae..b579a943407b 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -109,12 +109,26 @@ extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio, extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio); extern void xive_native_sync_source(u32 hw_irq); +extern void xive_native_sync_queue(u32 hw_irq); extern bool is_xive_irq(struct irq_chip *chip); extern int xive_native_enable_vp(u32 vp_id, bool single_escalation); extern int xive_native_disable_vp(u32 vp_id); extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id); extern bool xive_native_has_single_escalation(void); +extern int xive_native_get_queue_info(u32 vp_id, uint32_t prio, + u64 *out_qpage, + u64 *out_qsize, + u64 *out_qeoi_page, + u32 *out_escalate_irq, + u64 *out_qflags); + +extern int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle, + u32 *qindex); +extern int 
xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle, + u32 qindex); +extern int xive_native_get_vp_state(u32 vp_id, u64 *out_state); + #else static inline bool xive_enabled(void) { return false; } -- cgit v1.2.3-59-g8ed1b From c1fe190c06723322f2dfac31d3b982c581e434ef Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Mon, 1 Apr 2019 17:03:12 +1100 Subject: powerpc: Add force enable of DAWR on P9 option This adds a flag so that the DAWR can be enabled on P9 via: echo Y > /sys/kernel/debug/powerpc/dawr_enable_dangerous The DAWR was previously force disabled on POWER9 in: 9654153158 powerpc: Disable DAWR in the base POWER9 CPU features Also see Documentation/powerpc/DAWR-POWER9.txt This is a dangerous setting, USE AT YOUR OWN RISK. Some users may not care about a bad user crashing their box (ie. single user/desktop systems) and really want the DAWR. This allows them to force enable DAWR. This flag can also be used to disable DAWR access. Once this is cleared, all DAWR access should be cleared immediately and your machine once again safe from crashing. Userspace may get confused by toggling this. If DAWR is force enabled/disabled between getting the number of breakpoints (via PTRACE_GETHWDBGINFO) and setting the breakpoint, userspace will get an inconsistent view of what's available. Similarly for guests. For the DAWR to be enabled in a KVM guest, the DAWR needs to be force enabled in the host AND the guest. For this reason, this won't work on POWERVM as it doesn't allow the HCALL to work. Writes of 'Y' to the dawr_enable_dangerous file will fail if the hypervisor doesn't support writing the DAWR. To double check the DAWR is working, run this kernel selftest: tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c Any errors/failures/skips mean something is wrong. Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman --- Documentation/powerpc/DAWR-POWER9.txt | 32 +++++++++++++++++ arch/powerpc/include/asm/hw_breakpoint.h | 8 +++++ arch/powerpc/kernel/hw_breakpoint.c | 62 +++++++++++++++++++++++++++++++- arch/powerpc/kernel/process.c | 9 ++--- arch/powerpc/kernel/ptrace.c | 3 +- arch/powerpc/kvm/book3s_hv.c | 3 +- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 23 ++++++------ 7 files changed, 123 insertions(+), 17 deletions(-) (limited to 'arch/powerpc/include') diff --git a/Documentation/powerpc/DAWR-POWER9.txt b/Documentation/powerpc/DAWR-POWER9.txt index 2feaa6619658..bdec03650941 100644 --- a/Documentation/powerpc/DAWR-POWER9.txt +++ b/Documentation/powerpc/DAWR-POWER9.txt @@ -56,3 +56,35 @@ POWER9. Loads and stores to the watchpoint locations will not be trapped in GDB. The watchpoint is remembered, so if the guest is migrated back to the POWER8 host, it will start working again. +Force enabling the DAWR +============================= +Kernels (since ~v5.2) have an option to force enable the DAWR via: + + echo Y > /sys/kernel/debug/powerpc/dawr_enable_dangerous + +This enables the DAWR even on POWER9. + +This is a dangerous setting, USE AT YOUR OWN RISK. + +Some users may not care about a bad user crashing their box +(ie. single user/desktop systems) and really want the DAWR. This +allows them to force enable DAWR. + +This flag can also be used to disable DAWR access. Once this is +cleared, all DAWR access should be cleared immediately and your +machine once again safe from crashing. + +Userspace may get confused by toggling this. 
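A minimal userspace sketch of how a debugger can guard against the
inconsistency described next, by re-probing after any toggle (header
names are indicative, following the powerpc ptrace selftests; error
handling omitted):

    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <asm/ptrace.h>  /* struct ppc_debug_info, PPC_DEBUG_FEATURE_* */

    /* Re-check before each breakpoint request: the answer can change
     * whenever dawr_enable_dangerous is toggled. */
    static int have_dawr(pid_t child)
    {
            struct ppc_debug_info info;

            if (ptrace(PPC_PTRACE_GETHWDBGINFO, child, NULL, &info))
                    return 0;
            return !!(info.features & PPC_DEBUG_FEATURE_DATA_BP_DAWR);
    }
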
If DAWR is force +enabled/disabled between getting the number of breakpoints (via +PTRACE_GETHWDBGINFO) and setting the breakpoint, userspace will get an +inconsistent view of what's available. Similarly for guests. + +For the DAWR to be enabled in a KVM guest, the DAWR needs to be force +enabled in the host AND the guest. For this reason, this won't work on +POWERVM as it doesn't allow the HCALL to work. Writes of 'Y' to the +dawr_enable_dangerous file will fail if the hypervisor doesn't support +writing the DAWR. + +To double check the DAWR is working, run this kernel selftest: + tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c +Any errors/failures/skips mean something is wrong. diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index ece4dc89c90b..0fe8c1e46bbc 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -90,10 +90,18 @@ static inline void hw_breakpoint_disable(void) extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); int hw_breakpoint_handler(struct die_args *args); +extern int set_dawr(struct arch_hw_breakpoint *brk); +extern bool dawr_force_enable; +static inline bool dawr_enabled(void) +{ + return dawr_force_enable; +} + #else /* CONFIG_HAVE_HW_BREAKPOINT */ static inline void hw_breakpoint_disable(void) { } static inline void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs) { } +static inline bool dawr_enabled(void) { return false; } #endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif /* __KERNEL__ */ #endif /* _PPC_BOOK3S_64_HW_BREAKPOINT_H */ diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index fec8a6773119..da307dd93ee3 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -29,11 +29,15 @@ #include #include #include +#include +#include #include #include #include #include +#include +#include #include /* @@ -174,7 +178,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, if (!ppc_breakpoint_available()) return -ENODEV; length_max = 8; /* DABR */ - if (cpu_has_feature(CPU_FTR_DAWR)) { + if (dawr_enabled()) { length_max = 512 ; /* 64 doublewords */ /* DAWR region can't cross 512 boundary */ if ((attr->bp_addr >> 9) != @@ -376,3 +380,59 @@ void hw_breakpoint_pmu_read(struct perf_event *bp) { /* TODO */ } + +bool dawr_force_enable; +EXPORT_SYMBOL_GPL(dawr_force_enable); + +static ssize_t dawr_write_file_bool(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct arch_hw_breakpoint null_brk = {0, 0, 0}; + size_t rc; + + /* Send error to user if they hypervisor won't allow us to write DAWR */ + if ((!dawr_force_enable) && + (firmware_has_feature(FW_FEATURE_LPAR)) && + (set_dawr(&null_brk) != H_SUCCESS)) + return -1; + + rc = debugfs_write_file_bool(file, user_buf, count, ppos); + if (rc) + return rc; + + /* If we are clearing, make sure all CPUs have the DAWR cleared */ + if (!dawr_force_enable) + smp_call_function((smp_call_func_t)set_dawr, &null_brk, 0); + + return rc; +} + +static const struct file_operations dawr_enable_fops = { + .read = debugfs_read_file_bool, + .write = dawr_write_file_bool, + .open = simple_open, + .llseek = default_llseek, +}; + +static int __init dawr_force_setup(void) +{ + dawr_force_enable = false; + + if (cpu_has_feature(CPU_FTR_DAWR)) { + /* Don't setup sysfs file for user control on P8 */ + dawr_force_enable = true; + return 0; + } + + if (PVR_VER(mfspr(SPRN_PVR)) == PVR_POWER9) { + /* Turn 
DAWR off by default, but allow admin to turn it on */ + dawr_force_enable = false; + debugfs_create_file_unsafe("dawr_enable_dangerous", 0600, + powerpc_debugfs_root, + &dawr_force_enable, + &dawr_enable_fops); + } + return 0; +} +arch_initcall(dawr_force_setup); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index dd9e0d5386ee..225705aac814 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -67,6 +67,7 @@ #include #include #include +#include #include #include @@ -784,7 +785,7 @@ static inline int set_dabr(struct arch_hw_breakpoint *brk) return __set_dabr(dabr, dabrx); } -static inline int set_dawr(struct arch_hw_breakpoint *brk) +int set_dawr(struct arch_hw_breakpoint *brk) { unsigned long dawr, dawrx, mrd; @@ -816,7 +817,7 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk) { memcpy(this_cpu_ptr(¤t_brk), brk, sizeof(*brk)); - if (cpu_has_feature(CPU_FTR_DAWR)) + if (dawr_enabled()) // Power8 or later set_dawr(brk); else if (!cpu_has_feature(CPU_FTR_ARCH_207S)) @@ -830,8 +831,8 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk) /* Check if we have DAWR or DABR hardware */ bool ppc_breakpoint_available(void) { - if (cpu_has_feature(CPU_FTR_DAWR)) - return true; /* POWER8 DAWR */ + if (dawr_enabled()) + return true; /* POWER8 DAWR or POWER9 forced DAWR */ if (cpu_has_feature(CPU_FTR_ARCH_207S)) return false; /* POWER9 with DAWR disabled */ /* DABR: Everything but POWER8 and POWER9 */ diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index d9ac7d94656e..684b0b315c32 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -43,6 +43,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -3088,7 +3089,7 @@ long arch_ptrace(struct task_struct *child, long request, dbginfo.sizeof_condition = 0; #ifdef CONFIG_HAVE_HW_BREAKPOINT dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE; - if (cpu_has_feature(CPU_FTR_DAWR)) + if (dawr_enabled()) dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR; #else dbginfo.features = 0; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 06964350b97a..0fab0a201027 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -74,6 +74,7 @@ #include #include #include +#include #include "book3s.h" @@ -3374,7 +3375,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, mtspr(SPRN_PURR, vcpu->arch.purr); mtspr(SPRN_SPURR, vcpu->arch.spurr); - if (cpu_has_feature(CPU_FTR_DAWR)) { + if (dawr_enabled()) { mtspr(SPRN_DAWR, vcpu->arch.dawr); mtspr(SPRN_DAWRX, vcpu->arch.dawrx); } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 3a5e719ef032..139027c62dc2 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -822,18 +822,21 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) mtspr SPRN_IAMR, r5 mtspr SPRN_PSPB, r6 mtspr SPRN_FSCR, r7 - ld r5, VCPU_DAWR(r4) - ld r6, VCPU_DAWRX(r4) - ld r7, VCPU_CIABR(r4) - ld r8, VCPU_TAR(r4) /* * Handle broken DAWR case by not writing it. This means we * can still store the DAWR register for migration. 
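 * In C terms, the replacement instruction sequence below amounts to
 * (a sketch, using the dawr_force_enable flag this patch introduces):
 *
 *	if (dawr_force_enable) {
 *		mtspr(SPRN_DAWR, vcpu->arch.dawr);
 *		mtspr(SPRN_DAWRX, vcpu->arch.dawrx);
 *	}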
*/ -BEGIN_FTR_SECTION + LOAD_REG_ADDR(r5, dawr_force_enable) + lbz r5, 0(r5) + cmpdi r5, 0 + beq 1f + ld r5, VCPU_DAWR(r4) + ld r6, VCPU_DAWRX(r4) mtspr SPRN_DAWR, r5 mtspr SPRN_DAWRX, r6 -END_FTR_SECTION_IFSET(CPU_FTR_DAWR) +1: + ld r7, VCPU_CIABR(r4) + ld r8, VCPU_TAR(r4) mtspr SPRN_CIABR, r7 mtspr SPRN_TAR, r8 ld r5, VCPU_IC(r4) @@ -2513,11 +2516,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) blr 2: -BEGIN_FTR_SECTION - /* POWER9 with disabled DAWR */ + LOAD_REG_ADDR(r11, dawr_force_enable) + lbz r11, 0(r11) + cmpdi r11, 0 li r3, H_HARDWARE - blr -END_FTR_SECTION_IFCLR(CPU_FTR_DAWR) + beqlr /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW rlwimi r5, r4, 2, DAWRX_WT -- cgit v1.2.3-59-g8ed1b From eadfb1c5f8c02f428a565e62e908e99900b697e4 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Fri, 22 Mar 2019 17:05:45 +1100 Subject: KVM: PPC: Book3S HV: Implement real mode H_PAGE_INIT handler Implement a real mode handler for the H_CALL H_PAGE_INIT which can be used to zero or copy a guest page. The page is defined to be 4k and must be 4k aligned. The in-kernel real mode handler halves the time to handle this H_CALL compared to handling it in userspace for a hash guest. Signed-off-by: Suraj Jitindar Singh Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_ppc.h | 2 + arch/powerpc/kvm/book3s_hv_rm_mmu.c | 144 ++++++++++++++++++++++++++++++++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2 +- 3 files changed, 147 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index ac22b28ae78d..df0b173da3e0 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -665,6 +665,8 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index); long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index); +long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags, + unsigned long dest, unsigned long src); long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr, unsigned long slb_v, unsigned int status, bool data); unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 3b3791ed74a6..8431ad1e8391 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -867,6 +868,149 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags, return ret; } +static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa, + int writing, unsigned long *hpa, + struct kvm_memory_slot **memslot_p) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_memory_slot *memslot; + unsigned long gfn, hva, pa, psize = PAGE_SHIFT; + unsigned int shift; + pte_t *ptep, pte; + + /* Find the memslot for this address */ + gfn = gpa >> PAGE_SHIFT; + memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); + if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) + return H_PARAMETER; + + /* Translate to host virtual address */ + hva = __gfn_to_hva_memslot(memslot, gfn); + + /* Try to find the host pte for that virtual address */ + ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift); + if (!ptep) + return H_TOO_HARD; + pte = kvmppc_read_update_linux_pte(ptep, writing); + if (!pte_present(pte)) + return H_TOO_HARD; + + /* Convert to a physical 
address */ + if (shift) + psize = 1UL << shift; + pa = pte_pfn(pte) << PAGE_SHIFT; + pa |= hva & (psize - 1); + pa |= gpa & ~PAGE_MASK; + + if (hpa) + *hpa = pa; + if (memslot_p) + *memslot_p = memslot; + + return H_SUCCESS; +} + +static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu, + unsigned long dest) +{ + struct kvm_memory_slot *memslot; + struct kvm *kvm = vcpu->kvm; + unsigned long pa, mmu_seq; + long ret = H_SUCCESS; + int i; + + /* Used later to detect if we might have been invalidated */ + mmu_seq = kvm->mmu_notifier_seq; + smp_rmb(); + + ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot); + if (ret != H_SUCCESS) + return ret; + + /* Check if we've been invalidated */ + raw_spin_lock(&kvm->mmu_lock.rlock); + if (mmu_notifier_retry(kvm, mmu_seq)) { + ret = H_TOO_HARD; + goto out_unlock; + } + + /* Zero the page */ + for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES) + dcbz((void *)pa); + kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE); + +out_unlock: + raw_spin_unlock(&kvm->mmu_lock.rlock); + return ret; +} + +static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu, + unsigned long dest, unsigned long src) +{ + unsigned long dest_pa, src_pa, mmu_seq; + struct kvm_memory_slot *dest_memslot; + struct kvm *kvm = vcpu->kvm; + long ret = H_SUCCESS; + + /* Used later to detect if we might have been invalidated */ + mmu_seq = kvm->mmu_notifier_seq; + smp_rmb(); + + ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot); + if (ret != H_SUCCESS) + return ret; + ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL); + if (ret != H_SUCCESS) + return ret; + + /* Check if we've been invalidated */ + raw_spin_lock(&kvm->mmu_lock.rlock); + if (mmu_notifier_retry(kvm, mmu_seq)) { + ret = H_TOO_HARD; + goto out_unlock; + } + + /* Copy the page */ + memcpy((void *)dest_pa, (void *)src_pa, SZ_4K); + + kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE); + +out_unlock: + raw_spin_unlock(&kvm->mmu_lock.rlock); + return ret; +} + +long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags, + unsigned long dest, unsigned long src) +{ + struct kvm *kvm = vcpu->kvm; + u64 pg_mask = SZ_4K - 1; /* 4K page size */ + long ret = H_SUCCESS; + + /* Don't handle radix mode here, go up to the virtual mode handler */ + if (kvm_is_radix(kvm)) + return H_TOO_HARD; + + /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */ + if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE | + H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED)) + return H_PARAMETER; + + /* dest (and src if copy_page flag set) must be page aligned */ + if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask))) + return H_PARAMETER; + + /* zero and/or copy the page as determined by the flags */ + if (flags & H_COPY_PAGE) + ret = kvmppc_do_h_page_init_copy(vcpu, dest, src); + else if (flags & H_ZERO_PAGE) + ret = kvmppc_do_h_page_init_zero(vcpu, dest); + + /* We can ignore the other flags */ + + return ret; +} + void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep, unsigned long pte_index) { diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 3a5e719ef032..4e8be4d9e114 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -2281,7 +2281,7 @@ hcall_real_table: #endif .long 0 /* 0x24 - H_SET_SPRG0 */ .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table - .long 0 /* 0x2c */ + .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table .long 0 /* 0x30 */ .long 0 
/* 0x34 */ .long 0 /* 0x38 */ -- cgit v1.2.3-59-g8ed1b From 2001825efcea75e4209e4956f6cd619fbc246d16 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Fri, 29 Mar 2019 16:42:20 +1100 Subject: KVM: PPC: Book3S HV: Avoid lockdep debugging in TCE realmode handlers The kvmppc_tce_to_ua() helper is called from real and virtual modes and it works fine as long as CONFIG_DEBUG_LOCKDEP is not enabled. However if the lockdep debugging is on, the lockdep will most likely break in kvm_memslots() because of srcu_dereference_check() so we need to use PPC-own kvm_memslots_raw() which uses realmode safe rcu_dereference_raw_notrace(). This creates a realmode copy of kvmppc_tce_to_ua() which replaces kvm_memslots() with kvm_memslots_raw(). Since kvmppc_rm_tce_to_ua() becomes static and can only be used inside HV KVM, this moves it earlier under CONFIG_KVM_BOOK3S_HV_POSSIBLE. This moves truly virtual-mode kvmppc_tce_to_ua() to where it belongs and drops the prmap parameter which was never used in the virtual mode. Fixes: d3695aa4f452 ("KVM: PPC: Add support for multiple-TCE hcalls", 2016-02-15) Signed-off-by: Alexey Kardashevskiy Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_ppc.h | 2 -- arch/powerpc/kvm/book3s_64_vio.c | 24 ++++++++++++++--- arch/powerpc/kvm/book3s_64_vio_hv.c | 51 +++++++++++++++++-------------------- 3 files changed, 44 insertions(+), 33 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index df0b173da3e0..02732eb156ae 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -197,8 +197,6 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table( (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \ (stt)->size, (ioba), (npages)) ? 
\ H_PARAMETER : H_SUCCESS) -extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce, - unsigned long *ua, unsigned long **prmap); extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt, unsigned long idx, unsigned long tce); extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index f100e331e69b..5e3b62c491f8 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -363,6 +363,22 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, return ret; } +static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce, + unsigned long *ua) +{ + unsigned long gfn = tce >> PAGE_SHIFT; + struct kvm_memory_slot *memslot; + + memslot = search_memslots(kvm_memslots(kvm), gfn); + if (!memslot) + return -EINVAL; + + *ua = __gfn_to_hva_memslot(memslot, gfn) | + (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); + + return 0; +} + static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce) { @@ -378,7 +394,7 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, if (iommu_tce_check_gpa(stt->page_shift, gpa)) return H_TOO_HARD; - if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL)) + if (kvmppc_tce_to_ua(stt->kvm, tce, &ua)) return H_TOO_HARD; list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { @@ -551,7 +567,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, dir = iommu_tce_direction(tce); - if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) { + if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) { ret = H_PARAMETER; goto unlock_exit; } @@ -612,7 +628,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, return ret; idx = srcu_read_lock(&vcpu->kvm->srcu); - if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) { + if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) { ret = H_TOO_HARD; goto unlock_exit; } @@ -647,7 +663,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, } tce = be64_to_cpu(tce); - if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) + if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) return H_PARAMETER; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 2206bc729b9a..31e59748832a 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -88,6 +88,25 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm, EXPORT_SYMBOL_GPL(kvmppc_find_table); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce, + unsigned long *ua, unsigned long **prmap) +{ + unsigned long gfn = tce >> PAGE_SHIFT; + struct kvm_memory_slot *memslot; + + memslot = search_memslots(kvm_memslots_raw(kvm), gfn); + if (!memslot) + return -EINVAL; + + *ua = __gfn_to_hva_memslot(memslot, gfn) | + (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); + + if (prmap) + *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; + + return 0; +} + /* * Validates TCE address. * At the moment flags and page mask are validated. 
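For context, the real-mode-safe accessor this change relies on looks
roughly like the following in the powerpc KVM headers of this era (a
sketch for reference, not part of this diff):

    static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
    {
            /* no SRCU lockdep check, so this is callable in real mode */
            return rcu_dereference_raw_notrace(kvm->memslots[0]);
    }
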
@@ -111,7 +130,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt, if (iommu_tce_check_gpa(stt->page_shift, gpa)) return H_PARAMETER; - if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL)) + if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL)) return H_TOO_HARD; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { @@ -181,28 +200,6 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, } EXPORT_SYMBOL_GPL(kvmppc_tce_put); -long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce, - unsigned long *ua, unsigned long **prmap) -{ - unsigned long gfn = tce >> PAGE_SHIFT; - struct kvm_memory_slot *memslot; - - memslot = search_memslots(kvm_memslots(kvm), gfn); - if (!memslot) - return -EINVAL; - - *ua = __gfn_to_hva_memslot(memslot, gfn) | - (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); - -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - if (prmap) - *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; -#endif - - return 0; -} -EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua); - #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl, unsigned long entry, unsigned long *hpa, @@ -390,7 +387,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, return ret; dir = iommu_tce_direction(tce); - if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) + if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) return H_PARAMETER; entry = ioba >> stt->page_shift; @@ -492,7 +489,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, */ struct mm_iommu_table_group_mem_t *mem; - if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) + if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) return H_TOO_HARD; mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); @@ -508,7 +505,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, * We do not require memory to be preregistered in this case * so lock rmap and do __find_linux_pte_or_hugepte(). */ - if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap)) + if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap)) return H_TOO_HARD; rmap = (void *) vmalloc_to_phys(rmap); @@ -542,7 +539,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); ua = 0; - if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) + if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) return H_PARAMETER; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { -- cgit v1.2.3-59-g8ed1b From e1a1ef84cd07f72ce12f139eb9a37d3f9028e7a7 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Fri, 29 Mar 2019 16:43:26 +1100 Subject: KVM: PPC: Book3S: Allocate guest TCEs on demand too We already allocate hardware TCE tables in multiple levels and skip intermediate levels when we can, now it is a turn of the KVM TCE tables. Thankfully these are allocated already in 2 levels. This moves the table's last level allocation from the creating helper to kvmppc_tce_put() and kvm_spapr_tce_fault(). Since such allocation cannot be done in real mode, this creates a virtual mode version of kvmppc_tce_put() which handles allocations. This adds kvmppc_rm_ioba_validate() to do an additional test if the consequent kvmppc_tce_put() needs a page which has not been allocated; if this is the case, we bail out to virtual mode handlers. 
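Conceptually, the resulting dispatch is (a sketch only; in reality the
H_TOO_HARD return causes a guest exit and the hcall is re-driven
through the virtual-mode handler rather than called directly):

    ret = kvmppc_rm_h_put_tce(vcpu, liobn, ioba, tce); /* real mode */
    if (ret == H_TOO_HARD)
            /* virtual mode; may allocate the backing TCE page */
            ret = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
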
The allocations are protected by a new mutex as kvm->lock is not suitable for the task because the fault handler is called with the mmap_sem held but kvmhv_setup_mmu() locks kvm->lock and mmap_sem in the reverse order. Signed-off-by: Alexey Kardashevskiy Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_host.h | 3 ++ arch/powerpc/include/asm/kvm_ppc.h | 2 -- arch/powerpc/kvm/book3s_64_vio.c | 72 +++++++++++++++++++++++++++++-------- arch/powerpc/kvm/book3s_64_vio_hv.c | 66 +++++++++++++++++++++++++--------- 4 files changed, 110 insertions(+), 33 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index e6b5bb012ccb..07cefa53222e 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -201,6 +201,8 @@ struct kvmppc_spapr_tce_iommu_table { struct kref kref; }; +#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) + struct kvmppc_spapr_tce_table { struct list_head list; struct kvm *kvm; @@ -210,6 +212,7 @@ struct kvmppc_spapr_tce_table { u64 offset; /* in pages */ u64 size; /* window size in pages */ struct list_head iommu_tables; + struct mutex alloc_lock; struct page *pages[0]; }; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 02732eb156ae..05128e917a83 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -197,8 +197,6 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table( (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \ (stt)->size, (ioba), (npages)) ? \ H_PARAMETER : H_SUCCESS) -extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt, - unsigned long idx, unsigned long tce); extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce); extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 5e3b62c491f8..66270e07449a 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -228,11 +228,33 @@ static void release_spapr_tce_table(struct rcu_head *head) unsigned long i, npages = kvmppc_tce_pages(stt->size); for (i = 0; i < npages; i++) - __free_page(stt->pages[i]); + if (stt->pages[i]) + __free_page(stt->pages[i]); kfree(stt); } +static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt, + unsigned long sttpage) +{ + struct page *page = stt->pages[sttpage]; + + if (page) + return page; + + mutex_lock(&stt->alloc_lock); + page = stt->pages[sttpage]; + if (!page) { + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + WARN_ON_ONCE(!page); + if (page) + stt->pages[sttpage] = page; + } + mutex_unlock(&stt->alloc_lock); + + return page; +} + static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf) { struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data; @@ -241,7 +263,10 @@ static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf) if (vmf->pgoff >= kvmppc_tce_pages(stt->size)) return VM_FAULT_SIGBUS; - page = stt->pages[vmf->pgoff]; + page = kvm_spapr_get_tce_page(stt, vmf->pgoff); + if (!page) + return VM_FAULT_OOM; + get_page(page); vmf->page = page; return 0; @@ -296,7 +321,6 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *siter; unsigned long npages, size = args->size; int ret = -ENOMEM; - int i; if (!args->size || args->page_shift < 12 || args->page_shift > 34 || (args->offset + args->size > (ULLONG_MAX >> 
args->page_shift))) @@ -318,14 +342,9 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, stt->offset = args->offset; stt->size = size; stt->kvm = kvm; + mutex_init(&stt->alloc_lock); INIT_LIST_HEAD_RCU(&stt->iommu_tables); - for (i = 0; i < npages; i++) { - stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!stt->pages[i]) - goto fail; - } - mutex_lock(&kvm->lock); /* Check this LIOBN hasn't been previously allocated */ @@ -352,11 +371,6 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, if (ret >= 0) return ret; - fail: - for (i = 0; i < npages; i++) - if (stt->pages[i]) - __free_page(stt->pages[i]); - kfree(stt); fail_acct: kvmppc_account_memlimit(kvmppc_stt_pages(npages), false); @@ -413,6 +427,36 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, return H_SUCCESS; } +/* + * Handles TCE requests for emulated devices. + * Puts guest TCE values to the table and expects user space to convert them. + * Cannot fail so kvmppc_tce_validate must be called before it. + */ +static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, + unsigned long idx, unsigned long tce) +{ + struct page *page; + u64 *tbl; + unsigned long sttpage; + + idx -= stt->offset; + sttpage = idx / TCES_PER_PAGE; + page = stt->pages[sttpage]; + + if (!page) { + /* We allow any TCE, not just with read|write permissions */ + if (!tce) + return; + + page = kvm_spapr_get_tce_page(stt, sttpage); + if (!page) + return; + } + tbl = page_to_virt(page); + + tbl[idx % TCES_PER_PAGE] = tce; +} + static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl, unsigned long entry) { diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 31e59748832a..484b47fa3960 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -66,8 +66,6 @@ #endif -#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) - /* * Finds a TCE table descriptor by LIOBN. * @@ -148,7 +146,6 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt, return H_SUCCESS; } -#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ /* Note on the use of page_address() in real mode, * @@ -180,13 +177,9 @@ static u64 *kvmppc_page_address(struct page *page) /* * Handles TCE requests for emulated devices. * Puts guest TCE values to the table and expects user space to convert them. - * Called in both real and virtual modes. - * Cannot fail so kvmppc_tce_validate must be called before it. - * - * WARNING: This will be called in real-mode on HV KVM and virtual - * mode on PR KVM + * Cannot fail so kvmppc_rm_tce_validate must be called before it. */ -void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, +static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt, unsigned long idx, unsigned long tce) { struct page *page; @@ -194,13 +187,48 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, idx -= stt->offset; page = stt->pages[idx / TCES_PER_PAGE]; + /* + * page must not be NULL in real mode, + * kvmppc_rm_ioba_validate() must have taken care of this. + */ + WARN_ON_ONCE_RM(!page); tbl = kvmppc_page_address(page); tbl[idx % TCES_PER_PAGE] = tce; } -EXPORT_SYMBOL_GPL(kvmppc_tce_put); -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +/* + * TCEs pages are allocated in kvmppc_rm_tce_put() which won't be able to do so + * in real mode. + * Check if kvmppc_rm_tce_put() can succeed in real mode, i.e. a TCEs page is + * allocated or not required (when clearing a tce entry). 
+ */ +static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt, + unsigned long ioba, unsigned long npages, bool clearing) +{ + unsigned long i, idx, sttpage, sttpages; + unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages); + + if (ret) + return ret; + /* + * clearing==true says kvmppc_rm_tce_put won't be allocating pages + * for empty tces. + */ + if (clearing) + return H_SUCCESS; + + idx = (ioba >> stt->page_shift) - stt->offset; + sttpage = idx / TCES_PER_PAGE; + sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) / + TCES_PER_PAGE; + for (i = sttpage; i < sttpage + sttpages; ++i) + if (!stt->pages[i]) + return H_TOO_HARD; + + return H_SUCCESS; +} + static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl, unsigned long entry, unsigned long *hpa, enum dma_data_direction *direction) @@ -378,7 +406,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, if (!stt) return H_TOO_HARD; - ret = kvmppc_ioba_validate(stt, ioba, 1); + ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0); if (ret != H_SUCCESS) return ret; @@ -406,7 +434,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, } } - kvmppc_tce_put(stt, entry, tce); + kvmppc_rm_tce_put(stt, entry, tce); return H_SUCCESS; } @@ -477,7 +505,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, if (tce_list & (SZ_4K - 1)) return H_PARAMETER; - ret = kvmppc_ioba_validate(stt, ioba, npages); + ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false); if (ret != H_SUCCESS) return ret; @@ -554,7 +582,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, } } - kvmppc_tce_put(stt, entry + i, tce); + kvmppc_rm_tce_put(stt, entry + i, tce); } unlock_exit: @@ -580,7 +608,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, if (!stt) return H_TOO_HARD; - ret = kvmppc_ioba_validate(stt, ioba, npages); + ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0); if (ret != H_SUCCESS) return ret; @@ -607,7 +635,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, } for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) - kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value); + kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value); return H_SUCCESS; } @@ -632,6 +660,10 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, idx = (ioba >> stt->page_shift) - stt->offset; page = stt->pages[idx / TCES_PER_PAGE]; + if (!page) { + vcpu->arch.regs.gpr[4] = 0; + return H_SUCCESS; + } tbl = (u64 *)page_address(page); vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE]; -- cgit v1.2.3-59-g8ed1b From 2940ba0c48bf18e15e85cbb0f26c0e88e1211587 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 29 Apr 2019 19:00:40 +1000 Subject: KVM: PPC: Book3S HV: Move HPT guest TLB flushing to C code This replaces assembler code in book3s_hv_rmhandlers.S that checks the kvm->arch.need_tlb_flush cpumask and optionally does a TLB flush with C code in book3s_hv_builtin.c. Note that unlike the radix version, the hash version doesn't do an explicit ERAT invalidation because we will invalidate and load up the SLB before entering the guest, and that will invalidate the ERAT. 
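For reference, the constants in the C replacement line up with the
magic numbers in the assembler being removed: PPC_BIT(x) is
1UL << (63 - x) in IBM bit numbering, so

    PPC_BIT(52) == 0x800   /* RB: IS = 0b10, invalidate the whole set */
    PPC_BIT(51) == 0x1000  /* added per loop to step the set number */

which matches the old 'li r7,0x800' / 'addi r7,r7,0x1000' sequence.
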
Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_ppc.h | 2 ++ arch/powerpc/kvm/book3s_hv_builtin.c | 29 +++++++++++++++++++++++++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 38 ++++----------------------------- 3 files changed, 35 insertions(+), 34 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 05128e917a83..0975f5f2fd70 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -476,6 +476,8 @@ extern void kvm_hv_vm_activated(void); extern void kvm_hv_vm_deactivated(void); extern bool kvm_hv_mode_active(void); +extern void kvmppc_hpt_check_need_tlb_flush(struct kvm *kvm); + #else static inline void __init kvm_cma_reserve(void) {} diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index b0cf22477e87..489abe5d9797 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -805,3 +805,32 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu) vcpu->arch.doorbell_request = 0; } } + +void kvmppc_hpt_check_need_tlb_flush(struct kvm *kvm) +{ + int pcpu = raw_smp_processor_id(); + unsigned long rb, set; + + /* + * On POWER9, individual threads can come in here, but the + * TLB is shared between the 4 threads in a core, hence + * invalidating on one thread invalidates for all. + * Thus we make all 4 threads use the same bit. + */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) + pcpu = cpu_first_thread_sibling(pcpu); + + if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) { + rb = PPC_BIT(52); /* IS = 2 */ + for (set = 0; set < kvm->arch.tlb_sets; ++set) { + asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) + : : "r" (rb), "i" (0), "i" (0), "i" (0), + "r" (0) : "memory"); + rb += PPC_BIT(51); /* increment set number */ + } + asm volatile("ptesync": : :"memory"); + + /* Clear the bit after the TLB flush */ + cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush); + } +} diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 31f914742fdd..9d2fea43b0c5 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -623,40 +623,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) isync /* See if we need to flush the TLB. Hash has to be done in RM */ - lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ -BEGIN_FTR_SECTION - /* - * On POWER9, individual threads can come in here, but the - * TLB is shared between the 4 threads in a core, hence - * invalidating on one thread invalidates for all. - * Thus we make all 4 threads use the same bit here. - */ - clrrdi r6,r6,2 -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) - clrldi r7,r6,64-6 /* extract bit number (6 bits) */ - srdi r6,r6,6 /* doubleword number */ - sldi r6,r6,3 /* address offset */ - add r6,r6,r9 - addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */ - li r8,1 - sld r8,r8,r7 - ld r7,0(r6) - and. r7,r7,r8 - beq 22f - /* Flush the TLB of any entries for this LPID */ - lwz r0,KVM_TLB_SETS(r9) - mtctr r0 - li r7,0x800 /* IS field = 0b10 */ - ptesync - li r0,0 /* RS for P9 version of tlbiel */ -28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */ - addi r7,r7,0x1000 - bdnz 28b - ptesync -23: ldarx r7,0,r6 /* clear the bit after TLB flushed */ - andc r7,r7,r8 - stdcx. 
r7,0,r6 - bne 23b + mr r3, r9 /* kvm pointer */ + bl kvmppc_hpt_check_need_tlb_flush + nop + ld r5, HSTATE_KVM_VCORE(r13) /* Add timebase offset onto timebase */ 22: ld r8,VCORE_TB_OFFSET(r5) -- cgit v1.2.3-59-g8ed1b From 70ea13f6e609e8762d9f57287ebf873a18c91a44 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 29 Apr 2019 19:02:58 +1000 Subject: KVM: PPC: Book3S HV: Flush TLB on secondary radix threads When running on POWER9 with kvm_hv.indep_threads_mode = N and the host in SMT1 mode, KVM will run guest VCPUs on offline secondary threads. If those guests are in radix mode, we fail to load the LPID and flush the TLB if necessary, leading to the guest crashing with an unsupported MMU fault. This arises from commit 9a4506e11b97 ("KVM: PPC: Book3S HV: Make radix handle process scoped LPID flush in C, with relocation on", 2018-05-17), which didn't consider the case where indep_threads_mode = N. For simplicity, this makes the real-mode guest entry path flush the TLB in the same place for both radix and hash guests, as we did before 9a4506e11b97, though the code is now C code rather than assembly code. We also have the radix TLB flush open-coded rather than calling radix__local_flush_tlb_lpid_guest(), because the TLB flush can be called in real mode, and in real mode we don't want to invoke the tracepoint code. Fixes: 9a4506e11b97 ("KVM: PPC: Book3S HV: Make radix handle process scoped LPID flush in C, with relocation on") Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_ppc.h | 3 +- arch/powerpc/kvm/book3s_hv.c | 55 +++++---------------------------- arch/powerpc/kvm/book3s_hv_builtin.c | 52 ++++++++++++++++++++++++------- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 6 ++-- 4 files changed, 53 insertions(+), 63 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 0975f5f2fd70..0e99a6f8066f 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -476,7 +476,8 @@ extern void kvm_hv_vm_activated(void); extern void kvm_hv_vm_deactivated(void); extern bool kvm_hv_mode_active(void); -extern void kvmppc_hpt_check_need_tlb_flush(struct kvm *kvm); +extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu, + struct kvm_nested_guest *nested); #else static inline void __init kvm_cma_reserve(void) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 392aa7ee9d02..d250a76b1153 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2584,37 +2584,6 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu) } } -static void kvmppc_radix_check_need_tlb_flush(struct kvm *kvm, int pcpu, - struct kvm_nested_guest *nested) -{ - cpumask_t *need_tlb_flush; - int lpid; - - if (!cpu_has_feature(CPU_FTR_HVMODE)) - return; - - if (cpu_has_feature(CPU_FTR_ARCH_300)) - pcpu &= ~0x3UL; - - if (nested) { - lpid = nested->shadow_lpid; - need_tlb_flush = &nested->need_tlb_flush; - } else { - lpid = kvm->arch.lpid; - need_tlb_flush = &kvm->arch.need_tlb_flush; - } - - mtspr(SPRN_LPID, lpid); - isync(); - smp_mb(); - - if (cpumask_test_cpu(pcpu, need_tlb_flush)) { - radix__local_flush_tlb_lpid_guest(lpid); - /* Clear the bit after the TLB flush */ - cpumask_clear_cpu(pcpu, need_tlb_flush); - } -} - static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) { int cpu; @@ -3308,20 +3277,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) for (sub = 0; sub < core_info.n_subcores; 
++sub) spin_unlock(&core_info.vc[sub]->lock); - if (kvm_is_radix(vc->kvm)) { - /* - * Do we need to flush the process scoped TLB for the LPAR? - * - * On POWER9, individual threads can come in here, but the - * TLB is shared between the 4 threads in a core, hence - * invalidating on one thread invalidates for all. - * Thus we make all 4 threads use the same bit here. - * - * Hash must be flushed in realmode in order to use tlbiel. - */ - kvmppc_radix_check_need_tlb_flush(vc->kvm, pcpu, NULL); - } - guest_enter_irqoff(); srcu_idx = srcu_read_lock(&vc->kvm->srcu); @@ -4050,7 +4005,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, unsigned long lpcr) { int trap, r, pcpu; - int srcu_idx; + int srcu_idx, lpid; struct kvmppc_vcore *vc; struct kvm *kvm = vcpu->kvm; struct kvm_nested_guest *nested = vcpu->arch.nested; @@ -4126,8 +4081,12 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, vc->vcore_state = VCORE_RUNNING; trace_kvmppc_run_core(vc, 0); - if (cpu_has_feature(CPU_FTR_HVMODE)) - kvmppc_radix_check_need_tlb_flush(kvm, pcpu, nested); + if (cpu_has_feature(CPU_FTR_HVMODE)) { + lpid = nested ? nested->shadow_lpid : kvm->arch.lpid; + mtspr(SPRN_LPID, lpid); + isync(); + kvmppc_check_need_tlb_flush(kvm, pcpu, nested); + } trace_hardirqs_on(); guest_enter_irqoff(); diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 489abe5d9797..6035d24f1d1d 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -806,11 +806,40 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu) } } -void kvmppc_hpt_check_need_tlb_flush(struct kvm *kvm) +static void flush_guest_tlb(struct kvm *kvm) { - int pcpu = raw_smp_processor_id(); unsigned long rb, set; + rb = PPC_BIT(52); /* IS = 2 */ + if (kvm_is_radix(kvm)) { + /* R=1 PRS=1 RIC=2 */ + asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) + : : "r" (rb), "i" (1), "i" (1), "i" (2), + "r" (0) : "memory"); + for (set = 1; set < kvm->arch.tlb_sets; ++set) { + rb += PPC_BIT(51); /* increment set number */ + /* R=1 PRS=1 RIC=0 */ + asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) + : : "r" (rb), "i" (1), "i" (1), "i" (0), + "r" (0) : "memory"); + } + } else { + for (set = 0; set < kvm->arch.tlb_sets; ++set) { + /* R=0 PRS=0 RIC=0 */ + asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) + : : "r" (rb), "i" (0), "i" (0), "i" (0), + "r" (0) : "memory"); + rb += PPC_BIT(51); /* increment set number */ + } + } + asm volatile("ptesync": : :"memory"); +} + +void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu, + struct kvm_nested_guest *nested) +{ + cpumask_t *need_tlb_flush; + /* * On POWER9, individual threads can come in here, but the * TLB is shared between the 4 threads in a core, hence @@ -820,17 +849,16 @@ void kvmppc_hpt_check_need_tlb_flush(struct kvm *kvm) if (cpu_has_feature(CPU_FTR_ARCH_300)) pcpu = cpu_first_thread_sibling(pcpu); - if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) { - rb = PPC_BIT(52); /* IS = 2 */ - for (set = 0; set < kvm->arch.tlb_sets; ++set) { - asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) - : : "r" (rb), "i" (0), "i" (0), "i" (0), - "r" (0) : "memory"); - rb += PPC_BIT(51); /* increment set number */ - } - asm volatile("ptesync": : :"memory"); + if (nested) + need_tlb_flush = &nested->need_tlb_flush; + else + need_tlb_flush = &kvm->arch.need_tlb_flush; + + if (cpumask_test_cpu(pcpu, need_tlb_flush)) { + flush_guest_tlb(kvm); /* Clear the bit after the TLB flush */ - cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush); + cpumask_clear_cpu(pcpu, 
need_tlb_flush); } } +EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 9d2fea43b0c5..66d6bf8caca9 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -622,9 +622,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) mtspr SPRN_LPID,r7 isync - /* See if we need to flush the TLB. Hash has to be done in RM */ + /* See if we need to flush the TLB. */ mr r3, r9 /* kvm pointer */ - bl kvmppc_hpt_check_need_tlb_flush + lhz r4, PACAPACAINDEX(r13) /* physical cpu number */ + li r5, 0 /* nested vcpu pointer */ + bl kvmppc_check_need_tlb_flush nop ld r5, HSTATE_KVM_VCORE(r13) -- cgit v1.2.3-59-g8ed1b From 90c73795afa24890bd2ae4f3b359de04b4147d37 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 18 Apr 2019 12:39:27 +0200 Subject: KVM: PPC: Book3S HV: Add a new KVM device for the XIVE native exploitation mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is the basic framework for the new KVM device supporting the XIVE native exploitation mode. The user interface exposes a new KVM device to be created by QEMU, only available when running on a L0 hypervisor. Support for nested guests is not available yet. The XIVE device reuses the device structure of the XICS-on-XIVE device as they have a lot in common. That could possibly change in the future if the need arise. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/devices/xive.txt | 19 +++ arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/include/asm/kvm_ppc.h | 8 ++ arch/powerpc/include/uapi/asm/kvm.h | 3 + arch/powerpc/kvm/Makefile | 2 +- arch/powerpc/kvm/book3s.c | 7 +- arch/powerpc/kvm/book3s_xive_native.c | 179 +++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 2 + 8 files changed, 219 insertions(+), 2 deletions(-) create mode 100644 Documentation/virtual/kvm/devices/xive.txt create mode 100644 arch/powerpc/kvm/book3s_xive_native.c (limited to 'arch/powerpc/include') diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virtual/kvm/devices/xive.txt new file mode 100644 index 000000000000..fdbd2ff92a88 --- /dev/null +++ b/Documentation/virtual/kvm/devices/xive.txt @@ -0,0 +1,19 @@ +POWER9 eXternal Interrupt Virtualization Engine (XIVE Gen1) +========================================================== + +Device types supported: + KVM_DEV_TYPE_XIVE POWER9 XIVE Interrupt Controller generation 1 + +This device acts as a VM interrupt controller. It provides the KVM +interface to configure the interrupt sources of a VM in the underlying +POWER9 XIVE interrupt controller. + +Only one XIVE instance may be instantiated. A guest XIVE device +requires a POWER9 host and the guest OS should have support for the +XIVE native exploitation interrupt mode. If not, it should run using +the legacy interrupt mode, referred as XICS (POWER7/8). + +* Groups: + + 1. 
KVM_DEV_XIVE_GRP_CTRL + Provides global controls on the device diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 07cefa53222e..9074f1d7613c 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -225,6 +225,7 @@ extern struct kvm_device_ops kvm_xics_ops; struct kvmppc_xive; struct kvmppc_xive_vcpu; extern struct kvm_device_ops kvm_xive_ops; +extern struct kvm_device_ops kvm_xive_native_ops; struct kvmppc_passthru_irqmap; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 0e99a6f8066f..f746d85f36c7 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -593,6 +593,10 @@ extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval); extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status); extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu); + +extern void kvmppc_xive_native_init_module(void); +extern void kvmppc_xive_native_exit_module(void); + #else static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority) { return -1; } @@ -616,6 +620,10 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status) { return -ENODEV; } static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { } + +static inline void kvmppc_xive_native_init_module(void) { } +static inline void kvmppc_xive_native_exit_module(void) { } + #endif /* CONFIG_KVM_XIVE */ #if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER) diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 26ca425f4c2c..be0ce1f17625 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -677,4 +677,7 @@ struct kvm_ppc_cpu_char { #define KVM_XICS_PRESENTED (1ULL << 43) #define KVM_XICS_QUEUED (1ULL << 44) +/* POWER9 XIVE Native Interrupt Controller */ +#define KVM_DEV_XIVE_GRP_CTRL 1 + #endif /* __LINUX_KVM_POWERPC_H */ diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 3223aec88b2c..4c67cc79de7c 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -94,7 +94,7 @@ endif kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \ book3s_xics.o -kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o +kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o book3s_xive_native.o kvm-book3s_64-objs-$(CONFIG_SPAPR_TCE_IOMMU) += book3s_64_vio.o kvm-book3s_64-module-objs := \ diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 10c5579d20ce..7c3348fa27e1 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -1050,6 +1050,9 @@ static int kvmppc_book3s_init(void) if (xics_on_xive()) { kvmppc_xive_init_module(); kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS); + kvmppc_xive_native_init_module(); + kvm_register_device_ops(&kvm_xive_native_ops, + KVM_DEV_TYPE_XIVE); } else #endif kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS); @@ -1060,8 +1063,10 @@ static int kvmppc_book3s_init(void) static void kvmppc_book3s_exit(void) { #ifdef CONFIG_KVM_XICS - if (xics_on_xive()) + if (xics_on_xive()) { kvmppc_xive_exit_module(); + kvmppc_xive_native_exit_module(); + } #endif #ifdef CONFIG_KVM_BOOK3S_32_HANDLER kvmppc_book3s_exit_pr(); diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c 
new file mode 100644 index 000000000000..751259394150 --- /dev/null +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2019, IBM Corporation. + */ + +#define pr_fmt(fmt) "xive-kvm: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "book3s_xive.h" + +static int kvmppc_xive_native_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_DEV_XIVE_GRP_CTRL: + break; + } + return -ENXIO; +} + +static int kvmppc_xive_native_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} + +static int kvmppc_xive_native_has_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_DEV_XIVE_GRP_CTRL: + break; + } + return -ENXIO; +} + +static void kvmppc_xive_native_free(struct kvm_device *dev) +{ + struct kvmppc_xive *xive = dev->private; + struct kvm *kvm = xive->kvm; + + debugfs_remove(xive->dentry); + + pr_devel("Destroying xive native device\n"); + + if (kvm) + kvm->arch.xive = NULL; + + if (xive->vp_base != XIVE_INVALID_VP) + xive_native_free_vp_block(xive->vp_base); + + kfree(xive); + kfree(dev); +} + +static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type) +{ + struct kvmppc_xive *xive; + struct kvm *kvm = dev->kvm; + int ret = 0; + + pr_devel("Creating xive native device\n"); + + if (kvm->arch.xive) + return -EEXIST; + + xive = kzalloc(sizeof(*xive), GFP_KERNEL); + if (!xive) + return -ENOMEM; + + dev->private = xive; + xive->dev = dev; + xive->kvm = kvm; + kvm->arch.xive = xive; + + /* + * Allocate a bunch of VPs. KVM_MAX_VCPUS is a large value for + * a default. Getting the max number of CPUs the VM was + * configured with would improve our usage of the XIVE VP space. 
+ */ + xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS); + pr_devel("VP_Base=%x\n", xive->vp_base); + + if (xive->vp_base == XIVE_INVALID_VP) + ret = -ENXIO; + + xive->single_escalation = xive_native_has_single_escalation(); + + if (ret) + kfree(xive); + + return ret; +} + +static int xive_native_debug_show(struct seq_file *m, void *private) +{ + struct kvmppc_xive *xive = m->private; + struct kvm *kvm = xive->kvm; + + if (!kvm) + return 0; + + return 0; +} + +static int xive_native_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, xive_native_debug_show, inode->i_private); +} + +static const struct file_operations xive_native_debug_fops = { + .open = xive_native_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void xive_native_debugfs_init(struct kvmppc_xive *xive) +{ + char *name; + + name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive); + if (!name) { + pr_err("%s: no memory for name\n", __func__); + return; + } + + xive->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root, + xive, &xive_native_debug_fops); + + pr_debug("%s: created %s\n", __func__, name); + kfree(name); +} + +static void kvmppc_xive_native_init(struct kvm_device *dev) +{ + struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private; + + /* Register some debug interfaces */ + xive_native_debugfs_init(xive); +} + +struct kvm_device_ops kvm_xive_native_ops = { + .name = "kvm-xive-native", + .create = kvmppc_xive_native_create, + .init = kvmppc_xive_native_init, + .destroy = kvmppc_xive_native_free, + .set_attr = kvmppc_xive_native_set_attr, + .get_attr = kvmppc_xive_native_get_attr, + .has_attr = kvmppc_xive_native_has_attr, +}; + +void kvmppc_xive_native_init_module(void) +{ + ; +} + +void kvmppc_xive_native_exit_module(void) +{ + ; +} diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 6d4ea4b6c922..e6368163d3a0 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1211,6 +1211,8 @@ enum kvm_device_type { #define KVM_DEV_TYPE_ARM_VGIC_V3 KVM_DEV_TYPE_ARM_VGIC_V3 KVM_DEV_TYPE_ARM_VGIC_ITS, #define KVM_DEV_TYPE_ARM_VGIC_ITS KVM_DEV_TYPE_ARM_VGIC_ITS + KVM_DEV_TYPE_XIVE, +#define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_MAX, }; -- cgit v1.2.3-59-g8ed1b From eacc56bb9de3e6830ddc169553772cd6de59ee4c Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 18 Apr 2019 12:39:28 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Introduce a new capability KVM_CAP_PPC_IRQ_XIVE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The user interface exposes a new capability KVM_CAP_PPC_IRQ_XIVE to let QEMU connect the vCPU presenters to the XIVE KVM device if required. The capability is not advertised for now as the full support for the XIVE native exploitation mode is not yet available. When this is the case, the capability will be advertised on PowerNV Hypervisors only. Nested guests (pseries KVM Hypervisor) are not supported. Internally, the interface to the new KVM device is protected with a new interrupt mode: KVMPPC_IRQ_XIVE.
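As an illustration, connecting a vCPU to the device boils down to a single KVM_ENABLE_CAP ioctl on the vCPU file descriptor. The following is a minimal userspace sketch, not part of the patch itself (error handling omitted; vcpu_fd and xive_fd are assumed to come from KVM_CREATE_VCPU and KVM_CREATE_DEVICE):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Connect one vCPU presenter to the in-kernel XIVE device:
     * args[0] is the XIVE device fd, args[1] the server ID. */
    static int connect_vcpu_to_xive(int vcpu_fd, int xive_fd, __u32 server)
    {
            struct kvm_enable_cap cap = {
                    .cap  = KVM_CAP_PPC_IRQ_XIVE,
                    .args = { (__u64)xive_fd, server },
            };

            return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }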
Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/api.txt | 9 ++ arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/include/asm/kvm_ppc.h | 13 +++ arch/powerpc/kvm/book3s_xive.c | 88 ++++++++++---------- arch/powerpc/kvm/book3s_xive.h | 11 +++ arch/powerpc/kvm/book3s_xive_native.c | 150 ++++++++++++++++++++++++++++++++++ arch/powerpc/kvm/powerpc.c | 36 ++++++++ include/uapi/linux/kvm.h | 1 + 8 files changed, 268 insertions(+), 41 deletions(-) (limited to 'arch/powerpc/include') diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 67068c47c591..e38eb17b7be6 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -4504,6 +4504,15 @@ struct kvm_sync_regs { struct kvm_vcpu_events events; }; +6.75 KVM_CAP_PPC_IRQ_XIVE + +Architectures: ppc +Target: vcpu +Parameters: args[0] is the XIVE device fd + args[1] is the XIVE CPU number (server ID) for this vcpu + +This capability connects the vcpu to an in-kernel XIVE device. + 7. Capabilities that can be enabled on VMs ------------------------------------------ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 9074f1d7613c..eac25fd7e631 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -453,6 +453,7 @@ struct kvmppc_passthru_irqmap { #define KVMPPC_IRQ_DEFAULT 0 #define KVMPPC_IRQ_MPIC 1 #define KVMPPC_IRQ_XICS 2 /* Includes a XIVE option */ +#define KVMPPC_IRQ_XIVE 3 /* XIVE native exploitation mode */ #define MMIO_HPTE_CACHE_SIZE 4 diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index f746d85f36c7..9fc2753e516e 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -594,6 +594,14 @@ extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status); extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu); +static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE; +} + +extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev, + struct kvm_vcpu *vcpu, u32 cpu); +extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu); extern void kvmppc_xive_native_init_module(void); extern void kvmppc_xive_native_exit_module(void); @@ -621,6 +629,11 @@ static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 ir int level, bool line_status) { return -ENODEV; } static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { } +static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu) + { return 0; } +static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev, + struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; } +static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { } static inline void kvmppc_xive_native_init_module(void) { } static inline void kvmppc_xive_native_exit_module(void) { } diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index f78d002f0fe0..e7f1ada1c3de 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -380,11 +380,6 @@ static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio) return -EBUSY; } -static u32 xive_vp(struct kvmppc_xive *xive, u32 server) -{ - return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server); -} - static u8 xive_lock_and_mask(struct kvmppc_xive *xive, struct kvmppc_xive_src_block *sb, 
struct kvmppc_xive_irq_state *state) @@ -430,8 +425,8 @@ static u8 xive_lock_and_mask(struct kvmppc_xive *xive, */ if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) { xive_native_configure_irq(hw_num, - xive_vp(xive, state->act_server), - MASKED, state->number); + kvmppc_xive_vp(xive, state->act_server), + MASKED, state->number); /* set old_p so we can track if an H_EOI was done */ state->old_p = true; state->old_q = false; @@ -486,8 +481,8 @@ static void xive_finish_unmask(struct kvmppc_xive *xive, */ if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) { xive_native_configure_irq(hw_num, - xive_vp(xive, state->act_server), - state->act_priority, state->number); + kvmppc_xive_vp(xive, state->act_server), + state->act_priority, state->number); /* If an EOI is needed, do it here */ if (!state->old_p) xive_vm_source_eoi(hw_num, xd); @@ -563,7 +558,7 @@ static int xive_target_interrupt(struct kvm *kvm, kvmppc_xive_select_irq(state, &hw_num, NULL); return xive_native_configure_irq(hw_num, - xive_vp(xive, server), + kvmppc_xive_vp(xive, server), prio, state->number); } @@ -951,7 +946,7 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, * which is fine for a never started interrupt. */ xive_native_configure_irq(hw_irq, - xive_vp(xive, state->act_server), + kvmppc_xive_vp(xive, state->act_server), state->act_priority, state->number); /* @@ -1027,7 +1022,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq, /* Reconfigure the IPI */ xive_native_configure_irq(state->ipi_number, - xive_vp(xive, state->act_server), + kvmppc_xive_vp(xive, state->act_server), state->act_priority, state->number); /* @@ -1049,7 +1044,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq, } EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped); -static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu) +void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct kvm *kvm = vcpu->kvm; @@ -1166,7 +1161,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev, xc->xive = xive; xc->vcpu = vcpu; xc->server_num = cpu; - xc->vp_id = xive_vp(xive, cpu); + xc->vp_id = kvmppc_xive_vp(xive, cpu); xc->mfrr = 0xff; xc->valid = true; @@ -1883,6 +1878,43 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) return 0; } +int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + unsigned int i; + + for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { + struct xive_q *q = &xc->queues[i]; + u32 i0, i1, idx; + + if (!q->qpage && !xc->esc_virq[i]) + continue; + + seq_printf(m, " [q%d]: ", i); + + if (q->qpage) { + idx = q->idx; + i0 = be32_to_cpup(q->qpage + idx); + idx = (idx + 1) & q->msk; + i1 = be32_to_cpup(q->qpage + idx); + seq_printf(m, "T=%d %08x %08x...\n", q->toggle, + i0, i1); + } + if (xc->esc_virq[i]) { + struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]); + struct xive_irq_data *xd = + irq_data_get_irq_handler_data(d); + u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET); + + seq_printf(m, "E:%c%c I(%d:%llx:%llx)", + (pq & XIVE_ESB_VAL_P) ? 'P' : 'p', + (pq & XIVE_ESB_VAL_Q) ? 
'Q' : 'q', + xc->esc_virq[i], pq, xd->eoi_page); + seq_puts(m, "\n"); + } + } + return 0; +} static int xive_debug_show(struct seq_file *m, void *private) { @@ -1908,7 +1940,6 @@ static int xive_debug_show(struct seq_file *m, void *private) kvm_for_each_vcpu(i, vcpu, kvm) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; - unsigned int i; if (!xc) continue; @@ -1918,33 +1949,8 @@ static int xive_debug_show(struct seq_file *m, void *private) xc->server_num, xc->cppr, xc->hw_cppr, xc->mfrr, xc->pending, xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); - for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { - struct xive_q *q = &xc->queues[i]; - u32 i0, i1, idx; - if (!q->qpage && !xc->esc_virq[i]) - continue; - - seq_printf(m, " [q%d]: ", i); - - if (q->qpage) { - idx = q->idx; - i0 = be32_to_cpup(q->qpage + idx); - idx = (idx + 1) & q->msk; - i1 = be32_to_cpup(q->qpage + idx); - seq_printf(m, "T=%d %08x %08x... \n", q->toggle, i0, i1); - } - if (xc->esc_virq[i]) { - struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]); - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); - u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET); - seq_printf(m, "E:%c%c I(%d:%llx:%llx)", - (pq & XIVE_ESB_VAL_P) ? 'P' : 'p', - (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q', - xc->esc_virq[i], pq, xd->eoi_page); - seq_printf(m, "\n"); - } - } + kvmppc_xive_debug_show_queues(m, vcpu); t_rm_h_xirr += xc->stat_rm_h_xirr; t_rm_h_ipoll += xc->stat_rm_h_ipoll; diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index a08ae6fd4c51..d366df69b9cb 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -198,6 +198,11 @@ static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmpp return xive->src_blocks[bid]; } +static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server) +{ + return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server); +} + /* * Mapping between guest priorities and host priorities * is as follow. 
@@ -248,5 +253,11 @@ extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server, extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr); extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr); +/* + * Common Xive routines for XICS-over-XIVE and XIVE native + */ +void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu); +int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu); + #endif /* CONFIG_KVM_XICS */ #endif /* _KVM_PPC_BOOK3S_XICS_H */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 751259394150..6fa73cfd9d9c 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -26,6 +26,134 @@ #include "book3s_xive.h" +static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct xive_q *q = &xc->queues[prio]; + + xive_native_disable_queue(xc->vp_id, q, prio); + if (q->qpage) { + put_page(virt_to_page(q->qpage)); + q->qpage = NULL; + } +} + +void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + int i; + + if (!kvmppc_xive_enabled(vcpu)) + return; + + if (!xc) + return; + + pr_devel("native_cleanup_vcpu(cpu=%d)\n", xc->server_num); + + /* Ensure no interrupt is still routed to that VP */ + xc->valid = false; + kvmppc_xive_disable_vcpu_interrupts(vcpu); + + /* Disable the VP */ + xive_native_disable_vp(xc->vp_id); + + /* Free the queues & associated interrupts */ + for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { + /* Free the escalation irq */ + if (xc->esc_virq[i]) { + free_irq(xc->esc_virq[i], vcpu); + irq_dispose_mapping(xc->esc_virq[i]); + kfree(xc->esc_virq_names[i]); + xc->esc_virq[i] = 0; + } + + /* Free the queue */ + kvmppc_xive_native_cleanup_queue(vcpu, i); + } + + /* Free the VP */ + kfree(xc); + + /* Cleanup the vcpu */ + vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; + vcpu->arch.xive_vcpu = NULL; +} + +int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev, + struct kvm_vcpu *vcpu, u32 server_num) +{ + struct kvmppc_xive *xive = dev->private; + struct kvmppc_xive_vcpu *xc = NULL; + int rc; + + pr_devel("native_connect_vcpu(server=%d)\n", server_num); + + if (dev->ops != &kvm_xive_native_ops) { + pr_devel("Wrong ops !\n"); + return -EPERM; + } + if (xive->kvm != vcpu->kvm) + return -EPERM; + if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) + return -EBUSY; + if (server_num >= KVM_MAX_VCPUS) { + pr_devel("Out of bounds !\n"); + return -EINVAL; + } + + mutex_lock(&vcpu->kvm->lock); + + if (kvmppc_xive_find_server(vcpu->kvm, server_num)) { + pr_devel("Duplicate !\n"); + rc = -EEXIST; + goto bail; + } + + xc = kzalloc(sizeof(*xc), GFP_KERNEL); + if (!xc) { + rc = -ENOMEM; + goto bail; + } + + vcpu->arch.xive_vcpu = xc; + xc->xive = xive; + xc->vcpu = vcpu; + xc->server_num = server_num; + + xc->vp_id = kvmppc_xive_vp(xive, server_num); + xc->valid = true; + vcpu->arch.irq_type = KVMPPC_IRQ_XIVE; + + rc = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); + if (rc) { + pr_err("Failed to get VP info from OPAL: %d\n", rc); + goto bail; + } + + /* + * Enable the VP first as the single escalation mode will + * affect escalation interrupts numbering + */ + rc = xive_native_enable_vp(xc->vp_id, xive->single_escalation); + if (rc) { + pr_err("Failed to enable VP in OPAL: %d\n", rc); + goto bail; + } + + /* Configure VCPU fields for use by assembly push/pull */ + 
vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); + vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); + + /* TODO: reset all queues to a clean state ? */ +bail: + mutex_unlock(&vcpu->kvm->lock); + if (rc) + kvmppc_xive_native_cleanup_vcpu(vcpu); + + return rc; +} + static int kvmppc_xive_native_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { @@ -114,10 +242,32 @@ static int xive_native_debug_show(struct seq_file *m, void *private) { struct kvmppc_xive *xive = m->private; struct kvm *kvm = xive->kvm; + struct kvm_vcpu *vcpu; + unsigned int i; if (!kvm) return 0; + seq_puts(m, "=========\nVCPU state\n=========\n"); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + + if (!xc) + continue; + + seq_printf(m, "cpu server %#x NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x\n", + xc->server_num, + vcpu->arch.xive_saved_state.nsr, + vcpu->arch.xive_saved_state.cppr, + vcpu->arch.xive_saved_state.ipb, + vcpu->arch.xive_saved_state.pipr, + vcpu->arch.xive_saved_state.w01, + (u32) vcpu->arch.xive_cam_word); + + kvmppc_xive_debug_show_queues(m, vcpu); + } + return 0; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 8885377ec3e0..b0858ee61460 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -570,6 +570,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_PPC_GET_CPU_CHAR: r = 1; break; +#ifdef CONFIG_KVM_XIVE + case KVM_CAP_PPC_IRQ_XIVE: + /* + * Return false until all the XIVE infrastructure is + * in place including support for migration. + */ + r = 0; + break; +#endif case KVM_CAP_PPC_ALLOC_HTAB: r = hv_enabled; @@ -753,6 +762,9 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) else kvmppc_xics_free_icp(vcpu); break; + case KVMPPC_IRQ_XIVE: + kvmppc_xive_native_cleanup_vcpu(vcpu); + break; } kvmppc_core_vcpu_free(vcpu); @@ -1941,6 +1953,30 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, break; } #endif /* CONFIG_KVM_XICS */ +#ifdef CONFIG_KVM_XIVE + case KVM_CAP_PPC_IRQ_XIVE: { + struct fd f; + struct kvm_device *dev; + + r = -EBADF; + f = fdget(cap->args[0]); + if (!f.file) + break; + + r = -ENXIO; + if (!xive_enabled()) + break; + + r = -EPERM; + dev = kvm_device_from_filp(f.file); + if (dev) + r = kvmppc_xive_native_connect_vcpu(dev, vcpu, + cap->args[1]); + + fdput(f); + break; + } +#endif /* CONFIG_KVM_XIVE */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE case KVM_CAP_PPC_FWNMI: r = -EINVAL; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index e6368163d3a0..52bf74a1616e 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -988,6 +988,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_VM_IPA_SIZE 165 #define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166 #define KVM_CAP_HYPERV_CPUID 167 +#define KVM_CAP_PPC_IRQ_XIVE 168 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3-59-g8ed1b From 4131f83c3d64e591014dad14c7f8070c538b9422 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 18 Apr 2019 12:39:29 +0200 Subject: KVM: PPC: Book3S HV: XIVE: add a control to initialize a source MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The XIVE KVM device maintains a list of interrupt sources for the VM which are allocated in the pool of generic interrupts (IPIs) of the main XIVE IC controller. These are used for the CPU IPIs as well as for virtual device interrupts. The IRQ number space is defined by QEMU. 
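As an illustration of how QEMU is expected to drive this control, here is a minimal userspace sketch, not part of the patch itself (the helper name is hypothetical, error handling is omitted, and the 64-bit value layout follows the KVM_DEV_XIVE_GRP_SOURCE documentation added later in this patch):

    #include <linux/kvm.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    /* Initialize guest IRQ 'irq' as an MSI (lsi == false) or LSI
     * source; bit 0 of the value selects the type. */
    static int xive_init_source(int xive_fd, __u64 irq, bool lsi)
    {
            __u64 val = lsi ? KVM_XIVE_LEVEL_SENSITIVE : 0;
            struct kvm_device_attr attr = {
                    .group = KVM_DEV_XIVE_GRP_SOURCE,
                    .attr  = irq,
                    .addr  = (uintptr_t)&val,
            };

            return ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
    }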
The XIVE device reuses the source structures of the XICS-on-XIVE device for the source blocks (2-level tree) and for the source interrupts. Under XIVE native, the source interrupt caches mostly configuration information and is less used than under the XICS-on-XIVE device in which hcalls are still necessary at run-time. When a source is initialized in KVM, an IPI interrupt source is simply allocated at the OPAL level and then MASKED. KVM only needs to know about its type: LSI or MSI. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/devices/xive.txt | 15 ++++ arch/powerpc/include/uapi/asm/kvm.h | 5 ++ arch/powerpc/kvm/book3s_xive.c | 8 +-- arch/powerpc/kvm/book3s_xive.h | 10 +++ arch/powerpc/kvm/book3s_xive_native.c | 106 +++++++++++++++++++++++++++++ 5 files changed, 140 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/include') diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virtual/kvm/devices/xive.txt index fdbd2ff92a88..cd8bfc37b72e 100644 --- a/Documentation/virtual/kvm/devices/xive.txt +++ b/Documentation/virtual/kvm/devices/xive.txt @@ -17,3 +17,18 @@ the legacy interrupt mode, referred as XICS (POWER7/8). 1. KVM_DEV_XIVE_GRP_CTRL Provides global controls on the device + + 2. KVM_DEV_XIVE_GRP_SOURCE (write only) + Initializes a new source in the XIVE device and masks it. + Attributes: + Interrupt source number (64-bit) + The kvm_device_attr.addr points to a __u64 value: + bits: | 63 .... 2 | 1 | 0 + values: | unused | level | type + - type: 0:MSI 1:LSI + - level: assertion level in case of an LSI. + Errors: + -E2BIG: Interrupt source number is out of range + -ENOMEM: Could not create a new source block + -EFAULT: Invalid user pointer for attr->addr.
+ -ENXIO: Could not allocate underlying HW interrupt diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index be0ce1f17625..d468294c2a67 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -679,5 +679,10 @@ struct kvm_ppc_cpu_char { /* POWER9 XIVE Native Interrupt Controller */ #define KVM_DEV_XIVE_GRP_CTRL 1 +#define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */ + +/* Layout of 64-bit XIVE source attribute values */ +#define KVM_XIVE_LEVEL_SENSITIVE (1ULL << 0) +#define KVM_XIVE_LEVEL_ASSERTED (1ULL << 1) #endif /* __LINUX_KVM_POWERPC_H */ diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index e7f1ada1c3de..6c9f9fd0855f 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1480,8 +1480,8 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr) return 0; } -static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive, - int irq) +struct kvmppc_xive_src_block *kvmppc_xive_create_src_block( + struct kvmppc_xive *xive, int irq) { struct kvm *kvm = xive->kvm; struct kvmppc_xive_src_block *sb; @@ -1560,7 +1560,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) sb = kvmppc_xive_find_source(xive, irq, &idx); if (!sb) { pr_devel("No source, creating source block...\n"); - sb = xive_create_src_block(xive, irq); + sb = kvmppc_xive_create_src_block(xive, irq); if (!sb) { pr_devel("Failed to create block...\n"); return -ENOMEM; @@ -1784,7 +1784,7 @@ static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd) xive_cleanup_irq_data(xd); } -static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) +void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) { int i; diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index d366df69b9cb..1be921cb5dcb 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -12,6 +12,13 @@ #ifdef CONFIG_KVM_XICS #include "book3s_xics.h" +/* + * The XIVE Interrupt source numbers are within the range 0 to + * KVMPPC_XICS_NR_IRQS. + */ +#define KVMPPC_XIVE_FIRST_IRQ 0 +#define KVMPPC_XIVE_NR_IRQS KVMPPC_XICS_NR_IRQS + /* * State for one guest irq source. 
* @@ -258,6 +265,9 @@ extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr); */ void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu); int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu); +struct kvmppc_xive_src_block *kvmppc_xive_create_src_block( + struct kvmppc_xive *xive, int irq); +void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb); #endif /* CONFIG_KVM_XICS */ #endif /* _KVM_PPC_BOOK3S_XICS_H */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 6fa73cfd9d9c..5f2bd6c137b7 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -26,6 +26,17 @@ #include "book3s_xive.h" +static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset) +{ + u64 val; + + if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) + offset |= offset << 4; + + val = in_be64(xd->eoi_mmio + offset); + return (u8)val; +} + static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; @@ -154,12 +165,94 @@ bail: return rc; } +static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq, + u64 addr) +{ + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u64 __user *ubufp = (u64 __user *) addr; + u64 val; + u16 idx; + int rc; + + pr_devel("%s irq=0x%lx\n", __func__, irq); + + if (irq < KVMPPC_XIVE_FIRST_IRQ || irq >= KVMPPC_XIVE_NR_IRQS) + return -E2BIG; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) { + pr_debug("No source, creating source block...\n"); + sb = kvmppc_xive_create_src_block(xive, irq); + if (!sb) { + pr_err("Failed to create block...\n"); + return -ENOMEM; + } + } + state = &sb->irq_state[idx]; + + if (get_user(val, ubufp)) { + pr_err("fault getting user info !\n"); + return -EFAULT; + } + + arch_spin_lock(&sb->lock); + + /* + * If the source doesn't already have an IPI, allocate + * one and get the corresponding data + */ + if (!state->ipi_number) { + state->ipi_number = xive_native_alloc_irq(); + if (state->ipi_number == 0) { + pr_err("Failed to allocate IRQ !\n"); + rc = -ENXIO; + goto unlock; + } + xive_native_populate_irq_data(state->ipi_number, + &state->ipi_data); + pr_debug("%s allocated hw_irq=0x%x for irq=0x%lx\n", __func__, + state->ipi_number, irq); + } + + /* Restore LSI state */ + if (val & KVM_XIVE_LEVEL_SENSITIVE) { + state->lsi = true; + if (val & KVM_XIVE_LEVEL_ASSERTED) + state->asserted = true; + pr_devel(" LSI ! 
Asserted=%d\n", state->asserted); + } + + /* Mask IRQ to start with */ + state->act_server = 0; + state->act_priority = MASKED; + xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); + xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); + + /* Increment the number of valid sources and mark this one valid */ + if (!state->valid) + xive->src_count++; + state->valid = true; + + rc = 0; + +unlock: + arch_spin_unlock(&sb->lock); + + return rc; +} + static int kvmppc_xive_native_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { + struct kvmppc_xive *xive = dev->private; + switch (attr->group) { case KVM_DEV_XIVE_GRP_CTRL: break; + case KVM_DEV_XIVE_GRP_SOURCE: + return kvmppc_xive_native_set_source(xive, attr->attr, + attr->addr); } return -ENXIO; } @@ -176,6 +269,11 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev, switch (attr->group) { case KVM_DEV_XIVE_GRP_CTRL: break; + case KVM_DEV_XIVE_GRP_SOURCE: + if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ && + attr->attr < KVMPPC_XIVE_NR_IRQS) + return 0; + break; } return -ENXIO; } @@ -184,6 +282,7 @@ static void kvmppc_xive_native_free(struct kvm_device *dev) { struct kvmppc_xive *xive = dev->private; struct kvm *kvm = xive->kvm; + int i; debugfs_remove(xive->dentry); @@ -192,6 +291,13 @@ static void kvmppc_xive_native_free(struct kvm_device *dev) if (kvm) kvm->arch.xive = NULL; + for (i = 0; i <= xive->max_sbid; i++) { + if (xive->src_blocks[i]) + kvmppc_xive_free_sources(xive->src_blocks[i]); + kfree(xive->src_blocks[i]); + xive->src_blocks[i] = NULL; + } + if (xive->vp_base != XIVE_INVALID_VP) xive_native_free_vp_block(xive->vp_base); -- cgit v1.2.3-59-g8ed1b From e8676ce50e224d507946b1c535bc13584e6b49ff Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 18 Apr 2019 12:39:30 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Add a control to configure a source MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This control will be used by the H_INT_SET_SOURCE_CONFIG hcall from QEMU to configure the target of a source and also to restore the configuration of a source when migrating the VM. The XIVE source interrupt structure is extended with the value of the Effective Interrupt Source Number. The EISN is the interrupt number pushed in the event queue that the guest OS will use to dispatch events internally. Caching the EISN value in KVM eases the test when checking if a reconfiguration is indeed needed. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/devices/xive.txt | 21 +++++++ arch/powerpc/include/uapi/asm/kvm.h | 11 ++++ arch/powerpc/kvm/book3s_xive.c | 5 +- arch/powerpc/kvm/book3s_xive.h | 4 ++ arch/powerpc/kvm/book3s_xive_native.c | 97 ++++++++++++++++++++++++++++++ 5 files changed, 136 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include') diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virtual/kvm/devices/xive.txt index cd8bfc37b72e..33c64b2cdbe8 100644 --- a/Documentation/virtual/kvm/devices/xive.txt +++ b/Documentation/virtual/kvm/devices/xive.txt @@ -32,3 +32,24 @@ the legacy interrupt mode, referred as XICS (POWER7/8). -ENOMEM: Could not create a new source block -EFAULT: Invalid user pointer for attr->addr. -ENXIO: Could not allocate underlying HW interrupt + + 3. KVM_DEV_XIVE_GRP_SOURCE_CONFIG (write only) + Configures source targeting + Attributes: + Interrupt source number (64-bit) + The kvm_device_attr.addr points to a __u64 value: + bits: | 63 .... 
33 | 32 | 31 .. 3 | 2 .. 0 + values: | eisn | mask | server | priority + - priority: 0-7 interrupt priority level + - server: CPU number chosen to handle the interrupt + - mask: mask flag (unused) + - eisn: Effective Interrupt Source Number + Errors: + -ENOENT: Unknown source number + -EINVAL: Not initialized source number + -EINVAL: Invalid priority + -EINVAL: Invalid CPU number. + -EFAULT: Invalid user pointer for attr->addr. + -ENXIO: CPU event queues not configured or configuration of the + underlying HW interrupt failed + -EBUSY: No CPU available to serve interrupt diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index d468294c2a67..e8161e21629b 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -680,9 +680,20 @@ struct kvm_ppc_cpu_char { /* POWER9 XIVE Native Interrupt Controller */ #define KVM_DEV_XIVE_GRP_CTRL 1 #define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */ +#define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */ /* Layout of 64-bit XIVE source attribute values */ #define KVM_XIVE_LEVEL_SENSITIVE (1ULL << 0) #define KVM_XIVE_LEVEL_ASSERTED (1ULL << 1) +/* Layout of 64-bit XIVE source configuration attribute values */ +#define KVM_XIVE_SOURCE_PRIORITY_SHIFT 0 +#define KVM_XIVE_SOURCE_PRIORITY_MASK 0x7 +#define KVM_XIVE_SOURCE_SERVER_SHIFT 3 +#define KVM_XIVE_SOURCE_SERVER_MASK 0xfffffff8ULL +#define KVM_XIVE_SOURCE_MASKED_SHIFT 32 +#define KVM_XIVE_SOURCE_MASKED_MASK 0x100000000ULL +#define KVM_XIVE_SOURCE_EISN_SHIFT 33 +#define KVM_XIVE_SOURCE_EISN_MASK 0xfffffffe00000000ULL + #endif /* __LINUX_KVM_POWERPC_H */ diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 6c9f9fd0855f..e09f3addffe5 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -342,7 +342,7 @@ static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio) return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY; } -static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio) +int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio) { struct kvm_vcpu *vcpu; int i, rc; @@ -530,7 +530,7 @@ static int xive_target_interrupt(struct kvm *kvm, * priority. The count for that new target will have * already been incremented. */ - rc = xive_select_target(kvm, &server, prio); + rc = kvmppc_xive_select_target(kvm, &server, prio); /* * We failed to find a target ? Not much we can do @@ -1504,6 +1504,7 @@ struct kvmppc_xive_src_block *kvmppc_xive_create_src_block( for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i; + sb->irq_state[i].eisn = 0; sb->irq_state[i].guest_priority = MASKED; sb->irq_state[i].saved_priority = MASKED; sb->irq_state[i].act_priority = MASKED; diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index 1be921cb5dcb..ae26fe653d98 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -61,6 +61,9 @@ struct kvmppc_xive_irq_state { bool saved_p; bool saved_q; u8 saved_scan_prio; + + /* Xive native */ + u32 eisn; /* Guest Effective IRQ number */ }; /* Select the "right" interrupt (IPI vs. 
passthrough) */ @@ -268,6 +271,7 @@ int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu); struct kvmppc_xive_src_block *kvmppc_xive_create_src_block( struct kvmppc_xive *xive, int irq); void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb); +int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio); #endif /* CONFIG_KVM_XICS */ #endif /* _KVM_PPC_BOOK3S_XICS_H */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 5f2bd6c137b7..492825a35958 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -242,6 +242,99 @@ unlock: return rc; } +static int kvmppc_xive_native_update_source_config(struct kvmppc_xive *xive, + struct kvmppc_xive_src_block *sb, + struct kvmppc_xive_irq_state *state, + u32 server, u8 priority, bool masked, + u32 eisn) +{ + struct kvm *kvm = xive->kvm; + u32 hw_num; + int rc = 0; + + arch_spin_lock(&sb->lock); + + if (state->act_server == server && state->act_priority == priority && + state->eisn == eisn) + goto unlock; + + pr_devel("new_act_prio=%d new_act_server=%d mask=%d act_server=%d act_prio=%d\n", + priority, server, masked, state->act_server, + state->act_priority); + + kvmppc_xive_select_irq(state, &hw_num, NULL); + + if (priority != MASKED && !masked) { + rc = kvmppc_xive_select_target(kvm, &server, priority); + if (rc) + goto unlock; + + state->act_priority = priority; + state->act_server = server; + state->eisn = eisn; + + rc = xive_native_configure_irq(hw_num, + kvmppc_xive_vp(xive, server), + priority, eisn); + } else { + state->act_priority = MASKED; + state->act_server = 0; + state->eisn = 0; + + rc = xive_native_configure_irq(hw_num, 0, MASKED, 0); + } + +unlock: + arch_spin_unlock(&sb->lock); + return rc; +} + +static int kvmppc_xive_native_set_source_config(struct kvmppc_xive *xive, + long irq, u64 addr) +{ + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u64 __user *ubufp = (u64 __user *) addr; + u16 src; + u64 kvm_cfg; + u32 server; + u8 priority; + bool masked; + u32 eisn; + + sb = kvmppc_xive_find_source(xive, irq, &src); + if (!sb) + return -ENOENT; + + state = &sb->irq_state[src]; + + if (!state->valid) + return -EINVAL; + + if (get_user(kvm_cfg, ubufp)) + return -EFAULT; + + pr_devel("%s irq=0x%lx cfg=%016llx\n", __func__, irq, kvm_cfg); + + priority = (kvm_cfg & KVM_XIVE_SOURCE_PRIORITY_MASK) >> + KVM_XIVE_SOURCE_PRIORITY_SHIFT; + server = (kvm_cfg & KVM_XIVE_SOURCE_SERVER_MASK) >> + KVM_XIVE_SOURCE_SERVER_SHIFT; + masked = (kvm_cfg & KVM_XIVE_SOURCE_MASKED_MASK) >> + KVM_XIVE_SOURCE_MASKED_SHIFT; + eisn = (kvm_cfg & KVM_XIVE_SOURCE_EISN_MASK) >> + KVM_XIVE_SOURCE_EISN_SHIFT; + + if (priority != xive_prio_from_guest(priority)) { + pr_err("invalid priority for queue %d for VCPU %d\n", + priority, server); + return -EINVAL; + } + + return kvmppc_xive_native_update_source_config(xive, sb, state, server, + priority, masked, eisn); +} + static int kvmppc_xive_native_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { @@ -253,6 +346,9 @@ static int kvmppc_xive_native_set_attr(struct kvm_device *dev, case KVM_DEV_XIVE_GRP_SOURCE: return kvmppc_xive_native_set_source(xive, attr->attr, attr->addr); + case KVM_DEV_XIVE_GRP_SOURCE_CONFIG: + return kvmppc_xive_native_set_source_config(xive, attr->attr, + attr->addr); } return -ENXIO; } @@ -270,6 +366,7 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev, case KVM_DEV_XIVE_GRP_CTRL: break; case KVM_DEV_XIVE_GRP_SOURCE: + case 
KVM_DEV_XIVE_GRP_SOURCE_CONFIG: if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ && attr->attr < KVMPPC_XIVE_NR_IRQS) return 0; -- cgit v1.2.3-59-g8ed1b From 13ce3297c5766b9541b6a7a255794c5168a7ae1a Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 18 Apr 2019 12:39:31 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Add controls for the EQ configuration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These controls will be used by the H_INT_SET_QUEUE_CONFIG and H_INT_GET_QUEUE_CONFIG hcalls from QEMU to configure the underlying Event Queue in the XIVE IC. They will also be used to restore the configuration of the XIVE EQs and to capture the internal run-time state of the EQs. Both 'get' and 'set' rely on an OPAL call to access the EQ toggle bit and EQ index which are updated by the XIVE IC when event notifications are enqueued in the EQ. The value of the guest physical address of the event queue is saved in the XIVE internal xive_q structure for later use. That is when migration needs to mark the EQ pages dirty to capture a consistent memory state of the VM. To be noted that H_INT_SET_QUEUE_CONFIG does not require the extra OPAL call setting the EQ toggle bit and EQ index to configure the EQ, but restoring the EQ state will. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/devices/xive.txt | 34 ++++ arch/powerpc/include/asm/xive.h | 2 + arch/powerpc/include/uapi/asm/kvm.h | 19 +++ arch/powerpc/kvm/book3s_xive.c | 15 +- arch/powerpc/kvm/book3s_xive.h | 2 + arch/powerpc/kvm/book3s_xive_native.c | 249 +++++++++++++++++++++++++++++ 6 files changed, 315 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/include') diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virtual/kvm/devices/xive.txt index 33c64b2cdbe8..cc13bfd5cf53 100644 --- a/Documentation/virtual/kvm/devices/xive.txt +++ b/Documentation/virtual/kvm/devices/xive.txt @@ -53,3 +53,37 @@ the legacy interrupt mode, referred as XICS (POWER7/8). -ENXIO: CPU event queues not configured or configuration of the underlying HW interrupt failed -EBUSY: No CPU available to serve interrupt + + 4. KVM_DEV_XIVE_GRP_EQ_CONFIG (read-write) + Configures an event queue of a CPU + Attributes: + EQ descriptor identifier (64-bit) + The EQ descriptor identifier is a tuple (server, priority) : + bits: | 63 .... 32 | 31 .. 3 | 2 .. 0 + values: | unused | server | priority + The kvm_device_attr.addr points to : + struct kvm_ppc_xive_eq { + __u32 flags; + __u32 qshift; + __u64 qaddr; + __u32 qtoggle; + __u32 qindex; + __u8 pad[40]; + }; + - flags: queue flags + KVM_XIVE_EQ_ALWAYS_NOTIFY (required) + forces notification without using the coalescing mechanism + provided by the XIVE END ESBs. + - qshift: queue size (power of 2) + - qaddr: real address of queue + - qtoggle: current queue toggle bit + - qindex: current queue index + - pad: reserved for future use + Errors: + -ENOENT: Invalid CPU number + -EINVAL: Invalid priority + -EINVAL: Invalid flags + -EINVAL: Invalid queue size + -EINVAL: Invalid queue address + -EFAULT: Invalid user pointer for attr->addr. 
+ -EIO: Configuration of the underlying HW failed diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index b579a943407b..c4e88abd3b67 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -73,6 +73,8 @@ struct xive_q { u32 esc_irq; atomic_t count; atomic_t pending_count; + u64 guest_qaddr; + u32 guest_qshift; }; /* Global enable flags for the XIVE support */ diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index e8161e21629b..85005400fd86 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -681,6 +681,7 @@ struct kvm_ppc_cpu_char { #define KVM_DEV_XIVE_GRP_CTRL 1 #define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */ #define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */ +#define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */ /* Layout of 64-bit XIVE source attribute values */ #define KVM_XIVE_LEVEL_SENSITIVE (1ULL << 0) @@ -696,4 +697,22 @@ struct kvm_ppc_cpu_char { #define KVM_XIVE_SOURCE_EISN_SHIFT 33 #define KVM_XIVE_SOURCE_EISN_MASK 0xfffffffe00000000ULL +/* Layout of 64-bit EQ identifier */ +#define KVM_XIVE_EQ_PRIORITY_SHIFT 0 +#define KVM_XIVE_EQ_PRIORITY_MASK 0x7 +#define KVM_XIVE_EQ_SERVER_SHIFT 3 +#define KVM_XIVE_EQ_SERVER_MASK 0xfffffff8ULL + +/* Layout of EQ configuration values (64 bytes) */ +struct kvm_ppc_xive_eq { + __u32 flags; + __u32 qshift; + __u64 qaddr; + __u32 qtoggle; + __u32 qindex; + __u8 pad[40]; +}; + +#define KVM_XIVE_EQ_ALWAYS_NOTIFY 0x00000001 + #endif /* __LINUX_KVM_POWERPC_H */ diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index e09f3addffe5..c1b7aa7dbc28 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -166,7 +166,8 @@ static irqreturn_t xive_esc_irq(int irq, void *data) return IRQ_HANDLED; } -static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio) +int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, + bool single_escalation) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct xive_q *q = &xc->queues[prio]; @@ -185,7 +186,7 @@ static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio) return -EIO; } - if (xc->xive->single_escalation) + if (single_escalation) name = kasprintf(GFP_KERNEL, "kvm-%d-%d", vcpu->kvm->arch.lpid, xc->server_num); else @@ -217,7 +218,7 @@ static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio) * interrupt, thus leaving it effectively masked after * it fires once. 
*/ - if (xc->xive->single_escalation) { + if (single_escalation) { struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); @@ -291,7 +292,8 @@ static int xive_check_provisioning(struct kvm *kvm, u8 prio) continue; rc = xive_provision_queue(vcpu, prio); if (rc == 0 && !xive->single_escalation) - xive_attach_escalation(vcpu, prio); + kvmppc_xive_attach_escalation(vcpu, prio, + xive->single_escalation); if (rc) return rc; } @@ -1214,7 +1216,8 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev, if (xive->qmap & (1 << i)) { r = xive_provision_queue(vcpu, i); if (r == 0 && !xive->single_escalation) - xive_attach_escalation(vcpu, i); + kvmppc_xive_attach_escalation( + vcpu, i, xive->single_escalation); if (r) goto bail; } else { @@ -1229,7 +1232,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev, } /* If not done above, attach priority 0 escalation */ - r = xive_attach_escalation(vcpu, 0); + r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation); if (r) goto bail; diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index ae26fe653d98..622f594d93e1 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -272,6 +272,8 @@ struct kvmppc_xive_src_block *kvmppc_xive_create_src_block( struct kvmppc_xive *xive, int irq); void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb); int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio); +int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, + bool single_escalation); #endif /* CONFIG_KVM_XICS */ #endif /* _KVM_PPC_BOOK3S_XICS_H */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 492825a35958..3e7cdcacc932 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -335,6 +335,243 @@ static int kvmppc_xive_native_set_source_config(struct kvmppc_xive *xive, priority, masked, eisn); } +static int xive_native_validate_queue_size(u32 qshift) +{ + /* + * We only support 64K pages for the moment. 
This is also + * advertised in the DT property "ibm,xive-eq-sizes" + */ + switch (qshift) { + case 0: /* EQ reset */ + case 16: + return 0; + case 12: + case 21: + case 24: + default: + return -EINVAL; + } +} + +static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive, + long eq_idx, u64 addr) +{ + struct kvm *kvm = xive->kvm; + struct kvm_vcpu *vcpu; + struct kvmppc_xive_vcpu *xc; + void __user *ubufp = (void __user *) addr; + u32 server; + u8 priority; + struct kvm_ppc_xive_eq kvm_eq; + int rc; + __be32 *qaddr = 0; + struct page *page; + struct xive_q *q; + gfn_t gfn; + unsigned long page_size; + + /* + * Demangle priority/server tuple from the EQ identifier + */ + priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >> + KVM_XIVE_EQ_PRIORITY_SHIFT; + server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >> + KVM_XIVE_EQ_SERVER_SHIFT; + + if (copy_from_user(&kvm_eq, ubufp, sizeof(kvm_eq))) + return -EFAULT; + + vcpu = kvmppc_xive_find_server(kvm, server); + if (!vcpu) { + pr_err("Can't find server %d\n", server); + return -ENOENT; + } + xc = vcpu->arch.xive_vcpu; + + if (priority != xive_prio_from_guest(priority)) { + pr_err("Trying to restore invalid queue %d for VCPU %d\n", + priority, server); + return -EINVAL; + } + q = &xc->queues[priority]; + + pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n", + __func__, server, priority, kvm_eq.flags, + kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex); + + /* + * sPAPR specifies a "Unconditional Notify (n) flag" for the + * H_INT_SET_QUEUE_CONFIG hcall which forces notification + * without using the coalescing mechanisms provided by the + * XIVE END ESBs. This is required on KVM as notification + * using the END ESBs is not supported. + */ + if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) { + pr_err("invalid flags %d\n", kvm_eq.flags); + return -EINVAL; + } + + rc = xive_native_validate_queue_size(kvm_eq.qshift); + if (rc) { + pr_err("invalid queue size %d\n", kvm_eq.qshift); + return rc; + } + + /* reset queue and disable queueing */ + if (!kvm_eq.qshift) { + q->guest_qaddr = 0; + q->guest_qshift = 0; + + rc = xive_native_configure_queue(xc->vp_id, q, priority, + NULL, 0, true); + if (rc) { + pr_err("Failed to reset queue %d for VCPU %d: %d\n", + priority, xc->server_num, rc); + return rc; + } + + if (q->qpage) { + put_page(virt_to_page(q->qpage)); + q->qpage = NULL; + } + + return 0; + } + + if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) { + pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr, + 1ull << kvm_eq.qshift); + return -EINVAL; + } + + gfn = gpa_to_gfn(kvm_eq.qaddr); + page = gfn_to_page(kvm, gfn); + if (is_error_page(page)) { + pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr); + return -EINVAL; + } + + page_size = kvm_host_page_size(kvm, gfn); + if (1ull << kvm_eq.qshift > page_size) { + pr_warn("Incompatible host page size %lx!\n", page_size); + return -EINVAL; + } + + qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK); + + /* + * Backup the queue page guest address to the mark EQ page + * dirty for migration. + */ + q->guest_qaddr = kvm_eq.qaddr; + q->guest_qshift = kvm_eq.qshift; + + /* + * Unconditional Notification is forced by default at the + * OPAL level because the use of END ESBs is not supported by + * Linux. 
+ */ + rc = xive_native_configure_queue(xc->vp_id, q, priority, + (__be32 *) qaddr, kvm_eq.qshift, true); + if (rc) { + pr_err("Failed to configure queue %d for VCPU %d: %d\n", + priority, xc->server_num, rc); + put_page(page); + return rc; + } + + /* + * Only restore the queue state when needed. When doing the + * H_INT_SET_SOURCE_CONFIG hcall, it should not. + */ + if (kvm_eq.qtoggle != 1 || kvm_eq.qindex != 0) { + rc = xive_native_set_queue_state(xc->vp_id, priority, + kvm_eq.qtoggle, + kvm_eq.qindex); + if (rc) + goto error; + } + + rc = kvmppc_xive_attach_escalation(vcpu, priority, + xive->single_escalation); +error: + if (rc) + kvmppc_xive_native_cleanup_queue(vcpu, priority); + return rc; +} + +static int kvmppc_xive_native_get_queue_config(struct kvmppc_xive *xive, + long eq_idx, u64 addr) +{ + struct kvm *kvm = xive->kvm; + struct kvm_vcpu *vcpu; + struct kvmppc_xive_vcpu *xc; + struct xive_q *q; + void __user *ubufp = (u64 __user *) addr; + u32 server; + u8 priority; + struct kvm_ppc_xive_eq kvm_eq; + u64 qaddr; + u64 qshift; + u64 qeoi_page; + u32 escalate_irq; + u64 qflags; + int rc; + + /* + * Demangle priority/server tuple from the EQ identifier + */ + priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >> + KVM_XIVE_EQ_PRIORITY_SHIFT; + server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >> + KVM_XIVE_EQ_SERVER_SHIFT; + + vcpu = kvmppc_xive_find_server(kvm, server); + if (!vcpu) { + pr_err("Can't find server %d\n", server); + return -ENOENT; + } + xc = vcpu->arch.xive_vcpu; + + if (priority != xive_prio_from_guest(priority)) { + pr_err("invalid priority for queue %d for VCPU %d\n", + priority, server); + return -EINVAL; + } + q = &xc->queues[priority]; + + memset(&kvm_eq, 0, sizeof(kvm_eq)); + + if (!q->qpage) + return 0; + + rc = xive_native_get_queue_info(xc->vp_id, priority, &qaddr, &qshift, + &qeoi_page, &escalate_irq, &qflags); + if (rc) + return rc; + + kvm_eq.flags = 0; + if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY) + kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY; + + kvm_eq.qshift = q->guest_qshift; + kvm_eq.qaddr = q->guest_qaddr; + + rc = xive_native_get_queue_state(xc->vp_id, priority, &kvm_eq.qtoggle, + &kvm_eq.qindex); + if (rc) + return rc; + + pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n", + __func__, server, priority, kvm_eq.flags, + kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex); + + if (copy_to_user(ubufp, &kvm_eq, sizeof(kvm_eq))) + return -EFAULT; + + return 0; +} + static int kvmppc_xive_native_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { @@ -349,6 +586,9 @@ static int kvmppc_xive_native_set_attr(struct kvm_device *dev, case KVM_DEV_XIVE_GRP_SOURCE_CONFIG: return kvmppc_xive_native_set_source_config(xive, attr->attr, attr->addr); + case KVM_DEV_XIVE_GRP_EQ_CONFIG: + return kvmppc_xive_native_set_queue_config(xive, attr->attr, + attr->addr); } return -ENXIO; } @@ -356,6 +596,13 @@ static int kvmppc_xive_native_set_attr(struct kvm_device *dev, static int kvmppc_xive_native_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { + struct kvmppc_xive *xive = dev->private; + + switch (attr->group) { + case KVM_DEV_XIVE_GRP_EQ_CONFIG: + return kvmppc_xive_native_get_queue_config(xive, attr->attr, + attr->addr); + } return -ENXIO; } @@ -371,6 +618,8 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev, attr->attr < KVMPPC_XIVE_NR_IRQS) return 0; break; + case KVM_DEV_XIVE_GRP_EQ_CONFIG: + return 0; } return -ENXIO; } -- cgit v1.2.3-59-g8ed1b From 5ca806474859a0e94584b3a63f9509a25758408e Mon Sep 17 
00:00:00 2001 From: Cédric Le Goater Date: Thu, 18 Apr 2019 12:39:32 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Add a global reset control MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This control is to be used by the H_INT_RESET hcall from QEMU. Its purpose is to clear all configuration of the sources and EQs. This is necessary in case of a kexec (for a kdump kernel for instance) to make sure that no remaining configuration is left from the previous boot setup so that the new kernel can start safely from a clean state. The queue 7 is ignored when the XIVE device is configured to run in single escalation mode. Prio 7 is used by escalations. The XIVE VP is kept enabled as the vCPU is still active and connected to the XIVE device. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/devices/xive.txt | 5 ++ arch/powerpc/include/uapi/asm/kvm.h | 1 + arch/powerpc/kvm/book3s_xive_native.c | 85 ++++++++++++++++++++++++++++++ 3 files changed, 91 insertions(+) (limited to 'arch/powerpc/include') diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virtual/kvm/devices/xive.txt index cc13bfd5cf53..429cbc4cf960 100644 --- a/Documentation/virtual/kvm/devices/xive.txt +++ b/Documentation/virtual/kvm/devices/xive.txt @@ -17,6 +17,11 @@ the legacy interrupt mode, referred as XICS (POWER7/8). 1. KVM_DEV_XIVE_GRP_CTRL Provides global controls on the device + Attributes: + 1.1 KVM_DEV_XIVE_RESET (write only) + Resets the interrupt controller configuration for sources and event + queues. To be used by kexec and kdump. + Errors: none 2. KVM_DEV_XIVE_GRP_SOURCE (write only) Initializes a new source in the XIVE device and mask it. diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 85005400fd86..f045f9dee42e 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -679,6 +679,7 @@ struct kvm_ppc_cpu_char { /* POWER9 XIVE Native Interrupt Controller */ #define KVM_DEV_XIVE_GRP_CTRL 1 +#define KVM_DEV_XIVE_RESET 1 #define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */ #define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */ #define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 3e7cdcacc932..b9597d80c95a 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -572,6 +572,83 @@ static int kvmppc_xive_native_get_queue_config(struct kvmppc_xive *xive, return 0; } +static void kvmppc_xive_reset_sources(struct kvmppc_xive_src_block *sb) +{ + int i; + + for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { + struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; + + if (!state->valid) + continue; + + if (state->act_priority == MASKED) + continue; + + state->eisn = 0; + state->act_server = 0; + state->act_priority = MASKED; + xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); + xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); + if (state->pt_number) { + xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01); + xive_native_configure_irq(state->pt_number, + 0, MASKED, 0); + } + } +} + +static int kvmppc_xive_reset(struct kvmppc_xive *xive) +{ + struct kvm *kvm = xive->kvm; + struct kvm_vcpu *vcpu; + unsigned int i; + + pr_devel("%s\n", __func__); + + mutex_lock(&kvm->lock); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct kvmppc_xive_vcpu *xc 
= vcpu->arch.xive_vcpu; + unsigned int prio; + + if (!xc) + continue; + + kvmppc_xive_disable_vcpu_interrupts(vcpu); + + for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) { + + /* Single escalation, no queue 7 */ + if (prio == 7 && xive->single_escalation) + break; + + if (xc->esc_virq[prio]) { + free_irq(xc->esc_virq[prio], vcpu); + irq_dispose_mapping(xc->esc_virq[prio]); + kfree(xc->esc_virq_names[prio]); + xc->esc_virq[prio] = 0; + } + + kvmppc_xive_native_cleanup_queue(vcpu, prio); + } + } + + for (i = 0; i <= xive->max_sbid; i++) { + struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; + + if (sb) { + arch_spin_lock(&sb->lock); + kvmppc_xive_reset_sources(sb); + arch_spin_unlock(&sb->lock); + } + } + + mutex_unlock(&kvm->lock); + + return 0; +} + static int kvmppc_xive_native_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { @@ -579,6 +656,10 @@ static int kvmppc_xive_native_set_attr(struct kvm_device *dev, switch (attr->group) { case KVM_DEV_XIVE_GRP_CTRL: + switch (attr->attr) { + case KVM_DEV_XIVE_RESET: + return kvmppc_xive_reset(xive); + } break; case KVM_DEV_XIVE_GRP_SOURCE: return kvmppc_xive_native_set_source(xive, attr->attr, @@ -611,6 +692,10 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev, { switch (attr->group) { case KVM_DEV_XIVE_GRP_CTRL: + switch (attr->attr) { + case KVM_DEV_XIVE_RESET: + return 0; + } break; case KVM_DEV_XIVE_GRP_SOURCE: case KVM_DEV_XIVE_GRP_SOURCE_CONFIG: -- cgit v1.2.3-59-g8ed1b From 7b46b6169ab80f8f415a0ca2ea4aa7f1afdcc4f3 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 18 Apr 2019 12:39:33 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Add a control to sync the sources MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This control will be used by the H_INT_SYNC hcall from QEMU to flush event notifications on the XIVE IC owning the source. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/devices/xive.txt | 8 +++++++ arch/powerpc/include/uapi/asm/kvm.h | 1 + arch/powerpc/kvm/book3s_xive_native.c | 36 ++++++++++++++++++++++++++++++ 3 files changed, 45 insertions(+) (limited to 'arch/powerpc/include') diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virtual/kvm/devices/xive.txt index 429cbc4cf960..1e7f19d7594b 100644 --- a/Documentation/virtual/kvm/devices/xive.txt +++ b/Documentation/virtual/kvm/devices/xive.txt @@ -92,3 +92,11 @@ the legacy interrupt mode, referred as XICS (POWER7/8). -EINVAL: Invalid queue address -EFAULT: Invalid user pointer for attr->addr. -EIO: Configuration of the underlying HW failed + + 5. 
KVM_DEV_XIVE_GRP_SOURCE_SYNC (write only) + Synchronize the source to flush event notifications + Attributes: + Interrupt source number (64-bit) + Errors: + -ENOENT: Unknown source number + -EINVAL: Not initialized source number diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index f045f9dee42e..e4abe30f6fc6 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -683,6 +683,7 @@ struct kvm_ppc_cpu_char { #define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */ #define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */ #define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */ +#define KVM_DEV_XIVE_GRP_SOURCE_SYNC 5 /* 64-bit source identifier */ /* Layout of 64-bit XIVE source attribute values */ #define KVM_XIVE_LEVEL_SENSITIVE (1ULL << 0) diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index b9597d80c95a..65380416d101 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -335,6 +335,38 @@ static int kvmppc_xive_native_set_source_config(struct kvmppc_xive *xive, priority, masked, eisn); } +static int kvmppc_xive_native_sync_source(struct kvmppc_xive *xive, + long irq, u64 addr) +{ + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + struct xive_irq_data *xd; + u32 hw_num; + u16 src; + int rc = 0; + + pr_devel("%s irq=0x%lx", __func__, irq); + + sb = kvmppc_xive_find_source(xive, irq, &src); + if (!sb) + return -ENOENT; + + state = &sb->irq_state[src]; + + rc = -EINVAL; + + arch_spin_lock(&sb->lock); + + if (state->valid) { + kvmppc_xive_select_irq(state, &hw_num, &xd); + xive_native_sync_source(hw_num); + rc = 0; + } + + arch_spin_unlock(&sb->lock); + return rc; +} + static int xive_native_validate_queue_size(u32 qshift) { /* @@ -670,6 +702,9 @@ static int kvmppc_xive_native_set_attr(struct kvm_device *dev, case KVM_DEV_XIVE_GRP_EQ_CONFIG: return kvmppc_xive_native_set_queue_config(xive, attr->attr, attr->addr); + case KVM_DEV_XIVE_GRP_SOURCE_SYNC: + return kvmppc_xive_native_sync_source(xive, attr->attr, + attr->addr); } return -ENXIO; } @@ -699,6 +734,7 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev, break; case KVM_DEV_XIVE_GRP_SOURCE: case KVM_DEV_XIVE_GRP_SOURCE_CONFIG: + case KVM_DEV_XIVE_GRP_SOURCE_SYNC: if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ && attr->attr < KVMPPC_XIVE_NR_IRQS) return 0; -- cgit v1.2.3-59-g8ed1b From e6714bd1671da9d8dfb5332075df251b746fd0fd Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 18 Apr 2019 12:39:34 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Add a control to dirty the XIVE EQ pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When migration of a VM is initiated, a first copy of the RAM is transferred to the destination before the VM is stopped, but there is no guarantee that the EQ pages in which the event notifications are queued have not been modified. To make sure migration will capture a consistent memory state, the XIVE device should perform a XIVE quiesce sequence to stop the flow of event notifications and stabilize the EQs. This is the purpose of the KVM_DEV_XIVE_EQ_SYNC control which will also mark the EQ pages dirty to force their transfer.
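As an illustration, triggering the sync from userspace is a single device-attribute write with no payload; a minimal sketch, not part of the patch itself (hypothetical helper, error handling omitted):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Quiesce the XIVE device before the final RAM pass: flushes
     * in-flight notifications and marks the EQ pages dirty. */
    static int xive_eq_sync(int xive_fd)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_DEV_XIVE_GRP_CTRL,
                    .attr  = KVM_DEV_XIVE_EQ_SYNC,
            };

            return ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
    }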
Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/devices/xive.txt | 29 ++++++++++ arch/powerpc/include/uapi/asm/kvm.h | 1 + arch/powerpc/kvm/book3s_xive_native.c | 85 ++++++++++++++++++++++++++++++ 3 files changed, 115 insertions(+) (limited to 'arch/powerpc/include') diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virtual/kvm/devices/xive.txt index 1e7f19d7594b..7ffd4c7be7b5 100644 --- a/Documentation/virtual/kvm/devices/xive.txt +++ b/Documentation/virtual/kvm/devices/xive.txt @@ -23,6 +23,12 @@ the legacy interrupt mode, referred as XICS (POWER7/8). queues. To be used by kexec and kdump. Errors: none + 1.2 KVM_DEV_XIVE_EQ_SYNC (write only) + Sync all the sources and queues and mark the EQ pages dirty. This is + to make sure that a consistent memory state is captured when + migrating the VM. + Errors: none + 2. KVM_DEV_XIVE_GRP_SOURCE (write only) Initializes a new source in the XIVE device and mask it. Attributes: @@ -100,3 +106,26 @@ the legacy interrupt mode, referred as XICS (POWER7/8). Errors: -ENOENT: Unknown source number -EINVAL: Not initialized source number + +* Migration: + + Saving the state of a VM using the XIVE native exploitation mode + should follow a specific sequence. When the VM is stopped: + + 1. Mask all sources (PQ=01) to stop the flow of events. + + 2. Sync the XIVE device with the KVM control KVM_DEV_XIVE_EQ_SYNC to + flush any in-flight event notification and to stabilize the EQs. At + this stage, the EQ pages are marked dirty to make sure they are + transferred in the migration sequence. + + 3. Capture the state of the source targeting, the EQs configuration + and the state of thread interrupt context registers. + + Restore is similar: + + 1. Restore the EQ configuration, as targeting depends on it. + 2. Restore targeting + 3. Restore the thread interrupt contexts + 4. Restore the source states + 5. Let the vCPU run diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index e4abe30f6fc6..12744608a61c 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -680,6 +680,7 @@ struct kvm_ppc_cpu_char { /* POWER9 XIVE Native Interrupt Controller */ #define KVM_DEV_XIVE_GRP_CTRL 1 #define KVM_DEV_XIVE_RESET 1 +#define KVM_DEV_XIVE_EQ_SYNC 2 #define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */ #define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */ #define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 65380416d101..16d23ef3bd39 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -681,6 +681,88 @@ static int kvmppc_xive_reset(struct kvmppc_xive *xive) return 0; } +static void kvmppc_xive_native_sync_sources(struct kvmppc_xive_src_block *sb) +{ + int j; + + for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) { + struct kvmppc_xive_irq_state *state = &sb->irq_state[j]; + struct xive_irq_data *xd; + u32 hw_num; + + if (!state->valid) + continue; + + /* + * The struct kvmppc_xive_irq_state reflects the state + * of the EAS configuration and not the state of the + * source. The source is masked by setting the PQ bits to + * '-Q', which is what is being done before calling + * the KVM_DEV_XIVE_EQ_SYNC control. + * + * If a source EAS is configured, OPAL syncs the XIVE + * IC of the source and the XIVE IC of the previous + * target if any. 
+ * + * So it should be fine ignoring MASKED sources as + * they have been synced already. + */ + if (state->act_priority == MASKED) + continue; + + kvmppc_xive_select_irq(state, &hw_num, &xd); + xive_native_sync_source(hw_num); + xive_native_sync_queue(hw_num); + } +} + +static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + unsigned int prio; + + if (!xc) + return -ENOENT; + + for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) { + struct xive_q *q = &xc->queues[prio]; + + if (!q->qpage) + continue; + + /* Mark EQ page dirty for migration */ + mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr)); + } + return 0; +} + +static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive) +{ + struct kvm *kvm = xive->kvm; + struct kvm_vcpu *vcpu; + unsigned int i; + + pr_devel("%s\n", __func__); + + mutex_lock(&kvm->lock); + for (i = 0; i <= xive->max_sbid; i++) { + struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; + + if (sb) { + arch_spin_lock(&sb->lock); + kvmppc_xive_native_sync_sources(sb); + arch_spin_unlock(&sb->lock); + } + } + + kvm_for_each_vcpu(i, vcpu, kvm) { + kvmppc_xive_native_vcpu_eq_sync(vcpu); + } + mutex_unlock(&kvm->lock); + + return 0; +} + static int kvmppc_xive_native_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { @@ -691,6 +773,8 @@ static int kvmppc_xive_native_set_attr(struct kvm_device *dev, switch (attr->attr) { case KVM_DEV_XIVE_RESET: return kvmppc_xive_reset(xive); + case KVM_DEV_XIVE_EQ_SYNC: + return kvmppc_xive_native_eq_sync(xive); } break; case KVM_DEV_XIVE_GRP_SOURCE: @@ -729,6 +813,7 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev, case KVM_DEV_XIVE_GRP_CTRL: switch (attr->attr) { case KVM_DEV_XIVE_RESET: + case KVM_DEV_XIVE_EQ_SYNC: return 0; } break; -- cgit v1.2.3-59-g8ed1b From e4945b9da52b36052b7c509ca31c5ead1d165b24 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 18 Apr 2019 12:39:35 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Add get/set accessors for the VP XIVE state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The state of the thread interrupt management registers needs to be collected for migration. These registers are cached under the 'xive_saved_state.w01' field of the VCPU when the VCPU context is pulled from the HW thread. An OPAL call retrieves the backup of the IPB register in the underlying XIVE NVT structure and merges it into the KVM state. 
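For illustration, the saved state would be collected from userspace with the ONE_REG API; a minimal sketch, assuming 'vcpu_fd' is a KVM vCPU fd obtained by the VMM:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* KVM_REG_PPC_VP_STATE is 128 bits wide, so KVM copies two u64
     * values through 'addr': timaval[0] carries TIMA word0/word1,
     * timaval[1] is currently unused. */
    static int save_vp_state(int vcpu_fd, uint64_t timaval[2])
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_PPC_VP_STATE,
                    .addr = (uint64_t)(uintptr_t)timaval,
            };

            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }

The restore side is symmetrical with KVM_SET_ONE_REG.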
Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/api.txt | 1 + Documentation/virtual/kvm/devices/xive.txt | 17 +++++++ arch/powerpc/include/asm/kvm_ppc.h | 11 +++++ arch/powerpc/include/uapi/asm/kvm.h | 2 + arch/powerpc/kvm/book3s.c | 24 ++++++++++ arch/powerpc/kvm/book3s_xive_native.c | 76 ++++++++++++++++++++++++++++++ 6 files changed, 131 insertions(+) (limited to 'arch/powerpc/include') diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index e38eb17b7be6..5b505520a616 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -1985,6 +1985,7 @@ registers, find a list below: PPC | KVM_REG_PPC_TLB3PS | 32 PPC | KVM_REG_PPC_EPTCFG | 32 PPC | KVM_REG_PPC_ICP_STATE | 64 + PPC | KVM_REG_PPC_VP_STATE | 128 PPC | KVM_REG_PPC_TB_OFFSET | 64 PPC | KVM_REG_PPC_SPMC1 | 32 PPC | KVM_REG_PPC_SPMC2 | 32 diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virtual/kvm/devices/xive.txt index 7ffd4c7be7b5..525d1eebcf34 100644 --- a/Documentation/virtual/kvm/devices/xive.txt +++ b/Documentation/virtual/kvm/devices/xive.txt @@ -107,6 +107,23 @@ the legacy interrupt mode, referred as XICS (POWER7/8). -ENOENT: Unknown source number -EINVAL: Not initialized source number +* VCPU state + + The XIVE IC maintains VP interrupt state in an internal structure + called the NVT. When a VP is not dispatched on a HW processor + thread, this structure can be updated by HW if the VP is the target + of an event notification. + + It is important for migration to capture the cached IPB from the NVT + as it synthesizes the priorities of the pending interrupts. We + capture a bit more to report debug information. + + KVM_REG_PPC_VP_STATE (2 * 64bits) + bits: | 63 .... 32 | 31 .... 0 | + values: | TIMA word0 | TIMA word1 | + bits: | 127 .......... 
64 | + values: | unused | + * Migration: Saving the state of a VM using the XIVE native exploitation mode diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 9fc2753e516e..bc892380e6cd 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -269,6 +269,7 @@ union kvmppc_one_reg { u64 addr; u64 length; } vpaval; + u64 xive_timaval[2]; }; struct kvmppc_ops { @@ -604,6 +605,10 @@ extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev, extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu); extern void kvmppc_xive_native_init_module(void); extern void kvmppc_xive_native_exit_module(void); +extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, + union kvmppc_one_reg *val); +extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, + union kvmppc_one_reg *val); #else static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server, @@ -636,6 +641,12 @@ static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev, static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { } static inline void kvmppc_xive_native_init_module(void) { } static inline void kvmppc_xive_native_exit_module(void) { } +static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, + union kvmppc_one_reg *val) +{ return 0; } +static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, + union kvmppc_one_reg *val) +{ return -ENOENT; } #endif /* CONFIG_KVM_XIVE */ diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 12744608a61c..cd3f16b70a2e 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -482,6 +482,8 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_ICP_PPRI_SHIFT 16 /* pending irq priority */ #define KVM_REG_PPC_ICP_PPRI_MASK 0xff +#define KVM_REG_PPC_VP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x8d) + /* Device control API: PPC-specific devices */ #define KVM_DEV_MPIC_GRP_MISC 1 #define KVM_DEV_MPIC_BASE_ADDR 0 /* 64-bit */ diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 7c3348fa27e1..efd15101eef0 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -651,6 +651,18 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu)); break; #endif /* CONFIG_KVM_XICS */ +#ifdef CONFIG_KVM_XIVE + case KVM_REG_PPC_VP_STATE: + if (!vcpu->arch.xive_vcpu) { + r = -ENXIO; + break; + } + if (xive_enabled()) + r = kvmppc_xive_native_get_vp(vcpu, val); + else + r = -ENXIO; + break; +#endif /* CONFIG_KVM_XIVE */ case KVM_REG_PPC_FSCR: *val = get_reg_val(id, vcpu->arch.fscr); break; @@ -724,6 +736,18 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val)); break; #endif /* CONFIG_KVM_XICS */ +#ifdef CONFIG_KVM_XIVE + case KVM_REG_PPC_VP_STATE: + if (!vcpu->arch.xive_vcpu) { + r = -ENXIO; + break; + } + if (xive_enabled()) + r = kvmppc_xive_native_set_vp(vcpu, val); + else + r = -ENXIO; + break; +#endif /* CONFIG_KVM_XIVE */ case KVM_REG_PPC_FSCR: vcpu->arch.fscr = set_reg_val(id, *val); break; diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 16d23ef3bd39..2f9d5e9439a6 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -896,6 +896,82 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type) return ret; } +/* + * Interrupt Pending Buffer (IPB) offset + 
*/ +#define TM_IPB_SHIFT 40 +#define TM_IPB_MASK (((u64) 0xFF) << TM_IPB_SHIFT) + +int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + u64 opal_state; + int rc; + + if (!kvmppc_xive_enabled(vcpu)) + return -EPERM; + + if (!xc) + return -ENOENT; + + /* Thread context registers. We only care about IPB and CPPR */ + val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01; + + /* Get the VP state from OPAL */ + rc = xive_native_get_vp_state(xc->vp_id, &opal_state); + if (rc) + return rc; + + /* + * Capture the backup of IPB register in the NVT structure and + * merge it in our KVM VP state. + */ + val->xive_timaval[0] |= cpu_to_be64(opal_state & TM_IPB_MASK); + + pr_devel("%s NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x opal=%016llx\n", + __func__, + vcpu->arch.xive_saved_state.nsr, + vcpu->arch.xive_saved_state.cppr, + vcpu->arch.xive_saved_state.ipb, + vcpu->arch.xive_saved_state.pipr, + vcpu->arch.xive_saved_state.w01, + (u32) vcpu->arch.xive_cam_word, opal_state); + + return 0; +} + +int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct kvmppc_xive *xive = vcpu->kvm->arch.xive; + + pr_devel("%s w01=%016llx vp=%016llx\n", __func__, + val->xive_timaval[0], val->xive_timaval[1]); + + if (!kvmppc_xive_enabled(vcpu)) + return -EPERM; + + if (!xc || !xive) + return -ENOENT; + + /* We can't update the state of a "pushed" VCPU */ + if (WARN_ON(vcpu->arch.xive_pushed)) + return -EBUSY; + + /* + * Restore the thread context registers. IPB and CPPR should + * be the only ones that matter. + */ + vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0]; + + /* + * There is no need to restore the XIVE internal state (IPB + * stored in the NVT) as the IPB register was merged in KVM VP + * state when captured. + */ + return 0; +} + static int xive_native_debug_show(struct seq_file *m, void *private) { struct kvmppc_xive *xive = m->private; -- cgit v1.2.3-59-g8ed1b From 39e9af3de5ca936098bc80ebe14401426673c208 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 18 Apr 2019 12:39:37 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Add a TIMA mapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Each thread has an associated Thread Interrupt Management context composed of a set of registers. These registers let the thread handle priority management and interrupt acknowledgment. The most important are: - Interrupt Pending Buffer (IPB) - Current Processor Priority (CPPR) - Notification Source Register (NSR) They are exposed to software in four different pages each providing a view with a different privilege. The first page is for the physical thread context and the second for the hypervisor. Only the third (operating system) and the fourth (user level) are exposed to the guest. A custom VM fault handler will populate the VMA with the appropriate pages, which should only be the OS page for now. 
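For illustration, a VMM could map the window as follows; a sketch only, assuming 'dev_fd' is the XIVE native device fd and that, as described above, only the third (OS) page is backed for now:

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    /* Map the 4-page TIMA window at KVM_XIVE_TIMA_PAGE_OFFSET and
     * return the OS view; touching the other pages raises SIGBUS. */
    static void *map_tima_os(int dev_fd)
    {
            long psz = sysconf(_SC_PAGESIZE);
            uint8_t *tima = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
                                 MAP_SHARED, dev_fd,
                                 KVM_XIVE_TIMA_PAGE_OFFSET * psz);

            if (tima == MAP_FAILED)
                    return NULL;
            return tima + 2 * psz;      /* third page: OS view */
    }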
Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/devices/xive.txt | 23 ++++++++++++++++++ arch/powerpc/include/asm/xive.h | 1 + arch/powerpc/include/uapi/asm/kvm.h | 2 ++ arch/powerpc/kvm/book3s_xive_native.c | 39 ++++++++++++++++++++++++++++++ arch/powerpc/sysdev/xive/native.c | 11 +++++++++ 5 files changed, 76 insertions(+) (limited to 'arch/powerpc/include') diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virtual/kvm/devices/xive.txt index 525d1eebcf34..0cd7847ec38a 100644 --- a/Documentation/virtual/kvm/devices/xive.txt +++ b/Documentation/virtual/kvm/devices/xive.txt @@ -13,6 +13,29 @@ requires a POWER9 host and the guest OS should have support for the XIVE native exploitation interrupt mode. If not, it should run using the legacy interrupt mode, referred as XICS (POWER7/8). +* Device Mappings + + The KVM device exposes different MMIO ranges of the XIVE HW which + are required for interrupt management. These are exposed to the + guest in VMAs populated with a custom VM fault handler. + + 1. Thread Interrupt Management Area (TIMA) + + Each thread has an associated Thread Interrupt Management context + composed of a set of registers. These registers let the thread + handle priority management and interrupt acknowledgment. The most + important are: + + - Interrupt Pending Buffer (IPB) + - Current Processor Priority (CPPR) + - Notification Source Register (NSR) + + They are exposed to software in four different pages each providing + a view with a different privilege. The first page is for the + physical thread context and the second for the hypervisor. Only the + third (operating system) and the fourth (user level) are exposed to the + guest. + * Groups: 1. 
KVM_DEV_XIVE_GRP_CTRL diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index c4e88abd3b67..eaf76f57023a 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -23,6 +23,7 @@ * same offset regardless of where the code is executing */ extern void __iomem *xive_tima; +extern unsigned long xive_tima_os; /* * Offset in the TM area of our current execution level (provided by diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index cd3f16b70a2e..0998e8edc91a 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -720,4 +720,6 @@ struct kvm_ppc_xive_eq { #define KVM_XIVE_EQ_ALWAYS_NOTIFY 0x00000001 +#define KVM_XIVE_TIMA_PAGE_OFFSET 0 + #endif /* __LINUX_KVM_POWERPC_H */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 2f9d5e9439a6..f5314da0cb45 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -165,6 +165,44 @@ bail: return rc; } +static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + + switch (vmf->pgoff - vma->vm_pgoff) { + case 0: /* HW - forbid access */ + case 1: /* HV - forbid access */ + return VM_FAULT_SIGBUS; + case 2: /* OS */ + vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT); + return VM_FAULT_NOPAGE; + case 3: /* USER - TODO */ + default: + return VM_FAULT_SIGBUS; + } +} + +static const struct vm_operations_struct xive_native_tima_vmops = { + .fault = xive_native_tima_fault, +}; + +static int kvmppc_xive_native_mmap(struct kvm_device *dev, + struct vm_area_struct *vma) +{ + /* We only allow mappings at fixed offset for now */ + if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) { + if (vma_pages(vma) > 4) + return -EINVAL; + vma->vm_ops = &xive_native_tima_vmops; + } else { + return -EINVAL; + } + + vma->vm_flags |= VM_IO | VM_PFNMAP; + vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); + return 0; +} + static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq, u64 addr) { @@ -1050,6 +1088,7 @@ struct kvm_device_ops kvm_xive_native_ops = { .set_attr = kvmppc_xive_native_set_attr, .get_attr = kvmppc_xive_native_get_attr, .has_attr = kvmppc_xive_native_has_attr, + .mmap = kvmppc_xive_native_mmap, }; void kvmppc_xive_native_init_module(void) diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 0c037e933e55..7782201e5fe8 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -521,6 +521,9 @@ u32 xive_native_default_eq_shift(void) } EXPORT_SYMBOL_GPL(xive_native_default_eq_shift); +unsigned long xive_tima_os; +EXPORT_SYMBOL_GPL(xive_tima_os); + bool __init xive_native_init(void) { struct device_node *np; @@ -573,6 +576,14 @@ bool __init xive_native_init(void) for_each_possible_cpu(cpu) kvmppc_set_xive_tima(cpu, r.start, tima); + /* Resource 2 is OS window */ + if (of_address_to_resource(np, 2, &r)) { + pr_err("Failed to get thread mgmnt area resource\n"); + return false; + } + + xive_tima_os = r.start; + /* Grab size of provisionning pages */ xive_parse_provisioning(np); -- cgit v1.2.3-59-g8ed1b From 6520ca64cde71b75dae54f3fcb33517a93d82486 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 18 Apr 2019 12:39:38 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Add a mapping for the source ESB pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Each source is 
associated with an Event State Buffer (ESB) with an even/odd pair of pages which provides commands to manage the source: to trigger, to EOI, to turn off the source for instance. The custom VM fault handler will deduce the guest IRQ number from the offset of the fault, and the ESB page of the associated XIVE interrupt will be inserted into the VMA using the internal structure caching information on the interrupts. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/devices/xive.txt | 7 ++++ arch/powerpc/include/uapi/asm/kvm.h | 1 + arch/powerpc/kvm/book3s_xive_native.c | 57 ++++++++++++++++++++++++++++++ 3 files changed, 65 insertions(+) (limited to 'arch/powerpc/include') diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virtual/kvm/devices/xive.txt index 0cd7847ec38a..69ee62d3d4dc 100644 --- a/Documentation/virtual/kvm/devices/xive.txt +++ b/Documentation/virtual/kvm/devices/xive.txt @@ -36,6 +36,13 @@ the legacy interrupt mode, referred as XICS (POWER7/8). third (operating system) and the fourth (user level) are exposed to the guest. + 2. Event State Buffer (ESB) + + Each source is associated with an Event State Buffer (ESB) with + an even/odd pair of pages which provides commands to + manage the source: to trigger, to EOI, to turn off the source for + instance. + * Groups: 1. KVM_DEV_XIVE_GRP_CTRL diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 0998e8edc91a..b0f72dea8b11 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -721,5 +721,6 @@ struct kvm_ppc_xive_eq { #define KVM_XIVE_EQ_ALWAYS_NOTIFY 0x00000001 #define KVM_XIVE_TIMA_PAGE_OFFSET 0 +#define KVM_XIVE_ESB_PAGE_OFFSET 4 #endif /* __LINUX_KVM_POWERPC_H */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index f5314da0cb45..465eb90ff23e 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -165,6 +165,59 @@ bail: return rc; } +static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct kvm_device *dev = vma->vm_file->private_data; + struct kvmppc_xive *xive = dev->private; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + struct xive_irq_data *xd; + u32 hw_num; + u16 src; + u64 page; + unsigned long irq; + u64 page_offset; + + /* + * Linux/KVM uses a two-page ESB setting, one for trigger and + * one for EOI + */ + page_offset = vmf->pgoff - vma->vm_pgoff; + irq = page_offset / 2; + + sb = kvmppc_xive_find_source(xive, irq, &src); + if (!sb) { + pr_devel("%s: source %lx not found !\n", __func__, irq); + return VM_FAULT_SIGBUS; + } + + state = &sb->irq_state[src]; + kvmppc_xive_select_irq(state, &hw_num, &xd); + + arch_spin_lock(&sb->lock); + + /* + * first/even page is for trigger + * second/odd page is for EOI and management. + */ + page = page_offset % 2 ? 
xd->eoi_page : xd->trig_page; + arch_spin_unlock(&sb->lock); + + if (WARN_ON(!page)) { + pr_err("%s: accessing invalid ESB page for source %lx !\n", + __func__, irq); + return VM_FAULT_SIGBUS; + } + + vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT); + return VM_FAULT_NOPAGE; +} + +static const struct vm_operations_struct xive_native_esb_vmops = { + .fault = xive_native_esb_fault, +}; + static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; @@ -194,6 +247,10 @@ static int kvmppc_xive_native_mmap(struct kvm_device *dev, if (vma_pages(vma) > 4) return -EINVAL; vma->vm_ops = &xive_native_tima_vmops; + } else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) { + if (vma_pages(vma) > KVMPPC_XIVE_NR_IRQS * 2) + return -EINVAL; + vma->vm_ops = &xive_native_esb_vmops; } else { return -EINVAL; } -- cgit v1.2.3-59-g8ed1b From 5422e95103cf9663bc86cf1056a3ea44c2e2f09e Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 18 Apr 2019 12:39:42 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Replace the 'destroy' method by a 'release' method MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a P9 sPAPR VM boots, the CAS negotiation process determines which interrupt mode to use (XICS legacy or XIVE native) and invokes a machine reset to activate the chosen mode. We introduce 'release' methods for the XICS-on-XIVE and the XIVE native KVM devices which are called when the file descriptor of the device is closed after the TIMA and ESB pages have been unmapped. They perform the necessary cleanups: clear the vCPU interrupt presenters that could be attached and then destroy the device. The 'release' methods replace the 'destroy' methods, as 'destroy' is no longer called once 'release' is defined. Compatibility with older QEMU is nevertheless maintained. This is not considered a safe operation, as the vCPUs are still running and could be referencing the KVM device through their presenters. To protect the system from any breakage, the kvmppc_xive objects representing both KVM devices are now stored in an array under the VM. Allocation is performed on first usage and memory is freed only when the VM exits. [paulus@ozlabs.org - Moved freeing of xive structures to book3s.c, put it under #ifdef CONFIG_KVM_XICS.] 
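For illustration, the interrupt mode switch driven by CAS would then amount, on the VMM side, to a close followed by a create; a rough sketch, not actual QEMU code, with fd bookkeeping assumed:

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Closing the old device fd now invokes its 'release' method
     * instead of leaving cleanup to VM destruction. 'new_type' would
     * be KVM_DEV_TYPE_XIVE or KVM_DEV_TYPE_XICS depending on the
     * negotiated mode. */
    static int switch_irqchip(int vm_fd, int old_dev_fd, uint32_t new_type)
    {
            struct kvm_create_device cd = { .type = new_type };

            close(old_dev_fd);          /* triggers 'release' */

            if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
                    return -1;
            return cd.fd;               /* fd of the new device */
    }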
Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_host.h | 6 +++- arch/powerpc/kvm/book3s.c | 11 ++++++ arch/powerpc/kvm/book3s_xive.c | 67 +++++++++++++++++++++++++++++++---- arch/powerpc/kvm/book3s_xive.h | 1 + arch/powerpc/kvm/book3s_xive_native.c | 28 ++++++++++++--- 5 files changed, 101 insertions(+), 12 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index eac25fd7e631..013c76a0a03e 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -316,7 +316,11 @@ struct kvm_arch { #endif #ifdef CONFIG_KVM_XICS struct kvmppc_xics *xics; - struct kvmppc_xive *xive; + struct kvmppc_xive *xive; /* Current XIVE device in use */ + struct { + struct kvmppc_xive *native; + struct kvmppc_xive *xics_on_xive; + } xive_devices; struct kvmppc_passthru_irqmap *pimap; #endif struct kvmppc_ops *kvm_ops; diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index efd15101eef0..61a212d0daf0 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -915,6 +915,17 @@ void kvmppc_core_destroy_vm(struct kvm *kvm) kvmppc_rtas_tokens_free(kvm); WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); #endif + +#ifdef CONFIG_KVM_XICS + /* + * Free the XIVE devices which are not directly freed by the + * device 'release' method + */ + kfree(kvm->arch.xive_devices.native); + kvm->arch.xive_devices.native = NULL; + kfree(kvm->arch.xive_devices.xics_on_xive); + kvm->arch.xive_devices.xics_on_xive = NULL; +#endif /* CONFIG_KVM_XICS */ } int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 480a3fc6b9fd..922689b768e6 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1100,9 +1100,15 @@ void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu) void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; - struct kvmppc_xive *xive = xc->xive; + struct kvmppc_xive *xive = vcpu->kvm->arch.xive; int i; + if (!kvmppc_xics_enabled(vcpu)) + return; + + if (!xc) + return; + pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num); /* Ensure no interrupt is still routed to that VP */ @@ -1141,6 +1147,10 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) } /* Free the VP */ kfree(xc); + + /* Cleanup the vcpu */ + vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; + vcpu->arch.xive_vcpu = NULL; } int kvmppc_xive_connect_vcpu(struct kvm_device *dev, @@ -1158,7 +1168,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev, } if (xive->kvm != vcpu->kvm) return -EPERM; - if (vcpu->arch.irq_type) + if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) return -EBUSY; if (kvmppc_xive_find_server(vcpu->kvm, cpu)) { pr_devel("Duplicate !\n"); @@ -1824,12 +1834,26 @@ void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) } } -static void kvmppc_xive_free(struct kvm_device *dev) +/* + * Called when device fd is closed + */ +static void kvmppc_xive_release(struct kvm_device *dev) { struct kvmppc_xive *xive = dev->private; struct kvm *kvm = xive->kvm; + struct kvm_vcpu *vcpu; int i; + pr_devel("Releasing xive device\n"); + + /* + * When releasing the KVM device fd, the vCPUs can still be + * running and we should clean up the vCPU interrupt + * presenters first. 
+ */ + kvm_for_each_vcpu(i, vcpu, kvm) + kvmppc_xive_cleanup_vcpu(vcpu); + debugfs_remove(xive->dentry); if (kvm) @@ -1846,11 +1870,42 @@ static void kvmppc_xive_free(struct kvm_device *dev) if (xive->vp_base != XIVE_INVALID_VP) xive_native_free_vp_block(xive->vp_base); + /* + * A reference of the kvmppc_xive pointer is now kept under + * the xive_devices struct of the machine for reuse. It is + * freed when the VM is destroyed for now until we fix all the + * execution paths. + */ - kfree(xive); kfree(dev); } +/* + * When the guest chooses the interrupt mode (XICS legacy or XIVE + * native), the VM will switch KVM devices. The previous device will + * be "released" before the new one is created. + * + * Until we are sure all execution paths are well protected, provide a + * fail-safe (transitional) method for device destruction, in which + * the XIVE device pointer is recycled and not directly freed. + */ +struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type) +{ + struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ? + &kvm->arch.xive_devices.native : + &kvm->arch.xive_devices.xics_on_xive; + struct kvmppc_xive *xive = *kvm_xive_device; + + if (!xive) { + xive = kzalloc(sizeof(*xive), GFP_KERNEL); + *kvm_xive_device = xive; + } else { + memset(xive, 0, sizeof(*xive)); + } + + return xive; +} + static int kvmppc_xive_create(struct kvm_device *dev, u32 type) { struct kvmppc_xive *xive; @@ -1859,7 +1914,7 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) pr_devel("Creating xive for partition\n"); - xive = kzalloc(sizeof(*xive), GFP_KERNEL); + xive = kvmppc_xive_get_device(kvm, type); if (!xive) return -ENOMEM; @@ -2024,7 +2079,7 @@ struct kvm_device_ops kvm_xive_ops = { .name = "kvm-xive", .create = kvmppc_xive_create, .init = kvmppc_xive_init, - .destroy = kvmppc_xive_free, + .release = kvmppc_xive_release, .set_attr = xive_set_attr, .get_attr = xive_get_attr, .has_attr = xive_has_attr, diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index e011622dc038..426146332984 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -283,6 +283,7 @@ void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb); int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio); int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, bool single_escalation); +struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type); #endif /* CONFIG_KVM_XICS */ #endif /* _KVM_PPC_BOOK3S_XICS_H */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 62648f833adf..0497272a72fa 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -964,15 +964,27 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev, return -ENXIO; } -static void kvmppc_xive_native_free(struct kvm_device *dev) +/* + * Called when device fd is closed + */ +static void kvmppc_xive_native_release(struct kvm_device *dev) { struct kvmppc_xive *xive = dev->private; struct kvm *kvm = xive->kvm; + struct kvm_vcpu *vcpu; int i; debugfs_remove(xive->dentry); - pr_devel("Destroying xive native device\n"); + pr_devel("Releasing xive native device\n"); + + /* + * When releasing the KVM device fd, the vCPUs can still be + * running and we should clean up the vCPU interrupt + * presenters first. 
+ */ + kvm_for_each_vcpu(i, vcpu, kvm) + kvmppc_xive_native_cleanup_vcpu(vcpu); if (kvm) kvm->arch.xive = NULL; @@ -987,7 +999,13 @@ static void kvmppc_xive_native_free(struct kvm_device *dev) if (xive->vp_base != XIVE_INVALID_VP) xive_native_free_vp_block(xive->vp_base); - kfree(xive); + /* + * A reference of the kvmppc_xive pointer is now kept under + * the xive_devices struct of the machine for reuse. It is + * freed when the VM is destroyed for now until we fix all the + * execution paths. + */ + kfree(dev); } @@ -1002,7 +1020,7 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type) if (kvm->arch.xive) return -EEXIST; - xive = kzalloc(sizeof(*xive), GFP_KERNEL); + xive = kvmppc_xive_get_device(kvm, type); if (!xive) return -ENOMEM; @@ -1182,7 +1200,7 @@ struct kvm_device_ops kvm_xive_native_ops = { .name = "kvm-xive-native", .create = kvmppc_xive_native_create, .init = kvmppc_xive_native_init, - .destroy = kvmppc_xive_native_free, + .release = kvmppc_xive_native_release, .set_attr = kvmppc_xive_native_set_attr, .get_attr = kvmppc_xive_native_get_attr, .has_attr = kvmppc_xive_native_has_attr, -- cgit v1.2.3-59-g8ed1b
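To close on the ESB mapping introduced earlier in the series, userspace reaches the per-source page pairs through a single large mapping, since the kernel only accepts mappings starting exactly at KVM_XIVE_ESB_PAGE_OFFSET. A sketch under the same assumptions as the previous examples, with 'nr_irqs' mirroring KVMPPC_XIVE_NR_IRQS:

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    /* Map the whole ESB window; pages are populated lazily by the
     * fault handler, two per source. */
    static void *map_esb_window(int dev_fd, unsigned long nr_irqs)
    {
            long psz = sysconf(_SC_PAGESIZE);
            void *esb = mmap(NULL, nr_irqs * 2 * psz,
                             PROT_READ | PROT_WRITE, MAP_SHARED,
                             dev_fd, (off_t)KVM_XIVE_ESB_PAGE_OFFSET * psz);

            return esb == MAP_FAILED ? NULL : esb;
    }

    /* The even page of a source triggers it; the odd page is for EOI
     * and management. */
    static void *esb_eoi_page(uint8_t *esb, unsigned long irq, long psz)
    {
            return esb + (irq * 2 + 1) * psz;
    }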