author     Paolo Bonzini <pbonzini@redhat.com>   2017-05-09 11:50:01 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>   2017-05-09 11:50:01 +0200
commit     4415b335282591e76762cd9e6dc60932a7595fc3 (patch)
tree       07ba1eb24f8d44baeb07916351d4819026feccae /arch/powerpc/sysdev/xive
parent     KVM: set no_llseek in stat_fops_per_vm (diff)
parent     Merge remote-tracking branch 'remotes/powerpc/topic/xive' into kvm-ppc-next (diff)
Merge branch 'kvm-ppc-next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD
The main thing here is a new implementation of the in-kernel XICS
interrupt controller emulation for POWER9 machines, from Ben Herrenschmidt.

POWER9 has a new interrupt controller called XIVE (eXternal Interrupt
Virtualization Engine) which is able to deliver interrupts directly to
guest virtual CPUs in hardware without hypervisor intervention. With this
new code, the guest still sees the old XICS interface, but performance is
better because the XICS emulation in the host uses the XIVE directly
rather than going through a XICS emulation in firmware.

Conflicts:
	arch/powerpc/kernel/cpu_setup_power.S [cherry-picked fix]
	arch/powerpc/kvm/book3s_xive.c [include asm/debugfs.h]
Diffstat (limited to 'arch/powerpc/sysdev/xive')
-rw-r--r--   arch/powerpc/sysdev/xive/common.c   142
-rw-r--r--   arch/powerpc/sysdev/xive/native.c    86
2 files changed, 217 insertions(+), 11 deletions(-)
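
The pass-through changes to common.c below lean on the per-source ESB
(Event State Buffer) P/Q bits: P is set once an event has been forwarded
to an event queue and blocks further deliveries until EOI, while Q latches
a trigger that arrives in the meantime so it can be replayed. An "ESB set
PQ" MMIO load stores the new state and returns the previous one, which is
why the hunks below test the old P bit with "pq & 2" right after
xive_poke_esb(xd, XIVE_ESB_SET_PQ_10). The following standalone sketch is
not part of this merge; the two macros and the helper are hypothetical
names used only to illustrate that test.

#include <stdbool.h>
#include <stdint.h>

#define ESB_VAL_P	0x2	/* hypothetical: P, event forwarded, blocks new deliveries */
#define ESB_VAL_Q	0x1	/* hypothetical: Q, a trigger was latched while blocked    */

/* The old PQ value returned by an "ESB set PQ" load; "pq & 2" in the
 * patch is exactly this check on the previous P bit. */
static inline bool esb_old_p_was_set(uint8_t old_pq)
{
	return (old_pq & ESB_VAL_P) != 0;
}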
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 6a98efb14264..913825086b8d 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -46,13 +46,15 @@
#endif
bool __xive_enabled;
+EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;
/* We use only one priority for now */
static u8 xive_irq_priority;
-/* TIMA */
+/* TIMA exported to KVM */
void __iomem *xive_tima;
+EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;
/* Backend ops */
@@ -345,8 +347,11 @@ static void xive_irq_eoi(struct irq_data *d)
DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
d->irq, irqd_to_hwirq(d), xc->pending_prio);
- /* EOI the source if it hasn't been disabled */
- if (!irqd_irq_disabled(d))
+ /*
+ * EOI the source if it hasn't been disabled and hasn't
+ * been passed-through to a KVM guest
+ */
+ if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d))
xive_do_source_eoi(irqd_to_hwirq(d), xd);
/*
@@ -689,9 +694,14 @@ static int xive_irq_set_affinity(struct irq_data *d,
old_target = xd->target;
- rc = xive_ops->configure_irq(hw_irq,
- get_hard_smp_processor_id(target),
- xive_irq_priority, d->irq);
+ /*
+ * Only configure the irq if it's not currently passed-through to
+ * a KVM guest
+ */
+ if (!irqd_is_forwarded_to_vcpu(d))
+ rc = xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(target),
+ xive_irq_priority, d->irq);
if (rc < 0) {
pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
return rc;
@@ -771,6 +781,123 @@ static int xive_irq_retrigger(struct irq_data *d)
return 1;
}
+static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ int rc;
+ u8 pq;
+
+ /*
+ * We only support this on interrupts that do not require
+ * firmware calls for masking and unmasking
+ */
+ if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
+ return -EIO;
+
+ /*
+ * This is called by KVM with state non-NULL for enabling
+ * pass-through or NULL for disabling it
+ */
+ if (state) {
+ irqd_set_forwarded_to_vcpu(d);
+
+ /* Set it to PQ=10 state to prevent further sends */
+ pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);
+
+ /* No target ? nothing to do */
+ if (xd->target == XIVE_INVALID_TARGET) {
+ /*
+ * An untargetted interrupt should have been
+ * also masked at the source
+ */
+ WARN_ON(pq & 2);
+
+ return 0;
+ }
+
+ /*
+ * If P was set, adjust state to PQ=11 to indicate
+ * that a resend is needed for the interrupt to reach
+ * the guest. Also remember the value of P.
+ *
+ * This also tells us that it's in flight to a host queue
+ * or has already been fetched but hasn't been EOIed yet
+ * by the host. Thus it's potentially using up a host
+ * queue slot. This is important to know because as long
+ * as this is the case, we must not hard-unmask it when
+ * "returning" that interrupt to the host.
+ *
+ * This saved_p is cleared by the host EOI, when we know
+ * for sure the queue slot is no longer in use.
+ */
+ if (pq & 2) {
+ pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_11);
+ xd->saved_p = true;
+
+ /*
+ * Sync the XIVE source HW to ensure the interrupt
+ * has gone through the EAS before we change its
+ * target to the guest. That should guarantee us
+ * that we *will* eventually get an EOI for it on
+ * the host. Otherwise there would be a small window
+ * for P to be seen here but the interrupt going
+ * to the guest queue.
+ */
+ if (xive_ops->sync_source)
+ xive_ops->sync_source(hw_irq);
+ } else
+ xd->saved_p = false;
+ } else {
+ irqd_clr_forwarded_to_vcpu(d);
+
+ /* No host target ? hard mask and return */
+ if (xd->target == XIVE_INVALID_TARGET) {
+ xive_do_source_set_mask(xd, true);
+ return 0;
+ }
+
+ /*
+ * Sync the XIVE source HW to ensure the interrupt
+ * has gone through the EAS before we change its
+ * target to the host.
+ */
+ if (xive_ops->sync_source)
+ xive_ops->sync_source(hw_irq);
+
+ /*
+ * By convention we are called with the interrupt in
+ * a PQ=10 or PQ=11 state, ie, it won't fire and will
+ * have latched in Q whether there's a pending HW
+ * interrupt or not.
+ *
+ * First reconfigure the target.
+ */
+ rc = xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(xd->target),
+ xive_irq_priority, d->irq);
+ if (rc)
+ return rc;
+
+ /*
+ * Then if saved_p is not set, effectively re-enable the
+ * interrupt with an EOI. If it is set, we know there is
+ * still a message in a host queue somewhere that will be
+ * EOId eventually.
+ *
+ * Note: We don't check irqd_irq_disabled(). Effectively,
+ * we *will* let the irq get through even if masked if the
+ * HW is still firing it in order to deal with the whole
+ * saved_p business properly. If the interrupt triggers
+ * while masked, the generic code will re-mask it anyway.
+ */
+ if (!xd->saved_p)
+ xive_do_source_eoi(hw_irq, xd);
+
+ }
+ return 0;
+}
+
static struct irq_chip xive_irq_chip = {
.name = "XIVE-IRQ",
.irq_startup = xive_irq_startup,
@@ -781,12 +908,14 @@ static struct irq_chip xive_irq_chip = {
.irq_set_affinity = xive_irq_set_affinity,
.irq_set_type = xive_irq_set_type,
.irq_retrigger = xive_irq_retrigger,
+ .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
};
bool is_xive_irq(struct irq_chip *chip)
{
return chip == &xive_irq_chip;
}
+EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
@@ -801,6 +930,7 @@ void xive_cleanup_irq_data(struct xive_irq_data *xd)
xd->trig_mmio = NULL;
}
}
+EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 1a726229a427..ab9ecce61ee5 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -31,6 +31,7 @@
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
+#include <asm/kvm_ppc.h>
#include "xive-internal.h"
@@ -95,6 +96,7 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
}
return 0;
}
+EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
@@ -108,6 +110,8 @@ int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
}
return rc == 0 ? 0 : -ENXIO;
}
+EXPORT_SYMBOL_GPL(xive_native_configure_irq);
+
/* This can be called multiple time to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
@@ -172,6 +176,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
fail:
return rc;
}
+EXPORT_SYMBOL_GPL(xive_native_configure_queue);
static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
@@ -192,6 +197,7 @@ void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
__xive_native_disable_queue(vp_id, q, prio);
}
+EXPORT_SYMBOL_GPL(xive_native_disable_queue);
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
@@ -262,6 +268,7 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
}
return 0;
}
+#endif /* CONFIG_SMP */
u32 xive_native_alloc_irq(void)
{
@@ -277,6 +284,7 @@ u32 xive_native_alloc_irq(void)
return 0;
return rc;
}
+EXPORT_SYMBOL_GPL(xive_native_alloc_irq);
void xive_native_free_irq(u32 irq)
{
@@ -287,7 +295,9 @@ void xive_native_free_irq(u32 irq)
msleep(1);
}
}
+EXPORT_SYMBOL_GPL(xive_native_free_irq);
+#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
s64 rc;
@@ -383,7 +393,7 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
return;
/* Enable the pool VP */
- vp = xive_pool_vps + get_hard_smp_processor_id(cpu);
+ vp = xive_pool_vps + cpu;
pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
for (;;) {
rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
@@ -428,7 +438,7 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
/* Disable it */
- vp = xive_pool_vps + get_hard_smp_processor_id(cpu);
+ vp = xive_pool_vps + cpu;
for (;;) {
rc = opal_xive_set_vp_info(vp, 0, 0);
if (rc != OPAL_BUSY)
@@ -437,10 +447,11 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
}
}
-static void xive_native_sync_source(u32 hw_irq)
+void xive_native_sync_source(u32 hw_irq)
{
opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
+EXPORT_SYMBOL_GPL(xive_native_sync_source);
static const struct xive_ops xive_native_ops = {
.populate_irq_data = xive_native_populate_irq_data,
@@ -501,10 +512,24 @@ static bool xive_parse_provisioning(struct device_node *np)
return true;
}
+static void xive_native_setup_pools(void)
+{
+ /* Allocate a pool big enough */
+ pr_debug("XIVE: Allocating VP block for pool size %d\n", nr_cpu_ids);
+
+ xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
+ if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
+ pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");
+
+ pr_debug("XIVE: Pool VPs allocated at 0x%x for %d max CPUs\n",
+ xive_pool_vps, nr_cpu_ids);
+}
+
u32 xive_native_default_eq_shift(void)
{
return xive_queue_shift;
}
+EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
bool xive_native_init(void)
{
@@ -514,7 +539,7 @@ bool xive_native_init(void)
struct property *prop;
u8 max_prio = 7;
const __be32 *p;
- u32 val;
+ u32 val, cpu;
s64 rc;
if (xive_cmdline_disabled)
@@ -550,7 +575,11 @@ bool xive_native_init(void)
break;
}
- /* Grab size of provisioning pages */
+ /* Configure Thread Management areas for KVM */
+ for_each_possible_cpu(cpu)
+ kvmppc_set_xive_tima(cpu, r.start, tima);
+
+ /* Grab size of provisioning pages */
xive_parse_provisioning(np);
/* Switch the XIVE to exploitation mode */
@@ -560,6 +589,9 @@ bool xive_native_init(void)
return false;
}
+ /* Setup some dummy HV pool VPs */
+ xive_native_setup_pools();
+
/* Initialize XIVE core with our backend */
if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
max_prio)) {
@@ -638,3 +670,47 @@ void xive_native_free_vp_block(u32 vp_base)
pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);
+
+int xive_native_enable_vp(u32 vp_id)
+{
+ s64 rc;
+
+ for (;;) {
+ rc = opal_xive_set_vp_info(vp_id, OPAL_XIVE_VP_ENABLED, 0);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ return rc ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_enable_vp);
+
+int xive_native_disable_vp(u32 vp_id)
+{
+ s64 rc;
+
+ for (;;) {
+ rc = opal_xive_set_vp_info(vp_id, 0, 0);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ return rc ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_disable_vp);
+
+int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
+{
+ __be64 vp_cam_be;
+ __be32 vp_chip_id_be;
+ s64 rc;
+
+ rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
+ if (rc)
+ return -EIO;
+ *out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
+ *out_chip_id = be32_to_cpu(vp_chip_id_be);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_vp_info);
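
The VP helpers exported at the end of native.c form a small lifecycle API
for KVM: allocate a block, enable each VP, query the CAM line and chip id
needed to program thread contexts, and disable/free on teardown. A hedged
usage sketch follows, not part of this merge and assuming the declarations
sit alongside the other xive_native_* helpers in asm/xive.h; only the
xive_native_*() calls and XIVE_INVALID_VP come from the patch, the
surrounding function and variable names are hypothetical.

#include <linux/errno.h>
#include <asm/xive.h>

static int example_setup_guest_vps(u32 max_vcpus)
{
	u32 vp_base, vp_id, cam_id, chip_id;
	u32 i;
	int rc;

	/* One VP per possible vCPU of the guest */
	vp_base = xive_native_alloc_vp_block(max_vcpus);
	if (vp_base == XIVE_INVALID_VP)
		return -ENOSPC;

	for (i = 0; i < max_vcpus; i++) {
		vp_id = vp_base + i;

		rc = xive_native_enable_vp(vp_id);
		if (rc)
			goto err;

		rc = xive_native_get_vp_info(vp_id, &cam_id, &chip_id);
		if (rc) {
			xive_native_disable_vp(vp_id);
			goto err;
		}
		/* cam_id/chip_id would then be used when setting up the
		 * vCPU's thread context. */
	}
	return 0;

err:
	/* Disable the VPs enabled so far, then release the whole block */
	while (i--)
		xive_native_disable_vp(vp_base + i);
	xive_native_free_vp_block(vp_base);
	return rc;
}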