author     Jeremy Fitzhardinge <jeremy@xensource.com>  2007-10-16 11:51:30 -0700
committer  Jeremy Fitzhardinge <jeremy@goop.org>       2007-10-16 11:51:30 -0700
commit     f0d733942750c1ee6358c3a4a1a5d7ba73b7122f (patch)
tree       32d0802b60078b6a2f43ce19d9019033ada6485d /arch/x86/xen/smp.c
parent     Clean up duplicate includes in arch/i386/xen/ (diff)
xen: yield to IPI target if necessary
When sending a call-function IPI to a vcpu, yield if the vcpu isn't running.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Diffstat (limited to 'arch/x86/xen/smp.c')
-rw-r--r--  arch/x86/xen/smp.c  14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 557b8e24706a..865953e6f341 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -360,7 +360,8 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
 			       void *info, int wait)
 {
 	struct call_data_struct data;
-	int cpus;
+	int cpus, cpu;
+	bool yield;
 
 	/* Holding any lock stops cpus from going down. */
 	spin_lock(&call_lock);
@@ -389,9 +390,14 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
 	/* Send a message to other CPUs and wait for them to respond */
 	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
-	/* Make sure other vcpus get a chance to run.
-	   XXX too severe?  Maybe we should check the other CPU's states? */
-	HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+	/* Make sure other vcpus get a chance to run if they need to. */
+	yield = false;
+	for_each_cpu_mask(cpu, mask)
+		if (xen_vcpu_stolen(cpu))
+			yield = true;
+
+	if (yield)
+		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 
 	/* Wait for response */
 	while (atomic_read(&data.started) != cpus ||
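
The yield decision above hinges on xen_vcpu_stolen(), which reports whether the target vcpu is runnable but currently off a physical CPU, i.e. its time is being "stolen" by the hypervisor. A rough sketch of what such a check can look like, assuming a per-cpu copy of Xen's vcpu_runstate_info is kept current (as the runstate accounting in arch/x86/xen/time.c does); the per-cpu variable name is illustrative:

#include <linux/percpu.h>
#include <xen/interface/vcpu.h>

/* Per-cpu snapshot of the hypervisor-maintained runstate; assumed to be
 * registered with VCPUOP_register_runstate_memory_area during vcpu setup. */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

bool xen_vcpu_stolen(int vcpu)
{
	/* RUNSTATE_runnable: the vcpu wants to run but the hypervisor has
	 * given its physical CPU to someone else, so a yield from the
	 * sender may let it get scheduled and pick up the IPI sooner. */
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}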