Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/aarch32.c  20
-rw-r--r--  virt/kvm/arm/arm.c       4
-rw-r--r--  virt/kvm/arm/psci.c      2
-rw-r--r--  virt/kvm/async_pf.c      2
-rw-r--r--  virt/kvm/eventfd.c      17
-rw-r--r--  virt/kvm/kvm_main.c     39
6 files changed, 47 insertions, 37 deletions
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index efc84cbe8277..5abbe9b3c652 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -108,9 +108,9 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
unsigned long itbits, cond;
unsigned long cpsr = *vcpu_cpsr(vcpu);
- bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
+ bool is_arm = !(cpsr & PSR_AA32_T_BIT);
- if (is_arm || !(cpsr & COMPAT_PSR_IT_MASK))
+ if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
return;
cond = (cpsr & 0xe000) >> 13;
@@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
else
itbits = (itbits << 1) & 0x1f;
- cpsr &= ~COMPAT_PSR_IT_MASK;
+ cpsr &= ~PSR_AA32_IT_MASK;
cpsr |= cond << 13;
cpsr |= (itbits & 0x1c) << (10 - 2);
cpsr |= (itbits & 0x3) << 25;
@@ -138,7 +138,7 @@ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
bool is_thumb;
- is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
+ is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
if (is_thumb && !is_wide_instr)
*vcpu_pc(vcpu) += 2;
else
@@ -164,16 +164,16 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
unsigned long cpsr;
unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
- bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+ bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
- cpsr = mode | COMPAT_PSR_I_BIT;
+ cpsr = mode | PSR_AA32_I_BIT;
if (sctlr & (1 << 30))
- cpsr |= COMPAT_PSR_T_BIT;
+ cpsr |= PSR_AA32_T_BIT;
if (sctlr & (1 << 25))
- cpsr |= COMPAT_PSR_E_BIT;
+ cpsr |= PSR_AA32_E_BIT;
*vcpu_cpsr(vcpu) = cpsr;
@@ -192,7 +192,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
- prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
+ prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
}
/*
@@ -216,7 +216,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
fsr = &vcpu_cp15(vcpu, c5_DFSR);
}
- prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
+ prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);
*far = addr;
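The aarch32.c hunks above are a mechanical rename from the 32-bit COMPAT_PSR_* constants to the PSR_AA32_* names shared with the rest of the arm64 code. As a sketch of what the renamed constants stand for, the values below mirror the architectural AArch32 CPSR layout (shown for illustration only; the authoritative definitions live in the arm64 ptrace header, not in this diff):

/* Illustrative values only; they mirror the AArch32 CPSR encoding. */
#define PSR_AA32_MODE_ABT	0x00000017	/* Abort mode */
#define PSR_AA32_MODE_UND	0x0000001b	/* Undefined mode */
#define PSR_AA32_T_BIT		0x00000020	/* Thumb state */
#define PSR_AA32_I_BIT		0x00000080	/* IRQ mask */
#define PSR_AA32_A_BIT		0x00000100	/* Asynchronous abort mask */
#define PSR_AA32_E_BIT		0x00000200	/* Big-endian data */
#define PSR_AA32_IT_MASK	0x0600fc00	/* Thumb IT-state bits */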
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index ac658bd63196..c92053bc3f96 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -610,7 +610,7 @@ void kvm_arm_resume_guest(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.pause = false;
- swake_up(kvm_arch_vcpu_wq(vcpu));
+ swake_up_one(kvm_arch_vcpu_wq(vcpu));
}
}
@@ -618,7 +618,7 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
- swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+ swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
(!vcpu->arch.pause)));
if (vcpu->arch.power_off || vcpu->arch.pause) {
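The swait changes here, and the matching ones in psci.c, async_pf.c and kvm_main.c below, are an interface rename: the simple wait-queue API only supports exclusive waiters woken one at a time, and the functions were renamed to make that explicit. A minimal sketch of the waiter/waker pairing these conversions rely on, using hypothetical example_* helpers around the kvm_arch_vcpu_wq() accessor:

/* Sketch only: waiter and waker side of the exclusive swait API. */
static void example_vcpu_sleep(struct kvm_vcpu *vcpu)
{
	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);

	/* Enqueues the caller as an exclusive waiter until resumed. */
	swait_event_interruptible_exclusive(*wq,
			!vcpu->arch.power_off && !vcpu->arch.pause);
}

static void example_vcpu_resume(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pause = false;
	smp_mb();	/* make the flag visible before waking */
	swake_up_one(kvm_arch_vcpu_wq(vcpu));	/* wakes one exclusive waiter */
}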
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index c95ab4c5a475..9b73d3ad918a 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -155,7 +155,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
smp_mb(); /* Make sure the above is visible */
wq = kvm_arch_vcpu_wq(vcpu);
- swake_up(wq);
+ swake_up_one(wq);
return PSCI_RET_SUCCESS;
}
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 57bcb27dcf30..23c2519c5b32 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -107,7 +107,7 @@ static void async_pf_execute(struct work_struct *work)
trace_kvm_async_pf_completed(addr, gva);
if (swq_has_sleeper(&vcpu->wq))
- swake_up(&vcpu->wq);
+ swake_up_one(&vcpu->wq);
mmput(mm);
kvm_put_kvm(vcpu->kvm);
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 90d30fbe95ae..b20b751286fc 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work)
{
struct kvm_kernel_irqfd *irqfd =
container_of(work, struct kvm_kernel_irqfd, shutdown);
+ struct kvm *kvm = irqfd->kvm;
u64 cnt;
+ /* Make sure irqfd has been initialized in assign path. */
+ synchronize_srcu(&kvm->irq_srcu);
+
/*
* Synchronize with the wait-queue and unhook ourselves to prevent
* further events.
@@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
idx = srcu_read_lock(&kvm->irq_srcu);
irqfd_update(kvm, irqfd);
- srcu_read_unlock(&kvm->irq_srcu, idx);
list_add_tail(&irqfd->list, &kvm->irqfds.items);
@@ -402,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
if (events & EPOLLIN)
schedule_work(&irqfd->inject);
- /*
- * do not drop the file until the irqfd is fully initialized, otherwise
- * we might race against the EPOLLHUP
- */
- fdput(f);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
if (kvm_arch_has_irq_bypass()) {
irqfd->consumer.token = (void *)irqfd->eventfd;
@@ -421,6 +419,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
}
#endif
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+
+ /*
+ * do not drop the file until the irqfd is fully initialized, otherwise
+ * we might race against the EPOLLHUP
+ */
+ fdput(f);
return 0;
fail:
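The eventfd.c change reorders initialization so that kvm_irqfd_assign() holds kvm->irq_srcu across the whole setup and only drops the file afterwards, while irqfd_shutdown() now starts with synchronize_srcu(). A shutdown triggered by a racing EPOLLHUP therefore cannot tear the irqfd down before assignment has finished. A simplified sketch of that ordering, with hypothetical example_* helper names:

/* Sketch only: how the SRCU read side and synchronize_srcu() pair up. */
static int example_assign(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	int idx = srcu_read_lock(&kvm->irq_srcu);

	/* ...update routing, add to kvm->irqfds.items, register bypass... */

	srcu_read_unlock(&kvm->irq_srcu, idx);
	/* only now may the file reference be dropped */
	return 0;
}

static void example_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	/* Waits for any assign still inside the read-side section above. */
	synchronize_srcu(&irqfd->kvm->irq_srcu);
	/* ...now safe to unhook from the wait queue and free... */
}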
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8b47507faab5..0df592c4f09f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -273,7 +273,8 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
* kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
* barrier here.
*/
- if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
+ if (!kvm_arch_flush_remote_tlb(kvm)
+ || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
++kvm->stat.remote_tlb_flush;
cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
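The kvm_flush_remote_tlbs() hunk above lets an architecture flush remote TLBs itself; only when that hook is absent or fails does KVM fall back to broadcasting KVM_REQ_TLB_FLUSH. A sketch of the kind of weak default such a hook relies on (the definition is not part of this hunk and the return value is an assumption):

/* Assumed weak fallback: report "not supported" so the caller uses the
 * generic KVM_REQ_TLB_FLUSH broadcast. An architecture with a cheaper,
 * e.g. hypervisor-assisted, flush can override this. */
int __weak kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	return -ENOTSUPP;
}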
@@ -1169,7 +1170,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
n = kvm_dirty_bitmap_bytes(memslot);
- dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
+ dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
memset(dirty_bitmap_buffer, 0, n);
spin_lock(&kvm->mmu_lock);
@@ -1342,18 +1343,16 @@ static inline int check_user_page_hwpoison(unsigned long addr)
}
/*
- * The atomic path to get the writable pfn which will be stored in @pfn,
- * true indicates success, otherwise false is returned.
+ * The fast path to get the writable pfn which will be stored in @pfn,
+ * true indicates success, otherwise false is returned. It's also the
+ * only part that runs if we are in atomic context.
*/
-static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
- bool write_fault, bool *writable, kvm_pfn_t *pfn)
+static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
+ bool *writable, kvm_pfn_t *pfn)
{
struct page *page[1];
int npages;
- if (!(async || atomic))
- return false;
-
/*
* Fast pin a writable pfn only if it is a write fault request
* or the caller allows to map a writable pfn for a read fault
@@ -1497,7 +1496,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
/* we can do it either atomically or asynchronously, not both */
BUG_ON(atomic && async);
- if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
+ if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
return pfn;
if (atomic)
@@ -2127,16 +2126,22 @@ static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
+ int ret = -EINTR;
+ int idx = srcu_read_lock(&vcpu->kvm->srcu);
+
if (kvm_arch_vcpu_runnable(vcpu)) {
kvm_make_request(KVM_REQ_UNHALT, vcpu);
- return -EINTR;
+ goto out;
}
if (kvm_cpu_has_pending_timer(vcpu))
- return -EINTR;
+ goto out;
if (signal_pending(current))
- return -EINTR;
+ goto out;
- return 0;
+ ret = 0;
+out:
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ return ret;
}
/*
@@ -2172,7 +2177,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
kvm_arch_vcpu_blocking(vcpu);
for (;;) {
- prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
if (kvm_vcpu_check_block(vcpu) < 0)
break;
@@ -2214,7 +2219,7 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
wqp = kvm_arch_vcpu_wq(vcpu);
if (swq_has_sleeper(wqp)) {
- swake_up(wqp);
+ swake_up_one(wqp);
++vcpu->stat.halt_wakeup;
return true;
}
@@ -2563,7 +2568,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
if (arg)
goto out;
oldpid = rcu_access_pointer(vcpu->pid);
- if (unlikely(oldpid != current->pids[PIDTYPE_PID].pid)) {
+ if (unlikely(oldpid != task_pid(current))) {
/* The thread running this VCPU changed. */
struct pid *newpid;