author		Paolo Bonzini <pbonzini@redhat.com>	2022-05-24 09:43:31 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2022-05-25 05:13:40 -0400
commit		baec4f5a018fe2d708fc1022330dba04b38b5fe3
tree		92e1eba8171dd8f077d8c7dd3dda99d7ea159bb8	/arch/x86/kernel/kvm.c
parent		KVM: LAPIC: Drop pending LAPIC timer injection when canceling the timer
x86, kvm: use correct GFP flags for preemption disabled
Commit ddd7ed842627 ("x86/kvm: Alloc dummy async #PF token outside of
raw spinlock") leads to the following Smatch static checker warning:

	arch/x86/kernel/kvm.c:212 kvm_async_pf_task_wake()
	warn: sleeping in atomic context

arch/x86/kernel/kvm.c
    202  raw_spin_lock(&b->lock);
    203  n = _find_apf_task(b, token);
    204  if (!n) {
    205          /*
    206           * Async #PF not yet handled, add a dummy entry for the token.
    207           * Allocating the token must be down outside of the raw lock
    208           * as the allocator is preemptible on PREEMPT_RT kernels.
    209           */
    210          if (!dummy) {
    211                  raw_spin_unlock(&b->lock);
--> 212                  dummy = kzalloc(sizeof(*dummy), GFP_KERNEL);
                                                         ^^^^^^^^^^

Smatch thinks the caller has preempt disabled.  The `smdb.py preempt
kvm_async_pf_task_wake` output call tree is:

    sysvec_kvm_asyncpf_interrupt() <- disables preempt
    -> __sysvec_kvm_asyncpf_interrupt()
       -> kvm_async_pf_task_wake()

The caller is this:

arch/x86/kernel/kvm.c
    290  DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
    291  {
    292          struct pt_regs *old_regs = set_irq_regs(regs);
    293          u32 token;
    294
    295          ack_APIC_irq();
    296
    297          inc_irq_stat(irq_hv_callback_count);
    298
    299          if (__this_cpu_read(apf_reason.enabled)) {
    300                  token = __this_cpu_read(apf_reason.token);
    301                  kvm_async_pf_task_wake(token);
    302                  __this_cpu_write(apf_reason.token, 0);
    303                  wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
    304          }
    305
    306          set_irq_regs(old_regs);
    307  }

The DEFINE_IDTENTRY_SYSVEC() is a wrapper that calls this function from
call_on_irqstack_cond().  It's inside call_on_irqstack_cond() where
preempt is disabled (unless it's already disabled).  The
irq_enter/exit_rcu() functions disable/enable preempt.

Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
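As a quick illustration of the rule the fix relies on (a minimal sketch, not
taken from the patch; demo_node and demo_alloc_node are made-up names): any
allocation reachable while preemption or interrupts are disabled must not
sleep, so the non-sleeping GFP_ATOMIC flag is required, whereas GFP_KERNEL
may block waiting for memory and reproduces the warning above.

	/*
	 * Minimal sketch, not part of the patch: demo_node and
	 * demo_alloc_node are hypothetical.  This helper is assumed to be
	 * callable from a context with preemption disabled (e.g. an
	 * interrupt or sysvec handler), so it must not sleep.
	 */
	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_node {
		u32 token;
	};

	static struct demo_node *demo_alloc_node(u32 token)
	{
		/* GFP_ATOMIC: never sleeps, but may fail under memory pressure. */
		struct demo_node *n = kzalloc(sizeof(*n), GFP_ATOMIC);

		if (n)
			n->token = token;
		return n;
	}

The trade-off is that GFP_ATOMIC allocations draw on reserves and can fail,
which is why the code in the hunk below keeps looping on allocation failure
rather than assuming success.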
Diffstat (limited to 'arch/x86/kernel/kvm.c')
-rw-r--r--	arch/x86/kernel/kvm.c	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 35b3c5836703..1a3658f7e6d9 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -209,7 +209,7 @@ again:
 		 */
 		if (!dummy) {
 			raw_spin_unlock(&b->lock);
-			dummy = kzalloc(sizeof(*dummy), GFP_KERNEL);
+			dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);
 
 			/*
 			 * Continue looping on allocation failure, eventually