
Commit 916b7f4

keithbusch authored and bonzini committed
kvm: retry nx_huge_page_recovery_thread creation
A VMM may send a non-fatal signal to its threads, including vCPU tasks, at any time, and thus may signal vCPU tasks during KVM_RUN. If a vCPU task receives the signal while it's trying to spawn the huge page recovery vhost task, then KVM_RUN will fail due to copy_process() returning -ERESTARTNOINTR.

Rework call_once() to mark the call complete if and only if the called function succeeds, and plumb the function's true error code back to the call_once() invoker. This provides userspace with the correct, non-fatal error code so that the VMM doesn't terminate the VM on -ENOMEM, and allows a subsequent KVM_RUN to succeed by virtue of retrying creation of the NX huge page task.

Co-developed-by: Sean Christopherson <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
[implemented the kvm user side]
Signed-off-by: Keith Busch <[email protected]>
Message-ID: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
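
For context, the userspace contract this relies on: KVM_RUN already returns -1 with errno == EINTR when a signal is pending, and VMM run loops treat that as retryable. A minimal, hypothetical run loop (the vcpu_fd handle and its setup are assumed, not part of this commit):

/*
 * Hypothetical VMM vCPU loop; vcpu_fd is an assumed, already-created
 * vCPU file descriptor. A pending signal makes KVM_RUN fail with
 * EINTR, and the VMM simply retries; this commit makes the transient
 * thread-creation failure restartable in the same way instead of
 * surfacing as a fatal-looking -ENOMEM.
 */
#include <errno.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int run_vcpu(int vcpu_fd)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
			if (errno == EINTR)
				continue;	/* signaled; just retry KVM_RUN */
			return -errno;		/* genuine failure */
		}
		/* ... handle the exit reason in the mmap'ed struct kvm_run ... */
	}
}
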
1 parent cb38090 commit 916b7f4

File tree

2 files changed: +38 −19 lines changed

arch/x86/kvm/mmu/mmu.c

+4 −6

@@ -7460,7 +7460,7 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
 	return true;
 }
 
-static void kvm_mmu_start_lpage_recovery(struct once *once)
+static int kvm_mmu_start_lpage_recovery(struct once *once)
 {
 	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
 	struct kvm *kvm = container_of(ka, struct kvm, arch);
@@ -7472,23 +7472,21 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
 				      kvm, "kvm-nx-lpage-recovery");
 
 	if (IS_ERR(nx_thread))
-		return;
+		return PTR_ERR(nx_thread);
 
 	vhost_task_start(nx_thread);
 
 	/* Make the task visible only once it is fully started. */
 	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
+	return 0;
 }
 
 int kvm_mmu_post_init_vm(struct kvm *kvm)
 {
 	if (nx_hugepage_mitigation_hard_disabled)
 		return 0;
 
-	call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
-	if (!kvm->arch.nx_huge_page_recovery_thread)
-		return -ENOMEM;
-	return 0;
+	return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
 }
 
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
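
The PTR_ERR() hunk leans on the kernel's pointer-encoded errno convention: vhost_task_create() returns either a valid task pointer or a negative errno disguised as one. A simplified userspace model of that convention (a sketch in the spirit of include/linux/err.h, not the real header):

#include <stdio.h>

#define MAX_ERRNO	4095

/*
 * Simplified model of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(): the
 * top 4095 values of the address space are never valid pointers, so a
 * negative errno can travel inside a pointer return value.
 */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	/* As if vhost_task_create() had failed with -EINTR (-4). */
	void *thread = ERR_PTR(-4);

	if (IS_ERR(thread))
		printf("creation failed: %ld\n", PTR_ERR(thread));
	return 0;
}
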

include/linux/call_once.h

+34 −13

@@ -26,20 +26,41 @@ do {								\
 	__once_init((once), #once, &__key);			\
 } while (0)
 
-static inline void call_once(struct once *once, void (*cb)(struct once *))
+/*
+ * call_once - Ensure a function has been called exactly once
+ *
+ * @once: Tracking struct
+ * @cb: Function to be called
+ *
+ * If @once has never completed successfully before, call @cb and, if
+ * it returns a zero or positive value, mark @once as completed.  Return
+ * the value returned by @cb.
+ *
+ * If @once has completed successfully before, return 0.
+ *
+ * The call to @cb is implicitly surrounded by a mutex, though for
+ * efficiency the function avoids taking it after the first call.
+ */
+static inline int call_once(struct once *once, int (*cb)(struct once *))
 {
-	/* Pairs with atomic_set_release() below. */
-	if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
-		return;
-
-	guard(mutex)(&once->lock);
-	WARN_ON(atomic_read(&once->state) == ONCE_RUNNING);
-	if (atomic_read(&once->state) != ONCE_NOT_STARTED)
-		return;
-
-	atomic_set(&once->state, ONCE_RUNNING);
-	cb(once);
-	atomic_set_release(&once->state, ONCE_COMPLETED);
+	int r, state;
+
+	/* Pairs with atomic_set_release() below. */
+	if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
+		return 0;
+
+	guard(mutex)(&once->lock);
+	state = atomic_read(&once->state);
+	if (unlikely(state != ONCE_NOT_STARTED))
+		return WARN_ON_ONCE(state != ONCE_COMPLETED) ? -EINVAL : 0;
+
+	atomic_set(&once->state, ONCE_RUNNING);
+	r = cb(once);
+	if (r < 0)
+		atomic_set(&once->state, ONCE_NOT_STARTED);
+	else
+		atomic_set_release(&once->state, ONCE_COMPLETED);
+	return r;
 }
 
 #endif /* _LINUX_CALL_ONCE_H */
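
To see the new retry semantics outside the kernel, here is a minimal userspace model of the reworked call_once(), using C11 atomics and a pthread mutex in place of atomic_t and guard(mutex). The names mirror the header above, but this is an illustrative sketch, not kernel code:

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { ONCE_NOT_STARTED, ONCE_RUNNING, ONCE_COMPLETED };

struct once {
	atomic_int state;
	pthread_mutex_t lock;
};

static int call_once(struct once *once, int (*cb)(struct once *))
{
	int r, state;

	/* Lockless fast path; pairs with the release store below. */
	if (atomic_load_explicit(&once->state, memory_order_acquire) == ONCE_COMPLETED)
		return 0;

	pthread_mutex_lock(&once->lock);
	state = atomic_load_explicit(&once->state, memory_order_relaxed);
	if (state != ONCE_NOT_STARTED) {
		/* The kernel WARNs here: ONCE_RUNNING is impossible under the lock. */
		pthread_mutex_unlock(&once->lock);
		return state == ONCE_COMPLETED ? 0 : -EINVAL;
	}

	atomic_store_explicit(&once->state, ONCE_RUNNING, memory_order_relaxed);
	r = cb(once);
	if (r < 0)
		/* Failure: reset so a later caller retries @cb. */
		atomic_store_explicit(&once->state, ONCE_NOT_STARTED, memory_order_relaxed);
	else
		/* Success: latch completion for every future caller. */
		atomic_store_explicit(&once->state, ONCE_COMPLETED, memory_order_release);
	pthread_mutex_unlock(&once->lock);
	return r;
}

/* Callback that fails once, as if interrupted by a signal, then succeeds. */
static int attempts;

static int start_worker(struct once *once)
{
	(void)once;
	return ++attempts == 1 ? -EINTR : 0;
}

int main(void)
{
	struct once once = {
		.state = ONCE_NOT_STARTED,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	printf("first:  %d\n", call_once(&once, start_worker)); /* -EINTR: cb failed */
	printf("second: %d\n", call_once(&once, start_worker)); /* 0: cb retried and completed */
	printf("third:  %d\n", call_once(&once, start_worker)); /* 0: fast path, cb not called */
	return 0;
}

The key behavioral change is visible in the first two calls: a negative return from the callback leaves the state at ONCE_NOT_STARTED instead of latching ONCE_COMPLETED, so the next caller retries rather than silently succeeding with no worker running.
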
