1 From b68844a40cf6580eded14bf33fbe37a2f5f63f7b Mon Sep 17 00:00:00 2001
2 From: Marcelo Tosatti <mtosatti@redhat.com>
3 Date: Wed, 8 Apr 2015 20:33:24 -0300
4 Subject: [PATCH 208/366] KVM: use simple waitqueue for vcpu->wq
8 On -RT, an emulated LAPIC timer instance has the following path:
11 2) ksoftirqd is scheduled
12 3) ksoftirqd wakes up vcpu thread
13 4) vcpu thread is scheduled
15 This extra context switch introduces unnecessary latency in the
16 LAPIC path for a KVM guest.
20 Allow waking up vcpu thread from hardirq context,
21 thus avoiding the need for ksoftirqd to be scheduled.
23 Normal waitqueues make use of spinlocks, which on -RT
24 are sleepable locks. Therefore, waking up a waitqueue
25 waiter involves locking a sleeping lock, which
26 is not allowed from hard interrupt context.
28 cyclictest command line:
29 # cyclictest -m -n -q -p99 -l 1000000 -h60 -D 1m
31 This patch reduces the average latency in my tests from 14us to 11us.
33 Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
34 Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
36 arch/arm/kvm/arm.c | 8 ++++----
37 arch/arm/kvm/psci.c | 4 ++--
38 arch/powerpc/include/asm/kvm_host.h | 4 ++--
39 arch/powerpc/kvm/book3s_hv.c | 23 +++++++++++------------
40 arch/s390/include/asm/kvm_host.h | 2 +-
41 arch/s390/kvm/interrupt.c | 4 ++--
42 arch/x86/kvm/lapic.c | 6 +++---
43 include/linux/kvm_host.h | 4 ++--
44 virt/kvm/async_pf.c | 4 ++--
45 virt/kvm/kvm_main.c | 16 ++++++++--------
46 10 files changed, 37 insertions(+), 38 deletions(-)
48 diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
49 index d7bef21..0310e51 100644
50 --- a/arch/arm/kvm/arm.c
51 +++ b/arch/arm/kvm/arm.c
52 @@ -496,18 +496,18 @@ static void kvm_arm_resume_guest(struct kvm *kvm)
53 struct kvm_vcpu *vcpu;
55 kvm_for_each_vcpu(i, vcpu, kvm) {
56 - wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
57 + struct swait_head *wq = kvm_arch_vcpu_wq(vcpu);
59 vcpu->arch.pause = false;
60 - wake_up_interruptible(wq);
61 + swait_wake_interruptible(wq);
65 static void vcpu_sleep(struct kvm_vcpu *vcpu)
67 - wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
68 + struct swait_head *wq = kvm_arch_vcpu_wq(vcpu);
70 - wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
71 + swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
72 (!vcpu->arch.pause)));
75 diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
76 index a9b3b90..08148c4 100644
77 --- a/arch/arm/kvm/psci.c
78 +++ b/arch/arm/kvm/psci.c
79 @@ -70,7 +70,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
81 struct kvm *kvm = source_vcpu->kvm;
82 struct kvm_vcpu *vcpu = NULL;
83 - wait_queue_head_t *wq;
84 + struct swait_head *wq;
86 unsigned long context_id;
87 phys_addr_t target_pc;
88 @@ -119,7 +119,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
89 smp_mb(); /* Make sure the above is visible */
91 wq = kvm_arch_vcpu_wq(vcpu);
92 - wake_up_interruptible(wq);
93 + swait_wake_interruptible(wq);
95 return PSCI_RET_SUCCESS;
97 diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
98 index cfa758c..b439518 100644
99 --- a/arch/powerpc/include/asm/kvm_host.h
100 +++ b/arch/powerpc/include/asm/kvm_host.h
101 @@ -286,7 +286,7 @@ struct kvmppc_vcore {
102 struct list_head runnable_threads;
103 struct list_head preempt_list;
105 - wait_queue_head_t wq;
106 + struct swait_head wq;
107 spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
110 @@ -626,7 +626,7 @@ struct kvm_vcpu_arch {
114 - wait_queue_head_t *wqp;
115 + struct swait_head *wqp;
116 struct kvmppc_vcore *vcore;
119 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
120 index a7352b5..32be32b 100644
121 --- a/arch/powerpc/kvm/book3s_hv.c
122 +++ b/arch/powerpc/kvm/book3s_hv.c
123 @@ -114,11 +114,11 @@ static bool kvmppc_ipi_thread(int cpu)
124 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
127 - wait_queue_head_t *wqp;
128 + struct swait_head *wqp;
130 wqp = kvm_arch_vcpu_wq(vcpu);
131 - if (waitqueue_active(wqp)) {
132 - wake_up_interruptible(wqp);
133 + if (swaitqueue_active(wqp)) {
134 + swait_wake_interruptible(wqp);
135 ++vcpu->stat.halt_wakeup;
138 @@ -707,8 +707,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
139 tvcpu->arch.prodded = 1;
141 if (vcpu->arch.ceded) {
142 - if (waitqueue_active(&vcpu->wq)) {
143 - wake_up_interruptible(&vcpu->wq);
144 + if (swaitqueue_active(&vcpu->wq)) {
145 + swait_wake_interruptible(&vcpu->wq);
146 vcpu->stat.halt_wakeup++;
149 @@ -1447,7 +1447,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
150 INIT_LIST_HEAD(&vcore->runnable_threads);
151 spin_lock_init(&vcore->lock);
152 spin_lock_init(&vcore->stoltb_lock);
153 - init_waitqueue_head(&vcore->wq);
154 + init_swait_head(&vcore->wq);
155 vcore->preempt_tb = TB_NIL;
156 vcore->lpcr = kvm->arch.lpcr;
157 vcore->first_vcpuid = core * threads_per_subcore;
158 @@ -2519,10 +2519,9 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
160 struct kvm_vcpu *vcpu;
162 + DEFINE_SWAITER(wait);
166 - prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
167 + swait_prepare(&vc->wq, &wait, TASK_INTERRUPTIBLE);
170 * Check one last time for pending exceptions and ceded state after
171 @@ -2536,7 +2535,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
175 - finish_wait(&vc->wq, &wait);
176 + swait_finish(&vc->wq, &wait);
180 @@ -2544,7 +2543,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
181 trace_kvmppc_vcore_blocked(vc, 0);
182 spin_unlock(&vc->lock);
184 - finish_wait(&vc->wq, &wait);
185 + swait_finish(&vc->wq, &wait);
186 spin_lock(&vc->lock);
187 vc->vcore_state = VCORE_INACTIVE;
188 trace_kvmppc_vcore_blocked(vc, 1);
189 @@ -2600,7 +2599,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
190 kvmppc_start_thread(vcpu, vc);
191 trace_kvm_guest_enter(vcpu);
192 } else if (vc->vcore_state == VCORE_SLEEPING) {
194 + swait_wake(&vc->wq);
198 diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
199 index e9a983f..2a84c76 100644
200 --- a/arch/s390/include/asm/kvm_host.h
201 +++ b/arch/s390/include/asm/kvm_host.h
202 @@ -427,7 +427,7 @@ struct kvm_s390_irq_payload {
203 struct kvm_s390_local_interrupt {
205 struct kvm_s390_float_interrupt *float_int;
206 - wait_queue_head_t *wq;
207 + struct swait_head *wq;
209 DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
210 struct kvm_s390_irq_payload irq;
211 diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
212 index 6a75352..8b4516b 100644
213 --- a/arch/s390/kvm/interrupt.c
214 +++ b/arch/s390/kvm/interrupt.c
215 @@ -868,13 +868,13 @@ no_timer:
217 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
219 - if (waitqueue_active(&vcpu->wq)) {
220 + if (swaitqueue_active(&vcpu->wq)) {
222 * The vcpu gave up the cpu voluntarily, mark it as a good
225 vcpu->preempted = true;
226 - wake_up_interruptible(&vcpu->wq);
227 + swait_wake_interruptible(&vcpu->wq);
228 vcpu->stat.halt_wakeup++;
231 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
232 index b8393e2..906a1ec 100644
233 --- a/arch/x86/kvm/lapic.c
234 +++ b/arch/x86/kvm/lapic.c
235 @@ -1195,7 +1195,7 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
236 static void apic_timer_expired(struct kvm_lapic *apic)
238 struct kvm_vcpu *vcpu = apic->vcpu;
239 - wait_queue_head_t *q = &vcpu->wq;
240 + struct swait_head *q = &vcpu->wq;
241 struct kvm_timer *ktimer = &apic->lapic_timer;
243 if (atomic_read(&apic->lapic_timer.pending))
244 @@ -1204,8 +1204,8 @@ static void apic_timer_expired(struct kvm_lapic *apic)
245 atomic_inc(&apic->lapic_timer.pending);
246 kvm_set_pending_timer(vcpu);
248 - if (waitqueue_active(q))
249 - wake_up_interruptible(q);
250 + if (swaitqueue_active(q))
251 + swait_wake_interruptible(q);
253 if (apic_lvtt_tscdeadline(apic))
254 ktimer->expired_tscdeadline = ktimer->tscdeadline;
255 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
256 index c923350..d856ccd 100644
257 --- a/include/linux/kvm_host.h
258 +++ b/include/linux/kvm_host.h
259 @@ -243,7 +243,7 @@ struct kvm_vcpu {
261 int guest_fpu_loaded, guest_xcr0_loaded;
262 unsigned char fpu_counter;
263 - wait_queue_head_t wq;
264 + struct swait_head wq;
268 @@ -794,7 +794,7 @@ static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
272 -static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
273 +static inline struct swait_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
275 #ifdef __KVM_HAVE_ARCH_WQP
276 return vcpu->arch.wqp;
277 diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
278 index 4f70d12..57d9436 100644
279 --- a/virt/kvm/async_pf.c
280 +++ b/virt/kvm/async_pf.c
281 @@ -98,8 +98,8 @@ static void async_pf_execute(struct work_struct *work)
282 * This memory barrier pairs with prepare_to_wait's set_current_state()
285 - if (waitqueue_active(&vcpu->wq))
286 - wake_up_interruptible(&vcpu->wq);
287 + if (swaitqueue_active(&vcpu->wq))
288 + swait_wake_interruptible(&vcpu->wq);
291 kvm_put_kvm(vcpu->kvm);
292 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
293 index 336ed26..abdf387 100644
294 --- a/virt/kvm/kvm_main.c
295 +++ b/virt/kvm/kvm_main.c
296 @@ -229,7 +229,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
299 vcpu->halt_poll_ns = 0;
300 - init_waitqueue_head(&vcpu->wq);
301 + init_swait_head(&vcpu->wq);
302 kvm_async_pf_vcpu_init(vcpu);
305 @@ -2005,7 +2005,7 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
306 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
310 + DEFINE_SWAITER(wait);
314 @@ -2030,7 +2030,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
315 kvm_arch_vcpu_blocking(vcpu);
318 - prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
319 + swait_prepare(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
321 if (kvm_vcpu_check_block(vcpu) < 0)
323 @@ -2039,7 +2039,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
327 - finish_wait(&vcpu->wq, &wait);
328 + swait_finish(&vcpu->wq, &wait);
331 kvm_arch_vcpu_unblocking(vcpu);
332 @@ -2071,11 +2071,11 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
336 - wait_queue_head_t *wqp;
337 + struct swait_head *wqp;
339 wqp = kvm_arch_vcpu_wq(vcpu);
340 - if (waitqueue_active(wqp)) {
341 - wake_up_interruptible(wqp);
342 + if (swaitqueue_active(wqp)) {
343 + swait_wake_interruptible(wqp);
344 ++vcpu->stat.halt_wakeup;
347 @@ -2176,7 +2176,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
351 - if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
352 + if (swaitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
354 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))