+ // 'user_mode' is only consumed by the debug-build assertion below;
+ // the cast silences the unused-parameter warning in non-debug builds.
+ (void)user_mode;
+ assert_kdb (user_mode);
+
+ // Resuming requires the thread to be in extended-vCPU mode. If it is
+ // not, restore the kernel-side vCPU state and fail with EInval.
+ if (EXPECT_FALSE(!(ctxt->state(true) & Thread_ext_vcpu_enabled)))
+   {
+     ctxt->arch_load_vcpu_kern_state(vcpu, true);
+     return -L4_err::EInval;
+   }
+
+ // Software VMCS area at a fixed 0x400-byte offset inside the vCPU
+ // state. NOTE(review): offset is hard-coded here — presumably mirrors
+ // the user-level vCPU state layout; confirm against its definition.
+ void *vmcs_s = reinterpret_cast<char *>(vcpu) + 0x400;
+
+ // Resume loop: keep re-entering the guest until we hit an error, a
+ // non-IRQ exit, a pending-IRQ condition, or a delivered message.
+ for (;;)
+   {
+     // in the case of disabled IRQs and a pending IRQ directly simulate an
+     // external interrupt intercept
+     if (   !(vcpu->_saved_state & Vcpu_state::F_irqs)
+         && (vcpu->sticky_flags & Vcpu_state::Sf_irq_pending))
+       {
+         // XXX: check if this is correct, we set external irq exit as reason
+         write<Unsigned32>(vmcs_s, Vmx::F_exit_reason, 1);
+         ctxt->arch_load_vcpu_kern_state(vcpu, true);
+         return 1; // return 1 to indicate pending IRQs (IPCs)
+       }
+
+     // Enter the guest; r encodes the exit disposition (see below).
+     int r = do_resume_vcpu(ctxt, vcpu, vmcs_s);
+
+     // test for error or non-IRQ exit reason
+     // (r < 0: error, r == 0: exit to be handled by user level) —
+     // either way, restore kernel state and hand the result up.
+     if (r <= 0)
+       {
+         ctxt->arch_load_vcpu_kern_state(vcpu, true);
+         return r;
+       }
+
+     // check for IRQ exits and allow to handle the IRQ
+     // (opens a preemption window so the pending interrupt can run)
+     if (r == 1)
+       Proc::preemption_point();
+
+     // Check if the current context got a message delivered.
+     // This is done by testing for a valid continuation.
+     // When a continuation is set we have to directly
+     // leave the kernel to not overwrite the vcpu-regs
+     // with bogus state.
+     Thread *t = nonull_static_cast<Thread*>(ctxt);
+     if (t->continuation_test_and_restore())
+       {
+         // fast_return_to_user does not return — it leaves the kernel
+         // at the vCPU entry point with the user-visible state pointer.
+         ctxt->arch_load_vcpu_kern_state(vcpu, true);
+         t->fast_return_to_user(vcpu->_entry_ip, vcpu->_entry_sp,
+                                t->vcpu_state().usr().get());
+       }
+   }