// Low-level vCPU resume: restores the given trap state / return frame and
// transfers control back to the (guest/user) context.  Presumably implemented
// in architecture-specific code — not visible in this file.  Never returns
// (FIASCO_NORETURN).
extern "C" void vcpu_resume(Trap_state *, Return_frame *sp)
FIASCO_FASTCALL FIASCO_NORETURN;
7 // --------------------------------------------------------------------------
// Deliver a page fault to the vCPU handler instead of the regular kernel
// page-fault path.
//
// @param pfa  faulting address
// @param err  architecture-specific page-fault error code
// @param ip   instruction pointer at the time of the fault
//
// If the vCPU currently accepts page faults, the vCPU is switched to kernel
// mode, the fault (pfa/err) is recorded in the vCPU trap state, and an upcall
// to the vCPU entry point is performed via vcpu_save_state_and_upcall().
// NOTE(review): braces and any return statements of the original body are
// elided in this excerpt; `ip` is not used in any visible line — confirm
// against the full source.
PUBLIC inline NEEDS["logdefs.h", "vcpu.h"]
Thread::vcpu_pagefault(Address pfa, Mword err, Mword ip)
if (vcpu_pagefaults_enabled())
// Switch the vCPU into kernel mode before delivering the event.
vcpu_enter_kernel_mode();
// Trace-buffer logging of the event (only the payload assignments are
// visible here; the rest of the LOG_TRACE body is elided).
LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
Vcpu_log *l = tbe->payload<Vcpu_log>();
l->state = vcpu_state()->_saved_state;
l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
// Record the fault in the saved trap state and enter the vCPU handler.
vcpu_state()->_ts.set_pagefault(pfa, err);
vcpu_save_state_and_upcall();
// vcpu_resume system call.
//
// @param tag   message tag of the invoking IPC (word/item counts)
// @param utcb  caller's UTCB, holding the send items (flexpages) to map
// @return      error via commit_result()/commit_error(); on the success paths
//              the function does not return normally — it resumes the vCPU
//              context through fast_return_to_user() or vcpu_resume().
//
// Visible steps: validate the caller, optionally (re)bind the vCPU user
// task, map all flexpages sent as items, pull in a pending IRQ as an IPC
// upcall, then either fast-return into the vCPU entry point or perform a
// full trap-state resume.
// NOTE(review): braces and the declarations of `ts` (Trap_state) and `sp`
// are elided in this excerpt; comments below describe only visible lines.
PRIVATE inline NEEDS[Thread::fast_return_to_user]
Thread::sys_vcpu_resume(L4_msg_tag const &tag, Utcb *utcb)
// Only the current thread may resume itself, and only with vCPU mode enabled.
if (this != current() || !(state() & Thread_vcpu_enabled))
return commit_result(-L4_err::EInval);
Obj_space *s = space()->obj_space();
// A valid capability in user_task selects (or replaces) the task the vCPU
// executes its user mode in.
L4_obj_ref user_task = vcpu_state()->user_task;
if (user_task.valid())
unsigned char task_rights = 0;
Task *task = Kobject::dcast<Task*>(s->lookup_local(user_task.cap(),
// Write rights on the task capability are required to bind it.
if (EXPECT_FALSE(task && !(task_rights & L4_fpage::W)))
return commit_result(-L4_err::EPerm);
if (task != vcpu_user_space())
vcpu_set_user_space(task);
// Consume the request so it is not re-applied on the next resume.
vcpu_state()->user_task = L4_obj_ref();
// An Ipc_reply-flagged (invalid) ref detaches the current user task.
else if (user_task.flags() == L4_obj_ref::Ipc_reply)
vcpu_set_user_space(0);
// Map every flexpage carried as a send item into the vCPU user space.
L4_snd_item_iter snd_items(utcb, tag.words());
int items = tag.items();
for (; items && snd_items.more(); --items)
if (EXPECT_FALSE(!snd_items.next()))
L4_snd_item_iter::Item const *const item = snd_items.get();
L4_fpage sfp(item->d);
L4_error err = fpage_map(space(), sfp,
vcpu_user_space(), L4_fpage::all_spaces(),
// Abort the whole resume on the first failed mapping.
if (EXPECT_FALSE(!err.ok()))
return commit_error(utcb, err);
// If the vCPU has IRQs enabled and one is pending, receive it with a zero
// timeout so it is delivered as an IPC upcall on this resume.
if ((vcpu_state()->_saved_state & Vcpu_state::F_irqs) && vcpu_irqs_pending())
assert_kdb(cpu_lock.test());
do_ipc(L4_msg_tag(), 0, 0, true, 0,
L4_timeout_pair(L4_timeout::Zero, L4_timeout::Zero),
&vcpu_state()->_ipc_regs, 7); // NOTE(review): magic rights value 7 — verify
if (EXPECT_TRUE(!vcpu_state()->_ipc_regs.tag().has_error()))
vcpu_state()->_ts.set_ipc_upcall();
// Choose the stack for the upcall: the registered entry SP when the vCPU
// was in user mode, otherwise the SP saved in the trap state.
if (vcpu_state()->_saved_state & Vcpu_state::F_user_mode)
sp = vcpu_state()->_entry_sp;
sp = vcpu_state()->_ts.sp();
LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
Vcpu_log *l = tbe->payload<Vcpu_log>();
l->state = vcpu_state()->state;
l->ip = vcpu_state()->_entry_ip;
l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
// Deliver the upcall at the registered entry IP; does not return here.
fast_return_to_user(vcpu_state()->_entry_ip, sp, false);
// --- No upcall pending: restore the saved vCPU state and resume it. ---
vcpu_state()->state = vcpu_state()->_saved_state;
memcpy(&ts, &vcpu_state()->_ts, sizeof(Trap_state));
assert_kdb(cpu_lock.test());
// Scrub privileged bits from the user-provided trap state before resuming.
ts.sanitize_user_state();
// Resuming into user mode requires a bound user task; force trap,
// exception and debug-exception interception while user mode runs.
if (vcpu_state()->state & Vcpu_state::F_user_mode)
if (!vcpu_user_space())
return commit_result(-L4_err::EInval);
vcpu_state()->state |= Vcpu_state::F_traps | Vcpu_state::F_exceptions
| Vcpu_state::F_debug_exc;
state_add_dirty(Thread_vcpu_user_mode);
// Track whether the vCPU may use the FPU in user mode (else-branch brace
// structure is elided in this excerpt).
if (!(vcpu_state()->state & Vcpu_state::F_fpu_enabled))
state_add_dirty(Thread_vcpu_fpu_disabled);
state_del_dirty(Thread_vcpu_fpu_disabled);
vcpu_resume_user_arch();
// Switch the address space over to the vCPU user task.
vcpu_user_space()->switchin_context(space());
LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
Vcpu_log *l = tbe->payload<Vcpu_log>();
l->state = vcpu_state()->state;
l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
// Hand the sanitized trap state to the low-level resume path (noreturn).
vcpu_resume(&ts, regs());