1 //----------------------------------------------------------------------------
2 IMPLEMENTATION [amd64]:
// Fast path back to user mode: installs the user code segment and an
// interrupt-enabled flags word in the return frame, then (in the partly
// elided asm below) irets to user with `arg` passed in %rdi.
// NOTE(review): this listing is a sampled excerpt — the asm statement the
// visible operand line belongs to is not fully shown here.
5 PUBLIC template<typename T> inline
7 Thread::fast_return_to_user(Mword ip, Mword sp, T arg)
// Preconditions: CPU lock held, and we are the currently running thread.
9 assert(cpu_lock.test());
10 assert(current() == this);
// Select the user code segment and make sure interrupts are enabled
// once we are back in user mode.
14 regs()->cs(Gdt::gdt_code_user | Gdt::Selector_user);
15 regs()->flags(EFLAGS_IF);
// asm inputs: the return frame pointer in some register, and `arg`
// pinned to %rdi ("D") — presumably the first user-level argument.
20 : "r" (static_cast<Return_frame*>(regs())), "D"(arg)
// Architecture-specific thread system calls, dispatched on the opcode in
// utcb->values[0]. Returns an L4 error/result tag.
// NOTE(review): excerpt is elided — case labels/braces between the visible
// lines are missing from this listing.
25 PROTECTED inline NEEDS[Thread::sys_gdt_x86]
27 Thread::invoke_arch(L4_msg_tag tag, Utcb *utcb)
29 switch (utcb->values[0] & Opcode_mask)
31 case Op_gdt_x86: return sys_gdt_x86(tag, utcb);
32 case Op_set_segment_base_amd64:
// (an elided validity check failed) -> invalid arguments
34 return commit_result(-L4_err::EInval);
// The upper 16 bits of values[0] select which segment base to set
// (the elided case labels presumably distinguish FS from GS).
35 switch (utcb->values[0] >> 16)
39 _fs_base = utcb->values[1];
// If we change the base of the running thread, write the MSR now;
// otherwise the cached _fs_base is presumably loaded when this
// thread is switched to — confirm against the context-switch path.
40 if (current() == this)
41 Cpu::wrmsr(_fs_base, MSR_FS_BASE);
46 _gs_base = utcb->values[1];
47 if (current() == this)
48 Cpu::wrmsr(_gs_base, MSR_GS_BASE);
// Unknown sub-operation
51 default: return commit_result(-L4_err::EInval);
53 return Kobject_iface::commit_result(0);
// Unknown opcode
55 return commit_result(-L4_err::ENosys);
// User stack pointer accessor: while a triggered exception is pending the
// real user SP lives in the exception continuation, otherwise it is read
// from the entry frame.
61 Thread::user_sp() const
62 { return exception_triggered()?_exc_cont.sp(regs()):regs()->sp(); }
// Setter counterpart of user_sp(): while a triggered exception is pending
// the write is redirected into the exception continuation.
66 Thread::user_sp(Mword sp)
68 if (exception_triggered())
69 _exc_cont.sp(regs(), sp);
// NOTE(review): the else branch (presumably regs()->sp(sp)) is elided
// from this excerpt.
// Arm the exception continuation with the given return handler, but only
// if no triggered exception is pending yet — a second trigger is ignored
// (see the original comment kept below).
76 Thread::do_trigger_exception(Entry_frame *r, void *ret_handler)
78 if (!exception_triggered())
80 _exc_cont.activate(r, ret_handler);
83 // else ignore change of IP because triggered exception already pending
// Restore the register state saved by a triggered exception back into the
// thread's entry frame (undoes the continuation set up by
// do_trigger_exception()).
89 Thread::restore_exc_state()
91 _exc_cont.restore(regs());
// Locate the Return_frame embedded at the tail of a Trap_state: take the
// first byte past the Trap_state object and step one Return_frame back.
96 Thread::trap_state_to_rf(Trap_state *ts)
98 char *im = reinterpret_cast<char*>(ts + 1);
99 return reinterpret_cast<Return_frame*>(im)-1;
// Exception-IPC reply path: copy register state from the sender's UTCB
// into the receiver's pending Trap_state, sanitizing user-controllable
// flag bits on the way. Returns the result of the trailing item transfer.
// NOTE(review): excerpt is elided — some interior lines (braces, the
// restore of `cs`, the final return) are missing from this listing.
102 PRIVATE static inline NEEDS[Thread::trap_state_to_rf, Thread::sanitize_user_flags]
103 bool FIASCO_WARN_RESULT
104 Thread::copy_utcb_to_ts(L4_msg_tag const &tag, Thread *snd, Thread *rcv,
105 L4_fpage::Rights rights)
// The message must carry at least the register words.
107 if (EXPECT_FALSE(tag.words() < Ts::Reg_words))
110 Trap_state *ts = (Trap_state*)rcv->_utcb_handler;
// Save the current code selector; the sender must not be able to
// overwrite it (see comment near the end of the block).
111 Unsigned32 cs = ts->cs();
112 Utcb *snd_utcb = snd->utcb().access();
114 if (EXPECT_FALSE(rcv->exception_triggered()))
116 // triggered exception pending
// Copy only the general registers; the iret part goes into the
// exception continuation below.
117 Mem::memcpy_mwords(ts, snd_utcb->values, Ts::Reg_words);
118 Continuation::User_return_frame const *urfp
119 = reinterpret_cast<Continuation::User_return_frame const *>
120 ((char*)&snd_utcb->values[Ts::Iret_offset]);
// access_once(): read the user-provided frame exactly once so the
// sanitized copy cannot be changed underneath us afterwards.
122 Continuation::User_return_frame urf = access_once(urfp);
// Never accept dangerous flag bits from user state.
125 urf.flags(sanitize_user_flags(urf.flags()));
126 rcv->_exc_cont.set(trap_state_to_rf(ts), &urf);
// No exception pending: copy the full register set including the
// iret frame directly into the trap state.
130 Mem::memcpy_mwords(ts, snd_utcb->values, Ts::Words);
132 ts->flags(sanitize_user_flags(ts->flags()));
133 // don't allow to overwrite the code selector!
// NOTE(review): the line that writes the saved `cs` back into ts is
// elided here; `cs` captured above would otherwise be unused.
// FPU state is transferred only when requested and write rights exist.
137 if (tag.transfer_fpu() && (rights & L4_fpage::Rights::W()))
138 snd->transfer_fpu(rcv);
140 bool ret = transfer_msg_items(tag, snd, snd_utcb,
141 rcv, rcv->utcb().access(), rights);
// The receiver leaves in-exception state once the reply is applied.
143 rcv->state_del(Thread_in_exception);
// Exception-IPC send path: copy the sender's Trap_state into the
// receiver's UTCB so the exception handler can inspect it.
// NOTE(review): excerpt is elided — braces and the return statement are
// missing from this listing.
147 PRIVATE static inline NEEDS[Thread::trap_state_to_rf]
148 bool FIASCO_WARN_RESULT
149 Thread::copy_ts_to_utcb(L4_msg_tag const &, Thread *snd, Thread *rcv,
150 L4_fpage::Rights rights)
152 Trap_state *ts = (Trap_state*)snd->_utcb_handler;
153 Utcb *rcv_utcb = rcv->utcb().access();
// Hold the CPU lock while reading the continuation/trap state.
155 auto guard = lock_guard(cpu_lock);
156 if (EXPECT_FALSE(snd->exception_triggered()))
// Triggered exception pending: registers + error code come from the
// trap state, the iret part from the exception continuation.
158 Mem::memcpy_mwords(rcv_utcb->values, ts, Ts::Reg_words + Ts::Code_words);
159 Continuation::User_return_frame *d
160 = reinterpret_cast<Continuation::User_return_frame *>
161 ((char*)&rcv_utcb->values[Ts::Iret_offset]);
163 snd->_exc_cont.get(d, trap_state_to_rf(ts));
// Otherwise the full trap state (including iret frame) is copied.
166 Mem::memcpy_mwords(rcv_utcb->values, ts, Ts::Words);
// FPU state follows only if the receiver opted in and write rights exist.
168 if (rcv_utcb->inherit_fpu() && (rights & L4_fpage::Rights::W()))
169 snd->transfer_fpu(rcv);
// First activation of a user thread: zero all user-visible registers and
// enter user mode via the prepared return frame. Does not return.
// NOTE(review): excerpt is elided — parts of the asm sequence (stack
// switch, segment loads, the iret itself) are missing from this listing.
177 Thread::user_invoke()
179 user_invoke_generic();
// sigma0 is handed the physical address of the kernel info page
// (presumably in %rdi via `di` below — confirm against the elided asm).
183 if (current()->space()->is_sigma0())
184 di = Kmem::virt_to_phys(Kip::k());
187 (" mov %%rax,%%rsp \n" // set stack pointer to regs structure
190 " xor %%rax,%%rax \n"
191 " xor %%rcx,%%rcx \n" // clean out user regs
192 " xor %%rdx,%%rdx \n"
193 " xor %%rsi,%%rsi \n"
194 " xor %%rbx,%%rbx \n"
195 " xor %%rbp,%%rbp \n"
198 " xor %%r10,%%r10 \n"
199 " xor %%r11,%%r11 \n"
200 " xor %%r12,%%r12 \n"
201 " xor %%r13,%%r13 \n"
202 " xor %%r14,%%r14 \n"
203 " xor %%r15,%%r15 \n"
// asm inputs: %rax = prepared return frame, %rcx = user data selector.
207 : "a" (nonull_static_cast<Return_frame*>(current()->regs())),
208 "c" (Gdt::gdt_data_user | Gdt::Selector_user),
212 // never returns here
// #GP-in-kernel check hook; the trap state is unused on amd64 and the
// body is elided in this excerpt (presumably a no-op — confirm).
217 Thread::check_trap13_kernel (Trap_state * /*ts*/)
221 //----------------------------------------------------------------------------
222 IMPLEMENTATION [amd64 & (debug | kdb)]:
224 #include "kernel_task.h"
226 /** Call the nested trap handler (either Jdb::enter_kdebugger() or the
227  * gdb stub. Setup our own stack frame */
// NOTE(review): excerpt is elided — parts of the asm (labels 1:/jump
// targets, some operand lists) are missing from this listing.
230 Thread::call_nested_trap_handler(Trap_state *ts)
234 Cpu_number log_cpu = dbg_find_cpu();
// Per-CPU recursion counter: non-zero means we trapped while already
// inside the debugger, so the asm below keeps the current stack.
235 unsigned long &ntr = nested_trap_recover.cpu(log_cpu);
// NOTE(review): printf uses %u for log_cpu (Cpu_number) and ts->_trapno;
// confirm both convert to unsigned as the format string assumes.
238 printf("%s: lcpu%u sp=%p t=%u nested_trap_recover=%ld\n",
239 __func__, log_cpu, (void*)Proc::stack_pointer(), ts->_trapno,
// Outermost entry uses the dedicated per-CPU debugger stack.
246 stack = dbg_stack.cpu(log_cpu).stack_top;
248 Unsigned64 dummy1, dummy2, dummy3;
250 // don't set %esp if gdb fault recovery to ensure that exceptions inside
251 // kdb/jdb don't overwrite the stack
// Switch stack and CR3 to the debugger environment, bump the recursion
// counter, call the handler, then restore CR3, stack and counter.
253 ("mov %%rsp,%[d2] \n\t" // save old stack pointer
254 "cmpq $0,%[recover] \n\t"
255 "jne 1f \n\t" // check trap within trap handler
256 "mov %[stack],%%rsp \n\t" // setup clean stack pointer
258 "incq %[recover] \n\t"
259 "mov %%cr3, %[d1] \n\t"
260 "push %[d2] \n\t" // save old stack pointer on new stack
261 "push %[d1] \n\t" // save old pdbr
262 "mov %[pdbr], %%cr3 \n\t"
263 "callq *%[handler] \n\t"
265 "mov %[d1], %%cr3 \n\t"
266 "pop %%rsp \n\t" // restore old stack pointer
267 "cmpq $0,%[recover] \n\t" // check trap within trap handler
269 "decq %[recover] \n\t"
271 : [ret] "=a"(ret), [d2] "=&r"(dummy2), [d1] "=&r"(dummy1), "=D"(dummy3),
274 [pdbr] "r" (Kernel_task::kernel_task()->virt_to_phys((Address)Kmem::dir())),
277 [handler] "m" (nested_trap_handler)
278 : "rdx", "rcx", "r8", "r9", "memory");
// Drain any cross-CPU requests that queued up while in the debugger.
281 Cpu_call::handle_global_requests();
// Handler convention: 0 means handled; anything else maps to -1.
283 return ret == 0 ? 0 : -1;