1 //----------------------------------------------------------------------------
2 IMPLEMENTATION [amd64]:
// Fast path back to user mode: resume user execution at ip/sp and hand
// `arg` to the user (bound to %rdx via the "d" asm constraint below).
// Preconditions: CPU lock held, executing on the current thread, and the
// saved code selector must carry user privilege (RPL == 3).
5 PUBLIC template<typename T> inline
7 Thread::fast_return_to_user(Mword ip, Mword sp, T arg)
9 assert_kdb(cpu_lock.test());
10 assert_kdb(current() == this);
// BUG FIX: '==' binds tighter than '&', so the previous form
// 'regs()->cs() & 3 == 3' parsed as 'cs() & (3 == 3)' == 'cs() & 1',
// i.e. it only tested bit 0 of CS.  Parenthesize so the full two-bit
// requested-privilege-level field is compared against ring 3.
11 assert_kdb((regs()->cs() & 3) == 3);
// Return to user with interrupts enabled in the restored flags.
15 regs()->flags(EFLAGS_IF);
20 : "r" (static_cast<Return_frame*>(regs())), "d"(arg)
27 Thread::invoke_arch(L4_msg_tag & /*tag*/, Utcb * /*utcb*/)
// Return the user-mode stack pointer.  While a triggered exception is
// pending, the authoritative SP is kept in the exception continuation
// (_exc_cont); otherwise it is read from the entry frame.
34 Thread::user_sp() const
35 { return exception_triggered()?_exc_cont.sp(regs()):regs()->sp(); }
// Set the user-mode stack pointer.  With a triggered exception pending,
// the new value is stored into the exception continuation so it takes
// effect on exception return.  NOTE(review): the non-pending path
// (writing the entry frame directly) is not visible here — presumably an
// else branch follows; confirm against the full source.
39 Thread::user_sp(Mword sp)
41 if (exception_triggered())
42 _exc_cont.sp(regs(), sp);
// Arrange an exception-style upcall for this thread: activate the
// continuation with `ret_handler` on frame `r`, unless an exception is
// already pending (the first trigger wins, later ones are ignored).
49 Thread::do_trigger_exception(Entry_frame *r, void *ret_handler)
51 if (!exception_triggered())
53 _exc_cont.activate(r, ret_handler);
56 // else ignore change of IP because triggered exception already pending
// Undo a pending exception continuation: restore the saved user return
// state from _exc_cont back into the entry frame.
62 Thread::restore_exc_state()
64 _exc_cont.restore(regs());
// Map a Trap_state to the Return_frame that overlays its tail: take the
// address one past the Trap_state and step back one Return_frame, so the
// result aliases the last sizeof(Return_frame) bytes of *ts.
69 Thread::trap_state_to_rf(Trap_state *ts)
71 char *im = reinterpret_cast<char*>(ts + 1);
72 return reinterpret_cast<Return_frame*>(im)-1;
// Exception-IPC reply path: copy the exception reply message from the
// sender's UTCB into the receiver's trap state, transferring FPU state
// and message items as requested, and sanitizing user-controlled flags.
75 PRIVATE static inline NEEDS[Thread::trap_is_privileged,
76 Thread::trap_state_to_rf]
77 bool FIASCO_WARN_RESULT
78 Thread::copy_utcb_to_ts(L4_msg_tag const &tag, Thread *snd, Thread *rcv,
81 Trap_state *ts = (Trap_state*)rcv->_utcb_handler;
82 Mword s = tag.words();
// Code selector saved before the copy; presumably restored into ts
// afterwards (see "don't allow to overwrite" below) — the restoring
// store is not visible in this view.
83 Unsigned32 cs = ts->cs();
84 Utcb *snd_utcb = snd->utcb().access();
86 if (EXPECT_FALSE(rcv->exception_triggered()))
88 // triggered exception pending
// Copy at most 19 message words into the trap state; the words at
// index 19 and up carry the user return frame, which must be routed
// into the continuation instead.  NOTE(review): 19/22/23 are assumed
// to match the Trap_state word layout — confirm against its definition.
89 Mem::memcpy_mwords (ts, snd_utcb->values, s > 19 ? 19 : s);
90 if (EXPECT_TRUE(s > 22))
// NOTE(review): this inner `s` shadows the outer word count `s`
// declared above — confusing but harmless in this scope.
92 Continuation::User_return_frame const *s
93 = reinterpret_cast<Continuation::User_return_frame const *>((char*)&snd_utcb->values[19]);
95 rcv->_exc_cont.set(trap_state_to_rf(ts), s);
// No exception pending: copy the whole state (up to 23 words) straight
// into the trap state.
99 Mem::memcpy_mwords (ts, snd_utcb->values, s > 23 ? 23 : s);
// Transfer FPU state only when the tag requests it and the sender
// mapping grants write rights.
101 if (tag.transfer_fpu() && (rights & L4_fpage::W))
102 snd->transfer_fpu(rcv);
105 // XXX: ia32 in here!
// Sanitize flags for unprivileged receivers: strip IOPL/NT and force
// interrupts enabled, so user code cannot smuggle in privileged bits.
106 if (!rcv->trap_is_privileged(0))
107 ts->flags((ts->flags() & ~(EFLAGS_IOPL | EFLAGS_NT)) | EFLAGS_IF);
109 // don't allow to overwrite the code selector!
// Transfer any typed message items (mappings etc.) described by the tag.
112 bool ret = transfer_msg_items(tag, snd, snd_utcb,
113 rcv, rcv->utcb().access(), rights);
// The reply completes the exception: leave the in-exception state.
115 rcv->state_del(Thread_in_exception);
// Exception-IPC request path: copy the sender's trap state into the
// receiver's (handler's) UTCB — the inverse of copy_utcb_to_ts().
119 PRIVATE static inline NEEDS[Thread::trap_state_to_rf]
120 bool FIASCO_WARN_RESULT
121 Thread::copy_ts_to_utcb(L4_msg_tag const &, Thread *snd, Thread *rcv,
122 unsigned char rights)
124 Trap_state *ts = (Trap_state*)snd->_utcb_handler;
125 Utcb *rcv_utcb = rcv->utcb().access();
// Guard the copy against interrupts/preemption while reading the state.
127 Lock_guard <Cpu_lock> guard (&cpu_lock);
128 if (EXPECT_FALSE(snd->exception_triggered()))
// Triggered exception pending: the first 19 words come from the trap
// state, the user return frame part comes from the continuation.
// NOTE(review): word counts 19/23 assumed to match the Trap_state
// layout used in copy_utcb_to_ts — confirm against its definition.
130 Mem::memcpy_mwords (rcv_utcb->values, ts, 19);
131 Continuation::User_return_frame *d
132 = reinterpret_cast<Continuation::User_return_frame *>((char*)&rcv_utcb->values[19]);
134 snd->_exc_cont.get(d, trap_state_to_rf(ts));
// No pending exception: the full 23-word state comes from the trap state.
137 Mem::memcpy_mwords (rcv_utcb->values, ts, 23);
// Hand over FPU state only if the receiver opted in and write rights
// were granted.
139 if (rcv_utcb->inherit_fpu() && (rights & L4_fpage::W))
140 snd->transfer_fpu(rcv);
// First entry of a freshly created thread into user mode: run the
// generic setup, then zero all general-purpose registers (so no kernel
// values leak to user space) and return through the entry frame.
148 Thread::user_invoke()
150 user_invoke_generic();
// %rax holds a pointer to this thread's Return_frame (input constraint
// "a" below); using it as the stack pointer positions the subsequent
// return sequence on the saved user frame.
153 (" mov %%rax,%%rsp \n" // set stack pointer to regs structure
156 " xor %%rax,%%rax \n"
157 " xor %%rcx,%%rcx \n" // clean out user regs
158 " xor %%rdx,%%rdx \n"
159 " xor %%rsi,%%rsi \n"
160 " xor %%rdi,%%rdi \n"
161 " xor %%rbx,%%rbx \n"
162 " xor %%rbp,%%rbp \n"
165 " xor %%r10,%%r10 \n"
166 " xor %%r11,%%r11 \n"
167 " xor %%r12,%%r12 \n"
168 " xor %%r13,%%r13 \n"
169 " xor %%r14,%%r14 \n"
170 " xor %%r15,%%r15 \n"
// Inputs: the user data segment selector is passed in %rcx ("c");
// NOTE(review): it is presumably loaded into the segment registers in
// elided asm lines before %rcx is cleared above — confirm.
174 : "a" (nonull_static_cast<Return_frame*>(current()->regs())),
175 "c" (Gdt::gdt_data_user | Gdt::Selector_user)
178 // never returns here
183 Thread::check_trap13_kernel (Trap_state * /*ts*/)
187 //----------------------------------------------------------------------------
188 IMPLEMENTATION [amd64 & (debug | kdb)]:
190 #include "kernel_task.h"
192 /** Call the nested trap handler (either Jdb::enter_kdebugger() or the
193  * GDB stub).  Sets up its own stack frame. */
196 Thread::call_nested_trap_handler(Trap_state *ts)
200 unsigned long phys_cpu = Cpu::phys_id_direct();
201 unsigned log_cpu = Cpu::p2l(phys_cpu);
// p2l() failed: trap arrived on a CPU we have no logical id for.
204 printf("Trap on unknown CPU phys_id=%lx\n", phys_cpu);
// Per-CPU recursion counter: non-zero while already inside the nested
// trap handler (tested and maintained by the asm sequence below).
208 unsigned long &ntr = nested_trap_recover.cpu(log_cpu);
211 printf("%s: lcpu%u sp=%p t=%u nested_trap_recover=%ld\n",
212 __func__, log_cpu, (void*)Proc::stack_pointer(), ts->_trapno,
// Use the per-CPU debugger stack for the handler invocation.
219 stack = dbg_stack.cpu(log_cpu).stack_top;
221 Unsigned64 dummy1, dummy2, dummy3;
223 // don't set %esp if gdb fault recovery to ensure that exceptions inside
224 // kdb/jdb don't overwrite the stack
226 ("mov %%rsp,%[d2] \n\t" // save old stack pointer
227 "cmpq $0,%[recover] \n\t"
228 "jne 1f \n\t" // check trap within trap handler
229 "mov %[stack],%%rsp \n\t" // setup clean stack pointer
// Mark that we are now (one level deeper) inside the handler.
231 "incq %[recover] \n\t"
// Save the current page-table base and switch to the kernel task's
// directory so the debugger runs on the kernel address space.
232 "mov %%cr3, %[d1] \n\t"
233 "push %[d2] \n\t" // save old stack pointer on new stack
234 "push %[d1] \n\t" // save old pdbr
235 "mov %[pdbr], %%cr3 \n\t"
// Invoke the nested trap handler (Jdb/GDB stub) through the pointer.
236 "callq *%[handler] \n\t"
// Restore the previous page-table base and stack pointer.
238 "mov %[d1], %%cr3 \n\t"
239 "pop %%rsp \n\t" // restore old stack pointer
240 "cmpq $0,%[recover] \n\t" // check trap within trap handler
// Leaving one nesting level: decrement the recursion counter.
242 "decq %[recover] \n\t"
244 : [ret] "=a"(ret), [d2] "=&r"(dummy2), [d1] "=&r"(dummy1), "=D"(dummy3),
247 [pdbr] "r" (Kernel_task::kernel_task()->mem_space()->virt_to_phys((Address)Kmem::dir())),
250 [handler] "m" (nested_trap_handler)
251 : "rdx", "rcx", "r8", "r9", "memory");
// Handler convention: 0 means the trap was handled, anything else -> -1.
253 return ret == 0 ? 0 : -1;