1 //----------------------------------------------------------------------------
2 IMPLEMENTATION [amd64]:
5 PUBLIC template<typename T> inline
7 Thread::fast_return_to_user(Mword ip, Mword sp, T arg)
9 assert_kdb(cpu_lock.test());
10 assert_kdb(current() == this);
11 assert_kdb(regs()->cs() & 3 == 3);
19 : "r" (static_cast<Return_frame*>(regs())), "d"(arg)
// Architecture hook for thread invocation; both parameters are unused
// on amd64 (commented out in the signature).
Thread::invoke_arch(L4_msg_tag & /*tag*/, Utcb * /*utcb*/)
// Return the user-mode stack pointer.  While an exception IPC is
// pending (exception_triggered()), the real SP lives in the exception
// continuation rather than in the entry frame.
Thread::user_sp() const
{ return exception_triggered()?_exc_cont.sp(regs()):regs()->sp(); }
Thread::user_sp(Mword sp)
  // Set the user-mode stack pointer.  While an exception IPC is pending
  // the SP is stored in the exception continuation, so write it there.
  if (exception_triggered())
    _exc_cont.sp(regs(), sp);
Thread::do_trigger_exception(Entry_frame *r, void *ret_handler)
  // Arm the exception continuation only once: a second trigger while one
  // is already pending must not clobber the saved user return state.
  if (!exception_triggered())
      _exc_cont.activate(r, ret_handler);
  // else ignore change of IP because triggered exception already pending
Thread::restore_exc_state()
  // Restore the user return state saved in the exception continuation
  // back into the entry frame.
  _exc_cont.restore(regs());
Thread::trap_state_to_rf(Trap_state *ts)
  // The Return_frame occupies the tail of the Trap_state: step to the
  // end of *ts, then back by one Return_frame.
  char *im = reinterpret_cast<char*>(ts + 1);
  return reinterpret_cast<Return_frame*>(im)-1;
PRIVATE static inline NEEDS[Thread::trap_is_privileged,
                            Thread::trap_state_to_rf]
bool FIASCO_WARN_RESULT
Thread::copy_utcb_to_ts(L4_msg_tag const &tag, Thread *snd, Thread *rcv,
  // Copy an exception-IPC message from the sender's UTCB into the
  // receiver's trap state; may also transfer the FPU state and the
  // tagged message items.
  Trap_state *ts = (Trap_state*)rcv->_utcb_handler;
  Mword s = tag.words();
  // Old code selector saved up front — presumably restored after the
  // copy so user code cannot overwrite it (see comment below); confirm.
  Unsigned32 cs = ts->cs();
  Utcb *snd_utcb = snd->utcb().access();

  if (EXPECT_FALSE(rcv->exception_triggered()))
      // triggered exception pending
      // Only the first 19 words go into the trap state proper.
      Mem::memcpy_mwords (ts, snd_utcb->values, s > 19 ? 19 : s);
      if (EXPECT_TRUE(s > 22))
          // NOTE(review): this 's' shadows the outer word-count 's';
          // consider renaming one of them for clarity.
          Continuation::User_return_frame const *s
            = reinterpret_cast<Continuation::User_return_frame const *>((char*)&snd_utcb->values[19]);
          // The user return frame (words 19..) goes into the pending
          // exception continuation instead of the live trap state.
          rcv->_exc_cont.set(trap_state_to_rf(ts), s);
  // Copy up to the full 23-word trap state.
  Mem::memcpy_mwords (ts, snd_utcb->values, s > 23 ? 23 : s);

  // FPU hand-over only when requested and the rights include write.
  if (tag.transfer_fpu() && (rights & L4_fpage::W))
    snd->transfer_fpu(rcv);

  // XXX: ia32 in here!
  // Unprivileged receivers may not change IOPL/NT and always get IF set.
  if (!rcv->trap_is_privileged(0))
    ts->flags((ts->flags() & ~(EFLAGS_IOPL | EFLAGS_NT)) | EFLAGS_IF);

  // don't allow to overwrite the code selector!
  bool ret = transfer_msg_items(tag, snd, snd_utcb,
                                rcv, rcv->utcb().access(), rights);

  rcv->state_del(Thread_in_exception);
PRIVATE static inline NEEDS[Thread::trap_state_to_rf]
bool FIASCO_WARN_RESULT
Thread::copy_ts_to_utcb(L4_msg_tag const &, Thread *snd, Thread *rcv,
                        unsigned char rights)
  // Copy the sender's trap state into the receiver's UTCB for exception
  // IPC; may also hand over the FPU state.
  Trap_state *ts = (Trap_state*)snd->_utcb_handler;
  Utcb *rcv_utcb = rcv->utcb().access();

  // Take the CPU lock while the UTCB is being filled.
  Lock_guard <Cpu_lock> guard (&cpu_lock);
  if (EXPECT_FALSE(snd->exception_triggered()))
      // Triggered exception pending: 19 words come from the trap state,
      // the user return frame from the exception continuation.
      Mem::memcpy_mwords (rcv_utcb->values, ts, 19);
      Continuation::User_return_frame *d
        = reinterpret_cast<Continuation::User_return_frame *>((char*)&rcv_utcb->values[19]);
      snd->_exc_cont.get(d, trap_state_to_rf(ts));
  // Full 23-word trap-state copy.
  Mem::memcpy_mwords (rcv_utcb->values, ts, 23);

  // FPU hand-over when the receiver opted in and rights include write.
  if (rcv_utcb->inherit_fpu() && (rights & L4_fpage::W))
    snd->transfer_fpu(rcv);
Thread::user_invoke()
  // First activation of a freshly created thread: run the generic setup,
  // then enter user mode via the thread's return frame.
  user_invoke_generic();
    (" mov %%rax,%%rsp \n" // set stack pointer to regs structure
     " xor %%rax,%%rax \n"
     " xor %%rcx,%%rcx \n" // clean out user regs
     " xor %%rdx,%%rdx \n"
     " xor %%rsi,%%rsi \n"
     " xor %%rdi,%%rdi \n"
     " xor %%rbx,%%rbx \n"
     " xor %%rbp,%%rbp \n"
     " xor %%r10,%%r10 \n"
     " xor %%r11,%%r11 \n"
     " xor %%r12,%%r12 \n"
     " xor %%r13,%%r13 \n"
     " xor %%r14,%%r14 \n"
     " xor %%r15,%%r15 \n"
     // asm inputs: %rax = the thread's user return frame ("a"),
     // %rcx = the user data segment selector ("c").
     : "a" (nonull_static_cast<Return_frame*>(current()->regs())),
       "c" (Gdt::gdt_data_user | Gdt::Selector_user)
  // never returns here
// Hook for trap-13 (#GP) checks raised in kernel mode; the trap state
// is unused here (parameter commented out).
Thread::check_trap13_kernel (Trap_state * /*ts*/)
186 //----------------------------------------------------------------------------
187 IMPLEMENTATION [amd64 & (debug | kdb)]:
189 #include "kernel_task.h"
/** Call the nested trap handler (either Jdb::enter_kdebugger() or the
 * gdb stub).  Sets up its own stack frame. */
Thread::call_nested_trap_handler(Trap_state *ts)
  // Enter the nested (debugger) trap handler on a dedicated per-CPU
  // debug stack, switching to the kernel page table and back again.
  unsigned long phys_cpu = Cpu::phys_id_direct();
  unsigned log_cpu = Cpu::p2l(phys_cpu);
      printf("Trap on unknown CPU phys_id=%lx\n", phys_cpu);

  // Per-CPU nesting counter: nonzero means we trapped while already
  // inside the debugger, so the current stack must be kept.
  unsigned long &ntr = nested_trap_recover.cpu(log_cpu);
      printf("%s: lcpu%u sp=%p t=%u nested_trap_recover=%ld\n",
             __func__, log_cpu, (void*)Proc::stack_pointer(), ts->_trapno,
  stack = dbg_stack.cpu(log_cpu).stack_top;
  Unsigned64 dummy1, dummy2, dummy3;

  // don't set %esp if gdb fault recovery to ensure that exceptions inside
  // kdb/jdb don't overwrite the stack
    ("mov %%rsp,%[d2] \n\t" // save old stack pointer
     "cmpq $0,%[recover] \n\t"
     "jne 1f \n\t" // check trap within trap handler
     "mov %[stack],%%rsp \n\t" // setup clean stack pointer
     "incq %[recover] \n\t"
     "mov %%cr3, %[d1] \n\t"
     "push %[d2] \n\t" // save old stack pointer on new stack
     "push %[d1] \n\t" // save old pdbr
     "mov %[pdbr], %%cr3 \n\t"
     "callq *%[handler] \n\t"
     "mov %[d1], %%cr3 \n\t"
     "pop %%rsp \n\t" // restore old stack pointer
     "cmpq $0,%[recover] \n\t" // check trap within trap handler
     "decq %[recover] \n\t"
     : [ret] "=a"(ret), [d2] "=&r"(dummy2), [d1] "=&r"(dummy1), "=D"(dummy3),
     // pdbr: physical address of the kernel page directory, loaded into
     // CR3 before calling the handler.
       [pdbr] "r" (Kernel_task::kernel_task()->mem_space()->virt_to_phys((Address)Kmem::dir())),
       [handler] "m" (nested_trap_handler)
     : "rdx", "rcx", "r8", "r9", "memory");

  // Handler result 0 means "handled"; anything else is reported as -1.
  return ret == 0 ? 0 : -1;