1 IMPLEMENTATION[ia32 || ux]:
3 PUBLIC template<typename T> inline
// Fast path back to user mode: installs the user ip/sp into the return
// frame and leaves the kernel via an inline-asm epilogue (this is a sampled
// listing; the asm body and intermediate lines are elided here).
5 Thread::fast_return_to_user(Mword ip, Mword sp, T arg)
// Preconditions: CPU lock held, and we must be running on this thread.
7 assert_kdb(cpu_lock.test());
8 assert_kdb(current() == this);
// NOTE(review): precedence bug — `==` binds tighter than `&`, so this
// parses as regs()->cs() & (3 == 3), i.e. cs() & 1. The intended check is
// (regs()->cs() & 3) == 3 (selector RPL must be 3 unless running on UX).
// Fix: assert_kdb(Config::Is_ux || ((regs()->cs() & 3) == 3));
9 assert_kdb(Config::Is_ux || (regs()->cs() & 3 == 3));
// Return frame gets interrupts enabled as the only flag bit.
13 regs()->flags(EFLAGS_IF);
// Asm inputs: the return-frame pointer in any register, `arg` pinned to %eax
// (the asm statement itself is elided from this listing).
18 : "r" (static_cast<Return_frame*>(regs())), "a" (arg)
// Read the user-mode stack pointer from this thread's return frame.
25 Thread::user_sp() const
26 { return regs()->sp(); }
// Setter overload: store a new user-mode stack pointer (body elided in
// this sampled listing; presumably regs()->sp(sp) — confirm in full source).
30 Thread::user_sp(Mword sp)
// Arm a triggered exception for this thread: divert its user return path
// through `ret_handler` via the exception continuation. If an exception is
// already pending, the request is silently ignored (see comment below).
35 Thread::do_trigger_exception(Entry_frame *r, void *ret_handler)
37 if (!exception_triggered())
// Redirect the entry frame's return IP into the continuation.
39 _exc_cont.activate(r, ret_handler);
42 // else ignore change of IP because triggered exception already pending
// Undo a triggered exception: restore the saved user state from the
// exception continuation back into the return frame.
49 Thread::restore_exc_state()
// Must run with the CPU lock held.
51 assert (cpu_lock.test());
52 _exc_cont.restore(regs());
// The following lines (original 56-61) belong to a function whose header is
// elided from this sampled listing; they restore the saved code selector and
// flags into a frame `r`. The two cs() variants appear to be alternative
// branches (one clears bit 0 of the saved selector) — presumably an
// ia32-vs-UX distinction; confirm against the full source.
56 r->cs (exception_cs() & ~1);
58 r->cs (exception_cs());
61 r->flags (_exc_flags);
// Locate the Return_frame that sits at the very end of a Trap_state:
// `ts + 1` is the first byte past the trap state; stepping one Return_frame
// back from there yields the in-place return frame.
68 Thread::trap_state_to_rf(Trap_state *ts)
70 char *im = reinterpret_cast<char*>(ts + 1);
71 return reinterpret_cast<Return_frame*>(im)-1;
74 PRIVATE static inline NEEDS[Thread::trap_is_privileged,
75 Thread::trap_state_to_rf]
76 bool FIASCO_WARN_RESULT
// Exception-IPC reply path: copy exception register state from the sender's
// UTCB message words into the receiver's Trap_state. Returns false on a
// failed typed-item transfer (return handling is elided in this listing).
77 Thread::copy_utcb_to_ts(L4_msg_tag const &tag, Thread *snd, Thread *rcv,
// The receiver's trap state lives behind its _utcb_handler pointer.
80 Trap_state *ts = (Trap_state*)rcv->_utcb_handler;
// s = number of untyped words in the message.
81 Mword s = tag.words();
// Save the receiver's code selector so the sender cannot overwrite it
// (the restoring store is on an elided line; see the comment at orig. 112).
82 Unsigned32 cs = ts->cs();
83 Utcb *snd_utcb = snd->utcb().access();
85 // XXX: check that gs and fs point to valid user_entry only, for gdt and
87 if (EXPECT_FALSE(rcv->exception_triggered()))
89 // triggered exception pending
// Only the first 12 words (starting at _gs) overwrite the trap state; the
// frame words live in the continuation instead while an exception is armed.
90 Mem::memcpy_mwords(&ts->_gs, snd_utcb->values, s > 12 ? 12 : s);
// Words 12..15 carry the user return frame — only if the sender supplied
// at least 16 words.
91 if (EXPECT_TRUE(s > 15))
// NOTE(review): this inner `s` shadows the outer word count `s` above —
// legal, but confusing; consider renaming (e.g. `urf`).
93 Continuation::User_return_frame const *s
94 = reinterpret_cast<Continuation::User_return_frame const *>((char*)&snd_utcb->values[12])
96 rcv->_exc_cont.set(trap_state_to_rf(ts), s);
// No triggered exception: all 16 state words go straight into the trap state.
100 Mem::memcpy_mwords (&ts->_gs, snd_utcb->values, s > 16 ? 16 : s);
// Reset the thread's cached gs/fs — presumably forces the segments to be
// reloaded/validated on the way back to user; confirm in full source.
103 rcv->_gs = rcv->_fs = 0;
// Optionally hand over FPU state, gated on write rights on the fpage.
105 if (tag.transfer_fpu() && (rights & L4_fpage::W))
106 snd->transfer_fpu(rcv);
// Sanitize flags for unprivileged receivers: strip IOPL and NT, force IF.
109 if (!rcv->trap_is_privileged(0))
110 ts->flags((ts->flags() & ~(EFLAGS_IOPL | EFLAGS_NT)) | EFLAGS_IF);
112 // don't allow to overwrite the code selector!
// Transfer typed message items (mappings etc.) into the receiver's UTCB.
115 bool ret = transfer_msg_items(tag, snd, snd_utcb,
116 rcv, rcv->utcb().access(), rights);
// Receiver leaves exception state.
118 rcv->state_del(Thread_in_exception);
122 PRIVATE static inline
123 bool FIASCO_WARN_RESULT
// Exception-IPC request path (mirror of copy_utcb_to_ts): copy the sender's
// Trap_state into the receiver's UTCB message words.
124 Thread::copy_ts_to_utcb(L4_msg_tag const &, Thread *snd, Thread *rcv,
125 unsigned char rights)
127 Utcb *rcv_utcb = rcv->utcb().access();
128 Trap_state *ts = (Trap_state*)snd->_utcb_handler;
// Cap the copy at the UTCB capacity.
129 Mword r = Utcb::Max_words;
// Copy under the CPU lock so the state cannot change mid-transfer.
132 Lock_guard <Cpu_lock> guard (&cpu_lock);
133 if (EXPECT_FALSE(snd->exception_triggered()))
// Triggered exception pending: the first 12 words come from the trap state,
// the return-frame words (12..15) from the exception continuation.
135 Mem::memcpy_mwords (rcv_utcb->values, &ts->_gs, r > 12 ? 12 : r);
136 Continuation::User_return_frame *d
137 = reinterpret_cast<Continuation::User_return_frame *>((char*)&rcv_utcb->values[12])
139 snd->_exc_cont.get(d, trap_state_to_rf(ts));
// Otherwise all 16 state words come straight from the trap state.
142 Mem::memcpy_mwords (rcv_utcb->values, &ts->_gs, r > 16 ? 16 : r);
// Hand over FPU state when the receiver asks for it and rights allow.
144 if (rcv_utcb->inherit_fpu() && (rights & L4_fpage::W))
145 snd->transfer_fpu(rcv);
151 //----------------------------------------------------------------------------
152 IMPLEMENTATION [ia32 && !ux]:
154 IMPLEMENT inline NEEDS[Thread::exception_triggered]
// Set the thread's user-mode instruction pointer. If a triggered exception
// is pending, the ip must go into the exception continuation rather than
// the return frame (both stores are on elided lines in this listing).
156 Thread::user_ip(Mword ip)
158 if (exception_triggered())
// Handle #GP (trap 13) with error code 0: such a fault can be raised by a
// segment-register load of an invalid/null selector from user OR kernel
// context. Repair the segment registers with safe values so the kernel can
// continue (this is a sampled listing; braces/returns are elided).
167 Thread::check_trap13_kernel(Trap_state *ts)
169 if (EXPECT_FALSE(ts->_trapno == 13 && (ts->_err & 3) == 0))
171 // First check if user loaded a segment register with 0 because the
172 // resulting exception #13 can be raised from user _and_ kernel. If
173 // the user tried to load another segment selector, the thread gets
175 // XXX Should we emulate this too? Michael Hohmuth: Yes, we should.
// Null ds/es are reloaded with the kernel data segment; null fs/gs get the
// UTCB segment (their saved slots are patched in the trap state).
176 if (EXPECT_FALSE(!(ts->_ds & 0xffff)))
178 Cpu::set_ds(Gdt::data_segment());
181 if (EXPECT_FALSE(!(ts->_es & 0xffff)))
183 Cpu::set_es(Gdt::data_segment());
186 if (EXPECT_FALSE(!(ts->_fs & 0xffff)))
188 ts->_fs = Utcb_init::utcb_segment();
191 if (EXPECT_FALSE(!(ts->_gs & 0xffff)))
193 ts->_gs = Utcb_init::utcb_segment();
// NOTE(review): misplaced parenthesis in the next four conditions —
// EXPECT_FALSE wraps only `ts->_ds & 0xfff8` (the selector with RPL/TI
// masked off) and the `== Gdt::gdt_code_user` comparison sits OUTSIDE the
// macro. If EXPECT_FALSE passes its argument through unchanged the value is
// the same and only the unlikely-hint is misapplied; if it normalizes to
// bool (!!), the comparison can never match and these branches are dead.
// Intended: if (EXPECT_FALSE((ts->_ds & 0xfff8) == Gdt::gdt_code_user)).
196 if (EXPECT_FALSE(ts->_ds & 0xfff8) == Gdt::gdt_code_user)
198 WARN("%p eip=%08lx: code selector ds=%04lx",
199 this, ts->ip(), ts->_ds & 0xffff);
200 Cpu::set_ds(Gdt::data_segment());
// Same misplaced parenthesis as above (es).
203 if (EXPECT_FALSE(ts->_es & 0xfff8) == Gdt::gdt_code_user)
205 WARN("%p eip=%08lx: code selector es=%04lx",
206 this, ts->ip(), ts->_es & 0xffff);
207 Cpu::set_es(Gdt::data_segment());
// Same misplaced parenthesis as above (fs).
210 if (EXPECT_FALSE(ts->_fs & 0xfff8) == Gdt::gdt_code_user)
212 WARN("%p eip=%08lx: code selector fs=%04lx",
213 this, ts->ip(), ts->_fs & 0xffff);
214 ts->_fs = Utcb_init::utcb_segment();
// Same misplaced parenthesis as above (gs).
217 if (EXPECT_FALSE(ts->_gs & 0xfff8) == Gdt::gdt_code_user)
219 WARN("%p eip=%08lx: code selector gs=%04lx",
220 this, ts->ip(), ts->_gs & 0xffff);
221 ts->_gs = Utcb_init::utcb_segment();
// First entry into user mode for a freshly created thread: point %esp at
// the kernel-built return frame, load the user data segment into es/ds,
// scrub all general-purpose registers, then return to user (the asm's
// trailing iret and clobber list are elided from this sampled listing).
232 Thread::user_invoke()
234 user_invoke_generic();
237 (" movl %%eax,%%esp \n" // set stack pointer to regs structure
238 " movl %%ecx,%%es \n"
239 " movl %%ecx,%%ds \n"
240 " xorl %%eax,%%eax \n" // clean out user regs
241 " xorl %%ecx,%%ecx \n"
242 " xorl %%edx,%%edx \n"
243 " xorl %%esi,%%esi \n"
244 " xorl %%edi,%%edi \n"
245 " xorl %%ebx,%%ebx \n"
246 " xorl %%ebp,%%ebp \n"
// Inputs: %eax = this thread's return frame, %ecx = user data selector.
249 : "a" (nonull_static_cast<Return_frame*>(current()->regs())),
250 "c" (Gdt::gdt_data_user | Gdt::Selector_user)
253 // never returns here
256 //---------------------------------------------------------------------------
257 IMPLEMENTATION [ia32]:
// Advertise user-settable GDT segments as a kernel feature string in the KIP.
260 KIP_KERNEL_FEATURE("segments");
// Architecture-specific thread invocation: GDT user-entry management.
// Dispatches on the opcode in the first UTCB word (the switch cases and
// several braces are elided from this sampled listing).
264 Thread::invoke_arch(L4_msg_tag &tag, Utcb *utcb)
266 switch (utcb->values[0] & Opcode_mask)
270 // if no words given then return the first gdt entry
271 if (EXPECT_FALSE(tag.words() == 1))
// Report the index (selector >> 3) of the first user GDT entry.
273 utcb->values[0] = Gdt::gdt_user_entry1 >> 3;
274 tag = Kobject_iface::commit_result(0, 1);
// Write case: values[1] = starting entry number, descriptors follow.
279 unsigned entry_number = utcb->values[1];
// Each Gdt_entry is two Mwords wide, hence idx += 2 per entry.
282 for (; entry_number < Gdt_user_entries
284 ; idx += 2, ++entry_number)
286 Gdt_entry *d = (Gdt_entry *)&utcb->values[idx];
288 _gdt_user_entries[entry_number] = *d;
// Only reload the live GDT when modifying the currently running thread.
291 if (this == current_thread())
292 switch_gdt_user_entries(this);
// Return the user-mode selector of the first written entry (RPL 3).
294 tag = Kobject_iface::commit_result((utcb->values[1] << 3) + Gdt::gdt_user_entry1 + 3);
303 //---------------------------------------------------------------------------
304 IMPLEMENTATION [ia32 & (debug | kdb)]:
307 #include "kernel_task.h"
309 /** Call the nested trap handler (either Jdb::enter_kdebugger() or the
310  * gdb stub). Set up our own stack frame. */
// Enter the in-kernel debugger (JDB/gdb stub) for trap `ts`: switch to the
// per-CPU debugger stack and the kernel page directory, call the registered
// nested handler, then restore %esp/%cr3 (parts of the asm and the recovery
// bookkeeping are elided from this sampled listing).
313 Thread::call_nested_trap_handler(Trap_state *ts)
// Map the physical CPU id to the kernel's logical CPU number.
315 unsigned long phys_cpu = Cpu::phys_id_direct();
316 unsigned log_cpu = Cpu::p2l(phys_cpu);
319 printf("Trap on unknown CPU phys_id=%lx\n", phys_cpu);
// Per-CPU nesting counter: nonzero means we faulted inside the debugger
// itself, so the stack switch below must be skipped (see comment at 350).
323 unsigned long &ntr = nested_trap_recover.cpu(log_cpu);
326 printf("%s: lcpu%u sp=%p t=%lu nested_trap_recover=%ld\n",
327 __func__, log_cpu, (void*)Proc::stack_pointer(), ts->_trapno, ntr);
// Scratch outputs for the asm block below.
332 unsigned dummy1, dummy2, dummy3;
// Parameter block handed to the asm: debugger stack, pdir, handler pointer.
337 FIASCO_FASTCALL int (*handler)(Trap_state*, unsigned);
342 p.stack = dbg_stack.cpu(log_cpu).stack_top;
// Physical address of the kernel page directory, for loading into %cr3.
346 p.pdir = Kernel_task::kernel_task()->mem_space()->virt_to_phys((Address)Kmem::dir());
347 p.handler = nested_trap_handler;
350 // don't set %esp if gdb fault recovery to ensure that exceptions inside
351 // kdb/jdb don't overwrite the stack
// Save %esp, and only switch to the debugger stack when not already nested.
353 ("mov %%esp,%[d2] \n\t"
354 "cmpl $0,(%[ntr]) \n\t"
356 "mov 8(%[p]),%%esp \n\t"
// Save the current %cr3, load the kernel pdir, and restore it afterwards.
359 "mov %%cr3, %[d1] \n\t"
363 "mov (%[p]), %[d1] \n\t"
364 "mov %[d1], %%cr3 \n\t"
367 "mov %[d1], %%cr3 \n\t"
370 "cmpl $0,(%[ntr]) \n\t"
// Collapse the handler's result to 0 (handled) / -1 (not handled).
385 return ret == 0 ? 0 : -1;