12 // ------------------------------------------------------------------------
19 #include "kmem_space.h"
21 #include "static_assert.h"
22 #include "thread_state.h"
24 #include "vmem_alloc.h"
27 FSR_STATUS_MASK = 0x0d,
30 FSR_PERMISSION = 0x0d,
// Per-CPU storage for the kernel-debugger stack descriptor used by Thread.
// (Excerpt note: surrounding lines are elided; DEFINE_PER_CPU is a
// project macro — its expansion is defined elsewhere.)
33 Per_cpu<Thread::Dbg_stack> DEFINE_PER_CPU Thread::dbg_stack;
// Pretty-print an ARM fault error word for diagnostics.
// Encoding (as used by the code below): bits 20..22 select the exception
// vector name, bit 16 distinguishes user/kernel, bit 17 selects 'r'/'w',
// and the low byte is printed as the raw status in hex.
// NOTE(review): this excerpt elides some original lines (braces etc.).
// NOTE(review): bit 17 set prints 'r' — confirm the read/write polarity
// against the architecture's fault-status definition.
37 Thread::print_page_fault_error(Mword e)
39 char const *const excpts[] =
40 { "reset","undef. insn", "swi", "pref. abort", "data abort",
41 "XXX", "XXX", "XXX" };
// Exception vector index: 3 bits, so it always indexes excpts[] safely.
43 unsigned ex = (e >> 20) & 0x07;
45 printf("(%lx) %s, %s(%c)",e & 0xff, excpts[ex],
46 (e & 0x00010000)?"user":"kernel",
47 (e & 0x00020000)?'r':'w');
// Fast path back to user mode at the given ip/sp.
// Sets the user stack pointer and clears the Thumb bit in the saved PSR,
// then returns through inline assembly (the asm body is elided in this
// excerpt); the final panic is unreachable on the normal path.
52 Thread::fast_return_to_user(Mword ip, Mword sp)
56 regs()->sp(sp); // user-sp is in lazy user state and thus handled by
// Force ARM (non-Thumb) state in the return PSR.
60 regs()->psr &= ~Proc::Status_thumb;
// Inputs to the (elided) asm: the return frame and the __iret entry.
66 : "r" (nonull_static_cast<Return_frame*>(regs())), "r" (__iret)
// Never reached — the asm above does not fall through.
68 panic("__builtin_trap()");
// First activation of a freshly created thread: sanitize the register
// frame and drop to user mode (several original lines are elided here).
79 user_invoke_generic();
80 assert (current()->state() & Thread_ready);
// View the current return frame as a full trap state.
82 Trap_state *ts = nonull_static_cast<Trap_state*>
83 (nonull_static_cast<Return_frame*>(current()->regs()));
85 static_assert(sizeof(ts->r[0]), sizeof(Mword));
// Zero the user register array so no kernel data leaks to user mode.
86 Mem::memset_mwords(&ts->r[0], 0, sizeof(ts->r) / sizeof(ts->r[0]));
// Sigma0 is special-cased: it receives the KIP's physical address in r0.
88 if (current()->space() == sigma0_task)
89 ts->r[0] = Kmem_space::kdir()->walk(Kip::k(), 0, false, 0).phys(Kip::k());
91 extern char __return_from_exception;
// Switch the stack to the register frame and jump to the common
// exception-return path (asm partially elided in this excerpt).
94 (" mov sp, %[stack_p] \n" // set stack pointer to regs structure
99 [rfe] "r" (&__return_from_exception)
102 panic("should never be reached");
// Fallback: take the thread out of the ready state and reschedule.
105 current()->state_del(Thread_ready);
106 current()->schedule();
109 // never returns here
// Resolve a page fault for sigma0 by idempotently mapping the faulting
// superpage (phys == virt, both rounded down to the superpage boundary)
// writable, user-accessible and cacheable.
// @param pfa faulting virtual address
// @return true unless the insert failed with out-of-memory.
112 IMPLEMENT inline NEEDS["space.h", <cstdio>, "types.h" ,"config.h"]
113 bool Thread::handle_sigma0_page_fault( Address pfa )
115 return (mem_space()->v_insert(
116 Mem_space::Phys_addr::create((pfa & Config::SUPERPAGE_MASK)),
117 Mem_space::Addr::create(pfa & Config::SUPERPAGE_MASK),
118 Mem_space::Size(Config::SUPERPAGE_SIZE),
119 Mem_space::Page_writable | Mem_space::Page_user_accessible
120 | Mem_space::Page_cacheable)
// Any result other than "no memory" counts as success (an already
// existing mapping is fine for sigma0's idempotent address space).
121 != Mem_space::Insert_err_nomem);
128 * The low-level page fault handler called from entry.S. We're invoked with
129 * interrupts turned off. Apart from turning on interrupts in almost
130 * all cases (except for kernel page faults in TCB area), just forwards
131 * the call to Thread::handle_page_fault().
132 * @param pfa page-fault virtual address
133 * @param error_code CPU error code
134 * @return true if page fault could be resolved, false otherwise
// Low-level page-fault handler called from entry.S with IRQs off.
// Classifies the fault (alignment, user, kernel-with/without-IRQs) and
// forwards resolvable faults to Thread::handle_page_fault().
// (Excerpt note: several original lines, including braces and some
// return statements, are elided from this listing.)
136 Mword pagefault_entry(const Mword pfa, Mword error_code,
137 const Mword pc, Return_frame *ret_frame)
// Disabled double-page-fault detector kept for debugging.
139 #if 0 // Double PF detect
140 static unsigned long last_pfa = ~0UL;
141 LOG_MSG_3VAL(current(),"PF", pfa, last_pfa, pc);
// Alignment errors are reported, never resolved.
146 if (EXPECT_FALSE(PF::is_alignment_error(error_code)))
148 printf("KERNEL%d: alignment error at %08lx (PC: %08lx, SP: %08lx, FSR: %lx, PSR: %lx)\n",
149 current_cpu(), pfa, pc, ret_frame->usp, error_code, ret_frame->psr);
153 Thread *t = current_thread();
155 // Pagefault in user mode
156 if (PF::is_usermode_error(error_code))
// Give a vCPU a chance to handle its own fault first.
158 if (t->vcpu_pagefault(pfa, error_code, pc))
160 t->state_del(Thread_cancel);
163 // or interrupts were enabled
164 else if (!(ret_frame->psr & Proc::Status_IRQ_disabled))
167 // Pagefault in kernel mode and interrupts were disabled
170 // page fault in kernel memory region, not present, but mapping exists
171 if (Kmem::is_kmem_page_fault (pfa, error_code))
173 // We've interrupted a context in the kernel with disabled interrupts,
174 // the page fault address is in the kernel region, the error code is
175 // "not mapped" (as opposed to "access error"), and the region is
176 // actually valid (that is, mapped in Kmem's shared page directory,
177 // just not in the currently active page directory)
180 else if (!Config::conservative &&
181 !Kmem::is_kmem_page_fault (pfa, error_code))
183 // No error -- just enable interrupts.
188 // Error: We interrupted a cli'd kernel context touching kernel space
189 if (!Thread::log_page_fault())
190 printf("*P[%lx,%lx,%lx] ", pfa, error_code, pc);
192 kdb_ke ("page fault in cli mode");
197 // cache operations we carry out for user space might cause PFs, we just
199 if (EXPECT_FALSE(t->is_ignore_mem_op_in_progress()))
205 // PFs in the kern_lib_page are always write PFs due to rollbacks and
// Force the write bit (bit 11) for faults taken inside the kern-lib page.
207 if (EXPECT_FALSE((pc & Kmem::Kern_lib_base) == Kmem::Kern_lib_base))
208 error_code |= (1UL << 11);
// Generic per-thread page-fault handling does the real work.
210 return t->handle_page_fault(pfa, error_code, pc, ret_frame);
// Slow-path trap handler: Linux cache-maintenance API emulation, FPU
// trap handling for undefined instructions, then exception IPC.
// (Excerpt note: some original lines, including braces/returns, elided.)
213 void slowtrap_entry(Trap_state *ts)
215 Thread *t = current_thread();
// ARM-Linux cache flush syscall emulation: SWI with r7 == 0xf0002.
219 if (Config::Support_arm_linux_cache_API)
221 if ( ts->error_code == 0x00200000
222 && ts->r[7] == 0xf0002)
// r0/r1 carry the [start, end) range to make coherent.
225 Mem_op::arm_mem_cache_maint(Mem_op::Op_cache_coherent,
226 (void *)ts->r[0], (void *)ts->r[1]);
// Undefined instruction: may be an FPU instruction needing ownership
// switch or emulation.
231 if (ts->exception_is_undef_insn())
233 switch (Fpu::handle_fpu_trap(ts))
235 case Fpu::Fpu_except_emulated: return;
236 case Fpu::Fpu_except_fault:
// Sanity report: fault although this thread already owns an enabled FPU.
238 if (!(current_thread()->state() & Thread_vcpu_enabled)
239 && Fpu::is_enabled() && Fpu::owner(t->cpu()) == t)
240 printf("KERNEL: FPU doesn't like us?\n");
// Acquire the FPU and retry the faulting instruction (2 bytes back in
// Thumb state, 4 in ARM state).
243 if (t->switchin_fpu())
245 ts->pc -= (ts->psr & Proc::Status_thumb) ? 2 : 4;
249 ts->error_code |= 0x01000000; // tag fpu undef insn
250 if (Fpu::exc_pending())
251 ts->error_code |= 0x02000000; // fpinst and fpinst2 in utcb will be valid
253 case Fpu::Fpu_except_none: break;
257 // send exception IPC if requested
258 if (t->send_exception(ts))
261 // exception handling failed
262 if (Config::conservative)
263 kdb_ke ("thread killed");
// Detect the magic TCB-pagein probe instruction at the faulting pc.
// If the instruction matches, the fault is communicated back to the
// probing code by setting the Z flag in the saved PSR instead of being
// resolved. (Excerpt note: braces and the skip-instruction line elided.)
273 Thread::pagein_tcb_request(Return_frame *regs)
275 //if ((*(Mword*)regs->pc & 0xfff00fff ) == 0xe5900000)
// 0xe59ee000 == "ldr lr, [lr]" — the dedicated TCB-probe instruction.
276 if (*(Mword*)regs->pc == 0xe59ee000)
278 // printf("TCBR: %08lx\n", *(Mword*)regs->pc);
279 // skip faulting instruction
281 // tell program that a pagefault occured we cannot handle
282 regs->psr |= 0x40000000; // set zero flag in psr
290 //---------------------------------------------------------------------------
291 IMPLEMENTATION [arm]:
293 #include "trap_state.h"
297 @param space the address space
298 @param id user-visible thread ID of the sender
299 @param init_prio initial priority
300 @param mcp thread's maximum controlled priority
301 @post state() != Thread_invalid
// Thread constructor (ARM part): initialize IPC endpoints, poison the
// kernel stack for overflow detection, seed the first activation via
// user_invoke, and zero the user-visible register frame.
// (Excerpt note: the constructor head and several lines are elided.)
305 : Receiver(&_thread_lock),
306 Sender(0), // select optimized version of constructor
307 _pager(Thread_ptr::Invalid),
308 _exc_handler(Thread_ptr::Invalid),
311 assert (state() == Thread_invalid);
// Fill the stack area with a known pattern ('5') for debugging.
315 if (Config::stack_depth)
316 std::memset((char*)this + sizeof(Thread), '5',
317 Config::thread_block_size-sizeof(Thread)-64);
319 // set a magic value -- we use it later to verify the stack hasn't
324 _in_exception = false;
// First thing this thread executes is user_invoke (pushed on kernel sp).
326 *reinterpret_cast<void(**)()> (--_kernel_sp) = user_invoke;
328 // clear out user regs that can be returned from the thread_ex_regs
329 // system call to prevent covert channel
330 Entry_frame *r = regs();
// Start in user mode.
333 r->psr = Proc::Status_mode_user;
335 state_add(Thread_dead | Thread_suspended);
337 // ok, we're ready to go!
// Read the user-mode stack pointer from the saved register frame.
342 Thread::user_sp() const
343 { return regs()->sp(); }
// Set the user-mode stack pointer in the saved register frame.
347 Thread::user_sp(Mword sp)
348 { return regs()->sp(sp); }
// User instruction pointer: while an exception continuation is pending
// the real ip lives in _exc_cont, otherwise in the register frame.
350 IMPLEMENT inline NEEDS[Thread::exception_triggered]
352 Thread::user_ip() const
353 { return exception_triggered() ? _exc_cont.ip() : regs()->ip(); }
// user_flags(): body elided in this excerpt.
357 Thread::user_flags() const
// Set the user instruction pointer; when no exception continuation is
// pending also force user mode in the saved PSR. (Some lines elided.)
360 IMPLEMENT inline NEEDS[Thread::exception_triggered]
362 Thread::user_ip(Mword ip)
364 if (exception_triggered())
368 Entry_frame *r = regs();
// Sanitize: keep everything but the mode bits, then force user mode.
370 r->psr = (r->psr & ~Proc::Status_mode_mask) | Proc::Status_mode_user;
// Architecture hook before sending an exception IPC — no-op on ARM.
375 PUBLIC inline NEEDS ["trap_state.h"]
377 Thread::send_exception_arch(Trap_state *)
379 // nothing to tweak on ARM
// Copy the user-visible FPU exception state into the UTCB, starting at
// values[21] (layout defined by Fpu::Exception_state_user).
383 PRIVATE static inline
385 Thread::save_fpu_state_to_utcb(Trap_state *ts, Utcb *u)
387 char *esu = (char *)&u->values[21];
388 Fpu::save_user_exception_state(ts, (Fpu::Exception_state_user *)esu);
// An IPC buffer is invalid when the end of its superpage reaches into
// the kernel address range (i.e. the buffer is not wholly in user space).
393 Thread::invalid_ipc_buffer(void const *a)
396 return Mem_layout::in_kernel(((Address)a & Config::SUPERPAGE_MASK)
397 + Config::SUPERPAGE_SIZE - 1);
// Arm an exception continuation on the given entry frame unless one is
// already pending. (Excerpt note: return statements elided.)
404 Thread::do_trigger_exception(Entry_frame *r, void *ret_handler)
406 if (!_exc_cont.valid())
408 _exc_cont.activate(r, ret_handler)
// Copy exception-reply state from the sender's UTCB into the receiver's
// pending trap state, sanitizing the PSR so user land cannot inject
// privileged mode bits. Two layouts are handled: a pending triggered
// exception (continuation frame at values[15..]) and a plain trap state.
// (Excerpt note: braces/returns and some lines are elided.)
// NOTE(review): the inner declaration `s` (User_return_frame ptr) shadows
// the outer `Mword s` word count — legal here, but worth renaming.
415 PRIVATE static inline
416 bool FIASCO_WARN_RESULT
417 Thread::copy_utcb_to_ts(L4_msg_tag const &tag, Thread *snd, Thread *rcv,
418 unsigned char rights)
420 Trap_state *ts = (Trap_state*)rcv->_utcb_handler;
421 Utcb *snd_utcb = snd->access_utcb();
// s = number of message words actually sent.
422 Mword s = tag.words();
424 if (EXPECT_FALSE(rcv->exception_triggered()))
426 // triggered exception pending
// Only r0..r14 (15 words) go into the trap state in this layout.
427 Mem::memcpy_mwords (ts, snd_utcb->values, s > 15 ? 15 : s);
428 if (EXPECT_TRUE(s > 19))
430 // sanitize processor mode
432 snd_utcb->values[19] &= ~Proc::Status_mode_mask; // clear mode
433 snd_utcb->values[19] |= Proc::Status_mode_supervisor
434 | Proc::Status_interrupts_disabled;
// The continuation frame (ip/psr etc.) lives at values[15..].
436 Continuation::User_return_frame const *s
437 = reinterpret_cast<Continuation::User_return_frame const *>((char*)&snd_utcb->values[15]);
439 rcv->_exc_cont.set(ts, s);
// Plain trap-state layout: 18 words of registers, then pc and psr.
444 Mem::memcpy_mwords (ts, snd_utcb->values, s > 18 ? 18 : s);
445 if (EXPECT_TRUE(s > 18))
446 ts->pc = snd_utcb->values[18];
447 if (EXPECT_TRUE(s > 19))
449 // sanitize processor mode
450 Mword p = snd_utcb->values[19];
451 p &= ~(Proc::Status_mode_mask | Proc::Status_interrupts_mask); // clear mode & irqs
452 p |= Proc::Status_mode_user;
// Optional FPU state transfer, gated on write rights.
457 if (tag.transfer_fpu() && (rights & L4_fpage::W))
458 snd->transfer_fpu(rcv);
// Flag 0x8000 transfers the user-defined TLS word (values[25]).
460 if ((tag.flags() & 0x8000) && (rights & L4_fpage::W))
461 rcv->access_utcb()->user[2] = snd_utcb->values[25];
463 bool ret = transfer_msg_items(tag, snd, snd_utcb,
464 rcv, rcv->access_utcb(), rights);
466 rcv->state_del(Thread_in_exception);
// Copy the sender's trap state (exception state) into the receiver's
// UTCB: r0..r14 into values[0..14], the continuation frame at
// values[15..], and — when no continuation is pending — pc/psr into
// values[18]/[19]; finally FPU state. (Excerpt: some lines elided.)
471 PRIVATE static inline NEEDS[Thread::save_fpu_state_to_utcb]
472 bool FIASCO_WARN_RESULT
473 Thread::copy_ts_to_utcb(L4_msg_tag const &, Thread *snd, Thread *rcv,
474 unsigned char rights)
476 Trap_state *ts = (Trap_state*)snd->_utcb_handler;
// Serialize against concurrent UTCB access.
479 Lock_guard <Cpu_lock> guard (&cpu_lock);
480 Utcb *rcv_utcb = rcv->access_utcb();
482 Mem::memcpy_mwords (rcv_utcb->values, ts, 15);
483 Continuation::User_return_frame *d
484 = reinterpret_cast<Continuation::User_return_frame *>((char*)&rcv_utcb->values[15]);
486 snd->_exc_cont.get(d, ts);
489 if (EXPECT_TRUE(!snd->exception_triggered()))
491 rcv_utcb->values[18] = ts->pc;
492 rcv_utcb->values[19] = ts->psr;
// FPU state follows the receiver's inherit-fpu policy and write rights.
495 if (rcv_utcb->inherit_fpu() && (rights & L4_fpage::W))
496 snd->transfer_fpu(rcv);
498 save_fpu_state_to_utcb(ts, rcv_utcb);
// Architecture-specific invoke/sys_control hooks (bodies elided in this
// excerpt; presumably no-ops on ARM — TODO confirm against full source).
505 Thread::invoke_arch(L4_msg_tag & /*tag*/, Utcb * /*utcb*/)
512 Thread::sys_control_arch(Utcb *)
517 // ------------------------------------------------------------------------
518 IMPLEMENTATION [arm && armv6plus]:
// On vCPU resume (armv6plus), publish the UTCB word values[25] in the
// CP15 user-read-only thread-ID register (c13,c0,2).
522 Thread::vcpu_resume_user_arch()
524 // just an experiment for now, we cannot really take the
525 // user-writable register because user-land might already use it
526 asm volatile("mcr p15, 0, %0, c13, c0, 2"
527 : : "r" (access_utcb()->values[25]) : "memory");
530 // ------------------------------------------------------------------------
531 IMPLEMENTATION [arm && !armv6plus]:
535 Thread::vcpu_resume_user_arch()
539 //-----------------------------------------------------------------------------
// MP extension: IPI dispatch. check_for_ipi() recognizes an IPI irq and
// routes it to the remote-request handler, the kernel debugger entry, or
// the global-request handler. (Excerpt: case labels/returns partly elided.)
544 EXTENSION class Thread
// Assembly-visible entry into the kernel debugger for IPIs.
547 static void kern_kdebug_ipi_entry() asm("kern_kdebug_ipi_entry");
550 PUBLIC static inline NEEDS["ipi.h"]
552 Thread::check_for_ipi(unsigned irq)
554 if (Ipi::is_ipi(irq))
559 Thread::handle_remote_requests_irq();
// Debug IPI: acknowledge, then drop into the kernel debugger.
562 Ipi::eoi(Ipi::Debug);
563 kern_kdebug_ipi_entry();
565 case Ipi::Global_request:
566 handle_global_remote_requests_irq();
575 //-----------------------------------------------------------------------------
576 IMPLEMENTATION [!mp]:
580 Thread::check_for_ipi(unsigned)