12 // ------------------------------------------------------------------------
20 #include "kmem_space.h"
21 #include "static_assert.h"
22 #include "thread_state.h"
24 #include "vmem_alloc.h"
// NOTE(review): gapped listing — these are fragments of an ARM fault-status
// (FSR) constant list; the enclosing declaration is outside this excerpt.
27 FSR_STATUS_MASK = 0x0d,
30 FSR_PERMISSION = 0x0d,
// Per-CPU instance of the Thread debug stack (used from the kernel-debugger
// IPI path below — see kern_kdebug_ipi_entry; confirm against entry code).
33 Per_cpu<Thread::Dbg_stack> DEFINE_PER_CPU Thread::dbg_stack;
// Decode and print a page-fault/exception error word 'e':
//   bits 20..22 -> exception class index into excpts[],
//   bit 16      -> faulting mode ("user" vs "kernel"),
//   bit 17      -> access type ('r' vs 'w'),
//   low byte    -> raw status printed in hex.
// NOTE(review): gapped listing — IMPLEMENT header/braces are not visible here.
37 Thread::print_page_fault_error(Mword e)
39 char const *const excpts[] =
40 { "reset","undef. insn", "swi", "pref. abort", "data abort",
41 "XXX", "XXX", "XXX" };
43 unsigned ex = (e >> 20) & 0x07;
45 printf("(%lx) %s, %s(%c)",e & 0xff, excpts[ex],
46 (e & 0x00010000)?"user":"kernel",
47 (e & 0x00020000)?'r':'w');
// Fast path back to user mode at (ip, sp). When do_fill_user_state is set,
// the user SP is stored into the (lazily handled) return frame; the Thumb
// bit is cleared from the saved PSR before the asm return sequence.
// NOTE(review): gapped listing — the asm statement and several lines between
// the visible ones are missing from this excerpt.
52 Thread::fast_return_to_user(Mword ip, Mword sp, bool do_fill_user_state = true)
56 if (do_fill_user_state)
58 regs()->sp(sp); // user-sp is in lazy user state and thus handled by
63 regs()->psr &= ~Proc::Status_thumb;
// Input operands of the (not fully visible) inline-asm return sequence.
69 : "r" (nonull_static_cast<Return_frame*>(regs())), "r" (__iret)
// Must never fall through the asm return to user.
71 panic("__builtin_trap()");
// NOTE(review): function header not visible in this excerpt — this appears to
// be the body of Thread::user_invoke (first activation of a thread): it runs
// the generic hook, zeroes the user register frame to avoid leaking kernel
// data, hands sigma0 the KIP's physical address in r0, and returns to user
// via the exception-return asm path. Confirm against the full file.
82 user_invoke_generic();
83 assert (current()->state() & Thread_ready);
85 Trap_state *ts = nonull_static_cast<Trap_state*>
86 (nonull_static_cast<Return_frame*>(current()->regs()));
// NOTE(review): 'static_assert(sizeof(ts->r[0]), sizeof(Mword))' looks odd —
// presumably a project macro taking (size, size); verify in static_assert.h.
88 static_assert(sizeof(ts->r[0]), sizeof(Mword));
// Clear all user GPRs in the trap frame (covert-channel prevention).
89 Mem::memset_mwords(&ts->r[0], 0, sizeof(ts->r) / sizeof(ts->r[0]));
// Sigma0 bootstrap: pass the physical address of the KIP in r0.
91 if (current()->space() == sigma0_task)
92 ts->r[0] = Kmem_space::kdir()->walk(Kip::k(), 0, false, 0).phys(Kip::k());
94 extern char __return_from_exception;
97 (" mov sp, %[stack_p] \n" // set stack pointer to regs structure
102 [rfe] "r" (&__return_from_exception)
105 panic("should never be reached");
// Fallback path (not reached normally): block the thread permanently.
108 current()->state_del(Thread_ready);
109 current()->schedule();
112 // never returns here
// Resolve a page fault for sigma0 by idempotently (virt == phys) mapping the
// superpage containing 'pfa' writable, user-accessible and cacheable.
// Returns false only when the insert fails for lack of memory; any other
// insert result (including "already mapped") counts as success.
// NOTE(review): gapped listing — the function's braces are not visible here.
115 IMPLEMENT inline NEEDS["space.h", <cstdio>, "types.h" ,"config.h"]
116 bool Thread::handle_sigma0_page_fault( Address pfa )
118 return (mem_space()->v_insert(
119 Mem_space::Phys_addr::create((pfa & Config::SUPERPAGE_MASK)),
120 Mem_space::Addr::create(pfa & Config::SUPERPAGE_MASK),
121 Mem_space::Size(Config::SUPERPAGE_SIZE),
122 Mem_space::Page_writable | Mem_space::Page_user_accessible
123 | Mem_space::Page_cacheable)
124 != Mem_space::Insert_err_nomem);
131 * The low-level page fault handler called from entry.S. We're invoked with
132 * interrupts turned off. Apart from turning on interrupts in almost
133 * all cases (except for kernel page faults in TCB area), just forwards
134 * the call to Thread::handle_page_fault().
135 * @param pfa page-fault virtual address
136 * @param error_code CPU error code
137 * @return true if page fault could be resolved, false otherwise
// See the doc comment above: low-level PF handler called from entry.S with
// interrupts off; classifies the fault and forwards to handle_page_fault().
// NOTE(review): gapped listing — many interior lines (braces, returns,
// Proc::sti() calls, the #endif of the debug block) are missing here.
139 Mword pagefault_entry(const Mword pfa, Mword error_code,
140 const Mword pc, Return_frame *ret_frame)
142 #if 0 // Double PF detect
143 static unsigned long last_pfa = ~0UL;
144 LOG_MSG_3VAL(current(),"PF", pfa, last_pfa, pc);
// Alignment faults are reported and not forwarded as page faults.
149 if (EXPECT_FALSE(PF::is_alignment_error(error_code)))
151 printf("KERNEL%d: alignment error at %08lx (PC: %08lx, SP: %08lx, FSR: %lx)\n",
152 current_cpu(), pfa, pc, ret_frame->usp, error_code);
156 Thread *t = current_thread();
158 // Pagefault in user mode
159 if (PF::is_usermode_error(error_code))
// vCPU gets first chance to handle its own page faults.
161 if (t->vcpu_pagefault(pfa, error_code, pc))
163 t->state_del(Thread_cancel);
166 // or interrupts were enabled
167 else if (!(ret_frame->psr & Proc::Status_IRQ_disabled))
170 // Pagefault in kernel mode and interrupts were disabled
173 // page fault in kernel memory region, not present, but mapping exists
174 if (Kmem::is_kmem_page_fault (pfa, error_code))
176 // We've interrupted a context in the kernel with disabled interrupts,
177 // the page fault address is in the kernel region, the error code is
178 // "not mapped" (as opposed to "access error"), and the region is
179 // actually valid (that is, mapped in Kmem's shared page directory,
180 // just not in the currently active page directory)
183 else if (!Config::conservative &&
184 !Kmem::is_kmem_page_fault (pfa, error_code))
186 // No error -- just enable interrupts.
191 // Error: We interrupted a cli'd kernel context touching kernel space
192 if (!Thread::log_page_fault())
193 printf("*P[%lx,%lx,%lx] ", pfa, error_code, pc);
195 kdb_ke ("page fault in cli mode");
200 // cache operations we carry out for user space might cause PFs, we just
202 if (EXPECT_FALSE(t->is_cache_op_in_progress()))
208 // PFs in the kern_lib_page are always write PFs due to rollbacks and
// Force the "write" bit in the error code for kern-lib-page faults.
210 if (EXPECT_FALSE((pc & Kmem::Kern_lib_base) == Kmem::Kern_lib_base))
211 error_code |= (1UL << 11);
213 return t->handle_page_fault(pfa, error_code, pc, ret_frame);
// Generic slow-path trap handler: services the Linux-compatible cache-
// maintenance syscall, gives the FPU trap handler a chance on undefined
// instructions (lazy FPU switching / emulation), then delivers an exception
// IPC to the thread's handler; if that fails the thread is killed.
// NOTE(review): gapped listing — braces, returns and several statements
// between the visible lines are missing from this excerpt.
216 void slowtrap_entry(Trap_state *ts)
218 Thread *t = current_thread();
// ARM-Linux cache API: error code 0x00200000 with r7 == 0xf0002 requests a
// coherent cache maintenance over [r0, r1).
222 if (Config::Support_arm_linux_cache_API)
224 if ( ts->error_code == 0x00200000
225 && ts->r[7] == 0xf0002)
228 Cache_op::arm_cache_maint(Cache_op::Op_coherent,
229 (void *)ts->r[0], (void *)ts->r[1]);
234 if (ts->exception_is_undef_insn())
236 switch (Fpu::handle_fpu_trap(ts))
238 case Fpu::Fpu_except_emulated: return;
239 case Fpu::Fpu_except_fault:
// Sanity check: a fault while we already own an enabled FPU is unexpected.
241 if (!(current_thread()->state() & Thread_vcpu_enabled)
242 && Fpu::is_enabled() && Fpu::owner(t->cpu()) == t)
243 printf("KERNEL: FPU doesn't like us?\n");
// Lazy FPU switch-in succeeded: replay the faulting instruction
// (2 bytes back in Thumb mode, 4 in ARM mode).
246 if (t->switchin_fpu())
248 ts->pc -= (ts->psr & Proc::Status_thumb) ? 2 : 4;
252 ts->error_code |= 0x01000000; // tag fpu undef insn
253 if (Fpu::exc_pending())
254 ts->error_code |= 0x02000000; // fpinst and fpinst2 in utcb will be valid
256 case Fpu::Fpu_except_none: break;
260 // send exception IPC if requested
261 if (t->send_exception(ts))
264 // exception handling failed
265 if (Config::conservative)
266 kdb_ke ("thread killed");
// Detect the magic TCB-page-in probe instruction (ldr lr, [lr] encoded as
// 0xe59ee000) at the faulting PC; when matched, the faulting instruction is
// skipped and the Z flag is set in the saved PSR so the probing code can see
// that the page fault could not be handled.
// NOTE(review): gapped listing — braces, the PC-advance line and the return
// paths are missing from this excerpt.
276 Thread::pagein_tcb_request(Return_frame *regs)
278 //if ((*(Mword*)regs->pc & 0xfff00fff ) == 0xe5900000)
279 if (*(Mword*)regs->pc == 0xe59ee000)
281 // printf("TCBR: %08lx\n", *(Mword*)regs->pc);
282 // skip faulting instruction
284 // tell program that a pagefault occurred which we cannot handle
285 regs->psr |= 0x40000000; // set zero flag in psr
293 //---------------------------------------------------------------------------
294 IMPLEMENTATION [arm]:
296 #include "trap_state.h"
300 @param space the address space
301 @param id user-visible thread ID of the sender
302 @param init_prio initial priority
303 @param mcp thread's maximum controlled priority
304 @post state() != Thread_invalid
// Constructor body (see doc comment above): initializes base classes and
// pager/exception-handler pointers, poisons the unused kernel stack for
// stack-depth accounting, seeds the kernel stack so the first activation
// enters user_invoke, zeroes the user register frame (covert-channel
// prevention) and marks the thread dead+suspended until it is started.
// NOTE(review): gapped listing — the constructor signature and several
// interior lines are missing from this excerpt.
308 : Receiver(&_thread_lock),
309 Sender(0), // select optimized version of constructor
310 _pager(Thread_ptr::Invalid),
311 _exc_handler(Thread_ptr::Invalid),
314 assert (state() == Thread_invalid);
// Fill the unused part of the thread block with '5' so stack usage can be
// measured later (last 64 bytes are spared).
318 if (Config::stack_depth)
319 std::memset((char*)this + sizeof(Thread), '5',
320 Config::thread_block_size-sizeof(Thread)-64);
322 // set a magic value -- we use it later to verify the stack hasn't
327 _in_exception = false;
// First kernel-stack entry: activation jumps into user_invoke.
329 *reinterpret_cast<void(**)()> (--_kernel_sp) = user_invoke;
331 // clear out user regs that can be returned from the thread_ex_regs
332 // system call to prevent covert channel
333 Entry_frame *r = regs();
336 r->psr = Proc::Status_mode_user;
338 state_add(Thread_dead | Thread_suspended);
340 // ok, we're ready to go!
// User stack-pointer accessors: read/write the SP slot of the entry frame.
// NOTE(review): IMPLEMENT/return-type lines are missing from this excerpt.
345 Thread::user_sp() const
346 { return regs()->sp(); }
350 Thread::user_sp(Mword sp)
351 { return regs()->sp(sp); }
// User IP accessors. While an exception continuation is pending the user IP
// lives in _exc_cont rather than in the entry frame, so both paths are
// handled here; the setter also forces user mode into the saved PSR.
// NOTE(review): gapped listing — return types, braces, the user_flags body
// and the exception-triggered branch of the setter are missing here.
353 IMPLEMENT inline NEEDS[Thread::exception_triggered]
355 Thread::user_ip() const
356 { return exception_triggered() ? _exc_cont.ip() : regs()->ip(); }
360 Thread::user_flags() const
363 IMPLEMENT inline NEEDS[Thread::exception_triggered]
365 Thread::user_ip(Mword ip)
367 if (exception_triggered())
371 Entry_frame *r = regs();
// Sanitize the saved PSR: whatever mode was there, return to user mode.
373 r->psr = (r->psr & ~Proc::Status_mode_mask) | Proc::Status_mode_user;
// send_exception_arch: ARM needs no architecture-specific tweaking of the
// trap state before an exception IPC (body is effectively empty).
378 PUBLIC inline NEEDS ["trap_state.h"]
380 Thread::send_exception_arch(Trap_state *)
382 // nothing to tweak on ARM
// save_fpu_state_to_utcb: serialize the user-visible FPU exception state
// into the UTCB starting at word 21 (layout defined by
// Fpu::Exception_state_user).
386 PRIVATE static inline
388 Thread::save_fpu_state_to_utcb(Trap_state *ts, Utcb *u)
390 char *esu = (char *)&u->values[21];
391 Fpu::save_user_exception_state(ts, (Fpu::Exception_state_user *)esu);
// invalid_ipc_buffer: an IPC buffer is invalid when the last byte of the
// superpage containing 'a' lies inside the kernel region (i.e. the buffer's
// superpage overlaps kernel space).
396 Thread::invalid_ipc_buffer(void const *a)
399 return Mem_layout::in_kernel(((Address)a & Config::SUPERPAGE_MASK)
400 + Config::SUPERPAGE_SIZE - 1);
// do_trigger_exception: arm the exception continuation for frame 'r' unless
// one is already pending (only the first trigger wins).
// NOTE(review): gapped listing — braces/return lines are missing here.
407 Thread::do_trigger_exception(Entry_frame *r, void *ret_handler)
409 if (!_exc_cont.valid())
411 _exc_cont.activate(r, ret_handler)
// Copy an exception reply from the sender's UTCB into the receiver's trap
// state. Two layouts: if the receiver has a triggered exception pending,
// words 0..14 go to the trap state and words 15..19 form a continuation
// frame (PSR word 19 sanitized to supervisor mode, IRQs disabled);
// otherwise words 0..17 go to the trap state, word 18 is the PC and word 19
// the PSR (sanitized to user mode, IRQ bits cleared). Optionally transfers
// FPU state and the TLS word (values[25]) when the W right is present, then
// copies message items and clears Thread_in_exception.
// NOTE(review): gapped listing — braces, else, the psr store and the final
// return are missing from this excerpt.
418 PRIVATE static inline
419 bool FIASCO_WARN_RESULT
420 Thread::copy_utcb_to_ts(L4_msg_tag const &tag, Thread *snd, Thread *rcv,
421 unsigned char rights)
423 Trap_state *ts = (Trap_state*)rcv->_utcb_handler;
424 Utcb *snd_utcb = snd->access_utcb();
425 Mword s = tag.words();
427 if (EXPECT_FALSE(rcv->exception_triggered()))
429 // triggered exception pending
430 Mem::memcpy_mwords (ts, snd_utcb->values, s > 15 ? 15 : s);
431 if (EXPECT_TRUE(s > 19))
433 // sanitize processor mode
435 snd_utcb->values[19] &= ~Proc::Status_mode_mask; // clear mode
436 snd_utcb->values[19] |= Proc::Status_mode_supervisor
437 | Proc::Status_interrupts_disabled;
// NOTE(review): this pointer 's' shadows the word count 's' above —
// intentional in the original, but worth renaming in a full edit.
439 Continuation::User_return_frame const *s
440 = reinterpret_cast<Continuation::User_return_frame const *>((char*)&snd_utcb->values[15]);
442 rcv->_exc_cont.set(ts, s);
447 Mem::memcpy_mwords (ts, snd_utcb->values, s > 18 ? 18 : s);
448 if (EXPECT_TRUE(s > 18))
449 ts->pc = snd_utcb->values[18];
450 if (EXPECT_TRUE(s > 19))
452 // sanitize processor mode
453 Mword p = snd_utcb->values[19];
454 p &= ~(Proc::Status_mode_mask | Proc::Status_interrupts_mask); // clear mode & irqs
455 p |= Proc::Status_mode_user;
460 if (tag.transfer_fpu() && (rights & L4_fpage::W))
461 snd->transfer_fpu(rcv);
// Flag 0x8000 requests transfer of the user-defined TLS word (values[25]).
463 if ((tag.flags() & 0x8000) && (rights & L4_fpage::W))
464 rcv->access_utcb()->user[2] = snd_utcb->values[25];
466 bool ret = transfer_msg_items(tag, snd, snd_utcb,
467 rcv, rcv->access_utcb(), rights);
469 rcv->state_del(Thread_in_exception);
// Copy the sender's trap state into the receiver's UTCB for an exception
// IPC: words 0..14 from the trap state, words 15..19 filled from the
// exception continuation; if no continuation is pending, PC and PSR come
// directly from the trap state (words 18/19). FPU state is attached when
// the receiver's UTCB requests FPU inheritance and the W right is present.
// Runs under the CPU lock.
// NOTE(review): gapped listing — braces and the final return are missing
// from this excerpt.
474 PRIVATE static inline NEEDS[Thread::access_utcb, Thread::save_fpu_state_to_utcb]
475 bool FIASCO_WARN_RESULT
476 Thread::copy_ts_to_utcb(L4_msg_tag const &, Thread *snd, Thread *rcv,
477 unsigned char rights)
479 Trap_state *ts = (Trap_state*)snd->_utcb_handler;
482 Lock_guard <Cpu_lock> guard (&cpu_lock);
483 Utcb *rcv_utcb = rcv->access_utcb();
485 Mem::memcpy_mwords (rcv_utcb->values, ts, 15);
486 Continuation::User_return_frame *d
487 = reinterpret_cast<Continuation::User_return_frame *>((char*)&rcv_utcb->values[15]);
489 snd->_exc_cont.get(d, ts);
492 if (EXPECT_TRUE(!snd->exception_triggered()))
494 rcv_utcb->values[18] = ts->pc;
495 rcv_utcb->values[19] = ts->psr;
498 if (rcv_utcb->inherit_fpu() && (rights & L4_fpage::W))
499 snd->transfer_fpu(rcv);
501 save_fpu_state_to_utcb(ts, rcv_utcb);
// Architecture hooks for the generic thread-invoke / thread-control paths.
// NOTE(review): only the signatures are visible in this excerpt; on ARM
// these are presumably no-op/error stubs — confirm against the full file.
508 Thread::invoke_arch(L4_msg_tag & /*tag*/, Utcb * /*utcb*/)
515 Thread::sys_control_arch(Utcb *)
520 // ------------------------------------------------------------------------
521 IMPLEMENTATION [arm && armv6plus]:
// On ARMv6+: publish the UTCB TLS word (values[25]) to user space via the
// CP15 c13/c0/2 user-read-only thread-ID register before resuming a vCPU.
525 Thread::vcpu_resume_user_arch()
527 // just an experiment for now, we cannot really take the
528 // user-writable register because user-land might already use it
529 asm volatile("mcr p15, 0, %0, c13, c0, 2"
530 : : "r" (access_utcb()->values[25]) : "memory");
533 // ------------------------------------------------------------------------
534 IMPLEMENTATION [arm && !armv6plus]:
// Pre-ARMv6 has no thread-ID register: this variant is a no-op (body not
// visible in this excerpt).
538 Thread::vcpu_resume_user_arch()
542 //-----------------------------------------------------------------------------
543 IMPLEMENTATION [arm && vcache]:
// Virtually-cached variant: use the UTCB's real user-space mapping when the
// thread's space is the currently active one (avoids the kernel alias
// mapping). The alternative branch is not visible in this excerpt.
547 Thread::access_utcb() const
549 // Do not use the alias mapping of the UTCB for the current address space
550 return Mem_space::current_mem_space(current_cpu()) == mem_space()
556 //-----------------------------------------------------------------------------
557 IMPLEMENTATION [arm && !vcache]:
// Physically-indexed-cache variant: additionally requires that the thread
// is bound to the current CPU before the direct mapping may be used.
561 Thread::access_utcb() const
563 return current_cpu() == cpu() && Mem_space::current_mem_space(current_cpu()) == mem_space()
569 //-----------------------------------------------------------------------------
574 EXTENSION class Thread
// Assembly-visible entry point for the kernel debugger invoked via IPI.
577 static void kern_kdebug_ipi_entry() asm("kern_kdebug_ipi_entry");
// MP variant: classify 'irq' as an IPI and dispatch it (remote requests,
// debug entry with explicit EOI, global remote requests).
// NOTE(review): gapped listing — the switch header, case labels for the
// first branch, breaks and returns are missing from this excerpt.
580 PUBLIC static inline NEEDS["ipi.h"]
582 Thread::check_for_ipi(unsigned irq)
584 if (Ipi::is_ipi(irq))
589 Thread::handle_remote_requests_irq();
592 Ipi::eoi(Ipi::Debug);
593 kern_kdebug_ipi_entry();
595 case Ipi::Global_request:
596 handle_global_remote_requests_irq();
605 //-----------------------------------------------------------------------------
606 IMPLEMENTATION [!mp]:
// Uniprocessor stub: no IPIs exist, so nothing is ever handled here
// (body not visible in this excerpt; presumably returns false).
610 Thread::check_for_ipi(unsigned)