8 static void init_per_cpu(unsigned cpu);
14 // ------------------------------------------------------------------------
21 #include "kmem_space.h"
23 #include "static_assert.h"
24 #include "thread_state.h"
28 FSR_STATUS_MASK = 0x0d,
31 FSR_PERMISSION = 0x0d,
34 DEFINE_PER_CPU Per_cpu<Thread::Dbg_stack> Thread::dbg_stack;
// Pretty-print a combined page-fault error word: low byte is the raw FSR
// status, bits 20..22 select the exception vector name, bit 16 flags a
// user-mode fault, bit 17 distinguishes read from write access.
38 Thread::print_page_fault_error(Mword e)
40   char const *const excpts[] =
41     { "reset","undef. insn", "swi", "pref. abort", "data abort",
42       "XXX", "XXX", "XXX" };
44   unsigned ex = (e >> 20) & 0x07;   // exception vector index (0..7)
46   printf("(%lx) %s, %s(%c)",e & 0xff, excpts[ex],
47          (e & 0x00010000)?"user":"kernel",
48          (e & 0x00020000)?'r':'w');
// Fast path back to user mode for the vCPU resume case: patches the entry
// frame (sp, clears Thumb bit) and hands the Vcpu_state pointer to user
// space in r0 via the __iret return sequence.  Never returns; the trailing
// panic() only documents that fact for the compiler/reader.
// NOTE(review): interior lines (ip handling, the asm statement head) are
// missing from this chunk — do not infer the full return sequence from here.
53 Thread::fast_return_to_user(Mword ip, Mword sp, Vcpu_state *arg)
56   Entry_frame *r = regs();
57   assert_kdb((r->psr & Proc::Status_mode_mask) == Proc::Status_mode_user);
60   r->sp(sp); // user-sp is in lazy user state and thus handled by
66   r->psr &= ~Proc::Status_thumb;   // resume in ARM (non-Thumb) state
69   register Vcpu_state *r0 asm("r0") = arg;  // ABI: vcpu state pointer in r0
75        : "r" (nonull_static_cast<Return_frame*>(r)), "r" (__iret), "r"(r0)
78   panic("__builtin_trap()");   // unreachable if the iret above succeeded
// Default per-CPU thread-subsystem initialization: nothing to do on this
// configuration (IMPLEMENT_DEFAULT lets other configs override it).
81 IMPLEMENT_DEFAULT inline
83 Thread::init_per_cpu(unsigned)
// First activation of a thread (presumably Thread::user_invoke — the
// signature is outside this chunk).  Zeroes the register part of the trap
// state to avoid leaking kernel data, passes sigma0 the KIP address in r0,
// then drops to user mode through __return_from_exception.
95   user_invoke_generic();
96   assert (current()->state() & Thread_ready);
98   Trap_state *ts = nonull_static_cast<Trap_state*>
99     (nonull_static_cast<Return_frame*>(current()->regs()));
101   static_assert(sizeof(ts->r[0]) == sizeof(Mword), "Size mismatch");
     // Clear all GP registers — prevents covert channels to the new task.
102   Mem::memset_mwords(&ts->r[0], 0, sizeof(ts->r) / sizeof(ts->r[0]));
     // Sigma0 receives the KIP's physical location in r0.
104   if (current()->space()->is_sigma0())
105     ts->r[0] = Kmem_space::kdir()->walk(Kip::k(), 0, false, Ptab::Null_alloc(),
108   extern char __return_from_exception;
111      (" mov sp, %[stack_p] \n"    // set stack pointer to regs structure
116       [rfe] "r" (&__return_from_exception)
119   panic("should never be reached");
     // Fallback if the thread was not started: block forever.
122   current()->state_del(Thread_ready);
123   current()->schedule();
126   // never returns here
// Sigma0 gets idempotent memory: map the superpage containing pfa 1:1
// (phys == virt), writable, user-accessible and cacheable.  Returns false
// only when the mapping failed for lack of page-table memory.
129 IMPLEMENT inline NEEDS["space.h", <cstdio>, "types.h" ,"config.h"]
130 bool Thread::handle_sigma0_page_fault( Address pfa )
132   return (mem_space()->v_insert(
133         Mem_space::Phys_addr::create((pfa & Config::SUPERPAGE_MASK)),
134         Mem_space::Addr::create(pfa & Config::SUPERPAGE_MASK),
135         Mem_space::Size(Config::SUPERPAGE_SIZE),
136         Mem_space::Page_writable | Mem_space::Page_user_accessible
137         | Mem_space::Page_cacheable)
138       != Mem_space::Insert_err_nomem);
// Default handler for undefined coprocessor instructions: handles nothing
// (body outside this chunk; presumably returns false).
143 Thread::no_copro_handler(Unsigned32, Trap_state *)
// Dispatch table indexed by coprocessor number (0..15).  All slots start as
// no_copro_handler; init_fpu_trap_handling() later installs the FPU handler
// for cp10/cp11 when FPU support is configured.
146 typedef bool (*Coproc_insn_handler)(Unsigned32 opcode, Trap_state *ts);
147 static Coproc_insn_handler handle_copro_fault[16] =
149   Thread::no_copro_handler,
150   Thread::no_copro_handler,
151   Thread::no_copro_handler,
152   Thread::no_copro_handler,
153   Thread::no_copro_handler,
154   Thread::no_copro_handler,
155   Thread::no_copro_handler,
156   Thread::no_copro_handler,
157   Thread::no_copro_handler,
158   Thread::no_copro_handler,
159   Thread::no_copro_handler,
160   Thread::no_copro_handler,
161   Thread::no_copro_handler,
162   Thread::no_copro_handler,
163   Thread::no_copro_handler,
164   Thread::no_copro_handler,
171 * The low-level page fault handler called from entry.S. We're invoked with
172 * interrupts turned off. Apart from turning on interrupts in almost
173 * all cases (except for kernel page faults in TCB area), just forwards
174 * the call to Thread::handle_page_fault().
175 * @param pfa page-fault virtual address
176 * @param error_code CPU error code
177 * @return true if page fault could be resolved, false otherwise
// Low-level page-fault entry from entry.S; classifies the fault
// (alignment, user, kernel-with-IRQs-on, kernel-with-IRQs-off) before
// delegating to Thread::handle_page_fault().  Interior lines are missing
// from this chunk; comments only annotate what is visible.
179 Mword pagefault_entry(const Mword pfa, Mword error_code,
180                       const Mword pc, Return_frame *ret_frame)
182 #if 0 // Double PF detect
183   static unsigned long last_pfa = ~0UL;
184   LOG_MSG_3VAL(current(),"PF", pfa, error_code, pc);
185   if (last_pfa == pfa || pfa == 0)
     // Alignment faults are never resolvable — report and bail out.
189   if (EXPECT_FALSE(PF::is_alignment_error(error_code)))
191       printf("KERNEL%d: alignment error at %08lx (PC: %08lx, SP: %08lx, FSR: %lx, PSR: %lx)\n",
192              current_cpu(), pfa, pc, ret_frame->usp, error_code, ret_frame->psr);
196   Thread *t = current_thread();
198   // Pagefault in user mode
199   if (PF::is_usermode_error(error_code))
     // Give a vCPU a chance to handle its own fault first.
201       if (t->vcpu_pagefault(pfa, error_code, pc))
203       t->state_del(Thread_cancel);
206   // or interrupts were enabled
207   else if (!(ret_frame->psr & Proc::Status_IRQ_disabled))
210   // Pagefault in kernel mode and interrupts were disabled
213       // page fault in kernel memory region, not present, but mapping exists
214       if (Kmem::is_kmem_page_fault (pfa, error_code))
216           // We've interrupted a context in the kernel with disabled interrupts,
217           // the page fault address is in the kernel region, the error code is
218           // "not mapped" (as opposed to "access error"), and the region is
219           // actually valid (that is, mapped in Kmem's shared page directory,
220           // just not in the currently active page directory)
223       else if (!Kmem::is_kmem_page_fault (pfa, error_code))
225           // No error -- just enable interrupts.
230           // Error: We interrupted a cli'd kernel context touching kernel space
231           if (!Thread::log_page_fault())
232             printf("*P[%lx,%lx,%lx] ", pfa, error_code, pc);
234           kdb_ke ("page fault in cli mode");
239   // cache operations we carry out for user space might cause PFs, we just
241   if (EXPECT_FALSE(t->is_ignore_mem_op_in_progress()))
247   // PFs in the kern_lib_page are always write PFs due to rollbacks and
     // Force the write bit (bit 11) in the error code for kern-lib-page PFs.
249   if (EXPECT_FALSE((pc & Kmem::Kern_lib_base) == Kmem::Kern_lib_base))
250     error_code |= (1UL << 11);
252   return t->handle_page_fault(pfa, error_code, pc, ret_frame);
// Slow-path trap entry: handles the Linux cache-maintenance syscall shim,
// decodes undefined instructions (ARM and Thumb-2 encodings) to dispatch
// coprocessor/FPU/NEON faults, and finally offers the trap as exception
// IPC.  Interior lines are missing from this chunk.
255 void slowtrap_entry(Trap_state *ts)
258   printf("Trap: pfa=%08lx pc=%08lx err=%08lx psr=%lx\n", ts->pf_address, ts->pc, ts->error_code, ts->psr);
259   Thread *t = current_thread();
     // ARM-Linux cache-flush API: SWI with r7 == 0xf0002 flushes [r0, r1).
263   if (Config::Support_arm_linux_cache_API)
265       if ( ts->error_code == 0x00200000
266           && ts->r[7] == 0xf0002)
269           Mem_op::arm_mem_cache_maint(Mem_op::Op_cache_coherent,
270                                       (void *)ts->r[0], (void *)ts->r[1]);
276   if (ts->exception_is_undef_insn())
     // Fetch the faulting opcode; Thumb needs 16/32-bit reassembly.
280       if (ts->psr & Proc::Status_thumb)
282           Unsigned16 v = *(Unsigned16 *)(ts->pc - 2);
            // Opcodes above 0x1c in the top 5 bits are 32-bit Thumb-2.
283           if ((v >> 11) <= 0x1c)
286           opcode = (v << 16) | *(Unsigned16 *)ts->pc;
289         opcode = *(Unsigned32 *)(ts->pc - 4);
     // Thumb-2 SIMD/VFP encodings go straight to the cp10 handler.
291       if (ts->psr & Proc::Status_thumb)
293           if ( (opcode & 0xef000000) == 0xef000000 // A6.3.18
294               || (opcode & 0xff100000) == 0xf9000000)
296             if (handle_copro_fault[10](opcode, ts))
     // ARM Advanced-SIMD encodings, likewise via cp10.
303           if ( (opcode & 0xfe000000) == 0xf2000000 // A5.7.1
304               || (opcode & 0xff100000) == 0xf4000000)
306             if (handle_copro_fault[10](opcode, ts))
     // Generic coprocessor instruction: dispatch by cp number (bits 8..11).
312       if ((opcode & 0x0c000000) == 0x0c000000)
314           unsigned copro = (opcode >> 8) & 0xf;
315           if (handle_copro_fault[copro](opcode, ts))
321   // send exception IPC if requested
322   if (t->send_exception(ts))
// Detect the special "TCB page-in probe" instruction (ldr lr, [lr]) at the
// faulting PC.  If matched, the fault is answered by setting the Z flag in
// the saved PSR so the probing code can detect the miss and recover.
332 Thread::pagein_tcb_request(Return_frame *regs)
334   //if ((*(Mword*)regs->pc & 0xfff00fff ) == 0xe5900000)
335   if (*(Mword*)regs->pc == 0xe59ee000)
337       // printf("TCBR: %08lx\n", *(Mword*)regs->pc);
338       // skip faulting instruction
340       // tell program that a pagefault occurred we cannot handle
341       regs->psr |= 0x40000000; // set zero flag in psr
349 //---------------------------------------------------------------------------
350 IMPLEMENTATION [arm]:
352 #include "trap_state.h"
// Thread constructor (fragment — the signature line is outside this chunk).
356     @param space the address space
357     @param id user-visible thread ID of the sender
358     @param init_prio initial priority
359     @param mcp thread's maximum controlled priority
360     @post state() != Thread_invalid
364   : Sender(0), // select optimized version of constructor
365     _pager(Thread_ptr::Invalid),
366     _exc_handler(Thread_ptr::Invalid),
369   assert (state(false) == Thread_invalid);
372   _space.space(Kernel_task::kernel_task());
     // Poison the kernel stack with '5' so stack usage can be inspected.
374   if (Config::Stack_depth)
375     std::memset((char*)this + sizeof(Thread), '5',
376                 Thread::Size-sizeof(Thread)-64);
378   // set a magic value -- we use it later to verify the stack hasn't
383   _in_exception = false;
     // First dispatch of this thread pops user_invoke off the kernel stack.
385   *reinterpret_cast<void(**)()> (--_kernel_sp) = user_invoke;
387   // clear out user regs that can be returned from the thread_ex_regs
388   // system call to prevent covert channel
389   Entry_frame *r = regs();
392   r->psr = Proc::Status_mode_user;
394   state_add_dirty(Thread_dead, false);
396   // ok, we're ready to go!
// User-visible SP/IP/flags accessors.  user_ip honors a pending triggered
// exception: while one is outstanding the IP lives in _exc_cont, not in
// the entry frame.
401 Thread::user_sp() const
402 { return regs()->sp(); }
406 Thread::user_sp(Mword sp)
407 { return regs()->sp(sp); }
409 IMPLEMENT inline NEEDS[Thread::exception_triggered]
411 Thread::user_ip() const
412 { return exception_triggered() ? _exc_cont.ip() : regs()->ip(); }
416 Thread::user_flags() const
419 IMPLEMENT inline NEEDS[Thread::exception_triggered]
421 Thread::user_ip(Mword ip)
423   if (exception_triggered())
427       Entry_frame *r = regs();
        // Force user mode when setting the IP directly on the entry frame.
429       r->psr = (r->psr & ~Proc::Status_mode_mask) | Proc::Status_mode_user;
// Architecture hook run before exception IPC is sent; no-op on ARM.
434 PUBLIC inline NEEDS ["trap_state.h"]
436 Thread::send_exception_arch(Trap_state *)
438   // nothing to tweak on ARM
// Store the FPU exception state into the UTCB, starting at word 21
// (the layout past values[20] is reserved for it — see copy_ts_to_utcb).
442 PRIVATE static inline
444 Thread::save_fpu_state_to_utcb(Trap_state *ts, Utcb *u)
446   char *esu = (char *)&u->values[21];
447   Fpu::save_user_exception_state(ts, (Fpu::Exception_state_user *)esu);
// An IPC buffer is invalid if the superpage containing it reaches into the
// kernel part of the address space (checked via its last byte).
452 Thread::invalid_ipc_buffer(void const *a)
455   return Mem_layout::in_kernel(((Address)a & Config::SUPERPAGE_MASK)
456                                + Config::SUPERPAGE_SIZE - 1);
// Arm a triggered exception: divert the thread's return path through
// ret_handler, but only if no continuation is already pending.
463 Thread::do_trigger_exception(Entry_frame *r, void *ret_handler)
465   if (!_exc_cont.valid())
467       _exc_cont.activate(r, ret_handler);
// Exception-IPC reply path: copy register state from the sender's UTCB
// into the receiver's trap state.  The PSR word (values[20]) is sanitized
// so user space can never smuggle in a privileged mode or IRQ mask.
474 PRIVATE static inline NEEDS[Thread::get_ts_tpidruro]
475 bool FIASCO_WARN_RESULT
476 Thread::copy_utcb_to_ts(L4_msg_tag const &tag, Thread *snd, Thread *rcv,
477                         unsigned char rights)
479   Trap_state *ts = (Trap_state*)rcv->_utcb_handler;
480   Utcb *snd_utcb = snd->utcb().access();
481   Mword       s  = tag.words();
483   if (EXPECT_FALSE(rcv->exception_triggered()))
485       // triggered exception pending
486       Mem::memcpy_mwords (ts, snd_utcb->values, s > 16 ? 16 : s);
487       if (EXPECT_TRUE(s > 20))
489           // sanitize processor mode
            // Continuation resumes in kernel: force supervisor, IRQs off.
491           snd_utcb->values[20] &= ~Proc::Status_mode_mask; // clear mode
492           snd_utcb->values[20] |= Proc::Status_mode_supervisor
493             | Proc::Status_interrupts_disabled;
        // NOTE(review): this inner 's' deliberately(?) shadows the word
        // count above — the frame starts at values[16].
495       Continuation::User_return_frame const *s
496         = reinterpret_cast<Continuation::User_return_frame const *>((char*)&snd_utcb->values[16]);
498       rcv->_exc_cont.set(ts, s);
        // Normal case: r0..r15 plus pc (values[19]) go into the trap state.
503       Mem::memcpy_mwords (ts, snd_utcb->values, s > 19 ? 19 : s);
504       if (EXPECT_TRUE(s > 19))
505         ts->pc = snd_utcb->values[19];
506       if (EXPECT_TRUE(s > 20))
508           // sanitize processor mode
509           Mword p = snd_utcb->values[20];
510           p &= ~(Proc::Status_mode_mask | Proc::Status_interrupts_mask); // clear mode & irqs
511           p |= Proc::Status_mode_user;
     // Optional FPU state transfer, gated on write rights.
516   if (tag.transfer_fpu() && (rights & L4_fpage::W))
517     snd->transfer_fpu(rcv);
     // Flag 0x8000: also transfer the user TLS word (values[25] -> user[2]).
519   if ((tag.flags() & 0x8000) && (rights & L4_fpage::W))
520     rcv->utcb().access()->user[2] = snd_utcb->values[25];
522   rcv->get_ts_tpidruro(ts);
524   bool ret = transfer_msg_items(tag, snd, snd_utcb,
525                                 rcv, rcv->utcb().access(), rights);
527   rcv->state_del(Thread_in_exception);
// Exception-IPC send path: marshal the faulting thread's trap state into
// the handler's UTCB (r0..r15 in values[0..15], return frame at [16..18],
// pc/psr in [19]/[20], FPU exception state from [21] on).
532 PRIVATE static inline NEEDS[Thread::save_fpu_state_to_utcb,
533                             Thread::set_ts_tpidruro]
534 bool FIASCO_WARN_RESULT
535 Thread::copy_ts_to_utcb(L4_msg_tag const &, Thread *snd, Thread *rcv,
536                         unsigned char rights)
538   Trap_state *ts = (Trap_state*)snd->_utcb_handler;
     // Copy under the CPU lock so the state is consistent.
541     auto guard = lock_guard(cpu_lock);
542     Utcb *rcv_utcb = rcv->utcb().access();
544     snd->set_ts_tpidruro(ts);
546     Mem::memcpy_mwords(rcv_utcb->values, ts, 16);
547     Continuation::User_return_frame *d
548       = reinterpret_cast<Continuation::User_return_frame *>((char*)&rcv_utcb->values[16]);
550     snd->_exc_cont.get(d, ts);
     // pc/psr come from the trap state only if no continuation overrides them.
553     if (EXPECT_TRUE(!snd->exception_triggered()))
555         rcv_utcb->values[19] = ts->pc;
556         rcv_utcb->values[20] = ts->psr;
559     if (rcv_utcb->inherit_fpu() && (rights & L4_fpage::W))
560         snd->transfer_fpu(rcv);
562     save_fpu_state_to_utcb(ts, rcv_utcb);
// Architecture-specific thread syscalls; the only ARM opcode currently
// handled is Op_set_tpidruro_arm, everything else yields ENosys.
567 PROTECTED inline NEEDS[Thread::set_tpidruro]
569 Thread::invoke_arch(L4_msg_tag tag, Utcb *utcb)
571   switch (utcb->values[0] & Opcode_mask)
573     case Op_set_tpidruro_arm:
574       return set_tpidruro(tag, utcb);
576       return commit_result(-L4_err::ENosys);
// Arch hook for thread_control; body not visible here (presumably empty).
582 Thread::sys_control_arch(Utcb *)
// Evaluate an ARM condition code against the PSR flags: table v (defined in
// the elided lines) maps each of the 16 condition fields to a 16-bit mask
// indexed by the NZCV nibble.
589 Thread::condition_valid(Unsigned32 insn, Unsigned32 psr)
591   // Matrix of instruction conditions and PSR flags,
592   // index into the table is the condition from insn
     // insn[31:28] = condition field, psr[31:28] = NZCV flags.
613   return (v[insn >> 28] >> (psr >> 28)) & 1;
616 // ------------------------------------------------------------------------
617 IMPLEMENTATION [arm && armv6plus]:
// On vCPU resume, publish UTCB values[25] into TPIDRURO (CP15 c13,c0,2 —
// the user read-only thread-ID register).
621 Thread::vcpu_resume_user_arch()
623   // just an experiment for now, we cannot really take the
624   // user-writable register because user-land might already use it
625   asm volatile("mcr p15, 0, %0, c13, c0, 2"
626                : : "r" (utcb().access(true)->values[25]) : "memory");
// Set the thread's TPIDRURO value from utcb->values[1].  Requires at least
// 2 message words; propagates the change to the vCPU state, and (in elided
// lines) to the hardware register if the thread is currently running.
631 Thread::set_tpidruro(L4_msg_tag tag, Utcb *utcb)
633   if (EXPECT_FALSE(tag.words() < 2))
634     return commit_result(-L4_err::EInval);
636   _tpidruro = utcb->values[1];
637   if (EXPECT_FALSE(state() & Thread_vcpu_enabled))
638     arch_update_vcpu_state(vcpu_state().access());
640   if (this == current_thread())
643   return commit_result(0);
// Shuttle the TPIDRURO value between the thread and a trap state
// (get_: trap state -> thread, set_: thread -> trap state).
648 Thread::get_ts_tpidruro(Trap_state *ts)
650   _tpidruro = ts->tpidruro;
655 Thread::set_ts_tpidruro(Trap_state *ts)
657   ts->tpidruro = _tpidruro;
660 // ------------------------------------------------------------------------
661 IMPLEMENTATION [arm && !armv6plus]:
// Pre-ARMv6 stubs: no TPIDRURO register exists, so setting it is EInval
// and the remaining hooks are no-ops.
665 Thread::vcpu_resume_user_arch()
670 Thread::set_tpidruro(L4_msg_tag, Utcb *)
672   return commit_result(-L4_err::EInval);
677 Thread::get_ts_tpidruro(Trap_state *)
682 Thread::set_ts_tpidruro(Trap_state *)
685 //-----------------------------------------------------------------------------
// MP extension: entry symbol for the kernel-debugger IPI, referenced from
// assembly via the asm label.
691 EXTENSION class Thread
694   static void kern_kdebug_ipi_entry() asm("kern_kdebug_ipi_entry");
// IRQ object for per-CPU remote-request IPIs; dispatches to
// Thread::handle_remote_requests_irq().
697 class Thread_remote_rq_irq : public Irq_base
700   // we assume IPIs to be top level, no upstream IRQ chips
701   void handle(Upstream_irq const *)
702   { Thread::handle_remote_requests_irq(); }
704   Thread_remote_rq_irq()
705   { set_hit(&handler_wrapper<Thread_remote_rq_irq>); }
     // IPIs have no trigger-mode configuration.
707   void switch_mode(unsigned) {}
// IRQ object for global remote-request IPIs; dispatches to
// Thread::handle_global_remote_requests_irq().
710 class Thread_glbl_remote_rq_irq : public Irq_base
713   // we assume IPIs to be top level, no upstream IRQ chips
714   void handle(Upstream_irq const *)
715   { Thread::handle_global_remote_requests_irq(); }
717   Thread_glbl_remote_rq_irq()
718   { set_hit(&handler_wrapper<Thread_glbl_remote_rq_irq>); }
     // IPIs have no trigger-mode configuration.
720   void switch_mode(unsigned) {}
// IRQ object for the debugger IPI: acknowledges the IPI, then enters the
// kernel debugger on this CPU.
723 class Thread_debug_ipi : public Irq_base
726   // we assume IPIs to be top level, no upstream IRQ chips
727   void handle(Upstream_irq const *)
729     Ipi::eoi(Ipi::Debug, current_cpu());
730     Thread::kern_kdebug_ipi_entry();
734   { set_hit(&handler_wrapper<Thread_debug_ipi>); }
     // IPIs have no trigger-mode configuration.
736   void switch_mode(unsigned) {}
739 //-----------------------------------------------------------------------------
740 IMPLEMENTATION [mp && !irregular_gic]:
// Fragment of the Arm_ipis aggregate (class head elided): binds the three
// IPI IRQ objects to their IPI numbers at the IRQ manager and instantiates
// them once, statically.
747     Irq_mgr::mgr->alloc(&remote_rq_ipi, Ipi::Request);
748     Irq_mgr::mgr->alloc(&glbl_remote_rq_ipi, Ipi::Global_request);
749     Irq_mgr::mgr->alloc(&debug_ipi, Ipi::Debug);
752   Thread_remote_rq_irq remote_rq_ipi;
753   Thread_glbl_remote_rq_irq glbl_remote_rq_ipi;
754   Thread_debug_ipi debug_ipi;
757 static Arm_ipis _arm_ipis;
760 //-----------------------------------------------------------------------------
761 IMPLEMENTATION [arm && fpu]:
// Coprocessor-fault handler for cp10/cp11 (VFP/NEON).  Conditionally-failed
// instructions are skipped; otherwise either emulate the insn or lazily
// switch the FPU to this thread and retry it.  On failure, the error code
// is tagged so user space can recognize an FPU undef-insn exception.
765 Thread::handle_fpu_trap(Unsigned32 opcode, Trap_state *ts)
     // Condition field failed: the insn is a no-op, just step over it.
767   if (!condition_valid(opcode, ts->psr))
769       if (ts->psr & Proc::Status_thumb)
774   if (Fpu::is_enabled())
776       assert(Fpu::fpu.current().owner() == current());
777       if (Fpu::is_emu_insn(opcode))
778         return Fpu::emulate_insns(opcode, ts);
     // FPU was disabled: lazily acquire it, then re-execute the insn
     // by rewinding the PC (2 bytes Thumb, 4 bytes ARM).
780   else if (current_thread()->switchin_fpu())
782       if (Fpu::is_emu_insn(opcode))
783         return Fpu::emulate_insns(opcode, ts);
784       ts->pc -= (ts->psr & Proc::Status_thumb) ? 2 : 4;
788   ts->error_code |= 0x01000000; // tag fpu undef insn
789   if (Fpu::exc_pending())
790     ts->error_code |= 0x02000000; // fpinst and fpinst2 in utcb will be valid
// Install the FPU trap handler for coprocessors 10 and 11 (VFP/NEON);
// registered as a static initializer.
797 Thread::init_fpu_trap_handling()
799   handle_copro_fault[10] = Thread::handle_fpu_trap;
800   handle_copro_fault[11] = Thread::handle_fpu_trap;
803 STATIC_INITIALIZEX(Thread, init_fpu_trap_handling);