8 static void init_per_cpu(Cpu_number cpu, bool resume);
13 // ------------------------------------------------------------------------
20 #include "kmem_space.h"
22 #include "static_assert.h"
23 #include "thread_state.h"
27 FSR_STATUS_MASK = 0x0d,
30 FSR_PERMISSION = 0x0d,
33 DEFINE_PER_CPU Per_cpu<Thread::Dbg_stack> Thread::dbg_stack;
// Print a human-readable description of a page-fault/exception error
// word: low byte in hex, exception class from bits [22:20], bit 16
// selects user/kernel mode, bit 17 selects read/write access.
// NOTE(review): field layout inferred from the expressions below —
// confirm against the entry code that assembles `e`.
37 Thread::print_page_fault_error(Mword e)
39 char const *const excpts[] =
40 { "reset","undef. insn", "swi", "pref. abort", "data abort",
41 "XXX", "XXX", "XXX" };
43 unsigned ex = (e >> 20) & 0x07;
45 printf("(%lx) %s, %s(%c)",e & 0xff, excpts[ex],
46 (e & 0x00010000)?"user":"kernel",
47 (e & 0x00020000)?'r':'w');
// Return this thread directly to user mode at the given ip/sp,
// bypassing the regular kernel exit path (used e.g. for vCPU resume).
// `arg` is handed to the user entry in r0 via the inline asm below.
52 Thread::fast_return_to_user(Mword ip, Mword sp, Vcpu_state *arg)
55 Entry_frame *r = regs();
56 assert(r->check_valid_user_psr());
59 r->sp(sp); // user-sp is in lazy user state and thus handled by
// Force ARM (non-Thumb) state for the return.
64 r->psr &= ~Proc::Status_thumb;
66 // extended vCPU runs the host code in ARM system mode
67 if (Proc::Is_hyp && (state() & Thread_ext_vcpu_enabled))
68 r->psr_set_mode(Proc::PSR_m_svc);
// Pin `arg` to register r0 so it survives into the asm return path.
71 register Vcpu_state *r0 asm("r0") = arg;
77 : "r" (nonull_static_cast<Return_frame*>(r)), "r" (__iret), "r"(r0)
// The asm above never falls through; reaching this line is a bug.
80 panic("__builtin_trap()");
83 IMPLEMENT_DEFAULT inline
// Default per-CPU init hook: nothing to do in this configuration.
85 Thread::init_per_cpu(Cpu_number, bool)
// First activation of a freshly created thread: scrub the user
// register image, give sigma0 the KIP's physical address in r0, and
// jump to user mode via __return_from_user_invoke.
97 user_invoke_generic();
98 assert (current()->state() & Thread_ready);
100 Trap_state *ts = nonull_static_cast<Trap_state*>
101 (nonull_static_cast<Return_frame*>(current()->regs()));
// Alignment invariant of the register-frame layout on the stack.
103 assert (((Mword)ts & 7) == 4); // Return_frame has 5 words
// Clear all GPRs to avoid leaking kernel data to user space.
105 static_assert(sizeof(ts->r[0]) == sizeof(Mword), "Size mismatch");
106 Mem::memset_mwords(&ts->r[0], 0, sizeof(ts->r) / sizeof(ts->r[0]));
108 if (current()->space()->is_sigma0())
109 ts->r[0] = Kmem_space::kdir()->virt_to_phys((Address)Kip::k());
111 ts->psr |= Proc::Status_always_mask;
113 extern char __return_from_user_invoke;
116 (" mov sp, %[stack_p] \n" // set stack pointer to regs structure
121 [rfe] "r" (&__return_from_user_invoke)
124 panic("should never be reached");
// Fallback: park the thread if the jump could not happen.
127 current()->state_del(Thread_ready);
128 current()->schedule();
131 // never returns here
134 IMPLEMENT inline NEEDS["space.h", "types.h", "config.h"]
// Map the faulting superpage idempotently 1:1 (phys == virt) with
// full user rights into sigma0; only out-of-memory counts as failure.
135 bool Thread::handle_sigma0_page_fault(Address pfa)
138 ->v_insert(Mem_space::Phys_addr((pfa & Config::SUPERPAGE_MASK)),
139 Virt_addr(pfa & Config::SUPERPAGE_MASK),
140 Virt_order(Config::SUPERPAGE_SHIFT) /*mem_space()->largest_page_size()*/,
141 Mem_space::Attr(L4_fpage::Rights::URWX()))
142 != Mem_space::Insert_err_nomem;
// Detect whether a kernel-mode memory operation done on behalf of
// user space faulted (flag set by the fault path); if so, rewind the
// PC past the faulting instruction (2 bytes Thumb, 4 bytes ARM).
148 Thread::check_for_kernel_mem_access_pf(Trap_state *ts, Thread *t)
150 if (EXPECT_FALSE(t->is_kernel_mem_op_hit_and_clear()))
152 Mword pc = t->exception_triggered() ? t->_exc_cont.ip() : ts->pc;
154 pc -= (ts->psr & Proc::Status_thumb) ? 2 : 4;
156 if (t->exception_triggered())
171 * The low-level page fault handler called from entry.S. We're invoked with
172 * interrupts turned off. Apart from turning on interrupts in almost
173 * all cases (except for kernel page faults in TCB area), just forwards
174 * the call to Thread::handle_page_fault().
175 * @param pfa page-fault virtual address
176 * @param error_code CPU error code
 * @param pc faulting instruction pointer
 * @param ret_frame register frame saved at kernel entry
177 * @return true if page fault could be resolved, false otherwise
179 Mword pagefault_entry(const Mword pfa, Mword error_code,
180 const Mword pc, Return_frame *ret_frame)
// Alignment faults are never resolvable -- report and fail.
182 if (EXPECT_FALSE(PF::is_alignment_error(error_code)))
184 printf("KERNEL%d: alignment error at %08lx (PC: %08lx, SP: %08lx, FSR: %lx, PSR: %lx)\n",
185 cxx::int_value<Cpu_number>(current_cpu()), pfa, pc,
186 ret_frame->usp, error_code, ret_frame->psr);
190 if (EXPECT_FALSE(Thread::is_debug_exception(error_code, true)))
193 Thread *t = current_thread();
195 // cache operations we carry out for user space might cause PFs, we just
197 if (EXPECT_FALSE(!PF::is_usermode_error(error_code))
198 && EXPECT_FALSE(t->is_ignore_mem_op_in_progress()))
200 t->set_kernel_mem_op_hit();
205 // Pagefault in user mode
206 if (PF::is_usermode_error(error_code))
208 // PFs in the kern_lib_page are always write PFs due to rollbacks and
210 if (EXPECT_FALSE((pc & Kmem::Kern_lib_base) == Kmem::Kern_lib_base))
211 error_code |= (1UL << 6);
213 // TODO: Avoid calling Thread::map_fsr_user here every time!
214 if (t->vcpu_pagefault(pfa, Thread::map_fsr_user(error_code, true), pc))
216 t->state_del(Thread_cancel);
// Hand off to the generic handler unless this is a kernel-memory
// fault taken with preemption disabled.
219 if (EXPECT_TRUE(PF::is_usermode_error(error_code))
220 || !(ret_frame->psr & Proc::Status_preempt_disabled)
221 || !Kmem::is_kmem_page_fault(pfa, error_code))
224 return t->handle_page_fault(pfa, error_code, pc, ret_frame);
// Generic trap handler: normalize the FSR into the user-visible
// encoding, service the ARM-Linux cache-maintenance API, coprocessor
// faults and debug exceptions, then try to reflect the trap to user
// space as an exception IPC.
227 void slowtrap_entry(Trap_state *ts)
229 ts->error_code = Thread::map_fsr_user(ts->error_code, false);
232 printf("Trap: pfa=%08lx pc=%08lx err=%08lx psr=%lx\n", ts->pf_address, ts->pc, ts->error_code, ts->psr);
233 Thread *t = current_thread();
// ARM-Linux cache-flush call: SVC (EC 0x11) with r7 == 0xf0002.
237 if (Config::Support_arm_linux_cache_API)
239 if ( ts->hsr().ec() == 0x11
240 && ts->r[7] == 0xf0002)
243 Mem_op::arm_mem_cache_maint(Mem_op::Op_cache_coherent,
244 (void *)ts->r[0], (void *)ts->r[1]);
250 if (t->check_and_handle_coproc_faults(ts))
253 if (Thread::is_debug_exception(ts->error_code))
255 Thread::handle_debug_exception(ts);
259 // send exception IPC if requested
260 if (t->send_exception(ts))
267 PUBLIC static inline NEEDS[Thread::call_nested_trap_handler]
// Debug exceptions are forwarded to the nested trap (debugger) handler.
269 Thread::handle_debug_exception(Trap_state *ts)
271 call_nested_trap_handler(ts);
// Handle a page fault raised by the kernel's TCB-probing load (the
// magic instruction 0xe59ee000 == 'ldr lr, [lr]'): skip the faulting
// instruction and report failure to the interrupted code by setting
// the Z flag in its saved PSR.
276 Thread::pagein_tcb_request(Return_frame *regs)
278 //if ((*(Mword*)regs->pc & 0xfff00fff ) == 0xe5900000)
279 if (*(Mword*)regs->pc == 0xe59ee000)
281 // printf("TCBR: %08lx\n", *(Mword*)regs->pc);
282 // skip faulting instruction
284 // tell program that a pagefault occurred we cannot handle
285 regs->psr |= 0x40000000; // set zero flag in psr
293 //---------------------------------------------------------------------------
294 IMPLEMENTATION [arm && !arm_lpae]:
// Classify an error code as a debug exception (non-LPAE build).
// just_native_type: test the raw ARM FSR status bits; otherwise the
// code has already been converted to the LPAE-style user encoding.
298 Thread::is_debug_exception(Mword error_code, bool just_native_type = false)
300 if (just_native_type)
301 return (error_code & 0x4f) == 2;
303 // LPAE type as already converted
304 return (error_code & 0xc000003f) == 0x80000022;
307 //---------------------------------------------------------------------------
308 IMPLEMENTATION [arm && arm_lpae]:
// Classify an error code as a debug exception (LPAE build): status
// 0x22 either in the native position (bits 31:26) or in the already
// converted user-visible encoding.
312 Thread::is_debug_exception(Mword error_code, bool just_native_type = false)
314 if (just_native_type)
315 return ((error_code >> 26) & 0x3f) == 0x22;
316 return (error_code & 0xc000003f) == 0x80000022;
319 //---------------------------------------------------------------------------
320 IMPLEMENTATION [arm]:
322 #include "trap_state.h"
326 @param space the address space
327 @param id user-visible thread ID of the sender
328 @param init_prio initial priority
329 @param mcp thread's maximum controlled priority
334 : Sender(0), // select optimized version of constructor
335 _pager(Thread_ptr::Invalid),
336 _exc_handler(Thread_ptr::Invalid),
339 assert (state(false) == 0);
342 _space.space(Kernel_task::kernel_task());
// Poison the unused kernel-stack area so later consistency checks
// can detect overruns ('5' pattern).
344 if (Config::Stack_depth)
345 std::memset((char*)this + sizeof(Thread), '5',
346 Thread::Size-sizeof(Thread)-64);
348 // set a magic value -- we use it later to verify the stack hasn't
353 _in_exception = false;
// First activation pops user_invoke as the initial return address.
355 *reinterpret_cast<void(**)()> (--_kernel_sp) = user_invoke;
357 // clear out user regs that can be returned from the thread_ex_regs
358 // system call to prevent covert channel
359 Entry_frame *r = regs();
362 r->psr = Proc::Status_mode_user;
363 //r->psr = 0x1f; //Proc::Status_mode_user;
365 state_add_dirty(Thread_dead, false);
367 // ok, we're ready to go!
// Read / write the user-mode stack pointer in the saved entry frame.
372 Thread::user_sp() const
373 { return regs()->sp(); }
377 Thread::user_sp(Mword sp)
378 { return regs()->sp(sp); }
381 IMPLEMENT inline NEEDS[Thread::exception_triggered]
// User IP: with a pending exception IPC, the real continuation IP
// lives in _exc_cont rather than in the entry frame.
383 Thread::user_ip() const
384 { return exception_triggered() ? _exc_cont.ip() : regs()->ip(); }
388 Thread::user_flags() const
391 IMPLEMENT inline NEEDS[Thread::exception_triggered]
// Set the user IP, honoring a pending exception continuation.
393 Thread::user_ip(Mword ip)
395 if (exception_triggered())
399 Entry_frame *r = regs();
405 PUBLIC inline NEEDS ["trap_state.h"]
// Architecture hook for exception IPC -- no ARM-specific work needed.
407 Thread::send_exception_arch(Trap_state *)
409 // nothing to tweak on ARM
// Serialize the user-visible FPU exception state into the UTCB
// (starting at value word 21) for exception IPC.
415 Thread::save_fpu_state_to_utcb(Trap_state *ts, Utcb *u)
417 char *esu = (char *)&u->values[21];
418 Fpu::save_user_exception_state(state() & Thread_fpu_owner, fpu_state(),
419 ts, (Fpu::Exception_state_user *)esu);
// An IPC buffer address is invalid if the superpage containing it
// reaches into the kernel's part of the address space.
424 Thread::invalid_ipc_buffer(void const *a)
427 return Mem_layout::in_kernel(((Address)a & Config::SUPERPAGE_MASK)
428 + Config::SUPERPAGE_SIZE - 1);
// Arrange for `ret_handler` to run on the next return to user mode,
// unless an exception continuation is already pending for this frame.
435 Thread::do_trigger_exception(Entry_frame *r, void *ret_handler)
437 if (!_exc_cont.valid(r))
439 _exc_cont.activate(r, ret_handler);
446 PRIVATE static inline NEEDS[Thread::get_ts_tpidruro]
447 bool FIASCO_WARN_RESULT
// Exception-IPC reply path: copy a Trap_state image from the sender's
// UTCB into the receiver's pending trap state, sanitizing every field
// user space must not set freely (PSR mode bits etc.).
448 Thread::copy_utcb_to_ts(L4_msg_tag tag, Thread *snd, Thread *rcv,
449 L4_fpage::Rights rights)
451 // if the message is too short just skip the whole copy in
452 if (EXPECT_FALSE(tag.words() < (sizeof(Trap_state) / sizeof(Mword))))
455 Trap_state *ts = (Trap_state*)rcv->_utcb_handler;
456 Utcb *snd_utcb = snd->utcb().access();
458 if (EXPECT_FALSE(rcv->exception_triggered()))
460 // triggered exception pending
// Copy the first 16 words (register image); the return frame at
// word 16 is sanitized and stored into the exception continuation.
461 Mem::memcpy_mwords (ts, snd_utcb->values, 16);
462 Return_frame rf = *reinterpret_cast<Return_frame const *>((char const *)&snd_utcb->values[16]);
463 rcv->sanitize_user_state(&rf);
464 rcv->_exc_cont.set(ts, &rf);
467 rcv->copy_and_sanitize_trap_state(
468 ts, reinterpret_cast<Trap_state const *>(snd_utcb->values));
// Optionally hand over FPU state and the user-defined UTCB word.
470 if (tag.transfer_fpu() && (rights & L4_fpage::Rights::W()))
471 snd->transfer_fpu(rcv);
473 if ((tag.flags() & 0x8000) && (rights & L4_fpage::Rights::W()))
474 rcv->utcb().access()->user[2] = snd_utcb->values[25];
476 rcv->get_ts_tpidruro(ts);
478 bool ret = transfer_msg_items(tag, snd, snd_utcb,
479 rcv, rcv->utcb().access(), rights);
481 rcv->state_del(Thread_in_exception);
486 PRIVATE static inline NEEDS[Thread::save_fpu_state_to_utcb,
487 Thread::set_ts_tpidruro]
488 bool FIASCO_WARN_RESULT
// Exception-IPC request path: copy the sender's trap state into the
// receiver's UTCB (registers, return frame, optionally FPU state).
489 Thread::copy_ts_to_utcb(L4_msg_tag, Thread *snd, Thread *rcv,
490 L4_fpage::Rights rights)
492 Trap_state *ts = (Trap_state*)snd->_utcb_handler;
// Serialize against concurrent access while the UTCB is filled.
495 auto guard = lock_guard(cpu_lock);
496 Utcb *rcv_utcb = rcv->utcb().access();
498 snd->set_ts_tpidruro(ts);
500 Mem::memcpy_mwords(rcv_utcb->values, ts, 16);
501 Continuation::User_return_frame *d
502 = reinterpret_cast<Continuation::User_return_frame *>((char*)&rcv_utcb->values[16]);
504 snd->_exc_cont.get(d, ts);
// Without a pending continuation, pc/psr come from the live frame.
507 if (EXPECT_TRUE(!snd->exception_triggered()))
509 rcv_utcb->values[19] = ts->pc;
510 rcv_utcb->values[20] = ts->psr;
513 if (rcv_utcb->inherit_fpu() && (rights & L4_fpage::Rights::W()))
515 snd->save_fpu_state_to_utcb(ts, rcv_utcb);
516 snd->transfer_fpu(rcv);
522 PROTECTED inline NEEDS[Thread::set_tpidruro]
// Dispatch ARM-specific thread-object invocations by opcode;
// unknown opcodes report ENosys.
524 Thread::invoke_arch(L4_msg_tag tag, Utcb *utcb)
526 switch (utcb->values[0] & Opcode_mask)
528 case Op_set_tpidruro_arm:
529 return set_tpidruro(tag, utcb);
531 return commit_result(-L4_err::ENosys);
537 Thread::sys_control_arch(Utcb *)
// Evaluate an ARM condition code against the PSR flags: v[cond] is a
// per-condition bitmask indexed by the NZCV nibble (PSR bits 31:28);
// the selected bit says whether the condition passes.
544 Thread::condition_valid(unsigned char cond, Unsigned32 psr)
546 // Matrix of instruction conditions and PSR flags,
547 // index into the table is the condition from insn
568 return (v[cond] >> (psr >> 28)) & 1;
571 // ------------------------------------------------------------------------
572 IMPLEMENTATION [arm && (arm_em_tz || hyp)]:
574 IMPLEMENT_OVERRIDE inline
576 Thread::arch_ext_vcpu_enabled()
579 // ------------------------------------------------------------------------
580 IMPLEMENTATION [arm && !arm_lpae]:
// Translate a native (non-LPAE) FSR into the LPAE-style encoding
// reported to user space; pf_map maps the 5-bit native fault status
// to the LPAE status code, upper FSR bits are passed through.
// NOTE(review): the index expression below ORs FSR bit 10 into bit 0
// of the table index, where it can collide with the low status bits —
// the 5th status bit would normally land at index bit 4 (i.e.
// ((fsr >> 6) & 0x10)); verify against the elided surrounding lines.
584 Thread::map_fsr_user(Mword fsr, bool is_only_pf)
586 static Unsigned16 const pf_map[32] =
589 /* 0x1 */ 0x21, /* Alignment */
590 /* 0x2 */ 0x22, /* Debug */
591 /* 0x3 */ 0x08, /* Access flag (1st level) */
592 /* 0x4 */ 0x2000, /* Insn cache maint */
593 /* 0x5 */ 0x04, /* Transl (1st level) */
594 /* 0x6 */ 0x09, /* Access flag (2nd level) */
595 /* 0x7 */ 0x05, /* Transl (2nd level) */
596 /* 0x8 */ 0x10, /* Sync ext abort */
597 /* 0x9 */ 0x3c, /* Domain (1st level) */
599 /* 0xb */ 0x3d, /* Domain (2nd level) */
600 /* 0xc */ 0x14, /* Sync ext abt on PT walk (1st level) */
601 /* 0xd */ 0x0c, /* Perm (1st level) */
602 /* 0xe */ 0x15, /* Sync ext abt on PT walk (2nd level) */
603 /* 0xf */ 0x0d, /* Perm (2nd level) */
604 /* 0x10 */ 0x30, /* TLB conflict abort */
608 /* 0x14 */ 0x34, /* Lockdown (impl-def) */
610 /* 0x16 */ 0x11, /* Async ext abort */
612 /* 0x18 */ 0x19, /* Async par err on mem access */
613 /* 0x19 */ 0x18, /* Sync par err on mem access */
614 /* 0x1a */ 0x3a, /* Copro abort (impl-def) */
616 /* 0x1c */ 0x14, /* Sync par err on PT walk (1st level) */
618 /* 0x1e */ 0x15, /* Sync par err on PT walk (2nd level) */
622 if (is_only_pf || (fsr & 0xc0000000) == 0x80000000)
623 return pf_map[((fsr >> 10) & 1) | (fsr & 0xf)] | (fsr & ~0x43f);
628 // ------------------------------------------------------------------------
629 IMPLEMENTATION [arm && arm_lpae]:
633 Thread::map_fsr_user(Mword fsr, bool)
636 // ------------------------------------------------------------------------
637 IMPLEMENTATION [arm && armv6plus]:
// On vCPU resume, load UTCB word 25 into TPIDRURW (CP15 c13, c0, 2),
// the user-writable software thread-ID register.
641 Thread::vcpu_resume_user_arch()
643 // just an experiment for now, we cannot really take the
644 // user-writable register because user-land might already use it
645 asm volatile("mcr p15, 0, %0, c13, c0, 2"
646 : : "r" (utcb().access(true)->values[25]) : "memory");
// Invocation handler: set the cached TPIDRURO value from UTCB word 1,
// keeping the vCPU state and (if this is the current thread) the
// hardware register in sync. Requires at least 2 message words.
651 Thread::set_tpidruro(L4_msg_tag tag, Utcb *utcb)
653 if (EXPECT_FALSE(tag.words() < 2))
654 return commit_result(-L4_err::EInval);
656 _tpidruro = utcb->values[1];
657 if (EXPECT_FALSE(state() & Thread_vcpu_enabled))
658 arch_update_vcpu_state(vcpu_state().access());
660 if (this == current_thread())
663 return commit_result(0);
// Adopt the TPIDRURO value from a trap state (exception-IPC reply).
668 Thread::get_ts_tpidruro(Trap_state *ts)
670 _tpidruro = ts->tpidruro;
671 if (this == current_thread())
// Export the cached TPIDRURO value into a trap state.
677 Thread::set_ts_tpidruro(Trap_state *ts)
679 ts->tpidruro = _tpidruro;
682 // ------------------------------------------------------------------------
683 IMPLEMENTATION [arm && !armv6plus]:
// Pre-ARMv6 builds have no TPIDRURO/TPIDRURW registers: the thread-ID
// operations are no-ops and the set invocation reports EInval.
687 Thread::vcpu_resume_user_arch()
692 Thread::set_tpidruro(L4_msg_tag, Utcb *)
694 return commit_result(-L4_err::EInval);
699 Thread::get_ts_tpidruro(Trap_state *)
704 Thread::set_ts_tpidruro(Trap_state *)
707 //-----------------------------------------------------------------------------
713 EXTENSION class Thread
716 static void kern_kdebug_ipi_entry() asm("kern_kdebug_ipi_entry");
// IPI endpoint: drains this CPU's remote-request queue when hit.
719 class Thread_remote_rq_irq : public Irq_base
722 // we assume IPIs to be top level, no upstream IRQ chips
723 void handle(Upstream_irq const *)
724 { Thread::handle_remote_requests_irq(); }
726 Thread_remote_rq_irq()
728 set_hit(&handler_wrapper<Thread_remote_rq_irq>);
// IPIs have no trigger-mode configuration.
732 void switch_mode(bool) {}
// IPI endpoint: services global (all-CPU) remote requests when hit.
735 class Thread_glbl_remote_rq_irq : public Irq_base
738 // we assume IPIs to be top level, no upstream IRQ chips
739 void handle(Upstream_irq const *)
740 { Thread::handle_global_remote_requests_irq(); }
742 Thread_glbl_remote_rq_irq()
744 set_hit(&handler_wrapper<Thread_glbl_remote_rq_irq>);
// IPIs have no trigger-mode configuration.
748 void switch_mode(bool) {}
// IPI endpoint: enters the kernel debugger on this CPU; the EOI is
// issued explicitly before entering the debugger.
751 class Thread_debug_ipi : public Irq_base
754 // we assume IPIs to be top level, no upstream IRQ chips
755 void handle(Upstream_irq const *)
757 Ipi::eoi(Ipi::Debug, current_cpu());
758 Thread::kern_kdebug_ipi_entry();
763 set_hit(&handler_wrapper<Thread_debug_ipi>);
// IPIs have no trigger-mode configuration.
767 void switch_mode(bool) {}
// IPI endpoint: forwards a remote timer tick to the current thread's
// timer-interrupt handler.
770 class Thread_timer_tick_ipi : public Irq_base
773 void handle(Upstream_irq const *ui)
775 //Timer_tick *self = nonull_static_cast<Timer_tick *>(_s);
779 current_thread()->handle_timer_interrupt();
782 Thread_timer_tick_ipi()
783 { set_hit(&handler_wrapper<Thread_timer_tick_ipi>); }
// IPIs have no trigger-mode configuration.
785 void switch_mode(bool) {}
789 //-----------------------------------------------------------------------------
790 IMPLEMENTATION [mp && !irregular_gic]:
// Register the four IPI vectors with the IRQ manager, binding them to
// the handler members declared below (a static instance constructed
// at boot performs the registration).
797 check(Irq_mgr::mgr->alloc(&remote_rq_ipi, Ipi::Request));
798 check(Irq_mgr::mgr->alloc(&glbl_remote_rq_ipi, Ipi::Global_request));
799 check(Irq_mgr::mgr->alloc(&debug_ipi, Ipi::Debug));
800 check(Irq_mgr::mgr->alloc(&timer_ipi, Ipi::Timer));
803 Thread_remote_rq_irq remote_rq_ipi;
804 Thread_glbl_remote_rq_irq glbl_remote_rq_ipi;
805 Thread_debug_ipi debug_ipi;
806 Thread_timer_tick_ipi timer_ipi;
809 static Arm_ipis _arm_ipis;
811 //-----------------------------------------------------------------------------
812 IMPLEMENTATION [arm && !fpu]:
// FPU-less builds: no coprocessor faults to handle.
816 Thread::check_and_handle_coproc_faults(Trap_state *)
822 //-----------------------------------------------------------------------------
823 IMPLEMENTATION [arm && fpu]:
// Undefined-instruction handler for coprocessor traps: fetch the
// faulting opcode from user space (one or two halfwords in Thumb
// state, one word in ARM state) and, if it is a VFP/Advanced-SIMD
// encoding, forward it to handle_fpu_trap().
827 Thread::check_and_handle_coproc_faults(Trap_state *ts)
829 if (!ts->exception_is_undef_insn())
834 if (ts->psr & Proc::Status_thumb)
836 Unsigned16 v = Thread::peek_user((Unsigned16 *)(ts->pc - 2), this);
// The user-space read itself may fault; bail out if it did.
838 if (EXPECT_FALSE(Thread::check_for_kernel_mem_access_pf(ts, this)))
// Decide 16- vs 32-bit Thumb encoding from the first halfword.
841 if ((v >> 11) <= 0x1c)
844 opcode = (v << 16) | Thread::peek_user((Unsigned16 *)ts->pc, this);
847 opcode = Thread::peek_user((Unsigned32 *)(ts->pc - 4), this);
849 if (EXPECT_FALSE(Thread::check_for_kernel_mem_access_pf(ts, this)))
// Match SIMD/VFP encodings (ARM ARM section refs in the comments).
852 if (ts->psr & Proc::Status_thumb)
854 if ( (opcode & 0xef000000) == 0xef000000 // A6.3.18
855 || (opcode & 0xff100000) == 0xf9000000)
856 return Thread::handle_fpu_trap(opcode, ts);
860 if ( (opcode & 0xfe000000) == 0xf2000000 // A5.7.1
861 || (opcode & 0xff100000) == 0xf4000000)
862 return Thread::handle_fpu_trap(opcode, ts);
865 if ((opcode & 0x0c000e00) == 0x0c000a00)
866 return Thread::handle_fpu_trap(opcode, ts);
// Handle a trapped VFP/SIMD instruction: skip it if its condition
// code fails, emulate special encodings, lazily switch the FPU to
// this thread, or reflect an undef-insn exception to user space.
873 Thread::handle_fpu_trap(Unsigned32 opcode, Trap_state *ts)
875 if (!condition_valid(opcode >> 28, ts->psr))
877 // FPU insns are 32bit, even for thumb
878 if (ts->psr & Proc::Status_thumb)
// FPU already owned by us: either emulate or mark as undef insn.
883 if (Fpu::is_enabled())
885 assert(Fpu::fpu.current().owner() == current());
886 if (Fpu::is_emu_insn(opcode))
887 return Fpu::emulate_insns(opcode, ts);
889 ts->hsr().ec() = 0; // tag fpu undef insn
891 else if (current_thread()->switchin_fpu())
893 if (Fpu::is_emu_insn(opcode))
894 return Fpu::emulate_insns(opcode, ts);
// Rewind PC so the instruction re-executes with the FPU enabled.
895 ts->pc -= (ts->psr & Proc::Status_thumb) ? 2 : 4;
// Could not acquire the FPU: report a coprocessor-access fault
// (EC 0x07, coprocessor 10) carrying the original condition code.
900 ts->hsr().ec() = 0x07;
901 ts->hsr().cond() = opcode >> 28;
903 ts->hsr().cpt_cpnr() = 10;
910 //-----------------------------------------------------------------------------
911 IMPLEMENTATION [arm && !hyp]:
913 PUBLIC static inline template<typename T>
914 T Thread::peek_user(T const *adr, Context *c)
917 c->set_ignore_mem_op_in_progress(true);
919 c->set_ignore_mem_op_in_progress(false);