3 #include <csetjmp> // typedef jmp_buf
7 #include "continuation.h"
8 #include "helping_lock.h"
9 #include "mem_layout.h"
10 #include "member_offs.h"
14 #include "space.h" // Space_index
15 #include "spin_lock.h"
16 #include "thread_lock.h"
22 typedef Context_ptr_base<Thread> Thread_ptr;
25 /** A thread. This class is the driver class for most kernel functionality.
33 FIASCO_DECLARE_KOBJ();
39 friend class Jdb_thread;
40 friend class Jdb_thread_list;
41 friend class Jdb_list_threads;
42 friend class Jdb_list_timeouts;
43 friend class Jdb_tbuf_show;
46 enum Context_mode_kernel { Kernel = 0 };
55 Op_register_del_irq = 5,
56 Op_modify_senders = 6,
62 Ctl_set_pager = 0x0010000,
63 Ctl_set_scheduler = 0x0020000,
64 Ctl_set_mcp = 0x0040000,
65 Ctl_set_prio = 0x0080000,
66 Ctl_set_quantum = 0x0100000,
67 Ctl_bind_task = 0x0200000,
68 Ctl_alien_thread = 0x0400000,
69 Ctl_ux_native = 0x0800000,
70 Ctl_set_exc_handler = 0x1000000,
71 Ctl_vcpu_enabled = 0x2000000,
77 Exr_trigger_exception = 0x20000,
90 enum { Stack_size = Config::PAGE_SIZE };
95 static Per_cpu<Dbg_stack> dbg_stack;
99 typedef void (Utcb_copy_func)(Thread *sender, Thread *receiver);
104 * @param task the task the thread should reside in.
105 * @param id user-visible thread ID of the sender.
106 * @param init_prio initial priority.
107 * @param mcp maximum controlled priority.
109 * @post state() != Thread_invalid.
114 int handle_page_fault (Address pfa, Mword error, Mword pc,
121 struct Migration_helper_info
127 Thread(const Thread&); ///< Default copy constructor is undefined
128 void *operator new(size_t); ///< Default new operator undefined
130 bool handle_sigma0_page_fault (Address pfa);
135 * This function is the default routine run if a newly
136 * initialized context is being switch_exec()'ed.
138 static void user_invoke();
141 static bool pagein_tcb_request(Return_frame *regs);
143 inline Mword user_ip() const;
144 inline void user_ip(Mword);
146 inline Mword user_sp() const;
147 inline void user_sp(Mword);
149 inline Mword user_flags() const;
151 /** nesting level in debugger (always critical) if >1 */
152 static Per_cpu<unsigned long> nested_trap_recover;
153 static void handle_remote_requests_irq() asm ("handle_remote_cpu_requests");
154 static void handle_global_remote_requests_irq() asm ("ipi_remote_call");
156 // implementation details follow...
158 explicit Thread(Context_mode_kernel);
162 // Another critical TCB cache line:
163 Thread_lock _thread_lock;
167 Thread_ptr _exc_handler;
170 jmp_buf *_recover_jmpbuf; // setjmp buffer for page-fault recovery
174 Irq_base *_del_observer;
178 static const unsigned magic = 0xf001c001;
181 class Obj_cap : public L4_obj_ref
189 #include <cstdlib> // panic()
192 #include "entry_frame.h"
193 #include "fpu_alloc.h"
196 #include "kmem_alloc.h"
198 #include "map_util.h"
199 #include "ram_quota.h"
200 #include "sched_context.h"
202 #include "std_macros.h"
204 #include "thread_state.h"
208 FIASCO_DEFINE_KOBJ(Thread);
210 Per_cpu<unsigned long> DEFINE_PER_CPU Thread::nested_trap_recover;
214 Thread::Dbg_stack::Dbg_stack()
216 stack_top = Kmem_alloc::allocator()->unaligned_alloc(Stack_size);
218 stack_top = (char *)stack_top + Stack_size;
219 //printf("JDB STACK start= %p - %p\n", (char *)stack_top - Stack_size, (char *)stack_top);
223 PUBLIC inline NEEDS[Thread::thread_lock]
227 thread_lock()->lock();
// Quota-charged allocator: carve one whole thread block (TCB plus kernel
// stack, Config::thread_block_size bytes) out of the mapped allocator,
// charging the given Ram_quota. Only the Thread object itself is zeroed;
// the quota pointer is stashed so operator delete can refund it.
// NOTE(review): lines 236-237 of the original file are missing from this
// extraction -- presumably the null-check on `t`; confirm against upstream.
233 Thread::operator new(size_t, Ram_quota *q) throw ()
235 void *t = Mapped_allocator::allocator()->q_unaligned_alloc(q, Config::thread_block_size);
// Zero only sizeof(Thread) bytes -- the remainder of the block is the
// kernel stack and needs no clearing here.
238 memset(t, 0, sizeof(Thread));
239 reinterpret_cast<Thread*>(t)->_quota = q;
244 /** Class-specific allocator.
245 This allocator ensures that threads are allocated at a fixed virtual
246 address computed from their thread ID.
248 @return address of new thread control block
// NOTE(review): the trailing `return t;` (original line ~257) lies outside
// this extraction's view.
252 Thread::operator new(size_t, Thread *t) throw ()
254 // Allocate TCB in TCB space. Actually, do not allocate anything,
255 // just return the address. Allocation happens on the fly in
256 // Thread::handle_page_fault().
260 /** Deallocator. Returns the thread block's memory to the mapped
261 allocator and refunds it to the owning Ram_quota recorded at
// allocation time (see operator new above); the old claim that this
// function "does nothing" no longer matches the visible code, which
// calls q_unaligned_free. A trace entry records the destruction.
265 Thread::operator delete(void *_t)
267 Thread * const t = static_cast<Thread*>(_t);
// _quota was stored by operator new(size_t, Ram_quota*); use it for refund.
268 Ram_quota * const q = t->_quota;
269 Mapped_allocator::allocator()->q_unaligned_free(q, Config::thread_block_size, t);
// Tracing only: log the kobject deletion and the quota's remaining amount.
271 LOG_TRACE("Kobject delete", "del", current(), __fmt_kobj_destroy,
272 Log_destroy *l = tbe->payload<Log_destroy>();
276 l->ram = q->current());
280 PUBLIC inline NEEDS["space.h"]
282 Thread::bind(Space *t, void *_utcb)
284 // _utcb == 0 for all kernel threads
285 assert_kdb (!_utcb || t->is_utcb_valid(_utcb));
288 Lock_guard<Spin_lock> guard(&_space);
289 if (_space.get_unused())
292 _space.set_unused(t);
296 utcb(t->kernel_utcb(_utcb));
297 local_id(Address(_utcb));
304 PUBLIC inline NEEDS["kdb_ke.h", "cpu_lock.h", "space.h"]
311 Lock_guard<Spin_lock> guard(&_space);
313 if (!_space.get_unused())
316 old = _space.get_unused();
317 _space.set_unused(0);
319 Mem_space *oms = old->mem_space();
324 // switch to a safe page table
325 if (Mem_space::current_mem_space(current_cpu()) == oms)
326 Mem_space::kernel_space()->switchin_context(oms);
331 current()->rcu_wait();
338 /** Cut-down version of Thread constructor; only for kernel threads
339 Do only what's necessary to get a kernel thread started --
340 skip all fancy stuff, no locking is necessary.
341 @param task the address space
342 @param id user-visible thread ID of the sender
345 Thread::Thread(Context_mode_kernel)
346 : Receiver(&_thread_lock), Sender(), _del_observer(0), _magic(magic)
348 *reinterpret_cast<void(**)()>(--_kernel_sp) = user_invoke;
352 if (Config::stack_depth)
353 std::memset((char*)this + sizeof(Thread), '5',
354 Config::thread_block_size-sizeof(Thread)-64);
358 /** Destructor. Reestablish the Context constructor's precondition.
359 @pre current() == thread_lock()->lock_owner()
360 && state() == Thread_dead
362 @post (_kernel_sp == 0) && (* (stack end) == 0) && !exists()
365 Thread::~Thread() // To be called in locked state.
368 unsigned long *init_sp = reinterpret_cast<unsigned long*>
369 (reinterpret_cast<unsigned long>(this) + size - sizeof(Entry_frame));
374 Fpu_alloc::free_state(fpu_state());
375 state_change(0, Thread_invalid);
380 Thread::destroy(Kobject ***rl)
382 Kobject::destroy(rl);
385 assert_kdb(state() == Thread_dead);
387 assert_kdb(_magic == magic);
392 // IPC-gate deletion stuff ------------------------------------
396 Thread::ipc_gate_deleted(Mword id)
399 Lock_guard<Cpu_lock> g(&cpu_lock);
401 _del_observer->hit();
404 class Del_irq_pin : public Irq_pin_dummy
409 Del_irq_pin::Del_irq_pin(Thread *o)
410 { payload()[0] = (Address)o; }
414 Del_irq_pin::thread() const
415 { return (Thread*)payload()[0]; }
419 Del_irq_pin::unbind_irq()
421 thread()->remove_delete_irq();
425 Del_irq_pin::~Del_irq_pin()
432 Thread::register_delete_irq(Irq_base *irq)
434 irq->pin()->unbind_irq();
435 irq->pin()->replace<Del_irq_pin>(this);
441 Thread::remove_delete_irq()
446 Irq_base *tmp = _del_observer;
448 tmp->pin()->unbind_irq();
451 // end of: IPC-gate deletion stuff -------------------------------
456 { return dec_ref() == 0; }
461 /** Lookup function: Find Thread instance that owns a given Context.
463 @return the thread that owns the context
// Thread derives from Context and the Context sub-object is at offset 0,
// so the conversion is a plain pointer reinterpretation (both const and
// non-const overloads below).
467 Thread::lookup (Context* c)
469 return reinterpret_cast<Thread*>(c);
// Const overload: identical conversion, preserving const-ness.
474 Thread::lookup (Context const * c)
476 return reinterpret_cast<Thread const *>(c);
479 /** Currently executing thread.
480 @return currently executing thread.
// Thin wrapper: convert the current Context to its owning Thread.
486 return Thread::lookup(current());
// True while a (synthetic) exception is pending for this thread, i.e.
// the exception continuation _exc_cont holds a valid saved state.
491 Thread::exception_triggered() const
492 { return _exc_cont.valid(); }
495 // state requests/manipulation
500 Overwrite Context's version of thread_lock() with a semantically
501 equivalent, but more efficient version.
502 @return lock used to synchronize accesses to the thread.
// Returns the per-thread lock member directly instead of going through
// Context's generic accessor.
506 Thread::thread_lock()
508 return &_thread_lock;
512 PUBLIC inline NEEDS ["config.h", "timeout.h"]
// Per-tick bookkeeping on the local CPU: charge CPU time (coarse
// accounting only), run pending RCU work, fire expired timeouts, and
// reschedule if either of the latter two requests it.
514 Thread::handle_timer_interrupt()
516 unsigned _cpu = cpu(true);
517 // XXX: This assumes periodic timers (i.e. bogus in one-shot mode)
// With fine-grained accounting disabled, charge one scheduler tick here.
518 if (!Config::fine_grained_cputime)
519 consume_time(Config::scheduler_granularity);
// RCU callbacks may make threads runnable -> may require a reschedule.
521 bool resched = Rcu::do_pending_work(_cpu);
523 // Check if we need to reschedule due to timeouts or wakeups
// Only reschedule when not already inside the scheduler.
524 if ((Timeout_q::timeout_queue.cpu(_cpu).do_timeouts() || resched)
525 && !schedule_in_progress())
// The timeslice timeout must always be armed here ("Coma check").
528 assert (timeslice_timeout.cpu(cpu(true))->is_set()); // Coma check
537 // Cancel must be cleared on all kernel entry paths. See slowtraps for
538 // why we delay doing it until here.
539 state_del (Thread_cancel);
541 // we haven't been re-initialized (cancel was not set) -- so sleep
542 if (state_change_safely (~Thread_ready, Thread_cancel | Thread_dead))
543 while (! (state() & Thread_ready))
549 Thread::halt_current ()
553 current_thread()->halt();
554 kdb_ke("Thread not halted");
558 PRIVATE static inline
560 Thread::user_invoke_generic()
562 Context *const c = current();
563 assert_kdb (c->state() & Thread_ready_mask);
565 if (c->handle_drq() && !c->schedule_in_progress())
568 // release CPU lock explicitly, because
569 // * the context that switched to us holds the CPU lock
570 // * we run on a newly-created stack without a CPU lock guard
576 Thread::leave_and_kill_myself()
578 current_thread()->do_kill();
580 WARN("dead thread scheduled: %lx\n", current_thread()->dbg_id());
582 kdb_ke("DEAD SCHED");
587 Thread::handle_kill_helper(Drq *src, Context *, void *)
589 delete nonull_static_cast<Thread*>(src->context());
590 return Drq::No_answer | Drq::Need_resched;
599 Lock_guard<Thread_lock> guard(thread_lock());
601 if (state() == Thread_invalid)
612 // But first prevent it from being woken up by asynchronous events
615 Lock_guard <Cpu_lock> guard (&cpu_lock);
617 // if IPC timeout active, reset it
621 // Switch to time-sharing mode
622 set_mode (Sched_mode (0));
624 // Switch to time-sharing scheduling context
625 if (sched() != sched_context())
626 switch_sched(sched_context());
628 if (current_sched()->context() == this)
629 set_current_sched(current()->sched());
632 // possibly dequeue from a wait queue
635 // if other threads want to send me IPC messages, abort these
638 Lock_guard <Cpu_lock> guard (&cpu_lock);
639 while (Sender *s = Sender::cast(sender_list()->head()))
641 s->ipc_receiver_aborted();
642 Proc::preemption_point();
646 // if engaged in IPC operation, stop it
648 sender_dequeue (receiver()->sender_list());
655 vcpu_set_user_space(0);
659 state_change_dirty (0, Thread_dead);
661 // dequeue from system queues
666 _del_observer->pin()->unbind_irq();
673 state_del_dirty(Thread_ready_mask);
675 WARN("woken up dead thread %lx\n", dbg_id());
681 state_del_dirty(Thread_ready_mask);
685 kernel_context_drq(handle_kill_helper, 0);
692 Thread::handle_remote_kill(Drq *, Context *self, void *)
694 Thread *c = nonull_static_cast<Thread*>(self);
695 c->state_add_dirty(Thread_cancel | Thread_ready);
696 c->_exc_cont.restore(c->regs());
697 c->do_trigger_exception(c->regs(), (void*)&Thread::leave_and_kill_myself);
706 Lock_guard<Cpu_lock> guard(&cpu_lock);
710 if (cpu() == current_cpu())
712 state_add_dirty(Thread_cancel | Thread_ready);
713 sched()->deblock(cpu());
714 _exc_cont.restore(regs()); // overwrite an already triggered exception
715 do_trigger_exception(regs(), (void*)&Thread::leave_and_kill_myself);
716 // current()->switch_exec (this, Helping);
720 drq(Thread::handle_remote_kill, 0, 0, Drq::Any_ctxt);
724 drq(Thread::handle_migration, reinterpret_cast<void*>(current_cpu()));
726 assert_kdb(cpu() == current_cpu());
735 Thread::set_sched_params(unsigned prio, Unsigned64 quantum)
737 Sched_context *sc = sched_context();
738 bool const change = prio != sc->prio()
739 || quantum != sc->quantum();
740 bool const ready_queued = in_ready_list();
742 if (!change && (ready_queued || this == current()))
748 sc->set_quantum(quantum);
751 if (sc == current_sched())
752 set_current_sched(sc);
754 if (state() & Thread_ready_mask)
756 if (this != current())
765 Thread::control(Thread_ptr const &pager, Thread_ptr const &exc_handler,
766 Space *task, void *user_utcb,
767 bool utcb_vcpu_flag = false, bool utcb_vcpu_val = false)
769 bool new_vcpu_state = state() & Thread_vcpu_enabled;
771 new_vcpu_state = utcb_vcpu_val;
775 if (EXPECT_FALSE(!task->is_utcb_valid(user_utcb, 1 + new_vcpu_state)))
776 return -L4_err::EInval;
778 if (EXPECT_FALSE(!bind(task, user_utcb)))
779 return -L4_err::EInval; // unbind first !!
782 vcpu_state(utcb() + 1);
789 if (!space()->is_utcb_valid((void *)local_id(), 2))
790 return -L4_err::EInval;
791 vcpu_state(utcb() + 1);
794 state_add_dirty(Thread_vcpu_enabled);
797 // we're not clearing the vcpu_state pointer, it's not used if vcpu mode
799 state_del_dirty(Thread_vcpu_enabled);
801 if (pager.is_valid())
804 if (exc_handler.is_valid())
805 _exc_handler = exc_handler;
811 /** Clears the utcb pointer of the Thread
812 * Reason: To avoid a stale pointer after unmapping and deallocating
813 * the UTCB. Without this the Thread_lock::clear will access the UTCB
814 * after the unmapping the UTCB -> POOFFF.
818 Thread::unset_utcb_ptr()
825 PRIVATE static inline
826 bool FIASCO_WARN_RESULT
// Plain UTCB-to-UTCB message copy: copy min(tag.words(), Max_words)
// machine words of the message registers, then transfer typed items,
// and optionally hand over the FPU state when the message requests it,
// the receiver opted in, and the capability carries write rights.
// Must run with the CPU lock held (asserted below).
827 Thread::copy_utcb_to_utcb(L4_msg_tag const &tag, Thread *snd, Thread *rcv,
828 unsigned char rights)
830 assert (cpu_lock.test());
832 Utcb *snd_utcb = snd->access_utcb();
833 Utcb *rcv_utcb = rcv->access_utcb();
834 Mword s = tag.words();
// Clamp the word count to the receiver's register file size.
835 Mword r = Utcb::Max_words;
837 Mem::memcpy_mwords (rcv_utcb->values, snd_utcb->values, r < s ? r : s);
// NOTE(review): the declaration of `success` and the guard around this
// call (original lines ~838-840) are missing from this extraction.
841 success = transfer_msg_items(tag, snd, snd_utcb, rcv, rcv_utcb, rights);
// FPU transfer is consent-based: sender asks, receiver inherits, and
// the invoked capability must grant write permission.
843 if (tag.transfer_fpu() && rcv_utcb->inherit_fpu() && (rights & L4_fpage::W))
844 snd->transfer_fpu(rcv);
850 PUBLIC inline NEEDS[Thread::copy_utcb_to_ts, Thread::copy_utcb_to_utcb,
851 Thread::copy_ts_to_utcb]
852 bool FIASCO_WARN_RESULT
// Message-copy dispatcher: pick the copy routine depending on whether
// sender and/or receiver currently run with a trap-state (_utcb_handler)
// instead of a normal UTCB. At most one side may be in trap state.
853 Thread::copy_utcb_to(L4_msg_tag const &tag, Thread* receiver,
854 unsigned char rights)
856 // we cannot copy trap state to trap state!
857 assert_kdb (!this->_utcb_handler || !receiver->_utcb_handler)
858 if (EXPECT_FALSE(this->_utcb_handler != 0))
859 return copy_ts_to_utcb(tag, this, receiver, rights);
860 else if (EXPECT_FALSE(receiver->_utcb_handler != 0))
861 return copy_utcb_to_ts(tag, this, receiver, rights);
// Common case: both sides use plain UTCBs.
863 return copy_utcb_to_utcb(tag, this, receiver, rights);
// Install (or clear, when b == 0) the setjmp buffer used for page-fault
// recovery while the kernel touches user memory on this thread's behalf;
// see the _recover_jmpbuf member declared in the class above.
869 Thread::recover_jmp_buf(jmp_buf *b)
870 { _recover_jmpbuf = b; }
// Heuristic test whether an address lies inside a live TCB: round the
// address down to the thread-block boundary (blocks are size-aligned)
// and compare the _magic field there against the `magic` constant that
// the kernel-thread constructor stores.
875 Thread::is_tcb_address(Address a)
877 a &= ~(Config::thread_block_size - 1);
878 return reinterpret_cast<Thread *>(a)->_magic == magic;
// Debug invariant for IRQ entry paths: the interrupted thread must
// either be inside the scheduler or in a ready/DRQ-wait/waiting state.
883 Thread::assert_irq_entry()
885 assert_kdb(current_thread()->schedule_in_progress()
886 || current_thread()->state() & (Thread_ready_mask | Thread_drq_wait | Thread_waiting));
890 // ---------------------------------------------------------------------------
893 Obj_cap::Obj_cap(L4_obj_ref const &o) : L4_obj_ref(o) {}
895 PUBLIC inline NEEDS["kobject.h"]
897 Obj_cap::deref(unsigned char *rights = 0, bool dbg = false)
899 Thread *current = current_thread();
900 if (flags() & L4_obj_ref::Ipc_reply)
902 if (rights) *rights = current->caller_rights();
903 Thread *ca = static_cast<Thread*>(current->caller());
905 current->set_caller(0,0);
909 if (EXPECT_FALSE(invalid()))
914 if (rights) *rights = L4_fpage::RWX;
915 return current_thread();
918 return current->space()->obj_space()->lookup_local(cap(), rights);
921 PUBLIC inline NEEDS["kobject.h"]
923 Obj_cap::revalidate(Kobject_iface *o)
929 // ---------------------------------------------------------------------------
// Decode the IPC flag bits of a syscall binding against this thread:
// fill in *partner / *sender / *have_recv for the do_ipc() call.
// For a receive, the expected sender is `this` for a closed wait and 0
// (anyone) for an open wait; a send always targets `this`.
933 Thread::check_sys_ipc(unsigned flags, Thread **partner, Thread **sender,
934 bool *have_recv) const
936 if (flags & L4_obj_ref::Ipc_recv)
938 *sender = flags & L4_obj_ref::Ipc_open_wait ? 0 : const_cast<Thread*>(this);
942 if (flags & L4_obj_ref::Ipc_send)
943 *partner = const_cast<Thread*>(this);
945 // FIXME: shall be removed flags == 0 is no-op
// Legacy flags==0 path: treat as call (send+receive) to/from this thread.
948 *sender = const_cast<Thread*>(this);
949 *partner = const_cast<Thread*>(this);
// Valid iff there is a receive phase or a send phase with a partner.
953 return *have_recv || ((flags & L4_obj_ref::Ipc_send) && *partner);
960 Thread::invoke(L4_obj_ref /*self*/, Mword rights, Syscall_frame *f, Utcb *utcb)
962 register unsigned flags = f->ref().flags();
963 if (((flags != 0) && !(flags & L4_obj_ref::Ipc_send))
964 || (flags & L4_obj_ref::Ipc_reply)
965 || f->tag().proto() != L4_msg_tag::Label_thread)
968 Thread *ct = current_thread();
971 bool have_rcv = false;
973 if (EXPECT_FALSE(!check_sys_ipc(flags, &partner, &sender, &have_rcv)))
975 utcb->error = L4_error::Not_existent;
979 ct->do_ipc(f->tag(), partner, partner, have_rcv, sender,
980 f->timeout(), f, rights);
984 switch (utcb->values[0] & Opcode_mask)
987 f->tag(sys_control(rights, f->tag(), utcb));
990 f->tag(sys_ex_regs(f->tag(), utcb));
993 f->tag(sys_thread_switch(f->tag(), utcb));
996 f->tag(sys_thread_stats(f->tag(), utcb));
999 f->tag(sys_vcpu_resume(f->tag(), utcb));
1001 case Op_register_del_irq:
1002 f->tag(sys_register_delete_irq(f->tag(), utcb, utcb));
1004 case Op_modify_senders:
1005 f->tag(sys_modify_senders(f->tag(), utcb, utcb));
1008 L4_msg_tag tag = f->tag();
1009 if (invoke_arch(tag, utcb))
1012 f->tag(commit_result(-L4_err::ENosys));
1017 PRIVATE inline NOEXPORT
1019 Thread::sys_modify_senders(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
1021 if (sender_list()->cursor())
1022 return Kobject_iface::commit_result(-L4_err::EBusy);
1025 printf("MODIFY ID (%08lx:%08lx->%08lx:%08lx\n",
1026 in->values[1], in->values[2],
1027 in->values[3], in->values[4]);
1030 int elems = tag.words();
1033 return Kobject_iface::commit_result(0);
1039 ::Prio_list_elem *c = sender_list()->head();
1042 // this is kind of arbitrary
1043 for (int cnt = 50; c && cnt > 0; --cnt)
1045 Sender *s = Sender::cast(c);
1046 s->modify_label(&in->values[1], elems);
1051 return Kobject_iface::commit_result(0);
1053 sender_list()->cursor(c);
1054 Proc::preemption_point();
1055 c = sender_list()->cursor();
1057 return Kobject_iface::commit_result(0);
1060 PRIVATE inline NOEXPORT
1062 Thread::sys_register_delete_irq(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
1064 L4_snd_item_iter snd_items(in, tag.words());
1066 if (!tag.items() || !snd_items.next())
1067 return Kobject_iface::commit_result(-L4_err::EInval);
1069 L4_fpage bind_irq(snd_items.get()->d);
1070 if (EXPECT_FALSE(!bind_irq.is_objpage()))
1071 return Kobject_iface::commit_error(in, L4_error::Overflow);
1073 register Context *const c_thread = ::current();
1074 register Space *const c_space = c_thread->space();
1075 register Obj_space *const o_space = c_space->obj_space();
1076 unsigned char irq_rights = 0;
1078 = Irq_base::dcast(o_space->lookup_local(bind_irq.obj_index(), &irq_rights));
1081 return Kobject_iface::commit_result(-L4_err::EInval);
1083 if (EXPECT_FALSE(!(irq_rights & L4_fpage::X)))
1084 return Kobject_iface::commit_result(-L4_err::EPerm);
1086 register_delete_irq(irq);
1087 return Kobject_iface::commit_result(0);
1091 PRIVATE inline NOEXPORT
1093 Thread::sys_control(unsigned char rights, L4_msg_tag const &tag, Utcb *utcb)
1095 if (EXPECT_FALSE(!(rights & L4_fpage::W)))
1096 return commit_result(-L4_err::EPerm);
1098 if (EXPECT_FALSE(tag.words() < 6))
1099 return commit_result(-L4_err::EInval);
1101 Context *curr = current();
1102 Obj_space *s = curr->space()->obj_space();
1103 L4_snd_item_iter snd_items(utcb, tag.words());
1105 void *utcb_addr = 0;
1107 Mword flags = utcb->values[0];
1109 Mword _old_pager = _pager.raw() << L4_obj_ref::Cap_shift;
1110 Mword _old_exc_handler = _exc_handler.raw() << L4_obj_ref::Cap_shift;
1112 Thread_ptr _new_pager(~0UL);
1113 Thread_ptr _new_exc_handler(~0UL);
1115 if (flags & Ctl_set_pager)
1116 _new_pager = Thread_ptr(utcb->values[1] >> L4_obj_ref::Cap_shift);
1118 if (flags & Ctl_set_exc_handler)
1119 _new_exc_handler = Thread_ptr(utcb->values[2] >> L4_obj_ref::Cap_shift);
1121 if (flags & Ctl_bind_task)
1123 if (EXPECT_FALSE(!tag.items() || !snd_items.next()))
1124 return commit_result(-L4_err::EInval);
1126 L4_fpage bind_task(snd_items.get()->d);
1128 if (EXPECT_FALSE(!bind_task.is_objpage()))
1129 return commit_result(-L4_err::EInval);
1131 unsigned char task_rights = 0;
1132 task = Kobject::dcast<Task*>(s->lookup_local(bind_task.obj_index(), &task_rights));
1134 if (EXPECT_FALSE(!(task_rights & L4_fpage::W)))
1135 return commit_result(-L4_err::EPerm);
1138 return commit_result(-L4_err::EInval);
1140 utcb_addr = (void*)utcb->values[5];
1143 long res = control(_new_pager, _new_exc_handler,
1144 task, utcb_addr, flags & Ctl_vcpu_enabled,
1145 utcb->values[4] & Ctl_vcpu_enabled);
1148 return commit_result(res);
1150 if ((res = sys_control_arch(utcb)) < 0)
1151 return commit_result(res);
1154 // FIXME: must be done xcpu safe, may be some parts above too
1155 Lock_guard<Cpu_lock> guard(&cpu_lock);
1156 if (flags & Ctl_alien_thread)
1158 if (utcb->values[4] & Ctl_alien_thread)
1159 state_change_dirty (~Thread_dis_alien, Thread_alien, false);
1161 state_del_dirty(Thread_alien, false);
1165 utcb->values[1] = _old_pager;
1166 utcb->values[2] = _old_exc_handler;
1168 return commit_result(0, 3);
1171 // -------------------------------------------------------------------
1172 // Thread::ex_regs class system calls
1176 Thread::ex_regs(Address ip, Address sp,
1177 Address *o_ip = 0, Address *o_sp = 0, Mword *o_flags = 0,
1180 if (state(false) == Thread_invalid || !space())
1183 if (current() == this)
1186 if (o_sp) *o_sp = user_sp();
1187 if (o_ip) *o_ip = user_ip();
1188 if (o_flags) *o_flags = user_flags();
1190 // Changing the run state is only possible when the thread is not in
1192 if (!(ops & Exr_cancel) && (state(false) & Thread_in_exception))
1193 // XXX Maybe we should return false here. Previously, we actually
1194 // did so, but we also actually didn't do any state modification.
1195 // If you change this value, make sure the logic in
1196 // sys_thread_ex_regs still works (in particular,
1197 // ex_regs_cap_handler and friends should still be called).
1200 if (state(false) & Thread_dead) // resurrect thread
1201 state_change_dirty (~Thread_dead, Thread_ready, false);
1203 else if (ops & Exr_cancel)
1204 // cancel ongoing IPC or other activity
1205 state_change_dirty (~(Thread_ipc_in_progress | Thread_delayed_deadline |
1206 Thread_delayed_ipc), Thread_cancel | Thread_ready, false);
1208 if (ops & Exr_trigger_exception)
1210 extern char leave_by_trigger_exception[];
1211 do_trigger_exception(regs(), leave_by_trigger_exception);
1220 if (current() == this)
1228 Thread::ex_regs(Utcb *utcb)
1230 Address ip = utcb->values[1];
1231 Address sp = utcb->values[2];
1233 Mword ops = utcb->values[0];
1235 LOG_TRACE("Ex-regs", "exr", current(), __fmt_thread_exregs,
1236 Log_thread_exregs *l = tbe->payload<Log_thread_exregs>();
1238 l->ip = ip; l->sp = sp; l->op = ops;);
1240 if (!ex_regs(ip, sp, &ip, &sp, &flags, ops))
1241 return commit_result(-L4_err::EInval);
1243 utcb->values[0] = flags;
1244 utcb->values[1] = ip;
1245 utcb->values[2] = sp;
1247 return commit_result(0, 3);
// DRQ handler executed on the target thread's home CPU: unpack the
// Remote_syscall parameter block, run ex_regs() on behalf of the
// calling thread (whose UTCB carries ip/sp/ops), and request a
// reschedule on success (result protocol 0).
1252 Thread::handle_remote_ex_regs(Drq *, Context *self, void *p)
1254 Remote_syscall *params = reinterpret_cast<Remote_syscall*>(p);
1255 params->result = nonull_static_cast<Thread*>(self)->ex_regs(params->thread->access_utcb());
1256 return params->result.proto() == 0 ? Drq::Need_resched : 0;
1259 PRIVATE inline NOEXPORT
// System-call entry for ex-regs: the message must carry exactly three
// words (ops/flags, ip, sp). The real work runs on the destination
// thread's home CPU via a DRQ (handle_remote_ex_regs above), which makes
// the operation cross-CPU safe; the result tag is passed back through
// the stack-allocated Remote_syscall block.
1261 Thread::sys_ex_regs(L4_msg_tag const &tag, Utcb * /*utcb*/)
1263 if (tag.words() != 3)
1264 return commit_result(-L4_err::EInval);
1266 Remote_syscall params;
1267 params.thread = current_thread();
// FIX(review): '&params' had been mangled to '¶ms' (stray HTML
// '&para;' entity); restore passing the address of the local block.
1269 drq(handle_remote_ex_regs, &params, 0, Drq::Any_ctxt);
1270 return params.result;
1273 PRIVATE inline NOEXPORT NEEDS["timer.h"]
1275 Thread::sys_thread_switch(L4_msg_tag const &/*tag*/, Utcb *utcb)
1277 Context *curr = current();
1280 return commit_result(0);
1282 if (current_cpu() != cpu())
1283 return commit_result(0);
1286 Sched_context * const cs = current_sched();
1290 && ((state() & (Thread_ready | Thread_suspended)) == Thread_ready))
1292 curr->switch_exec_schedule_locked (this, Not_Helping);
1293 reinterpret_cast<Utcb::Time_val*>(utcb->values)->t = 0; // Assume timeslice was used up
1294 return commit_result(0, Utcb::Time_val::Words);
1297 #if 0 // FIXME: provide API for multiple sched contexts
1298 // Compute remaining quantum length of timeslice
1299 regs->left (timeslice_timeout.cpu(cpu())->get_timeout(Timer::system_clock()));
1301 // Yield current global timeslice
1302 cs->owner()->switch_sched (cs->id() ? cs->next() : cs);
1304 reinterpret_cast<Utcb::Time_val*>(utcb->values)->t
1305 = timeslice_timeout.cpu(current_cpu())->get_timeout(Timer::system_clock());
1308 return commit_result(0, Utcb::Time_val::Words);
1313 // -------------------------------------------------------------------
1314 // Gather statistics information about thread execution
1318 Thread::sys_thread_stats_remote(void *data)
1320 update_consumed_time();
1321 *(Clock::Time *)data = consumed_time();
1327 Thread::handle_sys_thread_stats_remote(Drq *, Context *self, void *data)
1329 return nonull_static_cast<Thread*>(self)->sys_thread_stats_remote(data);
1332 PRIVATE inline NOEXPORT
1334 Thread::sys_thread_stats(L4_msg_tag const &/*tag*/, Utcb *utcb)
1338 if (cpu() != current_cpu())
1339 drq(handle_sys_thread_stats_remote, &value, 0, Drq::Any_ctxt);
1342 // Respect the fact that the consumed time is only updated on context switch
1343 if (this == current())
1344 update_consumed_time();
1345 value = consumed_time();
1348 reinterpret_cast<Utcb::Time_val *>(utcb->values)->t = value;
1350 return commit_result(0, Utcb::Time_val::Words);
1356 Thread::handle_migration_helper(Drq *, Context *, void *p)
1358 Migration_helper_info const *inf = (Migration_helper_info const *)p;
1359 return inf->victim->migration_helper(&inf->inf);
1365 Thread::do_migration()
1367 assert_kdb(cpu_lock.test());
1368 assert_kdb(current_cpu() == cpu(true));
1370 Migration_helper_info inf;
1373 Lock_guard<Spin_lock> g(affinity_lock());
1374 inf.inf = _migration_rq.inf;
1375 _migration_rq.pending = false;
1376 _migration_rq.in_progress = true;
1379 unsigned on_cpu = cpu();
1381 if (inf.inf.cpu == ~0U)
1383 state_add_dirty(Thread_suspended);
1384 set_sched_params(0, 0);
1385 _migration_rq.in_progress = false;
1389 state_del_dirty(Thread_suspended);
1391 if (inf.inf.cpu == on_cpu)
1394 set_sched_params(inf.inf.prio, inf.inf.quantum);
1395 _migration_rq.in_progress = false;
1399 // spill FPU state into memory before migration
1400 if (state() & Thread_fpu_owner)
1402 if (current() != this)
1406 Fpu::set_owner(on_cpu, 0);
1411 // if we are in the middle of the scheduler, leave it now
1412 if (schedule_in_progress() == this)
1413 reset_schedule_in_progress();
1417 if (current() == this && Config::Max_num_cpus > 1)
1418 kernel_context_drq(handle_migration_helper, &inf);
1420 migration_helper(&inf.inf);
1425 Thread::initiate_migration()
1430 Thread::finish_migration()
1431 { enqueue_timeout_again(); }
1436 Thread::migrate(Migration_info const &info)
1438 assert_kdb (cpu_lock.test());
1440 LOG_TRACE("Thread migration", "mig", this, __thread_migration_log_fmt,
1441 Migration_log *l = tbe->payload<Migration_log>();
1444 l->target_cpu = info.cpu;
1445 l->user_ip = regs()->ip();
1449 Lock_guard<Spin_lock> g(affinity_lock());
1450 _migration_rq.inf = info;
1451 _migration_rq.pending = true;
1454 unsigned cpu = this->cpu();
1456 if (current_cpu() == cpu)
1466 //---------------------------------------------------------------------------
1467 IMPLEMENTATION [fpu && !ux]:
1470 #include "fpu_alloc.h"
1471 #include "fpu_state.h"
1473 PUBLIC inline NEEDS ["fpu.h"]
1477 // If we own the FPU, we should never be getting an "FPU unavailable" trap
1478 assert_kdb (Fpu::owner(cpu()) == this);
1479 assert_kdb (state() & Thread_fpu_owner);
1480 assert_kdb (fpu_state());
1482 // Save the FPU state of the previous FPU owner (lazy) if applicable
1483 Fpu::save_state (fpu_state());
1484 state_del_dirty (Thread_fpu_owner);
1489 * Handle FPU trap for this context. Assumes disabled interrupts
1491 PUBLIC inline NEEDS [Thread::spill_fpu, "fpu_alloc.h","fpu_state.h"]
1493 Thread::switchin_fpu(bool alloc_new_fpu = true)
1495 unsigned cpu = this->cpu(true);
1497 if (state() & Thread_vcpu_fpu_disabled)
1500 // If we own the FPU, we should never be getting an "FPU unavailable" trap
1501 assert_kdb (Fpu::owner(cpu) != this);
1503 // Allocate FPU state slab if we didn't already have one
1504 if (!fpu_state()->state_buffer()
1505 && (EXPECT_FALSE((!alloc_new_fpu
1506 || (state() & Thread_alien))
1507 || !Fpu_alloc::alloc_state (_quota, fpu_state()))))
1510 // Enable the FPU before accessing it, otherwise recursive trap
1513 // Save the FPU state of the previous FPU owner (lazy) if applicable
1514 if (Fpu::owner(cpu))
1515 nonull_static_cast<Thread*>(Fpu::owner(cpu))->spill_fpu();
1517 // Become FPU owner and restore own FPU state
1518 Fpu::restore_state (fpu_state());
1520 state_add_dirty (Thread_fpu_owner);
1521 Fpu::set_owner (cpu, this);
1525 PUBLIC inline NEEDS["fpu.h", "fpu_alloc.h"]
1527 Thread::transfer_fpu(Thread *to)
1529 unsigned cpu = this->cpu();
1530 if (cpu != to->cpu())
1533 if (to->fpu_state()->state_buffer())
1534 Fpu_alloc::free_state(to->fpu_state());
1536 to->fpu_state()->state_buffer(fpu_state()->state_buffer());
1537 fpu_state()->state_buffer(0);
1539 assert (current() == this || current() == to);
1541 Fpu::disable(); // it will be reanabled in switch_fpu
1543 if (EXPECT_FALSE(Fpu::owner(cpu) == to))
1545 assert_kdb (to->state() & Thread_fpu_owner);
1547 Fpu::set_owner(cpu, 0);
1548 to->state_del_dirty (Thread_fpu_owner);
1550 else if (Fpu::owner(cpu) == this)
1552 assert_kdb (state() & Thread_fpu_owner);
1554 state_del_dirty (Thread_fpu_owner);
1556 to->state_add_dirty (Thread_fpu_owner);
1557 Fpu::set_owner(cpu, to);
1558 if (EXPECT_FALSE(current() == to))
1563 //---------------------------------------------------------------------------
1564 IMPLEMENTATION [!fpu]:
1568 Thread::switchin_fpu(bool alloc_new_fpu = true)
1569 { (void)alloc_new_fpu;
1578 //---------------------------------------------------------------------------
1579 IMPLEMENTATION [!fpu || ux]:
1583 Thread::transfer_fpu(Thread *)
1586 //---------------------------------------------------------------------------
1587 IMPLEMENTATION [!log]:
1590 unsigned Thread::sys_ipc_log(Syscall_frame *)
1594 unsigned Thread::sys_ipc_trace(Syscall_frame *)
1598 void Thread::page_fault_log(Address, unsigned, unsigned)
1601 PUBLIC static inline
1602 int Thread::log_page_fault()
1606 unsigned Thread::sys_fpage_unmap_log(Syscall_frame *)
1609 //---------------------------------------------------------------------------
1610 IMPLEMENTATION [!io]:
1614 Thread::has_privileged_iopl()
1620 // ----------------------------------------------------------------------------
1621 IMPLEMENTATION [!mp]:
1626 Thread::migration_helper(Migration_info const *inf)
1628 unsigned cpu = inf->cpu;
1629 // LOG_MSG_3VAL(this, "MGi ", Mword(current()), (current_cpu() << 16) | cpu(), Context::current_sched());
1635 // Not sure if this can ever happen
1636 Sched_context *csc = Context::current_sched();
1637 if (!csc || csc->context() == this)
1638 Context::set_current_sched(current()->sched());
1641 Sched_context *sc = sched_context();
1642 sc->set_prio(inf->prio);
1643 sc->set_quantum(inf->quantum);
1648 state_add_dirty(Thread_drq_ready);
1650 set_cpu_of(this, cpu);
1651 return Drq::No_answer | Drq::Need_resched;
1656 Thread::migrate_xcpu(unsigned cpu)
1663 //----------------------------------------------------------------------------
1666 EXTENSION class Thread
1669 struct Migration_log
1674 unsigned target_cpu;
1676 static unsigned fmt(Tb_entry *, int, char *)
1677 asm ("__thread_migration_log_fmt");
1682 // ----------------------------------------------------------------------------
1683 IMPLEMENTATION [mp]:
// IPI handler for per-CPU remote requests (asm label
// "handle_remote_cpu_requests", see the declaration in the class above).
// Runs with the CPU lock held: acknowledge the IPI, drain this CPU's
// pending-request queue (which may hand back a thread to migrate), run
// RCU work, then handle DRQs and reschedule if anything requests it.
1689 Thread::handle_remote_requests_irq()
1690 { assert_kdb (cpu_lock.test());
1691 // printf("CPU[%2u]: > RQ IPI (current=%p)\n", current_cpu(), current());
1692 Ipi::eoi(Ipi::Request);
1693 Context *const c = current();
1694 //LOG_MSG_3VAL(c, "ipi", c->cpu(), (Mword)c, c->drq_pending());
1695 Context *migration_q = 0;
1696 bool resched = _pending_rqq.cpu(c->cpu()).handle_requests(&migration_q);
1698 resched |= Rcu::do_pending_work(c->cpu());
// NOTE(review): the guard around this call (presumably `if (migration_q)`,
// original lines ~1699-1700) is missing from this extraction.
1701 static_cast<Thread*>(migration_q)->do_migration();
1703 if ((resched || c->handle_drq()) && !c->schedule_in_progress())
1705 //LOG_MSG_3VAL(c, "ipis", 0, 0, 0);
1706 // printf("CPU[%2u]: RQ IPI sched %p\n", current_cpu(), current());
1709 // printf("CPU[%2u]: < RQ IPI (current=%p)\n", current_cpu(), current());
// IPI handler for global (all-CPU) requests (asm label
// "ipi_remote_call", see the declaration in the class above): runs with
// the CPU lock held, acknowledges the global-request IPI and delegates
// to Context's global request processing.
1714 Thread::handle_global_remote_requests_irq()
1715 { assert_kdb (cpu_lock.test());
1716 // printf("CPU[%2u]: > RQ IPI (current=%p)\n", current_cpu(), current());
1717 Ipi::eoi(Ipi::Global_request);
1718 Context::handle_global_requests();
1723 Thread::migration_helper(Migration_info const *inf)
1725 // LOG_MSG_3VAL(this, "MGi ", Mword(current()), (current_cpu() << 16) | cpu(), 0);
1726 assert_kdb (cpu() == current_cpu());
1727 assert_kdb (current() != this);
1728 assert_kdb (cpu_lock.test());
1735 // Not sure if this can ever happen
1736 Sched_context *csc = Context::current_sched();
1737 if (!csc || csc->context() == this)
1738 Context::set_current_sched(current()->sched());
1741 unsigned cpu = inf->cpu;
1744 Queue &q = _pending_rqq.cpu(current_cpu());
1745 // The queue lock of the current CPU protects the cpu number in
1747 Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());
1749 // potentailly dequeue from our local queue
1750 if (_pending_rq.queued())
1751 check_kdb (q.dequeue(&_pending_rq, Queue_item::Ok));
1753 Sched_context *sc = sched_context();
1754 sc->set_prio(inf->prio);
1755 sc->set_quantum(inf->quantum);
1760 state_add_dirty(Thread_drq_ready);
1764 assert_kdb (!in_ready_list());
1766 set_cpu_of(this, cpu);
1767 // now we are migrated away fom current_cpu
1773 Queue &q = _pending_rqq.cpu(cpu);
1774 Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());
1776 // migrated meanwhile
1777 if (this->cpu() != cpu || _pending_rq.queued())
1778 return Drq::No_answer | Drq::Need_resched;
1783 q.enqueue(&_pending_rq);
1788 //LOG_MSG_3VAL(this, "sipi", current_cpu(), cpu(), (Mword)current());
1789 Ipi::cpu(cpu).send(Ipi::Request);
1792 return Drq::No_answer | Drq::Need_resched;
1797 Thread::migrate_xcpu(unsigned cpu)
1802 Queue &q = Context::_pending_rqq.cpu(cpu);
1803 Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());
1806 if (cpu != this->cpu())
1812 if (!_pending_rq.queued())
1813 q.enqueue(&_pending_rq);
1819 Ipi::cpu(cpu).send(Ipi::Request);
1822 //----------------------------------------------------------------------------
1823 IMPLEMENTATION [debug]:
// Trace-buffer formatter for thread-migration entries (bound to the
// "__thread_migration_log_fmt" asm label in the debug extension above):
// renders source/target CPU, thread state and user ip into buf.
// @return number of characters snprintf would have written.
1827 Thread::Migration_log::fmt(Tb_entry *e, int maxlen, char *buf)
1829 Migration_log *l = e->payload<Migration_log>();
1830 return snprintf(buf, maxlen, "migrate from %u to %u (state=%lx user ip=%lx)",
1831 l->src_cpu, l->target_cpu, l->state, l->user_ip);