#include "continuation.h"
#include "helping_lock.h"
#include "mem_layout.h"
#include "member_offs.h"
#include "space.h" // Space_index
#include "spin_lock.h"
#include "thread_lock.h"
typedef Context_ptr_base<Thread> Thread_ptr;
/** A thread. This class is the driver class for most kernel functionality.
FIASCO_DECLARE_KOBJ();
friend class Jdb_thread;
friend class Jdb_thread_list;
friend class Jdb_list_threads;
friend class Jdb_list_timeouts;
friend class Jdb_tbuf_show;
enum Context_mode_kernel { Kernel = 0 };
Op_register_del_irq = 5,
Op_modify_senders = 6,
Ctl_set_pager = 0x0010000,
Ctl_bind_task = 0x0200000,
Ctl_alien_thread = 0x0400000,
Ctl_ux_native = 0x0800000,
Ctl_set_exc_handler = 0x1000000,
Exr_trigger_exception = 0x20000,
Vcpu_ctl_extendet_vcpu = 0x10000,
enum { Stack_size = Config::PAGE_SIZE };
static Per_cpu<Dbg_stack> dbg_stack;
typedef void (Utcb_copy_func)(Thread *sender, Thread *receiver);
* @param task the task the thread should reside in.
* @param id user-visible thread ID of the sender.
* @param init_prio initial priority.
* @param mcp maximum controlled priority.
* @post state() != Thread_invalid.
int handle_page_fault (Address pfa, Mword error, Mword pc,
struct Migration_helper_info
Thread(const Thread&); ///< Default copy constructor is undefined
void *operator new(size_t); ///< Default new operator undefined
bool handle_sigma0_page_fault (Address pfa);
* This function is the default routine run when a newly
* initialized context is switch_exec()'ed.
static void user_invoke();
static bool pagein_tcb_request(Return_frame *regs);
inline Mword user_ip() const;
inline void user_ip(Mword);
inline Mword user_sp() const;
inline void user_sp(Mword);
inline Mword user_flags() const;
/** Nesting level in the debugger (always critical) if > 1. */
static Per_cpu<unsigned long> nested_trap_recover;
static void handle_remote_requests_irq() asm ("handle_remote_cpu_requests");
static void handle_global_remote_requests_irq() asm ("ipi_remote_call");
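// Note: these two handlers are the targets of the inter-CPU request IPIs;
// their implementations in the [mp] section below EOI the IPI and drain
// the per-CPU request queues.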
explicit Thread(Context_mode_kernel);
// Another critical TCB cache line:
Thread_lock _thread_lock;
Thread_ptr _exc_handler;
Irq_base *_del_observer;
static const unsigned magic = 0xf001c001;
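// Magic cookie marking a valid TCB; is_tcb_address() below rounds an
// address down to its thread_block_size-aligned base and tests this field
// to decide whether the address points into a live TCB.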
#include <cstdlib> // panic()
#include "entry_frame.h"
#include "fpu_alloc.h"
#include "kmem_alloc.h"
#include "map_util.h"
#include "ram_quota.h"
#include "sched_context.h"
#include "std_macros.h"
#include "thread_state.h"
FIASCO_DEFINE_KOBJ(Thread);
DEFINE_PER_CPU Per_cpu<unsigned long> Thread::nested_trap_recover;
Thread::Dbg_stack::Dbg_stack()
stack_top = Kmem_alloc::allocator()->unaligned_alloc(Stack_size);
stack_top = (char *)stack_top + Stack_size;
//printf("JDB STACK start= %p - %p\n", (char *)stack_top - Stack_size, (char *)stack_top);
PUBLIC inline NEEDS[Thread::thread_lock]
{ thread_lock()->lock(); }
Thread::operator new(size_t, Ram_quota *q) throw ()
void *t = Mapped_allocator::allocator()->q_unaligned_alloc(q, Config::thread_block_size);
memset(t, 0, sizeof(Thread));
reinterpret_cast<Thread*>(t)->_quota = q;
/** Class-specific allocator.
This allocator ensures that threads are allocated at a fixed virtual
address computed from their thread ID.
@return address of new thread control block
Thread::operator new(size_t, Thread *t) throw ()
// Allocate TCB in TCB space. Actually, do not allocate anything,
// just return the address. Allocation happens on the fly in
// Thread::handle_page_fault().
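// Usage sketch (hypothetical call site): with the TCB address derived
// from the thread ID, construction reduces to placement new; the backing
// memory is only mapped when first touched:
//   Thread *t = new (reinterpret_cast<Thread *>(tcb_addr)) Thread(...);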
Thread::bind(Task *t, User<Utcb>::Ptr utcb)
// _utcb == 0 for all kernel threads
Space::Ku_mem const *u = t->find_ku_mem(utcb, sizeof(Utcb));
if (EXPECT_FALSE(utcb && !u))
Lock_guard<typeof(*_space.lock())> guard(_space.lock());
_utcb.set(utcb, u->kern_addr(utcb));
PUBLIC inline NEEDS["kdb_ke.h", "cpu_lock.h", "space.h"]
Lock_guard<typeof(*_space.lock())> guard(_space.lock());
old = static_cast<Task*>(_space.space());
Mem_space *oms = old->mem_space();
// switch to a safe page table
if (Mem_space::current_mem_space(current_cpu()) == oms)
Mem_space::kernel_space()->switchin_context(oms);
current()->rcu_wait();
/** Cut-down version of Thread constructor; only for kernel threads
Do only what's necessary to get a kernel thread started --
skip all fancy stuff, no locking is necessary.
@param task the address space
@param id user-visible thread ID of the sender
Thread::Thread(Context_mode_kernel)
: Receiver(), Sender(), _del_observer(0), _magic(magic)
*reinterpret_cast<void(**)()>(--_kernel_sp) = user_invoke;
if (Config::stack_depth)
std::memset((char*)this + sizeof(Thread), '5',
Config::thread_block_size-sizeof(Thread)-64);
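// Presumably the '5' fill pattern lets the kernel debugger estimate how
// deep the stack ever grew (assumption; this is only done when
// Config::stack_depth is set).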
/** Destructor. Reestablish the Context constructor's precondition.
@pre current() == thread_lock()->lock_owner()
&& state() == Thread_dead
@post (_kernel_sp == 0) && (* (stack end) == 0) && !exists()
Thread::~Thread() // To be called in locked state.
unsigned long *init_sp = reinterpret_cast<unsigned long*>
(reinterpret_cast<unsigned long>(this) + size - sizeof(Entry_frame));
Fpu_alloc::free_state(fpu_state());
_state = Thread_invalid;
// IPC-gate deletion stuff ------------------------------------
Thread::ipc_gate_deleted(Mword id)
Lock_guard<Cpu_lock> g(&cpu_lock);
_del_observer->hit();
class Del_irq_pin : public Irq_pin_dummy
Del_irq_pin::Del_irq_pin(Thread *o)
{ payload()[0] = (Address)o; }
Del_irq_pin::thread() const
{ return (Thread*)payload()[0]; }
Del_irq_pin::unbind_irq()
{ thread()->remove_delete_irq(); }
Del_irq_pin::~Del_irq_pin()
Thread::register_delete_irq(Irq_base *irq)
irq->pin()->unbind_irq();
irq->pin()->replace<Del_irq_pin>(this);
Thread::remove_delete_irq()
Irq_base *tmp = _del_observer;
tmp->pin()->unbind_irq();
// end of: IPC-gate deletion stuff -------------------------------
/** Currently executing thread.
@return currently executing thread.
{ return nonull_static_cast<Thread*>(current()); }
Thread::exception_triggered() const
{ return _exc_cont.valid(); }
Thread::continuation_test_and_restore()
bool v = _exc_cont.valid();
_exc_cont.restore(regs());
// state requests/manipulation
Overwrite Context's version of thread_lock() with a semantically
equivalent, but more efficient version.
@return lock used to synchronize accesses to the thread.
Thread::thread_lock()
{ return &_thread_lock; }
PUBLIC inline NEEDS ["config.h", "timeout.h"]
Thread::handle_timer_interrupt()
unsigned _cpu = cpu(true);
// XXX: This assumes periodic timers (i.e. bogus in one-shot mode)
if (!Config::fine_grained_cputime)
consume_time(Config::scheduler_granularity);
bool resched = Rcu::do_pending_work(_cpu);
// Check if we need to reschedule due to timeouts or wakeups
if ((Timeout_q::timeout_queue.cpu(_cpu).do_timeouts() || resched)
&& !schedule_in_progress())
assert (timeslice_timeout.cpu(cpu(true))->is_set()); // Coma check
// Cancel must be cleared on all kernel entry paths. See slowtraps for
// why we delay doing it until here.
state_del(Thread_cancel);
// we haven't been re-initialized (cancel was not set) -- so sleep
if (state_change_safely(~Thread_ready, Thread_cancel | Thread_dead))
while (! (state() & Thread_ready))
Thread::halt_current ()
current_thread()->halt();
kdb_ke("Thread not halted");
PRIVATE static inline
Thread::user_invoke_generic()
Context *const c = current();
assert_kdb (c->state() & Thread_ready_mask);
if (c->handle_drq() && !c->schedule_in_progress())
// release CPU lock explicitly, because
// * the context that switched to us holds the CPU lock
// * we run on a newly-created stack without a CPU lock guard
Thread::leave_and_kill_myself()
current_thread()->do_kill();
WARN("dead thread scheduled: %lx\n", current_thread()->dbg_id());
kdb_ke("DEAD SCHED");
Thread::handle_kill_helper(Drq *src, Context *, void *)
delete nonull_static_cast<Thread*>(src->context());
return Drq::No_answer | Drq::Need_resched;
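// Deletion runs as a DRQ on the kernel context (see the
// kernel_context_drq(handle_kill_helper, 0) call at the end of do_kill()
// below), so a dying thread never frees the stack it is still running on.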
Lock_guard<Thread_lock> guard(thread_lock());
if (state() == Thread_invalid)
// But first prevent it from being woken up by asynchronous events
Lock_guard <Cpu_lock> guard(&cpu_lock);
// if IPC timeout active, reset it
// Switch to time-sharing mode
set_mode(Sched_mode(0));
// Switch to time-sharing scheduling context
if (sched() != sched_context())
switch_sched(sched_context());
if (!current_sched() || current_sched()->context() == this)
set_current_sched(current()->sched());
// possibly dequeue from a wait queue
// if other threads want to send me IPC messages, abort these
Lock_guard <Cpu_lock> guard(&cpu_lock);
while (Sender *s = Sender::cast(sender_list()->head()))
s->ipc_receiver_aborted();
Proc::preemption_point();
// if engaged in IPC operation, stop it
if (in_sender_list())
sender_dequeue(receiver()->sender_list());
vcpu_set_user_space(0);
state_change_dirty(0, Thread_dead);
// dequeue from system queues
_del_observer->pin()->unbind_irq();
state_del_dirty(Thread_ready_mask);
WARN("woken up dead thread %lx\n", dbg_id());
state_del_dirty(Thread_ready_mask);
kernel_context_drq(handle_kill_helper, 0);
Thread::handle_remote_kill(Drq *, Context *self, void *)
Thread *c = nonull_static_cast<Thread*>(self);
c->state_add_dirty(Thread_cancel | Thread_ready);
c->_exc_cont.restore(c->regs());
c->do_trigger_exception(c->regs(), (void*)&Thread::leave_and_kill_myself);
Lock_guard<Cpu_lock> guard(&cpu_lock);
if (cpu() == current_cpu())
state_add_dirty(Thread_cancel | Thread_ready);
sched()->deblock(cpu());
_exc_cont.restore(regs()); // overwrite an already triggered exception
do_trigger_exception(regs(), (void*)&Thread::leave_and_kill_myself);
// current()->switch_exec (this, Helping);
drq(Thread::handle_remote_kill, 0, 0, Drq::Any_ctxt);
Thread::set_sched_params(unsigned prio, Unsigned64 quantum)
Sched_context *sc = sched_context();
bool const change = prio != sc->prio()
|| quantum != sc->quantum();
bool const ready_queued = in_ready_list();
if (!change && (ready_queued || this == current()))
sc->set_quantum(quantum);
if (sc == current_sched())
set_current_sched(sc);
if (state() & Thread_ready_mask)
if (this != current())
Thread::control(Thread_ptr const &pager, Thread_ptr const &exc_handler)
if (pager.is_valid())
if (exc_handler.is_valid())
_exc_handler = exc_handler;
Thread::is_tcb_address(Address a)
a &= ~(Config::thread_block_size - 1);
return reinterpret_cast<Thread *>(a)->_magic == magic;
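// Usage sketch: recover the owning Thread from any address within its TCB
// block (mirrors the rounding done above):
//   if (Thread::is_tcb_address(a))
//     Thread *t = reinterpret_cast<Thread *>(a & ~(Config::thread_block_size - 1));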
Thread::assert_irq_entry()
assert_kdb(current_thread()->schedule_in_progress()
|| current_thread()->state() & (Thread_ready_mask | Thread_drq_wait | Thread_waiting | Thread_ipc_transfer));
// ---------------------------------------------------------------------------
Thread::check_sys_ipc(unsigned flags, Thread **partner, Thread **sender,
bool *have_recv) const
if (flags & L4_obj_ref::Ipc_recv)
*sender = flags & L4_obj_ref::Ipc_open_wait ? 0 : const_cast<Thread*>(this);
if (flags & L4_obj_ref::Ipc_send)
*partner = const_cast<Thread*>(this);
// FIXME: should be removed; flags == 0 is a no-op
*sender = const_cast<Thread*>(this);
*partner = const_cast<Thread*>(this);
return *have_recv || ((flags & L4_obj_ref::Ipc_send) && *partner);
Thread::handle_migration_helper(Drq *, Context *, void *p)
Migration_helper_info const *inf = (Migration_helper_info const *)p;
return inf->victim->migration_helper(&inf->inf);
Thread::do_migration()
assert_kdb(cpu_lock.test());
assert_kdb(current_cpu() == cpu(true));
Migration_helper_info inf;
Lock_guard<typeof(_migration_rq.affinity_lock)>
g(&_migration_rq.affinity_lock);
inf.inf = _migration_rq.inf;
_migration_rq.pending = false;
_migration_rq.in_progress = true;
unsigned on_cpu = cpu();
if (inf.inf.cpu == ~0U)
state_add_dirty(Thread_suspended);
set_sched_params(0, 0);
_migration_rq.in_progress = false;
state_del_dirty(Thread_suspended);
if (inf.inf.cpu == on_cpu)
set_sched_params(inf.inf.prio, inf.inf.quantum);
_migration_rq.in_progress = false;
// spill FPU state into memory before migration
if (state() & Thread_fpu_owner)
if (current() != this)
Fpu::set_owner(on_cpu, 0);
// if we are in the middle of the scheduler, leave it now
if (schedule_in_progress() == this)
reset_schedule_in_progress();
if (current() == this && Config::Max_num_cpus > 1)
kernel_context_drq(handle_migration_helper, &inf);
migration_helper(&inf.inf);
Thread::initiate_migration()
Thread::finish_migration()
{ enqueue_timeout_again(); }
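// finish_migration() runs once the thread has arrived on the target CPU;
// re-queueing the timeout presumably moves it onto that CPU's timeout
// queue (assumption based on enqueue_timeout_again()).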
Thread::migrate(Migration_info const &info)
assert_kdb (cpu_lock.test());
LOG_TRACE("Thread migration", "mig", this, __thread_migration_log_fmt,
Migration_log *l = tbe->payload<Migration_log>();
l->target_cpu = info.cpu;
l->user_ip = regs()->ip();
Lock_guard<typeof(_migration_rq.affinity_lock)>
g(&_migration_rq.affinity_lock);
_migration_rq.inf = info;
_migration_rq.pending = true;
unsigned cpu = this->cpu();
if (current_cpu() == cpu)
//---------------------------------------------------------------------------
IMPLEMENTATION [fpu && !ux]:
#include "fpu_alloc.h"
#include "fpu_state.h"
PUBLIC inline NEEDS ["fpu.h"]
// If we own the FPU, we should never be getting an "FPU unavailable" trap
assert_kdb (Fpu::owner(cpu()) == this);
assert_kdb (state() & Thread_fpu_owner);
assert_kdb (fpu_state());
// Save the FPU state of the previous FPU owner (lazy) if applicable
Fpu::save_state (fpu_state());
state_del_dirty (Thread_fpu_owner);
* Handle FPU trap for this context. Assumes interrupts are disabled.
PUBLIC inline NEEDS [Thread::spill_fpu, "fpu_alloc.h","fpu_state.h"]
Thread::switchin_fpu(bool alloc_new_fpu = true)
unsigned cpu = this->cpu(true);
if (state() & Thread_vcpu_fpu_disabled)
// If we own the FPU, we should never be getting an "FPU unavailable" trap
assert_kdb (Fpu::owner(cpu) != this);
// Allocate FPU state slab if we didn't already have one
if (!fpu_state()->state_buffer()
&& (EXPECT_FALSE((!alloc_new_fpu
|| (state() & Thread_alien))
|| !Fpu_alloc::alloc_state(_quota, fpu_state()))))
// Enable the FPU before accessing it, otherwise recursive trap
// Save the FPU state of the previous FPU owner (lazy) if applicable
nonull_static_cast<Thread*>(Fpu::owner(cpu))->spill_fpu();
// Become FPU owner and restore own FPU state
Fpu::restore_state(fpu_state());
state_add_dirty(Thread_fpu_owner);
Fpu::set_owner(cpu, this);
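// Summary of the lazy-FPU scheme above: the FPU is handed over only on an
// "FPU unavailable" trap. The previous owner's register state is spilled
// to its state buffer, then this thread's state is restored and it is
// recorded as the new owner.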
PUBLIC inline NEEDS["fpu.h", "fpu_alloc.h"]
Thread::transfer_fpu(Thread *to)
unsigned cpu = this->cpu();
if (cpu != to->cpu())
if (to->fpu_state()->state_buffer())
Fpu_alloc::free_state(to->fpu_state());
to->fpu_state()->state_buffer(fpu_state()->state_buffer());
fpu_state()->state_buffer(0);
assert (current() == this || current() == to);
Fpu::disable(); // it will be re-enabled in switch_fpu
if (EXPECT_FALSE(Fpu::owner(cpu) == to))
assert_kdb (to->state() & Thread_fpu_owner);
Fpu::set_owner(cpu, 0);
to->state_del_dirty (Thread_fpu_owner);
else if (Fpu::owner(cpu) == this)
assert_kdb (state() & Thread_fpu_owner);
state_del_dirty (Thread_fpu_owner);
to->state_add_dirty (Thread_fpu_owner);
Fpu::set_owner(cpu, to);
if (EXPECT_FALSE(current() == to))
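// Note the asymmetry in transfer_fpu(): across CPUs the state buffer
// itself is handed over (no register copy), while on the same CPU only
// the Thread_fpu_owner role moves between the two threads.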
//---------------------------------------------------------------------------
IMPLEMENTATION [!fpu]:
Thread::switchin_fpu(bool alloc_new_fpu = true)
//---------------------------------------------------------------------------
IMPLEMENTATION [!fpu || ux]:
Thread::transfer_fpu(Thread *)
//---------------------------------------------------------------------------
IMPLEMENTATION [!log]:
unsigned Thread::sys_ipc_log(Syscall_frame *)
unsigned Thread::sys_ipc_trace(Syscall_frame *)
void Thread::page_fault_log(Address, unsigned, unsigned)
int Thread::log_page_fault()
unsigned Thread::sys_fpage_unmap_log(Syscall_frame *)
// ----------------------------------------------------------------------------
IMPLEMENTATION [!mp]:
Thread::migration_helper(Migration_info const *inf)
unsigned cpu = inf->cpu;
// LOG_MSG_3VAL(this, "MGi ", Mword(current()), (current_cpu() << 16) | cpu(), Context::current_sched());
// Not sure if this can ever happen
Sched_context *csc = Context::current_sched();
if (!csc || csc->context() == this)
Context::set_current_sched(current()->sched());
Sched_context *sc = sched_context();
sc->set_prio(inf->prio);
sc->set_quantum(inf->quantum);
state_add_dirty(Thread_drq_ready);
set_cpu_of(this, cpu);
return Drq::No_answer | Drq::Need_resched;
Thread::migrate_xcpu(unsigned cpu)
//----------------------------------------------------------------------------
EXTENSION class Thread
struct Migration_log
unsigned target_cpu;
static unsigned fmt(Tb_entry *, int, char *)
asm ("__thread_migration_log_fmt");
// ----------------------------------------------------------------------------
IMPLEMENTATION [mp]:
Thread::handle_remote_requests_irq()
assert_kdb (cpu_lock.test());
// printf("CPU[%2u]: > RQ IPI (current=%p)\n", current_cpu(), current());
Ipi::eoi(Ipi::Request);
Context *const c = current();
//LOG_MSG_3VAL(c, "ipi", c->cpu(), (Mword)c, c->drq_pending());
Context *migration_q = 0;
bool resched = _pending_rqq.cpu(c->cpu()).handle_requests(&migration_q);
resched |= Rcu::do_pending_work(c->cpu());
static_cast<Thread*>(migration_q)->do_migration();
if ((resched || c->handle_drq()) && !c->schedule_in_progress())
//LOG_MSG_3VAL(c, "ipis", 0, 0, 0);
// printf("CPU[%2u]: RQ IPI sched %p\n", current_cpu(), current());
// printf("CPU[%2u]: < RQ IPI (current=%p)\n", current_cpu(), current());
Thread::handle_global_remote_requests_irq()
assert_kdb (cpu_lock.test());
// printf("CPU[%2u]: > RQ IPI (current=%p)\n", current_cpu(), current());
Ipi::eoi(Ipi::Global_request);
Context::handle_global_requests();
Thread::migration_helper(Migration_info const *inf)
// LOG_MSG_3VAL(this, "MGi ", Mword(current()), (current_cpu() << 16) | cpu(), 0);
assert_kdb (cpu() == current_cpu());
assert_kdb (current() != this);
assert_kdb (cpu_lock.test());
// Not sure if this can ever happen
Sched_context *csc = Context::current_sched();
if (!csc || csc->context() == this)
Context::set_current_sched(current()->sched());
unsigned cpu = inf->cpu;
Queue &q = _pending_rqq.cpu(current_cpu());
// The queue lock of the current CPU protects the cpu number in
Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());
// potentially dequeue from our local queue
if (_pending_rq.queued())
check_kdb (q.dequeue(&_pending_rq, Queue_item::Ok));
Sched_context *sc = sched_context();
sc->set_prio(inf->prio);
sc->set_quantum(inf->quantum);
state_add_dirty(Thread_drq_ready);
assert_kdb (!in_ready_list());
set_cpu_of(this, cpu);
// now we are migrated away from current_cpu
Queue &q = _pending_rqq.cpu(cpu);
Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());
// migrated meanwhile
if (this->cpu() != cpu || _pending_rq.queued())
return Drq::No_answer | Drq::Need_resched;
q.enqueue(&_pending_rq);
//LOG_MSG_3VAL(this, "sipi", current_cpu(), cpu(), (Mword)current());
Ipi::cpu(cpu).send(Ipi::Request);
return Drq::No_answer | Drq::Need_resched;
Thread::migrate_xcpu(unsigned cpu)
Queue &q = Context::_pending_rqq.cpu(cpu);
Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());
if (cpu != this->cpu())
if (!_pending_rq.queued())
q.enqueue(&_pending_rq);
Ipi::cpu(cpu).send(Ipi::Request);
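// The request IPI is handled on the target CPU by
// handle_remote_requests_irq() above, which drains the pending queue and
// eventually calls do_migration() on the enqueued thread.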
//----------------------------------------------------------------------------
IMPLEMENTATION [debug]:
Thread::Migration_log::fmt(Tb_entry *e, int maxlen, char *buf)
Migration_log *l = e->payload<Migration_log>();
return snprintf(buf, maxlen, "migrate from %u to %u (state=%lx user ip=%lx)",
l->src_cpu, l->target_cpu, l->state, l->user_ip);