3 #include <csetjmp> // typedef jmp_buf
7 #include "continuation.h"
12 #include "member_offs.h"
13 #include "per_cpu_data.h"
15 #include "queue_item.h"
17 #include "sched_context.h"
18 #include "spin_lock.h"
31 explicit Context_ptr(unsigned long id) : _t(id) {}
33 Context_ptr(Context_ptr const &o) : _t(o._t) {}
34 Context_ptr const &operator = (Context_ptr const &o)
35 { _t = o._t; return *this; }
37 Kobject_iface *ptr(Space *, unsigned char *) const;
39 bool is_kernel() const { return false; }
40 bool is_valid() const { return _t != ~0UL; }
42 // only for debugging use
43 Mword raw() const { return _t;}
50 template< typename T >
51 class Context_ptr_base : public Context_ptr
54 enum Invalid_type { Invalid };
55 explicit Context_ptr_base(Invalid_type) : Context_ptr(0) {}
56 explicit Context_ptr_base(unsigned long id) : Context_ptr(id) {}
58 Context_ptr_base(Context_ptr_base<T> const &o) : Context_ptr(o) {}
59 template< typename X >
60 Context_ptr_base(Context_ptr_base<X> const &o) : Context_ptr(o)
61 { X*x = 0; T*t = x; (void)t; }
63 Context_ptr_base<T> const &operator = (Context_ptr_base<T> const &o)
64 { Context_ptr::operator = (o); return *this; }
66 template< typename X >
67 Context_ptr_base<T> const &operator = (Context_ptr_base<X> const &o)
68 { X*x=0; T*t=x; (void)t; Context_ptr::operator = (o); return *this; }
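// Note: the dead statements "X*x = 0; T*t = x;" in the converting
// constructor and assignment operator above are a compile-time check that
// X* converts to T* (i.e. that X derives from T); they generate no code.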
70 //T *ptr(Space *s) const { return static_cast<T*>(Context_ptr::ptr(s)); }
76 class Present_list_item : public D_list_item
79 static Spin_lock _plist_lock;
80 static Present_list_item *head;
86 /** An execution context. A context is a runnable, schedulable activity.
87 It carries along some state used by other subsystems: a lock count
88 and stack-element forward/next pointers.
91 public Global_context_data,
92 private Present_list_item,
96 friend class Jdb_thread_list;
97 friend class Context_ptr;
98 friend class Jdb_utcb;
101 virtual void finish_migration() = 0;
102 virtual void initiate_migration() = 0;
112 * \brief Encapsulate an aggregate of Context.
114 * Allows getting a back reference to the aggregating Context object.
121 * \brief Get the aggregating Context object.
127 * \brief Deferred Request.
129 * Represents a request that can be queued for each Context
130 * and is executed by the target context just after switching to the
133 class Drq : public Queue_item, public Context_member
136 typedef unsigned (Request_func)(Drq *, Context *target, void *);
137 enum { Need_resched = 1, No_answer = 2 };
138 enum Wait_mode { No_wait = 0, Wait = 1 };
139 enum Exec_mode { Target_ctxt = 0, Any_ctxt = 1 };
140 // enum State { Idle = 0, Handled = 1, Reply_handled = 2 };
149 * \brief Queue for deferred requests (Drq).
151 * A FIFO queue each Context aggregates to queue incoming Drqs
152 * that have to be executed directly after switching to a context.
154 class Drq_q : public Queue, public Context_member
157 enum Drop_mode { Drop = true, No_drop = false };
159 bool handle_requests(Drop_mode drop = No_drop);
160 bool execute_request(Drq *r, Drop_mode drop, bool local);
163 struct Migration_info
174 * Definition of different scheduling modes
178 Periodic = 0x1, ///< 0 = Conventional, 1 = Periodic
179 Nonstrict = 0x2, ///< 0 = Strictly Periodic, 1 = Non-strictly periodic
183 * Definition of different helping modes
191 // FIXME: remove this function!
192 Mword is_tcb_mapped() const;
195 * Size of a Context (TCB + kernel stack)
197 static const size_t size = Config::thread_block_size;
200 * Return consumed CPU time.
201 * @return Consumed CPU time in usecs
203 Cpu_time consumed_time();
206 * Get the kernel UTCB pointer.
207 * @return UTCB pointer, or 0 if there is no UTCB
212 * Get the local ID of the context.
214 Local_id local_id() const;
217 * Set the local ID of the context.
218 * Does not touch the kernel UTCB pointer, since
219 * we would need space() to do the address translation.
221 * After setting the local ID and mapping the UTCB area, use
222 * Thread::utcb_init() to set the kernel UTCB pointer and initialize the
225 void local_id (Local_id id);
227 virtual bool kill() = 0;
229 void spill_user_state();
230 void fill_user_state();
234 * Update consumed CPU time during each context switch and when
235 * reading out the current thread's consumed CPU time.
237 void update_consumed_time();
240 * Set the kernel UTCB pointer.
241 * Does NOT keep the value of _local_id in sync.
242 * @see local_id (Local_id id);
251 friend class Jdb_tcb;
253 /// low level page table switching stuff
254 void switchin_context(Context *) asm ("switchin_context_label") FIASCO_FASTCALL;
256 /// low level fpu switching stuff
257 void switch_fpu (Context *t);
259 /// low level cpu switching stuff
260 void switch_cpu (Context *t);
263 Spin_lock_coloc<Space *> _space;
270 // how many locks does this thread hold on other threads
271 // incremented in Thread::lock, decremented in Thread::clear
272 // Thread::kill needs to know
274 Thread_lock * const _thread_lock;
279 // The scheduling parameters. We would only need to keep an
280 // anonymous reference to them as we do not need them ourselves, but
281 // we aggregate them for performance reasons.
282 Sched_context _sched_context;
283 Sched_context * _sched;
287 // Pointer to floating point register state
288 Fpu_state _fpu_state;
289 // Implementation-specific consumed CPU time (TSC ticks or usecs)
290 Clock::Time _consumed_time;
297 // for trigger_exception
298 Continuation _exc_cont;
300 jmp_buf *_recover_jmpbuf; // setjmp buffer for page-fault recovery
308 Migration_rq() : pending(false), in_progress(false) {}
312 // XXX Timeout for both sender and receiver! Normally we would have to
313 // define separate timeouts in Receiver and Sender, but because only one
314 // timeout can be set at a time we share the same timeout. The timeout
315 // has to be defined here because Dirq::hit must be able to reset the
316 // timeout (Irq::_irq_thread is of type Receiver).
318 Spin_lock _affinity_lock;
321 static Per_cpu<Clock> _clock;
322 static Per_cpu<Context *> _kernel_ctxt;
328 #include "tb_entry.h"
330 EXTENSION class Context
355 static unsigned drq_log_fmt(Tb_entry *, int, char *)
356 asm ("__context_drq_log_fmt");
360 // --------------------------------------------------------------------------
366 #include "cpu_lock.h"
367 #include "entry_frame.h"
369 #include "globals.h" // current()
371 #include "lock_guard.h"
374 #include "mem_layout.h"
375 #include "processor.h"
377 #include "std_macros.h"
378 #include "thread_state.h"
382 Per_cpu<Clock> DEFINE_PER_CPU Context::_clock(true);
383 Per_cpu<Context *> DEFINE_PER_CPU Context::_kernel_ctxt;
385 Spin_lock Present_list_item::_plist_lock INIT_PRIORITY(EARLY_INIT_PRIO);
386 Present_list_item *Present_list_item::head;
388 IMPLEMENT inline NEEDS["kdb_ke.h"]
390 Context_ptr::ptr(Space *s, unsigned char *rights) const
392 assert_kdb (cpu_lock.test());
394 return s->obj_space()->lookup_local(_t, rights);
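// Illustrative sketch (not part of the original source; the variable names
// and the subsequent rights check are assumptions): resolving a user-supplied
// capability index held in a Context_ptr against the caller's object space.
//
//   unsigned char rights = 0;
//   Kobject_iface *o = t_ptr.ptr(current()->space(), &rights);
//   if (!o)
//     return;                       // invalid or unmapped capability
//   // the caller is expected to down-cast and rights-check o as appropriate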
401 /** Initialize a context. After setup, a switch_exec to this context results
402 in a return to user code using the return registers at regs(). The
403 return registers are not initialized, however; neither is the space_context
404 to be used in thread switching (use set_space_context() for that).
405 @pre (_kernel_sp == 0) && (* (stack end) == 0)
406 @param thread_lock pointer to lock used to lock this context
407 @param space_context the space context
409 PUBLIC inline NEEDS ["atomic.h", "entry_frame.h", <cstdio>]
410 Context::Context(Thread_lock *thread_lock)
411 : _kernel_sp(reinterpret_cast<Mword*>(regs())),
413 _thread_lock(thread_lock),
415 _sched(&_sched_context),
416 _mode(Sched_mode (0))
418 // NOTE: We do not have to synchronize the initialization of
419 // _space_context because it is constant for all concurrent
420 // invocations of this constructor. When two threads concurrently
421 // try to create a new task, they already synchronize in
422 // sys_task_new() and avoid calling us twice with different
423 // space_context arguments.
425 set_cpu_of(this, current_cpu());
427 Lock_guard<Spin_lock> guard(&Present_list_item::_plist_lock);
428 if (Present_list_item::head)
429 Present_list_item::head->Present_list_item::enqueue(this);
431 Present_list_item::head = this;
437 Context::affinity_lock()
438 { return &_affinity_lock; }
444 // If this context owned the FPU, no one owns it now
445 if (Fpu::is_owner(cpu(), this))
447 Fpu::set_owner(cpu(), 0);
457 Lock_guard<Spin_lock> guard(&Present_list_item::_plist_lock);
458 if (this == Present_list_item::head)
460 if (Present_list_item::next() != this)
461 Present_list_item::head = static_cast<Present_list_item*>(Present_list_item::next());
464 Present_list_item::head = 0;
469 Present_list_item::dequeue();
476 Context::state(bool check = true) const
479 assert_2_kdb(!check || cpu() == current_cpu());
485 Context::is_tcb_mapped() const
491 Context::kernel_context(unsigned cpu)
492 { return _kernel_ctxt.cpu(cpu); }
494 PROTECTED static inline
496 Context::kernel_context(unsigned cpu, Context *ctxt)
497 { _kernel_ctxt.cpu(cpu) = ctxt; }
500 /** @name State manipulation */
506 * Does the context exist?
507 * @return true if this context has been initialized.
509 PUBLIC inline NEEDS ["thread_state.h"]
511 Context::exists() const
513 return state() != Thread_invalid;
517 * Is the context about to be deleted?
518 * @return true if this context is in deletion.
520 PUBLIC inline NEEDS ["thread_state.h"]
522 Context::is_invalid() const
523 { return state() == Thread_invalid; }
526 * Atomically add bits to state flags.
527 * @param bits bits to be added to state flags
528 * @return 1 if none of the bits that were added had been set before
530 PUBLIC inline NEEDS ["atomic.h"]
532 Context::state_add (Mword const bits)
534 assert_2_kdb(cpu() == current_cpu());
535 atomic_or (&_state, bits);
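// Minimal usage sketch (illustrative; t is a hypothetical Context* on the
// current CPU): atomic_or() makes the state update safe against concurrent
// modifications of _state.
//
//   t->state_add(Thread_ready);
//   t->ready_enqueue();        // enqueues only if ready and with time left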
539 * Add bits in state flags. Unsafe (non-atomic) and
540 * fast version -- you must hold the kernel lock when you use it.
541 * @pre cpu_lock.test() == true
542 * @param bits bits to be added to state flags
546 Context::state_add_dirty (Mword bits)
548 assert_2_kdb(cpu() == current_cpu());
553 * Atomically delete bits from state flags.
554 * @param bits bits to be removed from state flags
555 * @return 1 if all of the bits that were removed had previously been set
557 PUBLIC inline NEEDS ["atomic.h"]
559 Context::state_del (Mword const bits)
561 assert_2_kdb (current_cpu() == cpu());
562 atomic_and (&_state, ~bits);
566 * Delete bits in state flags. Unsafe (non-atomic) and
567 * fast version -- you must hold the kernel lock when you use it.
568 * @pre cpu_lock.test() == true
569 * @param bits bits to be removed from state flags
573 Context::state_del_dirty (Mword bits, bool check = true)
576 assert_2_kdb(!check || cpu() == current_cpu());
581 * Atomically delete and add bits in state flags, provided the
582 * following rules apply (otherwise state is not changed at all):
583 * - Bits that are to be set must be clear in state or clear in mask
584 * - Bits that are to be cleared must be set in state
585 * @param mask Bits not set in mask shall be deleted from state flags
586 * @param bits Bits to be added to state flags
587 * @return 1 if state was changed, 0 otherwise
589 PUBLIC inline NEEDS ["atomic.h"]
591 Context::state_change_safely (Mword const mask, Mword const bits)
593 assert_2_kdb (current_cpu() == cpu());
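// The compare-and-swap loop below retries until _state is updated
// consistently; the change is rejected when a bit to be added (bits & mask)
// is already set, or a bit to be removed (~mask) is already clear.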
599 if ((old & bits & mask) | (~old & ~mask))
602 while (!cas (&_state, old, (old & mask) | bits));
608 * Atomically delete and add bits in state flags.
609 * @param mask bits not set in mask shall be deleted from state flags
610 * @param bits bits to be added to state flags
612 PUBLIC inline NEEDS ["atomic.h"]
614 Context::state_change (Mword const mask, Mword const bits)
616 assert_2_kdb (current_cpu() == cpu());
617 return atomic_change (&_state, mask, bits);
621 * Delete and add bits in state flags. Unsafe (non-atomic) and
622 * fast version -- you must hold the kernel lock when you use it.
623 * @pre cpu_lock.test() == true
624 * @param mask Bits not set in mask shall be deleted from state flags
625 * @param bits Bits to be added to state flags
629 Context::state_change_dirty (Mword const mask, Mword const bits, bool check = true)
632 assert_2_kdb(!check || cpu() == current_cpu());
640 /** Return the space context.
641 @return space context used for this execution context.
642 Set with set_space_context().
644 PUBLIC inline NEEDS["kdb_ke.h", "cpu_lock.h"]
646 Context::space() const
648 //assert_kdb (cpu_lock.test());
649 return _space.get_unused();
652 PUBLIC inline NEEDS[Context::space, Context::vcpu_user_space]
654 Context::vcpu_aware_space() const
656 if (EXPECT_FALSE(state() & Thread_vcpu_user_mode))
657 return vcpu_user_space();
662 /** Convenience function: Return memory space. */
663 PUBLIC inline NEEDS["space.h"]
665 Context::mem_space() const
667 return space()->mem_space();
671 @return the thread lock used to lock this context.
675 Context::thread_lock() const
681 /** Registers used when iret'ing to user mode.
682 @return return registers
684 PUBLIC inline NEEDS["cpu.h", "entry_frame.h"]
686 Context::regs() const
688 return reinterpret_cast<Entry_frame *>
689 (Cpu::stack_align(reinterpret_cast<Mword>(this) + size)) - 1;
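// Worked example (illustrative numbers, assuming Config::thread_block_size
// == 0x800): for a Context object at 0xf0010000 the aligned stack top is
// 0xf0010800, so regs() returns 0xf0010800 - sizeof(Entry_frame), i.e. the
// Entry_frame that sits at the very top of this context's kernel stack.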
692 /** @name Lock counting
693 These functions count the number of locks
694 this context holds. A context must not be deleted if its lock
699 /** Increment lock count.
700 @post lock_cnt() > 0 */
703 Context::inc_lock_cnt()
708 /** Decrement lock count.
713 Context::dec_lock_cnt()
723 Context::lock_cnt() const
731 * Switch active timeslice of this Context.
732 * @param next Sched_context to switch to
736 Context::switch_sched(Sched_context * const next)
738 // Ensure CPU lock protection
739 assert_kdb (cpu_lock.test());
741 // If we're leaving the global timeslice, invalidate it
742 // This causes schedule() to select a new timeslice via set_current_sched()
743 if (sched() == current_sched())
746 // Ensure the new timeslice has a full quantum
747 assert_kdb (next->left() == next->quantum());
757 * Select a different context for running and activate it.
763 Lock_guard <Cpu_lock> guard (&cpu_lock);
767 // Ensure only the current thread calls schedule
768 assert_kdb (this == current());
769 assert_kdb (!_drq_active);
771 unsigned current_cpu = ~0U;
772 Sched_context::Ready_queue *rq = 0;
774 // Enqueue current thread into ready-list to schedule correctly
777 // Select a thread for scheduling.
778 Context *next_to_run;
782 // I may have been migrated during the switch_exec_locked in the while
783 // statement below, so check whether I have to use a new ready queue.
784 if (cpu() != current_cpu)
787 rq = &Sched_context::rq(current_cpu);
788 if (rq->schedule_in_progress)
790 // Nested invocations of schedule() are bugs
791 assert_kdb (!rq->schedule_in_progress);
796 next_to_run = rq->next_to_run()->context();
798 // Ensure ready-list sanity
799 assert_kdb (next_to_run);
801 if (EXPECT_TRUE (next_to_run->state() & Thread_ready_mask))
804 next_to_run->ready_dequeue();
806 rq->schedule_in_progress = this;
812 // check if we've been migrated meanwhile
813 if (EXPECT_FALSE(current_cpu != cpu()))
816 rq = &Sched_context::rq(current_cpu);
817 if (rq->schedule_in_progress)
821 rq->schedule_in_progress = 0;
824 while (EXPECT_FALSE(schedule_switch_to_locked (next_to_run)));
828 * Return if there is currently a schedule() in progress
832 Context::schedule_in_progress()
834 return sched()->schedule_in_progress(cpu());
840 Context::reset_schedule_in_progress()
841 { sched()->reset_schedule_in_progress(cpu()); }
845 * Return true if s can preempt the current scheduling context, false otherwise
847 PUBLIC static inline NEEDS ["globals.h"]
849 Context::can_preempt_current (Sched_context const *s)
851 assert_kdb (current_cpu() == s->owner()->cpu());
852 return current()->sched()->can_preempt_current(s);
856 * Return currently active global Sched_context.
860 Context::current_sched()
862 return Sched_context::rq(current_cpu()).current_sched();
866 * Set currently active global Sched_context.
870 Context::set_current_sched(Sched_context *sched)
873 // Save remainder of previous timeslice or refresh it, unless it had
875 unsigned cpu = this->cpu();
876 Sched_context::Ready_queue &rq = Sched_context::rq(cpu);
878 Timeout * const tt = timeslice_timeout.cpu(cpu);
879 Unsigned64 clock = Timer::system_clock();
880 if (Sched_context *s = rq.current_sched())
882 Signed64 left = tt->get_timeout(clock);
891 // Program new end-of-timeslice timeout
893 tt->set(clock + sched->left(), cpu);
895 // Make this timeslice current
898 LOG_SCHED_LOAD(sched);
902 * Invalidate (expire) currently active global Sched_context.
904 PROTECTED inline NEEDS["logdefs.h","timeout.h"]
906 Context::invalidate_sched()
908 //LOG_SCHED_INVALIDATE;
909 sched()->invalidate_sched(cpu());
913 * Return Context's Sched_context with id 'id'; return time slice 0 as default.
914 * @return Sched_context with id 'id' or 0
918 Context::sched_context(unsigned short const id = 0) const
920 if (EXPECT_TRUE (!id))
921 return const_cast<Sched_context*>(&_sched_context);
923 for (Sched_context *tmp = _sched_context.next();
924 tmp != &_sched_context; tmp = tmp->next())
932 * Return Context's currently active Sched_context.
933 * @return Active Sched_context
937 Context::sched() const
943 * Set Context's currently active Sched_context.
944 * @param sched Sched_context to be activated
948 Context::set_sched (Sched_context * const sched)
954 * Return Context's real-time period length.
955 * @return Period length in usecs
959 Context::period() const
965 * Set Context's real-time period length.
966 * @param period New period length in usecs
970 Context::set_period (Unsigned64 const period)
976 * Return Context's scheduling mode.
977 * @return Scheduling mode
981 Context::mode() const
987 * Set Context's scheduling mode.
988 * @param mode New scheduling mode
992 Context::set_mode (Context::Sched_mode const mode)
999 // XXX for now, synchronize with global kernel lock
1003 * Enqueue current() if ready to fix up ready-list invariant.
1005 PRIVATE inline NOEXPORT
1007 Context::update_ready_list()
1009 assert_kdb (this == current());
1011 if (state() & Thread_ready_mask)
1016 * Check if Context is in ready-list.
1017 * @return 1 if thread is in ready-list, 0 otherwise
1021 Context::in_ready_list() const
1023 return sched()->in_ready_list();
1027 * Enqueue context in ready-list.
1031 Context::ready_enqueue()
1033 assert_kdb(current_cpu() == cpu());
1034 //Lock_guard <Cpu_lock> guard (&cpu_lock);
1036 // Don't enqueue threads that are not ready or have no time of their own
1037 if (EXPECT_FALSE (!(state() & Thread_ready_mask) || !sched()->left()))
1040 sched()->ready_enqueue(cpu());
1045 * \brief Activate a newly created thread.
1047 * This function sets a new thread onto the ready list and switches to
1048 * the thread if it can preempt the currently running thread.
1054 Lock_guard <Cpu_lock> guard (&cpu_lock);
1055 if (cpu() == current_cpu())
1057 state_add_dirty(Thread_ready);
1058 if (sched()->deblock(cpu(), current()->sched(), true))
1060 current()->switch_to_locked(this);
1065 remote_ready_enqueue();
1071 * Remove context from ready-list.
1073 PUBLIC inline NEEDS ["cpu_lock.h", "lock_guard.h", "std_macros.h"]
1075 Context::ready_dequeue()
1077 assert_kdb(current_cpu() == cpu());
1078 sched()->ready_dequeue();
1081 /** Helper. Context that helps us by donating its time to us. It is
1082 set by switch_exec() if the calling thread says so.
1083 @return context that helps us and should be activated after freeing a lock.
1087 Context::helper() const
1094 Context::set_helper (enum Helping_mode const mode)
1099 _helper = current();
1104 case Ignore_Helping:
1105 // don't change _helper value
1110 /** Donatee. Context that receives our time slices, for example
1111 because it has locked us.
1112 @return context that should be activated instead of us when we're
1117 Context::donatee() const
1124 Context::set_donatee (Context * const donatee)
1131 Context::get_kernel_sp() const
1138 Context::set_kernel_sp (Mword * const esp)
1145 Context::fpu_state()
1151 * Add to consumed CPU time.
1152 * @param quantum Implementation-specific time quantum (TSC ticks or usecs)
1156 Context::consume_time(Clock::Time quantum)
1158 _consumed_time += quantum;
1162 * Update consumed CPU time during each context switch and when
1163 * reading out the current thread's consumed CPU time.
1165 IMPLEMENT inline NEEDS ["cpu.h"]
1167 Context::update_consumed_time()
1169 if (Config::fine_grained_cputime)
1170 consume_time (_clock.cpu(cpu()).delta());
1173 IMPLEMENT inline NEEDS ["config.h", "cpu.h"]
1175 Context::consumed_time()
1177 if (Config::fine_grained_cputime)
1178 return _clock.cpu(cpu()).us(_consumed_time);
1180 return _consumed_time;
1184 * Switch to scheduling context and execution context while not running under
1187 PUBLIC inline NEEDS [<cassert>]
1189 Context::switch_to (Context *t)
1191 // Call switch_to_locked if CPU lock is already held
1192 assert (!cpu_lock.test());
1194 // Grab the CPU lock
1195 Lock_guard <Cpu_lock> guard (&cpu_lock);
1197 switch_to_locked (t);
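// Usage sketch (illustrative; "partner" is a made-up name): switch_to() is
// for paths that do not yet hold the CPU lock, switch_to_locked() for paths
// that already do.
//
//   if (!cpu_lock.test())
//     current()->switch_to(partner);          // grabs the CPU lock itself
//   else
//     current()->switch_to_locked(partner);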
1201 * Switch scheduling context and execution context.
1202 * @param t Destination thread whose scheduling context and execution context
1203 * should be activated.
1205 PRIVATE inline NEEDS ["kdb_ke.h"]
1206 bool FIASCO_WARN_RESULT
1207 Context::schedule_switch_to_locked(Context *t)
1209 // Must be called with CPU lock held
1210 assert_kdb (cpu_lock.test());
1212 // Switch to destination thread's scheduling context
1213 if (current_sched() != t->sched())
1214 set_current_sched(t->sched());
1216 // XXX: IPC dependency tracking belongs here.
1218 // Switch to destination thread's execution context, no helping involved
1220 return switch_exec_locked(t, Not_Helping);
1222 return handle_drq();
1225 PUBLIC inline NEEDS [Context::schedule_switch_to_locked]
1227 Context::switch_to_locked(Context *t)
1229 if (EXPECT_FALSE(schedule_switch_to_locked(t)))
1235 * Switch execution context while not running under CPU lock.
1237 PUBLIC inline NEEDS ["kdb_ke.h"]
1238 bool FIASCO_WARN_RESULT
1239 Context::switch_exec (Context *t, enum Helping_mode mode)
1241 // Call switch_exec_locked if CPU lock is already held
1242 assert_kdb (!cpu_lock.test());
1244 // Grab the CPU lock
1245 Lock_guard <Cpu_lock> guard (&cpu_lock);
1247 return switch_exec_locked (t, mode);
1251 * Switch to a specific different execution context.
1252 * If that context is currently locked, switch to its locker instead
1253 * (except if current() is the locker)
1254 * @pre current() == this && current() != t
1255 * @param t thread that shall be activated.
1256 * @param mode helping mode; we either help, don't help or leave the
1257 * helping state unchanged
1260 bool FIASCO_WARN_RESULT //L4_IPC_CODE
1261 Context::switch_exec_locked (Context *t, enum Helping_mode mode)
1263 // Must be called with CPU lock held
1264 assert_kdb (cpu_lock.test());
1265 if (t->cpu() != current_cpu()){ printf("%p => %p\n", this, t); kdb_ke("ass"); } assert_kdb (t->cpu() == current_cpu());
1266 assert_kdb (current() != t);
1267 assert_kdb (current() == this);
1268 assert_kdb (timeslice_timeout.cpu(cpu())->is_set()); // Coma check
1271 Context *t_orig = t;
1274 // Time-slice lending: if t is locked, switch to its locker
1275 // instead, this is transitive
1276 while (t->donatee() && // target thread locked
1277 t->donatee() != t) // not by itself
1279 // Special case for Thread::kill(): If the locker is
1280 // current(), switch to the locked thread to allow it to
1281 // release other locks. Do this only when the target thread
1282 // actually owns locks.
1283 if (t->donatee() == current())
1285 if (t->lock_cnt() > 0)
1288 return handle_drq();
1297 // Can only switch to ready threads!
1298 if (EXPECT_FALSE (!(t->state() & Thread_ready_mask)))
1300 assert_kdb (state() & Thread_ready_mask);
1305 // Ensure kernel stack pointer is non-null if thread is ready
1306 assert_kdb (t->_kernel_sp);
1308 t->set_helper (mode);
1310 update_ready_list();
1311 assert_kdb (!(state() & Thread_ready_mask) || !sched()->left()
1312 || in_ready_list());
1317 return handle_drq();
1320 PUBLIC inline NEEDS[Context::switch_exec_locked, Context::schedule]
1322 Context::switch_exec_schedule_locked (Context *t, enum Helping_mode mode)
1324 if (EXPECT_FALSE(switch_exec_locked(t, mode)))
1330 Context::local_id() const
1337 Context::local_id (Local_id id)
1344 Context::utcb() const
1351 Context::utcb (Utcb *u)
1357 IMPLEMENT inline NEEDS["globals.h"]
1359 Context::Context_member::context()
1360 { return context_of(this); }
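// Illustrative sketch (an assumption about context_of(), which lives in
// globals.h): the back reference is computed purely from the member's
// address by masking it down to the start of the enclosing TCB block,
// roughly
//
//   Context *context_of(void const *p)
//   {
//     return reinterpret_cast<Context *>(
//         reinterpret_cast<Mword>(p) & ~(Context::size - 1));
//   }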
1362 IMPLEMENT inline NEEDS["lock_guard.h", "kdb_ke.h"]
1364 Context::Drq_q::enq(Drq *rq)
1366 assert_kdb(cpu_lock.test());
1367 Lock_guard<Inner_lock> guard(q_lock());
1373 Context::do_drq_reply(Drq *r, Drq_q::Drop_mode drop)
1375 state_change_dirty(~Thread_drq_wait, Thread_ready);
1376 // r->state = Drq::Reply_handled;
1377 if (drop == Drq_q::No_drop && r->reply)
1378 return r->reply(r, this, r->arg) & Drq::Need_resched;
1383 IMPLEMENT inline NEEDS[Context::do_drq_reply]
1385 Context::Drq_q::execute_request(Drq *r, Drop_mode drop, bool local)
1387 bool need_resched = false;
1388 Context *const self = context();
1389 // printf("CPU[%2u:%p]: context=%p: handle request for %p (func=%p, arg=%p)\n", current_cpu(), current(), context(), r->context(), r->func, r->arg);
1390 if (r->context() == self)
1392 LOG_TRACE("DRQ handling", "drq", current(), __context_drq_log_fmt,
1393 Drq_log *l = tbe->payload<Drq_log>();
1395 l->func = (void*)r->func;
1396 l->reply = (void*)r->reply;
1397 l->thread = r->context();
1398 l->target_cpu = current_cpu();
1401 //LOG_MSG_3VAL(current(), "hrP", current_cpu() | (drop ? 0x100: 0), (Mword)r->context(), (Mword)r->func);
1402 return self->do_drq_reply(r, drop);
1406 LOG_TRACE("DRQ handling", "drq", current(), __context_drq_log_fmt,
1407 Drq_log *l = tbe->payload<Drq_log>();
1408 l->type = "request";
1409 l->func = (void*)r->func;
1410 l->reply = (void*)r->reply;
1411 l->thread = r->context();
1412 l->target_cpu = current_cpu();
1415 // r->state = Drq::Idle;
1416 unsigned answer = 0;
1417 //LOG_MSG_3VAL(current(), "hrq", current_cpu() | (drop ? 0x100: 0), (Mword)r->context(), (Mword)r->func);
1418 if (EXPECT_TRUE(drop == No_drop && r->func))
1419 answer = r->func(r, self, r->arg);
1420 else if (EXPECT_FALSE(drop == Drop))
1421 // flag DRQ abort for requester
1423 // LOG_MSG_3VAL(current(), "hrq-", answer, current()->state() /*(Mword)r->context()*/, (Mword)r->func);
1424 need_resched |= answer & Drq::Need_resched;
1425 //r->state = Drq::Handled;
1428 if (!(answer & Drq::No_answer))
1431 return r->context()->do_drq_reply(r, drop) || need_resched;
1433 need_resched |= r->context()->enqueue_drq(r, Drq::Target_ctxt);
1436 return need_resched;
1439 IMPLEMENT inline NEEDS["mem.h", "lock_guard.h"]
1441 Context::Drq_q::handle_requests(Drop_mode drop)
1443 // printf("CPU[%2u:%p]: > Context::Drq_q::handle_requests() context=%p\n", current_cpu(), current(), context());
1444 bool need_resched = false;
1449 Lock_guard<Inner_lock> guard(q_lock());
1452 return need_resched;
1454 check_kdb (dequeue(qi, Queue_item::Ok));
1457 Drq *r = static_cast<Drq*>(qi);
1458 // printf("CPU[%2u:%p]: context=%p: handle request for %p (func=%p, arg=%p)\n", current_cpu(), current(), context(), r->context(), r->func, r->arg);
1459 need_resched |= execute_request(r, drop, false);
1463 * \brief Forced dequeue from the lock wait queue or the DRQ queue.
1467 Context::force_dequeue()
1469 Queue_item *const qi = queue_item();
1473 // we're waiting for a lock or have a DRQ pending
1474 Queue *const q = qi->queue();
1476 Lock_guard<Queue::Inner_lock> guard(q->q_lock());
1477 // check again, with the queue lock held.
1478 // NOTE: we may be already removed from the queue on another CPU
1479 if (qi->queued() && qi->queue())
1481 // we must never be taken from one queue to another on a
1483 assert_kdb(q == qi->queue());
1484 // pull myself out of the queue, mark reason as invalidation
1485 q->dequeue(qi, Queue_item::Invalid);
1492 * \brief Dequeue from lock and DRQ queues, abort pending DRQs
1496 Context::shutdown_queues()
1504 * \brief Check for pending DRQs.
1505 * \return true if there are DRQs pending, false if not.
1509 Context::drq_pending() const
1510 { return _drq_q.first(); }
1514 Context::try_finish_migration()
1516 if (EXPECT_FALSE(_migration_rq.in_progress))
1518 _migration_rq.in_progress = false;
1525 * \brief Handle all pending DRQs.
1526 * \pre cpu_lock.test() (The CPU lock must be held).
1527 * \pre current() == this (only the currently running context is allowed to
1528 * call this function).
1529 * \return true if re-scheduling is needed (ready queue has changed),
1534 Context::handle_drq()
1536 //LOG_MSG_3VAL(this, ">drq", _drq_active, 0, cpu_lock.test());
1537 assert_kdb (current_cpu() == this->cpu());
1538 assert_kdb (cpu_lock.test());
1540 try_finish_migration();
1554 ret |= _drq_q.handle_requests();
1556 Lock_guard<Drq_q::Inner_lock> guard(_drq_q.q_lock());
1557 if (EXPECT_TRUE(!drq_pending()))
1559 state_del_dirty(Thread_drq_ready);
1565 //LOG_MSG_3VAL(this, "xdrq", state(), ret, cpu_lock.test());
1568 * When the context is marked as dead (Thread_dead) we must not execute
1569 * any normal context code; DRQ handlers, however, may still run.
1571 if (state() & Thread_dead)
1573 // so disable the context after handling all DRQs and flag a reschedule.
1574 state_del_dirty(Thread_ready_mask);
1578 return ret || !(state() & Thread_ready_mask);
1583 * \brief Get the queue item of the context.
1584 * \pre The context must currently not be in any queue.
1585 * \return The queue item of the context.
1587 * The queue item can be used to enqueue the context into a Queue.
1588 * A context must be in at most one queue at a time.
1589 * To figure out the context corresponding to a queue item,
1590 * context_of() can be used.
1592 PUBLIC inline NEEDS["kdb_ke.h"]
1594 Context::queue_item()
1600 * \brief DRQ handler for state_change.
1602 * This function basically wraps Context::state_change().
1606 Context::handle_drq_state_change(Drq * /*src*/, Context *self, void * _rq)
1608 State_request *rq = reinterpret_cast<State_request*>(_rq);
1609 self->state_change_dirty(rq->del, rq->add);
1610 //LOG_MSG_3VAL(c, "dsta", c->state(), (Mword)src, (Mword)_rq);
1616 * \brief Queue a DRQ for changing the contexts state.
1617 * \param mask bit mask for the state (state &= mask).
1618 * \param add bits to add to the state (state |= add).
1619 * \note This function is a preemption point.
1621 * This function must be used to change the state of contexts that are
1622 * potentially running on a different CPU.
1624 PUBLIC inline NEEDS[Context::drq]
1626 Context::drq_state_change(Mword mask, Mword add)
1631 drq(handle_drq_state_change, &rq);
1636 * \brief Initiate a DRQ for the context.
1637 * \pre \a src must be the currently running context.
1638 * \param src the source of the DRQ (the context who initiates the DRQ).
1639 * \param func the DRQ handler.
1640 * \param arg the argument for the DRQ handler.
1641 * \param reply the reply handler (called in the context of \a src immediately
1642 * after receiving a successful reply).
1644 * DRQs are requests that any context can queue to any other context. DRQs are
1645 * the basic mechanism for initiating actions on remote CPUs in an MP system;
1646 * however, they are also allowed locally.
1647 * DRQ handlers of pending DRQs are executed by Context::handle_drq() in the
1648 * context of the target context. Context::handle_drq() is basically called
1649 * after switching to a context in Context::switch_exec_locked().
1651 * This function enqueues a DRQ and blocks the current context for a reply DRQ.
1653 PUBLIC inline NEEDS[Context::enqueue_drq]
1655 Context::drq(Drq *drq, Drq::Request_func *func, void *arg,
1656 Drq::Request_func *reply = 0,
1657 Drq::Exec_mode exec = Drq::Target_ctxt,
1658 Drq::Wait_mode wait = Drq::Wait)
1660 // printf("CPU[%2u:%p]: > Context::drq(this=%p, src=%p, func=%p, arg=%p)\n", current_cpu(), current(), this, src, func,arg);
1661 Context *cur = current();
1662 LOG_TRACE("DRQ Stuff", "drq", cur, __context_drq_log_fmt,
1663 Drq_log *l = tbe->payload<Drq_log>();
1665 l->func = (void*)func;
1666 l->reply = (void*)reply;
1668 l->target_cpu = cpu();
1671 //assert_kdb (current() == src);
1672 assert_kdb (!(wait == Drq::Wait && (cur->state() & Thread_drq_ready)) || cur->cpu() == cpu());
1673 assert_kdb (!((wait == Drq::Wait || drq == &_drq) && cur->state() & Thread_drq_wait));
1674 assert_kdb (!drq->queued());
1679 cur->state_add(wait == Drq::Wait ? Thread_drq_wait : 0);
1682 enqueue_drq(drq, exec);
1684 //LOG_MSG_3VAL(src, "<drq", src->state(), Mword(this), 0);
1685 while (wait == Drq::Wait && cur->state() & Thread_drq_wait)
1687 cur->state_del(Thread_ready_mask);
1691 LOG_TRACE("DRQ Stuff", "drq", cur, __context_drq_log_fmt,
1692 Drq_log *l = tbe->payload<Drq_log>();
1694 l->func = (void*)func;
1695 l->reply = (void*)reply;
1697 l->target_cpu = cpu();
1699 //LOG_MSG_3VAL(src, "drq>", src->state(), Mword(this), 0);
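// Usage sketch (illustrative only; the handler name and "victim" are made
// up): run a handler in the context of a possibly remote Context and block
// until the reply has been processed on the caller's side.
//
//   static unsigned
//   handle_dump(Context::Drq *, Context *target, void *)
//   {
//     // executed on target's CPU, in target's context
//     return 0;                   // or Drq::Need_resched / Drq::No_answer
//   }
//
//   victim->drq(handle_dump, 0);  // convenience overload, uses current()->_drq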
1704 Context::kernel_context_drq(Drq::Request_func *func, void *arg,
1705 Drq::Request_func *reply = 0)
1707 char align_buffer[2*sizeof(Drq)];
1708 Drq *mdrq = (Drq*)((Address(align_buffer) + __alignof__(Drq) - 1) & ~(__alignof__(Drq)-1));
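// The expression above rounds the buffer address up to the next
// __alignof__(Drq) boundary, e.g. with an alignment of 8 an address of
// 0x1003 becomes (0x1003 + 7) & ~7 = 0x1008.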
1712 mdrq->reply = reply;
1713 Context *kc = kernel_context(current_cpu());
1714 kc->_drq_q.enq(mdrq);
1715 bool resched = schedule_switch_to_locked(kc);
1719 PUBLIC inline NEEDS[Context::drq]
1721 Context::drq(Drq::Request_func *func, void *arg,
1722 Drq::Request_func *reply = 0,
1723 Drq::Exec_mode exec = Drq::Target_ctxt,
1724 Drq::Wait_mode wait = Drq::Wait)
1725 { return drq(&current()->_drq, func, arg, reply, exec, wait); }
1729 Context::rcu_unblock(Rcu_item *i)
1731 assert_kdb(cpu_lock.test());
1732 Context *const c = static_cast<Context*>(i);
1733 c->state_change_dirty(~Thread_waiting, Thread_ready);
1734 c->sched()->deblock(c->cpu());
1740 Context::recover_jmp_buf(jmp_buf *b)
1741 { _recover_jmpbuf = b; }
1743 //----------------------------------------------------------------------------
1744 IMPLEMENTATION [!mp]:
1751 Context::cpu(bool = false) const
1757 Context::remote_ready_enqueue()
1759 WARN("Context::remote_ready_enqueue(): in UP system !\n");
1760 kdb_ke("Fiasco BUG");
1765 Context::enqueue_drq(Drq *rq, Drq::Exec_mode)
1767 bool sched = _drq_q.execute_request(rq, Drq_q::No_drop, true);
1768 if (!in_ready_list() && (state() & Thread_ready_mask))
1778 PRIVATE inline NOEXPORT
1780 Context::shutdown_drqs()
1781 { _drq_q.handle_requests(Drq_q::Drop); }
1788 // The UP case does not need to block for the next grace period, because
1789 // the CPU is always in a quiescent state when interrupts were enabled
1792 PUBLIC static inline
1794 Context::xcpu_tlb_flush()
1798 //----------------------------------------------------------------------------
1802 #include "queue_item.h"
1804 EXTENSION class Context
1808 class Pending_rqq : public Queue
1811 static void enq(Context *c);
1812 bool handle_requests(Context **);
1815 class Pending_rq : public Queue_item, public Context_member
1819 Pending_rq _pending_rq;
1822 static Per_cpu<Pending_rqq> _pending_rqq;
1823 static Per_cpu<Drq_q> _glbl_drq_q;
1829 //----------------------------------------------------------------------------
1830 IMPLEMENTATION [mp]:
1832 #include "globals.h"
1835 #include "lock_guard.h"
1838 Per_cpu<Context::Pending_rqq> DEFINE_PER_CPU Context::_pending_rqq;
1839 Per_cpu<Context::Drq_q> DEFINE_PER_CPU Context::_glbl_drq_q;
1843 * \brief Enqueue the given \a c into its CPU's queue.
1844 * \param c the context to enqueue for DRQ handling.
1846 IMPLEMENT inline NEEDS["globals.h", "lock_guard.h", "kdb_ke.h"]
1848 Context::Pending_rqq::enq(Context *c)
1850 // FIXME: is it safe to do the check without a locked queue, or may
1851 // we lose DRQs then?
1853 //if (!c->_pending_rq.queued())
1855 Queue &q = Context::_pending_rqq.cpu(c->cpu());
1856 Lock_guard<Inner_lock> guard(q.q_lock());
1857 if (c->_pending_rq.queued())
1859 q.enqueue(&c->_pending_rq);
1865 * \brief Wake up all contexts with pending DRQs.
1867 * This function wakes up all contexts from the pending queue.
1871 Context::Pending_rqq::handle_requests(Context **mq)
1873 //LOG_MSG_3VAL(current(), "phq", current_cpu(), 0, 0);
1874 // printf("CPU[%2u:%p]: Context::Pending_rqq::handle_requests() this=%p\n", current_cpu(), current(), this);
1875 bool resched = false;
1876 Context *curr = current();
1881 Lock_guard<Inner_lock> guard(q_lock());
1885 check_kdb (dequeue(qi, Queue_item::Ok));
1887 Context *c = static_cast<Context::Pending_rq *>(qi)->context();
1888 //LOG_MSG_3VAL(c, "pick", c->state(), c->cpu(), current_cpu());
1889 // Drop migrated threads
1890 assert_kdb (EXPECT_FALSE(c->cpu() == current_cpu()));
1892 if (EXPECT_TRUE(c->drq_pending()))
1893 c->state_add(Thread_drq_ready);
1895 if (EXPECT_FALSE(c->_migration_rq.pending))
1899 c->initiate_migration();
1909 c->try_finish_migration();
1911 if (EXPECT_TRUE((c->state() & Thread_ready_mask)))
1913 //printf("CPU[%2u:%p]: Context::Pending_rqq::handle_requests() dequeded %p(%u)\n", current_cpu(), current(), c, qi->queued());
1914 resched |= c->sched()->deblock(current_cpu(), current()->sched(), false);
1921 Context::global_drq(unsigned cpu, Drq::Request_func *func, void *arg,
1922 Drq::Request_func *reply = 0, bool wait = true)
1924 assert_kdb (this == current());
1930 state_add(wait ? Thread_drq_wait : 0);
1932 _glbl_drq_q.cpu(cpu).enq(&_drq);
1934 Ipi::cpu(cpu).send(Ipi::Global_request);
1936 //LOG_MSG_3VAL(src, "<drq", src->state(), Mword(this), 0);
1937 while (wait && (state() & Thread_drq_wait))
1939 state_del(Thread_ready_mask);
1947 Context::handle_global_requests()
1949 return _glbl_drq_q.cpu(current_cpu()).handle_requests();
1954 Context::enqueue_drq(Drq *rq, Drq::Exec_mode /*exec*/)
1956 assert_kdb (cpu_lock.test());
1957 // printf("CPU[%2u:%p]: Context::enqueue_request(this=%p, src=%p, func=%p, arg=%p)\n", current_cpu(), current(), this, src, func,arg);
1959 if (cpu() != current_cpu())
1964 // re-read the CPU; we may have been migrated meanwhile
1965 unsigned cpu = this->cpu();
1968 Queue &q = Context::_pending_rqq.cpu(cpu);
1969 Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());
1972 // migrated between getting the lock and reading the CPU, so the
1973 // new CPU is responsible for executing our request
1974 if (this->cpu() != cpu)
1980 if (!_pending_rq.queued())
1981 q.enqueue(&_pending_rq);
1986 //LOG_MSG_3VAL(this, "sipi", current_cpu(), cpu(), (Mword)current());
1987 Ipi::cpu(cpu).send(Ipi::Request);
1991 { // LOG_MSG_3VAL(this, "adrq", state(), (Mword)current(), (Mword)rq);
1993 bool sched = _drq_q.execute_request(rq, Drq_q::No_drop, true);
1994 if (!in_ready_list() && (state() & Thread_ready_mask))
2006 PRIVATE inline NOEXPORT
2008 Context::shutdown_drqs()
2010 if (_pending_rq.queued())
2012 Lock_guard<Pending_rqq::Inner_lock> guard(_pending_rq.queue()->q_lock());
2013 if (_pending_rq.queued())
2014 _pending_rq.queue()->dequeue(&_pending_rq, Queue_item::Ok);
2017 _drq_q.handle_requests(Drq_q::Drop);
2023 Context::cpu(bool running = false) const
2031 * DRQ handler for doing a ready enqueue on a remote CPU.
2033 * See remote_ready_enqueue().
2037 Context::handle_remote_ready_enqueue(Drq *, Context *self, void *)
2039 self->state_add_dirty(Thread_ready);
2044 PROTECTED inline NEEDS[Context::handle_remote_ready_enqueue]
2046 Context::remote_ready_enqueue()
2047 { drq(&handle_remote_ready_enqueue, 0); }
2052 * Block and wait for the next grace period.
2054 PUBLIC inline NEEDS["cpu_lock.h", "lock_guard.h"]
2058 Lock_guard<Cpu_lock> guard(&cpu_lock);
2059 state_change_dirty(~Thread_ready, Thread_waiting);
2060 Rcu::call(this, &rcu_unblock);
2068 Context::handle_remote_tlb_flush(Drq *, Context *, void *)
2070 // printf("RCV XCPU_FLUSH (%d)\n", current_cpu());
2071 if (!current()->space())
2074 Mem_space *ms = current()->mem_space();
2075 bool need_flush = ms->need_tlb_flush();
2077 ms->tlb_flush(true);
2085 Context::xcpu_tlb_flush()
2087 //printf("XCPU_ TLB FLUSH\n");
2088 Lock_guard<Cpu_lock> g(&cpu_lock);
2089 unsigned ccpu = current_cpu();
2090 for (unsigned i = 0; i < Config::Max_num_cpus; ++i)
2092 if (ccpu != i && Cpu::online(i))
2093 current()->global_drq(i, Context::handle_remote_tlb_flush, 0);
2099 //----------------------------------------------------------------------------
2100 IMPLEMENTATION [fpu && !ux]:
2105 * When switching away from the FPU owner, disable the FPU to cause
2106 * the next FPU access to trap.
2107 * When switching back to the FPU owner, enable the FPU so we don't
2108 * get an FPU trap on FPU access.
2110 IMPLEMENT inline NEEDS ["fpu.h"]
2112 Context::switch_fpu(Context *t)
2114 if (Fpu::is_owner(cpu(), this))
2116 else if (Fpu::is_owner(cpu(), t) && !(t->state() & Thread_vcpu_fpu_disabled))
2120 //----------------------------------------------------------------------------
2121 IMPLEMENTATION [!fpu]:
2125 Context::switch_fpu(Context *)
2128 //----------------------------------------------------------------------------
2129 IMPLEMENTATION [ux]:
2133 Context::boost_idle_prio(unsigned _cpu)
2135 // Boost the prio of the idle thread so that it can actually get some
2136 // CPU and take down the system.
2137 kernel_context(_cpu)->ready_dequeue();
2138 kernel_context(_cpu)->sched()->set_prio(255);
2139 kernel_context(_cpu)->ready_enqueue();
2142 // --------------------------------------------------------------------------
2143 IMPLEMENTATION [debug]:
2145 #include "kobject_dbg.h"
2149 Context::drq_log_fmt(Tb_entry *e, int maxlen, char *buf)
2151 Drq_log *l = e->payload<Drq_log>();
2152 return snprintf(buf, maxlen, "drq %s(%s) to ctxt=%lx/%p (func=%p, reply=%p) cpu=%u",
2153 l->type, l->wait ? "wait" : "no-wait", Kobject_dbg::pointer_to_id(l->thread),
2154 l->thread, l->func, l->reply, l->target_cpu);