#include "continuation.h"
#include "member_offs.h"
#include "per_cpu_data.h"
#include "queue_item.h"
#include "sched_context.h"
#include "spin_lock.h"
explicit Context_ptr(unsigned long id) : _t(id) {}
Context_ptr(Context_ptr const &o) : _t(o._t) {}
Context_ptr const &operator = (Context_ptr const &o)
{ _t = o._t; return *this; }
Kobject_iface *ptr(Space *, unsigned char *) const;
bool is_kernel() const { return false; }
bool is_valid() const { return _t != ~0UL; }
// only for debugging use
Mword raw() const { return _t; }
template< typename T >
class Context_ptr_base : public Context_ptr
enum Invalid_type { Invalid };
explicit Context_ptr_base(Invalid_type) : Context_ptr(0) {}
explicit Context_ptr_base(unsigned long id) : Context_ptr(id) {}
Context_ptr_base(Context_ptr_base<T> const &o) : Context_ptr(o) {}
template< typename X >
Context_ptr_base(Context_ptr_base<X> const &o) : Context_ptr(o)
{ X *x = 0; T *t = x; (void)t; }
Context_ptr_base<T> const &operator = (Context_ptr_base<T> const &o)
{ Context_ptr::operator = (o); return *this; }
template< typename X >
Context_ptr_base<T> const &operator = (Context_ptr_base<X> const &o)
{ X *x = 0; T *t = x; (void)t; Context_ptr::operator = (o); return *this; }
//T *ptr(Space *s) const { return static_cast<T*>(Context_ptr::ptr(s)); }
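/*
 * Note (illustrative, not part of the original source): the `X *x = 0; T *t = x;`
 * statements above are a compile-time check that X* converts to T*, i.e. that X
 * is derived from T, so a Context_ptr_base<Derived> may be copied into a
 * Context_ptr_base<Base> but not the other way around.  A minimal sketch:
 *
 *   Context_ptr_base<Thread>  tp(0x42);   // Thread derives from Context
 *   Context_ptr_base<Context> cp(tp);     // ok: Thread* converts to Context*
 *   //Context_ptr_base<Thread> bad(cp);   // rejected at compile time
 */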
class Present_list_item : public D_list_item
static Spin_lock _plist_lock;
static Present_list_item *head;
/** An execution context. A context is a runnable, schedulable activity.
It carries along some state used by other subsystems: a lock count,
and stack-element forward/next pointers.
public Global_context_data,
private Present_list_item,
friend class Jdb_thread_list;
friend class Context_ptr;
friend class Jdb_utcb;
virtual void finish_migration() = 0;
virtual void initiate_migration() = 0;
* \brief Encapsulate an aggregate of Context.
* Allows getting a back reference to the aggregating Context object.
* \brief Get the aggregating Context object.
* \brief Deferred Request.
* Represents a request that can be queued for each Context
* and is executed by the target context just after switching to the
class Drq : public Queue_item, public Context_member
typedef unsigned (Request_func)(Drq *, Context *target, void *);
enum { Need_resched = 1, No_answer = 2 };
enum Wait_mode { No_wait = 0, Wait = 1 };
enum Exec_mode { Target_ctxt = 0, Any_ctxt = 1 };
// enum State { Idle = 0, Handled = 1, Reply_handled = 2 };
* \brief Queue for deferred requests (Drq).
* A FIFO queue each Context aggregates to queue incoming Drqs
* that have to be executed directly after switching to a context.
class Drq_q : public Queue, public Context_member
enum Drop_mode { Drop = true, No_drop = false };
bool handle_requests(Drop_mode drop = No_drop);
bool execute_request(Drq *r, Drop_mode drop, bool local);
struct Migration_info
* Definition of different scheduling modes
Periodic = 0x1, ///< 0 = Conventional, 1 = Periodic
Nonstrict = 0x2, ///< 0 = Strictly Periodic, 1 = Non-strictly periodic
* Definition of different helping modes
// FIXME: remove this function!
Mword is_tcb_mapped() const;
* Size of a Context (TCB + kernel stack)
static const size_t size = Config::thread_block_size;
* Return consumed CPU time.
* @return Consumed CPU time in usecs
Cpu_time consumed_time();
* Get the kernel UTCB pointer.
* @return UTCB pointer, or 0 if there is no UTCB
* Get the local ID of the context.
Local_id local_id() const;
* Set the local ID of the context.
* Does not touch the kernel UTCB pointer, since
* we would need space() to do the address translation.
* After setting the local ID and mapping the UTCB area, use
* Thread::utcb_init() to set the kernel UTCB pointer and initialize the
void local_id (Local_id id);
virtual bool kill() = 0;
void spill_user_state();
void fill_user_state();
* Update consumed CPU time during each context switch and when
* reading out the current thread's consumed CPU time.
void update_consumed_time();
* Set the kernel UTCB pointer.
* Does NOT keep the value of _local_id in sync.
* @see local_id (Local_id id);
friend class Jdb_tcb;
/// low level page table switching stuff
void switchin_context(Context *) asm ("switchin_context_label") FIASCO_FASTCALL;
/// low level fpu switching stuff
void switch_fpu (Context *t);
/// low level cpu switching stuff
void switch_cpu (Context *t);
Spin_lock_coloc<Space *> _space;
// how many locks does this thread hold on other threads;
// incremented in Thread::lock, decremented in Thread::clear.
// Thread::kill needs to know.
Thread_lock * const _thread_lock;
// The scheduling parameters. We would only need to keep an
// anonymous reference to them as we do not need them ourselves, but
// we aggregate them for performance reasons.
Sched_context _sched_context;
Sched_context * _sched;
// Pointer to floating point register state
Fpu_state _fpu_state;
// Implementation-specific consumed CPU time (TSC ticks or usecs)
Clock::Time _consumed_time;
// for trigger_exception
Continuation _exc_cont;
Migration_rq() : pending(false), in_progress(false) {}
// XXX Timeout for both sender and receiver! Normally we would have
// to define separate timeouts in Receiver and Sender, but because only one
// timeout can be set at a time we use the same timeout. The timeout
// has to be defined here because Dirq::hit has to be able to reset the
// timeout (Irq::_irq_thread is of type Receiver).
Spin_lock _affinity_lock;
static Per_cpu<Clock> _clock;
static Per_cpu<Context *> _kernel_ctxt;
#include "tb_entry.h"
EXTENSION class Context
static unsigned drq_log_fmt(Tb_entry *, int, char *)
asm ("__context_drq_log_fmt");
// --------------------------------------------------------------------------
#include "cpu_lock.h"
#include "entry_frame.h"
#include "globals.h" // current()
#include "lock_guard.h"
#include "mem_layout.h"
#include "processor.h"
#include "std_macros.h"
#include "thread_state.h"
Per_cpu<Clock> DEFINE_PER_CPU Context::_clock(true);
Per_cpu<Context *> DEFINE_PER_CPU Context::_kernel_ctxt;
Spin_lock Present_list_item::_plist_lock INIT_PRIORITY(EARLY_INIT_PRIO);
Present_list_item *Present_list_item::head;
IMPLEMENT inline NEEDS["kdb_ke.h"]
Context_ptr::ptr(Space *s, unsigned char *rights) const
assert_kdb (cpu_lock.test());
return s->obj_space()->lookup_local(_t, rights);
/** Initialize a context. After setup, a switch_exec to this context results
in a return to user code using the return registers at regs(). The
return registers are not initialized, however; neither is the space_context
to be used in thread switching (use set_space_context() for that).
@pre (_kernel_sp == 0) && (* (stack end) == 0)
@param thread_lock pointer to lock used to lock this context
@param space_context the space context
PUBLIC inline NEEDS ["atomic.h", "entry_frame.h", <cstdio>]
Context::Context(Thread_lock *thread_lock)
: _kernel_sp(reinterpret_cast<Mword*>(regs())),
_thread_lock(thread_lock),
_sched(&_sched_context),
_mode(Sched_mode (0))
// NOTE: We do not have to synchronize the initialization of
// _space_context because it is constant for all concurrent
// invocations of this constructor. When two threads concurrently
// try to create a new task, they already synchronize in
// sys_task_new() and avoid calling us twice with different
// space_context arguments.
set_cpu_of(this, current_cpu());
Lock_guard<Spin_lock> guard(&Present_list_item::_plist_lock);
if (Present_list_item::head)
Present_list_item::head->Present_list_item::enqueue(this);
Present_list_item::head = this;
Context::affinity_lock()
{ return &_affinity_lock; }
// If this context owned the FPU, no one owns it now
if (Fpu::is_owner(cpu(), this))
Fpu::set_owner(cpu(), 0);
Lock_guard<Spin_lock> guard(&Present_list_item::_plist_lock);
if (this == Present_list_item::head)
if (Present_list_item::next() != this)
Present_list_item::head = static_cast<Present_list_item*>(Present_list_item::next());
Present_list_item::head = 0;
Present_list_item::dequeue();
Context::state(bool check = true) const
assert_2_kdb(!check || cpu() == current_cpu());
Context::is_tcb_mapped() const
Context::kernel_context(unsigned cpu)
{ return _kernel_ctxt.cpu(cpu); }
PROTECTED static inline
Context::kernel_context(unsigned cpu, Context *ctxt)
{ _kernel_ctxt.cpu(cpu) = ctxt; }
/** @name State manipulation */
* Does the context exist?
* @return true if this context has been initialized.
PUBLIC inline NEEDS ["thread_state.h"]
Context::exists() const
return state() != Thread_invalid;
* Is the context about to be deleted?
* @return true if this context is in deletion.
PUBLIC inline NEEDS ["thread_state.h"]
Context::is_invalid() const
{ return state() == Thread_invalid; }
* Atomically add bits to state flags.
* @param bits bits to be added to state flags
* @return 1 if none of the bits that were added had been set before
PUBLIC inline NEEDS ["atomic.h"]
Context::state_add (Mword const bits)
assert_2_kdb(cpu() == current_cpu());
atomic_or (&_state, bits);
* Add bits in state flags. Unsafe (non-atomic) and
* fast version -- you must hold the kernel lock when you use it.
* @pre cpu_lock.test() == true
* @param bits bits to be added to state flags
Context::state_add_dirty (Mword bits)
assert_2_kdb(cpu() == current_cpu());
* Atomically delete bits from state flags.
* @param bits bits to be removed from state flags
* @return 1 if all of the bits that were removed had previously been set
PUBLIC inline NEEDS ["atomic.h"]
Context::state_del (Mword const bits)
assert_2_kdb (current_cpu() == cpu());
atomic_and (&_state, ~bits);
* Delete bits in state flags. Unsafe (non-atomic) and
* fast version -- you must hold the kernel lock when you use it.
* @pre cpu_lock.test() == true
* @param bits bits to be removed from state flags
Context::state_del_dirty (Mword bits, bool check = true)
assert_2_kdb(!check || cpu() == current_cpu());
* Atomically delete and add bits in state flags, provided the
* following rules apply (otherwise state is not changed at all):
* - Bits that are to be set must be clear in state or clear in mask
* - Bits that are to be cleared must be set in state
* @param mask Bits not set in mask shall be deleted from state flags
* @param bits Bits to be added to state flags
* @return 1 if state was changed, 0 otherwise
PUBLIC inline NEEDS ["atomic.h"]
Context::state_change_safely (Mword const mask, Mword const bits)
assert_2_kdb (current_cpu() == cpu());
if (old & bits & mask | ~old & ~mask)
while (!cas (&_state, old, old & mask | bits));
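/*
 * Worked example (illustrative, not part of the original source), with
 * hypothetical bit values: assume _state == 0x5 (bits 0 and 2 set).
 *
 *   state_change_safely(~0x4UL, 0x2):  clear bit 2, set bit 1.
 *     Bit 1 is not yet set and bit 2 is set, so the CAS succeeds,
 *     _state becomes 0x3 and the call returns 1.
 *
 *   state_change_safely(~0x8UL, 0x1):  clear bit 3, set bit 0.
 *     Bit 0 is already set (and kept by the mask) and bit 3 is not set,
 *     so the preconditions fail, _state stays 0x5 and the call returns 0.
 */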
* Atomically delete and add bits in state flags.
* @param mask bits not set in mask shall be deleted from state flags
* @param bits bits to be added to state flags
PUBLIC inline NEEDS ["atomic.h"]
Context::state_change (Mword const mask, Mword const bits)
assert_2_kdb (current_cpu() == cpu());
return atomic_change (&_state, mask, bits);
* Delete and add bits in state flags. Unsafe (non-atomic) and
* fast version -- you must hold the kernel lock when you use it.
* @pre cpu_lock.test() == true
* @param mask Bits not set in mask shall be deleted from state flags
* @param bits Bits to be added to state flags
Context::state_change_dirty (Mword const mask, Mword const bits, bool check = true)
assert_2_kdb(!check || cpu() == current_cpu());
/** Return the space context.
@return space context used for this execution context.
Set with set_space_context().
PUBLIC inline NEEDS["kdb_ke.h", "cpu_lock.h"]
Context::space() const
//assert_kdb (cpu_lock.test());
return _space.get_unused();
PUBLIC inline NEEDS[Context::space, Context::vcpu_user_space]
Context::vcpu_aware_space() const
if (EXPECT_FALSE(state() & Thread_vcpu_user_mode))
return vcpu_user_space();
/** Convenience function: Return memory space. */
PUBLIC inline NEEDS["space.h"]
Context::mem_space() const
return space()->mem_space();
@return the thread lock used to lock this context.
Context::thread_lock() const
/** Registers used when iret'ing to user mode.
@return return registers
PUBLIC inline NEEDS["cpu.h", "entry_frame.h"]
Context::regs() const
return reinterpret_cast<Entry_frame *>
(Cpu::stack_align(reinterpret_cast<Mword>(this) + size)) - 1;
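/*
 * Layout sketch (illustrative, not part of the original source): a Context and
 * its kernel stack share one block of `size` (Config::thread_block_size) bytes;
 * the Entry_frame lives at the aligned top of that block.  For example, with a
 * hypothetical 8 KiB thread block and a Context at 0xf0002000, the returned
 * Entry_frame ends exactly at the stack-aligned address 0xf0004000.
 */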
/** @name Lock counting
These functions count the number of locks
this context holds. A context must not be deleted if its lock
/** Increment lock count.
@post lock_cnt() > 0 */
Context::inc_lock_cnt()
/** Decrement lock count.
Context::dec_lock_cnt()
Context::lock_cnt() const
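/*
 * Usage sketch (illustrative, not part of the original source): a thread that
 * takes a per-thread lock on another context is expected to bracket the
 * critical section with these counters, e.g.
 *
 *   victim->inc_lock_cnt();   // victim must not be deleted from now on
 *   // ... hold the lock, possibly receive donated time ...
 *   victim->dec_lock_cnt();
 *
 * Thread::kill() consults lock_cnt() to decide whether the context may go away.
 */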
* Switch active timeslice of this Context.
* @param next Sched_context to switch to
Context::switch_sched(Sched_context * const next)
// Ensure CPU lock protection
assert_kdb (cpu_lock.test());
// If we're leaving the global timeslice, invalidate it.
// This causes schedule() to select a new timeslice via set_current_sched()
if (sched() == current_sched())
// Ensure the new timeslice has a full quantum
assert_kdb (next->left() == next->quantum());
* Select a different context for running and activate it.
Lock_guard <Cpu_lock> guard (&cpu_lock);
// Ensure only the current thread calls schedule
assert_kdb (this == current());
assert_kdb (!_drq_active);
unsigned current_cpu = ~0U;
Sched_context::Ready_queue *rq = 0;
// Enqueue current thread into ready-list to schedule correctly
// Select a thread for scheduling.
Context *next_to_run;
// I may have been migrated during the switch_exec_locked in the while
// statement below, so check whether I have to use a new ready queue.
if (cpu() != current_cpu)
rq = &Sched_context::rq(current_cpu);
if (rq->schedule_in_progress)
// Nested invocations of schedule() are bugs
assert_kdb (!rq->schedule_in_progress);
next_to_run = rq->next_to_run()->context();
// Ensure ready-list sanity
assert_kdb (next_to_run);
if (EXPECT_TRUE (next_to_run->state() & Thread_ready_mask))
next_to_run->ready_dequeue();
rq->schedule_in_progress = this;
// check whether we have been migrated meanwhile
if (EXPECT_FALSE(current_cpu != cpu()))
rq = &Sched_context::rq(current_cpu);
if (rq->schedule_in_progress)
rq->schedule_in_progress = 0;
while (EXPECT_FALSE(schedule_switch_to_locked (next_to_run)));
* Return whether there is currently a schedule() in progress
Context::schedule_in_progress()
return sched()->schedule_in_progress(cpu());
Context::reset_schedule_in_progress()
{ sched()->reset_schedule_in_progress(cpu()); }
* Return true if s can preempt the current scheduling context, false otherwise
PUBLIC static inline NEEDS ["globals.h"]
Context::can_preempt_current (Sched_context const *s)
assert_kdb (current_cpu() == s->owner()->cpu());
return current()->sched()->can_preempt_current(s);
* Return currently active global Sched_context.
Context::current_sched()
return Sched_context::rq(current_cpu()).current_sched();
* Set currently active global Sched_context.
Context::set_current_sched(Sched_context *sched)
// Save remainder of previous timeslice or refresh it, unless it had
unsigned cpu = this->cpu();
Sched_context::Ready_queue &rq = Sched_context::rq(cpu);
Timeout * const tt = timeslice_timeout.cpu(cpu);
Unsigned64 clock = Timer::system_clock();
if (Sched_context *s = rq.current_sched())
Signed64 left = tt->get_timeout(clock);
// Program new end-of-timeslice timeout
tt->set(clock + sched->left(), cpu);
// Make this timeslice current
LOG_SCHED_LOAD(sched);
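// Illustrative numbers (not from the original source): if the new timeslice
// still has sched->left() == 600 us and Timer::system_clock() reads
// 10,000,000 us, the one-shot timeslice timeout for this CPU is programmed
// to fire at 10,000,600 us.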
* Invalidate (expire) currently active global Sched_context.
PROTECTED inline NEEDS["logdefs.h","timeout.h"]
Context::invalidate_sched()
//LOG_SCHED_INVALIDATE;
sched()->invalidate_sched(cpu());
* Return Context's Sched_context with id 'id'; return time slice 0 as default.
* @return Sched_context with id 'id' or 0
Context::sched_context(unsigned short const id = 0) const
if (EXPECT_TRUE (!id))
return const_cast<Sched_context*>(&_sched_context);
for (Sched_context *tmp = _sched_context.next();
tmp != &_sched_context; tmp = tmp->next())
* Return Context's currently active Sched_context.
* @return Active Sched_context
Context::sched() const
* Set Context's currently active Sched_context.
* @param sched Sched_context to be activated
Context::set_sched (Sched_context * const sched)
* Return Context's real-time period length.
* @return Period length in usecs
Context::period() const
* Set Context's real-time period length.
* @param period New period length in usecs
Context::set_period (Unsigned64 const period)
* Return Context's scheduling mode.
* @return Scheduling mode
Context::mode() const
* Set Context's scheduling mode.
* @param mode New scheduling mode
Context::set_mode (Context::Sched_mode const mode)
// XXX for now, synchronize with global kernel lock
* Enqueue current() if ready to fix up ready-list invariant.
PRIVATE inline NOEXPORT
Context::update_ready_list()
assert_kdb (this == current());
if (state() & Thread_ready_mask)
* Check if Context is in ready-list.
* @return 1 if thread is in ready-list, 0 otherwise
Context::in_ready_list() const
return sched()->in_ready_list();
* Enqueue context in ready-list.
Context::ready_enqueue()
assert_kdb(current_cpu() == cpu());
//Lock_guard <Cpu_lock> guard (&cpu_lock);
// Don't enqueue threads that are not ready or have no own time
if (EXPECT_FALSE (!(state() & Thread_ready_mask) || !sched()->left()))
sched()->ready_enqueue(cpu());
* \brief Activate a newly created thread.
* This function sets a new thread onto the ready list and switches to
* the thread if it can preempt the currently running thread.
Lock_guard <Cpu_lock> guard (&cpu_lock);
if (cpu() == current_cpu())
state_add_dirty(Thread_ready);
if (sched()->deblock(cpu(), current()->sched(), true))
current()->switch_to_locked(this);
remote_ready_enqueue();
* Remove context from ready-list.
PUBLIC inline NEEDS ["cpu_lock.h", "lock_guard.h", "std_macros.h"]
Context::ready_dequeue()
assert_kdb(current_cpu() == cpu());
sched()->ready_dequeue();
/** Helper. Context that helps us by donating its time to us. It is
set by switch_exec() if the calling thread says so.
@return context that helps us and should be activated after freeing a lock.
Context::helper() const
Context::set_helper (enum Helping_mode const mode)
_helper = current();
case Ignore_Helping:
// don't change _helper value
/** Donatee. Context that receives our time slices, for example
because it has locked us.
@return context that should be activated instead of us when we're
Context::donatee() const
Context::set_donatee (Context * const donatee)
Context::get_kernel_sp() const
Context::set_kernel_sp (Mword * const esp)
Context::fpu_state()
* Add to consumed CPU time.
* @param quantum Implementation-specific time quantum (TSC ticks or usecs)
Context::consume_time(Clock::Time quantum)
_consumed_time += quantum;
* Update consumed CPU time during each context switch and when
* reading out the current thread's consumed CPU time.
IMPLEMENT inline NEEDS ["cpu.h"]
Context::update_consumed_time()
if (Config::fine_grained_cputime)
consume_time (_clock.cpu(cpu()).delta());
IMPLEMENT inline NEEDS ["config.h", "cpu.h"]
Context::consumed_time()
if (Config::fine_grained_cputime)
return _clock.cpu(cpu()).us(_consumed_time);
return _consumed_time;
* Switch to scheduling context and execution context while not running under
PUBLIC inline NEEDS [<cassert>]
Context::switch_to (Context *t)
// Call switch_to_locked if CPU lock is already held
assert (!cpu_lock.test());
// Grab the CPU lock
Lock_guard <Cpu_lock> guard (&cpu_lock);
switch_to_locked (t);
* Switch scheduling context and execution context.
* @param t Destination thread whose scheduling context and execution context
* should be activated.
PRIVATE inline NEEDS ["kdb_ke.h"]
bool FIASCO_WARN_RESULT
Context::schedule_switch_to_locked(Context *t)
// Must be called with CPU lock held
assert_kdb (cpu_lock.test());
// Switch to destination thread's scheduling context
if (current_sched() != t->sched())
set_current_sched(t->sched());
// XXX: IPC dependency tracking belongs here.
// Switch to destination thread's execution context, no helping involved
return switch_exec_locked(t, Not_Helping);
return handle_drq();
PUBLIC inline NEEDS [Context::schedule_switch_to_locked]
Context::switch_to_locked(Context *t)
if (EXPECT_FALSE(schedule_switch_to_locked(t)))
* Switch execution context while not running under CPU lock.
PUBLIC inline NEEDS ["kdb_ke.h"]
bool FIASCO_WARN_RESULT
Context::switch_exec (Context *t, enum Helping_mode mode)
// Call switch_exec_locked if CPU lock is already held
assert_kdb (!cpu_lock.test());
// Grab the CPU lock
Lock_guard <Cpu_lock> guard (&cpu_lock);
return switch_exec_locked (t, mode);
* Switch to a specific different execution context.
* If that context is currently locked, switch to its locker instead
* (except if current() is the locker)
* @pre current() == this && current() != t
* @param t thread that shall be activated.
* @param mode helping mode; we either help, don't help or leave the
* helping state unchanged
bool FIASCO_WARN_RESULT //L4_IPC_CODE
Context::switch_exec_locked (Context *t, enum Helping_mode mode)
// Must be called with CPU lock held
assert_kdb (cpu_lock.test());
if (t->cpu() != current_cpu())
  { printf("%p => %p\n", this, t); kdb_ke("ass"); }
assert_kdb (t->cpu() == current_cpu());
assert_kdb (current() != t);
assert_kdb (current() == this);
assert_kdb (timeslice_timeout.cpu(cpu())->is_set()); // Coma check
Context *t_orig = t;
// Time-slice lending: if t is locked, switch to its locker
// instead, this is transitive
while (t->donatee() && // target thread locked
t->donatee() != t) // not by itself
// Special case for Thread::kill(): If the locker is
// current(), switch to the locked thread to allow it to
// release other locks. Do this only when the target thread
// actually owns locks.
if (t->donatee() == current())
if (t->lock_cnt() > 0)
return handle_drq();
// Can only switch to ready threads!
if (EXPECT_FALSE (!(t->state() & Thread_ready_mask)))
assert_kdb (state() & Thread_ready_mask);
// Ensure kernel stack pointer is non-null if thread is ready
assert_kdb (t->_kernel_sp);
t->set_helper (mode);
update_ready_list();
assert_kdb (!(state() & Thread_ready_mask) || !sched()->left()
|| in_ready_list());
return handle_drq();
PUBLIC inline NEEDS[Context::switch_exec_locked, Context::schedule]
Context::switch_exec_schedule_locked (Context *t, enum Helping_mode mode)
if (EXPECT_FALSE(switch_exec_locked(t, mode)))
Context::local_id() const
Context::local_id (Local_id id)
Context::utcb() const
Context::utcb (Utcb *u)
IMPLEMENT inline NEEDS["globals.h"]
Context::Context_member::context()
{ return context_of(this); }
IMPLEMENT inline NEEDS["lock_guard.h", "kdb_ke.h"]
Context::Drq_q::enq(Drq *rq)
assert_kdb(cpu_lock.test());
Lock_guard<Inner_lock> guard(q_lock());
Context::do_drq_reply(Drq *r, Drq_q::Drop_mode drop)
state_change_dirty(~Thread_drq_wait, Thread_ready);
// r->state = Drq::Reply_handled;
if (drop == Drq_q::No_drop && r->reply)
return r->reply(r, this, r->arg) & Drq::Need_resched;
IMPLEMENT inline NEEDS[Context::do_drq_reply]
Context::Drq_q::execute_request(Drq *r, Drop_mode drop, bool local)
bool need_resched = false;
Context *const self = context();
// printf("CPU[%2u:%p]: context=%p: handle request for %p (func=%p, arg=%p)\n", current_cpu(), current(), context(), r->context(), r->func, r->arg);
if (r->context() == self)
LOG_TRACE("DRQ handling", "drq", current(), __context_drq_log_fmt,
Drq_log *l = tbe->payload<Drq_log>();
l->func = (void*)r->func;
l->reply = (void*)r->reply;
l->thread = r->context();
l->target_cpu = current_cpu();
//LOG_MSG_3VAL(current(), "hrP", current_cpu() | (drop ? 0x100: 0), (Mword)r->context(), (Mword)r->func);
return self->do_drq_reply(r, drop);
LOG_TRACE("DRQ handling", "drq", current(), __context_drq_log_fmt,
Drq_log *l = tbe->payload<Drq_log>();
l->type = "request";
l->func = (void*)r->func;
l->reply = (void*)r->reply;
l->thread = r->context();
l->target_cpu = current_cpu();
// r->state = Drq::Idle;
unsigned answer = 0;
//LOG_MSG_3VAL(current(), "hrq", current_cpu() | (drop ? 0x100: 0), (Mword)r->context(), (Mword)r->func);
if (EXPECT_TRUE(drop == No_drop && r->func))
answer = r->func(r, self, r->arg);
else if (EXPECT_FALSE(drop == Drop))
// flag DRQ abort for requester
// LOG_MSG_3VAL(current(), "hrq-", answer, current()->state() /*(Mword)r->context()*/, (Mword)r->func);
need_resched |= answer & Drq::Need_resched;
//r->state = Drq::Handled;
if (!(answer & Drq::No_answer))
return r->context()->do_drq_reply(r, drop) || need_resched;
need_resched |= r->context()->enqueue_drq(r, Drq::Target_ctxt);
return need_resched;
IMPLEMENT inline NEEDS["mem.h", "lock_guard.h"]
Context::Drq_q::handle_requests(Drop_mode drop)
// printf("CPU[%2u:%p]: > Context::Drq_q::handle_requests() context=%p\n", current_cpu(), current(), context());
bool need_resched = false;
Lock_guard<Inner_lock> guard(q_lock());
return need_resched;
check_kdb (dequeue(qi, Queue_item::Ok));
Drq *r = static_cast<Drq*>(qi);
// printf("CPU[%2u:%p]: context=%p: handle request for %p (func=%p, arg=%p)\n", current_cpu(), current(), context(), r->context(), r->func, r->arg);
need_resched |= execute_request(r, drop, false);
* \brief Forced dequeue from lock wait queue or DRQ queue.
Context::force_dequeue()
Queue_item *const qi = queue_item();
// we're waiting for a lock or have a DRQ pending
Queue *const q = qi->queue();
Lock_guard<Queue::Inner_lock> guard(q->q_lock());
// check again, with the queue lock held.
// NOTE: we may already have been removed from the queue on another CPU
if (qi->queued() && qi->queue())
// we must never be taken from one queue to another on a
assert_kdb(q == qi->queue());
// pull myself out of the queue, mark reason as invalidation
q->dequeue(qi, Queue_item::Invalid);
* \brief Dequeue from lock and DRQ queues, abort pending DRQs
Context::shutdown_queues()
* \brief Check for pending DRQs.
* \return true if there are DRQs pending, false if not.
Context::drq_pending() const
{ return _drq_q.first(); }
Context::try_finish_migration()
if (EXPECT_FALSE(_migration_rq.in_progress))
_migration_rq.in_progress = false;
* \brief Handle all pending DRQs.
* \pre cpu_lock.test() (The CPU lock must be held).
* \pre current() == this (only the currently running context is allowed to
* call this function).
* \return true if re-scheduling is needed (ready queue has changed),
Context::handle_drq()
//LOG_MSG_3VAL(this, ">drq", _drq_active, 0, cpu_lock.test());
assert_kdb (current_cpu() == this->cpu());
assert_kdb (cpu_lock.test());
try_finish_migration();
ret |= _drq_q.handle_requests();
Lock_guard<Drq_q::Inner_lock> guard(_drq_q.q_lock());
if (EXPECT_TRUE(!drq_pending()))
state_del_dirty(Thread_drq_ready);
//LOG_MSG_3VAL(this, "xdrq", state(), ret, cpu_lock.test());
* When the context is marked as dead (Thread_dead) we must not execute
* any regular context code; however, DRQ handlers may run.
if (state() & Thread_dead)
// so disable the context after handling all DRQs and flag a reschedule.
state_del_dirty(Thread_ready_mask);
return ret || !(state() & Thread_ready_mask);
* \brief Get the queue item of the context.
* \pre The context must currently not be in any queue.
* \return The queue item of the context.
* The queue item can be used to enqueue the context to a Queue.
* A context must be in at most one queue at a time.
* To figure out the context corresponding to a queue item,
* context_of() can be used.
PUBLIC inline NEEDS["kdb_ke.h"]
Context::queue_item()
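/*
 * Sketch (illustrative, not part of the original source) of the intended
 * round trip between a Context and its Queue_item:
 *
 *   Context *c = current();
 *   Queue_item *qi = c->queue_item();  // c must not be queued anywhere else
 *   some_queue.enqueue(qi);            // `some_queue` is a hypothetical Queue
 *   assert (context_of(qi) == c);      // recover the owning context again
 */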
* \brief DRQ handler for state_change.
* This function basically wraps Context::state_change().
Context::handle_drq_state_change(Drq * /*src*/, Context *self, void * _rq)
State_request *rq = reinterpret_cast<State_request*>(_rq);
self->state_change_dirty(rq->del, rq->add);
//LOG_MSG_3VAL(c, "dsta", c->state(), (Mword)src, (Mword)_rq);
* \brief Queue a DRQ for changing the context's state.
* \param mask bit mask for the state (state &= mask).
* \param add bits to add to the state (state |= add).
* \note This function is a preemption point.
* This function must be used to change the state of contexts that are
* potentially running on a different CPU.
PUBLIC inline NEEDS[Context::drq]
Context::drq_state_change(Mword mask, Mword add)
drq(handle_drq_state_change, &rq);
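/*
 * Usage sketch (illustrative, not part of the original source): to manipulate
 * the state of a context that is potentially running on another CPU, queue the
 * change as a DRQ instead of touching _state directly, e.g.
 *
 *   other_ctxt->drq_state_change(~Thread_ready, Thread_waiting);
 *
 * which runs handle_drq_state_change() in other_ctxt and applies the mask/add
 * pair there via state_change_dirty().
 */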
* \brief Initiate a DRQ for the context.
* \pre \a src must be the currently running context.
* \param src the source of the DRQ (the context that initiates the DRQ).
* \param func the DRQ handler.
* \param arg the argument for the DRQ handler.
* \param reply the reply handler (called in the context of \a src immediately
* after receiving a successful reply).
* DRQs are requests that any context can queue to any other context. DRQs are
* the basic mechanism to initiate actions on remote CPUs in an MP system;
* however, they are also allowed locally.
* DRQ handlers of pending DRQs are executed by Context::handle_drq() in the
* context of the target context. Context::handle_drq() is basically called
* after switching to a context in Context::switch_exec_locked().
* This function enqueues a DRQ and blocks the current context for a reply DRQ.
PUBLIC inline NEEDS[Context::enqueue_drq]
Context::drq(Drq *drq, Drq::Request_func *func, void *arg,
Drq::Request_func *reply = 0,
Drq::Exec_mode exec = Drq::Target_ctxt,
Drq::Wait_mode wait = Drq::Wait)
// printf("CPU[%2u:%p]: > Context::drq(this=%p, src=%p, func=%p, arg=%p)\n", current_cpu(), current(), this, src, func,arg);
Context *cur = current();
LOG_TRACE("DRQ Stuff", "drq", cur, __context_drq_log_fmt,
Drq_log *l = tbe->payload<Drq_log>();
l->func = (void*)func;
l->reply = (void*)reply;
l->target_cpu = cpu();
//assert_kdb (current() == src);
assert_kdb (!(wait == Drq::Wait && (cur->state() & Thread_drq_ready)) || cur->cpu() == cpu());
assert_kdb (!((wait == Drq::Wait || drq == &_drq) && cur->state() & Thread_drq_wait));
assert_kdb (!drq->queued());
cur->state_add(wait == Drq::Wait ? Thread_drq_wait : 0);
enqueue_drq(drq, exec);
//LOG_MSG_3VAL(src, "<drq", src->state(), Mword(this), 0);
while (wait == Drq::Wait && cur->state() & Thread_drq_wait)
cur->state_del(Thread_ready_mask);
LOG_TRACE("DRQ Stuff", "drq", cur, __context_drq_log_fmt,
Drq_log *l = tbe->payload<Drq_log>();
l->func = (void*)func;
l->reply = (void*)reply;
l->target_cpu = cpu();
//LOG_MSG_3VAL(src, "drq>", src->state(), Mword(this), 0);
Context::kernel_context_drq(Drq::Request_func *func, void *arg,
Drq::Request_func *reply = 0)
char align_buffer[2*sizeof(Drq)];
Drq *mdrq = (Drq*)((Address(align_buffer) + __alignof__(Drq) - 1) & ~(__alignof__(Drq)-1));
mdrq->reply = reply;
Context *kc = kernel_context(current_cpu());
kc->_drq_q.enq(mdrq);
bool resched = schedule_switch_to_locked(kc);
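/*
 * Note (illustrative, not part of the original source): the char buffer above
 * over-allocates so that a correctly aligned Drq always fits inside it; the
 * expression rounds the buffer address up to the next multiple of
 * __alignof__(Drq).  E.g. with __alignof__(Drq) == 8 and the buffer at an
 * address ending in 0xf4, mdrq ends up at the address ending in 0xf8.
 */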
PUBLIC inline NEEDS[Context::drq]
Context::drq(Drq::Request_func *func, void *arg,
Drq::Request_func *reply = 0,
Drq::Exec_mode exec = Drq::Target_ctxt,
Drq::Wait_mode wait = Drq::Wait)
{ return drq(&current()->_drq, func, arg, reply, exec, wait); }
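/*
 * Sketch (illustrative, not part of the original source) of a typical use of
 * the convenience overload above; the handler and its argument are
 * hypothetical:
 *
 *   static unsigned
 *   example_handler(Context::Drq *, Context *target, void *arg)
 *   {
 *     // runs in the context of `target`, possibly on another CPU
 *     do_something_with(target, arg);        // hypothetical work
 *     return 0;                              // or Drq::Need_resched
 *   }
 *
 *   // caller side: blocks (Drq::Wait) until `target` has run the handler
 *   target->drq(example_handler, some_arg);
 */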
Context::rcu_unblock(Rcu_item *i)
assert_kdb(cpu_lock.test());
Context *const c = static_cast<Context*>(i);
c->state_change_dirty(~Thread_waiting, Thread_ready);
c->sched()->deblock(c->cpu());
//----------------------------------------------------------------------------
IMPLEMENTATION [!mp]:
Context::cpu(bool = false) const
Context::remote_ready_enqueue()
WARN("Context::remote_ready_enqueue(): in UP system !\n");
kdb_ke("Fiasco BUG");
Context::enqueue_drq(Drq *rq, Drq::Exec_mode)
bool sched = _drq_q.execute_request(rq, Drq_q::No_drop, true);
if (!in_ready_list() && (state() & Thread_ready_mask))
PRIVATE inline NOEXPORT
Context::shutdown_drqs()
{ _drq_q.handle_requests(Drq_q::Drop); }
// The UP case does not need to block for the next grace period, because
// the CPU is always in a quiescent state when the interrupts were enabled
PUBLIC static inline
Context::xcpu_tlb_flush()
//----------------------------------------------------------------------------
#include "queue_item.h"
EXTENSION class Context
class Pending_rqq : public Queue
static void enq(Context *c);
bool handle_requests(Context **);
class Pending_rq : public Queue_item, public Context_member
Pending_rq _pending_rq;
static Per_cpu<Pending_rqq> _pending_rqq;
static Per_cpu<Drq_q> _glbl_drq_q;
//----------------------------------------------------------------------------
IMPLEMENTATION [mp]:
#include "globals.h"
#include "lock_guard.h"
Per_cpu<Context::Pending_rqq> DEFINE_PER_CPU Context::_pending_rqq;
Per_cpu<Context::Drq_q> DEFINE_PER_CPU Context::_glbl_drq_q;
* \brief Enqueue the given \a c into its CPU's queue.
* \param c the context to enqueue for DRQ handling.
IMPLEMENT inline NEEDS["globals.h", "lock_guard.h", "kdb_ke.h"]
Context::Pending_rqq::enq(Context *c)
// FIXME: is it safe to do the check without a locked queue, or may
// we lose DRQs then?
//if (!c->_pending_rq.queued())
Queue &q = Context::_pending_rqq.cpu(c->cpu());
Lock_guard<Inner_lock> guard(q.q_lock());
if (c->_pending_rq.queued())
q.enqueue(&c->_pending_rq);
* \brief Wake up all contexts with pending DRQs.
* This function wakes up all contexts from the pending queue.
Context::Pending_rqq::handle_requests(Context **mq)
//LOG_MSG_3VAL(current(), "phq", current_cpu(), 0, 0);
// printf("CPU[%2u:%p]: Context::Pending_rqq::handle_requests() this=%p\n", current_cpu(), current(), this);
bool resched = false;
Context *curr = current();
Lock_guard<Inner_lock> guard(q_lock());
check_kdb (dequeue(qi, Queue_item::Ok));
Context *c = static_cast<Context::Pending_rq *>(qi)->context();
//LOG_MSG_3VAL(c, "pick", c->state(), c->cpu(), current_cpu());
// Drop migrated threads
assert_kdb (EXPECT_FALSE(c->cpu() == current_cpu()));
if (EXPECT_TRUE(c->drq_pending()))
c->state_add(Thread_drq_ready);
if (EXPECT_FALSE(c->_migration_rq.pending))
c->initiate_migration();
c->try_finish_migration();
if (EXPECT_TRUE((c->state() & Thread_ready_mask)))
//printf("CPU[%2u:%p]: Context::Pending_rqq::handle_requests() dequeued %p(%u)\n", current_cpu(), current(), c, qi->queued());
resched |= c->sched()->deblock(current_cpu(), current()->sched(), false);
Context::global_drq(unsigned cpu, Drq::Request_func *func, void *arg,
Drq::Request_func *reply = 0, bool wait = true)
assert_kdb (this == current());
state_add(wait ? Thread_drq_wait : 0);
_glbl_drq_q.cpu(cpu).enq(&_drq);
Ipi::cpu(cpu).send(Ipi::Global_request);
//LOG_MSG_3VAL(src, "<drq", src->state(), Mword(this), 0);
while (wait && (state() & Thread_drq_wait))
state_del(Thread_ready_mask);
Context::handle_global_requests()
return _glbl_drq_q.cpu(current_cpu()).handle_requests();
Context::enqueue_drq(Drq *rq, Drq::Exec_mode /*exec*/)
assert_kdb (cpu_lock.test());
// printf("CPU[%2u:%p]: Context::enqueue_request(this=%p, src=%p, func=%p, arg=%p)\n", current_cpu(), current(), this, src, func,arg);
if (cpu() != current_cpu())
// read the CPU again; we may have been migrated meanwhile
unsigned cpu = this->cpu();
Queue &q = Context::_pending_rqq.cpu(cpu);
Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());
// migrated between getting the lock and reading the CPU, so the
// new CPU is responsible for executing our request
if (this->cpu() != cpu)
if (!_pending_rq.queued())
q.enqueue(&_pending_rq);
//LOG_MSG_3VAL(this, "sipi", current_cpu(), cpu(), (Mword)current());
Ipi::cpu(cpu).send(Ipi::Request);
{ // LOG_MSG_3VAL(this, "adrq", state(), (Mword)current(), (Mword)rq);
bool sched = _drq_q.execute_request(rq, Drq_q::No_drop, true);
if (!in_ready_list() && (state() & Thread_ready_mask))
PRIVATE inline NOEXPORT
Context::shutdown_drqs()
if (_pending_rq.queued())
Lock_guard<Pending_rqq::Inner_lock> guard(_pending_rq.queue()->q_lock());
if (_pending_rq.queued())
_pending_rq.queue()->dequeue(&_pending_rq, Queue_item::Ok);
_drq_q.handle_requests(Drq_q::Drop);
Context::cpu(bool running = false) const
* Helper for doing a ready enqueue on a remote CPU.
* See remote_ready_enqueue().
Context::handle_remote_ready_enqueue(Drq *, Context *self, void *)
self->state_add_dirty(Thread_ready);
PROTECTED inline NEEDS[Context::handle_remote_ready_enqueue]
Context::remote_ready_enqueue()
{ drq(&handle_remote_ready_enqueue, 0); }
* Block and wait for the next grace period.
PUBLIC inline NEEDS["cpu_lock.h", "lock_guard.h"]
Lock_guard<Cpu_lock> guard(&cpu_lock);
state_change_dirty(~Thread_ready, Thread_waiting);
Rcu::call(this, &rcu_unblock);
Context::handle_remote_tlb_flush(Drq *, Context *, void *)
// printf("RCV XCPU_FLUSH (%d)\n", current_cpu());
if (!current()->space())
Mem_space *ms = current()->mem_space();
bool need_flush = ms->need_tlb_flush();
ms->tlb_flush(true);
Context::xcpu_tlb_flush()
//printf("XCPU_ TLB FLUSH\n");
Lock_guard<Cpu_lock> g(&cpu_lock);
unsigned ccpu = current_cpu();
for (unsigned i = 0; i < Config::Max_num_cpus; ++i)
if (ccpu != i && Cpu::online(i))
current()->global_drq(i, Context::handle_remote_tlb_flush, 0);
//----------------------------------------------------------------------------
IMPLEMENTATION [fpu && !ux]:
* When switching away from the FPU owner, disable the FPU to cause
* the next FPU access to trap.
* When switching back to the FPU owner, enable the FPU so we don't
* get an FPU trap on FPU access.
IMPLEMENT inline NEEDS ["fpu.h"]
Context::switch_fpu(Context *t)
if (Fpu::is_owner(cpu(), this))
else if (Fpu::is_owner(cpu(), t) && !(t->state() & Thread_vcpu_fpu_disabled))
//----------------------------------------------------------------------------
IMPLEMENTATION [!fpu]:
Context::switch_fpu(Context *)
//----------------------------------------------------------------------------
IMPLEMENTATION [ux]:
Context::boost_idle_prio(unsigned _cpu)
// Boost the prio of the idle thread so that it can actually get some
// CPU and take down the system.
kernel_context(_cpu)->ready_dequeue();
kernel_context(_cpu)->sched()->set_prio(255);
kernel_context(_cpu)->ready_enqueue();
// --------------------------------------------------------------------------
IMPLEMENTATION [debug]:
#include "kobject_dbg.h"
Context::drq_log_fmt(Tb_entry *e, int maxlen, char *buf)
Drq_log *l = e->payload<Drq_log>();
return snprintf(buf, maxlen, "drq %s(%s) to ctxt=%lx/%p (func=%p, reply=%p) cpu=%u",
l->type, l->wait ? "wait" : "no-wait", Kobject_dbg::pointer_to_id(l->thread),
l->thread, l->func, l->reply, l->target_cpu);