3 #include <csetjmp> // typedef jmp_buf
7 #include "continuation.h"
12 #include "mem_space.h"
13 #include "member_offs.h"
14 #include "per_cpu_data.h"
16 #include "queue_item.h"
18 #include "sched_context.h"
19 #include "spin_lock.h"
31 explicit Context_ptr(unsigned long id) : _t(id) {}
33 Context_ptr(Context_ptr const &o) : _t(o._t) {}
34 Context_ptr const &operator = (Context_ptr const &o)
35 { _t = o._t; return *this; }
37 Kobject_iface *ptr(Space *, unsigned char *) const;
39 bool is_kernel() const { return false; }
40 bool is_valid() const { return _t != ~0UL; }
42 // only for debugging use
43 Mword raw() const { return _t;}
50 template< typename T >
51 class Context_ptr_base : public Context_ptr
54 enum Invalid_type { Invalid };
55 explicit Context_ptr_base(Invalid_type) : Context_ptr(0) {}
56 explicit Context_ptr_base(unsigned long id) : Context_ptr(id) {}
58 Context_ptr_base(Context_ptr_base<T> const &o) : Context_ptr(o) {}
59 template< typename X >
60 Context_ptr_base(Context_ptr_base<X> const &o) : Context_ptr(o)
61 { X*x = 0; T*t = x; (void)t; }
63 Context_ptr_base<T> const &operator = (Context_ptr_base<T> const &o)
64 { Context_ptr::operator = (o); return *this; }
66 template< typename X >
67 Context_ptr_base<T> const &operator = (Context_ptr_base<X> const &o)
68 { X*x=0; T*t=x; (void)t; Context_ptr::operator = (o); return *this; }
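// Note: the 'X*x = 0; T*t = x;' statements in the two templated members
// above are a compile-time check only: the assignment compiles exactly when
// X* converts to T*, so a Context_ptr_base<X> can only be copied or assigned
// into a Context_ptr_base<T> for compatible payload types.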
70 //T *ptr(Space *s) const { return static_cast<T*>(Context_ptr::ptr(s)); }
73 class Context_space_ref
76 typedef Spin_lock_coloc<Space *> Space_n_lock;
83 Space *space() const { return _s.get_unused(); }
84 Space_n_lock *lock() { return &_s; }
85 Address user_mode() const { return _v & 1; }
86 Space *vcpu_user() const { return reinterpret_cast<Space*>(_v & ~3); }
87 Space *vcpu_aware() const { return user_mode() ? vcpu_user() : space(); }
89 void space(Space *s) { _s.set_unused(s); }
90 void vcpu_user(Space *s) { _v = (Address)s; }
91 void user_mode(bool enable)
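// _v is a tagged pointer: user_mode() reads bit 0 and vcpu_user() masks the
// low two bits off before using the value as a Space pointer. A minimal
// sketch of this setter, assuming that encoding (the body is not shown in
// this excerpt):
//
//   void user_mode(bool enable)
//   {
//     if (enable)
//       _v |= (Address)1;
//     else
//       _v &= ~(Address)1;
//   }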
100 /** An execution context. A context is a runnable, schedulable activity.
101 It carries along some state used by other subsystems: a lock count
102 and stack-element forward/next pointers.
105 public Global_context_data,
109 friend class Jdb_thread_list;
110 friend class Context_ptr;
111 friend class Jdb_utcb;
114 virtual void finish_migration() = 0;
115 virtual void initiate_migration() = 0;
125 * \brief Encapsulate an aggregate of Context.
127 * Allows getting a back reference to the aggregating Context object.
132 Context_member(Context_member const &);
137 * \brief Get the aggregating Context object.
139 Context *context() const;
143 * \brief Deferred Request.
145 * Represents a request that can be queued for each Context
146 * and is executed by the target context just after switching to the
149 class Drq : public Queue_item, public Context_member
152 typedef unsigned (Request_func)(Drq *, Context *target, void *);
153 enum { Need_resched = 1, No_answer = 2 };
154 enum Wait_mode { No_wait = 0, Wait = 1 };
155 enum Exec_mode { Target_ctxt = 0, Any_ctxt = 1 };
156 // enum State { Idle = 0, Handled = 1, Reply_handled = 2 };
165 * \brief Queue for deferred requests (Drq).
167 * A FIFO queue that each Context aggregates to queue incoming Drqs
168 * that have to be executed directly after switching to a context.
170 class Drq_q : public Queue, public Context_member
173 enum Drop_mode { Drop = true, No_drop = false };
175 bool handle_requests(Drop_mode drop = No_drop);
176 bool execute_request(Drq *r, Drop_mode drop, bool local);
179 struct Migration_info
187 class Ku_mem_ptr : public Context_member
192 typename User<T>::Ptr _u;
196 Ku_mem_ptr() : _u(0), _k(0) {}
197 Ku_mem_ptr(typename User<T>::Ptr const &u, T *k) : _u(u), _k(k) {}
199 void set(typename User<T>::Ptr const &u, T *k)
202 T *access(bool is_current = false) const
204 // assert_kdb (!is_current || current() == context());
206 && (int)Config::Access_user_mem == Config::Access_user_mem_direct)
209 unsigned const cpu = current_cpu();
210 if ((int)Config::Access_user_mem == Config::Must_access_user_mem_direct
211 && cpu == context()->cpu()
212 && Mem_space::current_mem_space(cpu) == context()->mem_space())
217 typename User<T>::Ptr usr() const { return _u; }
218 T* kern() const { return _k; }
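// Ku_mem_ptr keeps two aliases of the same kernel/user memory object: _u is
// its address in the user address space, _k the kernel mapping. access()
// picks whichever alias is safe to dereference right now, so objects such as
// the UTCB can be touched from generic kernel code regardless of the page
// table that is currently active. Illustrative use (a sketch; 'c' is a
// hypothetical Context):
//
//   Utcb *k = c->utcb().access();          // kernel-visible alias
//   User<Utcb>::Ptr u = c->utcb().usr();   // address as seen by user code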
223 * Definition of different scheduling modes
227 Periodic = 0x1, ///< 0 = Conventional, 1 = Periodic
228 Nonstrict = 0x2, ///< 0 = Strictly Periodic, 1 = Non-strictly periodic
232 * Definition of different helping modes
242 * Size of a Context (TCB + kernel stack)
244 static const size_t size = Config::thread_block_size;
247 * Return consumed CPU time.
248 * @return Consumed CPU time in usecs
250 Cpu_time consumed_time();
252 virtual bool kill() = 0;
254 void spill_user_state();
255 void fill_user_state();
259 * Update consumed CPU time during each context switch and when
260 * reading out the current thread's consumed CPU time.
262 void update_consumed_time();
266 Ku_mem_ptr<Utcb> _utcb;
270 friend class Jdb_tcb;
272 /// low level page table switching stuff
273 void switchin_context(Context *) asm ("switchin_context_label") FIASCO_FASTCALL;
275 /// low level fpu switching stuff
276 void switch_fpu (Context *t);
278 /// low level cpu switching stuff
279 void switch_cpu (Context *t);
282 Context_space_ref _space;
289 // how many locks does this thread hold on other threads
290 // incremented in Thread::lock, decremented in Thread::clear
291 // Thread::kill needs to know
295 // The scheduling parameters. We would only need to keep an
296 // anonymous reference to them as we do not need them ourselves, but
297 // we aggregate them for performance reasons.
298 Sched_context _sched_context;
299 Sched_context *_sched;
303 // Pointer to floating point register state
304 Fpu_state _fpu_state;
305 // Implementation-specific consumed CPU time (TSC ticks or usecs)
306 Clock::Time _consumed_time;
312 // for trigger_exception
313 Continuation _exc_cont;
315 jmp_buf *_recover_jmpbuf; // setjmp buffer for page-fault recovery
320 Spin_lock<> affinity_lock;
324 Migration_rq() : pending(false), in_progress(false)
325 { affinity_lock.init(); }
329 // XXX Timeout for both, sender and receiver! In the normal case we would have
330 // to define separate timeouts in Receiver and Sender, but because only one
331 // timeout can be set at a time we use the same timeout. The timeout
332 // has to be defined here because Dirq::hit has to be able to reset the
333 // timeout (Irq::_irq_thread is of type Receiver).
337 static Per_cpu<Clock> _clock;
338 static Per_cpu<Context *> _kernel_ctxt;
344 #include "tb_entry.h"
346 EXTENSION class Context
371 static unsigned drq_log_fmt(Tb_entry *, int, char *)
372 asm ("__context_drq_log_fmt");
376 // --------------------------------------------------------------------------
382 #include "cpu_lock.h"
383 #include "entry_frame.h"
385 #include "globals.h" // current()
387 #include "lock_guard.h"
390 #include "mem_layout.h"
391 #include "processor.h"
393 #include "std_macros.h"
394 #include "thread_state.h"
398 Per_cpu<Clock> DEFINE_PER_CPU Context::_clock(true);
399 Per_cpu<Context *> DEFINE_PER_CPU Context::_kernel_ctxt;
401 IMPLEMENT inline NEEDS["kdb_ke.h"]
403 Context_ptr::ptr(Space *s, unsigned char *rights) const
405 assert_kdb (cpu_lock.test());
407 return s->obj_space()->lookup_local(_t, rights);
414 /** Initialize a context. After setup, a switch_exec to this context results
415 in a return to user code using the return registers at regs(). The
416 return registers are not initialized, however; neither is the space_context
417 to be used in thread switching (use set_space_context() for that).
418 @pre (_kernel_sp == 0) && (* (stack end) == 0)
419 @param thread_lock pointer to lock used to lock this context
420 @param space_context the space context
422 PUBLIC inline NEEDS ["atomic.h", "entry_frame.h", <cstdio>]
424 : _kernel_sp(reinterpret_cast<Mword*>(regs())),
428 _sched(&_sched_context),
431 // NOTE: We do not have to synchronize the initialization of
432 // _space_context because it is constant for all concurrent
433 // invocations of this constructor. When two threads concurrently
434 // try to create a new task, they already synchronize in
435 // sys_task_new() and avoid calling us twice with different
436 // space_context arguments.
438 set_cpu_of(this, current_cpu());
446 // If this context owned the FPU, no one owns it now
447 if (Fpu::is_owner(cpu(), this))
449 Fpu::set_owner(cpu(), 0);
463 Context::state(bool check = true) const
466 assert_kdb(!check || cpu() == current_cpu());
472 Context::kernel_context(unsigned cpu)
473 { return _kernel_ctxt.cpu(cpu); }
475 PROTECTED static inline
477 Context::kernel_context(unsigned cpu, Context *ctxt)
478 { _kernel_ctxt.cpu(cpu) = ctxt; }
481 /** @name State manipulation */
487 * Does the context exist?
488 * @return true if this context has been initialized.
490 PUBLIC inline NEEDS ["thread_state.h"]
492 Context::exists() const
494 return state() != Thread_invalid;
498 * Is the context about to be deleted?
499 * @return true if this context is in deletion.
501 PUBLIC inline NEEDS ["thread_state.h"]
503 Context::is_invalid() const
504 { return state() == Thread_invalid; }
507 * Atomically add bits to state flags.
508 * @param bits bits to be added to state flags
509 * @return 1 if none of the bits that were added had been set before
511 PUBLIC inline NEEDS ["atomic.h"]
513 Context::state_add(Mword const bits)
515 assert_kdb(cpu() == current_cpu());
516 atomic_or(&_state, bits);
520 * Add bits in state flags. Unsafe (non-atomic) and
521 * fast version -- you must hold the kernel lock when you use it.
522 * @pre cpu_lock.test() == true
523 * @param bits bits to be added to state flags
527 Context::state_add_dirty(Mword bits)
529 assert_kdb(cpu() == current_cpu());
534 * Atomically delete bits from state flags.
535 * @param bits bits to be removed from state flags
536 * @return 1 if all of the bits that were removed had previously been set
538 PUBLIC inline NEEDS ["atomic.h"]
540 Context::state_del(Mword const bits)
542 assert_kdb (current_cpu() == cpu());
543 atomic_and(&_state, ~bits);
547 * Delete bits in state flags. Unsafe (non-atomic) and
548 * fast version -- you must hold the kernel lock when you use it.
549 * @pre cpu_lock.test() == true
550 * @param bits bits to be removed from state flags
554 Context::state_del_dirty(Mword bits, bool check = true)
557 assert_kdb(!check || cpu() == current_cpu());
562 * Atomically delete and add bits in state flags, provided the
563 * following rules apply (otherwise state is not changed at all):
564 * - Bits that are to be set must be clear in state or clear in mask
565 * - Bits that are to be cleared must be set in state
566 * @param mask Bits not set in mask shall be deleted from state flags
567 * @param bits Bits to be added to state flags
568 * @return 1 if state was changed, 0 otherwise
570 PUBLIC inline NEEDS ["atomic.h"]
572 Context::state_change_safely(Mword const mask, Mword const bits)
574 assert_kdb (current_cpu() == cpu());
580 if ((old & bits & mask) | (~old & ~mask))
583 while (!cas(&_state, old, (old & mask) | bits));
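// A concrete reading of the rules above: if the state currently contains
// Thread_ready but not Thread_waiting, then
//   state_change_safely(~Thread_ready, Thread_waiting)
// succeeds and stores (state & ~Thread_ready) | Thread_waiting, because the
// bit to be cleared (Thread_ready) is set and the bit to be added
// (Thread_waiting) is still clear. If Thread_ready had already been cleared,
// nothing would change and the call would return 0.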
589 * Atomically delete and add bits in state flags.
590 * @param mask bits not set in mask shall be deleted from state flags
591 * @param bits bits to be added to state flags
593 PUBLIC inline NEEDS ["atomic.h"]
595 Context::state_change(Mword const mask, Mword const bits)
597 assert_kdb (current_cpu() == cpu());
598 return atomic_change(&_state, mask, bits);
602 * Delete and add bits in state flags. Unsafe (non-atomic) and
603 * fast version -- you must hold the kernel lock when you use it.
604 * @pre cpu_lock.test() == true
605 * @param mask Bits not set in mask shall be deleted from state flags
606 * @param bits Bits to be added to state flags
610 Context::state_change_dirty(Mword const mask, Mword const bits, bool check = true)
613 assert_kdb(!check || cpu() == current_cpu());
621 /** Return the space context.
622 @return space context used for this execution context.
623 Set with set_space_context().
627 Context::space() const
628 { return _space.space(); }
637 Context::vcpu_aware_space() const
638 { return _space.vcpu_aware(); }
640 /** Convenience function: Return memory space. */
641 PUBLIC inline NEEDS["space.h"]
643 Context::mem_space() const
645 return space()->mem_space();
649 /** Registers used when iret'ing to user mode.
650 @return return registers
652 PUBLIC inline NEEDS["cpu.h", "entry_frame.h"]
654 Context::regs() const
656 return reinterpret_cast<Entry_frame *>
657 (Cpu::stack_align(reinterpret_cast<Mword>(this) + size)) - 1;
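// Layout implied by regs() and the constructor above: the Context (TCB) and
// its kernel stack share one block of 'size' bytes, and the Entry_frame sits
// at the stack-aligned top of that block.
//
//   this                                       this + size (stack-aligned)
//    | Context (TCB) | ... kernel stack ...... | Entry_frame |
//                                              ^ regs(), initial _kernel_sp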
660 /** @name Lock counting
661 These functions count the number of locks
662 this context holds. A context must not be deleted if its lock
667 /** Increment lock count.
668 @post lock_cnt() > 0 */
671 Context::inc_lock_cnt()
676 /** Decrement lock count.
681 Context::dec_lock_cnt()
691 Context::lock_cnt() const
699 * Switch active timeslice of this Context.
700 * @param next Sched_context to switch to
704 Context::switch_sched(Sched_context * const next)
706 // Ensure CPU lock protection
707 assert_kdb (cpu_lock.test());
709 // If we're leaving the global timeslice, invalidate it
710 // This causes schedule() to select a new timeslice via set_current_sched()
711 if (sched() == current_sched())
714 // Ensure the new timeslice has a full quantum
715 assert_kdb (next->left() == next->quantum());
725 * Select a different context for running and activate it.
731 Lock_guard <Cpu_lock> guard(&cpu_lock);
735 // Ensure only the current thread calls schedule
736 assert_kdb (this == current());
738 unsigned current_cpu = ~0U;
739 Sched_context::Ready_queue *rq = 0;
741 // Enqueue current thread into ready-list to schedule correctly
744 // Select a thread for scheduling.
745 Context *next_to_run;
749 // I may have been migrated during the switch_exec_locked in the while
750 // statement below, so check whether I have to use a new ready queue.
751 if (cpu() != current_cpu)
754 rq = &Sched_context::rq(current_cpu);
755 if (rq->schedule_in_progress)
757 // Nested invocations of schedule() are bugs
758 assert_kdb (!rq->schedule_in_progress);
763 next_to_run = rq->next_to_run()->context();
765 // Ensure ready-list sanity
766 assert_kdb (next_to_run);
768 if (EXPECT_TRUE (next_to_run->state() & Thread_ready_mask))
771 next_to_run->ready_dequeue();
773 rq->schedule_in_progress = this;
779 // check if we've been migrated meanwhile
780 if (EXPECT_FALSE(current_cpu != cpu()))
783 rq = &Sched_context::rq(current_cpu);
784 if (rq->schedule_in_progress)
788 rq->schedule_in_progress = 0;
791 while (EXPECT_FALSE(schedule_switch_to_locked(next_to_run)));
795 * Return if there is currently a schedule() in progress
799 Context::schedule_in_progress()
801 return sched()->schedule_in_progress(cpu());
804 PUBLIC inline NEEDS[Context::schedule_in_progress]
806 Context::schedule_if(bool s)
808 if (!s || schedule_in_progress())
816 Context::reset_schedule_in_progress()
817 { sched()->reset_schedule_in_progress(cpu()); }
820 * Return currently active global Sched_context.
824 Context::current_sched()
826 return Sched_context::rq(current_cpu()).current_sched();
830 * Set currently active global Sched_context.
834 Context::set_current_sched(Sched_context *sched)
837 // Save remainder of previous timeslice or refresh it, unless it had
839 unsigned cpu = this->cpu();
840 Sched_context::Ready_queue &rq = Sched_context::rq(cpu);
842 Timeout * const tt = timeslice_timeout.cpu(cpu);
843 Unsigned64 clock = Timer::system_clock();
844 if (Sched_context *s = rq.current_sched())
846 Signed64 left = tt->get_timeout(clock);
855 // Program new end-of-timeslice timeout
857 tt->set(clock + sched->left(), cpu);
859 // Make this timeslice current
862 LOG_SCHED_LOAD(sched);
866 * Invalidate (expire) currently active global Sched_context.
868 PROTECTED inline NEEDS["logdefs.h","timeout.h"]
870 Context::invalidate_sched()
872 //LOG_SCHED_INVALIDATE;
873 sched()->invalidate_sched(cpu());
877 * Return Context's Sched_context with id 'id'; return time slice 0 as default.
878 * @return Sched_context with id 'id' or 0
882 Context::sched_context(unsigned short const id = 0) const
884 if (EXPECT_TRUE (!id))
885 return const_cast<Sched_context*>(&_sched_context);
887 for (Sched_context *tmp = _sched_context.next();
888 tmp != &_sched_context; tmp = tmp->next())
896 * Return Context's currently active Sched_context.
897 * @return Active Sched_context
901 Context::sched() const
907 * Set Context's currently active Sched_context.
908 * @param sched Sched_context to be activated
912 Context::set_sched(Sched_context * const sched)
918 * Return Context's real-time period length.
919 * @return Period length in usecs
923 Context::period() const
929 * Set Context's real-time period length.
930 * @param period New period length in usecs
934 Context::set_period(Unsigned64 const period)
940 * Return Context's scheduling mode.
941 * @return Scheduling mode
945 Context::mode() const
951 * Set Context's scheduling mode.
952 * @param mode New scheduling mode
956 Context::set_mode(Context::Sched_mode const mode)
963 // XXX for now, synchronize with global kernel lock
967 * Enqueue current() if it is ready, to maintain the ready-list invariant.
969 PRIVATE inline NOEXPORT
971 Context::update_ready_list()
973 assert_kdb (this == current());
975 if (state() & Thread_ready_mask)
980 * Check if Context is in ready-list.
981 * @return 1 if thread is in ready-list, 0 otherwise
985 Context::in_ready_list() const
987 return sched()->in_ready_list();
991 * Enqueue context in ready-list.
995 Context::ready_enqueue(bool check = true)
998 assert_kdb(!check || current_cpu() == cpu());
999 //Lock_guard <Cpu_lock> guard (&cpu_lock);
1001 // Don't enqueue threads that are not ready or have no own time
1002 if (EXPECT_FALSE (!(state(check) & Thread_ready_mask) || !sched()->left()))
1005 sched()->ready_enqueue(cpu());
1010 * \brief Activate a newly created thread.
1012 * This function sets a new thread onto the ready list and switches to
1013 * the thread if it can preempt the currently running thread.
1019 Lock_guard <Cpu_lock> guard(&cpu_lock);
1020 if (cpu() == current_cpu())
1022 state_add_dirty(Thread_ready);
1023 if (sched()->deblock(cpu(), current()->sched(), true))
1025 current()->switch_to_locked(this);
1030 remote_ready_enqueue();
1036 * Remove context from ready-list.
1038 PUBLIC inline NEEDS ["cpu_lock.h", "lock_guard.h", "std_macros.h"]
1040 Context::ready_dequeue()
1042 assert_kdb(current_cpu() == cpu());
1043 sched()->ready_dequeue();
1047 /** Helper. Context that helps us by donating its time to us. It is
1048 set by switch_exec() if the calling thread says so.
1049 @return context that helps us and should be activated after freeing a lock.
1053 Context::helper() const
1061 Context::set_helper(Helping_mode const mode)
1066 _helper = current();
1071 case Ignore_Helping:
1072 // don't change _helper value
1077 /** Donatee. Context that receives our time slices, for example
1078 because it has locked us.
1079 @return context that should be activated instead of us when we're
1084 Context::donatee() const
1091 Context::set_donatee(Context * const donatee)
1098 Context::get_kernel_sp() const
1105 Context::set_kernel_sp(Mword * const esp)
1112 Context::fpu_state()
1118 * Add to consumed CPU time.
1119 * @param quantum Implementation-specific time quantum (TSC ticks or usecs)
1123 Context::consume_time(Clock::Time quantum)
1125 _consumed_time += quantum;
1129 * Update consumed CPU time during each context switch and when
1130 * reading out the current thread's consumed CPU time.
1132 IMPLEMENT inline NEEDS ["cpu.h"]
1134 Context::update_consumed_time()
1136 if (Config::fine_grained_cputime)
1137 consume_time (_clock.cpu(cpu()).delta());
1140 IMPLEMENT inline NEEDS ["config.h", "cpu.h"]
1142 Context::consumed_time()
1144 if (Config::fine_grained_cputime)
1145 return _clock.cpu(cpu()).us(_consumed_time);
1147 return _consumed_time;
1151 * Switch to scheduling context and execution context while not running under
1154 PUBLIC inline NEEDS [<cassert>]
1156 Context::switch_to(Context *t)
1158 // Call switch_to_locked if CPU lock is already held
1159 assert (!cpu_lock.test());
1161 // Grab the CPU lock
1162 Lock_guard <Cpu_lock> guard(&cpu_lock);
1164 switch_to_locked(t);
1168 * Switch scheduling context and execution context.
1169 * @param t Destination thread whose scheduling context and execution context
1170 * should be activated.
1172 PRIVATE inline NEEDS ["kdb_ke.h"]
1173 bool FIASCO_WARN_RESULT
1174 Context::schedule_switch_to_locked(Context *t)
1176 // Must be called with CPU lock held
1177 assert_kdb (cpu_lock.test());
1179 // Switch to destination thread's scheduling context
1180 if (current_sched() != t->sched())
1181 set_current_sched(t->sched());
1183 // XXX: IPC dependency tracking belongs here.
1185 // Switch to destination thread's execution context, no helping involved
1187 return switch_exec_locked(t, Not_Helping);
1189 return handle_drq();
1192 PUBLIC inline NEEDS [Context::schedule_switch_to_locked]
1194 Context::switch_to_locked(Context *t)
1196 if (EXPECT_FALSE(schedule_switch_to_locked(t)))
1202 * Switch execution context while not running under CPU lock.
1204 PUBLIC inline NEEDS ["kdb_ke.h"]
1205 bool FIASCO_WARN_RESULT
1206 Context::switch_exec(Context *t, enum Helping_mode mode)
1208 // Call switch_exec_locked if CPU lock is already held
1209 assert_kdb (!cpu_lock.test());
1211 // Grab the CPU lock
1212 Lock_guard <Cpu_lock> guard(&cpu_lock);
1214 return switch_exec_locked(t, mode);
1220 Context::handle_helping(Context *t)
1222 assert_kdb (current() == this);
1223 // Time-slice lending: if t is locked, switch to its locker
1224 // instead; this is transitive
1225 while (t->donatee() && // target thread locked
1226 t->donatee() != t) // not by itself
1228 // Special case for Thread::kill(): If the locker is
1229 // current(), switch to the locked thread to allow it to
1230 // release other locks. Do this only when the target thread
1231 // actually owns locks.
1232 if (t->donatee() == this)
1234 if (t->lock_cnt() > 0)
1247 * Switch to a specific different execution context.
1248 * If that context is currently locked, switch to its locker instead
1249 * (except if current() is the locker)
1250 * @pre current() == this && current() != t
1251 * @param t thread that shall be activated.
1252 * @param mode helping mode; we either help, don't help or leave the
1253 * helping state unchanged
1256 bool FIASCO_WARN_RESULT //L4_IPC_CODE
1257 Context::switch_exec_locked(Context *t, enum Helping_mode mode)
1259 // Must be called with CPU lock held
1260 assert_kdb (cpu_lock.test());
1261 assert_kdb (current() != t);
1262 assert_kdb (current() == this);
1263 assert_kdb (timeslice_timeout.cpu(cpu())->is_set()); // Coma check
1266 Context *t_orig = t;
1269 // Time-slice lending: if t is locked, switch to its locker
1270 // instead; this is transitive
1271 t = handle_helping(t);
1274 return handle_drq();
1279 // Can only switch to ready threads!
1280 if (EXPECT_FALSE (!(t->state() & Thread_ready_mask)))
1282 assert_kdb (state() & Thread_ready_mask);
1287 // Ensure kernel stack pointer is non-null if thread is ready
1288 assert_kdb (t->_kernel_sp);
1290 t->set_helper(mode);
1292 update_ready_list();
1293 assert_kdb (!(state() & Thread_ready_mask) || !sched()->left()
1294 || in_ready_list());
1299 return handle_drq();
1302 PUBLIC inline NEEDS[Context::switch_exec_locked, Context::schedule]
1304 Context::switch_exec_schedule_locked(Context *t, enum Helping_mode mode)
1306 if (EXPECT_FALSE(switch_exec_locked(t, mode)))
1311 Context::Ku_mem_ptr<Utcb> const &
1312 Context::utcb() const
1315 IMPLEMENT inline NEEDS["globals.h"]
1317 Context::Context_member::context() const
1318 { return context_of(this); }
1320 IMPLEMENT inline NEEDS["lock_guard.h", "kdb_ke.h"]
1322 Context::Drq_q::enq(Drq *rq)
1324 assert_kdb(cpu_lock.test());
1325 Lock_guard<Inner_lock> guard(q_lock());
1331 Context::do_drq_reply(Drq *r, Drq_q::Drop_mode drop)
1333 state_change_dirty(~Thread_drq_wait, Thread_ready);
1334 // r->state = Drq::Reply_handled;
1335 if (drop == Drq_q::No_drop && r->reply)
1336 return r->reply(r, this, r->arg) & Drq::Need_resched;
1341 IMPLEMENT inline NEEDS[Context::do_drq_reply]
1343 Context::Drq_q::execute_request(Drq *r, Drop_mode drop, bool local)
1345 bool need_resched = false;
1346 Context *const self = context();
1347 // printf("CPU[%2u:%p]: context=%p: handle request for %p (func=%p, arg=%p)\n", current_cpu(), current(), context(), r->context(), r->func, r->arg);
1348 if (r->context() == self)
1350 LOG_TRACE("DRQ handling", "drq", current(), __context_drq_log_fmt,
1351 Drq_log *l = tbe->payload<Drq_log>();
1353 l->func = (void*)r->func;
1354 l->reply = (void*)r->reply;
1355 l->thread = r->context();
1356 l->target_cpu = current_cpu();
1359 //LOG_MSG_3VAL(current(), "hrP", current_cpu() | (drop ? 0x100: 0), (Mword)r->context(), (Mword)r->func);
1360 return self->do_drq_reply(r, drop);
1364 LOG_TRACE("DRQ handling", "drq", current(), __context_drq_log_fmt,
1365 Drq_log *l = tbe->payload<Drq_log>();
1366 l->type = "request";
1367 l->func = (void*)r->func;
1368 l->reply = (void*)r->reply;
1369 l->thread = r->context();
1370 l->target_cpu = current_cpu();
1373 // r->state = Drq::Idle;
1374 unsigned answer = 0;
1375 //LOG_MSG_3VAL(current(), "hrq", current_cpu() | (drop ? 0x100: 0), (Mword)r->context(), (Mword)r->func);
1376 if (EXPECT_TRUE(drop == No_drop && r->func))
1377 answer = r->func(r, self, r->arg);
1378 else if (EXPECT_FALSE(drop == Drop))
1379 // flag DRQ abort for requester
1381 // LOG_MSG_3VAL(current(), "hrq-", answer, current()->state() /*(Mword)r->context()*/, (Mword)r->func);
1382 need_resched |= answer & Drq::Need_resched;
1383 //r->state = Drq::Handled;
1386 if (!(answer & Drq::No_answer))
1389 return r->context()->do_drq_reply(r, drop) || need_resched;
1391 need_resched |= r->context()->enqueue_drq(r, Drq::Target_ctxt);
1394 return need_resched;
1397 IMPLEMENT inline NEEDS["mem.h", "lock_guard.h"]
1399 Context::Drq_q::handle_requests(Drop_mode drop)
1401 // printf("CPU[%2u:%p]: > Context::Drq_q::handle_requests() context=%p\n", current_cpu(), current(), context());
1402 bool need_resched = false;
1407 Lock_guard<Inner_lock> guard(q_lock());
1410 return need_resched;
1412 check_kdb (dequeue(qi, Queue_item::Ok));
1415 Drq *r = static_cast<Drq*>(qi);
1416 // printf("CPU[%2u:%p]: context=%p: handle request for %p (func=%p, arg=%p)\n", current_cpu(), current(), context(), r->context(), r->func, r->arg);
1417 need_resched |= execute_request(r, drop, false);
1421 * \brief Forced dequeue from the lock wait queue or the DRQ queue.
1425 Context::force_dequeue()
1427 Queue_item *const qi = queue_item();
1431 // we're waiting for a lock or have a DRQ pending
1432 Queue *const q = qi->queue();
1434 Lock_guard<Queue::Inner_lock> guard(q->q_lock());
1435 // check again, with the queue lock held.
1436 // NOTE: we may already have been removed from the queue on another CPU
1437 if (qi->queued() && qi->queue())
1439 // we must never be taken from one queue to another on a
1441 assert_kdb(q == qi->queue());
1442 // pull myself out of the queue, mark reason as invalidation
1443 q->dequeue(qi, Queue_item::Invalid);
1450 * \brief Dequeue from lock and DRQ queues, abort pending DRQs
1454 Context::shutdown_queues()
1462 * \brief Check for pending DRQs.
1463 * \return true if there are DRQs pending, false if not.
1467 Context::drq_pending() const
1468 { return _drq_q.first(); }
1472 Context::try_finish_migration()
1474 if (EXPECT_FALSE(_migration_rq.in_progress))
1476 _migration_rq.in_progress = false;
1483 * \brief Handle all pending DRQs.
1484 * \pre cpu_lock.test() (The CPU lock must be held).
1485 * \pre current() == this (only the currently running context is allowed to
1486 * call this function).
1487 * \return true if re-scheduling is needed (ready queue has changed),
1492 Context::handle_drq()
1494 assert_kdb (current_cpu() == this->cpu());
1495 assert_kdb (cpu_lock.test());
1497 try_finish_migration();
1506 ret |= _drq_q.handle_requests();
1508 Lock_guard<Drq_q::Inner_lock> guard(_drq_q.q_lock());
1509 if (EXPECT_TRUE(!drq_pending()))
1511 state_del_dirty(Thread_drq_ready);
1516 //LOG_MSG_3VAL(this, "xdrq", state(), ret, cpu_lock.test());
1519 * When the context is marked as dead (Thread_dead), we must not execute
1520 * any usual context code; DRQ handlers, however, may run.
1522 if (state() & Thread_dead)
1524 // so disable the context after handling all DRQs and flag a reschedule.
1525 state_del_dirty(Thread_ready_mask);
1529 return ret || !(state() & Thread_ready_mask);
1534 * \brief Get the queue item of the context.
1535 * \pre The context must currently not be in any queue.
1536 * \return The queue item of the context.
1538 * The queue item can be used to enqueue the context in a Queue.
1539 * A context must be in at most one queue at a time.
1540 * To figure out the context corresponding to a queue item,
1541 * context_of() can be used.
1543 PUBLIC inline NEEDS["kdb_ke.h"]
1545 Context::queue_item()
1551 * \brief DRQ handler for state_change.
1553 * This function basically wraps Context::state_change().
1557 Context::handle_drq_state_change(Drq * /*src*/, Context *self, void * _rq)
1559 State_request *rq = reinterpret_cast<State_request*>(_rq);
1560 self->state_change_dirty(rq->del, rq->add);
1561 //LOG_MSG_3VAL(c, "dsta", c->state(), (Mword)src, (Mword)_rq);
1567 * \brief Queue a DRQ for changing the contexts state.
1568 * \param mask bit mask for the state (state &= mask).
1569 * \param add bits to add to the state (state |= add).
1570 * \note This function is a preemption point.
1572 * This function must be used to change the state of contexts that are
1573 * potentially running on a different CPU.
1575 PUBLIC inline NEEDS[Context::drq]
1577 Context::drq_state_change(Mword mask, Mword add)
1579 if (current() == this)
1581 state_change_dirty(mask, add);
1588 drq(handle_drq_state_change, &rq);
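// Illustrative call (a sketch; 'victim' is a hypothetical remote context):
//
//   victim->drq_state_change(~Thread_ready_mask, Thread_dead);
//
// If 'victim' is the current context the bits are changed directly via
// state_change_dirty(); otherwise a State_request is queued as a DRQ and the
// caller blocks until the target has executed it.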
1593 * \brief Initiate a DRQ for the context.
1594 * \pre \a src must be the currently running context.
1595 * \param src the source of the DRQ (the context who initiates the DRQ).
1596 * \param func the DRQ handler.
1597 * \param arg the argument for the DRQ handler.
1598 * \param reply the reply handler (called in the context of \a src immediately
1599 * after receiving a successful reply).
1601 * DRQs are requests that any context can queue to any other context. DRQs are
1602 * the basic mechanism for initiating actions on remote CPUs in an MP system,
1603 * but they are also allowed locally.
1604 * DRQ handlers of pending DRQs are executed by Context::handle_drq() in the
1605 * context of the target context. Context::handle_drq() is basically called
1606 * after switching to a context in Context::switch_exec_locked().
1608 * This function enqueues a DRQ and blocks the current context for a reply DRQ.
1610 PUBLIC inline NEEDS[Context::enqueue_drq]
1612 Context::drq(Drq *drq, Drq::Request_func *func, void *arg,
1613 Drq::Request_func *reply = 0,
1614 Drq::Exec_mode exec = Drq::Target_ctxt,
1615 Drq::Wait_mode wait = Drq::Wait)
1617 // printf("CPU[%2u:%p]: > Context::drq(this=%p, src=%p, func=%p, arg=%p)\n", current_cpu(), current(), this, src, func,arg);
1618 Context *cur = current();
1619 LOG_TRACE("DRQ Stuff", "drq", cur, __context_drq_log_fmt,
1620 Drq_log *l = tbe->payload<Drq_log>();
1622 l->func = (void*)func;
1623 l->reply = (void*)reply;
1625 l->target_cpu = cpu();
1628 //assert_kdb (current() == src);
1629 assert_kdb (!(wait == Drq::Wait && (cur->state() & Thread_drq_ready)) || cur->cpu() == cpu());
1630 assert_kdb (!((wait == Drq::Wait || drq == &_drq) && cur->state() & Thread_drq_wait));
1631 assert_kdb (!drq->queued());
1636 cur->state_add(wait == Drq::Wait ? Thread_drq_wait : 0);
1639 enqueue_drq(drq, exec);
1641 //LOG_MSG_3VAL(src, "<drq", src->state(), Mword(this), 0);
1642 while (wait == Drq::Wait && cur->state() & Thread_drq_wait)
1644 cur->state_del(Thread_ready_mask);
1648 LOG_TRACE("DRQ Stuff", "drq", cur, __context_drq_log_fmt,
1649 Drq_log *l = tbe->payload<Drq_log>();
1651 l->func = (void*)func;
1652 l->reply = (void*)reply;
1654 l->target_cpu = cpu();
1656 //LOG_MSG_3VAL(src, "drq>", src->state(), Mword(this), 0);
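// Illustrative sketch of the DRQ protocol (handler and variable names are
// hypothetical, not taken from this source): a Drq::Request_func receives
// the queued Drq, the target context and the opaque argument, and returns a
// combination of Drq::Need_resched and Drq::No_answer.
//
//   static unsigned wakeup_handler(Context::Drq *, Context *target, void *)
//   {
//     target->state_add_dirty(Thread_ready);  // runs on the target's CPU
//     return Context::Drq::Need_resched;
//   }
//
//   // queue it on a (possibly remote) context and wait for the reply DRQ:
//   remote->drq(wakeup_handler, 0);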
1661 Context::kernel_context_drq(Drq::Request_func *func, void *arg,
1662 Drq::Request_func *reply = 0)
1664 char align_buffer[2*sizeof(Drq)];
1665 Drq *mdrq = (Drq*)((Address(align_buffer) + __alignof__(Drq) - 1) & ~(__alignof__(Drq)-1));
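// align_buffer is twice the size of a Drq so that mdrq can be rounded up to
// the next __alignof__(Drq) boundary within it; adding (alignment - 1) and
// masking off the low bits is the usual round-up idiom for a raw byte buffer.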
1669 mdrq->reply = reply;
1670 Context *kc = kernel_context(current_cpu());
1671 kc->_drq_q.enq(mdrq);
1672 bool resched = schedule_switch_to_locked(kc);
1676 PUBLIC inline NEEDS[Context::drq]
1678 Context::drq(Drq::Request_func *func, void *arg,
1679 Drq::Request_func *reply = 0,
1680 Drq::Exec_mode exec = Drq::Target_ctxt,
1681 Drq::Wait_mode wait = Drq::Wait)
1682 { return drq(&current()->_drq, func, arg, reply, exec, wait); }
1686 Context::rcu_unblock(Rcu_item *i)
1688 assert_kdb(cpu_lock.test());
1689 Context *const c = static_cast<Context*>(i);
1690 c->state_change_dirty(~Thread_waiting, Thread_ready);
1691 c->sched()->deblock(c->cpu());
1697 Context::recover_jmp_buf(jmp_buf *b)
1698 { _recover_jmpbuf = b; }
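// Illustrative usage pattern (a sketch; the surrounding code is hypothetical):
// a kernel path that touches user memory installs a recovery buffer first, so
// the page-fault path can longjmp() back if the access faults.
//
//   jmp_buf pf_recovery;
//   if (!setjmp(pf_recovery))
//     {
//       current()->recover_jmp_buf(&pf_recovery);
//       // ... access user memory here ...
//     }
//   current()->recover_jmp_buf(0);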
1702 Context::xcpu_tlb_flush(...)
1704 // This should always be optimized away
1708 //----------------------------------------------------------------------------
1709 IMPLEMENTATION [!mp]:
1716 Context::cpu(bool = false) const
1722 Context::remote_ready_enqueue()
1724 WARN("Context::remote_ready_enqueue(): in UP system !\n");
1725 kdb_ke("Fiasco BUG");
1730 Context::enqueue_drq(Drq *rq, Drq::Exec_mode)
1732 bool sched = _drq_q.execute_request(rq, Drq_q::No_drop, true);
1733 if (!in_ready_list() && (state() & Thread_ready_mask))
1743 PRIVATE inline NOEXPORT
1745 Context::shutdown_drqs()
1746 { _drq_q.handle_requests(Drq_q::Drop); }
1753 // The UP case does not need to block for the next grace period, because
1754 // the CPU is always in a quiescent state whenever interrupts are enabled
1757 PUBLIC static inline
1759 Context::xcpu_tlb_flush(bool, Mem_space *, Mem_space *)
1764 //----------------------------------------------------------------------------
1768 #include "queue_item.h"
1770 EXTENSION class Context
1774 class Pending_rqq : public Queue
1777 static void enq(Context *c);
1778 bool handle_requests(Context **);
1781 class Pending_rq : public Queue_item, public Context_member
1785 static Per_cpu<Pending_rqq> _pending_rqq;
1786 static Per_cpu<Drq_q> _glbl_drq_q;
1792 //----------------------------------------------------------------------------
1793 IMPLEMENTATION [mp]:
1795 #include "globals.h"
1798 #include "lock_guard.h"
1801 Per_cpu<Context::Pending_rqq> DEFINE_PER_CPU Context::_pending_rqq;
1802 Per_cpu<Context::Drq_q> DEFINE_PER_CPU Context::_glbl_drq_q;
1806 * \brief Enqueue the given \a c into its CPU's queue.
1807 * \param c the context to enqueue for DRQ handling.
1809 IMPLEMENT inline NEEDS["globals.h", "lock_guard.h", "kdb_ke.h"]
1811 Context::Pending_rqq::enq(Context *c)
1813 // FIXME: is it safe to do the check without a locked queue, or may
1814 // we lose DRQs then?
1816 //if (!c->_pending_rq.queued())
1818 Queue &q = Context::_pending_rqq.cpu(c->cpu());
1819 Lock_guard<Inner_lock> guard(q.q_lock());
1820 if (c->_pending_rq.queued())
1822 q.enqueue(&c->_pending_rq);
1828 * \brief Wake up all contexts with pending DRQs.
1830 * This function wakes up all contexts from the pending queue.
1834 Context::Pending_rqq::handle_requests(Context **mq)
1836 //LOG_MSG_3VAL(current(), "phq", current_cpu(), 0, 0);
1837 // printf("CPU[%2u:%p]: Context::Pending_rqq::handle_requests() this=%p\n", current_cpu(), current(), this);
1838 bool resched = false;
1839 Context *curr = current();
1844 Lock_guard<Inner_lock> guard(q_lock());
1848 check_kdb (dequeue(qi, Queue_item::Ok));
1850 Context *c = static_cast<Context::Pending_rq *>(qi)->context();
1851 //LOG_MSG_3VAL(c, "pick", c->state(), c->cpu(), current_cpu());
1852 // Drop migrated threads
1853 assert_kdb (EXPECT_FALSE(c->cpu() == current_cpu()));
1855 if (EXPECT_TRUE(c->drq_pending()))
1856 c->state_add(Thread_drq_ready);
1858 if (EXPECT_FALSE(c->_migration_rq.pending))
1862 c->initiate_migration();
1872 c->try_finish_migration();
1874 if (EXPECT_TRUE((c->state() & Thread_ready_mask)))
1876 //printf("CPU[%2u:%p]: Context::Pending_rqq::handle_requests() dequeued %p(%u)\n", current_cpu(), current(), c, qi->queued());
1877 resched |= c->sched()->deblock(current_cpu(), current()->sched(), false);
1884 Context::global_drq(unsigned cpu, Drq::Request_func *func, void *arg,
1885 Drq::Request_func *reply = 0, bool wait = true)
1887 assert_kdb (this == current());
1893 state_add(wait ? Thread_drq_wait : 0);
1895 _glbl_drq_q.cpu(cpu).enq(&_drq);
1897 Ipi::cpu(cpu).send(Ipi::Global_request);
1899 //LOG_MSG_3VAL(src, "<drq", src->state(), Mword(this), 0);
1900 while (wait && (state() & Thread_drq_wait))
1902 state_del(Thread_ready_mask);
1910 Context::handle_global_requests()
1912 return _glbl_drq_q.cpu(current_cpu()).handle_requests();
1917 Context::enqueue_drq(Drq *rq, Drq::Exec_mode /*exec*/)
1919 assert_kdb (cpu_lock.test());
1920 // printf("CPU[%2u:%p]: Context::enqueue_request(this=%p, src=%p, func=%p, arg=%p)\n", current_cpu(), current(), this, src, func,arg);
1922 if (cpu() != current_cpu())
1927 // read the CPU again, we may have been migrated meanwhile
1928 unsigned cpu = this->cpu();
1931 Queue &q = Context::_pending_rqq.cpu(cpu);
1932 Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());
1935 // migrated between getting the lock and reading the CPU, so the
1936 // new CPU is responsible for executing our request
1937 if (this->cpu() != cpu)
1943 if (!_pending_rq.queued())
1944 q.enqueue(&_pending_rq);
1949 //LOG_MSG_3VAL(this, "sipi", current_cpu(), cpu(), (Mword)current());
1950 Ipi::cpu(cpu).send(Ipi::Request);
1954 { // LOG_MSG_3VAL(this, "adrq", state(), (Mword)current(), (Mword)rq);
1956 bool sched = _drq_q.execute_request(rq, Drq_q::No_drop, true);
1957 if (!in_ready_list() && (state() & Thread_ready_mask))
1969 PRIVATE inline NOEXPORT
1971 Context::shutdown_drqs()
1973 if (_pending_rq.queued())
1975 Lock_guard<Pending_rqq::Inner_lock> guard(_pending_rq.queue()->q_lock());
1976 if (_pending_rq.queued())
1977 _pending_rq.queue()->dequeue(&_pending_rq, Queue_item::Ok);
1980 _drq_q.handle_requests(Drq_q::Drop);
1986 Context::cpu(bool running = false) const
1994 * DRQ handler used for a remote-CPU ready enqueue.
1996 * See remote_ready_enqueue().
2000 Context::handle_remote_ready_enqueue(Drq *, Context *self, void *)
2002 self->state_add_dirty(Thread_ready);
2007 PROTECTED inline NEEDS[Context::handle_remote_ready_enqueue]
2009 Context::remote_ready_enqueue()
2010 { drq(&handle_remote_ready_enqueue, 0); }
2015 * Block and wait for the next grace period.
2017 PUBLIC inline NEEDS["cpu_lock.h", "lock_guard.h"]
2021 Lock_guard<Cpu_lock> guard(&cpu_lock);
2022 state_change_dirty(~Thread_ready, Thread_waiting);
2023 Rcu::call(this, &rcu_unblock);
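// rcu_unblock() (see above) is the callback invoked after the grace period:
// it switches the context from Thread_waiting back to Thread_ready and
// deblocks its scheduling context, so rcu_wait() sleeps until the next grace
// period has elapsed.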
2031 Context::handle_remote_tlb_flush(Drq *, Context *, void *_s)
2033 Mem_space **s = (Mem_space **)_s;
2034 Mem_space::tlb_flush_spaces((bool)s[0], s[1], s[2]);
2041 Context::xcpu_tlb_flush(bool flush_all_spaces, Mem_space *s1, Mem_space *s2)
2043 Lock_guard<Cpu_lock> g(&cpu_lock);
2044 Mem_space *s[3] = { (Mem_space *)flush_all_spaces, s1, s2 };
2045 unsigned ccpu = current_cpu();
2046 for (unsigned i = 0; i < Config::Max_num_cpus; ++i)
2047 if (ccpu != i && Cpu::online(i))
2048 current()->global_drq(i, Context::handle_remote_tlb_flush, s);
2052 //----------------------------------------------------------------------------
2053 IMPLEMENTATION [fpu && !ux]:
2058 * When switching away from the FPU owner, disable the FPU to cause
2059 * the next FPU access to trap.
2060 * When switching back to the FPU owner, enable the FPU so we don't
2061 * get an FPU trap on FPU access.
2063 IMPLEMENT inline NEEDS ["fpu.h"]
2065 Context::switch_fpu(Context *t)
2067 if (Fpu::is_owner(cpu(), this))
2069 else if (Fpu::is_owner(cpu(), t) && !(t->state() & Thread_vcpu_fpu_disabled))
2073 //----------------------------------------------------------------------------
2074 IMPLEMENTATION [!fpu]:
2078 Context::switch_fpu(Context *)
2081 //----------------------------------------------------------------------------
2082 IMPLEMENTATION [ux]:
2086 Context::boost_idle_prio(unsigned _cpu)
2088 // Boost the prio of the idle thread so that it can actually get some
2089 // CPU and take down the system.
2090 kernel_context(_cpu)->ready_dequeue();
2091 kernel_context(_cpu)->sched()->set_prio(255);
2092 kernel_context(_cpu)->ready_enqueue();
2095 // --------------------------------------------------------------------------
2096 IMPLEMENTATION [debug]:
2098 #include "kobject_dbg.h"
2102 Context::drq_log_fmt(Tb_entry *e, int maxlen, char *buf)
2104 Drq_log *l = e->payload<Drq_log>();
2105 return snprintf(buf, maxlen, "drq %s(%s) to ctxt=%lx/%p (func=%p, reply=%p) cpu=%u",
2106 l->type, l->wait ? "wait" : "no-wait", Kobject_dbg::pointer_to_id(l->thread),
2107 l->thread, l->func, l->reply, l->target_cpu);