3 #include <csetjmp> // typedef jmp_buf
7 #include "continuation.h"
11 #include "member_offs.h"
12 #include "per_cpu_data.h"
14 #include "queue_item.h"
16 #include "sched_context.h"
18 #include "spin_lock.h"
20 #include <fiasco_defs.h>
30 explicit Context_ptr(unsigned long id) : _t(id) {}
32 Context_ptr(Context_ptr const &o) : _t(o._t) {}
33 Context_ptr const &operator = (Context_ptr const &o)
34 { _t = o._t; return *this; }
36 Kobject_iface *ptr(Space *, unsigned char *) const;
38 bool is_kernel() const { return false; }
39 bool is_valid() const { return _t != ~0UL; }
41 // only for debugging use
42 Mword raw() const { return _t;}
49 template< typename T >
50 class Context_ptr_base : public Context_ptr
53 enum Invalid_type { Invalid };
54 explicit Context_ptr_base(Invalid_type) : Context_ptr(0) {}
55 explicit Context_ptr_base(unsigned long id) : Context_ptr(id) {}
57 Context_ptr_base(Context_ptr_base<T> const &o) : Context_ptr(o) {}
58 template< typename X >
59 Context_ptr_base(Context_ptr_base<X> const &o) : Context_ptr(o)
60 { X*x = 0; T*t = x; (void)t; } // compile-time check: X* must convert to T*
62 Context_ptr_base<T> const &operator = (Context_ptr_base<T> const &o)
63 { Context_ptr::operator = (o); return *this; }
65 template< typename X >
66 Context_ptr_base<T> const &operator = (Context_ptr_base<X> const &o)
67 { X*x=0; T*t=x; (void)t; Context_ptr::operator = (o); return *this; } // compile-time check: X* must convert to T*
69 //T *ptr(Space *s) const { return static_cast<T*>(Context_ptr::ptr(s)); }
72 class Context_space_ref
75 typedef Spin_lock_coloc<Space *> Space_n_lock;
82 Space *space() const { return _s.get_unused(); }
83 Space_n_lock *lock() { return &_s; }
84 Address user_mode() const { return _v & 1; }
85 Space *vcpu_user() const { return reinterpret_cast<Space*>(_v & ~3); }
86 Space *vcpu_aware() const { return user_mode() ? vcpu_user() : space(); }
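// Note: _s apparently co-locates the kernel Space pointer in the unused
// bits of the spin lock, while _v packs the vCPU user-space pointer with
// the user-mode flag in bit 0 (the ~3 mask suggests bit 1 is reserved,
// too). vcpu_aware() therefore yields the user Space while the vCPU runs
// in user mode and the kernel Space otherwise.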
88 void space(Space *s) { _s.set_unused(s); }
89 void vcpu_user(Space *s) { _v = (Address)s; }
90 void user_mode(bool enable)
99 /** An execution context. A context is a runnable, schedulable activity.
100 It carries along some state used by other subsystems: A lock count,
101 and stack-element forward/next pointers.
108 friend class Jdb_thread_list;
109 friend class Context_ptr;
110 friend class Jdb_utcb;
113 virtual void finish_migration() = 0;
114 virtual bool initiate_migration() = 0;
124 * \brief Encapsulate an aggregate of Context.
126 * Allows getting a back reference to the aggregating Context object.
131 Context_member(Context_member const &);
136 * \brief Get the aggregating Context object.
138 Context *context() const;
142 * \brief Deferred Request.
144 * Represents a request that can be queued for each Context
145 * and is executed by the target context just after switching to the
148 class Drq : public Queue_item, public Context_member
151 typedef unsigned (Request_func)(Drq *, Context *target, void *);
152 enum { Need_resched = 1, No_answer = 2 };
153 enum Wait_mode { No_wait = 0, Wait = 1 };
154 enum Exec_mode { Target_ctxt = 0, Any_ctxt = 1 };
155 // enum State { Idle = 0, Handled = 1, Reply_handled = 2 };
164 * \brief Queue for deferred requests (Drq).
166 * A FIFO queue that each Context aggregates to queue incoming Drqs
167 * that have to be executed directly after switching to a context.
169 class Drq_q : public Queue, public Context_member
172 enum Drop_mode { Drop = true, No_drop = false };
174 bool dequeue(Drq *drq, Queue_item::Status reason);
175 bool handle_requests(Drop_mode drop = No_drop);
176 bool execute_request(Drq *r, Drop_mode drop, bool local);
182 L4_sched_param const *sp;
185 Migration() : in_progress(false) {}
189 class Ku_mem_ptr : public Context_member
194 typename User<T>::Ptr _u;
198 Ku_mem_ptr() : _u(0), _k(0) {}
199 Ku_mem_ptr(typename User<T>::Ptr const &u, T *k) : _u(u), _k(k) {}
201 void set(typename User<T>::Ptr const &u, T *k)
204 T *access(bool is_current = false) const
206 // assert_kdb (!is_current || current() == context());
208 && (int)Config::Access_user_mem == Config::Access_user_mem_direct)
211 unsigned const cpu = current_cpu();
212 if ((int)Config::Access_user_mem == Config::Must_access_user_mem_direct
213 && cpu == context()->cpu()
214 && Mem_space::current_mem_space(cpu) == context()->space())
219 typename User<T>::Ptr usr() const { return _u; }
220 T* kern() const { return _k; }
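// Note: _u appears to be the location as seen from user space and _k the
// kernel alias of the same memory. access() returns the user-visible
// pointer only when user memory may be accessed directly and this context
// is current on this CPU; otherwise it falls back to the kernel mapping.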
225 * Definition of different scheduling modes
229 Periodic = 0x1, ///< 0 = Conventional, 1 = Periodic
230 Nonstrict = 0x2, ///< 0 = Strictly Periodic, 1 = Non-strictly periodic
234 * Definition of different helping modes
244 * Return consumed CPU time.
245 * @return Consumed CPU time in usecs
247 Cpu_time consumed_time();
249 virtual bool kill() = 0;
251 void spill_user_state();
252 void fill_user_state();
254 Space * FIASCO_PURE space() const { return _space.space(); }
255 Mem_space * FIASCO_PURE mem_space() const { return static_cast<Mem_space*>(space()); }
259 * Update consumed CPU time during each context switch and when
260 * reading out the current thread's consumed CPU time.
262 void update_consumed_time();
266 Ku_mem_ptr<Utcb> _utcb;
270 friend class Jdb_tcb;
272 /// low level page table switching stuff
273 void switchin_context(Context *) asm ("switchin_context_label") FIASCO_FASTCALL;
275 /// low level fpu switching stuff
276 void switch_fpu(Context *t);
278 /// low level cpu switching stuff
279 void switch_cpu(Context *t);
282 Context_space_ref _space;
289 // how many locks does this thread hold on other threads
290 // incremented in Thread::lock, decremented in Thread::clear
291 // Thread::kill needs to know
295 // The scheduling parameters. We would only need to keep an
296 // anonymous reference to them as we do not need them ourselves, but
297 // we aggregate them for performance reasons.
298 Sched_context _sched_context;
299 Sched_context *_sched;
303 // Pointer to floating point register state
304 Fpu_state _fpu_state;
305 // Implementation-specific consumed CPU time (TSC ticks or usecs)
306 Clock::Time _consumed_time;
312 // for trigger_exception
313 Continuation _exc_cont;
315 jmp_buf *_recover_jmpbuf; // setjmp buffer for page-fault recovery
317 Migration *_migration;
318 bool _need_to_finish_migration;
321 void arch_load_vcpu_kern_state(Vcpu_state *vcpu, bool do_load);
324 void arch_load_vcpu_user_state(Vcpu_state *vcpu, bool do_load);
325 void arch_update_vcpu_state(Vcpu_state *vcpu);
327 // XXX Timeout for both sender and receiver! Normally we would have to
328 // define separate timeouts in Receiver and Sender, but because only one
329 // timeout can be set at a time we use the same timeout. The timeout
330 // has to be defined here because Dirq::hit has to be able to reset the
331 // timeout (Irq::_irq_thread is of type Receiver).
335 static Per_cpu<Clock> _clock;
336 static Per_cpu<Context *> _kernel_ctxt;
342 #include "tb_entry.h"
344 EXTENSION class Context
347 struct Drq_log : public Tb_entry
354 enum class Type { Send, Do_request, Do_reply, Done } type;
356 unsigned print(int max, char *buf) const;
357 Group_order has_partner() const
361 case Type::Send: return Group_order::first();
362 case Type::Done: return Group_order::last();
363 case Type::Do_request: return Group_order(1);
364 case Type::Do_reply: return Group_order(2);
366 return Group_order::none();
369 Group_order is_partner(Drq_log const *o) const
371 if (rq != o->rq || func != o->func || reply != o->reply)
372 return Group_order::none();
374 return o->has_partner();
379 struct Vcpu_log : public Tb_entry
388 unsigned print(int max, char *buf) const;
392 // --------------------------------------------------------------------------
398 #include "cpu_lock.h"
399 #include "entry_frame.h"
401 #include "globals.h" // current()
403 #include "lock_guard.h"
406 #include "mem_layout.h"
407 #include "processor.h"
409 #include "std_macros.h"
410 #include "thread_state.h"
414 DEFINE_PER_CPU Per_cpu<Clock> Context::_clock(true);
415 DEFINE_PER_CPU Per_cpu<Context *> Context::_kernel_ctxt;
417 IMPLEMENT inline NEEDS["kdb_ke.h"]
418 Kobject_iface * __attribute__((nonnull(1, 2)))
419 Context_ptr::ptr(Space *s, unsigned char *rights) const
421 assert_kdb (cpu_lock.test());
423 return static_cast<Obj_space*>(s)->lookup_local(_t, rights);
430 /** Initialize a context. After setup, a switch_exec to this context results
431 in a return to user code using the return registers at regs(). The
432 return registers are not initialized however; neither is the space_context
433 to be used in thread switching (use set_space_context() for that).
434 @pre (_kernel_sp == 0) && (* (stack end) == 0)
435 @param thread_lock pointer to lock used to lock this context
436 @param space_context the space context
438 PUBLIC inline NEEDS ["atomic.h", "entry_frame.h", <cstdio>]
440 : _kernel_sp(reinterpret_cast<Mword*>(regs())),
444 _sched(&_sched_context),
445 _mode(Sched_mode(0)),
447 _need_to_finish_migration(false)
450 // NOTE: We do not have to synchronize the initialization of
451 // _space_context because it is constant for all concurrent
452 // invocations of this constructor. When two threads concurrently
453 // try to create a new task, they already synchronize in
454 // sys_task_new() and avoid calling us twice with different
455 // space_context arguments.
457 set_cpu_of(this, Cpu::Invalid);
462 Context::spill_fpu_if_owner()
464 // spill FPU state into memory before migration
465 if (state() & Thread_fpu_owner)
467 Fpu &f = Fpu::fpu.current();
468 if (current() != this)
482 // If this context owned the FPU, no one owns it now
483 Fpu &f = Fpu::fpu.current();
484 if (f.is_owner(this))
499 Context::check_for_current_cpu() const
501 bool r = cpu() == current_cpu() || !Cpu::online(cpu());
502 if (0 && EXPECT_FALSE(!r)) // debug output disabled
503 printf("FAIL: cpu=%u (current=%u)\n", cpu(), current_cpu());
510 Context::state(bool check = true) const
513 assert_kdb(!check || check_for_current_cpu());
519 Context::kernel_context(unsigned cpu)
520 { return _kernel_ctxt.cpu(cpu); }
522 PROTECTED static inline
524 Context::kernel_context(unsigned cpu, Context *ctxt)
525 { _kernel_ctxt.cpu(cpu) = ctxt; }
528 /** @name State manipulation */
534 * Does the context exist?
535 * @return true if this context has been initialized.
537 PUBLIC inline NEEDS ["thread_state.h"]
539 Context::exists() const
541 return state() != Thread_invalid;
545 * Is the context about to be deleted?
546 * @return true if this context is in deletion.
548 PUBLIC inline NEEDS ["thread_state.h"]
550 Context::is_invalid() const
551 { return state() == Thread_invalid; }
554 * Atomically add bits to state flags.
555 * @param bits bits to be added to state flags
556 * @return 1 if none of the bits that were added had been set before
558 PUBLIC inline NEEDS ["atomic.h"]
560 Context::state_add(Mword bits)
562 assert_kdb(check_for_current_cpu());
563 atomic_or(&_state, bits);
567 * Add bits in state flags. Unsafe (non-atomic) and
568 * fast version -- you must hold the kernel lock when you use it.
569 * @pre cpu_lock.test() == true
570 * @param bits bits to be added to state flags
574 Context::state_add_dirty(Mword bits, bool check = true)
577 assert_kdb(!check || check_for_current_cpu());
582 * Atomically delete bits from state flags.
583 * @param bits bits to be removed from state flags
584 * @return 1 if all of the bits that were removed had previously been set
586 PUBLIC inline NEEDS ["atomic.h"]
588 Context::state_del(Mword bits)
590 assert_kdb (check_for_current_cpu());
591 atomic_and(&_state, ~bits);
595 * Delete bits in state flags. Unsafe (non-atomic) and
596 * fast version -- you must hold the kernel lock when you use it.
597 * @pre cpu_lock.test() == true
598 * @param bits bits to be removed from state flags
602 Context::state_del_dirty(Mword bits, bool check = true)
605 assert_kdb(!check || check_for_current_cpu());
610 * Atomically delete and add bits in state flags, provided the
611 * following rules apply (otherwise state is not changed at all):
612 * - Bits that are to be set must be clear in state or clear in mask
613 * - Bits that are to be cleared must be set in state
614 * @param mask Bits not set in mask shall be deleted from state flags
615 * @param bits Bits to be added to state flags
616 * @return 1 if state was changed, 0 otherwise
618 PUBLIC inline NEEDS ["atomic.h"]
620 Context::state_change_safely(Mword mask, Mword bits)
622 assert_kdb (check_for_current_cpu());
628 if (old & bits & mask | ~old & ~mask)
631 while (!cas(&_state, old, old & mask | bits));
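// Worked example (illustrative, not from the original sources): with
// _state containing Thread_ready, a call such as
//   state_change_safely(~Thread_ready, Thread_waiting)
// succeeds only if Thread_waiting is currently clear and Thread_ready is
// currently set; the cas loop then atomically installs
//   (_state & ~Thread_ready) | Thread_waiting,
// re-reading and re-checking _state if another CPU modified it meanwhile.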
637 * Atomically delete and add bits in state flags.
638 * @param mask bits not set in mask shall be deleted from state flags
639 * @param bits bits to be added to state flags
641 PUBLIC inline NEEDS ["atomic.h"]
643 Context::state_change(Mword mask, Mword bits)
645 assert_kdb (check_for_current_cpu());
646 return atomic_change(&_state, mask, bits);
650 * Delete and add bits in state flags. Unsafe (non-atomic) and
651 * fast version -- you must hold the kernel lock when you use it.
652 * @pre cpu_lock.test() == true
653 * @param mask Bits not set in mask shall be deleted from state flags
654 * @param bits Bits to be added to state flags
658 Context::state_change_dirty(Mword mask, Mword bits, bool check = true)
661 assert_kdb(!check || check_for_current_cpu());
677 Context::vcpu_aware_space() const
678 { return _space.vcpu_aware(); }
680 /** Registers used when iret'ing to user mode.
681 @return return registers
683 PUBLIC inline NEEDS["cpu.h", "entry_frame.h"]
685 Context::regs() const
687 return reinterpret_cast<Entry_frame *>
688 (Cpu::stack_align(reinterpret_cast<Mword>(this) + Size)) - 1;
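// Layout note (an assumption based on the arithmetic above): the Context
// object and its kernel stack share one block of Size bytes; the
// Entry_frame holding the user-mode return registers sits at the aligned
// top of that block, so regs() simply steps back one Entry_frame from
// stack_align((Mword)this + Size).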
691 /** @name Lock counting
692 These functions count the number of locks
693 this context holds. A context must not be deleted if its lock
698 /** Increment lock count.
699 @post lock_cnt() > 0 */
702 Context::inc_lock_cnt()
707 /** Decrement lock count.
712 Context::dec_lock_cnt()
722 Context::lock_cnt() const
730 * Switch active timeslice of this Context.
731 * @param next Sched_context to switch to
735 Context::switch_sched(Sched_context *next, Sched_context::Ready_queue *queue)
737 queue->switch_sched(sched(), next);
742 * Select a different context for running and activate it.
748 auto guard = lock_guard(cpu_lock);
749 assert (!Sched_context::rq.current().schedule_in_progress);
753 // Ensure only the current thread calls schedule
754 assert_kdb (this == current());
756 unsigned current_cpu = ~0U;
757 Sched_context::Ready_queue *rq = 0;
759 // Enqueue current thread into ready-list to schedule correctly
762 // Select a thread for scheduling.
763 Context *next_to_run;
767 // I may have been migrated during the switch_exec_locked in the while
768 // statement below, so check whether I have to use a new ready queue.
770 unsigned new_cpu = access_once(&_cpu);
771 if (new_cpu != current_cpu)
774 current_cpu = new_cpu;
775 rq = &Sched_context::rq.current();
776 if (rq->schedule_in_progress)
783 next_to_run = rq->next_to_run()->context();
785 // Ensure ready-list sanity
786 assert_kdb (next_to_run);
788 if (EXPECT_TRUE (next_to_run->state() & Thread_ready_mask))
791 rq->ready_dequeue(next_to_run->sched());
793 rq->schedule_in_progress = this;
799 // check if we've been migrated meanwhile
800 if (EXPECT_FALSE(current_cpu != access_once(&_cpu)))
804 rq = &Sched_context::rq.current();
805 if (rq->schedule_in_progress)
809 rq->schedule_in_progress = 0;
812 while (EXPECT_FALSE(schedule_switch_to_locked(next_to_run)));
818 Context::schedule_if(bool s)
820 if (!s || Sched_context::rq.current().schedule_in_progress)
828 * Return Context's Sched_context with id 'id'; return time slice 0 as default.
829 * @return Sched_context with id 'id' or 0
833 Context::sched_context(unsigned short const id = 0) const
835 if (EXPECT_TRUE (!id))
836 return const_cast<Sched_context*>(&_sched_context);
838 for (Sched_context *tmp = _sched_context.next();
839 tmp != &_sched_context; tmp = tmp->next())
847 * Return Context's currently active Sched_context.
848 * @return Active Sched_context
852 Context::sched() const
858 * Set Context's currently active Sched_context.
859 * @param sched Sched_context to be activated
863 Context::set_sched(Sched_context * const sched)
869 * Return Context's real-time period length.
870 * @return Period length in usecs
874 Context::period() const
880 * Set Context's real-time period length.
881 * @param period New period length in usecs
885 Context::set_period(Unsigned64 const period)
891 * Return Context's scheduling mode.
892 * @return Scheduling mode
896 Context::mode() const
902 * Set Context's scheduling mode.
903 * @param mode New scheduling mode
907 Context::set_mode(Context::Sched_mode const mode)
914 // XXX for now, synchronize with global kernel lock
918 * Enqueue current() if ready to fix up ready-list invariant.
920 PRIVATE inline NOEXPORT
922 Context::update_ready_list()
924 assert_kdb (this == current());
926 if ((state() & Thread_ready_mask) && sched()->left())
927 Sched_context::rq.current().ready_enqueue(sched());
931 * Check if Context is in ready-list.
932 * @return 1 if thread is in ready-list, 0 otherwise
936 Context::in_ready_list() const
938 return sched()->in_ready_list();
943 * \brief Activate a newly created thread.
945 * This function puts a new thread onto the ready list and switches to
946 * the thread if it can preempt the currently running thread.
952 auto guard = lock_guard(cpu_lock);
953 if (cpu() == current_cpu())
955 state_add_dirty(Thread_ready);
956 if (Sched_context::rq.current().deblock(sched(), current()->sched(), true))
958 current()->switch_to_locked(this);
963 remote_ready_enqueue();
969 /** Helper. Context that helps us by donating its time to us. It is
970 set by switch_exec() if the calling thread says so.
971 @return context that helps us and should be activated after freeing a lock.
975 Context::helper() const
983 Context::set_helper(Helping_mode const mode)
994 // don't change _helper value
999 /** Donatee. Context that receives our time slices, for example
1000 because it has locked us.
1001 @return context that should be activated instead of us when we're
1006 Context::donatee() const
1013 Context::set_donatee(Context * const donatee)
1020 Context::get_kernel_sp() const
1027 Context::set_kernel_sp(Mword * const esp)
1034 Context::fpu_state()
1040 * Add to consumed CPU time.
1041 * @param quantum Implementation-specific time quantum (TSC ticks or usecs)
1045 Context::consume_time(Clock::Time quantum)
1047 _consumed_time += quantum;
1051 * Update consumed CPU time during each context switch and when
1052 * reading out the current thread's consumed CPU time.
1054 IMPLEMENT inline NEEDS ["cpu.h"]
1056 Context::update_consumed_time()
1058 if (Config::Fine_grained_cputime)
1059 consume_time (_clock.cpu(cpu()).delta());
1062 IMPLEMENT inline NEEDS ["config.h", "cpu.h"]
1064 Context::consumed_time()
1066 if (Config::Fine_grained_cputime)
1067 return _clock.cpu(cpu()).us(_consumed_time);
1069 return _consumed_time;
1073 * Switch to scheduling context and execution context while not running under
1076 PUBLIC inline NEEDS [<cassert>]
1078 Context::switch_to(Context *t)
1080 // Call switch_to_locked if CPU lock is already held
1081 assert (!cpu_lock.test());
1083 // Grab the CPU lock
1084 auto guard = lock_guard(cpu_lock);
1086 switch_to_locked(t);
1090 * Switch scheduling context and execution context.
1091 * @param t Destination thread whose scheduling context and execution context
1092 * should be activated.
1094 PRIVATE inline NEEDS ["kdb_ke.h"]
1095 bool FIASCO_WARN_RESULT
1096 Context::schedule_switch_to_locked(Context *t)
1098 // Must be called with CPU lock held
1099 assert_kdb (cpu_lock.test());
1101 Sched_context::Ready_queue &rq = Sched_context::rq.current();
1102 // Switch to destination thread's scheduling context
1103 if (rq.current_sched() != t->sched())
1104 rq.set_current_sched(t->sched());
1106 // XXX: IPC dependency tracking belongs here.
1108 // Switch to destination thread's execution context, no helping involved
1110 return switch_exec_locked(t, Not_Helping);
1112 return handle_drq();
1115 PUBLIC inline NEEDS [Context::schedule_switch_to_locked]
1117 Context::switch_to_locked(Context *t)
1119 if (EXPECT_FALSE(schedule_switch_to_locked(t)))
1125 * Switch execution context while not running under CPU lock.
1127 PUBLIC inline NEEDS ["kdb_ke.h"]
1128 bool FIASCO_WARN_RESULT
1129 Context::switch_exec(Context *t, enum Helping_mode mode)
1131 // Call switch_exec_locked if CPU lock is already held
1132 assert_kdb (!cpu_lock.test());
1134 // Grab the CPU lock
1135 auto guard = lock_guard(cpu_lock);
1137 return switch_exec_locked(t, mode);
1143 Context::handle_helping(Context *t)
1145 // XXX: maybe we do not need this on MP, because we have no helping there
1146 assert_kdb (current() == this);
1147 // Time-slice lending: if t is locked, switch to its locker
1148 // instead; this is transitive
1149 while (t->donatee() && // target thread locked
1150 t->donatee() != t) // not by itself
1152 // Special case for Thread::kill(): If the locker is
1153 // current(), switch to the locked thread to allow it to
1154 // release other locks. Do this only when the target thread
1155 // actually owns locks.
1156 if (t->donatee() == this)
1158 if (t->lock_cnt() > 0)
1171 * Switch to a specific different execution context.
1172 * If that context is currently locked, switch to its locker instead
1173 * (except if current() is the locker)
1174 * @pre current() == this && current() != t
1175 * @param t thread that shall be activated.
1176 * @param mode helping mode; we either help, don't help or leave the
1177 * helping state unchanged
1180 bool FIASCO_WARN_RESULT //L4_IPC_CODE
1181 Context::switch_exec_locked(Context *t, enum Helping_mode mode)
1183 // Must be called with CPU lock held
1184 assert_kdb (t->cpu() != Cpu::Invalid);
1185 assert_kdb (t->cpu() == current_cpu());
1186 assert_kdb (cpu() == current_cpu());
1187 assert_kdb (cpu_lock.test());
1188 assert_kdb (current() != t);
1189 assert_kdb (current() == this);
1190 assert_kdb (timeslice_timeout.cpu(cpu())->is_set()); // Coma check
1193 Context *t_orig = t;
1196 // Time-slice lending: if t is locked, switch to its locker
1197 // instead; this is transitive
1198 t = handle_helping(t);
1201 return handle_drq();
1206 // Can only switch to ready threads!
1207 if (EXPECT_FALSE (!(t->state() & Thread_ready_mask)))
1209 assert_kdb (state() & Thread_ready_mask);
1214 // Ensure kernel stack pointer is non-null if thread is ready
1215 assert_kdb (t->_kernel_sp);
1217 t->set_helper(mode);
1219 update_ready_list();
1220 assert_kdb (!(state() & Thread_ready_mask) || !sched()->left()
1221 || in_ready_list());
1226 return handle_drq();
1229 PUBLIC inline NEEDS[Context::switch_exec_locked, Context::schedule]
1231 Context::switch_exec_schedule_locked(Context *t, enum Helping_mode mode)
1233 if (EXPECT_FALSE(switch_exec_locked(t, mode)))
1238 Context::Ku_mem_ptr<Utcb> const &
1239 Context::utcb() const
1242 IMPLEMENT inline NEEDS["globals.h"]
1244 Context::Context_member::context() const
1245 { return context_of(this); }
1247 IMPLEMENT inline NEEDS["lock_guard.h", "kdb_ke.h"]
1249 Context::Drq_q::enq(Drq *rq)
1251 assert_kdb(cpu_lock.test());
1252 auto guard = lock_guard(q_lock());
1258 Context::do_drq_reply(Drq *r, Drq_q::Drop_mode drop)
1260 state_change_dirty(~Thread_drq_wait, Thread_ready);
1261 // r->state = Drq::Reply_handled;
1262 if (drop == Drq_q::No_drop && r->reply)
1263 return r->reply(r, this, r->arg) & Drq::Need_resched;
1268 IMPLEMENT inline NEEDS[Context::do_drq_reply]
1270 Context::Drq_q::execute_request(Drq *r, Drop_mode drop, bool local)
1272 bool need_resched = false;
1273 Context *const self = context();
1274 // printf("CPU[%2u:%p]: context=%p: handle request for %p (func=%p, arg=%p)\n", current_cpu(), current(), context(), r->context(), r->func, r->arg);
1275 if (r->context() == self)
1277 LOG_TRACE("DRQ handling", "drq", current(), Drq_log,
1278 l->type = Drq_log::Type::Do_reply;
1280 l->func = (void*)r->func;
1281 l->reply = (void*)r->reply;
1282 l->thread = r->context();
1283 l->target_cpu = current_cpu();
1286 //LOG_MSG_3VAL(current(), "hrP", current_cpu() | (drop ? 0x100: 0), (Mword)r->context(), (Mword)r->func);
1287 return self->do_drq_reply(r, drop);
1291 LOG_TRACE("DRQ handling", "drq", current(), Drq_log,
1292 l->type = Drq_log::Type::Do_request;
1294 l->func = (void*)r->func;
1295 l->reply = (void*)r->reply;
1296 l->thread = r->context();
1297 l->target_cpu = current_cpu();
1300 // r->state = Drq::Idle;
1301 unsigned answer = 0;
1302 //LOG_MSG_3VAL(current(), "hrq", current_cpu() | (drop ? 0x100: 0), (Mword)r->context(), (Mword)r->func);
1303 if (EXPECT_TRUE(drop == No_drop && r->func))
1304 answer = r->func(r, self, r->arg);
1305 else if (EXPECT_FALSE(drop == Drop))
1306 // flag DRQ abort for requester
1308 // LOG_MSG_3VAL(current(), "hrq-", answer, current()->state() /*(Mword)r->context()*/, (Mword)r->func);
1309 need_resched |= answer & Drq::Need_resched;
1310 //r->state = Drq::Handled;
1313 if (!(answer & Drq::No_answer))
1316 return r->context()->do_drq_reply(r, drop) || need_resched;
1318 need_resched |= r->context()->enqueue_drq(r, Drq::Target_ctxt);
1321 return need_resched;
1324 IMPLEMENT inline NEEDS["lock_guard.h"]
1326 Context::Drq_q::dequeue(Drq *drq, Queue_item::Status reason)
1328 auto guard = lock_guard(q_lock());
1331 return Queue::dequeue(drq, reason);
1334 IMPLEMENT inline NEEDS["mem.h", "lock_guard.h"]
1336 Context::Drq_q::handle_requests(Drop_mode drop)
1338 // printf("CPU[%2u:%p]: > Context::Drq_q::handle_requests() context=%p\n", current_cpu(), current(), context());
1339 bool need_resched = false;
1344 auto guard = lock_guard(q_lock());
1347 return need_resched;
1349 check_kdb (Queue::dequeue(qi, Queue_item::Ok));
1352 Drq *r = static_cast<Drq*>(qi);
1353 // printf("CPU[%2u:%p]: context=%p: handle request for %p (func=%p, arg=%p)\n", current_cpu(), current(), context(), r->context(), r->func, r->arg);
1354 need_resched |= execute_request(r, drop, false);
1358 * \brief Forced dequeue from lock wait queue, or DRQ queue.
1362 Context::force_dequeue()
1364 Queue_item *const qi = queue_item();
1368 // we're waiting for a lock or have a DRQ pending
1369 Queue *const q = qi->queue();
1371 auto guard = lock_guard(q->q_lock());
1372 // check again, with the queue lock held.
1373 // NOTE: we may be already removed from the queue on another CPU
1374 if (qi->queued() && qi->queue())
1376 // we must never be taken from one queue to another on a
1378 assert_kdb(q == qi->queue());
1379 // pull myself out of the queue, mark reason as invalidation
1380 q->dequeue(qi, Queue_item::Invalid);
1387 * \brief Dequeue from lock and DRQ queues, abort pending DRQs
1391 Context::shutdown_queues()
1399 * \brief Check for pending DRQs.
1400 * \return true if there are DRQs pending, false if not.
1404 Context::drq_pending() const
1405 { return _drq_q.first(); }
1409 Context::try_finish_migration()
1411 if (EXPECT_FALSE(_need_to_finish_migration))
1413 _need_to_finish_migration = false;
1420 * \brief Handle all pending DRQs.
1421 * \pre cpu_lock.test() (The CPU lock must be held).
1422 * \pre current() == this (only the currently running context is allowed to
1423 * call this function).
1424 * \return true if re-scheduling is needed (ready queue has changed),
1429 Context::handle_drq()
1431 assert_kdb (check_for_current_cpu());
1432 assert_kdb (cpu_lock.test());
1434 try_finish_migration();
1436 if (EXPECT_TRUE(!drq_pending()))
1440 bool ret = _drq_q.handle_requests();
1441 state_del_dirty(Thread_drq_ready);
1443 //LOG_MSG_3VAL(this, "xdrq", state(), ret, cpu_lock.test());
1446 * When the context is marked as dead (Thread_dead) we must not execute
1447 * any usual context code; however, DRQ handlers may run.
1449 if (state() & Thread_dead)
1451 // so disable the context after handling all DRQs and flag a reschedule.
1452 state_del_dirty(Thread_ready_mask);
1456 return ret || !(state() & Thread_ready_mask);
1461 * \brief Get the queue item of the context.
1462 * \pre The context must currently not be in any queue.
1463 * \return The queue item of the context.
1465 * The queue item can be used to enqueue the context to a Queue.
1466 * A context must be in at most one queue at a time.
1467 * To figure out the context corresponding to a queue item,
1468 * context_of() can be used.
1470 PUBLIC inline NEEDS["kdb_ke.h"]
1472 Context::queue_item()
1478 * \brief DRQ handler for state_change.
1480 * This function basically wraps Context::state_change_dirty().
1484 Context::handle_drq_state_change(Drq * /*src*/, Context *self, void * _rq)
1486 State_request *rq = reinterpret_cast<State_request*>(_rq);
1487 self->state_change_dirty(rq->del, rq->add);
1488 //LOG_MSG_3VAL(c, "dsta", c->state(), (Mword)src, (Mword)_rq);
1494 * \brief Queue a DRQ for changing the contexts state.
1495 * \param mask bit mask for the state (state &= mask).
1496 * \param add bits to add to the state (state |= add).
1497 * \note This function is a preemption point.
1499 * This function must be used to change the state of contexts that are
1500 * potentially running on a different CPU.
1502 PUBLIC inline NEEDS[Context::drq]
1504 Context::drq_state_change(Mword mask, Mword add)
1506 if (current() == this)
1508 state_change_dirty(mask, add);
1515 drq(handle_drq_state_change, &rq);
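// Usage sketch (illustrative only; 'victim' is a made-up name): to strip
// the ready bits from a context that may be running on another CPU one
// could call, e.g.,
//   victim->drq_state_change(~Thread_ready_mask, 0);
// If 'victim' is the caller itself the change is applied directly above;
// otherwise a DRQ is queued and the caller waits until the target has run
// handle_drq_state_change() on its own CPU.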
1520 * \brief Initiate a DRQ for the context.
1521 * \pre \a src must be the currently running context.
1522 * \param src the source of the DRQ (the context who initiates the DRQ).
1523 * \param func the DRQ handler.
1524 * \param arg the argument for the DRQ handler.
1525 * \param reply the reply handler (called in the context of \a src immediately
1526 * after receiving a successful reply).
1528 * DRQs are requests that any context can queue to any other context. DRQs are
1529 * the basic mechanism to initiate actions on remote CPUs in an MP system,
1530 * but are also allowed locally.
1531 * DRQ handlers of pending DRQs are executed by Context::handle_drq() in the
1532 * context of the target context. Context::handle_drq() is basically called
1533 * after switching to a context in Context::switch_exec_locked().
1535 * This function enqueues a DRQ and blocks the current context for a reply DRQ.
1537 PUBLIC inline NEEDS[Context::enqueue_drq, "logdefs.h"]
1539 Context::drq(Drq *drq, Drq::Request_func *func, void *arg,
1540 Drq::Request_func *reply = 0,
1541 Drq::Exec_mode exec = Drq::Target_ctxt,
1542 Drq::Wait_mode wait = Drq::Wait)
1544 // printf("CPU[%2u:%p]: > Context::drq(this=%p, src=%p, func=%p, arg=%p)\n", current_cpu(), current(), this, src, func,arg);
1545 Context *cur = current();
1546 LOG_TRACE("DRQ Stuff", "drq", cur, Drq_log,
1547 l->type = Drq_log::Type::Send;
1549 l->func = (void*)func;
1550 l->reply = (void*)reply;
1552 l->target_cpu = cpu();
1555 //assert_kdb (current() == src);
1556 assert_kdb (!(wait == Drq::Wait && (cur->state() & Thread_drq_ready)) || cur->cpu() == cpu());
1557 assert_kdb (!((wait == Drq::Wait || drq == &_drq) && cur->state() & Thread_drq_wait));
1558 assert_kdb (!drq->queued());
1563 cur->state_add(wait == Drq::Wait ? Thread_drq_wait : 0);
1566 enqueue_drq(drq, exec);
1568 //LOG_MSG_3VAL(src, "<drq", src->state(), Mword(this), 0);
1569 while (wait == Drq::Wait && cur->state() & Thread_drq_wait)
1571 cur->state_del(Thread_ready_mask);
1575 LOG_TRACE("DRQ Stuff", "drq", cur, Drq_log,
1576 l->type = Drq_log::Type::Done;
1578 l->func = (void*)func;
1579 l->reply = (void*)reply;
1581 l->target_cpu = cpu();
1583 //LOG_MSG_3VAL(src, "drq>", src->state(), Mword(this), 0);
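// Usage sketch (illustrative only; 'flush_handler', 'remote' and 'some_arg'
// are made-up names):
//
//   static unsigned flush_handler(Drq *, Context *target, void *arg)
//   { /* runs on target's CPU in target's context */ return 0; }
//   ...
//   remote->drq(flush_handler, some_arg);  // uses the caller's _drq, Drq::Wait
//
// With the default Drq::Wait the caller sets Thread_drq_wait and schedules
// away until the target has executed the handler and the reply path has
// marked the caller ready again.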
1588 Context::kernel_context_drq(Drq::Request_func *func, void *arg,
1589 Drq::Request_func *reply = 0)
1591 char align_buffer[2*sizeof(Drq)];
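// The Drq lives in this oversized on-stack buffer: rounding the buffer
// address up to the next __alignof__(Drq) boundary (done in the placement
// new below) still leaves room for one fully aligned Drq object.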
1592 Drq *mdrq = new ((void*)((Address)(align_buffer + __alignof__(Drq) - 1) & ~(__alignof__(Drq)-1))) Drq;
1597 mdrq->reply = reply;
1598 Context *kc = kernel_context(current_cpu());
1600 kc->_drq_q.enq(mdrq);
1601 bool resched = schedule_switch_to_locked(kc);
1605 PUBLIC inline NEEDS[Context::drq]
1607 Context::drq(Drq::Request_func *func, void *arg,
1608 Drq::Request_func *reply = 0,
1609 Drq::Exec_mode exec = Drq::Target_ctxt,
1610 Drq::Wait_mode wait = Drq::Wait)
1611 { return drq(&current()->_drq, func, arg, reply, exec, wait); }
1615 Context::rcu_unblock(Rcu_item *i)
1617 assert_kdb(cpu_lock.test());
1618 Context *const c = static_cast<Context*>(i);
1619 c->state_change_dirty(~Thread_waiting, Thread_ready);
1620 Sched_context::rq.current().deblock(c->sched());
1626 Context::recover_jmp_buf(jmp_buf *b)
1627 { _recover_jmpbuf = b; }
1631 Context::xcpu_tlb_flush(...)
1633 // This should always be optimized away
1637 IMPLEMENT_DEFAULT inline
1639 Context::arch_load_vcpu_kern_state(Vcpu_state *, bool)
1642 IMPLEMENT_DEFAULT inline
1644 Context::arch_load_vcpu_user_state(Vcpu_state *, bool)
1647 IMPLEMENT_DEFAULT inline
1649 Context::arch_update_vcpu_state(Vcpu_state *)
1652 //----------------------------------------------------------------------------
1653 IMPLEMENTATION [!mp]:
1660 Context::cpu(bool running = false) const
1668 PUBLIC static inline
1670 Context::enable_tlb(unsigned)
1673 PUBLIC static inline
1675 Context::disable_tlb(unsigned)
1681 Context::remote_ready_enqueue()
1683 WARN("Context::remote_ready_enqueue(): in UP system !\n");
1684 kdb_ke("Fiasco BUG");
1689 Context::enqueue_drq(Drq *rq, Drq::Exec_mode /*exec*/)
1691 assert_kdb (cpu_lock.test());
1693 if (access_once(&_cpu) != current_cpu())
1695 bool do_sched = _drq_q.execute_request(rq, Drq_q::No_drop, true);
1696 //LOG_MSG_3VAL(this, "drqX", access_once(&_cpu), current_cpu(), state());
1697 if (access_once(&_cpu) == current_cpu() && (state() & Thread_ready_mask))
1699 Sched_context::rq.current().ready_enqueue(sched());
1705 { // LOG_MSG_3VAL(this, "adrq", state(), (Mword)current(), (Mword)rq);
1707 bool do_sched = _drq_q.execute_request(rq, Drq_q::No_drop, true);
1708 if (!in_ready_list() && (state() & Thread_ready_mask))
1710 Sched_context::rq.current().ready_enqueue(sched());
1720 PRIVATE inline NOEXPORT
1722 Context::shutdown_drqs()
1723 { _drq_q.handle_requests(Drq_q::Drop); }
1730 // The UP case does not need to block for the next grace period, because
1731 // the CPU is always in a quiescent state when the interrupts were enabled
1734 PUBLIC static inline
1736 Context::xcpu_tlb_flush(bool, Mem_space *, Mem_space *)
1741 //----------------------------------------------------------------------------
1745 #include "queue_item.h"
1747 EXTENSION class Context
1751 class Pending_rqq : public Queue
1754 static void enq(Context *c);
1755 bool handle_requests(Context **);
1758 class Pending_rq : public Queue_item, public Context_member
1762 static Per_cpu<Pending_rqq> _pending_rqq;
1763 static Per_cpu<Drq_q> _glbl_drq_q;
1764 static Cpu_mask _tlb_active;
1770 //----------------------------------------------------------------------------
1771 IMPLEMENTATION [mp]:
1773 #include "globals.h"
1776 #include "lock_guard.h"
1779 DEFINE_PER_CPU Per_cpu<Context::Pending_rqq> Context::_pending_rqq;
1780 DEFINE_PER_CPU Per_cpu<Context::Drq_q> Context::_glbl_drq_q;
1781 Cpu_mask Context::_tlb_active;
1783 PUBLIC static inline
1785 Context::enable_tlb(unsigned cpu)
1786 { _tlb_active.atomic_set(cpu); }
1788 PUBLIC static inline
1790 Context::disable_tlb(unsigned cpu)
1791 { _tlb_active.atomic_clear(cpu); }
1794 * \brief Enqueue the given \a c into its CPU's queue.
1795 * \param c the context to enqueue for DRQ handling.
1797 IMPLEMENT inline NEEDS["globals.h", "lock_guard.h", "kdb_ke.h"]
1799 Context::Pending_rqq::enq(Context *c)
1801 // FIXME: is it safe to do the check without a locked queue, or may
1802 // we lose DRQs then?
1804 //if (!c->_pending_rq.queued())
1806 Queue &q = Context::_pending_rqq.cpu(c->cpu());
1807 auto guard = lock_guard(q.q_lock());
1808 if (c->_pending_rq.queued())
1810 q.enqueue(&c->_pending_rq);
1816 * \brief Wake up all contexts with pending DRQs.
1818 * This function wakes up all contexts from the pending queue.
1822 Context::Pending_rqq::handle_requests(Context **mq)
1824 //LOG_MSG_3VAL(current(), "phq", current_cpu(), 0, 0);
1825 // printf("CPU[%2u:%p]: Context::Pending_rqq::handle_requests() this=%p\n", current_cpu(), current(), this);
1826 bool resched = false;
1827 Context *curr = current();
1832 auto guard = lock_guard(q_lock());
1833 Queue_item *qi = first();
1837 check_kdb (dequeue(qi, Queue_item::Ok));
1838 c = static_cast<Context::Pending_rq *>(qi)->context();
1841 assert_kdb (c->check_for_current_cpu());
1843 if (EXPECT_FALSE(c->_migration != 0))
1845 // if the currently executing thread shall be migrated we must defer
1846 // this until we have handled the whole request queue, otherwise we
1847 // would miss the remaining requests or execute them on the wrong CPU.
1850 // we can directly migrate the thread...
1851 resched |= c->initiate_migration();
1853 // if migrated away skip the resched test below
1854 if (access_once(&c->_cpu) != current_cpu())
1861 c->try_finish_migration();
1863 if (EXPECT_TRUE(c != curr && c->drq_pending()))
1864 c->state_add(Thread_drq_ready);
1866 // FIXME: must we also reschedule when c cannot preempt the current
1867 // thread but its current scheduling context?
1868 if (EXPECT_TRUE(c != curr && (c->state() & Thread_ready_mask)))
1870 //printf("CPU[%2u:%p]: Context::Pending_rqq::handle_requests() dequeued %p(%u)\n", current_cpu(), current(), c, qi->queued());
1871 resched |= Sched_context::rq.current().deblock(c->sched(), curr->sched(), false);
1878 Context::global_drq(unsigned cpu, Drq::Request_func *func, void *arg,
1879 Drq::Request_func *reply = 0, bool wait = true)
1881 assert_kdb (this == current());
1887 state_add(wait ? Thread_drq_wait : 0);
1889 _glbl_drq_q.cpu(cpu).enq(&_drq);
1891 Ipi::send(Ipi::Global_request, this->cpu(), cpu);
1893 //LOG_MSG_3VAL(src, "<drq", src->state(), Mword(this), 0);
1894 while (wait && (state() & Thread_drq_wait))
1896 state_del(Thread_ready_mask);
1904 Context::handle_global_requests()
1906 return _glbl_drq_q.cpu(current_cpu()).handle_requests();
1911 Context::enqueue_drq(Drq *rq, Drq::Exec_mode /*exec*/)
1913 assert_kdb (cpu_lock.test());
1914 // printf("CPU[%2u:%p]: Context::enqueue_request(this=%p, src=%p, func=%p, arg=%p)\n", current_cpu(), current(), this, src, func,arg);
1916 if (cpu() != current_cpu())
1921 // read the CPU again; we may have been migrated meanwhile
1922 unsigned cpu = access_once(&this->_cpu);
1925 Queue &q = Context::_pending_rqq.cpu(cpu);
1926 auto guard = lock_guard(q.q_lock());
1929 // migrated between getting the lock and reading the CPU, so the
1930 // new CPU is responsible for executing our request
1931 if (access_once(&this->_cpu) != cpu)
1934 if (EXPECT_FALSE(!Cpu::online(cpu)))
1936 if (EXPECT_FALSE(!_drq_q.dequeue(rq, Queue_item::Ok)))
1940 // execute locally under the target CPU's queue lock
1941 _drq_q.execute_request(rq, Drq_q::No_drop, true);
1943 // free the lock early
1945 if ( access_once(&this->_cpu) == current_cpu()
1947 && (state() & Thread_ready_mask))
1949 Sched_context::rq.current().ready_enqueue(sched());
1955 if (!_pending_rq.queued())
1960 q.enqueue(&_pending_rq);
1966 //LOG_MSG_3VAL(this, "sipi", current_cpu(), cpu(), (Mword)current());
1967 Ipi::send(Ipi::Request, current_cpu(), cpu);
1971 { // LOG_MSG_3VAL(this, "adrq", state(), (Mword)current(), (Mword)rq);
1973 bool do_sched = _drq_q.execute_request(rq, Drq_q::No_drop, true);
1974 if (!in_ready_list() && (state() & Thread_ready_mask))
1976 Sched_context::rq.current().ready_enqueue(sched());
1986 PRIVATE inline NOEXPORT
1988 Context::shutdown_drqs()
1990 if (_pending_rq.queued())
1992 auto guard = lock_guard(_pending_rq.queue()->q_lock());
1993 if (_pending_rq.queued())
1994 _pending_rq.queue()->dequeue(&_pending_rq, Queue_item::Ok);
1997 _drq_q.handle_requests(Drq_q::Drop);
2003 Context::cpu(bool running = false) const
2011 * Remote helper for doing remote CPU ready enqueue.
2013 * See remote_ready_enqueue().
2017 Context::handle_remote_ready_enqueue(Drq *, Context *self, void *)
2019 self->state_add_dirty(Thread_ready);
2024 PROTECTED inline NEEDS[Context::handle_remote_ready_enqueue]
2026 Context::remote_ready_enqueue()
2027 { drq(&handle_remote_ready_enqueue, 0); }
2032 * Block and wait for the next grace period.
2034 PUBLIC inline NEEDS["cpu_lock.h", "lock_guard.h"]
2038 auto guard = lock_guard(cpu_lock);
2039 state_change_dirty(~Thread_ready, Thread_waiting);
2040 Rcu::call(this, &rcu_unblock);
2048 Context::handle_remote_tlb_flush(Drq *, Context *, void *_s)
2050 Mem_space **s = (Mem_space **)_s;
2051 Mem_space::tlb_flush_spaces((bool)s[0], s[1], s[2]);
2058 Context::xcpu_tlb_flush(bool flush_all_spaces, Mem_space *s1, Mem_space *s2)
2060 auto g = lock_guard(cpu_lock);
2061 Mem_space *s[3] = { (Mem_space *)flush_all_spaces, s1, s2 };
2062 unsigned ccpu = current_cpu();
2063 for (unsigned i = 0; i < Config::Max_num_cpus; ++i)
2064 if (ccpu != i && _tlb_active.get(i))
2065 current()->global_drq(i, Context::handle_remote_tlb_flush, s);
2068 //----------------------------------------------------------------------------
2069 IMPLEMENTATION [fpu && !ux]:
2073 PUBLIC inline NEEDS ["fpu.h"]
2075 Context::spill_fpu()
2077 // If we own the FPU, we should never be getting an "FPU unavailable" trap
2078 assert_kdb (Fpu::fpu.current().owner() == this);
2079 assert_kdb (state() & Thread_fpu_owner);
2080 assert_kdb (fpu_state());
2082 // Save the FPU state of the previous FPU owner (lazy) if applicable
2083 Fpu::save_state(fpu_state());
2084 state_del_dirty(Thread_fpu_owner);
2089 * When switching away from the FPU owner, disable the FPU to cause
2090 * the next FPU access to trap.
2091 * When switching back to the FPU owner, enable the FPU so we don't
2092 * get an FPU trap on FPU access.
2094 IMPLEMENT inline NEEDS ["fpu.h"]
2096 Context::switch_fpu(Context *t)
2098 Fpu &f = Fpu::fpu.current();
2099 if (f.is_owner(this))
2101 else if (f.is_owner(t) && !(t->state() & Thread_vcpu_fpu_disabled))
2105 //----------------------------------------------------------------------------
2106 IMPLEMENTATION [!fpu]:
2110 Context::spill_fpu()
2115 Context::switch_fpu(Context *)
2118 // --------------------------------------------------------------------------
2119 IMPLEMENTATION [debug]:
2121 #include "kobject_dbg.h"
2125 Context::Drq_log::print(int maxlen, char *buf) const
2127 static char const *const _types[] =
2128 { "send", "request", "reply", "done" };
2130 char const *t = "unk";
2131 if ((unsigned)type < sizeof(_types)/sizeof(_types[0]))
2132 t = _types[(unsigned)type];
2134 return snprintf(buf, maxlen, "%s(%s) rq=%p to ctxt=%lx/%p (func=%p, reply=%p) cpu=%u",
2135 t, wait ? "wait" : "no-wait", rq, Kobject_dbg::pointer_to_id(thread),
2136 thread, func, reply, target_cpu);