3 #include <csetjmp> // typedef jmp_buf
7 #include "continuation.h"
11 #include "member_offs.h"
12 #include "per_cpu_data.h"
14 #include "queue_item.h"
16 #include "sched_context.h"
18 #include "spin_lock.h"
20 #include <fiasco_defs.h>
30 explicit Context_ptr(Cap_index id) : _t(id) {}
32 Context_ptr(Context_ptr const &o) : _t(o._t) {}
33 Context_ptr const &operator = (Context_ptr const &o)
34 { _t = o._t; return *this; }
36 Kobject_iface *ptr(Space *, L4_fpage::Rights *) const;
38 bool is_kernel() const { return false; }
39 bool is_valid() const { return _t != Cap_index(~0UL); }
41 // only for debugging use
42 Cap_index raw() const { return _t; }
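// Design note: a Context_ptr does not store a Context pointer directly; it
// keeps the capability index of the target and resolves it on demand via
// ptr(), which performs a capability lookup in the given Space (see the
// implementation further below). The all-ones index marks the pointer as
// invalid, cf. is_valid().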
49 template< typename T >
50 class Context_ptr_base : public Context_ptr
53 enum Invalid_type { Invalid };
54 enum Null_type { Null };
55 explicit Context_ptr_base(Invalid_type) : Context_ptr(Cap_index(~0UL)) {}
56 explicit Context_ptr_base(Null_type) : Context_ptr(Cap_index(0)) {}
57 explicit Context_ptr_base(Cap_index id) : Context_ptr(id) {}
59 Context_ptr_base(Context_ptr_base<T> const &o) : Context_ptr(o) {}
60 template< typename X >
61 Context_ptr_base(Context_ptr_base<X> const &o) : Context_ptr(o)
62 { X *x = 0; T *t = x; (void)t; } // compile-time check: X* must convert to T*
64 Context_ptr_base<T> const &operator = (Context_ptr_base<T> const &o)
65 { Context_ptr::operator = (o); return *this; }
67 template< typename X >
68 Context_ptr_base<T> const &operator = (Context_ptr_base<X> const &o)
69 { X *x = 0; T *t = x; (void)t; Context_ptr::operator = (o); return *this; } // compile-time check: X* must convert to T*
71 //T *ptr(Space *s) const { return static_cast<T*>(Context_ptr::ptr(s)); }
74 class Context_space_ref
77 typedef Spin_lock_coloc<Space *> Space_n_lock;
84 Space *space() const { return _s.get_unused(); }
85 Space_n_lock *lock() { return &_s; }
86 Address user_mode() const { return _v & 1; }
87 Space *vcpu_user() const { return reinterpret_cast<Space*>(_v & ~3); }
88 Space *vcpu_aware() const { return user_mode() ? vcpu_user() : space(); }
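// The vCPU user space pointer and the current mode are packed into the
// single word _v: bit 0 holds the user-mode flag, the remaining bits hold
// the Space pointer, which vcpu_user() recovers by masking the low bits off.
// A minimal standalone sketch of this tagged-pointer encoding (illustrative
// only, not kernel code; assumes Space objects are at least 4-byte aligned):
//
//   Address v = reinterpret_cast<Address>(space_ptr) | (user ? 1 : 0);
//   bool   user_mode = v & 1;
//   Space *space     = reinterpret_cast<Space *>(v & ~Address(3));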
90 void space(Space *s) { _s.set_unused(s); }
91 void vcpu_user(Space *s) { _v = (Address)s; }
92 void user_mode(bool enable)
101 /** An execution context. A context is a runnable, schedulable activity.
102 It carries along some state used by other subsystems: a lock count,
103 and stack-element forward/next pointers.
110 friend class Jdb_thread_list;
111 friend class Context_ptr;
112 friend class Jdb_utcb;
115 virtual void finish_migration() = 0;
116 virtual bool initiate_migration() = 0;
126 * \brief Encapsulate an aggregate of Context.
128 * Allows obtaining a back reference to the aggregating Context object.
133 Context_member(Context_member const &);
138 * \brief Get the aggregating Context object.
140 Context *context() const;
144 * \brief Deferred Request.
146 * Represents a request that can be queued for each Context
147 * and is executed by the target context just after switching to the target context.
150 class Drq : public Queue_item, public Context_member
153 typedef unsigned (Request_func)(Drq *, Context *target, void *);
154 enum { Need_resched = 1, No_answer = 2 };
155 enum Wait_mode { No_wait = 0, Wait = 1 };
156 enum Exec_mode { Target_ctxt = 0, Any_ctxt = 1 };
157 // enum State { Idle = 0, Handled = 1, Reply_handled = 2 };
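// A request queued with Wait_mode Wait blocks the requester until the target
// has executed the request and the answer has been delivered back (the
// optional reply handler then runs in the requester's context); No_wait
// returns right after enqueuing. See Context::drq() further below.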
166 * \brief Queue for deferred requests (Drq).
168 * A FIFO queue, aggregated by each Context, for incoming Drqs
169 * that have to be executed directly after switching to that context.
171 class Drq_q : public Queue, public Context_member
174 enum Drop_mode { Drop = true, No_drop = false };
176 bool dequeue(Drq *drq, Queue_item::Status reason);
177 bool handle_requests(Drop_mode drop = No_drop);
178 bool execute_request(Drq *r, Drop_mode drop, bool local);
184 L4_sched_param const *sp;
187 Migration() : in_progress(false) {}
191 class Ku_mem_ptr : public Context_member
196 typename User<T>::Ptr _u;
200 Ku_mem_ptr() : _u(0), _k(0) {}
201 Ku_mem_ptr(typename User<T>::Ptr const &u, T *k) : _u(u), _k(k) {}
203 void set(typename User<T>::Ptr const &u, T *k)
206 T *access(bool is_current = false) const
208 // assert_kdb (!is_current || current() == context());
210 && (int)Config::Access_user_mem == Config::Access_user_mem_direct)
213 Cpu_number const cpu = current_cpu();
214 if ((int)Config::Access_user_mem == Config::Must_access_user_mem_direct
215 && cpu == context()->cpu()
216 && Mem_space::current_mem_space(cpu) == context()->space())
221 typename User<T>::Ptr usr() const { return _u; }
222 T* kern() const { return _k; }
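// Design note: a Ku_mem_ptr keeps two views of one kernel/user shared
// object -- the address the user sees (_u) and a kernel alias mapping (_k).
// access() above hands out the user-visible address whenever the object can
// be reached directly (direct user-memory access is configured and the
// owning context's address space is active on this CPU) and falls back to
// the kernel alias otherwise.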
227 * Definition of different scheduling modes
231 Periodic = 0x1, ///< 0 = Conventional, 1 = Periodic
232 Nonstrict = 0x2, ///< 0 = Strictly Periodic, 1 = Non-strictly periodic
236 * Definition of different helping modes
246 * Return consumed CPU time.
247 * @return Consumed CPU time in usecs
249 Cpu_time consumed_time();
251 virtual bool kill() = 0;
253 void spill_user_state();
254 void fill_user_state();
256 Space * FIASCO_PURE space() const { return _space.space(); }
257 Mem_space * FIASCO_PURE mem_space() const { return static_cast<Mem_space*>(space()); }
261 * Update consumed CPU time during each context switch and when
262 * reading out the current thread's consumed CPU time.
264 void update_consumed_time();
268 Ku_mem_ptr<Utcb> _utcb;
272 friend class Jdb_tcb;
274 /// low level page table switching stuff
275 void switchin_context(Context *) asm ("switchin_context_label") FIASCO_FASTCALL;
277 /// low level fpu switching stuff
278 void switch_fpu(Context *t);
280 /// low level cpu switching stuff
281 void switch_cpu(Context *t);
284 Context_space_ref _space;
291 // how many locks does this thread hold on other threads
292 // incremented in Thread::lock, decremented in Thread::clear
293 // Thread::kill needs to know
297 // The scheduling parameters. We would only need to keep an
298 // anonymous reference to them as we do not need them ourselves, but
299 // we aggregate them for performance reasons.
300 Sched_context _sched_context;
301 Sched_context *_sched;
305 // Pointer to floating point register state
306 Fpu_state _fpu_state;
307 // Implementation-specific consumed CPU time (TSC ticks or usecs)
308 Clock::Time _consumed_time;
314 // for trigger_exception
315 Continuation _exc_cont;
317 jmp_buf *_recover_jmpbuf; // setjmp buffer for page-fault recovery
319 Migration *_migration;
320 bool _need_to_finish_migration;
323 void arch_load_vcpu_kern_state(Vcpu_state *vcpu, bool do_load);
326 void arch_load_vcpu_user_state(Vcpu_state *vcpu, bool do_load);
327 void arch_update_vcpu_state(Vcpu_state *vcpu);
329 // XXX Timeout for both sender and receiver! Normally we would have to
330 // define separate timeouts in Receiver and Sender, but because only one
331 // timeout can be set at a time we use the same timeout. The timeout
332 // has to be defined here because Dirq::hit has to be able to reset the
333 // timeout (Irq::_irq_thread is of type Receiver).
337 static Per_cpu<Clock> _clock;
338 static Per_cpu<Context *> _kernel_ctxt;
344 #include "tb_entry.h"
346 EXTENSION class Context
349 struct Drq_log : public Tb_entry
355 Cpu_number target_cpu;
356 enum class Type { Send, Do_request, Do_reply, Done } type;
358 unsigned print(int max, char *buf) const;
359 Group_order has_partner() const
363 case Type::Send: return Group_order::first();
364 case Type::Done: return Group_order::last();
365 case Type::Do_request: return Group_order(1);
366 case Type::Do_reply: return Group_order(2);
368 return Group_order::none();
371 Group_order is_partner(Drq_log const *o) const
373 if (rq != o->rq || func != o->func || reply != o->reply)
374 return Group_order::none();
376 return o->has_partner();
381 struct Vcpu_log : public Tb_entry
390 unsigned print(int max, char *buf) const;
394 // --------------------------------------------------------------------------
400 #include "cpu_lock.h"
401 #include "entry_frame.h"
403 #include "globals.h" // current()
405 #include "lock_guard.h"
408 #include "mem_layout.h"
409 #include "processor.h"
411 #include "std_macros.h"
412 #include "thread_state.h"
416 DEFINE_PER_CPU Per_cpu<Clock> Context::_clock(true);
417 DEFINE_PER_CPU Per_cpu<Context *> Context::_kernel_ctxt;
419 IMPLEMENT inline NEEDS["kdb_ke.h"]
420 Kobject_iface * __attribute__((nonnull(1, 2)))
421 Context_ptr::ptr(Space *s, L4_fpage::Rights *rights) const
423 assert_kdb (cpu_lock.test());
425 return static_cast<Obj_space*>(s)->lookup_local(_t, rights);
432 /** Initialize a context. After setup, a switch_exec to this context results
433 in a return to user code using the return registers at regs(). The
434 return registers are not initialized however; neither is the space_context
435 to be used in thread switching (use set_space_context() for that).
436 @pre (_kernel_sp == 0) && (* (stack end) == 0)
437 @param thread_lock pointer to lock used to lock this context
438 @param space_context the space context
440 PUBLIC inline NEEDS ["atomic.h", "entry_frame.h", <cstdio>]
442 : _kernel_sp(reinterpret_cast<Mword*>(regs())),
446 _sched(&_sched_context),
447 _mode(Sched_mode(0)),
449 _need_to_finish_migration(false)
452 // NOTE: We do not have to synchronize the initialization of
453 // _space_context because it is constant for all concurrent
454 // invocations of this constructor. When two threads concurrently
455 // try to create a new task, they already synchronize in
456 // sys_task_new() and avoid calling us twice with different
457 // space_context arguments.
459 set_cpu_of(this, Cpu::invalid());
464 Context::spill_fpu_if_owner()
466 // spill FPU state into memory before migration
467 if (state() & Thread_fpu_owner)
469 Fpu &f = Fpu::fpu.current();
470 if (current() != this)
484 // If this context owned the FPU, no one owns it now
485 Fpu &f = Fpu::fpu.current();
486 if (f.is_owner(this))
501 Context::check_for_current_cpu() const
503 bool r = cpu() == current_cpu() || !Cpu::online(cpu());
504 if (0 && EXPECT_FALSE(!r)) // debug output disabled
505 printf("FAIL: cpu=%u (current=%u)\n",
506 cxx::int_value<Cpu_number>(cpu()),
507 cxx::int_value<Cpu_number>(current_cpu()));
514 Context::state(bool check = true) const
517 assert_kdb(!check || check_for_current_cpu());
523 Context::kernel_context(Cpu_number cpu)
524 { return _kernel_ctxt.cpu(cpu); }
526 PROTECTED static inline
528 Context::kernel_context(Cpu_number cpu, Context *ctxt)
529 { _kernel_ctxt.cpu(cpu) = ctxt; }
532 /** @name State manipulation */
538 * Does the context exist?
539 * @return true if this context has been initialized.
541 PUBLIC inline NEEDS ["thread_state.h"]
543 Context::exists() const
545 return state() != Thread_invalid;
549 * Is the context about to be deleted?
550 * @return true if this context is in deletion.
552 PUBLIC inline NEEDS ["thread_state.h"]
554 Context::is_invalid() const
555 { return state() == Thread_invalid; }
558 * Atomically add bits to state flags.
559 * @param bits bits to be added to state flags
560 * @return 1 if none of the bits that were added had been set before
562 PUBLIC inline NEEDS ["atomic.h"]
564 Context::state_add(Mword bits)
566 assert_kdb(check_for_current_cpu());
567 atomic_or(&_state, bits);
571 * Add bits in state flags. Unsafe (non-atomic) and
572 * fast version -- you must hold the kernel lock when you use it.
573 * @pre cpu_lock.test() == true
574 * @param bits bits to be added to state flags
578 Context::state_add_dirty(Mword bits, bool check = true)
581 assert_kdb(!check || check_for_current_cpu());
586 * Atomically delete bits from state flags.
587 * @param bits bits to be removed from state flags
588 * @return 1 if all of the bits that were removed had previously been set
590 PUBLIC inline NEEDS ["atomic.h"]
592 Context::state_del(Mword bits)
594 assert_kdb (check_for_current_cpu());
595 atomic_and(&_state, ~bits);
599 * Delete bits in state flags. Unsafe (non-atomic) and
600 * fast version -- you must hold the kernel lock when you use it.
601 * @pre cpu_lock.test() == true
602 * @param bits bits to be removed from state flags
606 Context::state_del_dirty(Mword bits, bool check = true)
609 assert_kdb(!check || check_for_current_cpu());
614 * Atomically delete and add bits in state flags, provided the
615 * following rules apply (otherwise state is not changed at all):
616 * - Bits that are to be set must be clear in state or clear in mask
617 * - Bits that are to be cleared must be set in state
618 * @param mask Bits not set in mask shall be deleted from state flags
619 * @param bits Bits to be added to state flags
620 * @return 1 if state was changed, 0 otherwise
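*
* Worked example (illustrative; A and B stand for two distinct state bits):
* state_change_safely(~A, B) atomically clears A and sets B, but only if A
* is currently set and B is currently clear; otherwise the state is left
* untouched and 0 is returned.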
622 PUBLIC inline NEEDS ["atomic.h"]
624 Context::state_change_safely(Mword mask, Mword bits)
626 assert_kdb (check_for_current_cpu());
632 if ((old & bits & mask) | (~old & ~mask))
635 while (!cas(&_state, old, (old & mask) | bits));
641 * Atomically delete and add bits in state flags.
642 * @param mask bits not set in mask shall be deleted from state flags
643 * @param bits bits to be added to state flags
645 PUBLIC inline NEEDS ["atomic.h"]
647 Context::state_change(Mword mask, Mword bits)
649 assert_kdb (check_for_current_cpu());
650 return atomic_change(&_state, mask, bits);
654 * Delete and add bits in state flags. Unsafe (non-atomic) and
655 * fast version -- you must hold the kernel lock when you use it.
656 * @pre cpu_lock.test() == true
657 * @param mask Bits not set in mask shall be deleted from state flags
658 * @param bits Bits to be added to state flags
662 Context::state_change_dirty(Mword mask, Mword bits, bool check = true)
665 assert_kdb(!check || check_for_current_cpu());
681 Context::vcpu_aware_space() const
682 { return _space.vcpu_aware(); }
684 /** Registers used when iret'ing to user mode.
685 @return return registers
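
Layout note: the Context object and its kernel stack share one block of
Size bytes; the Entry_frame holding the user-mode register state sits
directly below the stack-aligned top of that block, which is what the
address computation below recovers:

  stack_align(this + Size)      top of the context/stack block
  regs() = top - sizeof(Entry_frame)
  _kernel_sp initially points at regs(); the kernel stack grows downwards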
687 PUBLIC inline NEEDS["cpu.h", "entry_frame.h"]
689 Context::regs() const
691 return reinterpret_cast<Entry_frame *>
692 (Cpu::stack_align(reinterpret_cast<Mword>(this) + Size)) - 1;
695 /** @name Lock counting
696 These functions count the number of locks
697 this context holds. A context must not be deleted if its lock count is nonzero.
702 /** Increment lock count.
703 @post lock_cnt() > 0 */
706 Context::inc_lock_cnt()
711 /** Decrement lock count.
716 Context::dec_lock_cnt()
726 Context::lock_cnt() const
734 * Switch active timeslice of this Context.
735 * @param next Sched_context to switch to
739 Context::switch_sched(Sched_context *next, Sched_context::Ready_queue *queue)
741 queue->switch_sched(sched(), next);
746 * Select a different context for running and activate it.
752 auto guard = lock_guard(cpu_lock);
753 assert (!Sched_context::rq.current().schedule_in_progress);
757 // Ensure only the current thread calls schedule
758 assert_kdb (this == current());
760 Cpu_number current_cpu = Cpu_number::nil();
761 Sched_context::Ready_queue *rq = 0;
763 // Enqueue current thread into ready-list to schedule correctly
766 // Select a thread for scheduling.
767 Context *next_to_run;
771 // We may have been migrated during the switch_exec_locked in the while
772 // statement below, so check whether we have to use a new ready queue.
774 Cpu_number new_cpu = access_once(&_cpu);
775 if (new_cpu != current_cpu)
778 current_cpu = new_cpu;
779 rq = &Sched_context::rq.current();
780 if (rq->schedule_in_progress)
787 next_to_run = rq->next_to_run()->context();
789 // Ensure ready-list sanity
790 assert_kdb (next_to_run);
792 if (EXPECT_TRUE (next_to_run->state() & Thread_ready_mask))
795 rq->ready_dequeue(next_to_run->sched());
797 rq->schedule_in_progress = this;
803 // check if we've been migrated meanwhile
804 if (EXPECT_FALSE(current_cpu != access_once(&_cpu)))
808 rq = &Sched_context::rq.current();
809 if (rq->schedule_in_progress)
813 rq->schedule_in_progress = 0;
816 while (EXPECT_FALSE(schedule_switch_to_locked(next_to_run)));
822 Context::schedule_if(bool s)
824 if (!s || Sched_context::rq.current().schedule_in_progress)
832 * Return Context's Sched_context with id 'id'; return time slice 0 as default.
833 * @return Sched_context with id 'id' or 0
837 Context::sched_context(unsigned short const id = 0) const
839 if (EXPECT_TRUE (!id))
840 return const_cast<Sched_context*>(&_sched_context);
842 for (Sched_context *tmp = _sched_context.next();
843 tmp != &_sched_context; tmp = tmp->next())
851 * Return Context's currently active Sched_context.
852 * @return Active Sched_context
856 Context::sched() const
862 * Set Context's currently active Sched_context.
863 * @param sched Sched_context to be activated
867 Context::set_sched(Sched_context * const sched)
873 * Return Context's real-time period length.
874 * @return Period length in usecs
878 Context::period() const
884 * Set Context's real-time period length.
885 * @param period New period length in usecs
889 Context::set_period(Unsigned64 const period)
895 * Return Context's scheduling mode.
896 * @return Scheduling mode
900 Context::mode() const
906 * Set Context's scheduling mode.
907 * @param mode New scheduling mode
911 Context::set_mode(Context::Sched_mode const mode)
918 // XXX for now, synchronize with global kernel lock
922 * Enqueue current() if ready to fix up ready-list invariant.
924 PRIVATE inline NOEXPORT
926 Context::update_ready_list()
928 assert_kdb (this == current());
930 if ((state() & Thread_ready_mask) && sched()->left())
931 Sched_context::rq.current().ready_enqueue(sched());
935 * Check if Context is in ready-list.
936 * @return 1 if thread is in ready-list, 0 otherwise
940 Context::in_ready_list() const
942 return sched()->in_ready_list();
947 * \brief Activate a newly created thread.
949 * This function sets a new thread onto the ready list and switches to
950 * the thread if it can preempt the currently running thread.
956 auto guard = lock_guard(cpu_lock);
957 if (cpu() == current_cpu())
959 state_add_dirty(Thread_ready);
960 if (Sched_context::rq.current().deblock(sched(), current()->sched(), true))
962 current()->switch_to_locked(this);
967 remote_ready_enqueue();
973 /** Helper. Context that helps us by donating its time to us. It is
974 set by switch_exec() if the calling thread says so.
975 @return context that helps us and should be activated after freeing a lock.
979 Context::helper() const
987 Context::set_helper(Helping_mode const mode)
998 // don't change _helper value
1003 /** Donatee. Context that receives our time slices, for example
1004 because it has locked us.
1005 @return context that should be activated instead of us when we're
1010 Context::donatee() const
1017 Context::set_donatee(Context * const donatee)
1024 Context::get_kernel_sp() const
1031 Context::set_kernel_sp(Mword * const esp)
1038 Context::fpu_state()
1044 * Add to consumed CPU time.
1045 * @param quantum Implementation-specific time quantum (TSC ticks or usecs)
1049 Context::consume_time(Clock::Time quantum)
1051 _consumed_time += quantum;
1055 * Update consumed CPU time during each context switch and when
1056 * reading out the current thread's consumed CPU time.
1058 IMPLEMENT inline NEEDS ["cpu.h"]
1060 Context::update_consumed_time()
1062 if (Config::Fine_grained_cputime)
1063 consume_time (_clock.cpu(cpu()).delta());
1066 IMPLEMENT inline NEEDS ["config.h", "cpu.h"]
1068 Context::consumed_time()
1070 if (Config::Fine_grained_cputime)
1071 return _clock.cpu(cpu()).us(_consumed_time);
1073 return _consumed_time;
1077 * Switch to scheduling context and execution context while not running under CPU lock.
1080 PUBLIC inline NEEDS [<cassert>]
1082 Context::switch_to(Context *t)
1084 // Call switch_to_locked if CPU lock is already held
1085 assert (!cpu_lock.test());
1087 // Grab the CPU lock
1088 auto guard = lock_guard(cpu_lock);
1090 switch_to_locked(t);
1094 * Switch scheduling context and execution context.
1095 * @param t Destination thread whose scheduling context and execution context
1096 * should be activated.
1098 PRIVATE inline NEEDS ["kdb_ke.h"]
1099 bool FIASCO_WARN_RESULT
1100 Context::schedule_switch_to_locked(Context *t)
1102 // Must be called with CPU lock held
1103 assert_kdb (cpu_lock.test());
1105 Sched_context::Ready_queue &rq = Sched_context::rq.current();
1106 // Switch to destination thread's scheduling context
1107 if (rq.current_sched() != t->sched())
1108 rq.set_current_sched(t->sched());
1110 // XXX: IPC dependency tracking belongs here.
1112 // Switch to destination thread's execution context, no helping involved
1114 return switch_exec_locked(t, Not_Helping);
1116 return handle_drq();
1119 PUBLIC inline NEEDS [Context::schedule_switch_to_locked]
1121 Context::switch_to_locked(Context *t)
1123 if (EXPECT_FALSE(schedule_switch_to_locked(t)))
1129 * Switch execution context while not running under CPU lock.
1131 PUBLIC inline NEEDS ["kdb_ke.h"]
1132 bool FIASCO_WARN_RESULT
1133 Context::switch_exec(Context *t, enum Helping_mode mode)
1135 // Call switch_exec_locked if CPU lock is already held
1136 assert_kdb (!cpu_lock.test());
1138 // Grab the CPU lock
1139 auto guard = lock_guard(cpu_lock);
1141 return switch_exec_locked(t, mode);
1147 Context::handle_helping(Context *t)
1149 // XXX: maybe we do not need this on MP, because we have no helping there
1150 assert_kdb (current() == this);
1151 // Time-slice lending: if t is locked, switch to its locker
1152 // instead, this is transitive
1153 while (t->donatee() && // target thread locked
1154 t->donatee() != t) // not by itself
1156 // Special case for Thread::kill(): If the locker is
1157 // current(), switch to the locked thread to allow it to
1158 // release other locks. Do this only when the target thread
1159 // actually owns locks.
1160 if (t->donatee() == this)
1162 if (t->lock_cnt() > 0)
1175 * Switch to a specific different execution context.
1176 * If that context is currently locked, switch to its locker instead
1177 * (except if current() is the locker)
1178 * @pre current() == this && current() != t
1179 * @param t thread that shall be activated.
1180 * @param mode helping mode; we either help, don't help or leave the
1181 * helping state unchanged
1184 bool FIASCO_WARN_RESULT //L4_IPC_CODE
1185 Context::switch_exec_locked(Context *t, enum Helping_mode mode)
1187 // Must be called with CPU lock held
1188 assert_kdb (t->cpu() != Cpu::invalid());
1189 assert_kdb (t->cpu() == current_cpu());
1190 assert_kdb (cpu() == current_cpu());
1191 assert_kdb (cpu_lock.test());
1192 assert_kdb (current() != t);
1193 assert_kdb (current() == this);
1194 assert_kdb (timeslice_timeout.cpu(cpu())->is_set()); // Coma check
1197 Context *t_orig = t;
1200 // Time-slice lending: if t is locked, switch to its locker
1201 // instead, this is transitive
1202 t = handle_helping(t);
1205 return handle_drq();
1210 // Can only switch to ready threads!
1211 if (EXPECT_FALSE (!(t->state() & Thread_ready_mask)))
1213 assert_kdb (state() & Thread_ready_mask);
1218 // Ensure kernel stack pointer is non-null if thread is ready
1219 assert_kdb (t->_kernel_sp);
1221 t->set_helper(mode);
1223 update_ready_list();
1224 assert_kdb (!(state() & Thread_ready_mask) || !sched()->left()
1225 || in_ready_list());
1230 return handle_drq();
1233 PUBLIC inline NEEDS[Context::switch_exec_locked, Context::schedule]
1235 Context::switch_exec_schedule_locked(Context *t, enum Helping_mode mode)
1237 if (EXPECT_FALSE(switch_exec_locked(t, mode)))
1242 Context::Ku_mem_ptr<Utcb> const &
1243 Context::utcb() const
1246 IMPLEMENT inline NEEDS["globals.h"]
1248 Context::Context_member::context() const
1249 { return context_of(this); }
1251 IMPLEMENT inline NEEDS["lock_guard.h", "kdb_ke.h"]
1253 Context::Drq_q::enq(Drq *rq)
1255 assert_kdb(cpu_lock.test());
1256 auto guard = lock_guard(q_lock());
1262 Context::do_drq_reply(Drq *r, Drq_q::Drop_mode drop)
1264 state_change_dirty(~Thread_drq_wait, Thread_ready);
1265 // r->state = Drq::Reply_handled;
1266 if (drop == Drq_q::No_drop && r->reply)
1267 return r->reply(r, this, r->arg) & Drq::Need_resched;
1272 IMPLEMENT inline NEEDS[Context::do_drq_reply]
1274 Context::Drq_q::execute_request(Drq *r, Drop_mode drop, bool local)
1276 bool need_resched = false;
1277 Context *const self = context();
1278 // printf("CPU[%2u:%p]: context=%p: handle request for %p (func=%p, arg=%p)\n", current_cpu(), current(), context(), r->context(), r->func, r->arg);
1279 if (r->context() == self)
1281 LOG_TRACE("DRQ handling", "drq", current(), Drq_log,
1282 l->type = Drq_log::Type::Do_reply;
1284 l->func = (void*)r->func;
1285 l->reply = (void*)r->reply;
1286 l->thread = r->context();
1287 l->target_cpu = current_cpu();
1290 //LOG_MSG_3VAL(current(), "hrP", current_cpu() | (drop ? 0x100: 0), (Mword)r->context(), (Mword)r->func);
1291 return self->do_drq_reply(r, drop);
1295 LOG_TRACE("DRQ handling", "drq", current(), Drq_log,
1296 l->type = Drq_log::Type::Do_request;
1298 l->func = (void*)r->func;
1299 l->reply = (void*)r->reply;
1300 l->thread = r->context();
1301 l->target_cpu = current_cpu();
1304 // r->state = Drq::Idle;
1305 unsigned answer = 0;
1306 //LOG_MSG_3VAL(current(), "hrq", current_cpu() | (drop ? 0x100: 0), (Mword)r->context(), (Mword)r->func);
1307 if (EXPECT_TRUE(drop == No_drop && r->func))
1308 answer = r->func(r, self, r->arg);
1309 else if (EXPECT_FALSE(drop == Drop))
1310 // flag DRQ abort for requester
1312 // LOG_MSG_3VAL(current(), "hrq-", answer, current()->state() /*(Mword)r->context()*/, (Mword)r->func);
1313 need_resched |= answer & Drq::Need_resched;
1314 //r->state = Drq::Handled;
1317 if (!(answer & Drq::No_answer))
1320 return r->context()->do_drq_reply(r, drop) || need_resched;
1322 need_resched |= r->context()->enqueue_drq(r, Drq::Target_ctxt);
1325 return need_resched;
1328 IMPLEMENT inline NEEDS["lock_guard.h"]
1330 Context::Drq_q::dequeue(Drq *drq, Queue_item::Status reason)
1332 auto guard = lock_guard(q_lock());
1335 return Queue::dequeue(drq, reason);
1338 IMPLEMENT inline NEEDS["mem.h", "lock_guard.h"]
1340 Context::Drq_q::handle_requests(Drop_mode drop)
1342 // printf("CPU[%2u:%p]: > Context::Drq_q::handle_requests() context=%p\n", current_cpu(), current(), context());
1343 bool need_resched = false;
1348 auto guard = lock_guard(q_lock());
1351 return need_resched;
1353 check_kdb (Queue::dequeue(qi, Queue_item::Ok));
1356 Drq *r = static_cast<Drq*>(qi);
1357 // printf("CPU[%2u:%p]: context=%p: handle request for %p (func=%p, arg=%p)\n", current_cpu(), current(), context(), r->context(), r->func, r->arg);
1358 need_resched |= execute_request(r, drop, false);
1362 * \brief Forced dequeue from lock wait queue, or DRQ queue.
1366 Context::force_dequeue()
1368 Queue_item *const qi = queue_item();
1372 // we're waiting for a lock or have a DRQ pending
1373 Queue *const q = qi->queue();
1375 auto guard = lock_guard(q->q_lock());
1376 // check again, with the queue lock held.
1378 // NOTE: we may already have been removed from the queue on another CPU
1378 if (qi->queued() && qi->queue())
1380 // we must never be taken from one queue to another on a different CPU
1382 assert_kdb(q == qi->queue());
1383 // pull myself out of the queue, mark reason as invalidation
1384 q->dequeue(qi, Queue_item::Invalid);
1391 * \brief Dequeue from lock and DRQ queues, abort pending DRQs
1395 Context::shutdown_queues()
1403 * \brief Check for pending DRQs.
1404 * \return true if there are DRQs pending, false if not.
1408 Context::drq_pending() const
1409 { return _drq_q.first(); }
1413 Context::try_finish_migration()
1415 if (EXPECT_FALSE(_need_to_finish_migration))
1417 _need_to_finish_migration = false;
1424 * \brief Handle all pending DRQs.
1425 * \pre cpu_lock.test() (The CPU lock must be held).
1426 * \pre current() == this (only the currently running context is allowed to
1427 * call this function).
1428 * \return true if re-scheduling is needed (ready queue has changed), false otherwise.
1433 Context::handle_drq()
1435 assert_kdb (check_for_current_cpu());
1436 assert_kdb (cpu_lock.test());
1438 try_finish_migration();
1440 if (EXPECT_TRUE(!drq_pending()))
1444 bool ret = _drq_q.handle_requests();
1445 state_del_dirty(Thread_drq_ready);
1447 //LOG_MSG_3VAL(this, "xdrq", state(), ret, cpu_lock.test());
1450 * When the context is marked as dead (Thread_dead), we must not execute
1451 * any usual context code; however, DRQ handlers may run.
1453 if (state() & Thread_dead)
1455 // so disable the context after handling all DRQs and flag a reschedule.
1456 state_del_dirty(Thread_ready_mask);
1460 return ret || !(state() & Thread_ready_mask);
1465 * \brief Get the queue item of the context.
1466 * \pre The context must currently not be in any queue.
1467 * \return The queue item of the context.
1469 * The queue item can be used to enqueue the context to a Queue.
1470 * A context must be in at most one queue at a time.
1471 * To figure out the context corresponding to a queue item
1472 * context_of() can be used.
1474 PUBLIC inline NEEDS["kdb_ke.h"]
1476 Context::queue_item()
1482 * \brief DRQ handler for state_change.
1484 * This function basically wraps Context::state_change().
1488 Context::handle_drq_state_change(Drq * /*src*/, Context *self, void * _rq)
1490 State_request *rq = reinterpret_cast<State_request*>(_rq);
1491 self->state_change_dirty(rq->del, rq->add);
1492 //LOG_MSG_3VAL(c, "dsta", c->state(), (Mword)src, (Mword)_rq);
1498 * \brief Queue a DRQ for changing the contexts state.
1499 * \param mask bit mask for the state (state &= mask).
1500 * \param add bits to add to the state (state |= add).
1501 * \note This function is a preemption point.
1503 * This function must be used to change the state of contexts that are
1504 * potentially running on a different CPU.
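*
* Example (illustrative; t is a hypothetical Context pointer): clearing the
* ready bit of a context that may be running on a different CPU:
*
*   t->drq_state_change(~Thread_ready, 0);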
1506 PUBLIC inline NEEDS[Context::drq]
1508 Context::drq_state_change(Mword mask, Mword add)
1510 if (current() == this)
1512 state_change_dirty(mask, add);
1519 drq(handle_drq_state_change, &rq);
1524 * \brief Initiate a DRQ for the context.
1525 * \pre \a src must be the currently running context.
1526 * \param src the source of the DRQ (the context who initiates the DRQ).
1527 * \param func the DRQ handler.
1528 * \param arg the argument for the DRQ handler.
1529 * \param reply the reply handler (called in the context of \a src immediately
1530 * after receiving a successful reply).
1532 * DRQs are requests that any context can queue to any other context. DRQs are
1533 * the basic mechanism to initiate actions on remote CPUs in an MP system,
1534 * however, they are also allowed locally.
1535 * DRQ handlers of pending DRQs are executed by Context::handle_drq() in the
1536 * context of the target context. Context::handle_drq() is basically called
1537 * after switching to a context in Context::switch_exec_locked().
1539 * This function enqueues a DRQ and blocks the current context for a reply DRQ.
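*
* Usage sketch (illustrative only; my_func, my_arg, and target are
* hypothetical names):
*
*   static unsigned my_func(Context::Drq *, Context *target, void *arg)
*   {
*     // runs on the CPU of `target', right after switching to it
*     return 0; // or Context::Drq::Need_resched to request rescheduling
*   }
*   ...
*   target->drq(my_func, my_arg);  // enqueue and block until the reply ran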
1541 PUBLIC inline NEEDS[Context::enqueue_drq, "logdefs.h"]
1543 Context::drq(Drq *drq, Drq::Request_func *func, void *arg,
1544 Drq::Request_func *reply = 0,
1545 Drq::Exec_mode exec = Drq::Target_ctxt,
1546 Drq::Wait_mode wait = Drq::Wait)
1548 // printf("CPU[%2u:%p]: > Context::drq(this=%p, src=%p, func=%p, arg=%p)\n", current_cpu(), current(), this, src, func,arg);
1549 Context *cur = current();
1550 LOG_TRACE("DRQ Stuff", "drq", cur, Drq_log,
1551 l->type = Drq_log::Type::Send;
1553 l->func = (void*)func;
1554 l->reply = (void*)reply;
1556 l->target_cpu = cpu();
1559 //assert_kdb (current() == src);
1560 assert_kdb (!(wait == Drq::Wait && (cur->state() & Thread_drq_ready)) || cur->cpu() == cpu());
1561 assert_kdb (!((wait == Drq::Wait || drq == &_drq) && cur->state() & Thread_drq_wait));
1562 assert_kdb (!drq->queued());
1567 cur->state_add(wait == Drq::Wait ? Thread_drq_wait : 0);
1570 enqueue_drq(drq, exec);
1572 //LOG_MSG_3VAL(src, "<drq", src->state(), Mword(this), 0);
1573 while (wait == Drq::Wait && cur->state() & Thread_drq_wait)
1575 cur->state_del(Thread_ready_mask);
1579 LOG_TRACE("DRQ Stuff", "drq", cur, Drq_log,
1580 l->type = Drq_log::Type::Done;
1582 l->func = (void*)func;
1583 l->reply = (void*)reply;
1585 l->target_cpu = cpu();
1587 //LOG_MSG_3VAL(src, "drq>", src->state(), Mword(this), 0);
1592 Context::kernel_context_drq(Drq::Request_func *func, void *arg,
1593 Drq::Request_func *reply = 0)
1595 char align_buffer[2*sizeof(Drq)];
1596 Drq *mdrq = new ((void*)((Address)(align_buffer + __alignof__(Drq) - 1) & ~(__alignof__(Drq)-1))) Drq;
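// The buffer above is deliberately over-sized (2*sizeof(Drq)) so that
// rounding its start address up to the next __alignof__(Drq) boundary --
// which is what the expression above does -- still leaves room for a whole
// Drq; placement new then constructs the request at that aligned address.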
1601 mdrq->reply = reply;
1602 Context *kc = kernel_context(current_cpu());
1604 kc->_drq_q.enq(mdrq);
1605 bool resched = schedule_switch_to_locked(kc);
1609 PUBLIC inline NEEDS[Context::drq]
1611 Context::drq(Drq::Request_func *func, void *arg,
1612 Drq::Request_func *reply = 0,
1613 Drq::Exec_mode exec = Drq::Target_ctxt,
1614 Drq::Wait_mode wait = Drq::Wait)
1615 { return drq(&current()->_drq, func, arg, reply, exec, wait); }
1619 Context::rcu_unblock(Rcu_item *i)
1621 assert_kdb(cpu_lock.test());
1622 Context *const c = static_cast<Context*>(i);
1623 c->state_change_dirty(~Thread_waiting, Thread_ready);
1624 Sched_context::rq.current().deblock(c->sched());
1630 Context::recover_jmp_buf(jmp_buf *b)
1631 { _recover_jmpbuf = b; }
1635 Context::xcpu_tlb_flush(...)
1637 // This should always be optimized away
1641 IMPLEMENT_DEFAULT inline
1643 Context::arch_load_vcpu_kern_state(Vcpu_state *, bool)
1646 IMPLEMENT_DEFAULT inline
1648 Context::arch_load_vcpu_user_state(Vcpu_state *, bool)
1651 IMPLEMENT_DEFAULT inline
1653 Context::arch_update_vcpu_state(Vcpu_state *)
1656 //----------------------------------------------------------------------------
1657 IMPLEMENTATION [!mp]:
1664 Context::cpu(bool running = false) const
1667 return Cpu_number::boot_cpu();
1672 PUBLIC static inline
1674 Context::enable_tlb(Cpu_number)
1677 PUBLIC static inline
1679 Context::disable_tlb(Cpu_number)
1685 Context::remote_ready_enqueue()
1687 WARN("Context::remote_ready_enqueue(): in UP system !\n");
1688 kdb_ke("Fiasco BUG");
1693 Context::enqueue_drq(Drq *rq, Drq::Exec_mode /*exec*/)
1695 assert_kdb (cpu_lock.test());
1697 if (access_once(&_cpu) != current_cpu())
1699 bool do_sched = _drq_q.execute_request(rq, Drq_q::No_drop, true);
1700 //LOG_MSG_3VAL(this, "drqX", access_once(&_cpu), current_cpu(), state());
1701 if (access_once(&_cpu) == current_cpu() && (state() & Thread_ready_mask))
1703 Sched_context::rq.current().ready_enqueue(sched());
1709 { // LOG_MSG_3VAL(this, "adrq", state(), (Mword)current(), (Mword)rq);
1711 bool do_sched = _drq_q.execute_request(rq, Drq_q::No_drop, true);
1712 if (!in_ready_list() && (state() & Thread_ready_mask))
1714 Sched_context::rq.current().ready_enqueue(sched());
1724 PRIVATE inline NOEXPORT
1726 Context::shutdown_drqs()
1727 { _drq_q.handle_requests(Drq_q::Drop); }
1734 // The UP case does not need to block for the next grace period, because
1735 // the CPU is always in a quiescent state while interrupts are enabled
1738 PUBLIC static inline
1740 Context::xcpu_tlb_flush(bool, Mem_space *, Mem_space *)
1745 //----------------------------------------------------------------------------
1749 #include "queue_item.h"
1751 EXTENSION class Context
1755 class Pending_rqq : public Queue
1758 static void enq(Context *c);
1759 bool handle_requests(Context **);
1762 class Pending_rq : public Queue_item, public Context_member
1766 static Per_cpu<Pending_rqq> _pending_rqq;
1767 static Per_cpu<Drq_q> _glbl_drq_q;
1768 static Cpu_mask _tlb_active;
1774 //----------------------------------------------------------------------------
1775 IMPLEMENTATION [mp]:
1777 #include "globals.h"
1780 #include "lock_guard.h"
1783 DEFINE_PER_CPU Per_cpu<Context::Pending_rqq> Context::_pending_rqq;
1784 DEFINE_PER_CPU Per_cpu<Context::Drq_q> Context::_glbl_drq_q;
1785 Cpu_mask Context::_tlb_active;
1787 PUBLIC static inline
1789 Context::enable_tlb(Cpu_number cpu)
1790 { _tlb_active.atomic_set(cpu); }
1792 PUBLIC static inline
1794 Context::disable_tlb(Cpu_number cpu)
1795 { _tlb_active.atomic_clear(cpu); }
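// _tlb_active tracks which CPUs currently have an active TLB; remote TLB
// flush requests are only sent to CPUs whose bit is set here, see
// xcpu_tlb_flush() further below.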
1798 * \brief Enqueue the given \a c into its CPU's queue.
1799 * \param c the context to enqueue for DRQ handling.
1801 IMPLEMENT inline NEEDS["globals.h", "lock_guard.h", "kdb_ke.h"]
1803 Context::Pending_rqq::enq(Context *c)
1805 // FIXME: is it safe to do the check without a locked queue, or may
1806 // we lose DRQs then?
1808 //if (!c->_pending_rq.queued())
1810 Queue &q = Context::_pending_rqq.cpu(c->cpu());
1811 auto guard = lock_guard(q.q_lock());
1812 if (c->_pending_rq.queued())
1814 q.enqueue(&c->_pending_rq);
1820 * \brief Wake up all contexts with pending DRQs.
1822 * This function wakes up all contexts from the pending queue.
1826 Context::Pending_rqq::handle_requests(Context **mq)
1828 //LOG_MSG_3VAL(current(), "phq", current_cpu(), 0, 0);
1829 // printf("CPU[%2u:%p]: Context::Pending_rqq::handle_requests() this=%p\n", current_cpu(), current(), this);
1830 bool resched = false;
1831 Context *curr = current();
1836 auto guard = lock_guard(q_lock());
1837 Queue_item *qi = first();
1841 check_kdb (dequeue(qi, Queue_item::Ok));
1842 c = static_cast<Context::Pending_rq *>(qi)->context();
1845 assert_kdb (c->check_for_current_cpu());
1847 if (EXPECT_FALSE(c->_migration != 0))
1849 // if the currently executing thread shall be migrated we must defer
1850 // this until we have handled the whole request queue, otherwise we
1851 // would miss the remaining requests or execute them on the wrong CPU.
1854 // we can directly migrate the thread...
1855 resched |= c->initiate_migration();
1857 // if migrated away skip the resched test below
1858 if (access_once(&c->_cpu) != current_cpu())
1865 c->try_finish_migration();
1867 if (EXPECT_TRUE(c != curr && c->drq_pending()))
1868 c->state_add(Thread_drq_ready);
1870 // FIXME: must we also reschedule when c cannot preempt the current
1871 // thread but its current scheduling context?
1872 if (EXPECT_TRUE(c != curr && (c->state() & Thread_ready_mask)))
1874 //printf("CPU[%2u:%p]: Context::Pending_rqq::handle_requests() dequeded %p(%u)\n", current_cpu(), current(), c, qi->queued());
1875 resched |= Sched_context::rq.current().deblock(c->sched(), curr->sched(), false);
1882 Context::global_drq(Cpu_number cpu, Drq::Request_func *func, void *arg,
1883 Drq::Request_func *reply = 0, bool wait = true)
1885 assert_kdb (this == current());
1891 state_add(wait ? Thread_drq_wait : 0);
1893 _glbl_drq_q.cpu(cpu).enq(&_drq);
1895 Ipi::send(Ipi::Global_request, this->cpu(), cpu);
1897 //LOG_MSG_3VAL(this, "<drq", state(), Mword(this), 0);
1898 while (wait && (state() & Thread_drq_wait))
1900 state_del(Thread_ready_mask);
1908 Context::handle_global_requests()
1910 return _glbl_drq_q.cpu(current_cpu()).handle_requests();
1915 Context::enqueue_drq(Drq *rq, Drq::Exec_mode /*exec*/)
1917 assert_kdb (cpu_lock.test());
1918 // printf("CPU[%2u:%p]: Context::enqueue_request(this=%p, src=%p, func=%p, arg=%p)\n", current_cpu(), current(), this, src, func,arg);
1920 if (cpu() != current_cpu())
1925 // read the CPU again; we may have been migrated meanwhile
1926 Cpu_number cpu = access_once(&this->_cpu);
1929 Queue &q = Context::_pending_rqq.cpu(cpu);
1930 auto guard = lock_guard(q.q_lock());
1933 // migrated between getting the lock and reading the CPU, so the
1934 // new CPU is responsible for executing our request
1935 if (access_once(&this->_cpu) != cpu)
1938 if (EXPECT_FALSE(!Cpu::online(cpu)))
1940 if (EXPECT_FALSE(!_drq_q.dequeue(rq, Queue_item::Ok)))
1944 // execute locally under the target CPU's queue lock
1945 _drq_q.execute_request(rq, Drq_q::No_drop, true);
1947 // free the lock early
1949 if ( access_once(&this->_cpu) == current_cpu()
1951 && (state() & Thread_ready_mask))
1953 Sched_context::rq.current().ready_enqueue(sched());
1959 if (!_pending_rq.queued())
1964 q.enqueue(&_pending_rq);
1970 //LOG_MSG_3VAL(this, "sipi", current_cpu(), cpu(), (Mword)current());
1971 Ipi::send(Ipi::Request, current_cpu(), cpu);
1975 { // LOG_MSG_3VAL(this, "adrq", state(), (Mword)current(), (Mword)rq);
1977 bool do_sched = _drq_q.execute_request(rq, Drq_q::No_drop, true);
1978 if (!in_ready_list() && (state() & Thread_ready_mask))
1980 Sched_context::rq.current().ready_enqueue(sched());
1990 PRIVATE inline NOEXPORT
1992 Context::shutdown_drqs()
1994 if (_pending_rq.queued())
1996 auto guard = lock_guard(_pending_rq.queue()->q_lock());
1997 if (_pending_rq.queued())
1998 _pending_rq.queue()->dequeue(&_pending_rq, Queue_item::Ok);
2001 _drq_q.handle_requests(Drq_q::Drop);
2007 Context::cpu(bool running = false) const
2015 * DRQ handler for doing a ready enqueue on a remote CPU.
2017 * See remote_ready_enqueue().
2021 Context::handle_remote_ready_enqueue(Drq *, Context *self, void *)
2023 self->state_add_dirty(Thread_ready);
2028 PROTECTED inline NEEDS[Context::handle_remote_ready_enqueue]
2030 Context::remote_ready_enqueue()
2031 { drq(&handle_remote_ready_enqueue, 0); }
2036 * Block and wait for the next grace period.
2038 PUBLIC inline NEEDS["cpu_lock.h", "lock_guard.h"]
2042 auto guard = lock_guard(cpu_lock);
2043 state_change_dirty(~Thread_ready, Thread_waiting);
2044 Rcu::call(this, &rcu_unblock);
2052 Context::handle_remote_tlb_flush(Drq *, Context *, void *_s)
2054 Mem_space **s = (Mem_space **)_s;
2055 Mem_space::tlb_flush_spaces((bool)s[0], s[1], s[2]);
2063 Context::xcpu_tlb_flush(bool flush_all_spaces, Mem_space *s1, Mem_space *s2)
2065 auto g = lock_guard(cpu_lock);
2066 Mem_space *s[3] = { (Mem_space *)flush_all_spaces, s1, s2 };
2067 Cpu_number ccpu = current_cpu();
2068 for (Cpu_number i = Cpu_number::first(); i < Config::max_num_cpus(); ++i)
2069 if (ccpu != i && _tlb_active.get(i))
2070 current()->global_drq(i, Context::handle_remote_tlb_flush, s);
2073 //----------------------------------------------------------------------------
2074 IMPLEMENTATION [fpu && !ux]:
2078 PUBLIC inline NEEDS ["fpu.h"]
2080 Context::spill_fpu()
2082 // If we own the FPU, we should never be getting an "FPU unavailable" trap
2083 assert_kdb (Fpu::fpu.current().owner() == this);
2084 assert_kdb (state() & Thread_fpu_owner);
2085 assert_kdb (fpu_state());
2087 // Save the FPU state of the previous FPU owner (lazy) if applicable
2088 Fpu::save_state(fpu_state());
2089 state_del_dirty(Thread_fpu_owner);
2094 * When switching away from the FPU owner, disable the FPU to cause
2095 * the next FPU access to trap.
2096 * When switching back to the FPU owner, enable the FPU so we don't
2097 * get an FPU trap on FPU access.
2099 IMPLEMENT inline NEEDS ["fpu.h"]
2101 Context::switch_fpu(Context *t)
2103 Fpu &f = Fpu::fpu.current();
2104 if (f.is_owner(this))
2106 else if (f.is_owner(t) && !(t->state() & Thread_vcpu_fpu_disabled))
2110 //----------------------------------------------------------------------------
2111 IMPLEMENTATION [!fpu]:
2115 Context::spill_fpu()
2120 Context::switch_fpu(Context *)
2123 //----------------------------------------------------------------------------
2126 #include "tb_entry.h"
2128 /** Logged context switch. */
2129 class Tb_entry_ctx_sw : public Tb_entry
2132 using Tb_entry::_ip;
2134 Context const *dst; ///< switcher target
2135 Context const *dst_orig;
2138 Space const *from_space;
2139 Sched_context const *from_sched;
2141 unsigned print(int max, char *buf) const;
2142 } __attribute__((packed));
2146 // --------------------------------------------------------------------------
2147 IMPLEMENTATION [debug]:
2149 #include "kobject_dbg.h"
2153 Context::Drq_log::print(int maxlen, char *buf) const
2155 static char const *const _types[] =
2156 { "send", "request", "reply", "done" };
2158 char const *t = "unk";
2159 if ((unsigned)type < sizeof(_types)/sizeof(_types[0]))
2160 t = _types[(unsigned)type];
2162 return snprintf(buf, maxlen, "%s(%s) rq=%p to ctxt=%lx/%p (func=%p, reply=%p) cpu=%u",
2163 t, wait ? "wait" : "no-wait", rq, Kobject_dbg::pointer_to_id(thread),
2164 thread, func, reply, cxx::int_value<Cpu_number>(target_cpu));
2170 Tb_entry_ctx_sw::print(int maxlen, char *buf) const
2176 Mword sctxid = ~0UL;
2180 sctx = from_sched->context();
2181 sctxid = Kobject_dbg::pointer_to_id(sctx);
2183 dst = Kobject_dbg::pointer_to_id(this->dst);
2184 dst_orig = Kobject_dbg::pointer_to_id(this->dst_orig);
2186 snprintf(symstr, sizeof(symstr), L4_PTR_FMT, kernel_ip); // Jdb_symbol::...
2189 maxlen -= snprintf(buf, maxlen, "(%lx)", sctxid);
2191 maxlen -= snprintf(buf, maxlen, " ==> %lx ", dst);
2193 if (dst != dst_orig || lock_cnt)
2194 maxlen -= snprintf(buf, maxlen, "(");
2196 if (dst != dst_orig)
2197 maxlen -= snprintf(buf, maxlen, "want %lx", dst_orig);
2199 if (dst != dst_orig && lock_cnt)
2200 maxlen -= snprintf(buf, maxlen, " ");
2203 maxlen -= snprintf(buf, maxlen, "lck %ld", lock_cnt);
2205 if (dst != dst_orig || lock_cnt)
2206 maxlen -= snprintf(buf, maxlen, ") ");
2208 maxlen -= snprintf(buf, maxlen, " krnl %s", symstr);
2210 return max - maxlen;