#include "continuation.h"
#include "helping_lock.h"
#include "kobject_iface.h"
#include "mem_layout.h"
#include "member_offs.h"
#include "space.h" // Space_index
#include "spin_lock.h"
#include "thread_lock.h"

typedef Context_ptr_base<Thread> Thread_ptr;
/** A thread. This class is the driver class for most kernel functionality.
 */

friend class Jdb_thread;
friend class Jdb_thread_list;
friend class Jdb_list_threads;
friend class Jdb_list_timeouts;
friend class Jdb_tbuf_show;
enum Context_mode_kernel { Kernel = 0 };

Op_register_del_irq = 5,
Op_modify_senders = 6,

Ctl_set_pager       = 0x0010000,
Ctl_set_scheduler   = 0x0020000,
Ctl_set_mcp         = 0x0040000,
Ctl_set_prio        = 0x0080000,
Ctl_set_quantum     = 0x0100000,
Ctl_bind_task       = 0x0200000,
Ctl_alien_thread    = 0x0400000,
Ctl_ux_native       = 0x0800000,
Ctl_set_exc_handler = 0x1000000,
Ctl_vcpu_enabled    = 0x2000000,

Exr_trigger_exception = 0x20000,
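// Note: the Ctl_* values are single-bit masks (note the power-of-two
// encodings), OR'ed together into the control word of a thread-control
// request (cf. Thread::control() further below, which receives the decoded
// results). A minimal sketch with hypothetical values, not from this file:
//
//   Mword ctl = Ctl_set_pager | Ctl_set_prio | Ctl_vcpu_enabled;
//   if (ctl & Ctl_vcpu_enabled)
//     ...   // the request asks to enable vCPU mode for the thread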
enum { Stack_size = Config::PAGE_SIZE };

static Per_cpu<Dbg_stack> dbg_stack;

typedef void (Utcb_copy_func)(Thread *sender, Thread *receiver);

/**
 * @param task the task the thread should reside in.
 * @param id user-visible thread ID of the sender.
 * @param init_prio initial priority.
 * @param mcp maximum controlled priority.
 * @post state() != Thread_invalid.
 */

int handle_page_fault (Address pfa, Mword error, Mword pc,
struct Migration_helper_info

Thread(const Thread &);        ///< Default copy constructor is undefined
void *operator new(size_t);    ///< Default new operator undefined

bool handle_sigma0_page_fault (Address pfa);
/**
 * This function is the default routine that is run when a newly
 * initialized context is switch_exec()'ed.
 */
static void user_invoke();

static bool pagein_tcb_request(Return_frame *regs);

inline Mword user_ip() const;
inline void user_ip(Mword);

inline Mword user_sp() const;
inline void user_sp(Mword);

inline Mword user_flags() const;

/** Nesting level in the debugger (always a critical section) if > 1. */
static Per_cpu<unsigned long> nested_trap_recover;

static void handle_remote_requests_irq() asm ("handle_remote_cpu_requests");
static void handle_global_remote_requests_irq() asm ("ipi_remote_call");
// implementation details follow...

explicit Thread(Context_mode_kernel);

// Another critical TCB cache line:
Thread_lock _thread_lock;

Thread_ptr _exc_handler;

Irq_base *_del_observer;

static const unsigned magic = 0xf001c001;
#include <cstdlib>              // panic()

#include "entry_frame.h"
#include "fpu_alloc.h"
#include "kmem_alloc.h"
#include "map_util.h"
#include "ram_quota.h"
#include "sched_context.h"
#include "std_macros.h"
#include "thread_state.h"

Per_cpu<unsigned long> DEFINE_PER_CPU Thread::nested_trap_recover;
Thread::Dbg_stack::Dbg_stack()
{
  stack_top = Kmem_alloc::allocator()->unaligned_alloc(Stack_size);
  if (stack_top)
    stack_top = (char *)stack_top + Stack_size;
  //printf("JDB STACK start= %p - %p\n", (char *)stack_top - Stack_size, (char *)stack_top);
}
PUBLIC inline NEEDS[Thread::thread_lock]
void
Thread::kill_lock()
{ thread_lock()->lock(); }
void *
Thread::operator new(size_t, Ram_quota *q) throw ()
{
  void *t = Mapped_allocator::allocator()->q_unaligned_alloc(q, Config::thread_block_size);
  if (t)
    {
      memset(t, 0, sizeof(Thread));
      reinterpret_cast<Thread *>(t)->_quota = q;
    }
  return t;
}
/** Class-specific allocator.
    This allocator ensures that threads are allocated at a fixed virtual
    address computed from their thread ID.
    @return address of new thread control block
 */
void *
Thread::operator new(size_t, Thread *t) throw ()
{
  // Allocate the TCB in TCB space. Actually, do not allocate anything,
  // just return the address: allocation happens on the fly in
  // Thread::handle_page_fault().
  return t;
}
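// The fixed-address scheme works because each TCB occupies one
// Config::thread_block_size-aligned slot, so any address inside a block can
// be mapped back to its Thread by rounding down -- the same computation
// is_tcb_address() performs further below. Illustrative sketch only:
//
//   Address slot = addr_inside_tcb & ~(Config::thread_block_size - 1);
//   Thread *t    = reinterpret_cast<Thread *>(slot);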
PUBLIC inline NEEDS["space.h"]
bool
Thread::bind(Space *t, void *_utcb)
{
  // _utcb == 0 for all kernel threads
  assert_kdb (!_utcb || t->is_utcb_valid(_utcb));

  Lock_guard<Spin_lock> guard(&_space);
  if (_space.get_unused())
    return false;               // already bound; unbind first

  _space.set_unused(t);

  utcb(t->kernel_utcb(_utcb));
  local_id(Address(_utcb));

  return true;
}
PUBLIC inline NEEDS["kdb_ke.h", "cpu_lock.h", "space.h"]
bool
Thread::unbind()
{
  Space *old;

    {
      Lock_guard<Spin_lock> guard(&_space);

      if (!_space.get_unused())
        return true;            // already unbound

      old = _space.get_unused();
      _space.set_unused(0);

      Mem_space *oms = old->mem_space();

      // switch to a safe page table
      if (Mem_space::current_mem_space(current_cpu()) == oms)
        Mem_space::kernel_space()->switchin_context(oms);
    }

  current()->rcu_wait();
/** Cut-down version of the Thread constructor; only for kernel threads.
    Do only what's necessary to get a kernel thread started --
    skip all the fancy stuff; no locking is necessary.
 */
Thread::Thread(Context_mode_kernel)
  : Receiver(&_thread_lock), Sender(), _del_observer(0), _magic(magic)
{
  *reinterpret_cast<void (**)()>(--_kernel_sp) = user_invoke;

  if (Config::stack_depth)
    std::memset((char *)this + sizeof(Thread), '5',
                Config::thread_block_size - sizeof(Thread) - 64);
}
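// The '5' fill enables a rough stack-usage measurement: scanning upward from
// the low end of the TCB for the first byte that is no longer '5' shows how
// deep the kernel stack (which grows down from the top of the block) has
// ever reached. A sketch of such a probe (hypothetical helper, not part of
// this file):
//
//   unsigned used_stack_bytes(Thread const *t)
//   {
//     char const *p = (char const *)t + sizeof(Thread);
//     char const *e = (char const *)t + Config::thread_block_size - 64;
//     while (p < e && *p == '5')
//       ++p;
//     return e - p;   // bytes of the poisoned region that were overwritten
//   }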
/** Destructor. Reestablish the Context constructor's precondition.
    @pre current() == thread_lock()->lock_owner()
         && state() == Thread_dead
    @post (_kernel_sp == 0) && (*(stack end) == 0) && !exists()
 */
Thread::~Thread()               // To be called in locked state.
{
  unsigned long *init_sp = reinterpret_cast<unsigned long *>
    (reinterpret_cast<unsigned long>(this) + size - sizeof(Entry_frame));

  Fpu_alloc::free_state(fpu_state());
  state_change(0, Thread_invalid);
// IPC-gate deletion stuff ------------------------------------

void
Thread::ipc_gate_deleted(Mword id)
{
  Lock_guard<Cpu_lock> g(&cpu_lock);
  if (_del_observer)
    _del_observer->hit();
}
class Del_irq_pin : public Irq_pin_dummy

Del_irq_pin::Del_irq_pin(Thread *o)
{ payload()[0] = (Address)o; }

Thread *
Del_irq_pin::thread() const
{ return (Thread *)payload()[0]; }

void
Del_irq_pin::unbind_irq()
{
  thread()->remove_delete_irq();
}

Del_irq_pin::~Del_irq_pin()
void
Thread::register_delete_irq(Irq_base *irq)
{
  irq->pin()->unbind_irq();
  irq->pin()->replace<Del_irq_pin>(this);
}

void
Thread::remove_delete_irq()
{
  if (!_del_observer)
    return;

  Irq_base *tmp = _del_observer;
  _del_observer = 0;
  tmp->pin()->unbind_irq();
}
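// Usage sketch: an IPC gate that should notify this thread about its own
// deletion gets its IRQ pin rebound to a Del_irq_pin pointing at the thread
// (illustrative only; `gate_irq` is a hypothetical Irq_base *):
//
//   thread->register_delete_irq(gate_irq);
//   // ... later, when the gate goes away, ipc_gate_deleted() fires the
//   // observer via _del_observer->hit().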
// end of: IPC-gate deletion stuff -------------------------------
/** Lookup function: find the Thread instance that owns a given Context.
    @return the thread that owns the context
 */
PUBLIC static inline
Thread *
Thread::lookup(Context *c)
{
  return reinterpret_cast<Thread *>(c);
}

PUBLIC static inline
Thread const *
Thread::lookup(Context const *c)
{
  return reinterpret_cast<Thread const *>(c);
}
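// This works because Thread derives from Context and the cast does not
// adjust the pointer, so the lookup is a zero-cost reinterpretation.
// Typical use, as in current_thread() right below:
//
//   Thread *self = Thread::lookup(current());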
/** Currently executing thread.
    @return currently executing thread.
 */
inline
Thread *
current_thread()
{ return Thread::lookup(current()); }

PUBLIC inline
bool
Thread::exception_triggered() const
{ return _exc_cont.valid(); }
// state requests/manipulation

/** Thread lock.
    Overwrites Context's version of thread_lock() with a semantically
    equivalent, but more efficient version.
    @return lock used to synchronize accesses to the thread.
 */
PUBLIC inline
Thread_lock *
Thread::thread_lock()
{ return &_thread_lock; }
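// Callers take the lock through a guard so it is released on every exit
// path, as Thread::do_kill() below does. Sketch:
//
//   Lock_guard<Thread_lock> guard(t->thread_lock());
//   // ... manipulate t's state; the guard's destructor unlocks ...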
PUBLIC inline NEEDS ["config.h", "timeout.h"]
void
Thread::handle_timer_interrupt()
{
  unsigned _cpu = cpu(true);

  // XXX: This assumes periodic timers (i.e. bogus in one-shot mode)
  if (!Config::fine_grained_cputime)
    consume_time(Config::scheduler_granularity);

  bool resched = Rcu::do_pending_work(_cpu);

  // Check if we need to reschedule due to timeouts or wakeups
  if ((Timeout_q::timeout_queue.cpu(_cpu).do_timeouts() || resched)
      && !schedule_in_progress())
    {
      schedule();
      assert (timeslice_timeout.cpu(cpu(true))->is_set());  // Coma check
    }
}
  // Cancel must be cleared on all kernel entry paths. See slowtraps for
  // why we delay doing it until here.
  state_del(Thread_cancel);

  // we haven't been re-initialized (cancel was not set) -- so sleep
  if (state_change_safely(~Thread_ready, Thread_cancel | Thread_dead))
    while (!(state() & Thread_ready))
      schedule();
}

PUBLIC static
void
Thread::halt_current()
{
  for (;;)
    {
      current_thread()->halt();
      kdb_ke("Thread not halted");
    }
}
PRIVATE static inline
void
Thread::user_invoke_generic()
{
  Context *const c = current();
  assert_kdb (c->state() & Thread_ready_mask);

  if (c->handle_drq() && !c->schedule_in_progress())
    c->schedule();

  // release CPU lock explicitly, because
  // * the context that switched to us holds the CPU lock
  // * we run on a newly-created stack without a CPU lock guard
  cpu_lock.clear();
}
PRIVATE static
void
Thread::leave_and_kill_myself()
{
  current_thread()->do_kill();

  WARN("dead thread scheduled: %lx\n", current_thread()->kobject()->dbg_id());

  kdb_ke("DEAD SCHED");
}

PUBLIC static
unsigned
Thread::handle_kill_helper(Drq *src, Context *, void *)
{
  delete nonull_static_cast<Thread *>(src->context());
  return Drq::No_answer | Drq::Need_resched;
}
PRIVATE
bool
Thread::do_kill()
{
  Lock_guard<Thread_lock> guard(thread_lock());

  if (state() == Thread_invalid)
    return false;

  // But first prevent it from being woken up by asynchronous events

    {
      Lock_guard<Cpu_lock> guard(&cpu_lock);

      // if IPC timeout is active, reset it

      // Switch to time-sharing mode
      set_mode(Sched_mode(0));

      // Switch to the time-sharing scheduling context
      if (sched() != sched_context())
        switch_sched(sched_context());

      if (current_sched()->context() == this)
        set_current_sched(current()->sched());
    }

  // possibly dequeue from a wait queue

  // if other threads want to send me IPC messages, abort these
    {
      Lock_guard<Cpu_lock> guard(&cpu_lock);
      while (Sender *s = Sender::cast(sender_list()->head()))
        {
          s->ipc_receiver_aborted();
          Proc::preemption_point();
        }
    }

  // if engaged in an IPC operation, stop it
  if (in_sender_list())
    sender_dequeue(receiver()->sender_list());

  vcpu_set_user_space(0);

  state_change_dirty(0, Thread_dead);

  // dequeue from system queues

  if (_del_observer)
    {
      _del_observer->pin()->unbind_irq();
      _del_observer = 0;
    }

  state_del_dirty(Thread_ready_mask);

  WARN("woken up dead thread %lx\n", kobject()->dbg_id());

  state_del_dirty(Thread_ready_mask);

  kernel_context_drq(handle_kill_helper, 0);
PRIVATE static
unsigned
Thread::handle_remote_kill(Drq *, Context *self, void *)
{
  Thread *c = nonull_static_cast<Thread *>(self);
  c->state_add_dirty(Thread_cancel | Thread_ready);
  c->_exc_cont.restore(c->regs());
  c->do_trigger_exception(c->regs(), (void *)&Thread::leave_and_kill_myself);
  return 0;
}
PUBLIC
bool
Thread::kill()
{
  Lock_guard<Cpu_lock> guard(&cpu_lock);

  if (cpu() == current_cpu())
    {
      state_add_dirty(Thread_cancel | Thread_ready);
      sched()->deblock(cpu());
      _exc_cont.restore(regs());   // overwrite an already-triggered exception
      do_trigger_exception(regs(), (void *)&Thread::leave_and_kill_myself);
      // current()->switch_exec(this, Helping);
    }

  drq(Thread::handle_remote_kill, 0, 0, Drq::Any_ctxt);

  drq(Thread::handle_migration, reinterpret_cast<void *>(current_cpu()));

  assert_kdb (cpu() == current_cpu());
PUBLIC
void
Thread::set_sched_params(unsigned prio, Unsigned64 quantum)
{
  Sched_context *sc = sched_context();
  bool const change = prio != sc->prio()
                      || quantum != sc->quantum();
  bool const ready_queued = in_ready_list();

  if (!change && (ready_queued || this == current()))
    return;

  sc->set_prio(prio);
  sc->set_quantum(quantum);

  if (sc == current_sched())
    set_current_sched(sc);

  if (state() & Thread_ready_mask)
    {
      if (this != current())
        ready_enqueue();
PUBLIC
int
Thread::control(Thread_ptr const &pager, Thread_ptr const &exc_handler,
                Space *task, void *user_utcb,
                bool utcb_vcpu_flag = false, bool utcb_vcpu_val = false)
{
  bool new_vcpu_state = state() & Thread_vcpu_enabled;
  if (utcb_vcpu_flag)
    new_vcpu_state = utcb_vcpu_val;

  if (task)
    {
      if (EXPECT_FALSE(!task->is_utcb_valid(user_utcb, 1 + new_vcpu_state)))
        return -L4_err::EInval;

      if (EXPECT_FALSE(!bind(task, user_utcb)))
        return -L4_err::EInval;  // unbind first!

      if (new_vcpu_state)
        vcpu_state(utcb() + 1);
    }

  if (new_vcpu_state)
    {
      if (!space()->is_utcb_valid((void *)local_id(), 2))
        return -L4_err::EInval;
      vcpu_state(utcb() + 1);

      state_add_dirty(Thread_vcpu_enabled);
    }
  else
    // we're not clearing the vcpu_state pointer; it is not used unless
    // vCPU mode is enabled
    state_del_dirty(Thread_vcpu_enabled);

  if (pager.is_valid())
    _pager = pager;

  if (exc_handler.is_valid())
    _exc_handler = exc_handler;
/** Clears the UTCB pointer of the Thread.
 *  Reason: to avoid a stale pointer after unmapping and deallocating the
 *  UTCB. Without this, Thread_lock::clear would access the UTCB after it
 *  has been unmapped -> POOF.
 */
PUBLIC inline
void
Thread::unset_utcb_ptr()
{
  utcb(0);
  local_id(0);
}
PRIVATE static inline
bool FIASCO_WARN_RESULT
Thread::copy_utcb_to_utcb(L4_msg_tag const &tag, Thread *snd, Thread *rcv,
                          unsigned char rights)
{
  assert (cpu_lock.test());

  Utcb *snd_utcb = snd->access_utcb();
  Utcb *rcv_utcb = rcv->access_utcb();
  Mword s = tag.words();
  Mword r = Utcb::Max_words;

  Mem::memcpy_mwords(rcv_utcb->values, snd_utcb->values, r < s ? r : s);

  bool success = true;
  if (tag.items())
    success = transfer_msg_items(tag, snd, snd_utcb, rcv, rcv_utcb, rights);

  if (tag.transfer_fpu() && rcv_utcb->inherit_fpu() && (rights & L4_fpage::W))
    snd->transfer_fpu(rcv);

  return success;
}
PUBLIC inline NEEDS[Thread::copy_utcb_to_ts, Thread::copy_utcb_to_utcb,
                    Thread::copy_ts_to_utcb]
bool FIASCO_WARN_RESULT
Thread::copy_utcb_to(L4_msg_tag const &tag, Thread *receiver,
                     unsigned char rights)
{
  // we cannot copy trap state to trap state!
  assert_kdb (!this->_utcb_handler || !receiver->_utcb_handler);
  if (EXPECT_FALSE(this->_utcb_handler != 0))
    return copy_ts_to_utcb(tag, this, receiver, rights);
  else if (EXPECT_FALSE(receiver->_utcb_handler != 0))
    return copy_utcb_to_ts(tag, this, receiver, rights);
  else
    return copy_utcb_to_utcb(tag, this, receiver, rights);
}
PUBLIC static inline
bool
Thread::is_tcb_address(Address a)
{
  a &= ~(Config::thread_block_size - 1);
  return reinterpret_cast<Thread *>(a)->_magic == magic;
}
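// Usage sketch: the page-fault path can use this to recognize a fault on a
// not-yet-allocated TCB slot and page it in lazily instead of failing
// (cf. operator new(size_t, Thread *) and pagein_tcb_request() above).
// Illustrative only:
//
//   if (Thread::is_tcb_address(pfa))
//     ...   // back the TCB slot with a fresh page and retry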
PUBLIC static inline
void
Thread::assert_irq_entry()
{
  assert_kdb (current_thread()->schedule_in_progress()
              || current_thread()->state() & (Thread_ready_mask | Thread_drq_wait | Thread_waiting));
}
// ---------------------------------------------------------------------------

PRIVATE inline
bool
Thread::check_sys_ipc(unsigned flags, Thread **partner, Thread **sender,
                      bool *have_recv) const
{
  if (flags & L4_obj_ref::Ipc_recv)
    {
      *sender = flags & L4_obj_ref::Ipc_open_wait ? 0 : const_cast<Thread *>(this);
      *have_recv = true;
    }

  if (flags & L4_obj_ref::Ipc_send)
    *partner = const_cast<Thread *>(this);

  // FIXME: should be removed; flags == 0 is a no-op
  if (!flags)
    {
      *sender = const_cast<Thread *>(this);
      *partner = const_cast<Thread *>(this);
      *have_recv = true;
    }

  return *have_recv || ((flags & L4_obj_ref::Ipc_send) && *partner);
}
PRIVATE static
unsigned
Thread::handle_migration_helper(Drq *, Context *, void *p)
{
  Migration_helper_info const *inf = (Migration_helper_info const *)p;
  return inf->victim->migration_helper(&inf->inf);
}
PRIVATE
void
Thread::do_migration()
{
  assert_kdb (cpu_lock.test());
  assert_kdb (current_cpu() == cpu(true));

  Migration_helper_info inf;

    {
      Lock_guard<Spin_lock> g(affinity_lock());
      inf.inf = _migration_rq.inf;
      _migration_rq.pending = false;
      _migration_rq.in_progress = true;
    }

  unsigned on_cpu = cpu();

  if (inf.inf.cpu == ~0U)
    {
      // target CPU ~0U means: suspend the thread
      state_add_dirty(Thread_suspended);
      set_sched_params(0, 0);
      _migration_rq.in_progress = false;
      return;
    }

  state_del_dirty(Thread_suspended);

  if (inf.inf.cpu == on_cpu)
    {
      // we stay on this CPU: just apply the new scheduling parameters
      set_sched_params(inf.inf.prio, inf.inf.quantum);
      _migration_rq.in_progress = false;
      return;
    }

  // spill FPU state into memory before migration
  if (state() & Thread_fpu_owner)
    {
      if (current() != this)
        spill_fpu();
      Fpu::set_owner(on_cpu, 0);
    }

  // if we are in the middle of the scheduler, leave it now
  if (schedule_in_progress() == this)
    reset_schedule_in_progress();

  inf.victim = this;

  if (current() == this && Config::Max_num_cpus > 1)
    kernel_context_drq(handle_migration_helper, &inf);
  else
    migration_helper(&inf.inf);
}
PUBLIC
void
Thread::initiate_migration()
{ do_migration(); }

PUBLIC
void
Thread::finish_migration()
{ enqueue_timeout_again(); }
PUBLIC
void
Thread::migrate(Migration_info const &info)
{
  assert_kdb (cpu_lock.test());

  LOG_TRACE("Thread migration", "mig", this, __thread_migration_log_fmt,
      Migration_log *l = tbe->payload<Migration_log>();
      l->state = state();
      l->src_cpu = cpu();
      l->target_cpu = info.cpu;
      l->user_ip = regs()->ip();
  );

    {
      Lock_guard<Spin_lock> g(affinity_lock());
      _migration_rq.inf = info;
      _migration_rq.pending = true;
    }

  unsigned cpu = this->cpu();

  if (current_cpu() == cpu)
    do_migration();
  else
    migrate_xcpu(cpu);
}
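// Usage sketch (hypothetical values): to move a thread to CPU 1 with its
// current priority and quantum, fill a Migration_info and hand it to
// migrate(); a target cpu of ~0U suspends the thread instead, as
// do_migration() above shows:
//
//   Migration_info info;
//   info.cpu     = 1;
//   info.prio    = t->sched_context()->prio();
//   info.quantum = t->sched_context()->quantum();
//   t->migrate(info);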
//---------------------------------------------------------------------------
IMPLEMENTATION [fpu && !ux]:

#include "fpu.h"
#include "fpu_alloc.h"
#include "fpu_state.h"

PUBLIC inline NEEDS ["fpu.h"]
void
Thread::spill_fpu()
{
  // If we own the FPU, we should never be getting an "FPU unavailable" trap
  assert_kdb (Fpu::owner(cpu()) == this);
  assert_kdb (state() & Thread_fpu_owner);
  assert_kdb (fpu_state());

  // Save the FPU state of the previous FPU owner (lazy) if applicable
  Fpu::save_state(fpu_state());
  state_del_dirty(Thread_fpu_owner);
}
/*
 * Handle FPU trap for this context. Assumes disabled interrupts.
 */
PUBLIC inline NEEDS [Thread::spill_fpu, "fpu_alloc.h", "fpu_state.h"]
int
Thread::switchin_fpu(bool alloc_new_fpu = true)
{
  unsigned cpu = this->cpu(true);

  if (state() & Thread_vcpu_fpu_disabled)
    return 0;

  // If we own the FPU, we should never be getting an "FPU unavailable" trap
  assert_kdb (Fpu::owner(cpu) != this);

  // Allocate an FPU state slab if we didn't already have one
  if (!fpu_state()->state_buffer()
      && (EXPECT_FALSE((!alloc_new_fpu
                        || (state() & Thread_alien))
                       || !Fpu_alloc::alloc_state(_quota, fpu_state()))))
    return 0;

  // Enable the FPU before accessing it, otherwise we take a recursive trap
  Fpu::enable();

  // Save the FPU state of the previous FPU owner (lazy) if applicable
  if (Fpu::owner(cpu))
    nonull_static_cast<Thread *>(Fpu::owner(cpu))->spill_fpu();

  // Become FPU owner and restore own FPU state
  Fpu::restore_state(fpu_state());

  state_add_dirty(Thread_fpu_owner);
  Fpu::set_owner(cpu, this);
  return 1;
}
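// The lazy-switching protocol in a nutshell: the FPU stays disabled across
// context switches, so a thread's first FPU instruction traps and the trap
// handler hands over ownership on demand. A sketch of that handler's core
// (the trap-entry wiring is illustrative, not from this file):
//
//   // on an "FPU unavailable" trap:
//   current_thread()->switchin_fpu();   // spills the old owner's state,
//                                       // restores ours, re-enables the FPU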
PUBLIC inline NEEDS["fpu.h", "fpu_alloc.h"]
void
Thread::transfer_fpu(Thread *to)
{
  unsigned cpu = this->cpu();
  if (cpu != to->cpu())
    return;

  if (to->fpu_state()->state_buffer())
    Fpu_alloc::free_state(to->fpu_state());

  to->fpu_state()->state_buffer(fpu_state()->state_buffer());
  fpu_state()->state_buffer(0);

  assert (current() == this || current() == to);

  Fpu::disable(); // it will be re-enabled in switch_fpu

  if (EXPECT_FALSE(Fpu::owner(cpu) == to))
    {
      assert_kdb (to->state() & Thread_fpu_owner);

      Fpu::set_owner(cpu, 0);
      to->state_del_dirty(Thread_fpu_owner);
    }
  else if (Fpu::owner(cpu) == this)
    {
      assert_kdb (state() & Thread_fpu_owner);

      state_del_dirty(Thread_fpu_owner);

      to->state_add_dirty(Thread_fpu_owner);
      Fpu::set_owner(cpu, to);
      if (EXPECT_FALSE(current() == to))
        Fpu::enable();
    }
}
//---------------------------------------------------------------------------
IMPLEMENTATION [!fpu]:

PUBLIC inline
int
Thread::switchin_fpu(bool alloc_new_fpu = true)
{
  (void)alloc_new_fpu;
  return 0;
}

//---------------------------------------------------------------------------
IMPLEMENTATION [!fpu || ux]:

PUBLIC inline
void
Thread::transfer_fpu(Thread *)
{}

//---------------------------------------------------------------------------
IMPLEMENTATION [!log]:

PUBLIC inline
unsigned Thread::sys_ipc_log(Syscall_frame *)
{ return 0; }

PUBLIC inline
unsigned Thread::sys_ipc_trace(Syscall_frame *)
{ return 0; }

PRIVATE inline
void Thread::page_fault_log(Address, unsigned, unsigned)
{}

PUBLIC static inline
int Thread::log_page_fault()
{ return 0; }

PUBLIC inline
unsigned Thread::sys_fpage_unmap_log(Syscall_frame *)
{ return 0; }

//---------------------------------------------------------------------------
IMPLEMENTATION [!io]:

PUBLIC inline
bool
Thread::has_privileged_iopl()
{ return false; }
// ----------------------------------------------------------------------------
IMPLEMENTATION [!mp]:

PRIVATE inline
unsigned
Thread::migration_helper(Migration_info const *inf)
{
  unsigned cpu = inf->cpu;
  // LOG_MSG_3VAL(this, "MGi ", Mword(current()), (current_cpu() << 16) | cpu(), Context::current_sched());

  // Not sure if this can ever happen
  Sched_context *csc = Context::current_sched();
  if (!csc || csc->context() == this)
    Context::set_current_sched(current()->sched());

  Sched_context *sc = sched_context();
  sc->set_prio(inf->prio);
  sc->set_quantum(inf->quantum);

  state_add_dirty(Thread_drq_ready);

  set_cpu_of(this, cpu);
  return Drq::No_answer | Drq::Need_resched;
}

PRIVATE inline
void
Thread::migrate_xcpu(unsigned cpu)
//----------------------------------------------------------------------------
INTERFACE [debug]:

EXTENSION class Thread
{
protected:
  struct Migration_log
  {
    Mword    state;
    Mword    user_ip;
    unsigned src_cpu;
    unsigned target_cpu;

    static unsigned fmt(Tb_entry *, int, char *)
    asm ("__thread_migration_log_fmt");
  };
};
// ----------------------------------------------------------------------------
IMPLEMENTATION [mp]:

#include "ipi.h"

IMPLEMENT
void
Thread::handle_remote_requests_irq()
{
  assert_kdb (cpu_lock.test());
  // printf("CPU[%2u]: > RQ IPI (current=%p)\n", current_cpu(), current());
  Ipi::eoi(Ipi::Request);
  Context *const c = current();
  //LOG_MSG_3VAL(c, "ipi", c->cpu(), (Mword)c, c->drq_pending());
  Context *migration_q = 0;
  bool resched = _pending_rqq.cpu(c->cpu()).handle_requests(&migration_q);

  resched |= Rcu::do_pending_work(c->cpu());

  if (migration_q)
    static_cast<Thread *>(migration_q)->do_migration();

  if ((resched || c->handle_drq()) && !c->schedule_in_progress())
    {
      //LOG_MSG_3VAL(c, "ipis", 0, 0, 0);
      // printf("CPU[%2u]: RQ IPI sched %p\n", current_cpu(), current());
      c->schedule();
    }
  // printf("CPU[%2u]: < RQ IPI (current=%p)\n", current_cpu(), current());
}
IMPLEMENT
void
Thread::handle_global_remote_requests_irq()
{
  assert_kdb (cpu_lock.test());
  // printf("CPU[%2u]: > RQ IPI (current=%p)\n", current_cpu(), current());
  Ipi::eoi(Ipi::Global_request);
  Context::handle_global_requests();
}
PRIVATE inline
unsigned
Thread::migration_helper(Migration_info const *inf)
{
  // LOG_MSG_3VAL(this, "MGi ", Mword(current()), (current_cpu() << 16) | cpu(), 0);
  assert_kdb (cpu() == current_cpu());
  assert_kdb (current() != this);
  assert_kdb (cpu_lock.test());

  // Not sure if this can ever happen
  Sched_context *csc = Context::current_sched();
  if (!csc || csc->context() == this)
    Context::set_current_sched(current()->sched());

  unsigned cpu = inf->cpu;

    {
      Queue &q = _pending_rqq.cpu(current_cpu());
      // The queue lock of the current CPU protects the cpu number in
      // the thread

      Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());

      // potentially dequeue from our local queue
      if (_pending_rq.queued())
        check_kdb (q.dequeue(&_pending_rq, Queue_item::Ok));

      Sched_context *sc = sched_context();
      sc->set_prio(inf->prio);
      sc->set_quantum(inf->quantum);

      state_add_dirty(Thread_drq_ready);

      assert_kdb (!in_ready_list());

      set_cpu_of(this, cpu);
      // now we are migrated away from current_cpu
    }

    {
      Queue &q = _pending_rqq.cpu(cpu);
      Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());

      // migrated meanwhile
      if (this->cpu() != cpu || _pending_rq.queued())
        return Drq::No_answer | Drq::Need_resched;

      q.enqueue(&_pending_rq);
    }

  //LOG_MSG_3VAL(this, "sipi", current_cpu(), cpu(), (Mword)current());
  Ipi::cpu(cpu).send(Ipi::Request);

  return Drq::No_answer | Drq::Need_resched;
}
PRIVATE inline
void
Thread::migrate_xcpu(unsigned cpu)
{
    {
      Queue &q = Context::_pending_rqq.cpu(cpu);
      Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());

      // already migrated away meanwhile?
      if (cpu != this->cpu())
        return;

      if (!_pending_rq.queued())
        q.enqueue(&_pending_rq);
    }

  Ipi::cpu(cpu).send(Ipi::Request);
}
//----------------------------------------------------------------------------
IMPLEMENTATION [debug]:

IMPLEMENT
unsigned
Thread::Migration_log::fmt(Tb_entry *e, int maxlen, char *buf)
{
  Migration_log *l = e->payload<Migration_log>();
  return snprintf(buf, maxlen, "migrate from %u to %u (state=%lx user ip=%lx)",
                  l->src_cpu, l->target_cpu, l->state, l->user_ip);
}