#include "continuation.h"
#include "helping_lock.h"
#include "mem_layout.h"
#include "member_offs.h"
#include "spin_lock.h"
#include "thread_lock.h"
typedef Context_ptr_base<Thread> Thread_ptr;
/** A thread. This class is the driver class for most kernel functionality.
FIASCO_DECLARE_KOBJ();
friend class Jdb_thread;
friend class Jdb_thread_list;
friend class Jdb_list_threads;
friend class Jdb_list_timeouts;
friend class Jdb_tbuf_show;
enum Context_mode_kernel { Kernel = 0 };
Op_register_del_irq = 5,
Op_modify_senders = 6,
Op_set_tpidruro_arm = 0x10,
Op_set_fs_amd64 = 0x12,
Ctl_set_pager = 0x0010000,
Ctl_bind_task = 0x0200000,
Ctl_alien_thread = 0x0400000,
Ctl_ux_native = 0x0800000,
Ctl_set_exc_handler = 0x1000000,
Exr_trigger_exception = 0x20000,
Vcpu_ctl_extendet_vcpu = 0x10000,
enum { Stack_size = Config::PAGE_SIZE };
static Per_cpu<Dbg_stack> dbg_stack;
typedef void (Utcb_copy_func)(Thread *sender, Thread *receiver);
* @param task the task the thread should reside in.
* @param id user-visible thread ID of the sender.
* @param init_prio initial priority.
* @param mcp maximum controlled priority.
* @post state() != Thread_invalid.
int handle_page_fault (Address pfa, Mword error, Mword pc,
struct Migration_helper_info
Thread(const Thread&); ///< Default copy constructor is undefined
void *operator new(size_t); ///< Default new operator undefined
bool handle_sigma0_page_fault (Address pfa);
* This function is the default routine run when a newly
* initialized context is switch_exec()'ed.
static void user_invoke();
static bool pagein_tcb_request(Return_frame *regs);
inline Mword user_ip() const;
inline void user_ip(Mword);
inline Mword user_sp() const;
inline void user_sp(Mword);
inline Mword user_flags() const;
/** Nesting level in the debugger (always critical) if > 1. */
static Per_cpu<unsigned long> nested_trap_recover;
static void handle_remote_requests_irq() asm ("handle_remote_cpu_requests");
static void handle_global_remote_requests_irq() asm ("ipi_remote_call");
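// The asm labels pin fixed, unmangled symbol names for these two
// handlers, presumably so the low-level (assembly) IPI entry code can
// reference them directly; their implementations below acknowledge the
// matching IPI requests via Ipi::eoi().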
explicit Thread(Context_mode_kernel);
// Another critical TCB cache line:
Thread_lock _thread_lock;
Thread_ptr _exc_handler;
Irq_base *_del_observer;
static const unsigned magic = 0xf001c001;
#include <cstdlib> // panic()
#include "entry_frame.h"
#include "fpu_alloc.h"
#include "irq_chip.h"
#include "kernel_task.h"
#include "kmem_alloc.h"
#include "map_util.h"
#include "ram_quota.h"
#include "sched_context.h"
#include "std_macros.h"
#include "thread_state.h"
FIASCO_DEFINE_KOBJ(Thread);
DEFINE_PER_CPU Per_cpu<unsigned long> Thread::nested_trap_recover;
Thread::Dbg_stack::Dbg_stack()
stack_top = Kmem_alloc::allocator()->unaligned_alloc(Stack_size);
stack_top = (char *)stack_top + Stack_size;
//printf("JDB STACK start= %p - %p\n", (char *)stack_top - Stack_size, (char *)stack_top);
PUBLIC inline NEEDS[Thread::thread_lock]
{ thread_lock()->lock(); }
Thread::operator new(size_t, Ram_quota *q) throw ()
void *t = Kmem_alloc::allocator()->q_unaligned_alloc(q, Thread::Size);
memset(t, 0, sizeof(Thread));
reinterpret_cast<Thread*>(t)->_quota = q;
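// A minimal sketch of a call site (hypothetical; thread creation
// normally goes through the kernel's object factory). The quota-charged
// placement form allocates and zeroes the TCB slab and records the
// paying quota before the constructor runs:
//
//   Ram_quota *q = ...;                          // quota to charge
//   Thread *t = new (q) Thread(Thread::Kernel);  // kernel-mode thread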
Thread::bind(Task *t, User<Utcb>::Ptr utcb)
// _utcb == 0 for all kernel threads
Space::Ku_mem const *u = t->find_ku_mem(utcb, sizeof(Utcb));
if (EXPECT_FALSE(utcb && !u))
auto guard = lock_guard(_space.lock());
if (_space.space() != Kernel_task::kernel_task())
_utcb.set(utcb, u->kern_addr(utcb));
arch_setup_utcb_ptr();
PUBLIC inline NEEDS["kdb_ke.h", "kernel_task.h", "cpu_lock.h", "space.h"]
auto guard = lock_guard(_space.lock());
if (_space.space() == Kernel_task::kernel_task())
old = static_cast<Task*>(_space.space());
_space.space(Kernel_task::kernel_task());
// switch to a safe page table
if (Mem_space::current_mem_space(current_cpu()) == old)
Kernel_task::kernel_task()->switchin_context(old);
current()->rcu_wait();
/** Cut-down version of the Thread constructor; only for kernel threads.
Do only what's necessary to get a kernel thread started --
skip all the fancy stuff; no locking is necessary.
@param task the address space
@param id user-visible thread ID of the sender
Thread::Thread(Context_mode_kernel)
: Receiver(), Sender(), _del_observer(0), _magic(magic)
*reinterpret_cast<void(**)()>(--_kernel_sp) = user_invoke;
_space.space(Kernel_task::kernel_task());
if (Config::Stack_depth)
  std::memset((char*)this + sizeof(Thread), '5',
              Thread::Size - sizeof(Thread) - 64);
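// Stack-depth accounting (what Config::Stack_depth enables): the unused
// part of the kernel stack is poisoned with '5' so that the maximum
// stack usage can later be estimated by scanning for the first
// overwritten byte; the topmost 64 bytes are left out, presumably
// because they are already occupied by the initial frame.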
/** Destructor. Reestablish the Context constructor's precondition.
@pre current() == thread_lock()->lock_owner()
&& state() == Thread_dead
@post (_kernel_sp == 0) && (* (stack end) == 0) && !exists()
Thread::~Thread() // To be called in locked state.
unsigned long *init_sp = reinterpret_cast<unsigned long*>
  (reinterpret_cast<unsigned long>(this) + Size - sizeof(Entry_frame));
Fpu_alloc::free_state(fpu_state());
_state = Thread_invalid;
// IPC-gate deletion stuff ------------------------------------
* Fake IRQ chip class for IPC-gate-delete notifications.
* This chip uses the IRQ pin number as the thread pointer and
* implements the bind and unbind functionality.
class Del_irq_chip : public Irq_chip_soft
static Del_irq_chip chip;
Del_irq_chip Del_irq_chip::chip;
Thread *Del_irq_chip::thread(Mword pin)
{ return (Thread*)pin; }
Mword Del_irq_chip::pin(Thread *t)
Del_irq_chip::unbind(Irq_base *irq)
{ thread(irq->pin())->remove_delete_irq(); }
PUBLIC inline NEEDS["irq_chip.h"]
Thread::ipc_gate_deleted(Mword id)
auto g = lock_guard(cpu_lock);
_del_observer->hit(0);
Thread::register_delete_irq(Irq_base *irq)
Del_irq_chip::chip.bind(irq, (Mword)this);
Thread::remove_delete_irq()
Irq_base *tmp = _del_observer;
// end of: IPC-gate deletion stuff -------------------------------
/** Currently executing thread.
@return currently executing thread.
{ return nonull_static_cast<Thread*>(current()); }
Thread::exception_triggered() const
{ return _exc_cont.valid(); }
Thread::continuation_test_and_restore()
bool v = _exc_cont.valid();
_exc_cont.restore(regs());
// state requests/manipulation
Override Context's version of thread_lock() with a semantically
equivalent, but more efficient version.
@return lock used to synchronize accesses to the thread.
Thread::thread_lock()
{ return &_thread_lock; }
PUBLIC inline NEEDS ["config.h", "timeout.h"]
Thread::handle_timer_interrupt()
unsigned _cpu = cpu(true);
// XXX: This assumes periodic timers (i.e., bogus in one-shot mode)
if (!Config::Fine_grained_cputime)
  consume_time(Config::Scheduler_granularity);
bool resched = Rcu::do_pending_work(_cpu);
// Check if we need to reschedule due to timeouts or wakeups
if ((Timeout_q::timeout_queue.cpu(_cpu).do_timeouts() || resched)
    && !Sched_context::rq.current().schedule_in_progress)
assert (timeslice_timeout.cpu(cpu(true))->is_set()); // Coma check
// Cancel must be cleared on all kernel entry paths. See slowtraps for
// why we delay doing it until here.
state_del(Thread_cancel);
// We haven't been re-initialized (cancel was not set) -- so sleep
if (state_change_safely(~Thread_ready, Thread_cancel | Thread_dead))
  while (!(state() & Thread_ready))
Thread::halt_current()
current_thread()->halt();
kdb_ke("Thread not halted");
PRIVATE static inline
Thread::user_invoke_generic()
Context *const c = current();
assert_kdb (c->state() & Thread_ready_mask);
// release the CPU lock explicitly, because
// * the context that switched to us holds the CPU lock
// * we run on a newly-created stack without a CPU lock guard
Thread::leave_and_kill_myself()
current_thread()->do_kill();
WARN("dead thread scheduled: %lx\n", current_thread()->dbg_id());
kdb_ke("DEAD SCHED");
Thread::handle_kill_helper(Drq *src, Context *, void *)
delete nonull_static_cast<Thread*>(src->context());
return Drq::No_answer | Drq::Need_resched;
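// Deletion must never run on the dying thread's own kernel stack:
// do_kill() therefore defers the final `delete` to the kernel context
// via a DRQ (see the kernel_context_drq(handle_kill_helper, 0) call
// below), and this helper frees the TCB from that safe stack.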
auto guard = lock_guard(thread_lock());
if (state() == Thread_invalid)
// But first prevent it from being woken up by asynchronous events
auto guard = lock_guard(cpu_lock);
// if an IPC timeout is active, reset it
// Switch to time-sharing mode
set_mode(Sched_mode(0));
Sched_context::Ready_queue &rq = Sched_context::rq.current();
// Switch to the time-sharing scheduling context
if (sched() != sched_context())
  switch_sched(sched_context(), &rq);
if (!rq.current_sched() || rq.current_sched()->context() == this)
  rq.set_current_sched(current()->sched());
// if other threads want to send me IPC messages, abort these
auto guard = lock_guard(cpu_lock);
while (Sender *s = Sender::cast(sender_list()->first()))
  s->sender_dequeue(sender_list());
  s->ipc_receiver_aborted();
  Proc::preemption_point();
// if engaged in an IPC operation, stop it
if (in_sender_list())
while (Locked_prio_list *q = wait_queue())
  auto g = lock_guard(q->lock());
  if (wait_queue() == q)
vcpu_set_user_space(0);
state_change_dirty(0, Thread_dead);
// dequeue from system queues
Sched_context::rq.current().ready_dequeue(sched());
_del_observer->unbind();
state_del_dirty(Thread_ready_mask);
WARN("woken up dead thread %lx\n", dbg_id());
state_del_dirty(Thread_ready_mask);
Sched_context::rq.current().ready_dequeue(sched());
kernel_context_drq(handle_kill_helper, 0);
Thread::handle_remote_kill(Drq *, Context *self, void *)
Thread *c = nonull_static_cast<Thread*>(self);
c->state_add_dirty(Thread_cancel | Thread_ready);
c->_exc_cont.restore(c->regs());
c->do_trigger_exception(c->regs(), (void*)&Thread::leave_and_kill_myself);
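// Cross-CPU kill, sketched: instead of tearing the victim down from a
// foreign CPU, mark it cancelled and ready and rewrite its user-level
// continuation so that it executes leave_and_kill_myself() on its own
// CPU the next time it is scheduled.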
auto guard = lock_guard(cpu_lock);
if (cpu() == current_cpu())
  state_add_dirty(Thread_cancel | Thread_ready);
  Sched_context::rq.current().deblock(sched());
  _exc_cont.restore(regs()); // overwrite an already triggered exception
  do_trigger_exception(regs(), (void*)&Thread::leave_and_kill_myself);
  // current()->switch_exec (this, Helping);
drq(Thread::handle_remote_kill, 0, 0, Drq::Any_ctxt);
Thread::set_sched_params(L4_sched_param const *p)
Sched_context *sc = sched_context();
// FIXME: We do not know how to figure this out currently; however, this
// seems to be just an optimization.
bool const change = prio != sc->prio()
                    || quantum != sc->quantum();
bool const ready_queued = in_ready_list();
if (!change && (ready_queued || this == current()))
Sched_context::Ready_queue &rq = Sched_context::rq.cpu(cpu());
rq.ready_dequeue(sched());
if (sc == rq.current_sched())
  rq.set_current_sched(sc);
if (state() & Thread_ready_mask) // maybe we could omit enqueueing current
  rq.ready_enqueue(sched());
Thread::control(Thread_ptr const &pager, Thread_ptr const &exc_handler)
if (pager.is_valid())
if (exc_handler.is_valid())
  _exc_handler = exc_handler;
Thread::is_tcb_address(Address a)
a &= ~(Thread::Size - 1);
return reinterpret_cast<Thread *>(a)->_magic == magic;
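// Why this works: TCBs are Thread::Size-aligned and Thread::Size is a
// power of two, so masking with ~(Thread::Size - 1) yields the base of
// the (potential) enclosing TCB; a live TCB is then identified by the
// magic value 0xf001c001 stored in _magic. For arbitrary addresses the
// load may fault, which callers have to tolerate (cf.
// pagein_tcb_request() above).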
Thread::assert_irq_entry()
assert_kdb(Sched_context::rq.current().schedule_in_progress
           || current_thread()->state() & (Thread_ready_mask | Thread_drq_wait | Thread_waiting | Thread_ipc_transfer));
// ---------------------------------------------------------------------------
Thread::check_sys_ipc(unsigned flags, Thread **partner, Thread **sender,
                      bool *have_recv) const
if (flags & L4_obj_ref::Ipc_recv)
  *sender = flags & L4_obj_ref::Ipc_open_wait ? 0 : const_cast<Thread*>(this);
if (flags & L4_obj_ref::Ipc_send)
  *partner = const_cast<Thread*>(this);
// FIXME: should be removed; flags == 0 is a no-op
*sender = const_cast<Thread*>(this);
*partner = const_cast<Thread*>(this);
return *have_recv || ((flags & L4_obj_ref::Ipc_send) && *partner);
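// Summed up: the request is considered valid if it has a receive phase,
// or a send phase for which a partner could be resolved.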
Thread::handle_migration_helper(Drq *rq, Context *, void *p)
Migration *inf = reinterpret_cast<Migration *>(p);
Thread *v = static_cast<Thread*>(context_of(rq));
unsigned target_cpu = access_once(&inf->cpu);
v->migrate_away(inf, false);
v->migrate_to(target_cpu);
return Drq::Need_resched | Drq::No_answer;
Thread::start_migration()
assert_kdb(cpu_lock.test());
Migration *m = _migration;
assert (!((Mword)m & 0x3)); // ensure alignment
if (!m || !mp_cas(&_migration, m, (Migration*)0))
  return reinterpret_cast<Migration*>(0x2); // bit 0 == 0 --> no need to reschedule
set_sched_params(m->sp);
write_now(&m->in_progress, true);
return reinterpret_cast<Migration*>(0x1); // bit 0 == 1 --> need to reschedule
return m; // need to do a real migration
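// Return-value encoding, as the callers' `(Mword)inf & 1` tests below
// suggest: real Migration pointers are at least 4-byte aligned (see the
// assertion above), so the two low bits are free to encode sentinels.
// 0x2 means "nothing to do, no reschedule needed", 0x1 means "nothing
// to do, but reschedule"; bit 0 thus carries the reschedule flag, and
// any other (aligned) value is a real migration record to process.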
Thread::do_migration()
Migration *inf = start_migration();
return (Mword)inf & 1; // already migrated, nothing to do
spill_fpu_if_owner();
if (current() == this)
  assert_kdb (current_cpu() == cpu());
  kernel_context_drq(handle_migration_helper, inf);
unsigned target_cpu = access_once(&inf->cpu);
migrate_away(inf, false);
migrate_to(target_cpu);
return false; // we have already been chosen by the scheduler...
Thread::initiate_migration()
assert (current() != this);
Migration *inf = start_migration();
return (Mword)inf & 1;
spill_fpu_if_owner();
unsigned target_cpu = access_once(&inf->cpu);
migrate_away(inf, false);
migrate_to(target_cpu);
Thread::finish_migration()
{ enqueue_timeout_again(); }
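// finish_migration() runs once the thread continues on its new CPU;
// re-arming a pending timeout appears to be all that remains to be done
// here, since the ready-queue state was already handed over in
// migrate_away()/migrate_to().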
//---------------------------------------------------------------------------
IMPLEMENTATION [fpu && !ux]:
#include "fpu_alloc.h"
#include "fpu_state.h"
* Handle an FPU trap for this context. Assumes interrupts are disabled.
PUBLIC inline NEEDS ["fpu_alloc.h","fpu_state.h"]
Thread::switchin_fpu(bool alloc_new_fpu = true)
if (state() & Thread_vcpu_fpu_disabled)
Fpu &f = Fpu::fpu.current();
// If we own the FPU, we should never be getting an "FPU unavailable" trap
assert_kdb (f.owner() != this);
// Allocate an FPU state slab if we don't have one already
if (!fpu_state()->state_buffer()
    && (EXPECT_FALSE((!alloc_new_fpu
                      || (state() & Thread_alien))
                     || !Fpu_alloc::alloc_state(_quota, fpu_state()))))
// Enable the FPU before accessing it, otherwise we trap recursively
// Save the FPU state of the previous FPU owner (lazy) if applicable
nonull_static_cast<Thread*>(f.owner())->spill_fpu();
// Become the FPU owner and restore our own FPU state
f.restore_state(fpu_state());
state_add_dirty(Thread_fpu_owner);
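// The above is the classic lazy FPU switch: context switches leave the
// FPU contents of the last owner in place; only when another thread
// actually touches the FPU (and traps into this handler) is the
// previous owner's state spilled and the new owner's state restored.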
PUBLIC inline NEEDS["fpu.h", "fpu_alloc.h"]
Thread::transfer_fpu(Thread *to)
if (cpu() != to->cpu())
if (to->fpu_state()->state_buffer())
  Fpu_alloc::free_state(to->fpu_state());
to->fpu_state()->state_buffer(fpu_state()->state_buffer());
fpu_state()->state_buffer(0);
assert (current() == this || current() == to);
Fpu &f = Fpu::fpu.current();
f.disable(); // it will be re-enabled in switch_fpu
if (EXPECT_FALSE(f.owner() == to))
  assert_kdb (to->state() & Thread_fpu_owner);
  to->state_del_dirty(Thread_fpu_owner);
else if (f.owner() == this)
  assert_kdb (state() & Thread_fpu_owner);
  state_del_dirty(Thread_fpu_owner);
  to->state_add_dirty(Thread_fpu_owner);
if (EXPECT_FALSE(current() == to))
//---------------------------------------------------------------------------
IMPLEMENTATION [!fpu]:
Thread::switchin_fpu(bool alloc_new_fpu = true)
//---------------------------------------------------------------------------
IMPLEMENTATION [!fpu || ux]:
Thread::transfer_fpu(Thread *)
//---------------------------------------------------------------------------
IMPLEMENTATION [!log]:
unsigned Thread::sys_ipc_log(Syscall_frame *)
unsigned Thread::sys_ipc_trace(Syscall_frame *)
void Thread::page_fault_log(Address, unsigned, unsigned)
int Thread::log_page_fault()
unsigned Thread::sys_fpage_unmap_log(Syscall_frame *)
// ----------------------------------------------------------------------------
IMPLEMENTATION [!mp]:
Thread::migrate_away(Migration *inf, bool /*remote*/)
assert_kdb (current() != this);
assert_kdb (cpu_lock.test());
unsigned cpu = inf->cpu;
// LOG_MSG_3VAL(this, "MGi ", Mword(current()), (current_cpu() << 16) | cpu(), Context::current_sched());
auto &rq = Sched_context::rq.current();
// if we are in the middle of the scheduler, leave it now
if (rq.schedule_in_progress == this)
  rq.schedule_in_progress = 0;
rq.ready_dequeue(sched());
// Not sure if this can ever happen
Sched_context *csc = rq.current_sched();
if (!csc || csc->context() == this)
  rq.set_current_sched(current()->sched());
Sched_context *sc = sched_context();
set_cpu_of(this, cpu);
inf->in_progress = true;
_need_to_finish_migration = true;
Thread::migrate_to(unsigned target_cpu)
if (!Cpu::online(target_cpu))
auto &rq = Sched_context::rq.current();
if (state() & Thread_ready_mask && !in_ready_list())
  rq.ready_enqueue(sched());
enqueue_timeout_again();
Thread::migrate(Migration *info)
assert_kdb (cpu_lock.test());
LOG_TRACE("Thread migration", "mig", this, Migration_log,
l->state = state(false);
l->target_cpu = info->cpu;
l->user_ip = regs()->ip();
current()->schedule_if(do_migration());
//----------------------------------------------------------------------------
#include "tb_entry.h"
EXTENSION class Thread
struct Migration_log : public Tb_entry
unsigned target_cpu;
unsigned print(int, char *) const;
// ----------------------------------------------------------------------------
IMPLEMENTATION [mp]:
Thread::migrate(Migration *info)
assert_kdb (cpu_lock.test());
LOG_TRACE("Thread migration", "mig", this, Migration_log,
l->state = state(false);
l->target_cpu = info->cpu;
l->user_ip = regs()->ip();
while (!mp_cas(&_migration, old, info));
// flag the old migration as done / stale
old->in_progress = true;
unsigned cpu = this->cpu();
if (current_cpu() == cpu || Config::Max_num_cpus == 1)
  current()->schedule_if(do_migration());
// FIXME: use monitor & mwait or wfe & sev if available
while (!access_once(&info->in_progress))
Thread::handle_remote_requests_irq()
assert_kdb (cpu_lock.test());
// printf("CPU[%2u]: > RQ IPI (current=%p)\n", current_cpu(), current());
Context *const c = current();
Ipi::eoi(Ipi::Request, c->cpu());
//LOG_MSG_3VAL(c, "ipi", c->cpu(), (Mword)c, c->drq_pending());
// We might have to migrate the currently running thread, and we cannot
// do that while processing the request queue. In that case the thread
// is returned in migration_q and we handle it here.
Context *migration_q = 0;
bool resched = _pending_rqq.current().handle_requests(&migration_q);
resched |= Rcu::do_pending_work(c->cpu());
resched |= static_cast<Thread*>(migration_q)->do_migration();
resched |= c->handle_drq();
if (Sched_context::rq.current().schedule_in_progress)
if (c->state() & Thread_ready_mask)
  Sched_context::rq.current().ready_enqueue(c->sched());
Thread::handle_global_remote_requests_irq()
assert_kdb (cpu_lock.test());
// printf("CPU[%2u]: > RQ IPI (current=%p)\n", current_cpu(), current());
Ipi::eoi(Ipi::Global_request, current_cpu());
Context::handle_global_requests();
Thread::migrate_away(Migration *inf, bool remote)
assert_kdb (check_for_current_cpu());
assert_kdb (current() != this);
assert_kdb (cpu_lock.test());
//printf("[%u] %lx: m %lx %u -> %u\n", current_cpu(), current_thread()->dbg_id(), this->dbg_id(), cpu(), inf->cpu);
Sched_context::Ready_queue &rq = Sched_context::rq.cpu(cpu());
// if we are in the middle of the scheduler, leave it now
if (rq.schedule_in_progress == this)
  rq.schedule_in_progress = 0;
rq.ready_dequeue(sched());
// Not sure if this can ever happen
Sched_context *csc = rq.current_sched();
if (!remote && (!csc || csc->context() == this))
  rq.set_current_sched(current()->sched());
unsigned target_cpu = inf->cpu;
Queue &q = _pending_rqq.cpu(cpu());
// The queue lock of the current CPU protects the cpu number in
  ? lock_guard(q.q_lock())
  : Lock_guard<cxx::remove_pointer<decltype(q.q_lock())>::type>();
assert_kdb (q.q_lock()->test());
// potentially dequeue from our local queue
if (_pending_rq.queued())
  check_kdb (q.dequeue(&_pending_rq, Queue_item::Ok));
Sched_context *sc = sched_context();
assert_kdb (!in_ready_list());
assert_kdb (!_pending_rq.queued());
set_cpu_of(this, target_cpu);
write_now(&inf->in_progress, true);
_need_to_finish_migration = true;
Thread::migrate_to(unsigned target_cpu)
Queue &q = _pending_rqq.cpu(target_cpu);
auto g = lock_guard(q.q_lock());
if (access_once(&this->_cpu) == target_cpu
    && EXPECT_FALSE(!Cpu::online(target_cpu)))
// migrated meanwhile
if (access_once(&this->_cpu) != target_cpu || _pending_rq.queued())
if (!_pending_rq.queued())
q.enqueue(&_pending_rq);
assert_kdb (_pending_rq.queue() == &q);
//LOG_MSG_3VAL(this, "sipi", current_cpu(), cpu(), (Mword)current());
Ipi::send(Ipi::Request, current_cpu(), target_cpu);
Thread::migrate_xcpu(unsigned cpu)
Queue &q = Context::_pending_rqq.cpu(cpu);
auto g = lock_guard(q.q_lock());
if (cpu != access_once(&this->_cpu))
// Now we are sure that this thread stays on 'cpu' because
// we hold the rqq lock of 'cpu'
if (!Cpu::online(cpu))
Migration *inf = start_migration();
return; // all done, nothing to do
unsigned target_cpu = access_once(&inf->cpu);
migrate_away(inf, true);
migrate_to(target_cpu);
// FIXME: How long does a ready dequeue take with WFQ?
// It is done while holding a spin lock!!!!
if (!_pending_rq.queued())
q.enqueue(&_pending_rq);
Ipi::send(Ipi::Request, current_cpu(), cpu);
//----------------------------------------------------------------------------
IMPLEMENTATION [debug]:
Thread::Migration_log::print(int maxlen, char *buf) const
return snprintf(buf, maxlen, "migrate from %u to %u (state=%lx user ip=%lx)",
                src_cpu, target_cpu, state, user_ip);