#include "continuation.h"
#include "helping_lock.h"
#include "mem_layout.h"
#include "member_offs.h"
#include "space.h" // Space_index
#include "spin_lock.h"
#include "thread_lock.h"

typedef Context_ptr_base<Thread> Thread_ptr;

/** A thread. This class is the driver class for most kernel functionality.
FIASCO_DECLARE_KOBJ();

friend class Jdb_thread;
friend class Jdb_thread_list;
friend class Jdb_list_threads;
friend class Jdb_list_timeouts;
friend class Jdb_tbuf_show;
enum Context_mode_kernel { Kernel = 0 };

Op_register_del_irq = 5,
Op_modify_senders = 6,

Ctl_set_pager       = 0x0010000,
Ctl_set_scheduler   = 0x0020000,
Ctl_set_mcp         = 0x0040000,
Ctl_set_prio        = 0x0080000,
Ctl_set_quantum     = 0x0100000,
Ctl_bind_task       = 0x0200000,
Ctl_alien_thread    = 0x0400000,
Ctl_ux_native       = 0x0800000,
Ctl_set_exc_handler = 0x1000000,

Exr_trigger_exception = 0x20000,

Vcpu_ctl_extendet_vcpu = 0x10000,
enum { Stack_size = Config::PAGE_SIZE };

static Per_cpu<Dbg_stack> dbg_stack;

typedef void (Utcb_copy_func)(Thread *sender, Thread *receiver);
 * @param task the task the thread should reside in.
 * @param id user-visible thread ID of the sender.
 * @param init_prio initial priority.
 * @param mcp maximum controlled priority.
 * @post state() != Thread_invalid.

int handle_page_fault (Address pfa, Mword error, Mword pc,
struct Migration_helper_info

Thread(const Thread&);	///< Default copy constructor is undefined
void *operator new(size_t);	///< Default new operator undefined

bool handle_sigma0_page_fault (Address pfa);
/**
 * This function is the default routine that runs when a newly
 * initialized context is switch_exec()'ed for the first time.
 */
static void user_invoke();

static bool pagein_tcb_request(Return_frame *regs);

inline Mword user_ip() const;
inline void user_ip(Mword);

inline Mword user_sp() const;
inline void user_sp(Mword);

inline Mword user_flags() const;

/** Nesting level in the debugger (always critical) if > 1. */
static Per_cpu<unsigned long> nested_trap_recover;
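// Entry points for cross-CPU request IPIs; the asm labels bind them to
// the low-level IPI stubs (implementations in the [mp] section below).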
static void handle_remote_requests_irq() asm ("handle_remote_cpu_requests");
static void handle_global_remote_requests_irq() asm ("ipi_remote_call");

explicit Thread(Context_mode_kernel);

// Another critical TCB cache line:
Thread_lock _thread_lock;

Thread_ptr _exc_handler;

Irq_base *_del_observer;

static const unsigned magic = 0xf001c001;
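// Magic value stored in every live TCB; is_tcb_address() below probes it
// to decide whether an arbitrary address points into a valid thread block.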
#include <cstdlib> // panic()

#include "entry_frame.h"
#include "fpu_alloc.h"
#include "kmem_alloc.h"
#include "map_util.h"
#include "ram_quota.h"
#include "sched_context.h"
#include "std_macros.h"
#include "thread_state.h"

FIASCO_DEFINE_KOBJ(Thread);

Per_cpu<unsigned long> DEFINE_PER_CPU Thread::nested_trap_recover;
Thread::Dbg_stack::Dbg_stack()
  stack_top = Kmem_alloc::allocator()->unaligned_alloc(Stack_size);
  stack_top = (char *)stack_top + Stack_size;
  //printf("JDB STACK start= %p - %p\n", (char *)stack_top - Stack_size, (char *)stack_top);

PUBLIC inline NEEDS[Thread::thread_lock]
{ thread_lock()->lock(); }
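// The thread lock is a helping lock (see helping_lock.h): a thread that
// blocks on it donates its CPU time to the current lock owner until the
// lock is released.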
Thread::operator new(size_t, Ram_quota *q) throw ()
  void *t = Mapped_allocator::allocator()->q_unaligned_alloc(q, Config::thread_block_size);

  memset(t, 0, sizeof(Thread));
  reinterpret_cast<Thread*>(t)->_quota = q;
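// A minimal usage sketch (hypothetical call site; `q` is the creator's
// Ram_quota): the quota-charged placement form yields 0 when the quota
// is exhausted, so callers must check the result:
//
//   Thread *t = new (q) Thread(Thread::Kernel);  // charged against q
//   if (!t)
//     /* fail with an out-of-memory error */;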
/** Class-specific allocator.
    This allocator ensures that threads are allocated at a fixed virtual
    address computed from their thread ID.
    @return address of the new thread control block */
Thread::operator new(size_t, Thread *t) throw ()
  // Allocate the TCB in TCB space. Actually, do not allocate anything;
  // just return the address. Allocation happens on the fly in
  // Thread::handle_page_fault().
  return t;
Thread::bind(Task *t, User<Utcb>::Ptr utcb)
  // _utcb == 0 for all kernel threads
  Space::Ku_mem const *u = t->find_ku_mem(utcb, sizeof(Utcb));

  if (EXPECT_FALSE(utcb && !u))
    return false;

  Lock_guard<typeof(*_space.lock())> guard(_space.lock());

  _utcb.set(utcb, u->kern_addr(utcb));
PUBLIC inline NEEDS["kdb_ke.h", "cpu_lock.h", "space.h"]
  Lock_guard<typeof(*_space.lock())> guard(_space.lock());

  old = static_cast<Task*>(_space.space());

  Mem_space *oms = old->mem_space();

  // switch to a safe page table
  if (Mem_space::current_mem_space(current_cpu()) == oms)
    Mem_space::kernel_space()->switchin_context(oms);

  current()->rcu_wait();
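  // The rcu_wait() above blocks until one RCU grace period has elapsed,
  // so no other CPU can still be dereferencing the old space when it is
  // torn down.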
/** Cut-down version of the Thread constructor; only for kernel threads.
    Does only what's necessary to get a kernel thread started --
    skips all the fancy stuff; no locking is necessary.
    @param task the address space
    @param id user-visible thread ID of the sender */
Thread::Thread(Context_mode_kernel)
  : Receiver(), Sender(), _del_observer(0), _magic(magic)
  *reinterpret_cast<void(**)()>(--_kernel_sp) = user_invoke;

  if (Config::stack_depth)
    std::memset((char*)this + sizeof(Thread), '5',
                Config::thread_block_size - sizeof(Thread) - 64);
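  // The '5' fill pattern above lets the kernel debugger estimate maximum
  // stack usage later; the topmost 64 bytes are excluded, presumably
  // because the initial frame set up above lives there.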
/** Destructor. Reestablish the Context constructor's precondition.
    @pre current() == thread_lock()->lock_owner()
         && state() == Thread_dead
    @post (_kernel_sp == 0) && (*(stack end) == 0) && !exists() */
Thread::~Thread()	// To be called in locked state.
  unsigned long *init_sp = reinterpret_cast<unsigned long*>
    (reinterpret_cast<unsigned long>(this) + size - sizeof(Entry_frame));

  Fpu_alloc::free_state(fpu_state());

  _state = Thread_invalid;
// IPC-gate deletion stuff ------------------------------------------------
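// A thread can register a "deletion IRQ" (register_delete_irq() below):
// when an IPC gate pointing to this thread is deleted, ipc_gate_deleted()
// triggers that IRQ so the owner can learn about the vanished gate.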
Thread::ipc_gate_deleted(Mword id)
  Lock_guard<Cpu_lock> g(&cpu_lock);
  if (_del_observer)
    _del_observer->hit();
class Del_irq_pin : public Irq_pin_dummy

Del_irq_pin::Del_irq_pin(Thread *o)
{ payload()[0] = (Address)o; }

Del_irq_pin::thread() const
{ return (Thread*)payload()[0]; }

Del_irq_pin::unbind_irq()
{ thread()->remove_delete_irq(); }

Del_irq_pin::~Del_irq_pin()

Thread::register_delete_irq(Irq_base *irq)
  irq->pin()->unbind_irq();
  irq->pin()->replace<Del_irq_pin>(this);

Thread::remove_delete_irq()
  Irq_base *tmp = _del_observer;
  tmp->pin()->unbind_irq();

// end of: IPC-gate deletion stuff -----------------------------------------
/** Currently executing thread.
    @return currently executing thread. */
{ return nonull_static_cast<Thread*>(current()); }

Thread::exception_triggered() const
{ return _exc_cont.valid(); }
// state requests/manipulation

/** Override Context's version of thread_lock() with a semantically
    equivalent, but more efficient version.
    @return lock used to synchronize accesses to the thread. */
Thread::thread_lock()
{ return &_thread_lock; }
PUBLIC inline NEEDS["config.h", "timeout.h"]
Thread::handle_timer_interrupt()
  unsigned _cpu = cpu(true);

  // XXX: This assumes periodic timers (i.e., bogus in one-shot mode)
  if (!Config::fine_grained_cputime)
    consume_time(Config::scheduler_granularity);
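  // Without fine-grained accounting, a full scheduler granularity is
  // charged to whichever context the tick interrupted; exact per-context
  // accounting is presumably handled elsewhere when
  // Config::fine_grained_cputime is set.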
  bool resched = Rcu::do_pending_work(_cpu);

  // Check if we need to reschedule due to timeouts or wakeups
  if ((Timeout_q::timeout_queue.cpu(_cpu).do_timeouts() || resched)
      && !schedule_in_progress())

  assert (timeslice_timeout.cpu(cpu(true))->is_set());	// Coma check
  // Cancel must be cleared on all kernel entry paths. See slowtraps for
  // why we delay doing it until here.
  state_del(Thread_cancel);

  // We haven't been re-initialized (cancel was not set) -- so sleep
  if (state_change_safely(~Thread_ready, Thread_cancel | Thread_dead))
    while (!(state() & Thread_ready))

Thread::halt_current()
  for (;;)
    {
      current_thread()->halt();
      kdb_ke("Thread not halted");
    }
PRIVATE static inline
Thread::user_invoke_generic()
  Context *const c = current();
  assert_kdb (c->state() & Thread_ready_mask);

  if (c->handle_drq() && !c->schedule_in_progress())
    c->schedule();

  // Release the CPU lock explicitly, because
  // * the context that switched to us holds the CPU lock
  // * we run on a newly-created stack without a CPU lock guard
Thread::leave_and_kill_myself()
  current_thread()->do_kill();
  WARN("dead thread scheduled: %lx\n", current_thread()->dbg_id());
  kdb_ke("DEAD SCHED");

Thread::handle_kill_helper(Drq *src, Context *, void *)
  delete nonull_static_cast<Thread*>(src->context());
  return Drq::No_answer | Drq::Need_resched;
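// Deleting the TCB from within the dying thread would pull the stack out
// from under it, so do_kill() defers the final delete to the kernel
// context via kernel_context_drq(handle_kill_helper, ...) below.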
  Lock_guard<Thread_lock> guard(thread_lock());

  if (state() == Thread_invalid)
    return false;

  // But first prevent it from being woken up by asynchronous events

  Lock_guard<Cpu_lock> guard(&cpu_lock);

  // if an IPC timeout is active, reset it

  // Switch to time-sharing mode
  set_mode(Sched_mode(0));

  // Switch to the time-sharing scheduling context
  if (sched() != sched_context())
    switch_sched(sched_context());

  if (!current_sched() || current_sched()->context() == this)
    set_current_sched(current()->sched());

  // possibly dequeue from a wait queue

  // if other threads want to send me IPC messages, abort these

  Lock_guard<Cpu_lock> guard(&cpu_lock);
  while (Sender *s = Sender::cast(sender_list()->head()))
    {
      s->ipc_receiver_aborted();
      Proc::preemption_point();
    }

  // if engaged in an IPC operation, stop it
  sender_dequeue(receiver()->sender_list());

  vcpu_set_user_space(0);

  state_change_dirty(0, Thread_dead);

  // dequeue from system queues
  if (_del_observer)
    _del_observer->pin()->unbind_irq();

  state_del_dirty(Thread_ready_mask);
  WARN("woken up dead thread %lx\n", dbg_id());

  state_del_dirty(Thread_ready_mask);

  kernel_context_drq(handle_kill_helper, 0);
Thread::handle_remote_kill(Drq *, Context *self, void *)
  Thread *c = nonull_static_cast<Thread*>(self);
  c->state_add_dirty(Thread_cancel | Thread_ready);
  c->_exc_cont.restore(c->regs());
  c->do_trigger_exception(c->regs(), (void*)&Thread::leave_and_kill_myself);

  Lock_guard<Cpu_lock> guard(&cpu_lock);

  if (cpu() == current_cpu())
    {
      state_add_dirty(Thread_cancel | Thread_ready);
      sched()->deblock(cpu());
      _exc_cont.restore(regs()); // overwrite an already triggered exception
      do_trigger_exception(regs(), (void*)&Thread::leave_and_kill_myself);
      // current()->switch_exec(this, Helping);
    }

  drq(Thread::handle_remote_kill, 0, 0, Drq::Any_ctxt);
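// Remote case: the victim belongs to another CPU, so its state may not
// be touched directly; the DRQ runs handle_remote_kill() on the victim's
// home CPU and forces it into leave_and_kill_myself().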
Thread::set_sched_params(unsigned prio, Unsigned64 quantum)
  Sched_context *sc = sched_context();

  bool const change = prio != sc->prio()
                      || quantum != sc->quantum();
  bool const ready_queued = in_ready_list();

  if (!change && (ready_queued || this == current()))
    return;

  sc->set_prio(prio);
  sc->set_quantum(quantum);

  if (sc == current_sched())
    set_current_sched(sc);

  if (state() & Thread_ready_mask)
    {
      if (this != current())
Thread::control(Thread_ptr const &pager, Thread_ptr const &exc_handler)
  if (pager.is_valid())
    _pager = pager;

  if (exc_handler.is_valid())
    _exc_handler = exc_handler;
PRIVATE static inline
bool FIASCO_WARN_RESULT
Thread::copy_utcb_to_utcb(L4_msg_tag const &tag, Thread *snd, Thread *rcv,
                          unsigned char rights)
  assert (cpu_lock.test());

  Utcb *snd_utcb = snd->utcb().access();
  Utcb *rcv_utcb = rcv->utcb().access();
  Mword s = tag.words();
  Mword r = Utcb::Max_words;

  Mem::memcpy_mwords(rcv_utcb->values, snd_utcb->values, r < s ? r : s);
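  // The copy above transfers at most Utcb::Max_words machine words:
  // messages longer than the UTCB register array are truncated to the
  // receiver's capacity.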
  success = transfer_msg_items(tag, snd, snd_utcb, rcv, rcv_utcb, rights);

  if (tag.transfer_fpu() && rcv_utcb->inherit_fpu() && (rights & L4_fpage::W))
    snd->transfer_fpu(rcv);
PUBLIC inline NEEDS[Thread::copy_utcb_to_ts, Thread::copy_utcb_to_utcb,
                    Thread::copy_ts_to_utcb]
bool FIASCO_WARN_RESULT
Thread::copy_utcb_to(L4_msg_tag const &tag, Thread* receiver,
                     unsigned char rights)
  // we cannot copy trap state to trap state!
  assert_kdb (!this->_utcb_handler || !receiver->_utcb_handler);

  if (EXPECT_FALSE(this->_utcb_handler != 0))
    return copy_ts_to_utcb(tag, this, receiver, rights);
  else if (EXPECT_FALSE(receiver->_utcb_handler != 0))
    return copy_utcb_to_ts(tag, this, receiver, rights);
  else
    return copy_utcb_to_utcb(tag, this, receiver, rights);
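// Three transfer paths: a sender whose _utcb_handler is set is executing
// in an exception, so its message lives in the trap state and is copied
// trap state -> UTCB; symmetrically for a receiver in an exception; the
// common case is the plain UTCB -> UTCB copy.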
Thread::is_tcb_address(Address a)
  a &= ~(Config::thread_block_size - 1);
  return reinterpret_cast<Thread *>(a)->_magic == magic;
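// Heuristic probe: round the address down to its thread-block boundary
// and compare the magic cookie (0xf001c001) stored in every live TCB.
// The block must be mapped for the read to be safe, cf.
// pagein_tcb_request() for faults on TCB reads from kernel mode.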
Thread::assert_irq_entry()
  assert_kdb (current_thread()->schedule_in_progress()
              || current_thread()->state() & (Thread_ready_mask | Thread_drq_wait | Thread_waiting));
// ---------------------------------------------------------------------------

Thread::check_sys_ipc(unsigned flags, Thread **partner, Thread **sender,
                      bool *have_recv) const
  if (flags & L4_obj_ref::Ipc_recv)
    *sender = flags & L4_obj_ref::Ipc_open_wait ? 0 : const_cast<Thread*>(this);

  if (flags & L4_obj_ref::Ipc_send)
    *partner = const_cast<Thread*>(this);

  // FIXME: should be removed; flags == 0 is a no-op
  if (flags == 0)
    {
      *sender = const_cast<Thread*>(this);
      *partner = const_cast<Thread*>(this);
    }

  return *have_recv || ((flags & L4_obj_ref::Ipc_send) && *partner);
Thread::handle_migration_helper(Drq *, Context *, void *p)
  Migration_helper_info const *inf = (Migration_helper_info const *)p;
  return inf->victim->migration_helper(&inf->inf);

Thread::do_migration()
  assert_kdb (cpu_lock.test());
  assert_kdb (current_cpu() == cpu(true));

  Migration_helper_info inf;

  {
    Lock_guard<typeof(_migration_rq.affinity_lock)>
      g(&_migration_rq.affinity_lock);
    inf.inf = _migration_rq.inf;
    _migration_rq.pending = false;
    _migration_rq.in_progress = true;
  }
  unsigned on_cpu = cpu();

  if (inf.inf.cpu == ~0U)
    {
      state_add_dirty(Thread_suspended);
      set_sched_params(0, 0);
      _migration_rq.in_progress = false;
      return;
    }

  state_del_dirty(Thread_suspended);

  if (inf.inf.cpu == on_cpu)
    {
      set_sched_params(inf.inf.prio, inf.inf.quantum);
      _migration_rq.in_progress = false;
      return;
    }

  // spill the FPU state into memory before migration
  if (state() & Thread_fpu_owner)
    {
      if (current() != this)
        spill_fpu();
      Fpu::set_owner(on_cpu, 0);
    }

  // if we are in the middle of the scheduler, leave it now
  if (schedule_in_progress() == this)
    reset_schedule_in_progress();

  if (current() == this && Config::Max_num_cpus > 1)
    kernel_context_drq(handle_migration_helper, &inf);
  else
    migration_helper(&inf.inf);
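// If the migrating thread is the current one, it cannot tear down its own
// CPU state while still running on it: the real work is handed to the
// kernel context via a DRQ (handle_migration_helper). Any other thread
// can be migrated directly.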
Thread::initiate_migration()

Thread::finish_migration()
{ enqueue_timeout_again(); }

Thread::migrate(Migration_info const &info)
  assert_kdb (cpu_lock.test());

  LOG_TRACE("Thread migration", "mig", this, __thread_migration_log_fmt,
      Migration_log *l = tbe->payload<Migration_log>();
      l->state = state();
      l->src_cpu = cpu();
      l->target_cpu = info.cpu;
      l->user_ip = regs()->ip();

  {
    Lock_guard<typeof(_migration_rq.affinity_lock)>
      g(&_migration_rq.affinity_lock);
    _migration_rq.inf = info;
    _migration_rq.pending = true;
  }

  unsigned cpu = this->cpu();

  if (current_cpu() == cpu)
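  // If we are on the thread's home CPU the request can be carried out
  // directly (do_migration()); otherwise it stays pending and is pushed
  // to the remote CPU, cf. migrate_xcpu() in the [mp] section below.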
//---------------------------------------------------------------------------
IMPLEMENTATION [fpu && !ux]:

#include "fpu_alloc.h"
#include "fpu_state.h"
PUBLIC inline NEEDS["fpu.h"]
Thread::spill_fpu()
  // If we own the FPU, we should never be getting an "FPU unavailable" trap
  assert_kdb (Fpu::owner(cpu()) == this);
  assert_kdb (state() & Thread_fpu_owner);
  assert_kdb (fpu_state());

  // Save the FPU state of the previous FPU owner (lazy) if applicable
  Fpu::save_state(fpu_state());

  state_del_dirty(Thread_fpu_owner);
/**
 * Handle an FPU trap for this context. Assumes disabled interrupts.
 */
PUBLIC inline NEEDS[Thread::spill_fpu, "fpu_alloc.h", "fpu_state.h"]
Thread::switchin_fpu(bool alloc_new_fpu = true)
  unsigned cpu = this->cpu(true);

  if (state() & Thread_vcpu_fpu_disabled)
    return 0;

  // If we owned the FPU, we would never get an "FPU unavailable" trap
  assert_kdb (Fpu::owner(cpu) != this);

  // Allocate an FPU state slab if we didn't already have one
  if (!fpu_state()->state_buffer()
      && (EXPECT_FALSE((!alloc_new_fpu
                        || (state() & Thread_alien))
                       || !Fpu_alloc::alloc_state(_quota, fpu_state()))))
    return 0;

  // Enable the FPU before accessing it, otherwise we trap recursively
  Fpu::enable();

  // Save the FPU state of the previous FPU owner (lazy) if applicable
  if (Fpu::owner(cpu))
    nonull_static_cast<Thread*>(Fpu::owner(cpu))->spill_fpu();

  // Become FPU owner and restore our own FPU state
  Fpu::restore_state(fpu_state());

  state_add_dirty(Thread_fpu_owner);
  Fpu::set_owner(cpu, this);
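// Lazy FPU switching: on a context switch the FPU is merely disabled and
// keeps the old owner's state. Only when the new thread actually touches
// the FPU does the resulting trap land here, where we spill the previous
// owner's state and restore our own.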
PUBLIC inline NEEDS["fpu.h", "fpu_alloc.h"]
Thread::transfer_fpu(Thread *to)
  unsigned cpu = this->cpu();

  if (cpu != to->cpu())
    return;

  if (to->fpu_state()->state_buffer())
    Fpu_alloc::free_state(to->fpu_state());

  to->fpu_state()->state_buffer(fpu_state()->state_buffer());
  fpu_state()->state_buffer(0);

  assert (current() == this || current() == to);

  Fpu::disable(); // it will be re-enabled in switch_fpu

  if (EXPECT_FALSE(Fpu::owner(cpu) == to))
    {
      assert_kdb (to->state() & Thread_fpu_owner);

      Fpu::set_owner(cpu, 0);
      to->state_del_dirty(Thread_fpu_owner);
    }
  else if (Fpu::owner(cpu) == this)
    {
      assert_kdb (state() & Thread_fpu_owner);

      state_del_dirty(Thread_fpu_owner);

      to->state_add_dirty(Thread_fpu_owner);
      Fpu::set_owner(cpu, to);
      if (EXPECT_FALSE(current() == to))
        Fpu::enable();
    }
//---------------------------------------------------------------------------
IMPLEMENTATION [!fpu]:

Thread::switchin_fpu(bool alloc_new_fpu = true)

//---------------------------------------------------------------------------
IMPLEMENTATION [!fpu || ux]:

Thread::transfer_fpu(Thread *)

//---------------------------------------------------------------------------
IMPLEMENTATION [!log]:

unsigned Thread::sys_ipc_log(Syscall_frame *)

unsigned Thread::sys_ipc_trace(Syscall_frame *)

void Thread::page_fault_log(Address, unsigned, unsigned)

PUBLIC static inline
int Thread::log_page_fault()

unsigned Thread::sys_fpage_unmap_log(Syscall_frame *)
// ----------------------------------------------------------------------------
IMPLEMENTATION [!mp]:

Thread::migration_helper(Migration_info const *inf)
  unsigned cpu = inf->cpu;
  //LOG_MSG_3VAL(this, "MGi ", Mword(current()), (current_cpu() << 16) | cpu(), Context::current_sched());

  // Not sure if this can ever happen
  Sched_context *csc = Context::current_sched();
  if (!csc || csc->context() == this)
    Context::set_current_sched(current()->sched());

  Sched_context *sc = sched_context();
  sc->set_prio(inf->prio);
  sc->set_quantum(inf->quantum);

  state_add_dirty(Thread_drq_ready);

  set_cpu_of(this, cpu);
  return Drq::No_answer | Drq::Need_resched;
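// On a uniprocessor build the migration reduces to adopting the new
// scheduling parameters: there is no remote queue and no IPI, and
// set_cpu_of() is presumably a no-op since only CPU 0 exists.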
Thread::migrate_xcpu(unsigned cpu)

//----------------------------------------------------------------------------

EXTENSION class Thread
  struct Migration_log
  {
    Mword    state;
    Address  user_ip;
    unsigned src_cpu;
    unsigned target_cpu;

    static unsigned fmt(Tb_entry *, int, char *)
    asm ("__thread_migration_log_fmt");
  };
// ----------------------------------------------------------------------------
IMPLEMENTATION [mp]:

Thread::handle_remote_requests_irq()
  assert_kdb (cpu_lock.test());
  // printf("CPU[%2u]: > RQ IPI (current=%p)\n", current_cpu(), current());
  Ipi::eoi(Ipi::Request);
  Context *const c = current();
  //LOG_MSG_3VAL(c, "ipi", c->cpu(), (Mword)c, c->drq_pending());
  Context *migration_q = 0;
  bool resched = _pending_rqq.cpu(c->cpu()).handle_requests(&migration_q);

  resched |= Rcu::do_pending_work(c->cpu());

  if (migration_q)
    static_cast<Thread*>(migration_q)->do_migration();

  if ((resched || c->handle_drq()) && !c->schedule_in_progress())
    {
      //LOG_MSG_3VAL(c, "ipis", 0, 0, 0);
      // printf("CPU[%2u]: RQ IPI sched %p\n", current_cpu(), current());
      c->schedule();
    }
  // printf("CPU[%2u]: < RQ IPI (current=%p)\n", current_cpu(), current());
Thread::handle_global_remote_requests_irq()
  assert_kdb (cpu_lock.test());
  // printf("CPU[%2u]: > RQ IPI (current=%p)\n", current_cpu(), current());
  Ipi::eoi(Ipi::Global_request);
  Context::handle_global_requests();
Thread::migration_helper(Migration_info const *inf)
  //LOG_MSG_3VAL(this, "MGi ", Mword(current()), (current_cpu() << 16) | cpu(), 0);
  assert_kdb (cpu() == current_cpu());
  assert_kdb (current() != this);
  assert_kdb (cpu_lock.test());

  // Not sure if this can ever happen
  Sched_context *csc = Context::current_sched();
  if (!csc || csc->context() == this)
    Context::set_current_sched(current()->sched());

  unsigned cpu = inf->cpu;

  {
    Queue &q = _pending_rqq.cpu(current_cpu());
    // The queue lock of the current CPU protects the cpu number in
    // this thread.
    Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());

    // potentially dequeue from our local queue
    if (_pending_rq.queued())
      check_kdb (q.dequeue(&_pending_rq, Queue_item::Ok));

    Sched_context *sc = sched_context();
    sc->set_prio(inf->prio);
    sc->set_quantum(inf->quantum);

    state_add_dirty(Thread_drq_ready);

    assert_kdb (!in_ready_list());

    set_cpu_of(this, cpu);
    // now we are migrated away from current_cpu
  }

  {
    Queue &q = _pending_rqq.cpu(cpu);
    Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());

    // migrated meanwhile?
    if (this->cpu() != cpu || _pending_rq.queued())
      return Drq::No_answer | Drq::Need_resched;

    q.enqueue(&_pending_rq);
  }

  //LOG_MSG_3VAL(this, "sipi", current_cpu(), cpu(), (Mword)current());
  Ipi::cpu(cpu).send(Ipi::Request);

  return Drq::No_answer | Drq::Need_resched;
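// Note the two separate lock scopes: the local CPU's queue lock is held
// while the thread still belongs to this CPU, then dropped before the
// target CPU's queue lock is taken. Taking them one at a time avoids
// potential ABBA deadlocks between two CPUs migrating toward each other.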
Thread::migrate_xcpu(unsigned cpu)
  {
    Queue &q = Context::_pending_rqq.cpu(cpu);
    Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());

    // already migrated meanwhile?
    if (cpu != this->cpu())
      return;

    if (!_pending_rq.queued())
      q.enqueue(&_pending_rq);
  }

  Ipi::cpu(cpu).send(Ipi::Request);
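// The IPI kicks the target CPU into handle_remote_requests_irq() (above),
// which drains _pending_rqq and ends up calling do_migration() on our
// behalf.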
//----------------------------------------------------------------------------
IMPLEMENTATION [debug]:

Thread::Migration_log::fmt(Tb_entry *e, int maxlen, char *buf)
  Migration_log *l = e->payload<Migration_log>();
  return snprintf(buf, maxlen, "migrate from %u to %u (state=%lx user ip=%lx)",
                  l->src_cpu, l->target_cpu, l->state, l->user_ip);