kernel/fiasco/src/kern/thread.cpp
1 INTERFACE:
2
3 #include "l4_types.h"
4 #include "config.h"
5 #include "continuation.h"
6 #include "helping_lock.h"
7 #include "kobject.h"
8 #include "mem_layout.h"
9 #include "member_offs.h"
10 #include "receiver.h"
11 #include "ref_obj.h"
12 #include "sender.h"
13 #include "space.h"              // Space_index
14 #include "spin_lock.h"
15 #include "thread_lock.h"
16
17 class Return_frame;
18 class Syscall_frame;
19 class Vcpu_state;
20 class Irq_base;
21
22 typedef Context_ptr_base<Thread> Thread_ptr;
23
24
25 /** A thread.  This class is the driver class for most kernel functionality.
26  */
27 class Thread :
28   public Receiver,
29   public Sender,
30   public Kobject
31 {
32   MEMBER_OFFSET();
33   FIASCO_DECLARE_KOBJ();
34
35   friend class Jdb;
36   friend class Jdb_bt;
37   friend class Jdb_tcb;
38   friend class Jdb_thread;
39   friend class Jdb_thread_list;
40   friend class Jdb_list_threads;
41   friend class Jdb_list_timeouts;
42   friend class Jdb_tbuf_show;
43
44 public:
45   enum Context_mode_kernel { Kernel = 0 };
46   enum Operation
47   {
48     Opcode_mask = 0xffff,
49     Op_control = 0,
50     Op_ex_regs = 1,
51     Op_switch  = 2,
52     Op_stats   = 3,
53     Op_vcpu_resume = 4,
54     Op_register_del_irq = 5,
55     Op_modify_senders = 6,
56     Op_vcpu_control = 7,
57     Op_gdt_x86 = 0x10,
58   };
59
60   enum Control_flags
61   {
62     Ctl_set_pager       = 0x0010000,
63     Ctl_bind_task       = 0x0200000,
64     Ctl_alien_thread    = 0x0400000,
65     Ctl_ux_native       = 0x0800000,
66     Ctl_set_exc_handler = 0x1000000,
67   };
68
69   enum Ex_regs_flags
70   {
71     Exr_cancel            = 0x10000,
72     Exr_trigger_exception = 0x20000,
73   };
74
75   enum Vcpu_ctl_flags
76   {
77     Vcpu_ctl_extendet_vcpu = 0x10000,
78   };
79
80
81   class Dbg_stack
82   {
83   public:
84     enum { Stack_size = Config::PAGE_SIZE };
85     void *stack_top;
86     Dbg_stack();
87   };
88
89   static Per_cpu<Dbg_stack> dbg_stack;
90
91 public:
92   typedef void (Utcb_copy_func)(Thread *sender, Thread *receiver);
93
94   /**
95    * Constructor.
96    *
97    * The thread is created unbound: it is attached to a task and its
98    * UTCB via bind(), its pager and exception handler are set via
99    * control(), and its scheduling parameters are set via
100    * set_sched_params().
101    *
102    * @post state() != Thread_invalid.
103    */
104   Thread();
105
106   int handle_page_fault (Address pfa, Mword error, Mword pc,
107       Return_frame *regs);
108
109 private:
110   struct Migration_helper_info
111   {
112     Migration_info inf;
113     Thread *victim;
114   };
115
116   Thread(const Thread&);        ///< Default copy constructor is undefined
117   void *operator new(size_t);   ///< Default new operator is undefined
118
119   bool handle_sigma0_page_fault (Address pfa);
120
121   /**
122    * Return to user.
123    *
124    * This function is the default routine run if a newly
125    * initialized context is being switch_exec()'ed.
126    */
127   static void user_invoke();
128
129 public:
130   static bool pagein_tcb_request(Return_frame *regs);
131
132   inline Mword user_ip() const;
133   inline void user_ip(Mword);
134
135   inline Mword user_sp() const;
136   inline void user_sp(Mword);
137
138   inline Mword user_flags() const;
139
140   /** Nesting level in the debugger; always critical if > 1. */
141   static Per_cpu<unsigned long> nested_trap_recover;
142   static void handle_remote_requests_irq() asm ("handle_remote_cpu_requests");
143   static void handle_global_remote_requests_irq() asm ("ipi_remote_call");
144
145 protected:
146   explicit Thread(Context_mode_kernel);
147
148   // Another critical TCB cache line:
149   Thread_lock  _thread_lock;
150
151   // More ipc state
152   Thread_ptr _pager;
153   Thread_ptr _exc_handler;
154
155 protected:
156   Ram_quota *_quota;
157   Irq_base *_del_observer;
158
159   // debugging stuff
160   unsigned _magic;
161   static const unsigned magic = 0xf001c001;
162 };
163
164
165 IMPLEMENTATION:
166
167 #include <cassert>
168 #include <cstdlib>              // panic()
169 #include <cstring>
170 #include "atomic.h"
171 #include "entry_frame.h"
172 #include "fpu_alloc.h"
173 #include "globals.h"
174 #include "kdb_ke.h"
175 #include "kmem_alloc.h"
176 #include "logdefs.h"
177 #include "map_util.h"
178 #include "ram_quota.h"
179 #include "sched_context.h"
180 #include "space.h"
181 #include "std_macros.h"
182 #include "task.h"
183 #include "thread_state.h"
184 #include "timeout.h"
185
186 FIASCO_DEFINE_KOBJ(Thread);
187
188 Per_cpu<unsigned long> DEFINE_PER_CPU Thread::nested_trap_recover;
189
190
191 IMPLEMENT
192 Thread::Dbg_stack::Dbg_stack()
193 {
194   stack_top = Kmem_alloc::allocator()->unaligned_alloc(Stack_size); 
195   if (stack_top)
196     stack_top = (char *)stack_top + Stack_size;
197   //printf("JDB STACK start= %p - %p\n", (char *)stack_top - Stack_size, (char *)stack_top);
198 }
199
200
201 PUBLIC inline NEEDS[Thread::thread_lock]
202 void
203 Thread::kill_lock()
204 { thread_lock()->lock(); }
205
206
207 PUBLIC inline
208 void *
209 Thread::operator new(size_t, Ram_quota *q) throw ()
210 {
211   void *t = Mapped_allocator::allocator()->q_unaligned_alloc(q, Config::thread_block_size);
212   if (t)
213     {
214       memset(t, 0, sizeof(Thread));
215       reinterpret_cast<Thread*>(t)->_quota = q;
216     }
217   return t;
218 }
219
220 /** Class-specific placement allocator.
221     This allocator ensures that threads are allocated at a fixed virtual
222     address inside the TCB area.
223     @param t fixed address of the thread control block
224     @return address of the new thread control block
225  */
226 PRIVATE inline
227 void *
228 Thread::operator new(size_t, Thread *t) throw ()
229 {
230   // Allocate TCB in TCB space.  Actually, do not allocate anything,
231   // just return the address.  Allocation happens on the fly in
232   // Thread::handle_page_fault().
233   return t;
234 }
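// Illustrative sketch (not part of the original source): the two operator
// new overloads above are alternative allocation paths.  The quota-charged
// overload reserves and zeroes a thread_block_size chunk and records the
// quota, while the placement overload allocates nothing and merely pins the
// TCB to a caller-chosen address.  `quota' and `tcb_addr' are assumed to be
// supplied by the calling factory code.
//
//   Thread *t1 = new (quota) Thread();     // charged against a Ram_quota
//   Thread *t2 = new (tcb_addr) Thread();  // tcb_addr: Thread* in TCB space,
//                                          // backing pages mapped lazily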
235
236
237 PUBLIC
238 bool
239 Thread::bind(Task *t, User<Utcb>::Ptr utcb)
240 {
241   // _utcb == 0 for all kernel threads
242   Space::Ku_mem const *u = t->find_ku_mem(utcb, sizeof(Utcb));
243
244   // kernel thread?
245   if (EXPECT_FALSE(utcb && !u))
246     return false;
247
248   Lock_guard<typeof(*_space.lock())> guard(_space.lock());
249   if (_space.space())
250     return false;
251
252   _space.space(t);
253   t->inc_ref();
254
255   if (u)
256     _utcb.set(utcb, u->kern_addr(utcb));
257
258   return true;
259 }
260
261
262 PUBLIC inline NEEDS["kdb_ke.h", "cpu_lock.h", "space.h"]
263 bool
264 Thread::unbind()
265 {
266   Task *old;
267
268     {
269       Lock_guard<typeof(*_space.lock())> guard(_space.lock());
270
271       if (!_space.space())
272         return true;
273
274       old = static_cast<Task*>(_space.space());
275       _space.space(0);
276
277       Mem_space *oms = old->mem_space();
278
279       if (old->dec_ref())
280         old = 0;
281
282       // switch to a safe page table
283       if (Mem_space::current_mem_space(current_cpu()) == oms)
284         Mem_space::kernel_space()->switchin_context(oms);
285     }
286
287   if (old)
288     {
289       current()->rcu_wait();
290       delete old;
291     }
292
293   return true;
294 }
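// Lifetime note for bind()/unbind() above: bind() takes a reference on the
// task via inc_ref() and unbind() drops it with dec_ref().  When the last
// reference goes away, the task is deleted only after rcu_wait(), presumably
// so that other CPUs still using the old space have passed a grace period;
// the current CPU switches to the kernel page table beforehand to stay safe.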
295
296 /** Cut-down version of the Thread constructor; only for kernel threads.
297     Do only what's necessary to get a kernel thread started --
298     skip all fancy stuff, no locking is necessary.
299     The Context_mode_kernel argument is a pure tag selecting this
300     constructor; it carries no data.
301  */
302 IMPLEMENT inline
303 Thread::Thread(Context_mode_kernel)
304   : Receiver(), Sender(), _del_observer(0), _magic(magic)
305 {
306   *reinterpret_cast<void(**)()>(--_kernel_sp) = user_invoke;
307
308   inc_ref();
309
310   if (Config::stack_depth)
311     std::memset((char*)this + sizeof(Thread), '5',
312                 Config::thread_block_size-sizeof(Thread)-64);
313 }
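// The '5' fill above poisons the unused part of the kernel stack when
// Config::stack_depth is enabled, so the maximum stack usage can later be
// estimated by counting surviving poison bytes.  A minimal sketch of such a
// check (hypothetical helper, `t' is a kernel Thread*):
//
//   unsigned const poison = Config::thread_block_size - sizeof(Thread) - 64;
//   char const *s = (char const *)t + sizeof(Thread);
//   unsigned untouched = 0;
//   while (untouched < poison && s[untouched] == '5')
//     ++untouched;                            // poison bytes never overwritten
//   unsigned max_used = poison - untouched;   // rough stack-usage estimate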
314
315
316 /** Destructor.  Reestablish the Context constructor's precondition.
317     @pre current() == thread_lock()->lock_owner()
318          && state() == Thread_dead
319     @pre lock_cnt() == 0
320     @post (_kernel_sp == 0)  &&  (* (stack end) == 0)  &&  !exists()
321  */
322 PUBLIC virtual
323 Thread::~Thread()               // To be called in locked state.
324 {
325
326   unsigned long *init_sp = reinterpret_cast<unsigned long*>
327     (reinterpret_cast<unsigned long>(this) + size - sizeof(Entry_frame));
328
329
330   _kernel_sp = 0;
331   *--init_sp = 0;
332   Fpu_alloc::free_state(fpu_state());
333   _state = Thread_invalid;
334 }
335
336
337 // IPC-gate deletion stuff ------------------------------------
338
339 PUBLIC inline
340 void
341 Thread::ipc_gate_deleted(Mword id)
342 {
343   (void) id;
344   Lock_guard<Cpu_lock> g(&cpu_lock);
345   if (_del_observer)
346     _del_observer->hit();
347 }
348
349 class Del_irq_pin : public Irq_pin_dummy
350 {
351 };
352
353 PUBLIC inline
354 Del_irq_pin::Del_irq_pin(Thread *o)
355 { payload()[0] = (Address)o; }
356
357 PUBLIC inline
358 Thread *
359 Del_irq_pin::thread() const
360 { return (Thread*)payload()[0]; }
361
362 PUBLIC inline
363 void
364 Del_irq_pin::unbind_irq()
365 { thread()->remove_delete_irq(); }
366
367 PUBLIC inline
368 Del_irq_pin::~Del_irq_pin()
369 { unbind_irq(); }
370
371 PUBLIC
372 void
373 Thread::register_delete_irq(Irq_base *irq)
374 {
375   irq->pin()->unbind_irq();
376   irq->pin()->replace<Del_irq_pin>(this);
377   _del_observer = irq;
378 }
379
380 PUBLIC
381 void
382 Thread::remove_delete_irq()
383 {
384   if (!_del_observer)
385     return;
386
387   Irq_base *tmp = _del_observer;
388   _del_observer = 0;
389   tmp->pin()->unbind_irq();
390 }
391
392 // end of: IPC-gate deletion stuff -------------------------------
393
394
395 /** Currently executing thread.
396     @return currently executing thread.
397  */
398 inline
399 Thread*
400 current_thread()
401 { return nonull_static_cast<Thread*>(current()); }
402
403 PUBLIC inline
404 bool
405 Thread::exception_triggered() const
406 { return _exc_cont.valid(); }
407
408 PUBLIC inline
409 bool
410 Thread::continuation_test_and_restore()
411 {
412   bool v = _exc_cont.valid();
413   if (v)
414     _exc_cont.restore(regs());
415   return v;
416 }
417
418 //
419 // state requests/manipulation
420 //
421
422
423 /** Thread lock.
424     Overwrite Context's version of thread_lock() with a semantically
425     equivalent, but more efficient version.
426     @return lock used to synchronize accesses to the thread.
427  */
428 PUBLIC inline
429 Thread_lock *
430 Thread::thread_lock()
431 { return &_thread_lock; }
432
433
434 PUBLIC inline NEEDS ["config.h", "timeout.h"]
435 void
436 Thread::handle_timer_interrupt()
437 {
438   unsigned _cpu = cpu(true);
439   // XXX: This assumes periodic timers (i.e. bogus in one-shot mode)
440   if (!Config::fine_grained_cputime)
441     consume_time(Config::scheduler_granularity);
442
443   bool resched = Rcu::do_pending_work(_cpu);
444
445   // Check if we need to reschedule due to timeouts or wakeups
446   if ((Timeout_q::timeout_queue.cpu(_cpu).do_timeouts() || resched)
447       && !schedule_in_progress())
448     {
449       schedule();
450       assert (timeslice_timeout.cpu(cpu(true))->is_set());      // Coma check
451     }
452 }
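// Summary of the tick path above: charge one scheduler_granularity tick to
// the current thread (unless fine-grained CPU-time accounting is enabled),
// run pending RCU work, fire expired timeouts, and call schedule() if either
// of the latter requests it -- but never while a schedule() is already in
// progress on this CPU.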
453
454
455 PUBLIC
456 void
457 Thread::halt()
458 {
459   // Cancel must be cleared on all kernel entry paths. See slowtraps for
460   // why we delay doing it until here.
461   state_del(Thread_cancel);
462
463   // we haven't been re-initialized (cancel was not set) -- so sleep
464   if (state_change_safely(~Thread_ready, Thread_cancel | Thread_dead))
465     while (! (state() & Thread_ready))
466       schedule();
467 }
468
469 PUBLIC static
470 void
471 Thread::halt_current ()
472 {
473   for (;;)
474     {
475       current_thread()->halt();
476       kdb_ke("Thread not halted");
477     }
478 }
479
480 PRIVATE static inline
481 void
482 Thread::user_invoke_generic()
483 {
484   Context *const c = current();
485   assert_kdb (c->state() & Thread_ready_mask);
486
487   if (c->handle_drq() && !c->schedule_in_progress())
488     c->schedule();
489
490   // release CPU lock explicitly, because
491   // * the context that switched to us holds the CPU lock
492   // * we run on a newly-created stack without a CPU lock guard
493   cpu_lock.clear();
494 }
495
496
497 PRIVATE static void
498 Thread::leave_and_kill_myself()
499 {
500   current_thread()->do_kill();
501 #ifdef CONFIG_JDB
502   WARN("dead thread scheduled: %lx\n", current_thread()->dbg_id());
503 #endif
504   kdb_ke("DEAD SCHED");
505 }
506
507 PUBLIC static
508 unsigned
509 Thread::handle_kill_helper(Drq *src, Context *, void *)
510 {
511   delete nonull_static_cast<Thread*>(src->context());
512   return Drq::No_answer | Drq::Need_resched;
513 }
514
515
516 PRIVATE
517 bool
518 Thread::do_kill()
519 {
520   Lock_guard<Thread_lock> guard(thread_lock());
521
522   if (state() == Thread_invalid)
523     return false;
524
525   //
526   // Kill this thread
527   //
528
529   // But first prevent it from being woken up by asynchronous events
530
531   {
532     Lock_guard <Cpu_lock> guard(&cpu_lock);
533
534     // if IPC timeout active, reset it
535     if (_timeout)
536       _timeout->reset();
537
538     // Switch to time-sharing mode
539     set_mode(Sched_mode(0));
540
541     // Switch to time-sharing scheduling context
542     if (sched() != sched_context())
543       switch_sched(sched_context());
544
545     if (!current_sched() || current_sched()->context() == this)
546       set_current_sched(current()->sched());
547   }
548
549   // possibly dequeue from a wait queue
550   wait_queue_kill();
551
552   // if other threads want to send me IPC messages, abort these
553   // operations
554   {
555     Lock_guard <Cpu_lock> guard(&cpu_lock);
556     while (Sender *s = Sender::cast(sender_list()->head()))
557       {
558         s->ipc_receiver_aborted();
559         Proc::preemption_point();
560       }
561   }
562
563   // if engaged in IPC operation, stop it
564   if (receiver())
565     sender_dequeue(receiver()->sender_list());
566
567   Context::do_kill();
568
569   vcpu_update_state();
570
571   unbind();
572   vcpu_set_user_space(0);
573
574   cpu_lock.lock();
575
576   state_change_dirty(0, Thread_dead);
577
578   // dequeue from system queues
579   ready_dequeue();
580
581   if (_del_observer)
582     {
583       _del_observer->pin()->unbind_irq();
584       _del_observer = 0;
585     }
586
587   if (dec_ref())
588     while (1)
589       {
590         state_del_dirty(Thread_ready_mask);
591         schedule();
592         WARN("woken up dead thread %lx\n", dbg_id());
593         kdb_ke("X");
594       }
595
596   rcu_wait();
597
598   state_del_dirty(Thread_ready_mask);
599
600   ready_dequeue();
601
602   kernel_context_drq(handle_kill_helper, 0);
603   kdb_ke("Im dead");
604   return true;
605 }
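// Rough sequence of do_kill() above: detach the thread from its timeout and
// scheduling context, abort senders queued at this thread as well as any IPC
// it is engaged in, mark it Thread_dead, unbind it from its task, and finally
// hand the TCB to the kernel context via a DRQ so handle_kill_helper() can
// delete it without running on the dying thread's own stack.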
606
607 PRIVATE static
608 unsigned
609 Thread::handle_remote_kill(Drq *, Context *self, void *)
610 {
611   Thread *c = nonull_static_cast<Thread*>(self);
612   c->state_add_dirty(Thread_cancel | Thread_ready);
613   c->_exc_cont.restore(c->regs());
614   c->do_trigger_exception(c->regs(), (void*)&Thread::leave_and_kill_myself);
615   return 0;
616 }
617
618
619 PROTECTED
620 bool
621 Thread::kill()
622 {
623   Lock_guard<Cpu_lock> guard(&cpu_lock);
624   inc_ref();
625
626
627   if (cpu() == current_cpu())
628     {
629       state_add_dirty(Thread_cancel | Thread_ready);
630       sched()->deblock(cpu());
631       _exc_cont.restore(regs()); // overwrite an already triggered exception
632       do_trigger_exception(regs(), (void*)&Thread::leave_and_kill_myself);
633 //          current()->switch_exec (this, Helping);
634       return true;
635     }
636
637   drq(Thread::handle_remote_kill, 0, 0, Drq::Any_ctxt);
638
639   return true;
640 }
641
642
643 PUBLIC
644 void
645 Thread::set_sched_params(unsigned prio, Unsigned64 quantum)
646 {
647   Sched_context *sc = sched_context();
648   bool const change = prio != sc->prio()
649                    || quantum != sc->quantum();
650   bool const ready_queued = in_ready_list();
651
652   if (!change && (ready_queued || this == current()))
653     return;
654
655   ready_dequeue();
656
657   sc->set_prio(prio);
658   sc->set_quantum(quantum);
659   sc->replenish();
660
661   if (sc == current_sched())
662     set_current_sched(sc);
663
664   if (state() & Thread_ready_mask)
665     {
666       if (this != current())
667         ready_enqueue();
668       else
669         schedule();
670     }
671 }
672
673 PUBLIC
674 long
675 Thread::control(Thread_ptr const &pager, Thread_ptr const &exc_handler)
676 {
677   if (pager.is_valid())
678     _pager = pager;
679
680   if (exc_handler.is_valid())
681     _exc_handler = exc_handler;
682
683   return 0;
684 }
685
686 PUBLIC static inline
687 bool
688 Thread::is_tcb_address(Address a)
689 {
690   a &= ~(Config::thread_block_size - 1);
691   return reinterpret_cast<Thread *>(a)->_magic == magic;
692 }
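// Worked example (block size purely illustrative): with a hypothetical
// Config::thread_block_size of 0x1000, an address inside a TCB such as
// 0xc0012abc is rounded down and probed for the magic value:
//
//   0xc0012abc & ~(0x1000 - 1)                      == 0xc0012000
//   reinterpret_cast<Thread *>(0xc0012000)->_magic  == 0xf001c001 ?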
693
694 PUBLIC static inline
695 void
696 Thread::assert_irq_entry()
697 {
698   assert_kdb(current_thread()->schedule_in_progress()
699              || current_thread()->state() & (Thread_ready_mask | Thread_drq_wait | Thread_waiting | Thread_ipc_transfer));
700 }
701
702
703
704 // ---------------------------------------------------------------------------
705
706 PUBLIC inline
707 bool
708 Thread::check_sys_ipc(unsigned flags, Thread **partner, Thread **sender,
709                       bool *have_recv) const
710 {
711   if (flags & L4_obj_ref::Ipc_recv)
712     {
713       *sender = flags & L4_obj_ref::Ipc_open_wait ? 0 : const_cast<Thread*>(this);
714       *have_recv = true;
715     }
716
717   if (flags & L4_obj_ref::Ipc_send)
718     *partner = const_cast<Thread*>(this);
719
720   // FIXME: should be removed; flags == 0 is a no-op
721   if (!flags)
722     {
723       *sender = const_cast<Thread*>(this);
724       *partner = const_cast<Thread*>(this);
725       *have_recv = true;
726     }
727
728   return *have_recv || ((flags & L4_obj_ref::Ipc_send) && *partner);
729 }
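// Decoding sketch for the flag combinations handled above (flag names are
// the L4_obj_ref constants used in the code):
//
//   Ipc_send                  -> send only, *partner = this
//   Ipc_recv                  -> closed wait, *sender = this
//   Ipc_recv | Ipc_open_wait  -> open wait, *sender = 0
//   Ipc_send | Ipc_recv       -> call-style send followed by receive
//   0                         -> *partner and *sender both set to this
//                                (no-op case, marked FIXME above)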
730
731 PUBLIC static
732 unsigned
733 Thread::handle_migration_helper(Drq *, Context *, void *p)
734 {
735   Migration_helper_info const *inf = (Migration_helper_info const *)p;
736   return inf->victim->migration_helper(&inf->inf);
737 }
738
739
740 PRIVATE
741 void
742 Thread::do_migration()
743 {
744   assert_kdb(cpu_lock.test());
745   assert_kdb(current_cpu() == cpu(true));
746
747   Migration_helper_info inf;
748
749     {
750       Lock_guard<typeof(_migration_rq.affinity_lock)>
751         g(&_migration_rq.affinity_lock);
752       inf.inf = _migration_rq.inf;
753       _migration_rq.pending = false;
754       _migration_rq.in_progress = true;
755     }
756
757   unsigned on_cpu = cpu();
758
759   if (inf.inf.cpu == ~0U)
760     {
761       state_add_dirty(Thread_suspended);
762       set_sched_params(0, 0);
763       _migration_rq.in_progress = false;
764       return;
765     }
766
767   state_del_dirty(Thread_suspended);
768
769   if (inf.inf.cpu == on_cpu)
770     {
771       // stay here
772       set_sched_params(inf.inf.prio, inf.inf.quantum);
773       _migration_rq.in_progress = false;
774       return;
775     }
776
777   // spill FPU state into memory before migration
778   if (state() & Thread_fpu_owner)
779     {
780       if (current() != this)
781         Fpu::enable();
782
783       spill_fpu();
784       Fpu::set_owner(on_cpu, 0);
785       Fpu::disable();
786     }
787
788
789   // if we are in the middle of the scheduler, leave it now
790   if (schedule_in_progress() == this)
791     reset_schedule_in_progress();
792
793   inf.victim = this;
794
795   if (current() == this && Config::Max_num_cpus > 1)
796     kernel_context_drq(handle_migration_helper, &inf);
797   else
798     migration_helper(&inf.inf);
799 }
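// Migration overview for do_migration() above: the pending request is taken
// under the affinity lock; a target CPU of ~0U suspends the thread instead of
// moving it; a same-CPU request merely updates the scheduling parameters;
// otherwise the FPU state is spilled and migration_helper() does the actual
// hand-over -- via a DRQ on the kernel context if the victim is the thread
// currently running.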
800
801 PUBLIC
802 void
803 Thread::initiate_migration()
804 { do_migration(); }
805
806 PUBLIC
807 void
808 Thread::finish_migration()
809 { enqueue_timeout_again(); }
810
811
812 PUBLIC
813 void
814 Thread::migrate(Migration_info const &info)
815 {
816   assert_kdb (cpu_lock.test());
817
818   LOG_TRACE("Thread migration", "mig", this, __thread_migration_log_fmt,
819       Migration_log *l = tbe->payload<Migration_log>();
820       l->state = state();
821       l->src_cpu = cpu();
822       l->target_cpu = info.cpu;
823       l->user_ip = regs()->ip();
824   );
825
826     {
827       Lock_guard<typeof(_migration_rq.affinity_lock)>
828         g(&_migration_rq.affinity_lock);
829       _migration_rq.inf = info;
830       _migration_rq.pending = true;
831     }
832
833   unsigned cpu = this->cpu();
834
835   if (current_cpu() == cpu)
836     {
837       do_migration();
838       return;
839     }
840
841   migrate_xcpu(cpu);
842 }
843
844
845 //---------------------------------------------------------------------------
846 IMPLEMENTATION [fpu && !ux]:
847
848 #include "fpu.h"
849 #include "fpu_alloc.h"
850 #include "fpu_state.h"
851
852 PUBLIC inline NEEDS ["fpu.h"]
853 void
854 Thread::spill_fpu()
855 {
856   // Spilling is only valid if this thread currently owns the FPU
857   assert_kdb (Fpu::owner(cpu()) == this);
858   assert_kdb (state() & Thread_fpu_owner);
859   assert_kdb (fpu_state());
860
861   // Save our FPU state into the state buffer and give up FPU ownership
862   Fpu::save_state (fpu_state());
863   state_del_dirty (Thread_fpu_owner);
864 }
865
866
867 /*
868  * Handle FPU trap for this context. Assumes disabled interrupts
869  */
870 PUBLIC inline NEEDS [Thread::spill_fpu, "fpu_alloc.h","fpu_state.h"]
871 int
872 Thread::switchin_fpu(bool alloc_new_fpu = true)
873 {
874   unsigned cpu = this->cpu(true);
875
876   if (state() & Thread_vcpu_fpu_disabled)
877     return 0;
878
879   // If we own the FPU, we should never be getting an "FPU unavailable" trap
880   assert_kdb (Fpu::owner(cpu) != this);
881
882   // Allocate FPU state slab if we didn't already have one
883   if (!fpu_state()->state_buffer()
884       && (EXPECT_FALSE((!alloc_new_fpu
885                         || (state() & Thread_alien))
886                        || !Fpu_alloc::alloc_state(_quota, fpu_state()))))
887     return 0;
888
889   // Enable the FPU before accessing it, otherwise recursive trap
890   Fpu::enable();
891
892   // Save the FPU state of the previous FPU owner (lazy) if applicable
893   if (Fpu::owner(cpu))
894     nonull_static_cast<Thread*>(Fpu::owner(cpu))->spill_fpu();
895
896   // Become FPU owner and restore own FPU state
897   Fpu::restore_state(fpu_state());
898
899   state_add_dirty(Thread_fpu_owner);
900   Fpu::set_owner(cpu, this);
901   return 1;
902 }
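// Lazy FPU switching in a nutshell (summary of switchin_fpu() above): the FPU
// stays disabled for threads that do not own it, so their first FPU
// instruction traps; the trap handler spills the previous owner's registers
// into its fpu_state() buffer, restores our own state and makes us the owner.
// A thread that never touches the FPU thus never pays for a state switch.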
903
904 PUBLIC inline NEEDS["fpu.h", "fpu_alloc.h"]
905 void
906 Thread::transfer_fpu(Thread *to)
907 {
908   unsigned cpu = this->cpu();
909   if (cpu != to->cpu())
910     return;
911
912   if (to->fpu_state()->state_buffer())
913     Fpu_alloc::free_state(to->fpu_state());
914
915   to->fpu_state()->state_buffer(fpu_state()->state_buffer());
916   fpu_state()->state_buffer(0);
917
918   assert (current() == this || current() == to);
919
920   Fpu::disable(); // it will be re-enabled in switch_fpu
921
922   if (EXPECT_FALSE(Fpu::owner(cpu) == to))
923     {
924       assert_kdb (to->state() & Thread_fpu_owner);
925
926       Fpu::set_owner(cpu, 0);
927       to->state_del_dirty (Thread_fpu_owner);
928     }
929   else if (Fpu::owner(cpu) == this)
930     {
931       assert_kdb (state() & Thread_fpu_owner);
932
933       state_del_dirty (Thread_fpu_owner);
934
935       to->state_add_dirty (Thread_fpu_owner);
936       Fpu::set_owner(cpu, to);
937       if (EXPECT_FALSE(current() == to))
938         Fpu::enable();
939     }
940 }
941
942 //---------------------------------------------------------------------------
943 IMPLEMENTATION [!fpu]:
944
945 PUBLIC inline
946 int
947 Thread::switchin_fpu(bool alloc_new_fpu = true)
948 {
949   (void)alloc_new_fpu;
950   return 0;
951 }
952
953 PUBLIC inline
954 void
955 Thread::spill_fpu()
956 {}
957
958 //---------------------------------------------------------------------------
959 IMPLEMENTATION [!fpu || ux]:
960
961 PUBLIC inline
962 void
963 Thread::transfer_fpu(Thread *)
964 {}
965
966 //---------------------------------------------------------------------------
967 IMPLEMENTATION [!log]:
968
969 PUBLIC inline
970 unsigned Thread::sys_ipc_log(Syscall_frame *)
971 { return 0; }
972
973 PUBLIC inline
974 unsigned Thread::sys_ipc_trace(Syscall_frame *)
975 { return 0; }
976
977 static inline
978 void Thread::page_fault_log(Address, unsigned, unsigned)
979 {}
980
981 PUBLIC static inline
982 int Thread::log_page_fault()
983 { return 0; }
984
985 PUBLIC inline
986 unsigned Thread::sys_fpage_unmap_log(Syscall_frame *)
987 { return 0; }
988
989
990 // ----------------------------------------------------------------------------
991 IMPLEMENTATION [!mp]:
992
993
994 PRIVATE inline
995 unsigned
996 Thread::migration_helper(Migration_info const *inf)
997 {
998   unsigned cpu = inf->cpu;
999   //  LOG_MSG_3VAL(this, "MGi ", Mword(current()), (current_cpu() << 16) | cpu(), Context::current_sched());
1000   if (_timeout)
1001     _timeout->reset();
1002   ready_dequeue();
1003
1004     {
1005       // Not sure if this can ever happen
1006       Sched_context *csc = Context::current_sched();
1007       if (!csc || csc->context() == this)
1008         Context::set_current_sched(current()->sched());
1009     }
1010
1011   Sched_context *sc = sched_context();
1012   sc->set_prio(inf->prio);
1013   sc->set_quantum(inf->quantum);
1014   sc->replenish();
1015   set_sched(sc);
1016
1017   if (drq_pending())
1018     state_add_dirty(Thread_drq_ready);
1019
1020   set_cpu_of(this, cpu);
1021   return  Drq::No_answer | Drq::Need_resched;
1022 }
1023
1024 PRIVATE inline
1025 void
1026 Thread::migrate_xcpu(unsigned cpu)
1027 {
1028   (void)cpu;
1029   assert_kdb (false);
1030 }
1031
1032
1033 //----------------------------------------------------------------------------
1034 INTERFACE [debug]:
1035
1036 EXTENSION class Thread
1037 {
1038 protected:
1039   struct Migration_log
1040   {
1041     Mword    state;
1042     Address  user_ip;
1043     unsigned src_cpu;
1044     unsigned target_cpu;
1045
1046     static unsigned fmt(Tb_entry *, int, char *)
1047     asm ("__thread_migration_log_fmt");
1048   };
1049 };
1050
1051
1052 // ----------------------------------------------------------------------------
1053 IMPLEMENTATION [mp]:
1054
1055 #include "ipi.h"
1056
1057 IMPLEMENT
1058 void
1059 Thread::handle_remote_requests_irq()
1060 {
1061   assert_kdb (cpu_lock.test());
1062   // printf("CPU[%2u]: > RQ IPI (current=%p)\n", current_cpu(), current());
1063   Ipi::eoi(Ipi::Request);
1064   Context *const c = current();
1065   //LOG_MSG_3VAL(c, "ipi", c->cpu(), (Mword)c, c->drq_pending());
1066   Context *migration_q = 0;
1067   bool resched = _pending_rqq.cpu(c->cpu()).handle_requests(&migration_q);
1068
1069   resched |= Rcu::do_pending_work(c->cpu());
1070
1071   if (migration_q)
1072     static_cast<Thread*>(migration_q)->do_migration();
1073
1074   if ((resched || c->handle_drq()) && !c->schedule_in_progress())
1075     {
1076       //LOG_MSG_3VAL(c, "ipis", 0, 0, 0);
1077       // printf("CPU[%2u]: RQ IPI sched %p\n", current_cpu(), current());
1078       c->schedule();
1079     }
1080   // printf("CPU[%2u]: < RQ IPI (current=%p)\n", current_cpu(), current());
1081 }
1082
1083 IMPLEMENT
1084 void
1085 Thread::handle_global_remote_requests_irq()
1086 {
1087   assert_kdb (cpu_lock.test());
1088   // printf("CPU[%2u]: > RQ IPI (current=%p)\n", current_cpu(), current());
1089   Ipi::eoi(Ipi::Global_request);
1090   Context::handle_global_requests();
1091 }
1092
1093 PRIVATE inline
1094 unsigned
1095 Thread::migration_helper(Migration_info const *inf)
1096 {
1097   // LOG_MSG_3VAL(this, "MGi ", Mword(current()), (current_cpu() << 16) | cpu(), 0);
1098   assert_kdb (cpu() == current_cpu());
1099   assert_kdb (current() != this);
1100   assert_kdb (cpu_lock.test());
1101
1102   if (_timeout)
1103     _timeout->reset();
1104   ready_dequeue();
1105
1106     {
1107       // Not sure if this can ever happen
1108       Sched_context *csc = Context::current_sched();
1109       if (!csc || csc->context() == this)
1110         Context::set_current_sched(current()->sched());
1111     }
1112
1113   unsigned cpu = inf->cpu;
1114
1115     {
1116       Queue &q = _pending_rqq.cpu(current_cpu());
1117       // The queue lock of the current CPU protects the cpu number in
1118       // the thread
1119       Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());
1120
1121       // potentially dequeue from our local queue
1122       if (_pending_rq.queued())
1123         check_kdb (q.dequeue(&_pending_rq, Queue_item::Ok));
1124
1125       Sched_context *sc = sched_context();
1126       sc->set_prio(inf->prio);
1127       sc->set_quantum(inf->quantum);
1128       sc->replenish();
1129       set_sched(sc);
1130
1131       if (drq_pending())
1132         state_add_dirty(Thread_drq_ready);
1133
1134       Mem::mp_wmb();
1135
1136       assert_kdb (!in_ready_list());
1137
1138       set_cpu_of(this, cpu);
1139       // now we are migrated away from current_cpu
1140     }
1141
1142   bool ipi = true;
1143
1144     {
1145       Queue &q = _pending_rqq.cpu(cpu);
1146       Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());
1147
1148       // migrated meanwhile
1149       if (this->cpu() != cpu || _pending_rq.queued())
1150         return  Drq::No_answer | Drq::Need_resched;
1151
1152       if (q.first())
1153         ipi = false;
1154
1155       q.enqueue(&_pending_rq);
1156     }
1157
1158   if (ipi)
1159     {
1160       //LOG_MSG_3VAL(this, "sipi", current_cpu(), cpu(), (Mword)current());
1161       Ipi::cpu(cpu).send(Ipi::Request);
1162     }
1163
1164   return  Drq::No_answer | Drq::Need_resched;
1165 }
1166
1167 PRIVATE inline
1168 void
1169 Thread::migrate_xcpu(unsigned cpu)
1170 {
1171   bool ipi = true;
1172
1173     {
1174       Queue &q = Context::_pending_rqq.cpu(cpu);
1175       Lock_guard<Pending_rqq::Inner_lock> g(q.q_lock());
1176
1177       // already migrated
1178       if (cpu != this->cpu())
1179         return;
1180
1181       if (q.first())
1182         ipi = false;
1183
1184       if (!_pending_rq.queued())
1185         q.enqueue(&_pending_rq);
1186       else
1187         ipi = false;
1188     }
1189
1190   if (ipi)
1191     Ipi::cpu(cpu).send(Ipi::Request);
1192 }
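// IPI batching note for the MP migration paths above: a migrating thread is
// enqueued on the target CPU's _pending_rqq, but a Request IPI is only sent
// when that queue was previously empty (q.first() == 0).  The first enqueuer
// wakes the remote CPU; later entries piggyback on the IPI already in flight
// and are drained together by handle_remote_requests_irq().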
1193
1194 //----------------------------------------------------------------------------
1195 IMPLEMENTATION [debug]:
1196
1197 IMPLEMENT
1198 unsigned
1199 Thread::Migration_log::fmt(Tb_entry *e, int maxlen, char *buf)
1200 {
1201   Migration_log *l = e->payload<Migration_log>();
1202   return snprintf(buf, maxlen, "migrate from %u to %u (state=%lx user ip=%lx)",
1203       l->src_cpu, l->target_cpu, l->state, l->user_ip);
1204 }
1205