kernel/fiasco/src/kern/thread-ipc.cpp
1 INTERFACE:
2
3 #include "l4_buf_iter.h"
4 #include "l4_error.h"
5
6 class Syscall_frame;
7
8 EXTENSION class Thread
9 {
10 protected:
11   struct Log_pf_invalid
12   {
13     Mword pfa;
14     Mword cap_idx;
15     Mword err;
16   };
17
18   struct Log_exc_invalid
19   {
20     Mword cap_idx;
21   };
22
23   enum Check_sender_result
24   {
25     Ok = 0,
26     Queued = 2,
27     Receive_in_progress = 4,
28     Failed = 1,
29   };
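
  // Note: apart from Ok and Failed, these values act as or-able flags; the
  // cross-CPU handshake may, for instance, report (Queued | Receive_in_progress),
  // so callers test the result with bit masks such as (result & Queued) rather
  // than comparing for equality.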
30
31   Syscall_frame *_snd_regs;
32 };
33
34 class Buf_utcb_saver
35 {
36 public:
37   Buf_utcb_saver(Utcb const *u);
38   void restore(Utcb *u);
39 private:
40   L4_buf_desc buf_desc;
41   Mword buf[2];
42 };
43
44 /**
45  * Save critical contents of UTCB during nested IPC.
46  */
47 class Pf_msg_utcb_saver : public Buf_utcb_saver
48 {
49 public:
50   Pf_msg_utcb_saver(Utcb const *u);
51   void restore(Utcb *u);
52 private:
53   Mword msg[2];
54 };
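
// Illustrative usage sketch (not part of the kernel interface; it mirrors the
// pattern used by handle_page_fault_pager() and exception() below): snapshot
// the UTCB words that a nested in-kernel IPC will clobber, run that IPC, and
// restore the words afterwards so an interrupted user IPC still sees its
// original values:
//
//   Pf_msg_utcb_saver saved(utcb);   // saves buf_desc, buffers[0..1], values[0..1]
//   // ... build and send the page-fault IPC to the pager ...
//   saved.restore(utcb);             // put the caller's UTCB words back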
55
56 // ------------------------------------------------------------------------
57 INTERFACE [debug]:
58
59 #include "tb_entry.h"
60
61 EXTENSION class Thread
62 {
63 protected:
64   static unsigned log_fmt_pf_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_page_fault_invalid_pager");
65   static unsigned log_fmt_exc_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_exception_invalid_handler");
66 };
67
68 // ------------------------------------------------------------------------
69 IMPLEMENTATION:
70
71 // IPC setup, and handling of ``short IPC'' and page-fault IPC
72
73 // IDEAS for enhancing this implementation: 
74
75 // Volkmar has suggested a possible optimization for
76 // short-flexpage-to-long-message-buffer transfers: Currently, we have
77 // to resort to long IPC in that case because the message buffer might
78 // contain a receive-flexpage option.  An easy optimization would be
79 // to cache the receive-flexpage option in the TCB for that case.
80 // This would save us the long-IPC setup because we wouldn't have to
81 // touch the receiver's user memory in that case.  Volkmar argues that
82 // cases like that are quite common -- for example, imagine a pager
83 // which at the same time is also a server for ``normal'' requests.
84
85 // The handling of cancel and timeout conditions could be improved as
86 // follows: Cancel and Timeout should not reset the ipc_in_progress
87 // flag.  Instead, they should just set and/or reset a flag of their
88 // own that is checked every time an (IPC) system call wants to go to
89 // sleep.  That would mean that IPCs that do not block are not
90 // cancelled or aborted.
91 //-
92
93 #include <cstdlib>              // panic()
94
95 #include "l4_types.h"
96 #include "l4_msg_item.h"
97
98 #include "config.h"
99 #include "cpu_lock.h"
100 #include "ipc_timeout.h"
101 #include "lock_guard.h"
102 #include "logdefs.h"
103 #include "map_util.h"
104 #include "processor.h"
105 #include "timer.h"
106 #include "kdb_ke.h"
107 #include "warn.h"
108
109 PUBLIC
110 virtual void
111 Thread::ipc_receiver_aborted()
112 {
113   assert_kdb (receiver());
114
115   sender_dequeue(receiver()->sender_list());
116   receiver()->vcpu_update_state();
117   set_receiver(0);
118
119   if (!(state() & Thread_ipc_in_progress))
120     return;
121
122   state_add_dirty(Thread_ready);
123   sched()->deblock(cpu());
124 }
125
126 /** Receiver-ready callback.
127     Receivers call this function on waiting senders when they become
128     ready to receive a message from that sender. Senders need to
129     override this interface.
130
131     Class Thread's implementation wakes up the sender if it is still in
132     sender-wait state.
133  */
134 PUBLIC virtual
135 bool
136 Thread::ipc_receiver_ready(Receiver *recv)
137 {
138   if (cpu() == current_cpu())
139     return ipc_local_receiver_ready(recv);
140   else
141     return ipc_remote_receiver_ready(recv);
142 }
143
144 PUBLIC virtual
145 void
146 Thread::modify_label(Mword const *todo, int cnt)
147 {
148   assert_kdb (_snd_regs);
149   Mword l = _snd_regs->from_spec();
150   for (int i = 0; i < cnt*4; i += 4)
151     {
152       Mword const test_mask = todo[i];
153       Mword const test      = todo[i+1];
154       if ((l & test_mask) == test)
155         {
156           Mword const del_mask = todo[i+2];
157           Mword const add_mask = todo[i+3];
158
159           l = (l & ~del_mask) | add_mask;
160           _snd_regs->from(l);
161           return;
162         }
163     }
164 }
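
// Worked example (illustrative values only): modify_label() scans rules of
// four words each, { test_mask, test, del_mask, add_mask }.  With the single
// rule { 0xf00, 0x100, 0xf00, 0x200 } and a current label l = 0x1a3:
//
//   (0x1a3 & 0xf00) == 0x100        -> the rule matches
//   l = (0x1a3 & ~0xf00) | 0x200    -> the new label is 0x2a3
//
// The first matching rule wins; the label is written back and the loop ends.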
165
166 PRIVATE inline
167 bool
168 Thread::ipc_local_receiver_ready(Receiver *recv)
169 {
170   assert_kdb (receiver());
171   assert_kdb (receiver() == recv);
172   assert_kdb (receiver() == current());
173
174   if (!(state() & Thread_ipc_in_progress))
175     return false;
176
177   if (!recv->sender_ok(this))
178     return false;
179
180   recv->ipc_init(this);
181
182   state_add_dirty(Thread_ready | Thread_transfer_in_progress);
183
184   sched()->deblock(cpu());
185   sender_dequeue(recv->sender_list());
186   recv->vcpu_update_state();
187
188   // put the receiver to sleep
189   receiver()->state_del_dirty(Thread_ready);
190
191   return true;
192 }
193
194 PRIVATE inline
195 void
196 Thread::snd_regs(Syscall_frame *r)
197 { _snd_regs = r; }
198
199
200 /** Page fault handler.
201     This handler suspends any ongoing IPC, then sets up page-fault IPC.
202     Finally, the ongoing IPC's state (if any) is restored.
203     @param pfa page-fault virtual address
204     @param error_code page-fault error code.
205  */
206 PRIVATE
207 bool
208 Thread::handle_page_fault_pager(Thread_ptr const &_pager,
209                                 Address pfa, Mword error_code,
210                                 L4_msg_tag::Protocol protocol)
211 {
212 #ifndef NDEBUG
213   // do not handle user space page faults from kernel mode if we're
214   // already handling a request
215   if (EXPECT_FALSE(!PF::is_usermode_error(error_code)
216                    && thread_lock()->test() == Thread_lock::Locked))
217     {
218       kdb_ke("Fiasco BUG: page fault, under lock");
219       panic("page fault in locked operation");
220     }
221 #endif
222
223   if (EXPECT_FALSE((state() & Thread_alien)
224                    && !(state() & Thread_ipc_in_progress)))
225     return false;
226
227   Lock_guard<Cpu_lock> guard(&cpu_lock);
228
229   unsigned char rights;
230   Kobject_iface *pager = _pager.ptr(space(), &rights);
231
232   if (!pager)
233     {
234       WARN ("CPU%d: Pager of %lx is invalid (pfa=" L4_PTR_FMT
235             ", errorcode=" L4_PTR_FMT ") to %lx (pc=%lx)\n",
236             current_cpu(), dbg_info()->dbg_id(), pfa, error_code,
237             _pager.raw(), regs()->ip());
238
239
240       LOG_TRACE("Page fault invalid pager", "pf", this,
241                 __fmt_page_fault_invalid_pager,
242                 Log_pf_invalid *l = tbe->payload<Log_pf_invalid>();
243                 l->cap_idx = _pager.raw();
244                 l->err     = error_code;
245                 l->pfa     = pfa);
246
247       pager = this; // block on ourselves
248     }
249
250   // set up a register block used as an IPC parameter block for the
251   // page fault IPC
252   Syscall_frame r;
253   Utcb *utcb = access_utcb();
254
255   // save the UTCB fields affected by PF IPC
256   Pf_msg_utcb_saver saved_utcb_fields(utcb);
257
258
259   utcb->buf_desc = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
260   utcb->buffers[0] = L4_msg_item::map(0).raw();
261   utcb->buffers[1] = L4_fpage::all_spaces().raw();
262
263   utcb->values[0] = PF::addr_to_msgword0 (pfa, error_code);
264   utcb->values[1] = regs()->ip(); //PF::pc_to_msgword1 (regs()->ip(), error_code));
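
  // Sketch of what the pager receives (derived from the setup above): an IPC
  // with two untyped words -- word 0 is the fault address combined with the
  // fault's error bits (PF::addr_to_msgword0), word 1 the faulting instruction
  // pointer -- plus receive buffers that accept a single mapping anywhere in
  // the address space (L4_fpage::all_spaces).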
265
266   L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);
267   
268   // This might be a page fault in the midst of a long-message IPC operation.
269   // Save the current IPC state and restore it later.
270   Sender *orig_partner;
271   Syscall_frame *orig_rcv_regs;
272   save_receiver_state (&orig_partner, &orig_rcv_regs);
273
274   Receiver *orig_snd_partner = receiver();
275   Timeout *orig_timeout = _timeout;
276   if (orig_timeout)
277     orig_timeout->reset();
278
279   unsigned orig_ipc_state = state() & Thread_ipc_mask;
280
281   state_del(orig_ipc_state);
282   if (orig_ipc_state)
283     timeout = utcb->xfer;       // in long IPC -- use pagefault timeout
284
285   L4_msg_tag tag(2, 0, 0, protocol);
286
287   r.timeout(timeout);
288   r.tag(tag);
289   r.from(0);
290   r.ref(L4_obj_ref(_pager.raw() << L4_obj_ref::Cap_shift, L4_obj_ref::Ipc_call_ipc));
291   pager->invoke(r.ref(), rights, &r, utcb);
292
293
294   bool success = true;
295
296   if (EXPECT_FALSE(r.tag().has_error()))
297     {
298       if (Config::conservative)
299         {
300           printf(" page fault %s error = 0x%lx\n",
301                  utcb->error.snd_phase() ? "send" : "rcv",
302                  utcb->error.raw());
303           kdb_ke("ipc to pager failed");
304         }
305
306       if (utcb->error.snd_phase()
307           && (utcb->error.error() == L4_error::Not_existent)
308           && PF::is_usermode_error(error_code)
309           && !(state() & Thread_cancel))
310         {
311           success = false;
312         }
313     }
314   else // no error
315     {
316       // If the pager rejects the mapping, it replies -1 in msg.w0
317       if (EXPECT_FALSE (utcb->values[0] == Mword(-1)))
318         success = false;
319     }
320
321   // restore previous IPC state
322
323   saved_utcb_fields.restore(utcb);
324
325   set_receiver(orig_snd_partner);
326   restore_receiver_state(orig_partner, orig_rcv_regs);
327   state_add(orig_ipc_state);
328
329   if (orig_timeout)
330     orig_timeout->set_again(cpu());
331
332   return success;
333 }
334
335 PRIVATE inline
336 Mword
337 Thread::check_sender(Thread *sender, bool timeout)
338 {
339   if (EXPECT_FALSE(is_invalid()))
340     {
341       sender->access_utcb()->error = L4_error::Not_existent;
342       return Failed;
343     }
344
345   if (EXPECT_FALSE(!sender_ok(sender)))
346     {
347       if (!timeout)
348         {
349           sender->access_utcb()->error = L4_error::Timeout;
350           return Failed;
351         }
352
353       sender->set_receiver(this);
354       sender->sender_enqueue(sender_list(), sender->sched_context()->prio());
355       vcpu_set_irq_pending();
356       return Queued;
357     }
358
359   return Ok;
360 }
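
// Outcome sketch of check_sender(): an invalid receiver fails immediately with
// L4_error::Not_existent; a receiver that is not yet ready fails with
// L4_error::Timeout when the caller passed a zero send timeout, otherwise the
// sender is enqueued on the receiver's sender list and Queued is returned; a
// ready receiver yields Ok.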
361
362
363 PRIVATE inline NEEDS["timer.h"]
364 void Thread::goto_sleep(L4_timeout const &t, Sender *sender, Utcb *utcb)
365 {
366   if (EXPECT_FALSE
367      ((state() & (Thread_receiving | Thread_ipc_in_progress | Thread_cancel))
368       != (Thread_receiving | Thread_ipc_in_progress)))
369     return;
370
371   IPC_timeout timeout;
372
373   if (EXPECT_FALSE(t.is_finite() && !_timeout))
374     {
375
376       state_del_dirty(Thread_ready);
377
378       Unsigned64 tval = t.microsecs(Timer::system_clock(), utcb);
379
380       if (EXPECT_TRUE((tval != 0)))
381         {
382           set_timeout(&timeout);
383           timeout.set(tval, cpu());
384         }
385       else // timeout already hit
386         state_change_dirty(~Thread_ipc_in_progress, Thread_ready);
387
388     }
389   else
390     {
391       if (EXPECT_TRUE(t.is_never()))
392         state_del_dirty(Thread_ready);
393       else
394         state_change_dirty(~Thread_ipc_in_progress, Thread_ready);
395     }
396
397   if (sender == this)
398     switch_sched(sched());
399
400   schedule();
401
402   if (EXPECT_FALSE((long)_timeout))
403     {
404       timeout.reset();
405       set_timeout(0);
406     }
407
408   assert_kdb (state() & Thread_ready);
409 }
410
411
412
413
414 /**
415  * @pre cpu_lock must be held
416  */
417 PRIVATE inline NEEDS["logdefs.h"]
418 unsigned
419 Thread::handshake_receiver(Thread *partner, L4_timeout snd_t)
420 {
421   assert_kdb(cpu_lock.test());
422
423   switch (__builtin_expect(partner->check_sender(this, !snd_t.is_zero()), Ok))
424     {
425     case Failed:
426       return Failed;
427     case Queued:
428       state_add_dirty(Thread_send_in_progress | Thread_ipc_in_progress);
429       return Queued;
430     default:
431       return Ok;
432     }
433 }
434
435
436 PRIVATE inline
437 void
438 Thread::wake_receiver(Thread *receiver)
439 {
440   // If neither IPC partner is delayed, just update the receiver's state
441   if (1) // rt:EXPECT_TRUE(!((state() | receiver->state()) & Thread_delayed_ipc)))
442     {
443       receiver->state_change_dirty(~(Thread_ipc_receiving_mask
444                                      | Thread_ipc_in_progress),
445                                    Thread_ready);
446       return;
447     }
448
449   // Critical section if either IPC partner is delayed until its next period
450   assert_kdb (cpu_lock.test());
451 #if 0 // rt ext
452   // Sender has no receive phase and deadline timeout already hit
453   if ( (state() & (Thread_receiving |
454                    Thread_delayed_deadline | Thread_delayed_ipc)) ==
455       Thread_delayed_ipc)
456     {
457       state_change_dirty (~Thread_delayed_ipc, 0);
458       switch_sched (sched_context()->next());
459       _deadline_timeout.set (Timer::system_clock() + period(), cpu());
460     }
461
462   // Receiver's deadline timeout already hit
463   if ( (receiver->state() & (Thread_delayed_deadline |
464                              Thread_delayed_ipc) ==
465                              Thread_delayed_ipc))
466     {
467       receiver->state_change_dirty (~Thread_delayed_ipc, 0);
468       receiver->switch_sched (receiver->sched_context()->next());
469       receiver->_deadline_timeout.set (Timer::system_clock() +
470                                        receiver->period(), receiver->cpu());
471     }
472 #endif
473   receiver->state_change_dirty(~(Thread_ipc_mask | Thread_delayed_ipc), Thread_ready);
474 }
475
476 PRIVATE inline
477 void
478 Thread::set_ipc_error(L4_error const &e, Thread *rcv)
479 {
480   access_utcb()->error = e;
481   rcv->access_utcb()->error = L4_error(e, L4_error::Rcv);
482 }
483
484 PRIVATE inline NEEDS [Thread::do_send_wait]
485 bool
486 Thread::do_ipc_send(L4_msg_tag const &tag, Thread *partner,
487                     bool have_receive,
488                     L4_timeout_pair t, Syscall_frame *regs,
489                     bool *dont_switch, unsigned char rights)
490 {
491   unsigned result;
492
493   state_add_dirty(Thread_send_in_progress);
494   set_ipc_send_rights(rights);
495
496   if (EXPECT_FALSE(partner->cpu() != current_cpu()) ||
497       ((result = handshake_receiver(partner, t.snd)) == Failed
498        && partner->drq_pending()))
499     {
500       *dont_switch = true;
501       result = remote_handshake_receiver(tag, partner, have_receive, t.snd,
502                                          regs, rights);
503     }
504
505   if (EXPECT_FALSE(result & Queued))
506     {
507       L4_timeout snd_t;
508       if (result & Receive_in_progress)
509         snd_t = L4_timeout::Never;
510       else
511         snd_t = t.snd;
512
513       // set _snd_regs; our pending IPC may be finished remotely (cross-CPU) while we wait
514       snd_regs(regs);
515
516       if (!do_send_wait(partner, snd_t))
517         return false;
518     }
519   else if (EXPECT_FALSE(result == Failed))
520     {
521       state_del_dirty(Thread_ipc_sending_mask
522                       | Thread_transfer_in_progress
523                       | Thread_ipc_in_progress);
524       return false;
525     }
526
527   // Case 1: The handshake told us it was Ok
528   // Case 2: The send_wait told us it had finished w/o error
529
530   // In the X-CPU IPC case the IPC has already been finished here
531   if (EXPECT_FALSE(partner->cpu() != current_cpu()
532                    || (!(state() & Thread_send_in_progress))))
533     {
534       state_del_dirty(Thread_ipc_sending_mask | Thread_transfer_in_progress);
535       return true;
536     }
537
538   assert_kdb (!(state() & Thread_polling));
539
540   partner->ipc_init(this);
541
542   // We can reset the receiver's timeout here.  Ping-pong IPC with
543   // timeouts profits from this, because it requires much less
544   // sorting overhead.  If we don't reset the timeout, it is very
545   // likely that the receiver's timeout is still sitting in the
546   // timeout queue.
547   partner->reset_timeout();
548
549   bool success = transfer_msg(tag, partner, regs, rights);
550
551   if (success && this->partner() == partner)
552     partner->set_caller(this, rights);
553
554   if (!tag.do_switch() || partner->state() & Thread_suspended)
555     *dont_switch = true;
556
557   // partner locked, i.e. lazy locking (not locked) or we own the lock
558   assert_kdb (!partner->thread_lock()->test()
559               || partner->thread_lock()->lock_owner() == this);
560
561
562   if (EXPECT_FALSE(!success || !have_receive))
563     {
564       // make the ipc partner ready if still engaged in ipc with us
565       if (partner->in_ipc(this))
566         {
567           wake_receiver(partner);
568           if (!*dont_switch)
569             partner->thread_lock()->set_switch_hint(SWITCH_ACTIVATE_LOCKEE);
570         }
571
572       partner->thread_lock()->clear_dirty();
573
574       state_del(Thread_ipc_sending_mask
575                 | Thread_transfer_in_progress
576                 | Thread_ipc_in_progress);
577
578       return success;
579     }
580
581   partner->thread_lock()->clear_dirty_dont_switch();
582   // possible preemption point
583
584   if (EXPECT_TRUE(!partner->in_ipc(this)))
585     {
586       state_del(Thread_ipc_sending_mask
587                 | Thread_transfer_in_progress
588                 | Thread_ipc_in_progress);
589       sender_dequeue(partner->sender_list());
590       partner->vcpu_update_state();
591       access_utcb()->error = L4_error::Aborted;
592       return false;
593     }
594
595   wake_receiver(partner);
596   prepare_receive_dirty_2();
597   return true;
598 }
599
600 PRIVATE inline NOEXPORT
601 void
602 Thread::handle_abnormal_termination(Syscall_frame *regs)
603 {
604   if (EXPECT_TRUE (!(state() & Thread_ipc_receiving_mask)))
605     return;
606
607   Utcb *utcb = access_utcb();
608   // The IPC has not been finished; it could be a timeout or a cancel.
609   // XXX should only modify the error-code part of the status code
610
611   if (EXPECT_FALSE((state() & Thread_busy)))
612     regs->tag(commit_error(utcb, L4_error::R_aborted, regs->tag()));
613   else if (EXPECT_FALSE(state() & Thread_cancel))
614     {
615       // we've presumably been reset!
616       if (state() & Thread_transfer_in_progress)
617         regs->tag(commit_error(utcb, L4_error::R_aborted, regs->tag()));
618       else
619         regs->tag(commit_error(utcb, L4_error::R_canceled, regs->tag()));
620     }
621   else
622     regs->tag(commit_error(utcb, L4_error::R_timeout, regs->tag()));
623 }
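
// Summary of the receive-phase error codes chosen above:
//   Thread_busy set                        -> R_aborted  (transfer under way)
//   Thread_cancel set, transfer started    -> R_aborted
//   Thread_cancel set, no transfer yet     -> R_canceled
//   neither                                -> R_timeout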
624
625
626 /**
627  * Send an IPC message.
628  * Block until we can send the message or the timeout hits.
629  * @param partner the receiver of our message
630  * @param t a timeout specifier
631  * @param regs sender's IPC registers
632  * @pre cpu_lock must be held
633  * @post the IPC result (error code, if any) is delivered in the message tag of @a regs
634  */
635 PUBLIC
636 void
637 Thread::do_ipc(L4_msg_tag const &tag, bool have_send, Thread *partner,
638                bool have_receive, Sender *sender,
639                L4_timeout_pair t, Syscall_frame *regs,
640                unsigned char rights)
641 {
642   assert_kdb (cpu_lock.test());
643   assert_kdb (this == current());
644
645   bool dont_switch = false;
646   //LOG_MSG_3VAL(this, "ipc", (Mword) partner, (Mword) sender, cpu());
647   assert_kdb (!(state() & Thread_ipc_sending_mask));
648
649   prepare_receive_dirty_1(sender, have_receive ? regs : 0);
650
651   if (have_send)
652     {
653       assert_kdb(!in_sender_list());
654       bool ok = do_ipc_send(tag, partner, have_receive, t, regs, &dont_switch, rights);
655       if (EXPECT_FALSE(!ok))
656         {
657           regs->tag(L4_msg_tag(0, 0, L4_msg_tag::Error, 0));
658           assert_kdb (!in_sender_list());
659           return;
660         }
661
662       if (!have_receive)
663         {
664           regs->tag(L4_msg_tag(0,0,0,0));
665           assert_kdb (!in_sender_list());
666           return;
667         }
668     }
669   else
670     {
671       assert_kdb (have_receive);
672       prepare_receive_dirty_2();
673     }
674
675   assert_kdb (!in_sender_list());
676   assert_kdb (!(state() & Thread_ipc_sending_mask));
677
678   while (EXPECT_TRUE
679          ((state() & (Thread_receiving | Thread_ipc_in_progress | Thread_cancel))
680           == (Thread_receiving | Thread_ipc_in_progress)) )
681     {
682       Sender *next = 0;
683
684       if (EXPECT_FALSE((long)sender_list()->head()))
685         {
686           if (sender) // closed wait
687             {
688               if (sender->in_sender_list()
689                   && this == sender->receiver()
690                   && sender->ipc_receiver_ready(this))
691                 next = sender;
692             }
693           else // open wait
694             {
695
696               next = Sender::cast(sender_list()->head());
697
698               assert_kdb (next->in_sender_list());
699
700               if (!next->ipc_receiver_ready(this)) 
701                 {
702                   next->sender_dequeue_head(sender_list());
703                   vcpu_update_state();
704                   Proc::preemption_point();
705                   continue;
706                 }
707             }
708         }
709
710       assert_kdb (cpu_lock.test());
711
712       // XXX: I'm not sure that EXPECT_FALSE is the right choice here
713       if (EXPECT_FALSE((long) next))
714         {
715
716           assert_kdb (!(state() & Thread_ipc_in_progress)
717                  || !(state() & Thread_ready));
718
719           // maybe switch_exec should return a bool to avoid testing the
720           // state twice
721           if (have_send) 
722             {
723               assert_kdb (partner);
724               assert_kdb (partner->sched());
725             }
726           /* dont_switch == true for xCPU */
727           if (EXPECT_TRUE(have_send && !dont_switch
728                           && (partner->state() & Thread_ready)
729                           && (next->sender_prio() <= partner->sched()->prio())))
730             switch_exec_schedule_locked(partner,  Context::Not_Helping);
731           else
732             {
733               if (have_send && partner->cpu() == cpu()
734                   && (partner->state() & Thread_ready))
735                 partner->sched()->deblock(cpu());
736               schedule();
737             }
738
739           assert_kdb (state() & Thread_ready);
740         }
741       else
742         {
743           if (EXPECT_TRUE(have_send && partner->cpu() == cpu()
744                           && (partner->state() & Thread_ready)))
745             {
746               have_send = false;
747               if (!dont_switch)
748                 {
749                   switch_exec_locked(partner,  Context::Not_Helping);
750                   // We have to retry if there are possible senders in our
751                   // sender queue, because a sender from a remote CPU may
752               // have been enqueued by handle_drq() during switch_exec_locked()
753                   continue;
754                 }
755               else
756                 partner->sched()->deblock(cpu());
757             }
758
759           goto_sleep(t.rcv, sender, access_utcb());
760           have_send = false;
761           // LOG_MSG_3VAL(this, "ipcrw", Mword(sender), state(), 0);
762         }
763     }
764
765   assert_kdb (!(state() & Thread_ipc_sending_mask));
766
767   // if the receive operation was canceled/finished before we 
768   // switched to the old receiver, finish the send
769   if (have_send && partner->cpu() == cpu()
770       && (partner->state() & Thread_ready))
771     {
772       if (!dont_switch && EXPECT_TRUE(partner != this))
773         switch_exec_schedule_locked(partner,  Context::Not_Helping);
774       else
775         partner->sched()->deblock(cpu());
776     }
777
778   // fast out if ipc is already finished
779   if (EXPECT_TRUE((state() & ~(Thread_transfer_in_progress | Thread_fpu_owner|Thread_cancel)) == Thread_ready))
780     {
781       state_del(Thread_transfer_in_progress);
782       return;
783     }
784   assert_kdb (!(state() & (Thread_ipc_sending_mask)));
785
786   // abnormal termination?
787   handle_abnormal_termination(regs);
788
789   state_del(Thread_ipc_mask);
790 }
791
792
793 PRIVATE inline NEEDS ["map_util.h", Thread::copy_utcb_to, 
794                       Thread::unlock_receiver]
795 bool
796 Thread::transfer_msg(L4_msg_tag tag, Thread *receiver,
797                      Syscall_frame *sender_regs, unsigned char rights)
798 {
799   Syscall_frame* dst_regs = receiver->rcv_regs();
800
801   bool success = copy_utcb_to(tag, receiver, rights);
802   tag.set_error(!success);
803   dst_regs->tag(tag);
804   dst_regs->from(sender_regs->from_spec());
805   return success;
806 }
807
808
809 /** Unlock the Receiver locked with ipc_try_lock().
810     If the sender goes to wait for a registered message, enable LIPC.
811     @param receiver receiver to unlock
812     @param sender_regs dummy
813  */
814 PRIVATE inline NEEDS ["entry_frame.h"]
815 void
816 Thread::unlock_receiver(Receiver *receiver, const Syscall_frame*)
817 {
818   receiver->ipc_unlock();
819 }
820
821
822 IMPLEMENT inline
823 Buf_utcb_saver::Buf_utcb_saver(const Utcb *u)
824 {
825   buf_desc = u->buf_desc;
826   buf[0] = u->buffers[0];
827   buf[1] = u->buffers[1];
828 }
829
830 IMPLEMENT inline
831 void
832 Buf_utcb_saver::restore(Utcb *u)
833 {
834   u->buf_desc = buf_desc;
835   u->buffers[0] = buf[0];
836   u->buffers[1] = buf[1];
837 }
838
839 IMPLEMENT inline
840 Pf_msg_utcb_saver::Pf_msg_utcb_saver(Utcb const *u) : Buf_utcb_saver(u)
841 {
842   msg[0] = u->values[0];
843   msg[1] = u->values[1];
844 }
845
846 IMPLEMENT inline
847 void
848 Pf_msg_utcb_saver::restore(Utcb *u)
849 {
850   Buf_utcb_saver::restore(u);
851   u->values[0] = msg[0];
852   u->values[1] = msg[1];
853 }
854
855
856 /**
857  * \pre must run with local IRQs disabled (CPU lock held)
858  * to ensure that the handler does not disappear in the meantime.
859  */
860 PRIVATE
861 bool
862 Thread::exception(Kobject_iface *handler, Trap_state *ts, Mword rights)
863 {
864   Syscall_frame r;
865   L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);
866
867   CNT_EXC_IPC;
868
869   void *old_utcb_handler = _utcb_handler;
870   _utcb_handler = ts;
871
872   // fill registers for IPC
873   Utcb *utcb = access_utcb();
874   Buf_utcb_saver saved_state(utcb);
875
876   utcb->buf_desc = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
877   utcb->buffers[0] = L4_msg_item::map(0).raw();
878   utcb->buffers[1] = L4_fpage::all_spaces().raw();
879
880   // clear regs
881   L4_msg_tag tag(L4_exception_ipc::Msg_size, 0, L4_msg_tag::Transfer_fpu,
882                  L4_msg_tag::Label_exception);
883
884   r.tag(tag);
885   r.timeout(timeout);
886   r.from(0);
887   r.ref(L4_obj_ref(_exc_handler.raw() << L4_obj_ref::Cap_shift, L4_obj_ref::Ipc_call_ipc));
888   spill_user_state();
889   handler->invoke(r.ref(), rights, &r, utcb);
890   fill_user_state();
891
892   saved_state.restore(utcb);
893
894   if (EXPECT_FALSE(r.tag().has_error()))
895     {
896       if (Config::conservative)
897         {
898           printf(" exception fault %s error = 0x%lx\n",
899                  utcb->error.snd_phase() ? "send" : "rcv",
900                  utcb->error.raw());
901           kdb_ke("ipc to pager failed");
902         }
903
904       state_del(Thread_in_exception);
905     }
906    else if (r.tag().proto() == L4_msg_tag::Label_allow_syscall)
907      state_add(Thread_dis_alien);
908
909   // restore original utcb_handler
910   _utcb_handler = old_utcb_handler;
911
912   // FIXME: handle not existing pager properly
913   // for now, just ignore any errors
914   return 1;
915 }
916
917 /* Return 1 if the exception could be handled.
918  * Return 0 if it could not; the caller of send_exception() then halts the thread.
919  */
920 PUBLIC inline NEEDS["task.h", "trap_state.h",
921                     Thread::fast_return_to_user,
922                     Thread::save_fpu_state_to_utcb]
923 int
924 Thread::send_exception(Trap_state *ts)
925 {
926   assert(cpu_lock.test());
927
928   Vcpu_state *vcpu = access_vcpu();
929
930   if (vcpu_exceptions_enabled(vcpu))
931     {
932       // do not reflect debug exceptions to the vCPU but handle them in
933       // Fiasco
934       if (EXPECT_FALSE(ts->is_debug_exception()
935                        && !(vcpu->state & Vcpu_state::F_debug_exc)))
936         return 0;
937
938       if (_exc_cont.valid())
939         return 1;
940       vcpu_enter_kernel_mode(vcpu);
941       spill_user_state();
942       LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
943           Vcpu_log *l = tbe->payload<Vcpu_log>();
944           l->type = 2;
945           l->state = vcpu->_saved_state;
946           l->ip = ts->ip();
947           l->sp = ts->sp();
948           l->trap = ts->trapno();
949           l->err = ts->error();
950           l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
951           );
952       memcpy(&vcpu->_ts, ts, sizeof(Trap_state));
953       save_fpu_state_to_utcb(ts, access_utcb());
954       fast_return_to_user(vcpu->_entry_ip, vcpu->_sp);
955     }
956
957   // local IRQs must be disabled because we dereference a Thread_ptr
958   if (EXPECT_FALSE(_exc_handler.is_kernel()))
959     return 0;
960
961   if (!send_exception_arch(ts))
962     return 0; // do not send exception
963
964   unsigned char rights = 0;
965   Kobject_iface *pager = _exc_handler.ptr(space(), &rights);
966
967   if (EXPECT_FALSE(!pager))
968     {
969       /* no pager (anymore), just ignore the exception, return success */
970       LOG_TRACE("Exception invalid handler", "exc", this,
971                 __fmt_exception_invalid_handler,
972                 Log_exc_invalid *l = tbe->payload<Log_exc_invalid>();
973                 l->cap_idx = _exc_handler.raw());
974       if (EXPECT_FALSE(space() == sigma0_task))
975         {
976           WARNX(Error, "Sigma0 raised an exception --> HALT\n");
977           panic("...");
978         }
979
980       pager = this; // block on ourselves
981     }
982
983   state_change(~Thread_cancel, Thread_in_exception);
984
985   return exception(pager, ts, rights);
986 }
987
988 PRIVATE static
989 bool
990 Thread::try_transfer_local_id(L4_buf_iter::Item const *const buf,
991                               L4_fpage sfp, Mword *rcv_word, Thread* snd,
992                               Thread *rcv)
993 {
994   if (buf->b.is_rcv_id())
995     {
996       if (snd->space() == rcv->space())
997         {
998           rcv_word[-2] |= 6;
999           rcv_word[-1] = sfp.raw();
1000           return true;
1001         }
1002       else
1003         {
1004           unsigned char rights = 0;
1005           Obj_space::Capability cap = snd->space()->obj_space()->lookup(sfp.obj_index());
1006           Kobject_iface *o = cap.obj();
1007           rights = cap.rights();
1008           if (EXPECT_TRUE(o && o->is_local(rcv->space())))
1009             {
1010               rcv_word[-2] |= 4;
1011               rcv_word[-1] = o->obj_id() | Mword(rights);
1012               return true;
1013             }
1014         }
1015     }
1016   return false;
1017 }
1018
1019
1020 PRIVATE static
1021 bool
1022 Thread::transfer_msg_items(L4_msg_tag const &tag, Thread* snd, Utcb *snd_utcb,
1023                            Thread *rcv, Utcb *rcv_utcb,
1024                            unsigned char rights)
1025 {
1026   // LOG_MSG_3VAL(current(), "map bd=", rcv_utcb->buf_desc.raw(), 0, 0);
1027   L4_buf_iter mem_buffer(rcv_utcb, rcv_utcb->buf_desc.mem());
1028   L4_buf_iter io_buffer(rcv_utcb, rcv_utcb->buf_desc.io());
1029   L4_buf_iter obj_buffer(rcv_utcb, rcv_utcb->buf_desc.obj());
1030   L4_snd_item_iter snd_item(snd_utcb, tag.words());
1031   register int items = tag.items();
1032   Mword *rcv_word = rcv_utcb->values + tag.words();
1033
1034   // XXX: damn X-CPU state modification
1035   // snd->prepare_long_ipc(rcv);
1036   Reap_list rl;
1037
1038   for (;items > 0 && snd_item.more();)
1039     {
1040       if (EXPECT_FALSE(!snd_item.next()))
1041         {
1042           snd->set_ipc_error(L4_error::Overflow, rcv);
1043           return false;
1044         }
1045
1046       L4_snd_item_iter::Item const *const item = snd_item.get();
1047
1048       if (item->b.is_void())
1049         { // XXX: not sure if void fpages are needed
1050           // skip send item and current rcv_buffer
1051           --items;
1052           continue;
1053         }
1054
1055       L4_buf_iter *buf_iter = 0;
1056
1057       switch (item->b.type())
1058         {
1059         case L4_msg_item::Map:
1060           switch (L4_fpage(item->d).type())
1061             {
1062             case L4_fpage::Memory: buf_iter = &mem_buffer; break;
1063             case L4_fpage::Io:     buf_iter = &io_buffer; break;
1064             case L4_fpage::Obj:    buf_iter = &obj_buffer; break;
1065             default: break;
1066             }
1067           break;
1068         default:
1069           break;
1070         }
1071
1072       if (EXPECT_FALSE(!buf_iter))
1073         {
1074           // LOG_MSG_3VAL(snd, "lIPCm0", 0, 0, 0);
1075           snd->set_ipc_error(L4_error::Overflow, rcv);
1076           return false;
1077         }
1078
1079       L4_buf_iter::Item const *const buf = buf_iter->get();
1080
1081       if (EXPECT_FALSE(buf->b.is_void() || buf->b.type() != item->b.type()))
1082         {
1083           // LOG_MSG_3VAL(snd, "lIPCm1", buf->b.raw(), item->b.raw(), 0);
1084           snd->set_ipc_error(L4_error::Overflow, rcv);
1085           return false;
1086         }
1087
1088         {
1089           assert_kdb (item->b.type() == L4_msg_item::Map);
1090           L4_fpage sfp(item->d);
1091           *rcv_word = (item->b.raw() & ~0x0ff7) | (sfp.raw() & 0x0ff0);
1092
1093           rcv_word += 2;
1094
1095           if (!try_transfer_local_id(buf, sfp, rcv_word, snd, rcv))
1096             {
1097               // we need to do a real mapping
1098
1099               // diminish the rights when sending via restricted IPC gates
1100               if (sfp.type() == L4_fpage::Obj)
1101                 sfp.mask_rights(L4_fpage::Rights(rights | L4_fpage::RX));
1102
1103               L4_error err = fpage_map(snd->space(), sfp,
1104                   rcv->space(), L4_fpage(buf->d), item->b.raw(), &rl);
1105
1106               if (EXPECT_FALSE(!err.ok()))
1107                 {
1108                   snd->set_ipc_error(err, rcv);
1109                   return false;
1110                 }
1111             }
1112         }
1113
1114       --items;
1115
1116       if (!item->b.compund())
1117         buf_iter->next();
1118     }
1119
1120   if (EXPECT_FALSE(items))
1121     {
1122       snd->set_ipc_error(L4_error::Overflow, rcv);
1123       return false;
1124     }
1125
1126   return true;
1127 }
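
// Transfer sketch (an illustration of the loop above, not additional
// semantics): the sender's typed items follow its untyped words in the UTCB,
// while the receiver announces buffers for memory, I/O and object mappings via
// buf_desc.  For every Map item the receive buffer of the matching type is
// consulted; same-space or already-local object transfers may be
// short-circuited by try_transfer_local_id(), everything else goes through
// fpage_map(), with object rights diminished by the rights of the IPC gate the
// message arrived through.  A compound item keeps the current receive buffer
// for the following item.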
1128
1129
1130 /**
1131  * \pre Runs on the sender CPU
1132  */
1133 PRIVATE inline NEEDS[Thread::do_remote_abort_send]
1134 bool
1135 Thread::abort_send(L4_error const &e, Thread *partner)
1136 {
1137   state_del_dirty(Thread_send_in_progress | Thread_polling | Thread_ipc_in_progress
1138                   | Thread_transfer_in_progress);
1139
1140   if (_timeout && _timeout->is_set())
1141     _timeout->reset();
1142
1143   set_timeout(0);
1144
1145   if (partner->cpu() == current_cpu())
1146     {
1147       if (in_sender_list())
1148         {
1149           sender_dequeue(partner->sender_list());
1150           partner->vcpu_update_state();
1151         }
1152
1153       access_utcb()->error = e;
1154       return true;
1155     }
1156
1157   return do_remote_abort_send(e, partner);
1158 }
1159
1160
1161
1162 /**
1163  * \pre Runs on the sender CPU
1164  */
1165 PRIVATE inline
1166 bool
1167 Thread::do_send_wait(Thread *partner, L4_timeout snd_t)
1168 {
1169   state_add_dirty(Thread_polling);
1170
1171   IPC_timeout timeout;
1172
1173   if (EXPECT_FALSE(snd_t.is_finite()))
1174     {
1175       Unsigned64 tval = snd_t.microsecs(Timer::system_clock(), access_utcb());
1176       // Zero timeout or timeout expired already -- give up
1177       if (tval == 0)
1178         return abort_send(L4_error::Timeout, partner);
1179
1180       set_timeout(&timeout);
1181       timeout.set(tval, cpu());
1182     }
1183
1184   while (1)
1185     {
1186       if ((state() & (Thread_ipc_in_progress | Thread_polling
1187                       | Thread_cancel | Thread_transfer_in_progress))
1188            == (Thread_ipc_in_progress | Thread_polling))
1189         {
1190           state_del_dirty(Thread_ready);
1191           schedule();
1192         }
1193
1194       // ipc handshake bit is set
1195       if ((state() & (Thread_transfer_in_progress | Thread_receiving
1196                       | Thread_ipc_in_progress))
1197           != Thread_ipc_in_progress)
1198         break;
1199
1200       if (EXPECT_FALSE(state() & Thread_cancel))
1201         return abort_send(L4_error::Canceled, partner);
1202
1203       // FIXME: existence check
1204 #if 0
1205       if (EXPECT_FALSE(0 && partner->is_invalid()))
1206         {
1207           state_del_dirty(Thread_send_in_progress | Thread_polling
1208               | Thread_ipc_in_progress | Thread_transfer_in_progress);
1209
1210           if (_timeout && _timeout->is_set())
1211             _timeout->reset();
1212
1213           set_timeout(0);
1214
1215           access_utcb()->error = L4_error::Not_existent;
1216           return false;
1217         }
1218 #endif
1219
1220       // Make sure we're really still in IPC
1221       assert_kdb (state() & Thread_ipc_in_progress);
1222
1223       state_add_dirty(Thread_polling);
1224     }
1225
1226   state_del_dirty(Thread_polling);
1227
1228   if (EXPECT_FALSE((state() & (Thread_send_in_progress | Thread_cancel))
1229         == (Thread_send_in_progress | Thread_cancel)))
1230     return abort_send(L4_error::Canceled, partner);
1231
1232   // reset is only a simple dequeue operation from a doubly-linked
1233   // list, so we don't need an extra preemption point for this
1234
1235   if (EXPECT_FALSE(timeout.has_hit() && (state() & (Thread_send_in_progress
1236                                | Thread_ipc_in_progress)) ==
1237       Thread_send_in_progress))
1238     return abort_send(L4_error::Timeout, partner);
1239
1240   timeout.reset();
1241   set_timeout(0);
1242
1243   return true;
1244 }
1245
1246
1247 //---------------------------------------------------------------------
1248 IMPLEMENTATION [!mp]:
1249
1250 PRIVATE inline
1251 void
1252 Thread::set_ipc_send_rights(unsigned char)
1253 {}
1254
1255 PRIVATE inline NEEDS ["l4_types.h"]
1256 unsigned
1257 Thread::remote_handshake_receiver(L4_msg_tag const &, Thread *,
1258                                   bool, L4_timeout, Syscall_frame *, unsigned char)
1259 {
1260   kdb_ke("Remote IPC in UP kernel");
1261   return Failed;
1262 }
1263
1264 PRIVATE inline
1265 bool
1266 Thread::ipc_remote_receiver_ready(Receiver *)
1267 { kdb_ke("Remote IPC in UP kernel"); return false; }
1268
1269
1270 PRIVATE inline
1271 bool
1272 Thread::do_remote_abort_send(L4_error const &, Thread *)
1273 { kdb_ke("Remote abort send on UP kernel"); return false; }
1274
1275 //---------------------------------------------------------------------
1276 INTERFACE [mp]:
1277
1278 EXTENSION class Thread
1279 {
1280 private:
1281   unsigned char _ipc_send_rights;
1282 };
1283
1284 struct Ipc_remote_request;
1285
1286 struct Ipc_remote_request
1287 {
1288   L4_msg_tag tag;
1289   Thread *partner;
1290   Syscall_frame *regs;
1291   unsigned char rights;
1292   bool timeout;
1293   bool have_rcv;
1294
1295   unsigned result;
1296 };
1297
1298 struct Ready_queue_request;
1299
1300 struct Ready_queue_request
1301 {
1302   Thread *thread;
1303   Mword state_add;
1304   Mword state_del;
1305
1306   enum Result { Done, Wrong_cpu, Not_existent };
1307   Result result;
1308 };
1309
1310 //---------------------------------------------------------------------
1311 IMPLEMENTATION [mp]:
1312
1313
1314 PRIVATE inline
1315 void
1316 Thread::set_ipc_send_rights(unsigned char c)
1317 {
1318   _ipc_send_rights = c;
1319 }
1320
1321 PRIVATE inline
1322 void
1323 Thread::schedule_if(bool s)
1324 {
1325   if (!s || current()->schedule_in_progress())
1326     return;
1327
1328   current()->schedule();
1329 }
1330
1331 PRIVATE inline NEEDS[Thread::schedule_if]
1332 bool
1333 Thread::do_remote_abort_send(L4_error const &e, Thread *partner)
1334 {
1335   Ipc_remote_request rq;
1336   rq.partner = partner;
1337   partner->drq(handle_remote_abort_send, &rq);
1338   if (rq.tag.has_error())
1339     access_utcb()->error = e;
1340   schedule_if(handle_drq());
1341   return !rq.tag.has_error();
1342 }
1343
1344 /**
1345  *
1346  * Runs on the receiver CPU in the context of recv.
1347  * The 'this' pointer is the sender.
1348  */
1349 PRIVATE inline NEEDS[Thread::schedule_if]
1350 bool
1351 Thread::ipc_remote_receiver_ready(Receiver *recv)
1352 {
1353   //printf(" remote ready: %x.%x \n", id().task(), id().lthread());
1354   //LOG_MSG_3VAL(this, "recvr", Mword(recv), 0, 0);
1355   assert_kdb (recv->cpu() == current_cpu());
1356
1357   recv->ipc_init(this);
1358
1359   Syscall_frame *regs = _snd_regs;
1360
1361   recv->vcpu_disable_irqs();
1362   //printf("  transfer to %p\n", recv);
1363   bool success = transfer_msg(regs->tag(), nonull_static_cast<Thread*>(recv), regs, _ipc_send_rights);
1364   //printf("  done\n");
1365   regs->tag(L4_msg_tag(regs->tag(), success ? 0 : L4_msg_tag::Error));
1366   if (success && partner() == nonull_static_cast<Thread*>(recv))
1367     nonull_static_cast<Thread*>(recv)->set_caller(this, _ipc_send_rights);
1368
1369
1370   recv->state_del_dirty(Thread_ipc_receiving_mask | Thread_ipc_in_progress);
1371
1372   // dequeue sender from receiver's sending queue
1373   sender_dequeue(recv->sender_list());
1374   recv->vcpu_update_state();
1375
1376   Ready_queue_request rq;
1377   rq.thread = this;
1378   rq.state_add = Thread_transfer_in_progress;
1379   if (Receiver::prepared())
1380     { // same as in Receiver::prepare_receive_dirty_2
1381       rq.state_del = Thread_ipc_sending_mask;
1382       rq.state_add |= Thread_receiving;
1383     }
1384   else
1385     rq.state_del = 0;
1386
1387   drq(handle_remote_ready_enqueue, &rq);
1388   schedule_if(current()->handle_drq());
1389   //printf("  wakeup sender done\n");
1390
1391   return true;
1392 }
1393
1394 PRIVATE inline NOEXPORT
1395 bool
1396 Thread::remote_ipc_send(Context *src, Ipc_remote_request *rq)
1397 {
1398   (void)src;
1399   //LOG_MSG_3VAL(this, "rse", current_cpu(), (Mword)src, 0);
1400 #if 0
1401   LOG_MSG_3VAL(this, "rsend", (Mword)src, 0, 0);
1402   printf("CPU[%u]: remote IPC send ...\n"
1403          "  partner=%p [%u]\n"
1404          "  sender =%p [%u] regs=%p\n"
1405          "  timeout=%u\n",
1406          current_cpu(),
1407          rq->partner, rq->partner->cpu(),
1408          src, src->cpu(),
1409          rq->regs,
1410          rq->timeout);
1411 #endif
1412   rq->result = Ok;
1413
1414   switch (__builtin_expect(rq->partner->check_sender(this, rq->timeout), Ok))
1415     {
1416     case Failed:
1417       rq->result = Failed;
1418       return false;
1419     case Queued:
1420       rq->result = Queued;
1421       return false;
1422     default:
1423       break;
1424     }
1425
1426   // Trigger the ipc_remote_receiver_ready path, because we may need to grab
1427   // locks and this is forbidden in a DRQ handler.  So transfer the IPC in the
1428   // usual thread code.  However, this induces an overhead of two extra IPIs.
1429   if (rq->tag.items())
1430     {
1431       set_receiver(rq->partner);
1432       sender_enqueue(rq->partner->sender_list(), sched_context()->prio());
1433       rq->partner->vcpu_set_irq_pending();
1434
1435       //LOG_MSG_3VAL(rq->partner, "pull", dbg_id(), 0, 0);
1436       rq->result = Queued | Receive_in_progress;
1437       rq->partner->state_add_dirty(Thread_ready);
1438       rq->partner->sched()->deblock(current_cpu());
1439       return true;
1440     }
1441   rq->partner->vcpu_disable_irqs();
1442   bool success = transfer_msg(rq->tag, rq->partner, rq->regs, _ipc_send_rights);
1443   rq->result = success ? Ok : Failed;
1444
1445   if (success && partner() == rq->partner)
1446     rq->partner->set_caller(this, _ipc_send_rights);
1447
1448   rq->partner->state_change_dirty(~(Thread_ipc_receiving_mask | Thread_ipc_in_progress), Thread_ready);
1449   // hm, should be done by lazy queueing: rq->partner->ready_enqueue();
1450   return true;
1451 }
1452
1453 PRIVATE static
1454 unsigned
1455 Thread::handle_remote_ipc_send(Drq *src, Context *, void *_rq)
1456 {
1457   Ipc_remote_request *rq = (Ipc_remote_request*)_rq;
1458   bool r = nonull_static_cast<Thread*>(src->context())->remote_ipc_send(src->context(), rq);
1459   //LOG_MSG_3VAL(src, "rse<", current_cpu(), (Mword)src, r);
1460   return r ? Drq::Need_resched : 0;
1461 }
1462
1463 PRIVATE static
1464 unsigned
1465 Thread::handle_remote_abort_send(Drq *src, Context *, void *_rq)
1466 {
1467   Ipc_remote_request *rq = (Ipc_remote_request*)_rq;
1468   Thread *sender = nonull_static_cast<Thread*>(src->context());
1469   if (sender->in_sender_list())
1470     {
1471       // really cancelled IPC
1472       rq->tag.set_error(true);
1473       sender->sender_dequeue(rq->partner->sender_list());
1474       rq->partner->vcpu_update_state();
1475       return 0;
1476     }
1477   else
1478     {
1479       // IPC already done
1480       return 0;
1481     }
1482 }
1483
1484
1485 PRIVATE static
1486 unsigned
1487 Thread::handle_remote_ready_enqueue(Drq *, Context *self, void *_rq)
1488 {
1489   Ready_queue_request *rq = (Ready_queue_request*)_rq;
1490   Context *c = self;
1491   //LOG_MSG_3VAL(current(), "rre", rq->state_add, rq->state_del, c->state());
1492
1493   c->state_add_dirty(rq->state_add);
1494   c->state_del_dirty(rq->state_del);
1495   rq->result = Ready_queue_request::Done;
1496
1497   if (EXPECT_FALSE(c->state() & Thread_ready))
1498     return Drq::Need_resched;
1499
1500   c->state_add_dirty(Thread_ready);
1501   // hm, should be done by our lazy queueing: c->ready_enqueue();
1502   return Drq::Need_resched;
1503 }
1504
1505
1506
1507
1508 /**
1509  * \pre Runs on the sender CPU
1510  */
1511 PRIVATE //inline NEEDS ["mp_request.h"]
1512 unsigned
1513 Thread::remote_handshake_receiver(L4_msg_tag const &tag, Thread *partner,
1514                                   bool have_receive,
1515                                   L4_timeout snd_t, Syscall_frame *regs,
1516                                   unsigned char rights)
1517 {
1518   // Flag that there must be no switch in the receive path.
1519   // This flag also prevents the receive path from accessing
1520   // the thread state of a remote sender.
1521   Ipc_remote_request rq;
1522   rq.tag = tag;
1523   rq.have_rcv = have_receive;
1524   rq.partner = partner;
1525   rq.timeout = !snd_t.is_zero();
1526   rq.regs = regs;
1527   rq.rights = rights;
1528   _snd_regs = regs;
1529
1530   set_receiver(partner);
1531
1532   state_add_dirty(Thread_send_in_progress | Thread_ipc_in_progress);
1533
1534   partner->drq(handle_remote_ipc_send, &rq,
1535                remote_prepare_receive);
1536
1537
1538   return rq.result;
1539 }
1540
1541 PRIVATE static
1542 unsigned
1543 Thread::remote_prepare_receive(Drq *src, Context *, void *arg)
1544 {
1545   Context *c = src->context();
1546   Ipc_remote_request *rq = (Ipc_remote_request*)arg;
1547   //printf("CPU[%2u:%p]: remote_prepare_receive (err=%x)\n", current_cpu(), c, rq->err.error());
1548
1549   if (EXPECT_FALSE(rq->result & Queued))
1550     return 0;
1551
1552   c->state_del(Thread_send_in_progress);
1553   if (EXPECT_FALSE((rq->result & Failed) || !rq->have_rcv))
1554     return 0;
1555
1556   Thread *t = nonull_static_cast<Thread*>(c);
1557   t->prepare_receive_dirty_2();
1558   return 0;
1559 }
1560
1561 //---------------------------------------------------------------------------
1562 IMPLEMENTATION [debug]:
1563
1564 IMPLEMENT
1565 unsigned
1566 Thread::log_fmt_pf_invalid(Tb_entry *e, int max, char *buf)
1567 {
1568   Log_pf_invalid *l = e->payload<Log_pf_invalid>();
1569   return snprintf(buf, max, "InvCap C:%lx pfa=%lx err=%lx", l->cap_idx, l->pfa, l->err);
1570 }
1571
1572 IMPLEMENT
1573 unsigned
1574 Thread::log_fmt_exc_invalid(Tb_entry *e, int max, char *buf)
1575 {
1576   Log_exc_invalid *l = e->payload<Log_exc_invalid>();
1577   return snprintf(buf, max, "InvCap C:%lx", l->cap_idx);
1578 }