1 INTERFACE:
2
3 #include "l4_buf_iter.h"
4 #include "l4_error.h"
5
6 class Syscall_frame;
7
8 EXTENSION class Thread
9 {
10 protected:
11   struct Log_pf_invalid
12   {
13     Mword pfa;
14     Mword cap_idx;
15     Mword err;
16   };
17
18   struct Log_exc_invalid
19   {
20     Mword cap_idx;
21   };
22
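  /**
   * Result of check_sender(); callers treat the values as bit flags
   * (e.g. Queued | Receive_in_progress in the cross-CPU send path).
   */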
23   enum Check_sender_result
24   {
25     Ok = 0,
26     Queued = 2,
27     Receive_in_progress = 4,
28     Failed = 1,
29   };
30
31   Syscall_frame *_snd_regs;
32 };
33
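/**
 * Save and restore the UTCB buffer descriptor and the first two buffer
 * registers around nested (page-fault or exception) IPC.
 */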
34 class Buf_utcb_saver
35 {
36 public:
37   Buf_utcb_saver(Utcb const *u);
38   void restore(Utcb *u);
39 private:
40   L4_buf_desc buf_desc;
41   Mword buf[2];
42 };
43
44 /**
45  * Save critical contents of UTCB during nested IPC.
46  */
47 class Pf_msg_utcb_saver : public Buf_utcb_saver
48 {
49 public:
50   Pf_msg_utcb_saver(Utcb const *u);
51   void restore(Utcb *u);
52 private:
53   Mword msg[2];
54 };
55
56 // ------------------------------------------------------------------------
57 INTERFACE [debug]:
58
59 #include "tb_entry.h"
60
61 EXTENSION class Thread
62 {
63 protected:
64   static unsigned log_fmt_pf_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_page_fault_invalid_pager");
65   static unsigned log_fmt_exc_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_exception_invalid_handler");
66 };
67
68 // ------------------------------------------------------------------------
69 IMPLEMENTATION:
70
71 // IPC setup, and handling of ``short IPC'' and page-fault IPC
72
73 // IDEAS for enhancing this implementation: 
74
75 // Volkmar has suggested a possible optimization for
76 // short-flexpage-to-long-message-buffer transfers: Currently, we have
77 // to resort to long IPC in that case because the message buffer might
78 // contain a receive-flexpage option.  An easy optimization would be
79 // to cache the receive-flexpage option in the TCB for that case.
80 // This would save us the long-IPC setup because we wouldn't have to
81 // touch the receiver's user memory in that case.  Volkmar argues that
82 // cases like that are quite common -- for example, imagine a pager
83 // which at the same time is also a server for ``normal'' requests.
84
85 // The handling of cancel and timeout conditions could be improved as
86 // follows: Cancel and Timeout should not reset the ipc_in_progress
87 // flag.  Instead, they should just set and/or reset a flag of their
88 // own that is checked every time an (IPC) system call wants to go to
89 // sleep.  That would mean that IPCs that do not block are not
90 // cancelled or aborted.
91 //-
92
93 #include <cstdlib>              // panic()
94
95 #include "l4_types.h"
96 #include "l4_msg_item.h"
97
98 #include "config.h"
99 #include "cpu_lock.h"
100 #include "ipc_timeout.h"
101 #include "lock_guard.h"
102 #include "logdefs.h"
103 #include "map_util.h"
104 #include "processor.h"
105 #include "timer.h"
106 #include "kdb_ke.h"
107 #include "warn.h"
108
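/**
 * Callback run on a queued sender when its receiver aborts the IPC:
 * dequeue from the receiver's sender list, drop the receiver link and,
 * if this sender is still in IPC, make it ready again.
 */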
109 PUBLIC
110 virtual void
111 Thread::ipc_receiver_aborted()
112 {
113   assert_kdb (receiver());
114
115   sender_dequeue(receiver()->sender_list());
116   receiver()->vcpu_update_state();
117   set_receiver(0);
118
119   if (!(state() & Thread_ipc_in_progress))
120     return;
121
122   state_add_dirty(Thread_ready);
123   sched()->deblock(cpu());
124 }
125
126 /** Receiver-ready callback.  
127     Receivers call this function on waiting senders when they become
128     ready to receive a message from that sender. Senders need to
129     override this interface.
130
131     Class Thread's implementation wakes up the sender if it is still in
132     sender-wait state.
133  */
134 PUBLIC virtual
135 bool
136 Thread::ipc_receiver_ready(Receiver *recv)
137 {
138   if (cpu() == current_cpu())
139     return ipc_local_receiver_ready(recv);
140   else
141     return ipc_remote_receiver_ready(recv);
142 }
143
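/**
 * Rewrite the IPC label stored in the saved send registers.
 * @param todo array of (test_mask, test, del_mask, add_mask) quadruples;
 *        the first quadruple whose test matches the label is applied.
 * @param cnt number of quadruples in @a todo.
 */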
144 PUBLIC virtual
145 void
146 Thread::modify_label(Mword const *todo, int cnt)
147 {
148   assert_kdb (_snd_regs);
149   Mword l = _snd_regs->from_spec();
150   for (int i = 0; i < cnt*4; i += 4)
151     {
152       Mword const test_mask = todo[i];
153       Mword const test      = todo[i+1];
154       if ((l & test_mask) == test)
155         {
156           Mword const del_mask = todo[i+2];
157           Mword const add_mask = todo[i+3];
158
159           l = (l & ~del_mask) | add_mask;
160           _snd_regs->from(l);
161           return;
162         }
163     }
164 }
165
166 PRIVATE inline
167 bool
168 Thread::ipc_local_receiver_ready(Receiver *recv)
169 {
170   assert_kdb (receiver());
171   assert_kdb (receiver() == recv);
172   assert_kdb (receiver() == current());
173
174   if (!(state() & Thread_ipc_in_progress))
175     return false;
176
177   if (!recv->sender_ok(this))
178     return false;
179
180   recv->ipc_init(this);
181
182   state_add_dirty(Thread_ready | Thread_transfer_in_progress);
183
184   sched()->deblock(cpu());
185   sender_dequeue(recv->sender_list());
186   recv->vcpu_update_state();
187
188   // put the receiver to sleep
189   receiver()->state_del_dirty(Thread_ready);
190
191   return true;
192 }
193
194 PRIVATE inline
195 void
196 Thread::snd_regs(Syscall_frame *r)
197 { _snd_regs = r; }
198
199
200 /** Page fault handler.
201     This handler suspends any ongoing IPC, then sets up page-fault IPC.
202     Finally, the ongoing IPC's state (if any) is restored.
203     @param pfa page-fault virtual address
204     @param error_code page-fault error code.
205  */
206 PRIVATE
207 bool
208 Thread::handle_page_fault_pager(Thread_ptr const &_pager,
209                                 Address pfa, Mword error_code,
210                                 L4_msg_tag::Protocol protocol)
211 {
212 #ifndef NDEBUG
213   // do not handle user space page faults from kernel mode if we're
214   // already handling a request
215   if (EXPECT_FALSE(!PF::is_usermode_error(error_code)
216                    && thread_lock()->test() == Thread_lock::Locked))
217     {
218       kdb_ke("Fiasco BUG: page fault, under lock");
219       panic("page fault in locked operation");
220     }
221 #endif
222
223   if (EXPECT_FALSE((state() & Thread_alien)
224                    && !(state() & Thread_ipc_in_progress)))
225     return false;
226
227   Lock_guard<Cpu_lock> guard(&cpu_lock);
228
229   unsigned char rights;
230   Kobject_iface *pager = _pager.ptr(space(), &rights);
231
232   if (!pager)
233     {
234       WARN ("CPU%d: Pager of %lx is invalid (pfa=" L4_PTR_FMT
235             ", errorcode=" L4_PTR_FMT ") to %lx (pc=%lx)\n",
236             current_cpu(), dbg_id(), pfa, error_code,
237             _pager.raw(), regs()->ip());
238
239
240       LOG_TRACE("Page fault invalid pager", "pf", this,
241                 __fmt_page_fault_invalid_pager,
242                 Log_pf_invalid *l = tbe->payload<Log_pf_invalid>();
243                 l->cap_idx = _pager.raw();
244                 l->err     = error_code;
245                 l->pfa     = pfa);
246
247       pager = this; // block on ourselves
248     }
249
250   // set up a register block used as an IPC parameter block for the
251   // page fault IPC
252   Syscall_frame r;
253   Utcb *utcb = this->utcb().access(true);
254
255   // save the UTCB fields affected by PF IPC
256   Pf_msg_utcb_saver saved_utcb_fields(utcb);
257
258
259   utcb->buf_desc = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
260   utcb->buffers[0] = L4_msg_item::map(0).raw();
261   utcb->buffers[1] = L4_fpage::all_spaces().raw();
262
263   utcb->values[0] = PF::addr_to_msgword0 (pfa, error_code);
264   utcb->values[1] = regs()->ip(); //PF::pc_to_msgword1 (regs()->ip(), error_code));
265
266   L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);
267   
268   // This might be a page fault in the midst of a long-message IPC operation.
269   // Save the current IPC state and restore it later.
270   Sender *orig_partner;
271   Syscall_frame *orig_rcv_regs;
272   save_receiver_state (&orig_partner, &orig_rcv_regs);
273
274   Receiver *orig_snd_partner = receiver();
275   Timeout *orig_timeout = _timeout;
276   if (orig_timeout)
277     orig_timeout->reset();
278
279   unsigned orig_ipc_state = state() & Thread_ipc_mask;
280
281   state_del(orig_ipc_state);
282   if (orig_ipc_state)
283     timeout = utcb->xfer;       // in long IPC -- use pagefault timeout
284
285   L4_msg_tag tag(2, 0, 0, protocol);
286
287   r.timeout(timeout);
288   r.tag(tag);
289   r.from(0);
290   r.ref(L4_obj_ref(_pager.raw() << L4_obj_ref::Cap_shift, L4_obj_ref::Ipc_call_ipc));
291   pager->invoke(r.ref(), rights, &r, utcb);
292
293
294   bool success = true;
295
296   if (EXPECT_FALSE(r.tag().has_error()))
297     {
298       if (Config::conservative)
299         {
300           printf(" page fault %s error = 0x%lx\n",
301                  utcb->error.snd_phase() ? "send" : "rcv",
302                  utcb->error.raw());
303           kdb_ke("ipc to pager failed");
304         }
305
306       if (utcb->error.snd_phase()
307           && (utcb->error.error() == L4_error::Not_existent)
308           && PF::is_usermode_error(error_code)
309           && !(state() & Thread_cancel))
310         {
311           success = false;
312         }
313     }
314   else // no error
315     {
316       // If the pager rejects the mapping, it replies -1 in msg.w0
317       if (EXPECT_FALSE (utcb->values[0] == Mword(-1)))
318         success = false;
319     }
320
321   // restore previous IPC state
322
323   saved_utcb_fields.restore(utcb);
324
325   set_receiver(orig_snd_partner);
326   restore_receiver_state(orig_partner, orig_rcv_regs);
327   state_add(orig_ipc_state);
328
329   if (orig_timeout)
330     orig_timeout->set_again(cpu());
331
332   return success;
333 }
334
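/**
 * Check whether @a sender may engage in IPC with this receiver.
 * @return Failed if the receiver is invalid or the sender would have to
 *         wait with a zero timeout, Queued if the sender was enqueued to
 *         wait, Ok if the transfer can start right away.
 */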
335 PRIVATE inline
336 Mword
337 Thread::check_sender(Thread *sender, bool timeout)
338 {
339   if (EXPECT_FALSE(is_invalid()))
340     {
341       sender->utcb().access()->error = L4_error::Not_existent;
342       return Failed;
343     }
344
345   if (EXPECT_FALSE(!sender_ok(sender)))
346     {
347       if (!timeout)
348         {
349           sender->utcb().access()->error = L4_error::Timeout;
350           return Failed;
351         }
352
353       sender->set_receiver(this);
354       sender->sender_enqueue(sender_list(), sender->sched_context()->prio());
355       vcpu_set_irq_pending();
356       return Queued;
357     }
358
359   return Ok;
360 }
361
362
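/**
 * Block in the receive phase until a message arrives, the timeout
 * expires, or the IPC is canceled.  Sets up an IPC_timeout for finite
 * timeouts and cleans it up again after schedule() returns.
 */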
363 PRIVATE inline NEEDS["timer.h"]
364 void Thread::goto_sleep(L4_timeout const &t, Sender *sender, Utcb *utcb)
365 {
366   if (EXPECT_FALSE
367      ((state() & (Thread_receiving | Thread_ipc_in_progress | Thread_cancel))
368       != (Thread_receiving | Thread_ipc_in_progress)))
369     return;
370
371   IPC_timeout timeout;
372
373   if (EXPECT_FALSE(t.is_finite() && !_timeout))
374     {
375
376       state_del_dirty(Thread_ready);
377
378       Unsigned64 tval = t.microsecs(Timer::system_clock(), utcb);
379
380       if (EXPECT_TRUE((tval != 0)))
381         {
382           set_timeout(&timeout);
383           timeout.set(tval, cpu());
384         }
385       else // timeout already hit
386         state_change_dirty(~Thread_ipc_in_progress, Thread_ready);
387
388     }
389   else
390     {
391       if (EXPECT_TRUE(t.is_never()))
392         state_del_dirty(Thread_ready);
393       else
394         state_change_dirty(~Thread_ipc_in_progress, Thread_ready);
395     }
396
397   if (sender == this)
398     switch_sched(sched());
399
400   schedule();
401
402   if (EXPECT_FALSE((long)_timeout))
403     {
404       timeout.reset();
405       set_timeout(0);
406     }
407
408   assert_kdb (state() & Thread_ready);
409 }
410
411
412
413
414 /**
415  * @pre cpu_lock must be held
416  */
417 PRIVATE inline NEEDS["logdefs.h"]
418 unsigned
419 Thread::handshake_receiver(Thread *partner, L4_timeout snd_t)
420 {
421   assert_kdb(cpu_lock.test());
422
423   switch (__builtin_expect(partner->check_sender(this, !snd_t.is_zero()), Ok))
424     {
425     case Failed:
426       return Failed;
427     case Queued:
428       state_add_dirty(Thread_send_in_progress | Thread_ipc_in_progress);
429       return Queued;
430     default:
431       return Ok;
432     }
433 }
434
435
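/**
 * Make @a receiver runnable again after a successful transfer: clear its
 * IPC-receive state and add Thread_ready.  (The delayed-IPC real-time
 * extension below is currently disabled.)
 */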
436 PRIVATE inline
437 void
438 Thread::wake_receiver(Thread *receiver)
439 {
440   // If neither IPC partner is delayed, just update the receiver's state
441   if (1) // rt:EXPECT_TRUE(!((state() | receiver->state()) & Thread_delayed_ipc)))
442     {
443       receiver->state_change_dirty(~(Thread_ipc_receiving_mask
444                                      | Thread_ipc_in_progress),
445                                    Thread_ready);
446       return;
447     }
448
449   // Critical section if either IPC partner is delayed until its next period
450   assert_kdb (cpu_lock.test());
451 #if 0 // rt ext
452   // Sender has no receive phase and deadline timeout already hit
453   if ( (state() & (Thread_receiving |
454                    Thread_delayed_deadline | Thread_delayed_ipc)) ==
455       Thread_delayed_ipc)
456     {
457       state_change_dirty (~Thread_delayed_ipc, 0);
458       switch_sched (sched_context()->next());
459       _deadline_timeout.set (Timer::system_clock() + period(), cpu());
460     }
461
462   // Receiver's deadline timeout already hit
463   if ( (receiver->state() & (Thread_delayed_deadline |
464                              Thread_delayed_ipc) ==
465                              Thread_delayed_ipc))
466     {
467       receiver->state_change_dirty (~Thread_delayed_ipc, 0);
468       receiver->switch_sched (receiver->sched_context()->next());
469       receiver->_deadline_timeout.set (Timer::system_clock() +
470                                        receiver->period(), receiver->cpu());
471     }
472 #endif
473   receiver->state_change_dirty(~(Thread_ipc_mask | Thread_delayed_ipc), Thread_ready);
474 }
475
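/**
 * Store @a e as this sender's IPC error and the corresponding
 * receive-phase error in @a rcv's UTCB.
 */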
476 PRIVATE inline
477 void
478 Thread::set_ipc_error(L4_error const &e, Thread *rcv)
479 {
480   utcb().access()->error = e;
481   rcv->utcb().access()->error = L4_error(e, L4_error::Rcv);
482 }
483
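/**
 * Send phase of do_ipc(): handshake with the partner (locally or via a
 * cross-CPU request), optionally block until the partner is ready,
 * transfer the message, and wake up or switch to the partner.
 * @return true if the send phase finished without error.
 */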
484 PRIVATE inline NEEDS [Thread::do_send_wait]
485 bool
486 Thread::do_ipc_send(L4_msg_tag const &tag, Thread *partner,
487                     bool have_receive,
488                     L4_timeout_pair t, Syscall_frame *regs,
489                     bool *do_switch, unsigned char rights)
490 {
491   unsigned result;
492
493   state_add_dirty(Thread_send_in_progress);
494   set_ipc_send_rights(rights);
495
496   if (EXPECT_FALSE(partner->cpu() != current_cpu()) ||
497       ((result = handshake_receiver(partner, t.snd)) == Failed
498        && partner->drq_pending()))
499     {
500       *do_switch = false;
501       result = remote_handshake_receiver(tag, partner, have_receive, t.snd,
502                                          regs, rights);
503     }
504
505   if (EXPECT_FALSE(result & Queued))
506     {
507       L4_timeout snd_t;
508       if (result & Receive_in_progress)
509         snd_t = L4_timeout::Never;
510       else
511         snd_t = t.snd;
512
513       // set _snd_regs; the IPC may become remote while we are waiting
514       snd_regs(regs);
515
516       if (!do_send_wait(partner, snd_t))
517         return false;
518     }
519   else if (EXPECT_FALSE(result == Failed))
520     {
521       state_del_dirty(Thread_ipc_sending_mask
522                       | Thread_transfer_in_progress
523                       | Thread_ipc_in_progress);
524       return false;
525     }
526
527   // Case 1: The handshake told us it was Ok
528   // Case 2: The send_wait told us it had finished w/o error
529
530   // in the X-CPU IPC case the IPC has already been finished here
531   if (EXPECT_FALSE(partner->cpu() != current_cpu()
532                    || (!(state() & Thread_send_in_progress))))
533     {
534       state_del_dirty(Thread_ipc_sending_mask | Thread_transfer_in_progress);
535       return true;
536     }
537
538   assert_kdb (!(state() & Thread_polling));
539
540   partner->ipc_init(this);
541
542   // we can reset the receiver's timeout here
543   // ping-pong with timeouts will profit from it, because
544   // it will require much less sorting overhead
545   // if we don't reset the timeout, it is very likely
546   // that the receiver's timeout is still in the timeout queue
547   partner->reset_timeout();
548
549   bool success = transfer_msg(tag, partner, regs, rights);
550
551   if (success && this->partner() == partner)
552     partner->set_caller(this, rights);
553
554   if (!tag.do_switch() || partner->state() & Thread_suspended)
555     *do_switch = false;
556
557   if (EXPECT_FALSE(!success || !have_receive))
558     {
559       bool do_direct_switch = false;
560       // make the ipc partner ready if still engaged in ipc with us
561       if (partner->in_ipc(this))
562         {
563           wake_receiver(partner);
564           do_direct_switch = *do_switch;
565         }
566
567       if (do_direct_switch)
568         check (!switch_exec_locked(partner, Context::Not_Helping));
569       else if (partner->sched()->deblock(current_cpu(), sched(), true))
570         switch_to_locked(partner);
571
572       state_del(Thread_ipc_sending_mask
573                 | Thread_transfer_in_progress
574                 | Thread_ipc_in_progress);
575
576       return success;
577     }
578
579   // possible preemption point
580
581   if (EXPECT_TRUE(!partner->in_ipc(this)))
582     {
583       state_del(Thread_ipc_sending_mask
584                 | Thread_transfer_in_progress
585                 | Thread_ipc_in_progress);
586       sender_dequeue(partner->sender_list());
587       partner->vcpu_update_state();
588       utcb().access()->error = L4_error::Aborted;
589       return false;
590     }
591
592   wake_receiver(partner);
593   prepare_receive_dirty_2();
594   return true;
595 }
596
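/**
 * Set the receive-phase error code if the receive did not complete:
 * R_aborted or R_canceled when Thread_cancel is set, R_timeout otherwise.
 */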
597 PRIVATE inline NOEXPORT
598 void
599 Thread::handle_abnormal_termination(Syscall_frame *regs)
600 {
601   if (EXPECT_TRUE (!(state() & Thread_ipc_receiving_mask)))
602     return;
603
604   Utcb *utcb = this->utcb().access(true);
605   // the IPC has not been finished.  could be timeout or cancel
606   // XXX should only modify the error-code part of the status code
607
608   if (EXPECT_FALSE(state() & Thread_cancel))
609     {
610       // we've presumably been reset!
611       if (state() & Thread_transfer_in_progress)
612         regs->tag(commit_error(utcb, L4_error::R_aborted, regs->tag()));
613       else
614         regs->tag(commit_error(utcb, L4_error::R_canceled, regs->tag()));
615     }
616   else
617     regs->tag(commit_error(utcb, L4_error::R_timeout, regs->tag()));
618 }
619
620
621 /**
622  * Send and/or receive an IPC message.
623  * Block until the operation completes or the timeout hits.
624  * @param partner the receiver of our message
625  * @param t a timeout specifier
626  * @param regs sender's IPC registers
627  * @pre cpu_lock must be held
628  * The IPC result is reported via the message tag in @a regs.
629  */
630 PUBLIC
631 void
632 Thread::do_ipc(L4_msg_tag const &tag, bool have_send, Thread *partner,
633                bool have_receive, Sender *sender,
634                L4_timeout_pair t, Syscall_frame *regs,
635                unsigned char rights)
636 {
637   assert_kdb (cpu_lock.test());
638   assert_kdb (this == current());
639
640   bool do_switch = true;
641   //LOG_MSG_3VAL(this, "ipc", (Mword) partner, (Mword) sender, cpu());
642   assert_kdb (!(state() & Thread_ipc_sending_mask));
643
644   prepare_receive_dirty_1(sender, have_receive ? regs : 0);
645
646   if (have_send)
647     {
648       assert_kdb(!in_sender_list());
649       bool ok = do_ipc_send(tag, partner, have_receive, t, regs, &do_switch, rights);
650       if (EXPECT_FALSE(!ok))
651         {
652           regs->tag(L4_msg_tag(0, 0, L4_msg_tag::Error, 0));
653           assert_kdb (!in_sender_list());
654           return;
655         }
656
657       if (!have_receive)
658         {
659           regs->tag(L4_msg_tag(0,0,0,0));
660           assert_kdb (!in_sender_list());
661           return;
662         }
663     }
664   else
665     {
666       assert_kdb (have_receive);
667       prepare_receive_dirty_2();
668     }
669
670   assert_kdb (!in_sender_list());
671   assert_kdb (!(state() & Thread_ipc_sending_mask));
672
673   while (EXPECT_TRUE
674          ((state() & (Thread_receiving | Thread_ipc_in_progress | Thread_cancel))
675           == (Thread_receiving | Thread_ipc_in_progress)) )
676     {
677       Sender *next = 0;
678
679       if (EXPECT_FALSE((long)sender_list()->head()))
680         {
681           if (sender) // closed wait
682             {
683               if (sender->in_sender_list()
684                   && this == sender->receiver()
685                   && sender->ipc_receiver_ready(this))
686                 next = sender;
687             }
688           else // open wait
689             {
690
691               next = Sender::cast(sender_list()->head());
692
693               assert_kdb (next->in_sender_list());
694
695               if (!next->ipc_receiver_ready(this)) 
696                 {
697                   next->sender_dequeue_head(sender_list());
698                   vcpu_update_state();
699                   Proc::preemption_point();
700                   continue;
701                 }
702             }
703         }
704
705       assert_kdb (cpu_lock.test());
706
707       // XXX: I'm not sure that EXPECT_FALSE is right here
708       if (EXPECT_FALSE((long) next))
709         {
710
711           assert_kdb (!(state() & Thread_ipc_in_progress)
712                  || !(state() & Thread_ready));
713
714           // maybe switch_exec should return a bool to avoid testing the
715           // state twice
716           if (have_send) 
717             {
718               assert_kdb (partner);
719               assert_kdb (partner->sched());
720             }
721           /* do_switch == false for xCPU */
722           if (EXPECT_TRUE(have_send && do_switch
723                           && (partner->state() & Thread_ready)
724                           && (next->sender_prio() <= partner->sched()->prio())))
725             switch_exec_schedule_locked(partner,  Context::Not_Helping);
726           else
727             {
728               if (have_send && partner->cpu() == cpu()
729                   && (partner->state() & Thread_ready))
730                 partner->sched()->deblock(cpu());
731               schedule();
732             }
733
734           assert_kdb (state() & Thread_ready);
735         }
736       else
737         {
738           if (EXPECT_TRUE(have_send && partner->cpu() == cpu()
739                           && (partner->state() & Thread_ready)))
740             {
741               have_send = false;
742               if (do_switch)
743                 {
744                   switch_exec_locked(partner,  Context::Not_Helping);
745                   // We have to retry if there are possible senders in our
746                   // sender queue, because a sender from a remote CPU may
747                   // have been enqueued in handle_drq, in switch_exec_locked
748                   continue;
749                 }
750               else
751                 partner->sched()->deblock(cpu());
752             }
753
754           goto_sleep(t.rcv, sender, utcb().access(true));
755           have_send = false;
756           // LOG_MSG_3VAL(this, "ipcrw", Mword(sender), state(), 0);
757         }
758     }
759
760   assert_kdb (!(state() & Thread_ipc_sending_mask));
761
762   // if the receive operation was canceled/finished before we 
763   // switched to the old receiver, finish the send
764   if (have_send && partner->cpu() == cpu()
765       && (partner->state() & Thread_ready))
766     {
767       if (do_switch && EXPECT_TRUE(partner != this))
768         switch_exec_schedule_locked(partner,  Context::Not_Helping);
769       else
770         partner->sched()->deblock(cpu());
771     }
772
773   // fast out if ipc is already finished
774   if (EXPECT_TRUE((state() & ~(Thread_transfer_in_progress | Thread_fpu_owner|Thread_cancel)) == Thread_ready))
775     {
776       state_del(Thread_transfer_in_progress);
777       return;
778     }
779   assert_kdb (!(state() & (Thread_ipc_sending_mask)));
780
781   // abnormal termination?
782   handle_abnormal_termination(regs);
783
784   state_del(Thread_ipc_mask);
785 }
786
787
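/**
 * Copy the message described by @a tag from this sender's UTCB to
 * @a receiver's UTCB and fill in the receiver's syscall frame (tag and
 * label).
 * @return true if the UTCB transfer succeeded.
 */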
788 PRIVATE inline NEEDS ["map_util.h", Thread::copy_utcb_to]
789 bool
790 Thread::transfer_msg(L4_msg_tag tag, Thread *receiver,
791                      Syscall_frame *sender_regs, unsigned char rights)
792 {
793   Syscall_frame* dst_regs = receiver->rcv_regs();
794
795   bool success = copy_utcb_to(tag, receiver, rights);
796   tag.set_error(!success);
797   dst_regs->tag(tag);
798   dst_regs->from(sender_regs->from_spec());
799   return success;
800 }
801
802
803
804 IMPLEMENT inline
805 Buf_utcb_saver::Buf_utcb_saver(const Utcb *u)
806 {
807   buf_desc = u->buf_desc;
808   buf[0] = u->buffers[0];
809   buf[1] = u->buffers[1];
810 }
811
812 IMPLEMENT inline
813 void
814 Buf_utcb_saver::restore(Utcb *u)
815 {
816   u->buf_desc = buf_desc;
817   u->buffers[0] = buf[0];
818   u->buffers[1] = buf[1];
819 }
820
821 IMPLEMENT inline
822 Pf_msg_utcb_saver::Pf_msg_utcb_saver(Utcb const *u) : Buf_utcb_saver(u)
823 {
824   msg[0] = u->values[0];
825   msg[1] = u->values[1];
826 }
827
828 IMPLEMENT inline
829 void
830 Pf_msg_utcb_saver::restore(Utcb *u)
831 {
832   Buf_utcb_saver::restore(u);
833   u->values[0] = msg[0];
834   u->values[1] = msg[1];
835 }
836
837
838 /**
839  * \pre must run with local IRQs disabled (CPU lock held)
840  * to ensure that the handler does not disappear in the meantime.
841  */
842 PRIVATE
843 bool
844 Thread::exception(Kobject_iface *handler, Trap_state *ts, Mword rights)
845 {
846   Syscall_frame r;
847   L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);
848
849   CNT_EXC_IPC;
850
851   void *old_utcb_handler = _utcb_handler;
852   _utcb_handler = ts;
853
854   // fill registers for IPC
855   Utcb *utcb = this->utcb().access(true);
856   Buf_utcb_saver saved_state(utcb);
857
858   utcb->buf_desc = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
859   utcb->buffers[0] = L4_msg_item::map(0).raw();
860   utcb->buffers[1] = L4_fpage::all_spaces().raw();
861
862   // clear regs
863   L4_msg_tag tag(L4_exception_ipc::Msg_size, 0, L4_msg_tag::Transfer_fpu,
864                  L4_msg_tag::Label_exception);
865
866   r.tag(tag);
867   r.timeout(timeout);
868   r.from(0);
869   r.ref(L4_obj_ref(_exc_handler.raw() << L4_obj_ref::Cap_shift, L4_obj_ref::Ipc_call_ipc));
870   spill_user_state();
871   handler->invoke(r.ref(), rights, &r, utcb);
872   fill_user_state();
873
874   saved_state.restore(utcb);
875
876   if (EXPECT_FALSE(r.tag().has_error()))
877     {
878       if (Config::conservative)
879         {
880           printf(" exception fault %s error = 0x%lx\n",
881                  utcb->error.snd_phase() ? "send" : "rcv",
882                  utcb->error.raw());
883           kdb_ke("ipc to pager failed");
884         }
885
886       state_del(Thread_in_exception);
887     }
888    else if (r.tag().proto() == L4_msg_tag::Label_allow_syscall)
889      state_add(Thread_dis_alien);
890
891   // restore original utcb_handler
892   _utcb_handler = old_utcb_handler;
893
894   // FIXME: handle a non-existent pager properly
895   // for now, just ignore any errors
896   return 1;
897 }
898
899 /* return 1 if the exception could be handled,
900  * return 0 if not; in that case the caller of send_exception halts the thread
901  */
902 PUBLIC inline NEEDS["task.h", "trap_state.h",
903                     Thread::fast_return_to_user,
904                     Thread::save_fpu_state_to_utcb]
905 int
906 Thread::send_exception(Trap_state *ts)
907 {
908   assert(cpu_lock.test());
909
910   Vcpu_state *vcpu = vcpu_state().access();
911
912   if (vcpu_exceptions_enabled(vcpu))
913     {
914       // do not reflect debug exceptions to the VCPU but handle them in
915       // Fiasco
916       if (EXPECT_FALSE(ts->is_debug_exception()
917                        && !(vcpu->state & Vcpu_state::F_debug_exc)))
918         return 0;
919
920       if (_exc_cont.valid())
921         return 1;
922       vcpu_enter_kernel_mode(vcpu);
923       spill_user_state();
924       LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
925           Vcpu_log *l = tbe->payload<Vcpu_log>();
926           l->type = 2;
927           l->state = vcpu->_saved_state;
928           l->ip = ts->ip();
929           l->sp = ts->sp();
930           l->trap = ts->trapno();
931           l->err = ts->error();
932           l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
933           );
934       memcpy(&vcpu->_ts, ts, sizeof(Trap_state));
935       save_fpu_state_to_utcb(ts, utcb().access());
936       fast_return_to_user(vcpu->_entry_ip, vcpu->_sp, vcpu_state().usr().get());
937     }
938
939   // local IRQs must be disabled because we dereference a Thread_ptr
940   if (EXPECT_FALSE(_exc_handler.is_kernel()))
941     return 0;
942
943   if (!send_exception_arch(ts))
944     return 0; // do not send exception
945
946   unsigned char rights = 0;
947   Kobject_iface *pager = _exc_handler.ptr(space(), &rights);
948
949   if (EXPECT_FALSE(!pager))
950     {
951       /* no pager (anymore), just ignore the exception, return success */
952       LOG_TRACE("Exception invalid handler", "exc", this,
953                 __fmt_exception_invalid_handler,
954                 Log_exc_invalid *l = tbe->payload<Log_exc_invalid>();
955                 l->cap_idx = _exc_handler.raw());
956       if (EXPECT_FALSE(space() == sigma0_task))
957         {
958           WARNX(Error, "Sigma0 raised an exception --> HALT\n");
959           panic("...");
960         }
961
962       pager = this; // block on ourselves
963     }
964
965   state_change(~Thread_cancel, Thread_in_exception);
966
967   return exception(pager, ts, rights);
968 }
969
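/**
 * Handle receive buffers that ask for a local capability ID instead of a
 * mapping: if sender and receiver share a space, the send flexpage is
 * handed over verbatim; if the referenced object is already local to the
 * receiver's space, its object ID and rights are handed over.
 * @return true if no real mapping is needed.
 */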
970 PRIVATE static
971 bool
972 Thread::try_transfer_local_id(L4_buf_iter::Item const *const buf,
973                               L4_fpage sfp, Mword *rcv_word, Thread* snd,
974                               Thread *rcv)
975 {
976   if (buf->b.is_rcv_id())
977     {
978       if (snd->space() == rcv->space())
979         {
980           rcv_word[-2] |= 6;
981           rcv_word[-1] = sfp.raw();
982           return true;
983         }
984       else
985         {
986           unsigned char rights = 0;
987           Obj_space::Capability cap = snd->space()->obj_space()->lookup(sfp.obj_index());
988           Kobject_iface *o = cap.obj();
989           rights = cap.rights();
990           if (EXPECT_TRUE(o && o->is_local(rcv->space())))
991             {
992               rcv_word[-2] |= 4;
993               rcv_word[-1] = o->obj_id() | Mword(rights);
994               return true;
995             }
996         }
997     }
998   return false;
999 }
1000
1001
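/**
 * Transfer the typed (map) items of a message: match each send item
 * against the receiver's memory, I/O, or object buffers and either hand
 * over a local ID (see try_transfer_local_id()) or establish a real
 * mapping via fpage_map().  Mismatched or exhausted buffers yield an
 * Overflow error for both partners.
 */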
1002 PRIVATE static
1003 bool
1004 Thread::transfer_msg_items(L4_msg_tag const &tag, Thread* snd, Utcb *snd_utcb,
1005                            Thread *rcv, Utcb *rcv_utcb,
1006                            unsigned char rights)
1007 {
1008   // LOG_MSG_3VAL(current(), "map bd=", rcv_utcb->buf_desc.raw(), 0, 0);
1009   L4_buf_iter mem_buffer(rcv_utcb, rcv_utcb->buf_desc.mem());
1010   L4_buf_iter io_buffer(rcv_utcb, rcv_utcb->buf_desc.io());
1011   L4_buf_iter obj_buffer(rcv_utcb, rcv_utcb->buf_desc.obj());
1012   L4_snd_item_iter snd_item(snd_utcb, tag.words());
1013   register int items = tag.items();
1014   Mword *rcv_word = rcv_utcb->values + tag.words();
1015
1016   // XXX: damn X-CPU state modification
1017   // snd->prepare_long_ipc(rcv);
1018   Reap_list rl;
1019
1020   for (;items > 0 && snd_item.more();)
1021     {
1022       if (EXPECT_FALSE(!snd_item.next()))
1023         {
1024           snd->set_ipc_error(L4_error::Overflow, rcv);
1025           return false;
1026         }
1027
1028       L4_snd_item_iter::Item const *const item = snd_item.get();
1029
1030       if (item->b.is_void())
1031         { // XXX: not sure if void fpages are needed
1032           // skip send item and current rcv_buffer
1033           --items;
1034           continue;
1035         }
1036
1037       L4_buf_iter *buf_iter = 0;
1038
1039       switch (item->b.type())
1040         {
1041         case L4_msg_item::Map:
1042           switch (L4_fpage(item->d).type())
1043             {
1044             case L4_fpage::Memory: buf_iter = &mem_buffer; break;
1045             case L4_fpage::Io:     buf_iter = &io_buffer; break;
1046             case L4_fpage::Obj:    buf_iter = &obj_buffer; break;
1047             default: break;
1048             }
1049           break;
1050         default:
1051           break;
1052         }
1053
1054       if (EXPECT_FALSE(!buf_iter))
1055         {
1056           // LOG_MSG_3VAL(snd, "lIPCm0", 0, 0, 0);
1057           snd->set_ipc_error(L4_error::Overflow, rcv);
1058           return false;
1059         }
1060
1061       L4_buf_iter::Item const *const buf = buf_iter->get();
1062
1063       if (EXPECT_FALSE(buf->b.is_void() || buf->b.type() != item->b.type()))
1064         {
1065           // LOG_MSG_3VAL(snd, "lIPCm1", buf->b.raw(), item->b.raw(), 0);
1066           snd->set_ipc_error(L4_error::Overflow, rcv);
1067           return false;
1068         }
1069
1070         {
1071           assert_kdb (item->b.type() == L4_msg_item::Map);
1072           L4_fpage sfp(item->d);
1073           *rcv_word = (item->b.raw() & ~0x0ff7) | (sfp.raw() & 0x0ff0);
1074
1075           rcv_word += 2;
1076
1077           if (!try_transfer_local_id(buf, sfp, rcv_word, snd, rcv))
1078             {
1079               // we need to do a real mapping
1080
1081               // diminish when sending via restricted ipc gates
1082               if (sfp.type() == L4_fpage::Obj)
1083                 sfp.mask_rights(L4_fpage::Rights(rights | L4_fpage::RX));
1084
1085               L4_error err = fpage_map(snd->space(), sfp,
1086                   rcv->space(), L4_fpage(buf->d), item->b.raw(), &rl);
1087
1088               if (EXPECT_FALSE(!err.ok()))
1089                 {
1090                   snd->set_ipc_error(err, rcv);
1091                   return false;
1092                 }
1093             }
1094         }
1095
1096       --items;
1097
1098       if (!item->b.compund())
1099         buf_iter->next();
1100     }
1101
1102   if (EXPECT_FALSE(items))
1103     {
1104       snd->set_ipc_error(L4_error::Overflow, rcv);
1105       return false;
1106     }
1107
1108   return true;
1109 }
1110
1111
1112 /**
1113  * \pre Runs on the sender CPU
1114  */
1115 PRIVATE inline NEEDS[Thread::do_remote_abort_send]
1116 bool
1117 Thread::abort_send(L4_error const &e, Thread *partner)
1118 {
1119   state_del_dirty(Thread_send_in_progress | Thread_polling | Thread_ipc_in_progress
1120                   | Thread_transfer_in_progress);
1121
1122   if (_timeout && _timeout->is_set())
1123     _timeout->reset();
1124
1125   set_timeout(0);
1126
1127   if (partner->cpu() == current_cpu())
1128     {
1129       if (in_sender_list())
1130         {
1131           sender_dequeue(partner->sender_list());
1132           partner->vcpu_update_state();
1133         }
1134
1135       utcb().access()->error = e;
1136       return true;
1137     }
1138
1139   return do_remote_abort_send(e, partner);
1140 }
1141
1142
1143
1144 /**
1145  * \pre Runs on the sender CPU
1146  */
1147 PRIVATE inline
1148 bool
1149 Thread::do_send_wait(Thread *partner, L4_timeout snd_t)
1150 {
1151   state_add_dirty(Thread_polling);
1152
1153   IPC_timeout timeout;
1154
1155   if (EXPECT_FALSE(snd_t.is_finite()))
1156     {
1157       Unsigned64 tval = snd_t.microsecs(Timer::system_clock(), utcb().access(true));
1158       // Zero timeout or timeout expired already -- give up
1159       if (tval == 0)
1160         return abort_send(L4_error::Timeout, partner);
1161
1162       set_timeout(&timeout);
1163       timeout.set(tval, cpu());
1164     }
1165
1166   while (1)
1167     {
1168       if ((state() & (Thread_ipc_in_progress | Thread_polling
1169                       | Thread_cancel | Thread_transfer_in_progress))
1170            == (Thread_ipc_in_progress | Thread_polling))
1171         {
1172           state_del_dirty(Thread_ready);
1173           schedule();
1174         }
1175
1176       // ipc handshake bit is set
1177       if ((state() & (Thread_transfer_in_progress | Thread_receiving
1178                       | Thread_ipc_in_progress))
1179           != Thread_ipc_in_progress)
1180         break;
1181
1182       if (EXPECT_FALSE(state() & Thread_cancel))
1183         return abort_send(L4_error::Canceled, partner);
1184
1185       // FIXME: existence check
1186 #if 0
1187       if (EXPECT_FALSE(0 && partner->is_invalid()))
1188         {
1189           state_del_dirty(Thread_send_in_progress | Thread_polling
1190               | Thread_ipc_in_progress | Thread_transfer_in_progress);
1191
1192           if (_timeout && _timeout->is_set())
1193             _timeout->reset();
1194
1195           set_timeout(0);
1196
1197           utcb().access()->error = L4_error::Not_existent;
1198           return false;
1199         }
1200 #endif
1201
1202       // Make sure we're really still in IPC
1203       assert_kdb (state() & Thread_ipc_in_progress);
1204
1205       state_add_dirty(Thread_polling);
1206     }
1207
1208   state_del_dirty(Thread_polling);
1209
1210   if (EXPECT_FALSE((state() & (Thread_send_in_progress | Thread_cancel))
1211         == (Thread_send_in_progress | Thread_cancel)))
1212     return abort_send(L4_error::Canceled, partner);
1213
1214   // reset is only a simple dequeueing operation from a doubly
1215   // linked list, so we don't need an extra preemption point for this
1216
1217   if (EXPECT_FALSE(timeout.has_hit() && (state() & (Thread_send_in_progress
1218                                | Thread_ipc_in_progress)) ==
1219       Thread_send_in_progress))
1220     return abort_send(L4_error::Timeout, partner);
1221
1222   timeout.reset();
1223   set_timeout(0);
1224
1225   return true;
1226 }
1227
1228
1229 //---------------------------------------------------------------------
1230 IMPLEMENTATION [!mp]:
1231
1232 PRIVATE inline
1233 void
1234 Thread::set_ipc_send_rights(unsigned char)
1235 {}
1236
1237 PRIVATE inline NEEDS ["l4_types.h"]
1238 unsigned
1239 Thread::remote_handshake_receiver(L4_msg_tag const &, Thread *,
1240                                   bool, L4_timeout, Syscall_frame *, unsigned char)
1241 {
1242   kdb_ke("Remote IPC in UP kernel");
1243   return Failed;
1244 }
1245
1246 PRIVATE inline
1247 bool
1248 Thread::ipc_remote_receiver_ready(Receiver *)
1249 { kdb_ke("Remote IPC in UP kernel"); return false; }
1250
1251
1252 PRIVATE inline
1253 bool
1254 Thread::do_remote_abort_send(L4_error const &, Thread *)
1255 { kdb_ke("Remote abort send on UP kernel"); return false; }
1256
1257 //---------------------------------------------------------------------
1258 INTERFACE [mp]:
1259
1260 EXTENSION class Thread
1261 {
1262 private:
1263   unsigned char _ipc_send_rights;
1264 };
1265
1266 struct Ipc_remote_request;
1267
1268 struct Ipc_remote_request
1269 {
1270   L4_msg_tag tag;
1271   Thread *partner;
1272   Syscall_frame *regs;
1273   unsigned char rights;
1274   bool timeout;
1275   bool have_rcv;
1276
1277   unsigned result;
1278 };
1279
1280 struct Ready_queue_request;
1281
1282 struct Ready_queue_request
1283 {
1284   Thread *thread;
1285   Mword state_add;
1286   Mword state_del;
1287
1288   enum Result { Done, Wrong_cpu, Not_existent };
1289   Result result;
1290 };
1291
1292 //---------------------------------------------------------------------
1293 IMPLEMENTATION [mp]:
1294
1295
1296 PRIVATE inline
1297 void
1298 Thread::set_ipc_send_rights(unsigned char c)
1299 {
1300   _ipc_send_rights = c;
1301 }
1302
1303
1304 PRIVATE inline
1305 bool
1306 Thread::do_remote_abort_send(L4_error const &e, Thread *partner)
1307 {
1308   if (partner->Receiver::abort_send(current_thread()))
1309     return true;
1310
1311   utcb().access()->error = e;
1312   schedule_if(handle_drq());
1313   return false;
1314 }
1315
1316 /**
1317  * Cross-CPU receiver-ready path.
1318  * Runs on the receiver CPU in the context of recv.
1319  * The 'this' pointer is the sender.
1320  */
1321 PRIVATE inline
1322 bool
1323 Thread::ipc_remote_receiver_ready(Receiver *recv)
1324 {
1325   //printf(" remote ready: %x.%x \n", id().task(), id().lthread());
1326   //LOG_MSG_3VAL(this, "recvr", Mword(recv), 0, 0);
1327   assert_kdb (recv->cpu() == current_cpu());
1328
1329   recv->ipc_init(this);
1330
1331   Syscall_frame *regs = _snd_regs;
1332
1333   recv->vcpu_disable_irqs();
1334   //printf("  transfer to %p\n", recv);
1335   bool success = transfer_msg(regs->tag(), nonull_static_cast<Thread*>(recv), regs, _ipc_send_rights);
1336   //printf("  done\n");
1337   regs->tag(L4_msg_tag(regs->tag(), success ? 0 : L4_msg_tag::Error));
1338   if (success && partner() == nonull_static_cast<Thread*>(recv))
1339     nonull_static_cast<Thread*>(recv)->set_caller(this, _ipc_send_rights);
1340
1341
1342   recv->state_del_dirty(Thread_ipc_receiving_mask | Thread_ipc_in_progress);
1343
1344   // dequeue sender from receiver's sending queue
1345   sender_dequeue(recv->sender_list());
1346   recv->vcpu_update_state();
1347
1348   Ready_queue_request rq;
1349   rq.thread = this;
1350   rq.state_add = Thread_transfer_in_progress;
1351   if (Receiver::prepared())
1352     { // same as in Receiver::prepare_receive_dirty_2
1353       rq.state_del = Thread_ipc_sending_mask;
1354       rq.state_add |= Thread_receiving;
1355     }
1356   else
1357     rq.state_del = 0;
1358
1359   drq(handle_remote_ready_enqueue, &rq);
1360   current()->schedule_if(current()->handle_drq());
1361   //printf("  wakeup sender done\n");
1362
1363   return true;
1364 }
1365
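/**
 * Cross-CPU send, executed in DRQ context on the receiver's CPU with
 * 'this' being the sender.  Untyped messages are transferred right away;
 * messages with typed items enqueue the sender so the transfer happens
 * later in ordinary thread context.
 */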
1366 PRIVATE inline NOEXPORT
1367 bool
1368 Thread::remote_ipc_send(Context *src, Ipc_remote_request *rq)
1369 {
1370   (void)src;
1371   //LOG_MSG_3VAL(this, "rse", current_cpu(), (Mword)src, 0);
1372 #if 0
1373   LOG_MSG_3VAL(this, "rsend", (Mword)src, 0, 0);
1374   printf("CPU[%u]: remote IPC send ...\n"
1375          "  partner=%p [%u]\n"
1376          "  sender =%p [%u] regs=%p\n"
1377          "  timeout=%u\n",
1378          current_cpu(),
1379          rq->partner, rq->partner->cpu(),
1380          src, src->cpu(),
1381          rq->regs,
1382          rq->timeout);
1383 #endif
1384   rq->result = Ok;
1385
1386   switch (__builtin_expect(rq->partner->check_sender(this, rq->timeout), Ok))
1387     {
1388     case Failed:
1389       rq->result = Failed;
1390       return false;
1391     case Queued:
1392       rq->result = Queued;
1393       return false;
1394     default:
1395       break;
1396     }
1397
1398   // trigger the remote_ipc_receiver_ready path, because we may need to grab
1399   // locks and this is forbidden in a DRQ handler. So transfer the IPC in the
1400   // usual thread code. However, this induces an overhead of two extra IPIs.
1401   if (rq->tag.items())
1402     {
1403       set_receiver(rq->partner);
1404       sender_enqueue(rq->partner->sender_list(), sched_context()->prio());
1405       rq->partner->vcpu_set_irq_pending();
1406
1407       //LOG_MSG_3VAL(rq->partner, "pull", dbg_id(), 0, 0);
1408       rq->result = Queued | Receive_in_progress;
1409       rq->partner->state_add_dirty(Thread_ready);
1410       rq->partner->sched()->deblock(current_cpu());
1411       return true;
1412     }
1413   rq->partner->vcpu_disable_irqs();
1414   bool success = transfer_msg(rq->tag, rq->partner, rq->regs, _ipc_send_rights);
1415   rq->result = success ? Ok : Failed;
1416
1417   if (success && partner() == rq->partner)
1418     rq->partner->set_caller(this, _ipc_send_rights);
1419
1420   rq->partner->state_change_dirty(~(Thread_ipc_receiving_mask | Thread_ipc_in_progress), Thread_ready);
1421   // hm, should be done by lazy queueing: rq->partner->ready_enqueue();
1422   return true;
1423 }
1424
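/**
 * DRQ handler executed on the partner's CPU; forwards to the requesting
 * sender's remote_ipc_send().
 */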
1425 PRIVATE static
1426 unsigned
1427 Thread::handle_remote_ipc_send(Drq *src, Context *, void *_rq)
1428 {
1429   Ipc_remote_request *rq = (Ipc_remote_request*)_rq;
1430   bool r = nonull_static_cast<Thread*>(src->context())->remote_ipc_send(src->context(), rq);
1431   //LOG_MSG_3VAL(src, "rse<", current_cpu(), (Mword)src, r);
1432   return r ? Drq::Need_resched : 0;
1433 }
1434
1435
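/**
 * DRQ handler that applies the requested state changes to @a self and
 * makes it ready again; used by ipc_remote_receiver_ready() to wake the
 * remote sender.
 */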
1436 PRIVATE static
1437 unsigned
1438 Thread::handle_remote_ready_enqueue(Drq *, Context *self, void *_rq)
1439 {
1440   Ready_queue_request *rq = (Ready_queue_request*)_rq;
1441   Context *c = self;
1442   //LOG_MSG_3VAL(current(), "rre", rq->state_add, rq->state_del, c->state());
1443
1444   c->state_add_dirty(rq->state_add);
1445   c->state_del_dirty(rq->state_del);
1446   rq->result = Ready_queue_request::Done;
1447
1448   if (EXPECT_FALSE(c->state() & Thread_ready))
1449     return Drq::Need_resched;
1450
1451   c->state_add_dirty(Thread_ready);
1452   // hm, should be done by our lazy queueing: c->ready_enqueue();
1453   return Drq::Need_resched;
1454 }
1455
1456
1457
1458
1459 /**
1460  * \pre Runs on the sender CPU
1461  */
1462 PRIVATE //inline NEEDS ["mp_request.h"]
1463 unsigned
1464 Thread::remote_handshake_receiver(L4_msg_tag const &tag, Thread *partner,
1465                                   bool have_receive,
1466                                   L4_timeout snd_t, Syscall_frame *regs,
1467                                   unsigned char rights)
1468 {
1469   // Flag that there must be no switch in the receive path.
1470   // This flag also prevents the receive path from accessing
1471   // the thread state of a remote sender.
1472   Ipc_remote_request rq;
1473   rq.tag = tag;
1474   rq.have_rcv = have_receive;
1475   rq.partner = partner;
1476   rq.timeout = !snd_t.is_zero();
1477   rq.regs = regs;
1478   rq.rights = rights;
1479   _snd_regs = regs;
1480
1481   set_receiver(partner);
1482
1483   state_add_dirty(Thread_send_in_progress | Thread_ipc_in_progress);
1484
1485   partner->drq(handle_remote_ipc_send, &rq,
1486                remote_prepare_receive);
1487
1488
1489   return rq.result;
1490 }
1491
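/**
 * Completion step of the remote handshake, applied to the sender's
 * context: unless the sender was queued, clear Thread_send_in_progress
 * and, if the send succeeded and a receive phase follows, prepare it.
 */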
1492 PRIVATE static
1493 unsigned
1494 Thread::remote_prepare_receive(Drq *src, Context *, void *arg)
1495 {
1496   Context *c = src->context();
1497   Ipc_remote_request *rq = (Ipc_remote_request*)arg;
1498   //printf("CPU[%2u:%p]: remote_prepare_receive (err=%x)\n", current_cpu(), c, rq->err.error());
1499
1500   if (EXPECT_FALSE(rq->result & Queued))
1501     return 0;
1502
1503   c->state_del(Thread_send_in_progress);
1504   if (EXPECT_FALSE((rq->result & Failed) || !rq->have_rcv))
1505     return 0;
1506
1507   Thread *t = nonull_static_cast<Thread*>(c);
1508   t->prepare_receive_dirty_2();
1509   return 0;
1510 }
1511
1512 //---------------------------------------------------------------------------
1513 IMPLEMENTATION [debug]:
1514
1515 IMPLEMENT
1516 unsigned
1517 Thread::log_fmt_pf_invalid(Tb_entry *e, int max, char *buf)
1518 {
1519   Log_pf_invalid *l = e->payload<Log_pf_invalid>();
1520   return snprintf(buf, max, "InvCap C:%lx pfa=%lx err=%lx", l->cap_idx, l->pfa, l->err);
1521 }
1522
1523 IMPLEMENT
1524 unsigned
1525 Thread::log_fmt_exc_invalid(Tb_entry *e, int max, char *buf)
1526 {
1527   Log_exc_invalid *l = e->payload<Log_exc_invalid>();
1528   return snprintf(buf, max, "InvCap C:%lx", l->cap_idx);
1529 }