3 #include "l4_buf_iter.h"
// Trace-buffer payload logged when an exception IPC targets an invalid handler.
18 struct Log_exc_invalid
// Result codes for the sender/receiver handshake (check_sender() and friends).
23 enum Check_sender_result
// Flag bit: the partner has already entered its receive phase.
27 Receive_in_progress = 4,
// Syscall frame of this thread while it acts as an IPC sender.
31 Syscall_frame *_snd_regs;
// Captures the UTCB buffer descriptor and buffer registers so nested IPC
// (e.g. page-fault IPC) can restore them afterwards; see restore().
37 Buf_utcb_saver(Utcb const *u);
38 void restore(Utcb *u);
45 * Save critical contents of UTCB during nested IPC.
// Extends Buf_utcb_saver: additionally saves message words 0 and 1, which
// page-fault IPC overwrites (see handle_page_fault_pager() below).
47 class Pf_msg_utcb_saver : public Buf_utcb_saver
50 Pf_msg_utcb_saver(Utcb const *u);
51 void restore(Utcb *u);
56 // ------------------------------------------------------------------------
// Debug extension: trace-buffer formatters exported under fixed assembler
// names so the tracing macros below can reference them by symbol.
61 EXTENSION class Thread
64 static unsigned log_fmt_pf_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_page_fault_invalid_pager");
65 static unsigned log_fmt_exc_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_exception_invalid_handler");
68 // ------------------------------------------------------------------------
71 // IPC setup, and handling of ``short IPC'' and page-fault IPC
73 // IDEAS for enhancing this implementation:
75 // Volkmar has suggested a possible optimization for
76 // short-flexpage-to-long-message-buffer transfers: Currently, we have
77 // to resort to long IPC in that case because the message buffer might
78 // contain a receive-flexpage option. An easy optimization would be
79 // to cache the receive-flexpage option in the TCB for that case.
80 // This would save us the long-IPC setup because we wouldn't have to
81 // touch the receiver's user memory in that case. Volkmar argues that
82 // cases like that are quite common -- for example, imagine a pager
83 // which at the same time is also a server for ``normal'' requests.
85 // The handling of cancel and timeout conditions could be improved as
86 // follows: Cancel and Timeout should not reset the ipc_in_progress
87 // flag. Instead, they should just set and/or reset a flag of their
88 // own that is checked every time an (IPC) system call wants to go to
89 // sleep. That would mean that IPCs that do not block are not
90 // cancelled or aborted.
93 #include <cstdlib> // panic()
96 #include "l4_msg_item.h"
100 #include "ipc_timeout.h"
101 #include "lock_guard.h"
103 #include "map_util.h"
104 #include "processor.h"
// Called on a queued sender when its receiver aborts the IPC: dequeue
// ourselves from the receiver's sender list and, if our IPC is still in
// progress, make ourselves runnable again.
110 Thread::ipc_receiver_aborted()
112 assert_kdb (receiver());
114 sender_dequeue(receiver()->sender_list());
// Keep the receiver's vCPU irq-pending state consistent with its queue.
115 receiver()->vcpu_update_state();
// Nothing to wake if the IPC was already finished or cancelled.
118 if (!(state() & Thread_ipc_in_progress))
121 state_add_dirty(Thread_ready);
122 sched()->deblock(cpu());
125 /** Receiver-ready callback.
126 Receivers make sure to call this function on waiting senders when
127 they get ready to receive a message from that sender. Senders need
128 to overwrite this interface.
130 Class Thread's implementation wakes up the sender if it is still in
135 Thread::ipc_receiver_ready(Receiver *recv)
// Same-CPU handover is done directly; the cross-CPU case goes through
// the DRQ-based remote path (ipc_remote_receiver_ready()).
137 if (cpu() == current_cpu())
138 return ipc_local_receiver_ready(recv);
140 return ipc_remote_receiver_ready(recv);
// Rewrite the pending send label according to a rule table. 'todo' holds
// 'cnt' quadruples of (test_mask, test, del_mask, add_mask): when the
// label masked with test_mask equals test, del_mask bits are cleared and
// add_mask bits are set.
145 Thread::modify_label(Mword const *todo, int cnt)
147 assert_kdb (_snd_regs);
148 Mword l = _snd_regs->from_spec();
// Each rule occupies four consecutive Mwords in 'todo'.
149 for (int i = 0; i < cnt*4; i += 4)
151 Mword const test_mask = todo[i];
152 Mword const test = todo[i+1];
153 if ((l & test_mask) == test)
155 Mword const del_mask = todo[i+2];
156 Mword const add_mask = todo[i+3];
158 l = (l & ~del_mask) | add_mask;
// Same-CPU receiver-ready path: if the receiver can accept us now, start
// the transfer, dequeue ourselves from its sender list and become ready;
// the receiver itself is put to sleep for the duration of the transfer.
167 Thread::ipc_local_receiver_ready(Receiver *recv)
169 assert_kdb (receiver());
170 assert_kdb (receiver() == recv)
171 assert_kdb (receiver() == current())
// Give up silently if our IPC was cancelled/finished in the meantime.
173 if (!(state() & Thread_ipc_in_progress))
176 if (!recv->sender_ok(this))
179 recv->ipc_init(this);
181 state_add_dirty(Thread_ready | Thread_transfer_in_progress);
183 sched()->deblock(cpu());
184 sender_dequeue(recv->sender_list());
185 recv->vcpu_update_state();
187 // put receiver into sleep
188 receiver()->state_del_dirty(Thread_ready);
// Setter for the sender-side syscall frame (_snd_regs); body not visible here.
195 Thread::snd_regs(Syscall_frame *r)
199 /** Page fault handler.
200 This handler suspends any ongoing IPC, then sets up page-fault IPC.
201 Finally, the ongoing IPC's state (if any) is restored.
202 @param pfa page-fault virtual address
203 @param error_code page-fault error code.
207 Thread::handle_page_fault_pager(Thread_ptr const &_pager,
208 Address pfa, Mword error_code,
209 L4_msg_tag::Protocol protocol)
212 // do not handle user space page faults from kernel mode if we're
213 // already handling a request
214 if (EXPECT_FALSE(!PF::is_usermode_error(error_code)
215 && thread_lock()->test() == Thread_lock::Locked))
217 kdb_ke("Fiasco BUG: page fault, under lock");
218 panic("page fault in locked operation");
// Alien threads that are not inside an IPC must not fault transparently.
222 if (EXPECT_FALSE((state() & Thread_alien)
223 && !(state() & Thread_ipc_in_progress)))
// IRQs stay disabled while we dereference the pager Thread_ptr.
226 Lock_guard<Cpu_lock> guard(&cpu_lock);
228 unsigned char rights;
229 Kobject_iface *pager = _pager.ptr(space(), &rights);
233 WARN ("CPU%d: Pager of %lx is invalid (pfa=" L4_PTR_FMT
234 ", errorcode=" L4_PTR_FMT ") to %lx (pc=%lx)\n",
235 current_cpu(), dbg_id(), pfa, error_code,
236 _pager.raw(), regs()->ip());
239 LOG_TRACE("Page fault invalid pager", "pf", this,
240 __fmt_page_fault_invalid_pager,
241 Log_pf_invalid *l = tbe->payload<Log_pf_invalid>();
242 l->cap_idx = _pager.raw();
246 pager = this; // block on ourselves
249 // set up a register block used as an IPC parameter block for the
252 Utcb *utcb = access_utcb();
254 // save the UTCB fields affected by PF IPC
255 Pf_msg_utcb_saver saved_utcb_fields(utcb);
// Advertise a wide-open receive window so the pager can map anywhere.
258 utcb->buf_desc = L4_buf_desc(0,0,0,0,L4_buf_desc::Inherit_fpu);
259 utcb->buffers[0] = L4_msg_item::map(0).raw();
260 utcb->buffers[1] = L4_fpage::all_spaces().raw();
// Message words: encoded fault address/error, then faulting PC.
262 utcb->values[0] = PF::addr_to_msgword0 (pfa, error_code);
263 utcb->values[1] = regs()->ip(); //PF::pc_to_msgword1 (regs()->ip(), error_code));
265 L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);
267 // This might be a page fault in midst of a long-message IPC operation.
268 // Save the current IPC state and restore it later.
269 Sender *orig_partner;
270 Syscall_frame *orig_rcv_regs;
271 save_receiver_state (&orig_partner, &orig_rcv_regs);
273 Receiver *orig_snd_partner = receiver();
274 Timeout *orig_timeout = _timeout;
// NOTE(review): no null check on orig_timeout is visible in this view —
// presumably a guard exists in the elided lines; verify before changing.
276 orig_timeout->reset();
278 unsigned orig_ipc_state = state() & Thread_ipc_mask;
280 state_del(orig_ipc_state);
282 timeout = utcb->xfer; // in long IPC -- use pagefault timeout
284 L4_msg_tag tag(2, 0, 0, protocol);
// Perform a call-style IPC to the pager capability.
289 r.ref(L4_obj_ref(_pager.raw() << L4_obj_ref::Cap_shift, L4_obj_ref::Ipc_call_ipc));
290 pager->invoke(r.ref(), rights, &r, utcb);
295 if (EXPECT_FALSE(r.tag().has_error()))
297 if (Config::conservative)
299 printf(" page fault %s error = 0x%lx\n",
300 utcb->error.snd_phase() ? "send" : "rcv",
302 kdb_ke("ipc to pager failed");
// A vanished pager during the send phase of a user-mode fault is treated
// specially (unless the IPC was cancelled).
305 if (utcb->error.snd_phase()
306 && (utcb->error.error() == L4_error::Not_existent)
307 && PF::is_usermode_error(error_code)
308 && !(state() & Thread_cancel))
315 // If the pager rejects the mapping, it replies -1 in msg.w0
316 if (EXPECT_FALSE (utcb->values[0] == Mword(-1)))
320 // restore previous IPC state
322 saved_utcb_fields.restore(utcb);
324 set_receiver(orig_snd_partner);
325 restore_receiver_state(orig_partner, orig_rcv_regs);
326 state_add(orig_ipc_state);
// Re-arm the suspended IPC timeout that we reset above.
329 orig_timeout->set_again(cpu());
// NOTE(review): fragment — the enclosing function header is not visible in
// this view. Looks like the vCPU pending-IRQ path: enter kernel mode, mark
// pending IRQs, run a zero-timeout IPC and upcall to the vCPU entry point.
332 if (virqs && vcpu_irqs_pending())
334 vcpu_enter_kernel_mode();
335 vcpu_state()->_saved_state |= Vcpu_state::F_irqs;
// Zero send/receive timeouts: poll-style IPC, must not block here.
336 do_ipc(L4_msg_tag(), 0, 0, true, 0,
337 L4_timeout_pair(L4_timeout::Zero, L4_timeout::Zero),
338 &vcpu_state()->_ipc_regs, 7);
339 vcpu_state()->_ts.set_ipc_upcall();
340 fast_return_to_user(vcpu_state()->_entry_ip, vcpu_state()->_sp);
348 /** L4 IPC system call.
349 This is the `normal'' version of the IPC system call. It usually only
350 gets called if ipc_short_cut() has failed.
351 @param regs system-call arguments.
353 IMPLEMENT inline NOEXPORT ALWAYS_INLINE
357 assert_kdb (!(state() & Thread_drq_ready));
359 Syscall_frame *f = this->regs();
// Look up the invoked capability from the syscall frame and dispatch to
// the kernel object; an unresolvable capability yields Not_existent.
361 Obj_cap obj = f->ref();
362 Utcb *utcb = access_utcb();
363 // printf("sys_invoke_object(f=%p, obj=%x)\n", f, f->obj_ref());
364 unsigned char rights;
365 Kobject_iface *o = obj.deref(&rights);
367 if (EXPECT_TRUE(o!=0))
368 o->invoke(obj, rights, f, utcb);
371 f->tag(commit_error(utcb, L4_error::Not_existent));
// Assembly-callable wrapper around sys_ipc() (fragment; signature elided).
377 // Don't allow interrupts before we've got a call frame with a return
378 // address on our stack, so that we can tweak the return address for
379 // sysenter + sys_ex_regs to the iret path.
381 current_thread()->sys_ipc();
383 assert_kdb (!(current()->state() &
384 (Thread_delayed_deadline | Thread_delayed_ipc)));
386 // If we return with a modified return address, we must not be interrupted
// Fast-path IPC entry wrapper (fragment; body largely elided in this view).
393 ipc_short_cut_wrapper()
395 register Thread *const ct = current_thread();
398 // If we return with a modified return address, we must not be interrupted
// Receiver-side admission check for an incoming sender. Returns a
// Check_sender_result: fails with Not_existent for an invalid receiver,
// with Timeout when the receiver is not ready and no (or zero) send
// timeout applies; otherwise enqueues the sender on our sender list.
404 Thread::check_sender(Thread *sender, bool timeout)
406 if (EXPECT_FALSE(is_invalid()))
408 sender->access_utcb()->error = L4_error::Not_existent;
412 if (EXPECT_FALSE(!sender_ok(sender)))
416 sender->access_utcb()->error = L4_error::Timeout;
420 sender->set_receiver(this);
// Priority-sorted enqueue; also signal the vCPU that a sender is pending.
421 sender->sender_enqueue(sender_list(), sender->sched_context()->prio());
422 vcpu_set_irq_pending();
// Block the current thread for the receive phase according to timeout 't':
// arm an IPC timeout for finite timeouts, sleep forever for Never, or wake
// immediately when the timeout is zero/already expired or the IPC was
// cancelled/finished.
431 void Thread::goto_sleep(L4_timeout const &t, Sender *sender, Utcb *utcb)
// Only sleep while we are genuinely still receiving and not cancelled.
434 ((state() & (Thread_receiving | Thread_ipc_in_progress | Thread_cancel))
435 != (Thread_receiving | Thread_ipc_in_progress)))
440 if (EXPECT_FALSE(t.is_finite() && !_timeout))
443 state_del_dirty(Thread_ready);
// Absolute expiry computed from the UTCB-encoded relative timeout.
445 Unsigned64 tval = t.microsecs(Timer::system_clock(), utcb);
447 if (EXPECT_TRUE((tval != 0)))
449 set_timeout(&timeout);
450 timeout.set(tval, cpu());
452 else // timeout already hit
453 state_change_dirty(~Thread_ipc_in_progress, Thread_ready);
458 if (EXPECT_TRUE(t.is_never()))
459 state_del_dirty(Thread_ready);
461 state_change_dirty(~Thread_ipc_in_progress, Thread_ready);
465 switch_sched(sched());
// Disarm any timeout that is still registered after waking up.
469 if (EXPECT_FALSE((long)_timeout))
475 assert_kdb (state() & Thread_ready);
482 * @pre cpu_lock must be held
484 PRIVATE inline NEEDS["logdefs.h"]
// Sender-side handshake with a same-CPU receiver: dispatch on the
// receiver's check_sender() verdict; a zero send timeout means "don't wait".
486 Thread::handshake_receiver(Thread *partner, L4_timeout snd_t)
488 assert_kdb(cpu_lock.test());
490 switch (__builtin_expect(partner->check_sender(this, !snd_t.is_zero()), Ok))
495 state_add_dirty(Thread_send_in_progress | Thread_ipc_in_progress);
// Wake an IPC receiver after a completed transfer. The common (non-RT)
// path just flips the receiver's state; the code below it handles the
// delayed-IPC/deadline cases of the periodic scheduling mode.
505 Thread::wake_receiver(Thread *receiver)
507 // If neither IPC partner is delayed, just update the receiver's state
508 if (1) // rt:EXPECT_TRUE(!((state() | receiver->state()) & Thread_delayed_ipc)))
510 receiver->state_change_dirty(~(Thread_ipc_receiving_mask
511 | Thread_ipc_in_progress),
516 // Critical section if either IPC partner is delayed until its next period
517 assert_kdb (cpu_lock.test());
519 // Sender has no receive phase and deadline timeout already hit
520 if ( (state() & (Thread_receiving |
521 Thread_delayed_deadline | Thread_delayed_ipc)) ==
524 state_change_dirty (~Thread_delayed_ipc, 0);
525 switch_sched (sched_context()->next());
526 _deadline_timeout.set (Timer::system_clock() + period(), cpu());
529 // Receiver's deadline timeout already hit
// FIXME(review): operator-precedence bug — `==` binds tighter than `&`,
// and the closing parenthesis after the mask is missing (compare the
// correctly parenthesized sender-side check above). As written this
// evaluates `state() & (mask == X)` instead of `(state() & mask) == X`.
530 if ( (receiver->state() & (Thread_delayed_deadline |
531 Thread_delayed_ipc) ==
534 receiver->state_change_dirty (~Thread_delayed_ipc, 0);
535 receiver->switch_sched (receiver->sched_context()->next());
536 receiver->_deadline_timeout.set (Timer::system_clock() +
537 receiver->period(), receiver->cpu());
540 receiver->state_change_dirty(~(Thread_ipc_mask | Thread_delayed_ipc), Thread_ready);
// Record an IPC error on both partners: 'e' on the sender (this) and the
// corresponding receive-phase variant on the receiver.
545 Thread::set_ipc_error(L4_error const &e, Thread *rcv)
547 access_utcb()->error = e;
548 rcv->access_utcb()->error = L4_error(e, L4_error::Rcv);
551 PRIVATE inline NEEDS [Thread::do_send_wait]
// Send phase of do_ipc(): handshake with the partner (locally or via the
// remote X-CPU path), wait if necessary, transfer the message and wake the
// receiver. Returns false on failure with the error already recorded.
553 Thread::do_ipc_send(L4_msg_tag const &tag, Thread *partner,
555 L4_timeout_pair t, Syscall_frame *regs,
556 bool *dont_switch, unsigned char rights)
560 state_add_dirty(Thread_send_in_progress);
561 set_ipc_send_rights(rights);
// Cross-CPU partner, or a failed local handshake racing with a pending
// DRQ, is handled by the remote handshake path.
563 if (EXPECT_FALSE(partner->cpu() != current_cpu()) ||
564 ((result = handshake_receiver(partner, t.snd)) == Failed
565 && partner->drq_pending()))
568 result = remote_handshake_receiver(tag, partner, have_receive, t.snd,
572 if (EXPECT_FALSE(result & Queued))
// Once the receiver pulls the message itself we must not time out.
575 if (result & Receive_in_progress)
576 snd_t = L4_timeout::Never;
580 // set _snd_regs, we may become a remote IPC while waiting
583 if (!do_send_wait(partner, snd_t))
586 else if (EXPECT_FALSE(result == Failed))
588 state_del_dirty(Thread_ipc_sending_mask
589 | Thread_transfer_in_progress
590 | Thread_ipc_in_progress);
594 // Case 1: The handshake told us it was Ok
595 // Case 2: The send_wait told us it had finished w/o error
597 // in the X-CPU IPC case the IPC has already been finished here
598 if (EXPECT_FALSE(partner->cpu() != current_cpu()
599 || (!(state() & Thread_send_in_progress))))
601 state_del_dirty(Thread_ipc_sending_mask | Thread_transfer_in_progress);
605 assert_kdb (!(state() & Thread_polling));
607 partner->ipc_init(this);
609 // mmh, we can reset the receiver's timeout here:
610 // ping pong with timeouts will profit from it, because
611 // it will require much less sorting overhead;
612 // if we don't reset the timeout, the possibility is very high
613 // that the receiver timeout is in the timeout queue
614 partner->reset_timeout();
616 bool success = transfer_msg(tag, partner, regs, rights);
// Register ourselves as caller for a subsequent reply (call semantics).
618 if (success && this->partner() == partner)
619 partner->set_caller(this, rights);
621 if (!tag.do_switch() || partner->state() & Thread_suspended)
624 // partner locked, i.e. lazy locking (not locked) or we own the lock
625 assert_kdb (!partner->thread_lock()->test()
626 || partner->thread_lock()->lock_owner() == this);
629 if (EXPECT_FALSE(!success || !have_receive))
631 // make the ipc partner ready if still engaged in ipc with us
632 if (partner->in_ipc(this))
634 wake_receiver(partner);
636 partner->thread_lock()->set_switch_hint(SWITCH_ACTIVATE_LOCKEE);
639 partner->thread_lock()->clear_dirty();
641 state_del(Thread_ipc_sending_mask
642 | Thread_transfer_in_progress
643 | Thread_ipc_in_progress);
648 partner->thread_lock()->clear_dirty_dont_switch();
649 // possible preemption point
// After the preemption point, the partner may have left the IPC (abort,
// timeout); in that case dequeue and report Aborted.
651 if (EXPECT_TRUE(!partner->in_ipc(this)))
653 state_del(Thread_ipc_sending_mask
654 | Thread_transfer_in_progress
655 | Thread_ipc_in_progress);
656 sender_dequeue(partner->sender_list());
657 partner->vcpu_update_state();
658 access_utcb()->error = L4_error::Aborted;
662 wake_receiver(partner);
663 prepare_receive_dirty_2();
667 PRIVATE inline NOEXPORT
// Map the state bits of an IPC that did not finish its receive phase to
// the proper receive-side error code (aborted / canceled / timeout).
669 Thread::handle_abnormal_termination(Syscall_frame *regs)
// Nothing to do if the receive phase finished normally.
671 if (EXPECT_TRUE (!(state() & Thread_ipc_receiving_mask)))
674 Utcb *utcb = access_utcb();
675 // the IPC has not been finished. could be timeout or cancel
676 // XXX should only modify the error-code part of the status code
678 if (EXPECT_FALSE((state() & Thread_busy)))
679 regs->tag(commit_error(utcb, L4_error::R_aborted, regs->tag()));
680 else if (EXPECT_FALSE(state() & Thread_cancel))
682 // we've presumably been reset!
683 if (state() & Thread_transfer_in_progress)
684 regs->tag(commit_error(utcb, L4_error::R_aborted, regs->tag()));
686 regs->tag(commit_error(utcb, L4_error::R_canceled, regs->tag()));
689 regs->tag(commit_error(utcb, L4_error::R_timeout, regs->tag()));
694 * Send an IPC message.
695 * Block until we can send the message or the timeout hits.
696 * @param partner the receiver of our message
697 * @param t a timeout specifier
698 * @param regs sender's IPC registers
699 * @pre cpu_lock must be held
700 * @return sender's IPC error code
704 Thread::do_ipc(L4_msg_tag const &tag, bool have_send, Thread *partner,
705 bool have_receive, Sender *sender,
706 L4_timeout_pair t, Syscall_frame *regs,
707 unsigned char rights)
709 assert_kdb (cpu_lock.test());
710 assert_kdb (this == current());
712 bool dont_switch = false;
713 //LOG_MSG_3VAL(this, "ipc", (Mword) partner, (Mword) sender, cpu());
714 assert_kdb (!(state() & Thread_ipc_sending_mask));
// Stage 1 of receive setup must happen before the send phase so a
// combined send+receive is atomic from the partner's point of view.
716 prepare_receive_dirty_1(sender, have_receive ? regs : 0);
720 assert_kdb(!in_sender_list());
721 bool ok = do_ipc_send(tag, partner, have_receive, t, regs, &dont_switch, rights);
722 if (EXPECT_FALSE(!ok))
724 regs->tag(L4_msg_tag(0, 0, L4_msg_tag::Error, 0));
725 assert_kdb (!in_sender_list());
// Send-only IPC finished successfully: report a clean tag.
731 regs->tag(L4_msg_tag(0,0,0,0));
732 assert_kdb (!in_sender_list());
738 assert_kdb (have_receive);
739 prepare_receive_dirty_2();
742 assert_kdb (!in_sender_list());
743 assert_kdb (!(state() & Thread_ipc_sending_mask));
// Only look for senders while we are really in an uncancelled receive.
746 ((state() & (Thread_receiving | Thread_ipc_in_progress | Thread_cancel))
747 == (Thread_receiving | Thread_ipc_in_progress)) )
751 if (EXPECT_FALSE((long)sender_list()->head()))
753 if (sender) // closed wait
755 if (sender->in_sender_list()
756 && this == sender->receiver()
757 && sender->ipc_receiver_ready(this))
// Open wait: service the highest-priority queued sender.
763 next = Sender::cast(sender_list()->head());
765 assert_kdb (next->in_sender_list());
// A sender that is no longer ready is dropped from the head of the queue.
767 if (!next->ipc_receiver_ready(this))
769 next->sender_dequeue_head(sender_list());
771 Proc::preemption_point();
777 assert_kdb (cpu_lock.test());
779 // XXX: I'm not sure that EXPECT_FALSE is the right choice here
780 if (EXPECT_FALSE((long) next))
783 assert_kdb (!(state() & Thread_ipc_in_progress)
784 || !(state() & Thread_ready));
786 // maybe switch_exec should return a bool to avoid testing the
790 assert_kdb (partner);
791 assert_kdb (partner->sched());
793 /* dont_switch == true for xCPU */
// Donate the CPU to the partner when it does not out-prioritize the
// sender we are about to receive from.
794 if (EXPECT_TRUE(have_send && !dont_switch
795 && (partner->state() & Thread_ready)
796 && (next->sender_prio() <= partner->sched()->prio())))
797 switch_exec_schedule_locked(partner, Context::Not_Helping);
800 if (have_send && partner->cpu() == cpu()
801 && (partner->state() & Thread_ready))
802 partner->sched()->deblock(cpu());
806 assert_kdb (state() & Thread_ready);
810 if (EXPECT_TRUE(have_send && partner->cpu() == cpu()
811 && (partner->state() & Thread_ready)))
816 switch_exec_locked(partner, Context::Not_Helping);
817 // We have to retry if there are possible senders in our
818 // sender queue, because a sender from a remote CPU may
819 // have been enqueued in handle_drq, in switch_exec_locked
823 partner->sched()->deblock(cpu());
// Block for the receive phase according to the receive timeout.
826 goto_sleep(t.rcv, sender, access_utcb());
828 // LOG_MSG_3VAL(this, "ipcrw", Mword(sender), state(), 0);
832 assert_kdb (!(state() & Thread_ipc_sending_mask));
834 // if the receive operation was canceled/finished before we
835 // switched to the old receiver, finish the send
836 if (have_send && partner->cpu() == cpu()
837 && (partner->state() & Thread_ready))
839 if (!dont_switch && EXPECT_TRUE(partner != this))
840 switch_exec_schedule_locked(partner, Context::Not_Helping);
842 partner->sched()->deblock(cpu());
845 // fast out if ipc is already finished
846 if (EXPECT_TRUE((state() & ~(Thread_transfer_in_progress | Thread_fpu_owner|Thread_cancel)) == Thread_ready))
848 state_del(Thread_transfer_in_progress);
851 assert_kdb (!(state() & (Thread_ipc_sending_mask)));
853 // abnormal termination?
854 handle_abnormal_termination(regs);
856 state_del(Thread_ipc_mask);
860 PRIVATE inline NEEDS ["map_util.h", Thread::copy_utcb_to,
861 Thread::unlock_receiver]
// Copy the message (UTCB words and typed items) to the receiver and stamp
// the receiver's syscall frame with the sender label; any copy failure is
// recorded in the tag's error bit.
863 Thread::transfer_msg(L4_msg_tag tag, Thread *receiver,
864 Syscall_frame *sender_regs, unsigned char rights)
866 Syscall_frame* dst_regs = receiver->rcv_regs();
868 bool success = copy_utcb_to(tag, receiver, rights);
869 tag.set_error(!success);
871 dst_regs->from(sender_regs->from_spec());
876 /** Unlock the Receiver locked with ipc_try_lock().
877 If the sender goes to wait for a registered message enable LIPC.
878 @param receiver receiver to unlock
879 @param sender_regs dummy
881 PRIVATE inline NEEDS ["entry_frame.h"]
883 Thread::unlock_receiver(Receiver *receiver, const Syscall_frame*)
885 receiver->ipc_unlock();
// Snapshot the UTCB buffer descriptor and the two buffer registers that
// nested IPC (page-fault / exception IPC) overwrites.
890 Buf_utcb_saver::Buf_utcb_saver(const Utcb *u)
892 buf_desc = u->buf_desc;
893 buf[0] = u->buffers[0];
894 buf[1] = u->buffers[1];
// Write the saved buffer state back into the UTCB.
899 Buf_utcb_saver::restore(Utcb *u)
901 u->buf_desc = buf_desc;
902 u->buffers[0] = buf[0];
903 u->buffers[1] = buf[1];
// Additionally snapshot message words 0 and 1, which page-fault IPC uses
// for the fault address and faulting PC.
907 Pf_msg_utcb_saver::Pf_msg_utcb_saver(Utcb const *u) : Buf_utcb_saver(u)
909 msg[0] = u->values[0];
910 msg[1] = u->values[1];
// Restore buffers (via the base class) plus the two saved message words.
915 Pf_msg_utcb_saver::restore(Utcb *u)
917 Buf_utcb_saver::restore(u);
918 u->values[0] = msg[0];
919 u->values[1] = msg[1];
924 * \pre must run with local IRQs disabled (CPU lock held)
925 * to ensure that handler does not disappear meanwhile.
// Perform exception IPC to 'handler': save the affected UTCB buffer state,
// open a wide receive window, call the handler, then restore state.
929 Thread::exception(Kobject_iface *handler, Trap_state *ts, Mword rights)
932 L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);
936 void *old_utcb_handler = _utcb_handler;
939 // fill registers for IPC
940 Utcb *utcb = access_utcb();
941 Buf_utcb_saver saved_state(utcb);
943 utcb->buf_desc = L4_buf_desc(0,0,0,0,L4_buf_desc::Inherit_fpu);
944 utcb->buffers[0] = L4_msg_item::map(0).raw();
945 utcb->buffers[1] = L4_fpage::all_spaces().raw();
// Exception IPC transfers the FPU state along with the message.
948 L4_msg_tag tag(L4_exception_ipc::Msg_size, 0, L4_msg_tag::Transfer_fpu,
949 L4_msg_tag::Label_exception);
954 r.ref(L4_obj_ref(_exc_handler.raw() << L4_obj_ref::Cap_shift, L4_obj_ref::Ipc_call_ipc));
956 handler->invoke(r.ref(), rights, &r, utcb);
959 saved_state.restore(utcb);
961 if (EXPECT_FALSE(r.tag().has_error()))
963 if (Config::conservative)
965 printf(" exception fault %s error = 0x%lx\n",
966 utcb->error.snd_phase() ? "send" : "rcv",
968 kdb_ke("ipc to pager failed");
971 state_del(Thread_in_exception);
// Handler may grant a single-shot syscall permission to an alien thread.
973 else if (r.tag().proto() == L4_msg_tag::Label_allow_syscall)
974 state_add(Thread_dis_alien);
976 // restore original utcb_handler
977 _utcb_handler = old_utcb_handler;
979 // FIXME: handle not existing pager properly
980 // for now, just ignore any errors
984 /* return 1 if exception could be handled
985 * return 0 if not for send_exception and halt thread
987 PUBLIC inline NEEDS["task.h", "trap_state.h",
988 Thread::fast_return_to_user,
989 Thread::save_fpu_state_to_utcb]
991 Thread::send_exception(Trap_state *ts)
993 assert(cpu_lock.test());
// vCPU mode: reflect the exception directly to the vCPU entry point
// instead of doing exception IPC to a handler thread.
995 if (vcpu_exceptions_enabled())
997 // do not reflect debug exceptions to the VCPU but handle them in
999 if (EXPECT_FALSE(ts->is_debug_exception()
1000 && !(vcpu_state()->state & Vcpu_state::F_debug_exc)))
1003 if (_exc_cont.valid())
1005 vcpu_enter_kernel_mode();
1007 LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
1008 Vcpu_log *l = tbe->payload<Vcpu_log>();
1010 l->state = vcpu_state()->_saved_state;
1013 l->trap = ts->trapno();
1014 l->err = ts->error();
1015 l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
// Hand the trap state and FPU state to the vCPU and upcall into it.
1017 memcpy(&vcpu_state()->_ts, ts, sizeof(Trap_state));
1018 save_fpu_state_to_utcb(ts, access_utcb());
1019 fast_return_to_user(vcpu_state()->_entry_ip, vcpu_state()->_sp);
1022 // local IRQs must be disabled because we dereference a Thread_ptr
1023 if (EXPECT_FALSE(_exc_handler.is_kernel()))
1026 if (!send_exception_arch(ts))
1027 return 0; // do not send exception
1029 unsigned char rights = 0;
1030 Kobject_iface *pager = _exc_handler.ptr(space(), &rights);
1032 if (EXPECT_FALSE(!pager))
1034 /* no pager (anymore), just ignore the exception, return success */
1035 LOG_TRACE("Exception invalid handler", "exc", this,
1036 __fmt_exception_invalid_handler,
1037 Log_exc_invalid *l = tbe->payload<Log_exc_invalid>();
1038 l->cap_idx = _exc_handler.raw());
// Sigma0 must never fault: halt instead of blocking on itself.
1039 if (EXPECT_FALSE(space() == sigma0_task))
1041 WARNX(Error, "Sigma0 raised an exception --> HALT\n");
1045 pager = this; // block on ourselves
1048 state_change(~Thread_cancel, Thread_in_exception);
1050 return exception(pager, ts, rights);
// If the receive buffer asks for a local ID rather than a mapping, try to
// satisfy it: same address space gets the raw flexpage; otherwise look up
// the sender's capability and, if the object is local to the receiver,
// hand over its local object ID with the (possibly diminished) rights.
1055 Thread::try_transfer_local_id(L4_buf_iter::Item const *const buf,
1056 L4_fpage sfp, Mword *rcv_word, Thread* snd,
1059 if (buf->b.is_rcv_id())
1061 if (snd->space() == rcv->space())
1064 rcv_word[-1] = sfp.raw();
1069 unsigned char rights = 0;
1070 Obj_space::Capability cap = snd->space()->obj_space()->lookup(sfp.obj_index());
1071 Kobject_iface *o = cap.obj();
1072 rights = cap.rights();
1073 if (EXPECT_TRUE(o && o->is_local(rcv->space())))
// Pack the local object ID together with the capability rights bits.
1076 rcv_word[-1] = o->obj_id() | Mword(rights);
// Transfer the typed items (map items) of a long IPC: walk the sender's
// send items and match them against the receiver's memory/IO/object
// receive buffers, performing the actual flexpage mappings. On any
// mismatch or exhausted buffer the IPC fails with Overflow.
1087 Thread::transfer_msg_items(L4_msg_tag const &tag, Thread* snd, Utcb *snd_utcb,
1088 Thread *rcv, Utcb *rcv_utcb,
1089 unsigned char rights)
1091 // LOG_MSG_3VAL(current(), "map bd=", rcv_utcb->buf_desc.raw(), 0, 0);
// One receive-buffer iterator per flexpage type, positioned via buf_desc.
1092 L4_buf_iter mem_buffer(rcv_utcb, rcv_utcb->buf_desc.mem());
1093 L4_buf_iter io_buffer(rcv_utcb, rcv_utcb->buf_desc.io());
1094 L4_buf_iter obj_buffer(rcv_utcb, rcv_utcb->buf_desc.obj());
1095 L4_snd_item_iter snd_item(snd_utcb, tag.words());
1096 register int items = tag.items();
// Typed items land in the receiver's UTCB right after the untyped words.
1097 Mword *rcv_word = rcv_utcb->values + tag.words();
1099 // XXX: damn X-CPU state modification
1100 // snd->prepare_long_ipc(rcv);
1103 for (;items > 0 && snd_item.more();)
1105 if (EXPECT_FALSE(!snd_item.next()))
1107 snd->set_ipc_error(L4_error::Overflow, rcv);
1111 L4_snd_item_iter::Item const *const item = snd_item.get();
1113 if (item->b.is_void())
1114 { // XXX: not sure if void fpages are needed
1115 // skip send item and current rcv_buffer
1120 L4_buf_iter *buf_iter = 0;
1122 switch (item->b.type())
1124 case L4_msg_item::Map:
1125 switch (L4_fpage(item->d).type())
1127 case L4_fpage::Memory: buf_iter = &mem_buffer; break;
1128 case L4_fpage::Io: buf_iter = &io_buffer; break;
1129 case L4_fpage::Obj: buf_iter = &obj_buffer; break;
// No suitable receive buffer for this item type: overflow.
1137 if (EXPECT_FALSE(!buf_iter))
1139 // LOG_MSG_3VAL(snd, "lIPCm0", 0, 0, 0);
1140 snd->set_ipc_error(L4_error::Overflow, rcv);
1144 L4_buf_iter::Item const *const buf = buf_iter->get();
1146 if (EXPECT_FALSE(buf->b.is_void() || buf->b.type() != item->b.type()))
1148 // LOG_MSG_3VAL(snd, "lIPCm1", buf->b.raw(), item->b.raw(), 0);
1149 snd->set_ipc_error(L4_error::Overflow, rcv);
1154 assert_kdb (item->b.type() == L4_msg_item::Map);
1155 L4_fpage sfp(item->d);
// Report the map item to the receiver, with the attribute bits merged in.
1156 *rcv_word = (item->b.raw() & ~0x0ff7) | (sfp.raw() & 0x0ff0);
1160 if (!try_transfer_local_id(buf, sfp, rcv_word, snd, rcv))
1162 // we need to do a real mapping
1164 // diminish when sending via restricted ipc gates
1165 if (sfp.type() == L4_fpage::Obj)
1166 sfp.mask_rights(L4_fpage::Rights(rights | L4_fpage::RX));
1168 L4_error err = fpage_map(snd->space(), sfp,
1169 rcv->space(), L4_fpage(buf->d), item->b.raw(), &rl);
1171 if (EXPECT_FALSE(!err.ok()))
1173 snd->set_ipc_error(err, rcv);
// NOTE: compund() is the (misspelled) API name of the compound-bit test.
1181 if (!item->b.compund())
// Leftover items with no more send data means the sender lied about count.
1185 if (EXPECT_FALSE(items))
1187 snd->set_ipc_error(L4_error::Overflow, rcv);
1196 * \pre Runs on the sender CPU
1198 PRIVATE inline NEEDS[Thread::do_remote_abort_send]
// Abort an in-flight send: clear the IPC state bits, disarm any pending
// timeout, dequeue from the partner's sender list (locally, or via a DRQ
// on the partner's CPU) and record error 'e' in our UTCB.
1200 Thread::abort_send(L4_error const &e, Thread *partner)
1202 state_del_dirty(Thread_send_in_progress | Thread_polling | Thread_ipc_in_progress
1203 | Thread_transfer_in_progress);
1205 if (_timeout && _timeout->is_set())
1210 if (partner->cpu() == current_cpu())
1212 if (in_sender_list())
1214 sender_dequeue(partner->sender_list());
1215 partner->vcpu_update_state();
1218 access_utcb()->error = e;
// Cross-CPU case: the dequeue must run on the partner's CPU.
1222 return do_remote_abort_send(e, partner);
1228 * \pre Runs on the sender CPU
// Block until the receiver accepts our message, the send timeout hits,
// or the IPC is cancelled. Returns false (via abort_send) on failure.
1232 Thread::do_send_wait(Thread *partner, L4_timeout snd_t)
1234 state_add_dirty(Thread_polling);
1236 IPC_timeout timeout;
1238 if (EXPECT_FALSE(snd_t.is_finite()))
1240 Unsigned64 tval = snd_t.microsecs(Timer::system_clock(), access_utcb());
1241 // Zero timeout or timeout expired already -- give up
1243 return abort_send(L4_error::Timeout, partner);
1245 set_timeout(&timeout);
1246 timeout.set(tval, cpu());
// Sleep only while we are still polling, uncancelled and not yet in
// transfer; each of those conditions is re-checked after wakeup.
1251 if ((state() & (Thread_ipc_in_progress | Thread_polling
1252 | Thread_cancel | Thread_transfer_in_progress))
1253 == (Thread_ipc_in_progress | Thread_polling))
1255 state_del_dirty(Thread_ready);
1259 // ipc handshake bit is set
1260 if ((state() & (Thread_transfer_in_progress | Thread_receiving
1261 | Thread_ipc_in_progress))
1262 != Thread_ipc_in_progress)
1265 if (EXPECT_FALSE(state() & Thread_cancel))
1266 return abort_send(L4_error::Canceled, partner);
1268 // FIXME: existence check
// NOTE(review): the `0 &&` disables this partner-validity check entirely
// (see the FIXME above) — dead code kept deliberately, it seems.
1270 if (EXPECT_FALSE(0 && partner->is_invalid()))
1272 state_del_dirty(Thread_send_in_progress | Thread_polling
1273 | Thread_ipc_in_progress | Thread_transfer_in_progress);
1275 if (_timeout && _timeout->is_set())
1280 access_utcb()->error = L4_error::Not_existent;
1285 // Make sure we're really still in IPC
1286 assert_kdb (state() & Thread_ipc_in_progress);
1288 state_add_dirty(Thread_polling);
1291 state_del_dirty(Thread_polling);
1293 if (EXPECT_FALSE((state() & (Thread_send_in_progress | Thread_cancel))
1294 == (Thread_send_in_progress | Thread_cancel)))
1295 return abort_send(L4_error::Canceled, partner);
1297 // resetting the timeout is just a simple dequeue operation from a doubly
1298 // linked list, so we don't need an extra preemption point for this
1300 if (EXPECT_FALSE(timeout.has_hit() && (state() & (Thread_send_in_progress
1301 | Thread_ipc_in_progress)) ==
1302 Thread_send_in_progress))
1303 return abort_send(L4_error::Timeout, partner);
1312 //---------------------------------------------------------------------
1313 IMPLEMENTATION [!mp]:
// Uniprocessor stubs: on !mp builds no remote (X-CPU) IPC can occur, so
// these paths must be unreachable; each traps into the kernel debugger.
1317 Thread::set_ipc_send_rights(unsigned char)
1320 PRIVATE inline NEEDS ["l4_types.h"]
1322 Thread::remote_handshake_receiver(L4_msg_tag const &, Thread *,
1323 bool, L4_timeout, Syscall_frame *, unsigned char)
1325 kdb_ke("Remote IPC in UP kernel");
1331 Thread::ipc_remote_receiver_ready(Receiver *)
1332 { kdb_ke("Remote IPC in UP kernel"); return false; }
1337 Thread::do_remote_abort_send(L4_error const &, Thread *)
1338 { kdb_ke("Remote abort send on UP kernel"); return false; }
1340 //---------------------------------------------------------------------
// MP extension: the rights byte must be remembered across the X-CPU
// handshake because the remote transfer happens asynchronously.
1343 EXTENSION class Thread
1346 unsigned char _ipc_send_rights;
1349 struct Ipc_remote_request;
// Parameter block passed through a DRQ for remote send/abort operations.
1351 struct Ipc_remote_request
1355 Syscall_frame *regs;
1356 unsigned char rights;
1363 struct Ready_queue_request;
// Parameter block for waking a remote sender (state bits to add/delete).
1365 struct Ready_queue_request
1371 enum Result { Done, Wrong_cpu, Not_existent };
1375 //---------------------------------------------------------------------
1376 IMPLEMENTATION [mp]:
// Remember the send rights for the asynchronous X-CPU transfer.
1381 Thread::set_ipc_send_rights(unsigned char c)
1383 _ipc_send_rights = c;
// Invoke the scheduler iff 's' is true and no scheduling is in progress.
1388 Thread::schedule_if(bool s)
1390 if (!s || current()->schedule_in_progress())
1393 current()->schedule();
1396 PRIVATE inline NEEDS[Thread::schedule_if]
// Abort a send whose partner lives on another CPU: run the dequeue there
// via a DRQ; the DRQ handler flags rq.tag's error bit when the IPC was
// really cancelled, in which case we record 'e' locally.
1398 Thread::do_remote_abort_send(L4_error const &e, Thread *partner)
1400 Ipc_remote_request rq;
1401 rq.partner = partner;
1402 partner->drq(handle_remote_abort_send, &rq);
1403 if (rq.tag.has_error())
1404 access_utcb()->error = e;
1405 schedule_if(handle_drq());
1406 return !rq.tag.has_error();
1411 * Runs on the receiver CPU in the context of recv.
1412 * The 'this' pointer is the sender.
1414 PRIVATE inline NEEDS[Thread::schedule_if]
1416 Thread::ipc_remote_receiver_ready(Receiver *recv)
1418 //printf(" remote ready: %x.%x \n", id().task(), id().lthread());
1419 //LOG_MSG_3VAL(this, "recvr", Mword(recv), 0, 0);
1420 assert_kdb (recv->cpu() == current_cpu());
1422 recv->ipc_init(this);
// The message was staged into _snd_regs on the sender's CPU.
1424 Syscall_frame *regs = _snd_regs;
1426 recv->vcpu_disable_irqs();
1427 //printf(" transfer to %p\n", recv);
1428 bool success = transfer_msg(regs->tag(), nonull_static_cast<Thread*>(recv), regs, _ipc_send_rights);
1429 //printf(" done\n");
// Propagate success/failure back to the sender through its frame's tag.
1430 regs->tag(L4_msg_tag(regs->tag(), success ? 0 : L4_msg_tag::Error));
1431 if (success && partner() == nonull_static_cast<Thread*>(recv))
1432 nonull_static_cast<Thread*>(recv)->set_caller(this, _ipc_send_rights);
1435 recv->state_del_dirty(Thread_ipc_receiving_mask | Thread_ipc_in_progress);
1437 // dequeue sender from receiver's sending queue
1438 sender_dequeue(recv->sender_list());
1439 recv->vcpu_update_state();
// Wake the (remote) sender via a DRQ on its CPU with the proper state bits.
1441 Ready_queue_request rq;
1443 rq.state_add = Thread_transfer_in_progress;
1444 if (Receiver::prepared())
1445 { // same as in Receiver::prepare_receive_dirty_2
1446 rq.state_del = Thread_ipc_sending_mask;
1447 rq.state_add |= Thread_receiving;
1452 drq(handle_remote_ready_enqueue, &rq);
1453 schedule_if(current()->handle_drq());
1454 //printf(" wakeup sender done\n");
1459 PRIVATE inline NOEXPORT
// DRQ body executed on the partner's CPU: redo the sender handshake there
// and, when possible, transfer the message immediately; item-carrying
// messages are deferred to the receiver-ready path instead.
1461 Thread::remote_ipc_send(Context *src, Ipc_remote_request *rq)
1464 //LOG_MSG_3VAL(this, "rse", current_cpu(), (Mword)src, 0);
1466 LOG_MSG_3VAL(this, "rsend", (Mword)src, 0, 0);
1467 printf("CPU[%u]: remote IPC send ...\n"
1468 "  partner=%p [%u]\n"
1469 "  sender =%p [%u] regs=%p\n"
1472 rq->partner, rq->partner->cpu(),
1479 switch (__builtin_expect(rq->partner->check_sender(this, rq->timeout), Ok))
1482 rq->result = Failed;
1485 rq->result = Queued;
1491 // trigger remote_ipc_receiver_ready path, because we may need to grab locks
1492 // and this is forbidden in a DRQ handler. So transfer the IPC in usual
1493 // thread code. However, this induces an overhead of two extra IPIs.
1494 if (rq->tag.items())
1496 set_receiver(rq->partner);
1497 sender_enqueue(rq->partner->sender_list(), sched_context()->prio());
1498 rq->partner->vcpu_set_irq_pending();
1500 //LOG_MSG_3VAL(rq->partner, "pull", dbg_id(), 0, 0);
1501 rq->result = Queued | Receive_in_progress;
1502 rq->partner->state_add_dirty(Thread_ready);
1503 rq->partner->sched()->deblock(current_cpu());
// Untyped-only message: transfer in place and wake the partner directly.
1506 rq->partner->vcpu_disable_irqs();
1507 bool success = transfer_msg(rq->tag, rq->partner, rq->regs, _ipc_send_rights);
1508 rq->result = success ? Ok : Failed;
1510 if (success && partner() == rq->partner)
1511 rq->partner->set_caller(this, _ipc_send_rights);
1513 rq->partner->state_change_dirty(~(Thread_ipc_receiving_mask | Thread_ipc_in_progress), Thread_ready);
1514 // hm, should be done by lazy queueing: rq->partner->ready_enqueue();
     // DRQ handler trampoline: runs on the partner's CPU; forwards to the
     // sender thread's remote_ipc_send() and translates its bool result
     // into the DRQ reschedule hint.
1520 Thread::handle_remote_ipc_send(Drq *src, Context *, void *_rq)
1522 Ipc_remote_request *rq = (Ipc_remote_request*)_rq;
1523 bool r = nonull_static_cast<Thread*>(src->context())->remote_ipc_send(src->context(), rq);
1524 //LOG_MSG_3VAL(src, "rse<", current_cpu(), (Mword)src, r);
1525 return r ? Drq::Need_resched : 0;
     // DRQ handler: abort a cross-CPU send on the receiver's CPU. If the
     // sender is still queued on the partner's sender list, the IPC never
     // started — flag the tag as error and remove the sender from the list.
1530 Thread::handle_remote_abort_send(Drq *src, Context *, void *_rq)
1532 Ipc_remote_request *rq = (Ipc_remote_request*)_rq;
1533 Thread *sender = nonull_static_cast<Thread*>(src->context());
1534 if (sender->in_sender_list())
1536 // really cancelled IPC
1537 rq->tag.set_error(true);
1538 sender->sender_dequeue(rq->partner->sender_list());
1539 rq->partner->vcpu_update_state();
     // DRQ handler: runs on the sender's CPU after the receiver finished
     // the transfer (see ipc_remote_receiver_ready). Applies the requested
     // state add/del bits to the sender and makes it ready.
1552 Thread::handle_remote_ready_enqueue(Drq *, Context *self, void *_rq)
1554 Ready_queue_request *rq = (Ready_queue_request*)_rq;
1556 //LOG_MSG_3VAL(current(), "rre", rq->state_add, rq->state_del, c->state());
1558 c->state_add_dirty(rq->state_add);
1559 c->state_del_dirty(rq->state_del);
1560 rq->result = Ready_queue_request::Done;
     // Already ready: nothing to enqueue, but the caller may still need to
     // reschedule.
1562 if (EXPECT_FALSE(c->state() & Thread_ready))
1563 return Drq::Need_resched;
1565 c->state_add_dirty(Thread_ready);
1566 // hm, should be done by our lazy queueing: c->ready_enqueue();
1567 return Drq::Need_resched;
     // Initiate a cross-CPU IPC send: fill an Ipc_remote_request and hand
     // it to the partner's CPU as a DRQ (handle_remote_ipc_send); the
     // sender-side completion runs in remote_prepare_receive.
1574 * \pre Runs on the sender CPU
1576 PRIVATE //inline NEEDS ["mp_request.h"]
1578 Thread::remote_handshake_receiver(L4_msg_tag const &tag, Thread *partner,
1580 L4_timeout snd_t, Syscall_frame *regs,
1581 unsigned char rights)
1583 // Flag that there must be no switch in the receive path.
1584 // This flag also prevents the receive path from accessing
1585 // the thread state of a remote sender.
1586 Ipc_remote_request rq;
1588 rq.have_rcv = have_receive;
1589 rq.partner = partner;
     // A zero send timeout means "don't wait": encode that as timeout=false
     // so the remote side fails instead of queueing the sender.
1590 rq.timeout = !snd_t.is_zero();
1595 set_receiver(partner);
1597 state_add_dirty(Thread_send_in_progress | Thread_ipc_in_progress);
     // Enqueue the request on the partner's CPU; remote_prepare_receive is
     // the local callback executed once the DRQ has been answered.
1599 partner->drq(handle_remote_ipc_send, &rq,
1600 remote_prepare_receive);
     // DRQ reply callback: runs on the sender's CPU after the remote send
     // request was processed. Clears the send state and, if the IPC also
     // has a receive phase and the send did not fail, prepares receiving.
1608 Thread::remote_prepare_receive(Drq *src, Context *, void *arg)
1610 Context *c = src->context();
1611 Ipc_remote_request *rq = (Ipc_remote_request*)arg;
1612 //printf("CPU[%2u:%p]: remote_prepare_receive (err=%x)\n", current_cpu(), c, rq->err.error());
     // Sender got queued remotely: the transfer has not happened yet, so
     // keep the send state and bail out here.
1614 if (EXPECT_FALSE(rq->result & Queued))
1617 c->state_del(Thread_send_in_progress);
     // No receive phase requested, or the send failed: done.
1618 if (EXPECT_FALSE((rq->result & Failed) || !rq->have_rcv))
1621 Thread *t = nonull_static_cast<Thread*>(c);
1622 t->prepare_receive_dirty_2();
1626 //---------------------------------------------------------------------------
1627 IMPLEMENTATION [debug]:
     // Trace-buffer formatter for "page fault with invalid pager cap"
     // entries (bound via asm alias, see class extension above). Writes at
     // most 'max' bytes into 'buf'; returns snprintf's would-be length.
1631 Thread::log_fmt_pf_invalid(Tb_entry *e, int max, char *buf)
1633 Log_pf_invalid *l = e->payload<Log_pf_invalid>();
1634 return snprintf(buf, max, "InvCap C:%lx pfa=%lx err=%lx", l->cap_idx, l->pfa, l->err);
     // Trace-buffer formatter for "exception with invalid handler cap"
     // entries; same contract as log_fmt_pf_invalid.
1639 Thread::log_fmt_exc_invalid(Tb_entry *e, int max, char *buf)
1641 Log_exc_invalid *l = e->payload<Log_exc_invalid>();
1642 return snprintf(buf, max, "InvCap C:%lx", l->cap_idx);