3 #include "l4_buf_iter.h"
// Tracebuffer payload for "exception to invalid handler" log entries.
18 struct Log_exc_invalid
// Result codes for Receiver::check_sender(); values are bit-combinable
// (see do_ipc_send, which tests `result & Queued` and `result & Receive_in_progress`).
23 enum Check_sender_result
27 Receive_in_progress = 4,
// Sender-side registers of the IPC currently being transferred (see modify_label).
31 Syscall_frame *_snd_regs;
// Saves/restores the UTCB buffer descriptor and the first two buffer words
// around a nested (in-kernel triggered) IPC.
37 Buf_utcb_saver(Utcb const *u);
38 void restore(Utcb *u);
45 * Save critical contents of UTCB during nested IPC.
// Extends Buf_utcb_saver: additionally preserves values[0..1], which
// page-fault IPC overwrites with fault address and faulting IP.
47 class Pf_msg_utcb_saver : public Buf_utcb_saver
50 Pf_msg_utcb_saver(Utcb const *u);
51 void restore(Utcb *u);
// Debug-build extension: tracebuffer formatters. The asm labels pin the
// symbol names so LOG_TRACE sites can reference them by string
// ("__fmt_page_fault_invalid_pager" / "__fmt_exception_invalid_handler").
61 EXTENSION class Thread
64 static unsigned log_fmt_pf_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_page_fault_invalid_pager");
65 static unsigned log_fmt_exc_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_exception_invalid_handler");
68 // ------------------------------------------------------------------------
71 // IPC setup, and handling of ``short IPC'' and page-fault IPC
73 // IDEAS for enhancing this implementation:
75 // Volkmar has suggested a possible optimization for
76 // short-flexpage-to-long-message-buffer transfers: Currently, we have
77 // to resort to long IPC in that case because the message buffer might
78 // contain a receive-flexpage option. An easy optimization would be
79 // to cache the receive-flexpage option in the TCB for that case.
80 // This would save us the long-IPC setup because we wouldn't have to
81 // touch the receiver's user memory in that case. Volkmar argues that
82 // cases like that are quite common -- for example, imagine a pager
83 // which at the same time is also a server for ``normal'' requests.
85 // The handling of cancel and timeout conditions could be improved as
86 // follows: Cancel and Timeout should not reset the ipc_in_progress
87 // flag. Instead, they should just set and/or reset a flag of their
88 // own that is checked every time an (IPC) system call wants to go to
89 // sleep. That would mean that IPCs that do not block are not
90 // cancelled or aborted.
93 #include <cstdlib> // panic()
96 #include "l4_msg_item.h"
100 #include "ipc_timeout.h"
101 #include "lock_guard.h"
103 #include "map_util.h"
104 #include "processor.h"
// Called on a queued sender when its receiver aborts the rendezvous:
// leave the receiver's sender queue and wake up if still in IPC.
111 Thread::ipc_receiver_aborted()
113 assert_kdb (receiver());
115 sender_dequeue(receiver()->sender_list());
116 receiver()->vcpu_update_state();
// Only make ourselves runnable if we are still engaged in the IPC;
// otherwise the abort is a no-op beyond the dequeue above.
119 if (!(state() & Thread_ipc_in_progress))
122 state_add_dirty(Thread_ready);
123 sched()->deblock(cpu());
126 /** Receiver-ready callback.
127 Receivers make sure to call this function on waiting senders when
128 they get ready to receive a message from that sender. Senders need
129 to overwrite this interface.
131 Class Thread's implementation wakes up the sender if it is still in
// Dispatch on CPU locality: same-CPU handshake is handled directly,
// cross-CPU goes through the DRQ-based remote path.
136 Thread::ipc_receiver_ready(Receiver *recv)
138 if (cpu() == current_cpu())
139 return ipc_local_receiver_ready(recv);
141 return ipc_remote_receiver_ready(recv);
// Rewrite the sender label of the in-flight IPC according to a table of
// `cnt` rules. Each rule is 4 Mwords: {test_mask, test, del_mask, add_mask};
// a rule whose masked label matches `test` clears del_mask bits and sets
// add_mask bits. Presumably all matching rules are applied in order
// (the write-back of `l` to _snd_regs is outside the visible lines -- verify).
146 Thread::modify_label(Mword const *todo, int cnt)
148 assert_kdb (_snd_regs);
149 Mword l = _snd_regs->from_spec();
150 for (int i = 0; i < cnt*4; i += 4)
152 Mword const test_mask = todo[i];
153 Mword const test = todo[i+1];
154 if ((l & test_mask) == test)
156 Mword const del_mask = todo[i+2];
157 Mword const add_mask = todo[i+3];
159 l = (l & ~del_mask) | add_mask;
// Same-CPU receiver-ready path: if this sender is still in IPC and the
// receiver accepts it, initiate the transfer, wake the sender, dequeue
// it from the sender list and block the receiver.
168 Thread::ipc_local_receiver_ready(Receiver *recv)
170 assert_kdb (receiver());
171 assert_kdb (receiver() == recv)
172 assert_kdb (receiver() == current());
174 if (!(state() & Thread_ipc_in_progress))
177 if (!recv->sender_ok(this))
180 recv->ipc_init(this);
// Sender becomes runnable and enters the transfer phase.
182 state_add_dirty(Thread_ready | Thread_transfer_in_progress);
184 sched()->deblock(cpu());
185 sender_dequeue(recv->sender_list());
186 recv->vcpu_update_state();
188 // put receiver into sleep
189 receiver()->state_del_dirty(Thread_ready);
// Setter for _snd_regs (body not visible here -- presumably just assigns).
196 Thread::snd_regs(Syscall_frame *r)
200 /** Page fault handler.
201 This handler suspends any ongoing IPC, then sets up page-fault IPC.
202 Finally, the ongoing IPC's state (if any) is restored.
203 @param pfa page-fault virtual address
204 @param error_code page-fault error code.
208 Thread::handle_page_fault_pager(Thread_ptr const &_pager,
209 Address pfa, Mword error_code,
210 L4_msg_tag::Protocol protocol)
213 // do not handle user space page faults from kernel mode if we're
214 // already handling a request
215 if (EXPECT_FALSE(!PF::is_usermode_error(error_code)
216 && thread_lock()->test() == Thread_lock::Locked))
218 kdb_ke("Fiasco BUG: page fault, under lock");
219 panic("page fault in locked operation");
// Alien threads outside an ongoing IPC get special treatment
// (handling not visible in this view).
223 if (EXPECT_FALSE((state() & Thread_alien)
224 && !(state() & Thread_ipc_in_progress)))
// IRQs stay disabled while we dereference the pager Thread_ptr.
227 Lock_guard<Cpu_lock> guard(&cpu_lock);
229 unsigned char rights;
230 Kobject_iface *pager = _pager.ptr(space(), &rights);
// Invalid pager capability: warn, trace, then block on ourselves below.
234 WARN ("CPU%d: Pager of %lx is invalid (pfa=" L4_PTR_FMT
235 ", errorcode=" L4_PTR_FMT ") to %lx (pc=%lx)\n",
236 current_cpu(), dbg_info()->dbg_id(), pfa, error_code,
237 _pager.raw(), regs()->ip());
240 LOG_TRACE("Page fault invalid pager", "pf", this,
241 __fmt_page_fault_invalid_pager,
242 Log_pf_invalid *l = tbe->payload<Log_pf_invalid>();
243 l->cap_idx = _pager.raw();
247 pager = this; // block on ourselves
250 // set up a register block used as an IPC parameter block for the
253 Utcb *utcb = access_utcb();
255 // save the UTCB fields affected by PF IPC
256 Pf_msg_utcb_saver saved_utcb_fields(utcb);
// Accept a single whole-address-space mapping from the pager.
259 utcb->buf_desc = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
260 utcb->buffers[0] = L4_msg_item::map(0).raw();
261 utcb->buffers[1] = L4_fpage::all_spaces().raw();
// Standard two-word page-fault message: encoded fault address + faulting IP.
263 utcb->values[0] = PF::addr_to_msgword0 (pfa, error_code);
264 utcb->values[1] = regs()->ip(); //PF::pc_to_msgword1 (regs()->ip(), error_code));
266 L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);
268 // This might be a page fault in midst of a long-message IPC operation.
269 // Save the current IPC state and restore it later.
270 Sender *orig_partner;
271 Syscall_frame *orig_rcv_regs;
272 save_receiver_state (&orig_partner, &orig_rcv_regs);
274 Receiver *orig_snd_partner = receiver();
275 Timeout *orig_timeout = _timeout;
// NOTE(review): a null guard for orig_timeout is presumably on the
// non-visible line above -- confirm before relying on this dereference.
277 orig_timeout->reset();
279 unsigned orig_ipc_state = state() & Thread_ipc_mask;
281 state_del(orig_ipc_state);
283 timeout = utcb->xfer; // in long IPC -- use pagefault timeout
285 L4_msg_tag tag(2, 0, 0, protocol);
// Synthesize a call IPC to the pager capability.
290 r.ref(L4_obj_ref(_pager.raw() << L4_obj_ref::Cap_shift, L4_obj_ref::Ipc_call_ipc));
291 pager->invoke(r.ref(), rights, &r, utcb);
296 if (EXPECT_FALSE(r.tag().has_error()))
298 if (Config::conservative)
300 printf(" page fault %s error = 0x%lx\n",
301 utcb->error.snd_phase() ? "send" : "rcv",
303 kdb_ke("ipc to pager failed");
// A vanished pager during the send phase of a user-mode fault is
// tolerated (unless the fault IPC was cancelled).
306 if (utcb->error.snd_phase()
307 && (utcb->error.error() == L4_error::Not_existent)
308 && PF::is_usermode_error(error_code)
309 && !(state() & Thread_cancel))
316 // If the pager rejects the mapping, it replies -1 in msg.w0
317 if (EXPECT_FALSE (utcb->values[0] == Mword(-1)))
321 // restore previous IPC state
323 saved_utcb_fields.restore(utcb);
325 set_receiver(orig_snd_partner);
326 restore_receiver_state(orig_partner, orig_rcv_regs);
327 state_add(orig_ipc_state);
// Re-arm the timeout of the interrupted IPC, if any.
330 orig_timeout->set_again(cpu());
// Receiver-side admission check for an incoming sender. Returns a
// Check_sender_result (error paths set the sender's UTCB error word);
// on the "must wait" path the sender is enqueued by priority and the
// receiver's vCPU gets an IRQ-pending notification.
337 Thread::check_sender(Thread *sender, bool timeout)
339 if (EXPECT_FALSE(is_invalid()))
341 sender->access_utcb()->error = L4_error::Not_existent;
// Receiver not ready and sender specified a zero timeout -> fail at once.
345 if (EXPECT_FALSE(!sender_ok(sender)))
349 sender->access_utcb()->error = L4_error::Timeout;
353 sender->set_receiver(this);
354 sender->sender_enqueue(sender_list(), sender->sched_context()->prio());
355 vcpu_set_irq_pending();
// Block the current thread for the receive phase, honoring the receive
// timeout `t`: never -> sleep unconditionally; finite -> program an IPC
// timeout (or give up immediately if it already expired).
363 PRIVATE inline NEEDS["timer.h"]
364 void Thread::goto_sleep(L4_timeout const &t, Sender *sender, Utcb *utcb)
// Only sleep while still receiving, still in IPC, and not cancelled.
367 ((state() & (Thread_receiving | Thread_ipc_in_progress | Thread_cancel))
368 != (Thread_receiving | Thread_ipc_in_progress)))
373 if (EXPECT_FALSE(t.is_finite() && !_timeout))
376 state_del_dirty(Thread_ready);
// Resolve the relative timeout against the current system clock.
378 Unsigned64 tval = t.microsecs(Timer::system_clock(), utcb);
380 if (EXPECT_TRUE((tval != 0)))
382 set_timeout(&timeout);
383 timeout.set(tval, cpu());
385 else // timeout already hit
386 state_change_dirty(~Thread_ipc_in_progress, Thread_ready);
391 if (EXPECT_TRUE(t.is_never()))
392 state_del_dirty(Thread_ready);
// Zero timeout: do not sleep, abort the receive phase instead.
394 state_change_dirty(~Thread_ipc_in_progress, Thread_ready);
398 switch_sched(sched());
// Tear down a timeout that may still be armed after wakeup.
402 if (EXPECT_FALSE((long)_timeout))
408 assert_kdb (state() & Thread_ready);
415 * @pre cpu_lock must be held
417 PRIVATE inline NEEDS["logdefs.h"]
// Local (same-CPU) send handshake: ask the partner whether we may send;
// a zero send timeout is passed as timeout=false so the partner fails
// fast instead of queueing us.
419 Thread::handshake_receiver(Thread *partner, L4_timeout snd_t)
421 assert_kdb(cpu_lock.test());
423 switch (__builtin_expect(partner->check_sender(this, !snd_t.is_zero()), Ok))
428 state_add_dirty(Thread_send_in_progress | Thread_ipc_in_progress);
// Finish the receiver's IPC: clear its receive-phase state bits and make
// it ready. The delayed-IPC (periodic real-time) path below is currently
// dead code -- the `if (1)` short-circuits it.
438 Thread::wake_receiver(Thread *receiver)
440 // If neither IPC partner is delayed, just update the receiver's state
441 if (1) // rt:EXPECT_TRUE(!((state() | receiver->state()) & Thread_delayed_ipc)))
443 receiver->state_change_dirty(~(Thread_ipc_receiving_mask
444 | Thread_ipc_in_progress),
449 // Critical section if either IPC partner is delayed until its next period
450 assert_kdb (cpu_lock.test());
452 // Sender has no receive phase and deadline timeout already hit
453 if ( (state() & (Thread_receiving |
454 Thread_delayed_deadline | Thread_delayed_ipc)) ==
457 state_change_dirty (~Thread_delayed_ipc, 0);
458 switch_sched (sched_context()->next());
459 _deadline_timeout.set (Timer::system_clock() + period(), cpu());
462 // Receiver's deadline timeout already hit
// NOTE(review): unlike the sender-side check above, the closing paren
// here appears to sit before '=='; since '==' binds tighter than '&',
// the whole mask-compare may be parenthesized wrongly. Verify against
// the full condition (continuation lines not visible here).
463 if ( (receiver->state() & (Thread_delayed_deadline |
464 Thread_delayed_ipc) ==
467 receiver->state_change_dirty (~Thread_delayed_ipc, 0);
468 receiver->switch_sched (receiver->sched_context()->next());
469 receiver->_deadline_timeout.set (Timer::system_clock() +
470 receiver->period(), receiver->cpu());
473 receiver->state_change_dirty(~(Thread_ipc_mask | Thread_delayed_ipc), Thread_ready);
// Record an IPC error on both partners: the sender gets `e` verbatim,
// the receiver gets the same error tagged as receive-phase.
478 Thread::set_ipc_error(L4_error const &e, Thread *rcv)
480 access_utcb()->error = e;
481 rcv->access_utcb()->error = L4_error(e, L4_error::Rcv);
484 PRIVATE inline NEEDS [Thread::do_send_wait]
// Send phase of do_ipc(): handshake with the partner (local or remote),
// possibly wait, transfer the message, wake the receiver, and decide the
// scheduling hand-off (*dont_switch). Returns success of the send phase.
486 Thread::do_ipc_send(L4_msg_tag const &tag, Thread *partner,
488 L4_timeout_pair t, Syscall_frame *regs,
489 bool *dont_switch, unsigned char rights)
493 state_add_dirty(Thread_send_in_progress);
494 set_ipc_send_rights(rights);
// Cross-CPU partner, or a local handshake that raced with a pending DRQ,
// goes through the remote handshake path.
496 if (EXPECT_FALSE(partner->cpu() != current_cpu()) ||
497 ((result = handshake_receiver(partner, t.snd)) == Failed
498 && partner->drq_pending()))
501 result = remote_handshake_receiver(tag, partner, have_receive, t.snd,
505 if (EXPECT_FALSE(result & Queued))
// Remote side already pulls the message: wait without a send timeout.
508 if (result & Receive_in_progress)
509 snd_t = L4_timeout::Never;
513 // set _snd_regs, we may become a remote IPC while waiting
516 if (!do_send_wait(partner, snd_t))
519 else if (EXPECT_FALSE(result == Failed))
521 state_del_dirty(Thread_ipc_sending_mask
522 | Thread_transfer_in_progress
523 | Thread_ipc_in_progress);
527 // Case 1: The handshake told us it was Ok
528 // Case 2: The send_wait told us it had finished w/o error
530 // in The X-CPU IPC case the IPC has been already finished here
531 if (EXPECT_FALSE(partner->cpu() != current_cpu()
532 || (!(state() & Thread_send_in_progress))))
534 state_del_dirty(Thread_ipc_sending_mask | Thread_transfer_in_progress);
538 assert_kdb (!(state() & Thread_polling));
540 partner->ipc_init(this);
542 // mmh, we can reset the receivers timeout
543 // ping pong with timeouts will profit from it, because
544 // it will require much less sorting overhead
545 // if we dont reset the timeout, the possibility is very high
546 // that the receiver timeout is in the timeout queue
547 partner->reset_timeout();
549 bool success = transfer_msg(tag, partner, regs, rights);
// Register ourselves as caller for a later reply only if the partner
// is still bound to us after the transfer.
551 if (success && this->partner() == partner)
552 partner->set_caller(this, rights);
554 if (!tag.do_switch() || partner->state() & Thread_suspended)
557 // partner locked, i.e. lazy locking (not locked) or we own the lock
558 assert_kdb (!partner->thread_lock()->test()
559 || partner->thread_lock()->lock_owner() == this);
562 if (EXPECT_FALSE(!success || !have_receive))
564 // make the ipc partner ready if still engaged in ipc with us
565 if (partner->in_ipc(this))
567 wake_receiver(partner);
569 partner->thread_lock()->set_switch_hint(SWITCH_ACTIVATE_LOCKEE);
572 partner->thread_lock()->clear_dirty();
574 state_del(Thread_ipc_sending_mask
575 | Thread_transfer_in_progress
576 | Thread_ipc_in_progress);
// Unlock without switching -- this is a possible preemption point, so
// re-check afterwards whether the partner is still in IPC with us.
581 partner->thread_lock()->clear_dirty_dont_switch();
582 // possible preemption point
584 if (EXPECT_TRUE(!partner->in_ipc(this)))
586 state_del(Thread_ipc_sending_mask
587 | Thread_transfer_in_progress
588 | Thread_ipc_in_progress);
589 sender_dequeue(partner->sender_list());
590 partner->vcpu_update_state();
591 access_utcb()->error = L4_error::Aborted;
595 wake_receiver(partner);
596 prepare_receive_dirty_2();
600 PRIVATE inline NOEXPORT
// Map an unfinished receive phase onto the proper receive-side error
// code (aborted / canceled / timeout) in the caller's tag.
602 Thread::handle_abnormal_termination(Syscall_frame *regs)
604 if (EXPECT_TRUE (!(state() & Thread_ipc_receiving_mask)))
607 Utcb *utcb = access_utcb();
608 // the IPC has not been finished. could be timeout or cancel
609 // XXX should only modify the error-code part of the status code
611 if (EXPECT_FALSE((state() & Thread_busy)))
612 regs->tag(commit_error(utcb, L4_error::R_aborted, regs->tag()));
613 else if (EXPECT_FALSE(state() & Thread_cancel))
615 // we've presumably been reset!
// A cancel during an ongoing transfer counts as abort, a cancel
// while merely waiting counts as canceled.
616 if (state() & Thread_transfer_in_progress)
617 regs->tag(commit_error(utcb, L4_error::R_aborted, regs->tag()));
619 regs->tag(commit_error(utcb, L4_error::R_canceled, regs->tag()));
622 regs->tag(commit_error(utcb, L4_error::R_timeout, regs->tag()));
627 * Send an IPC message.
628 * Block until we can send the message or the timeout hits.
629 * @param partner the receiver of our message
630 * @param t a timeout specifier
631 * @param regs sender's IPC registers
632 * @pre cpu_lock must be held
633 * @return sender's IPC error code
// Top-level IPC entry: optional send phase (do_ipc_send), then optional
// receive phase (open or closed wait), including waking queued senders,
// scheduling hand-off to the partner, sleeping, and error commit.
637 Thread::do_ipc(L4_msg_tag const &tag, bool have_send, Thread *partner,
638 bool have_receive, Sender *sender,
639 L4_timeout_pair t, Syscall_frame *regs,
640 unsigned char rights)
642 assert_kdb (cpu_lock.test());
643 assert_kdb (this == current());
645 bool dont_switch = false;
646 //LOG_MSG_3VAL(this, "ipc", (Mword) partner, (Mword) sender, cpu());
647 assert_kdb (!(state() & Thread_ipc_sending_mask));
649 prepare_receive_dirty_1(sender, have_receive ? regs : 0);
653 assert_kdb(!in_sender_list());
654 bool ok = do_ipc_send(tag, partner, have_receive, t, regs, &dont_switch, rights);
// Failed send phase: commit an error tag and bail out.
655 if (EXPECT_FALSE(!ok))
657 regs->tag(L4_msg_tag(0, 0, L4_msg_tag::Error, 0));
658 assert_kdb (!in_sender_list());
// Send-only IPC succeeded: commit a clean tag.
664 regs->tag(L4_msg_tag(0,0,0,0));
665 assert_kdb (!in_sender_list());
671 assert_kdb (have_receive);
672 prepare_receive_dirty_2();
675 assert_kdb (!in_sender_list());
676 assert_kdb (!(state() & Thread_ipc_sending_mask));
// Only enter the wait loop while still receiving and not cancelled.
679 ((state() & (Thread_receiving | Thread_ipc_in_progress | Thread_cancel))
680 == (Thread_receiving | Thread_ipc_in_progress)) )
// Senders already queued on us: for a closed wait only the designated
// sender counts; for an open wait pull the queue head.
684 if (EXPECT_FALSE((long)sender_list()->head()))
686 if (sender) // closed wait
688 if (sender->in_sender_list()
689 && this == sender->receiver()
690 && sender->ipc_receiver_ready(this))
696 next = Sender::cast(sender_list()->head());
698 assert_kdb (next->in_sender_list());
// A sender that is no longer ready gets dropped from the queue.
700 if (!next->ipc_receiver_ready(this))
702 next->sender_dequeue_head(sender_list());
704 Proc::preemption_point();
710 assert_kdb (cpu_lock.test());
712 // XXX: I'm not sure that EXPECT_FALSE is the right here
713 if (EXPECT_FALSE((long) next))
716 assert_kdb (!(state() & Thread_ipc_in_progress)
717 || !(state() & Thread_ready));
719 // maybe switch_exec should return an bool to avoid testing the
723 assert_kdb (partner);
724 assert_kdb (partner->sched());
726 /* dont_switch == true for xCPU */
// Donate the time slice to the ready partner unless a higher-priority
// queued sender should run first.
727 if (EXPECT_TRUE(have_send && !dont_switch
728 && (partner->state() & Thread_ready)
729 && (next->sender_prio() <= partner->sched()->prio())))
730 switch_exec_schedule_locked(partner, Context::Not_Helping);
733 if (have_send && partner->cpu() == cpu()
734 && (partner->state() & Thread_ready))
735 partner->sched()->deblock(cpu());
739 assert_kdb (state() & Thread_ready);
743 if (EXPECT_TRUE(have_send && partner->cpu() == cpu()
744 && (partner->state() & Thread_ready)))
749 switch_exec_locked(partner, Context::Not_Helping);
750 // We have to retry if there are possible senders in our
751 // sender queue, because a sender from a remote CPU may
752 // have been enqueued in handle_drq, in switch_exec_locked
756 partner->sched()->deblock(cpu());
// Nothing to do right now: sleep for the receive timeout.
759 goto_sleep(t.rcv, sender, access_utcb());
761 // LOG_MSG_3VAL(this, "ipcrw", Mword(sender), state(), 0);
765 assert_kdb (!(state() & Thread_ipc_sending_mask));
767 // if the receive operation was canceled/finished before we
768 // switched to the old receiver, finish the send
769 if (have_send && partner->cpu() == cpu()
770 && (partner->state() & Thread_ready))
772 if (!dont_switch && EXPECT_TRUE(partner != this))
773 switch_exec_schedule_locked(partner, Context::Not_Helping);
775 partner->sched()->deblock(cpu());
778 // fast out if ipc is already finished
779 if (EXPECT_TRUE((state() & ~(Thread_transfer_in_progress | Thread_fpu_owner|Thread_cancel)) == Thread_ready))
781 state_del(Thread_transfer_in_progress);
784 assert_kdb (!(state() & (Thread_ipc_sending_mask)));
786 // abnormal termination?
787 handle_abnormal_termination(regs);
789 state_del(Thread_ipc_mask);
793 PRIVATE inline NEEDS ["map_util.h", Thread::copy_utcb_to,
794 Thread::unlock_receiver]
// Copy the message (UTCB words + typed items) to the receiver and stamp
// the receiver's frame with our sender label; the error bit in the tag
// reflects the copy result.
796 Thread::transfer_msg(L4_msg_tag tag, Thread *receiver,
797 Syscall_frame *sender_regs, unsigned char rights)
799 Syscall_frame* dst_regs = receiver->rcv_regs();
801 bool success = copy_utcb_to(tag, receiver, rights);
802 tag.set_error(!success);
804 dst_regs->from(sender_regs->from_spec());
809 /** Unlock the Receiver locked with ipc_try_lock().
810 If the sender goes to wait for a registered message enable LIPC.
811 @param receiver receiver to unlock
812 @param sender_regs dummy
814 PRIVATE inline NEEDS ["entry_frame.h"]
// Thin wrapper; the second parameter is unused (kept for the interface).
816 Thread::unlock_receiver(Receiver *receiver, const Syscall_frame*)
818 receiver->ipc_unlock();
// Snapshot the receive-buffer descriptor and the first two buffer words.
823 Buf_utcb_saver::Buf_utcb_saver(const Utcb *u)
825 buf_desc = u->buf_desc;
826 buf[0] = u->buffers[0];
827 buf[1] = u->buffers[1];
// Write the snapshot taken by the constructor back into the UTCB.
832 Buf_utcb_saver::restore(Utcb *u)
834 u->buf_desc = buf_desc;
835 u->buffers[0] = buf[0];
836 u->buffers[1] = buf[1];
// Additionally snapshot values[0..1] (overwritten by page-fault IPC).
840 Pf_msg_utcb_saver::Pf_msg_utcb_saver(Utcb const *u) : Buf_utcb_saver(u)
842 msg[0] = u->values[0];
843 msg[1] = u->values[1];
// Restore buffers (via base) plus the two saved message words.
848 Pf_msg_utcb_saver::restore(Utcb *u)
850 Buf_utcb_saver::restore(u);
851 u->values[0] = msg[0];
852 u->values[1] = msg[1];
857 * \pre must run with local IRQs disabled (CPU lock held)
858 * to ensure that handler does not dissapear meanwhile.
// Perform exception IPC to `handler`: marshal the trap state, call the
// handler, restore the UTCB buffer words and the utcb-handler pointer.
862 Thread::exception(Kobject_iface *handler, Trap_state *ts, Mword rights)
865 L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);
869 void *old_utcb_handler = _utcb_handler;
872 // fill registers for IPC
873 Utcb *utcb = access_utcb();
874 Buf_utcb_saver saved_state(utcb);
// Accept a single whole-address-space mapping from the handler.
876 utcb->buf_desc = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
877 utcb->buffers[0] = L4_msg_item::map(0).raw();
878 utcb->buffers[1] = L4_fpage::all_spaces().raw();
881 L4_msg_tag tag(L4_exception_ipc::Msg_size, 0, L4_msg_tag::Transfer_fpu,
882 L4_msg_tag::Label_exception);
// Synthesize a call IPC to the exception-handler capability.
887 r.ref(L4_obj_ref(_exc_handler.raw() << L4_obj_ref::Cap_shift, L4_obj_ref::Ipc_call_ipc));
889 handler->invoke(r.ref(), rights, &r, utcb);
892 saved_state.restore(utcb);
894 if (EXPECT_FALSE(r.tag().has_error()))
896 if (Config::conservative)
898 printf(" exception fault %s error = 0x%lx\n",
899 utcb->error.snd_phase() ? "send" : "rcv",
901 kdb_ke("ipc to pager failed");
904 state_del(Thread_in_exception);
// Handler granted a one-shot syscall to an alien thread.
906 else if (r.tag().proto() == L4_msg_tag::Label_allow_syscall)
907 state_add(Thread_dis_alien);
909 // restore original utcb_handler
910 _utcb_handler = old_utcb_handler;
912 // FIXME: handle not existing pager properly
913 // for now, just ignore any errors
917 /* return 1 if exception could be handled
918 * return 0 if not for send_exception and halt thread
920 PUBLIC inline NEEDS["task.h", "trap_state.h",
921 Thread::fast_return_to_user,
922 Thread::save_fpu_state_to_utcb]
// Deliver a trap to the vCPU (if exceptions are enabled there) or to the
// thread's exception handler via exception IPC.
924 Thread::send_exception(Trap_state *ts)
926 assert(cpu_lock.test());
928 Vcpu_state *vcpu = access_vcpu();
930 if (vcpu_exceptions_enabled(vcpu))
932 // do not reflect debug exceptions to the VCPU but handle them in
934 if (EXPECT_FALSE(ts->is_debug_exception()
935 && !(vcpu->state & Vcpu_state::F_debug_exc)))
938 if (_exc_cont.valid())
// Enter vCPU kernel mode and reflect the trap state to the vCPU
// entry point (no return to the faulting context here).
940 vcpu_enter_kernel_mode(vcpu);
942 LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
943 Vcpu_log *l = tbe->payload<Vcpu_log>();
945 l->state = vcpu->_saved_state;
948 l->trap = ts->trapno();
949 l->err = ts->error();
950 l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
952 memcpy(&vcpu->_ts, ts, sizeof(Trap_state));
953 save_fpu_state_to_utcb(ts, access_utcb());
954 fast_return_to_user(vcpu->_entry_ip, vcpu->_sp);
957 // local IRQs must be disabled because we dereference a Thread_ptr
958 if (EXPECT_FALSE(_exc_handler.is_kernel()))
// Give the architecture code first shot (may fully handle the trap).
961 if (!send_exception_arch(ts))
962 return 0; // do not send exception
964 unsigned char rights = 0;
965 Kobject_iface *pager = _exc_handler.ptr(space(), &rights);
967 if (EXPECT_FALSE(!pager))
969 /* no pager (anymore), just ignore the exception, return success */
970 LOG_TRACE("Exception invalid handler", "exc", this,
971 __fmt_exception_invalid_handler,
972 Log_exc_invalid *l = tbe->payload<Log_exc_invalid>();
973 l->cap_idx = _exc_handler.raw());
// An unhandled exception in sigma0 is fatal for the system.
974 if (EXPECT_FALSE(space() == sigma0_task))
976 WARNX(Error, "Sigma0 raised an exception --> HALT\n");
980 pager = this; // block on ourselves
983 state_change(~Thread_cancel, Thread_in_exception);
985 return exception(pager, ts, rights);
// Fast path for object mappings when the receiver asked for a local id:
// if sender and receiver share the address space, pass the flex page raw;
// otherwise pass the object's local id (plus rights) if the object is
// already local to the receiver. rcv_word[-1] is the item's control word
// slot written by the caller (transfer_msg_items).
990 Thread::try_transfer_local_id(L4_buf_iter::Item const *const buf,
991 L4_fpage sfp, Mword *rcv_word, Thread* snd,
994 if (buf->b.is_rcv_id())
996 if (snd->space() == rcv->space())
999 rcv_word[-1] = sfp.raw();
1004 unsigned char rights = 0;
1005 Obj_space::Capability cap = snd->space()->obj_space()->lookup(sfp.obj_index());
1006 Kobject_iface *o = cap.obj();
1007 rights = cap.rights();
1008 if (EXPECT_TRUE(o && o->is_local(rcv->space())))
1011 rcv_word[-1] = o->obj_id() | Mword(rights);
// Transfer the typed items (map items) of a message: walk the sender's
// send items, match each against the receiver's buffer of the same type
// (memory / IO / object), and establish the mappings via fpage_map.
// Overflow of either side is reported as L4_error::Overflow on both
// partners.
1022 Thread::transfer_msg_items(L4_msg_tag const &tag, Thread* snd, Utcb *snd_utcb,
1023 Thread *rcv, Utcb *rcv_utcb,
1024 unsigned char rights)
1026 // LOG_MSG_3VAL(current(), "map bd=", rcv_utcb->buf_desc.raw(), 0, 0);
// One independent iterator per buffer class on the receive side.
1027 L4_buf_iter mem_buffer(rcv_utcb, rcv_utcb->buf_desc.mem());
1028 L4_buf_iter io_buffer(rcv_utcb, rcv_utcb->buf_desc.io());
1029 L4_buf_iter obj_buffer(rcv_utcb, rcv_utcb->buf_desc.obj());
1030 L4_snd_item_iter snd_item(snd_utcb, tag.words());
1031 register int items = tag.items();
// Received item words are appended after the untyped words.
1032 Mword *rcv_word = rcv_utcb->values + tag.words();
1034 // XXX: damn X-CPU state modification
1035 // snd->prepare_long_ipc(rcv);
1038 for (;items > 0 && snd_item.more();)
1040 if (EXPECT_FALSE(!snd_item.next()))
1042 snd->set_ipc_error(L4_error::Overflow, rcv);
1046 L4_snd_item_iter::Item const *const item = snd_item.get();
1048 if (item->b.is_void())
1049 { // XXX: not sure if void fpages are needed
1050 // skip send item and current rcv_buffer
1055 L4_buf_iter *buf_iter = 0;
// Select the receive buffer class matching the send item's type.
1057 switch (item->b.type())
1059 case L4_msg_item::Map:
1060 switch (L4_fpage(item->d).type())
1062 case L4_fpage::Memory: buf_iter = &mem_buffer; break;
1063 case L4_fpage::Io: buf_iter = &io_buffer; break;
1064 case L4_fpage::Obj: buf_iter = &obj_buffer; break;
1072 if (EXPECT_FALSE(!buf_iter))
1074 // LOG_MSG_3VAL(snd, "lIPCm0", 0, 0, 0);
1075 snd->set_ipc_error(L4_error::Overflow, rcv);
1079 L4_buf_iter::Item const *const buf = buf_iter->get();
1081 if (EXPECT_FALSE(buf->b.is_void() || buf->b.type() != item->b.type()))
1083 // LOG_MSG_3VAL(snd, "lIPCm1", buf->b.raw(), item->b.raw(), 0);
1084 snd->set_ipc_error(L4_error::Overflow, rcv);
1089 assert_kdb (item->b.type() == L4_msg_item::Map);
1090 L4_fpage sfp(item->d);
// Compose the received control word from the item's attributes and
// selected bits of the sent flex page.
1091 *rcv_word = (item->b.raw() & ~0x0ff7) | (sfp.raw() & 0x0ff0);
1095 if (!try_transfer_local_id(buf, sfp, rcv_word, snd, rcv))
1097 // we need to do a real mapping
1099 // diminish when sending via restricted ipc gates
1100 if (sfp.type() == L4_fpage::Obj)
1101 sfp.mask_rights(L4_fpage::Rights(rights | L4_fpage::RX));
1103 L4_error err = fpage_map(snd->space(), sfp,
1104 rcv->space(), L4_fpage(buf->d), item->b.raw(), &rl);
1106 if (EXPECT_FALSE(!err.ok()))
1108 snd->set_ipc_error(err, rcv);
// Note: compund() is the upstream API spelling (sic) for compound items.
1116 if (!item->b.compund())
// Send items left over but buffers exhausted -> overflow.
1120 if (EXPECT_FALSE(items))
1122 snd->set_ipc_error(L4_error::Overflow, rcv);
1131 * \pre Runs on the sender CPU
1133 PRIVATE inline NEEDS[Thread::do_remote_abort_send]
// Abort an in-flight send with error `e`: clear the send-side state bits,
// tear down an armed timeout, dequeue from the partner (locally or, for a
// cross-CPU partner, via do_remote_abort_send) and record the error.
1135 Thread::abort_send(L4_error const &e, Thread *partner)
1137 state_del_dirty(Thread_send_in_progress | Thread_polling | Thread_ipc_in_progress
1138 | Thread_transfer_in_progress);
1140 if (_timeout && _timeout->is_set())
1145 if (partner->cpu() == current_cpu())
1147 if (in_sender_list())
1149 sender_dequeue(partner->sender_list());
1150 partner->vcpu_update_state();
1153 access_utcb()->error = e;
// Cross-CPU partner: the dequeue must run on the partner's CPU.
1157 return do_remote_abort_send(e, partner);
1163 * \pre Runs on the sender CPU
// Block in the send phase until the receiver engages, the send timeout
// expires, or the IPC is cancelled. Returns false (via abort_send) on
// timeout/cancel, true when the handshake has progressed.
1167 Thread::do_send_wait(Thread *partner, L4_timeout snd_t)
1169 state_add_dirty(Thread_polling)
1171 IPC_timeout timeout;
1173 if (EXPECT_FALSE(snd_t.is_finite()))
1175 Unsigned64 tval = snd_t.microsecs(Timer::system_clock(), access_utcb());
1176 // Zero timeout or timeout expired already -- give up
1178 return abort_send(L4_error::Timeout, partner);
1180 set_timeout(&timeout);
1181 timeout.set(tval, cpu());
// Sleep while still polling, in IPC, not cancelled, and with no
// transfer started yet.
1186 if ((state() & (Thread_ipc_in_progress | Thread_polling
1187 | Thread_cancel | Thread_transfer_in_progress))
1188 == (Thread_ipc_in_progress | Thread_polling))
1190 state_del_dirty(Thread_ready);
1194 // ipc handshake bit is set
1195 if ((state() & (Thread_transfer_in_progress | Thread_receiving
1196 | Thread_ipc_in_progress))
1197 != Thread_ipc_in_progress)
1200 if (EXPECT_FALSE(state() & Thread_cancel))
1201 return abort_send(L4_error::Canceled, partner);
1203 // FIXME: existence check
// NOTE(review): the `0 &&` below deliberately disables the partner
// existence check (see FIXME above) -- this branch is dead code.
1205 if (EXPECT_FALSE(0 && partner->is_invalid()))
1207 state_del_dirty(Thread_send_in_progress | Thread_polling
1208 | Thread_ipc_in_progress | Thread_transfer_in_progress);
1210 if (_timeout && _timeout->is_set())
1215 access_utcb()->error = L4_error::Not_existent;
1220 // Make sure we're really still in IPC
1221 assert_kdb (state() & Thread_ipc_in_progress);
1223 state_add_dirty(Thread_polling);
1226 state_del_dirty(Thread_polling);
1228 if (EXPECT_FALSE((state() & (Thread_send_in_progress | Thread_cancel))
1229 == (Thread_send_in_progress | Thread_cancel)))
1230 return abort_send(L4_error::Canceled, partner);
1232 // reset is only an simple dequeing operation from an double
1233 // linked list, so we dont need an extra preemption point for this
// Timeout hit while the handshake never completed -> give up.
1235 if (EXPECT_FALSE(timeout.has_hit() && (state() & (Thread_send_in_progress
1236 | Thread_ipc_in_progress)) ==
1237 Thread_send_in_progress))
1238 return abort_send(L4_error::Timeout, partner);
1247 //---------------------------------------------------------------------
1248 IMPLEMENTATION [!mp]:
// Uniprocessor build: remote-IPC entry points must never be reached;
// each stub drops into the kernel debugger if they are.
1252 Thread::set_ipc_send_rights(unsigned char)
1255 PRIVATE inline NEEDS ["l4_types.h"]
1257 Thread::remote_handshake_receiver(L4_msg_tag const &, Thread *,
1258 bool, L4_timeout, Syscall_frame *, unsigned char)
1260 kdb_ke("Remote IPC in UP kernel");
1266 Thread::ipc_remote_receiver_ready(Receiver *)
1267 { kdb_ke("Remote IPC in UP kernel"); return false; }
1272 Thread::do_remote_abort_send(L4_error const &, Thread *)
1273 { kdb_ke("Remote abort send on UP kernel"); return false; }
1275 //---------------------------------------------------------------------
// MP build: per-thread send rights plus the request records shuttled
// between CPUs via DRQs.
1278 EXTENSION class Thread
1281 unsigned char _ipc_send_rights;
1284 struct Ipc_remote_request;
// Cross-CPU send request: filled by the sender, processed on the
// partner's CPU in handle_remote_ipc_send / remote_ipc_send.
1286 struct Ipc_remote_request
1290 Syscall_frame *regs;
1291 unsigned char rights;
1298 struct Ready_queue_request;
// Cross-CPU state-change request used by handle_remote_ready_enqueue.
1300 struct Ready_queue_request
1306 enum Result { Done, Wrong_cpu, Not_existent };
1310 //---------------------------------------------------------------------
1311 IMPLEMENTATION [mp]:
// Remember the rights the current send operation carries (consumed by
// the remote transfer paths).
1316 Thread::set_ipc_send_rights(unsigned char c)
1318 _ipc_send_rights = c;
// Invoke the scheduler only when asked to (s) and no scheduling is
// already in progress on this CPU.
1323 Thread::schedule_if(bool s)
1325 if (!s || current()->schedule_in_progress())
1328 current()->schedule();
1331 PRIVATE inline NEEDS[Thread::schedule_if]
// Abort a send whose partner runs on another CPU: issue a DRQ on the
// partner so the dequeue happens there; the error is only recorded if
// the remote handler confirmed we were still queued (rq.tag error bit).
1333 Thread::do_remote_abort_send(L4_error const &e, Thread *partner)
1335 Ipc_remote_request rq;
1336 rq.partner = partner;
1337 partner->drq(handle_remote_abort_send, &rq);
1338 if (rq.tag.has_error())
1339 access_utcb()->error = e;
1340 schedule_if(handle_drq());
1341 return !rq.tag.has_error();
1346 * Runs on the receiver CPU in the context of recv.
1347 * The 'this' pointer is the sender.
1349 PRIVATE inline NEEDS[Thread::schedule_if]
// Cross-CPU receiver-ready: perform the message transfer on the receiver's
// CPU, finish the receiver, then wake the (remote) sender via a DRQ back
// to the sender's CPU.
1351 Thread::ipc_remote_receiver_ready(Receiver *recv)
1353 //printf(" remote ready: %x.%x \n", id().task(), id().lthread());
1354 //LOG_MSG_3VAL(this, "recvr", Mword(recv), 0, 0);
1355 assert_kdb (recv->cpu() == current_cpu());
1357 recv->ipc_init(this);
1359 Syscall_frame *regs = _snd_regs;
1361 recv->vcpu_disable_irqs();
1362 //printf(" transfer to %p\n", recv);
1363 bool success = transfer_msg(regs->tag(), nonull_static_cast<Thread*>(recv), regs, _ipc_send_rights);
1364 //printf(" done\n");
// Reflect the transfer result in the sender-side frame's tag.
1365 regs->tag(L4_msg_tag(regs->tag(), success ? 0 : L4_msg_tag::Error));
1366 if (success && partner() == nonull_static_cast<Thread*>(recv))
1367 nonull_static_cast<Thread*>(recv)->set_caller(this, _ipc_send_rights);
1370 recv->state_del_dirty(Thread_ipc_receiving_mask | Thread_ipc_in_progress);
1372 // dequeue sender from receiver's sending queue
1373 sender_dequeue(recv->sender_list());
1374 recv->vcpu_update_state();
// Build the sender's wakeup state change and ship it to the sender's
// CPU as a DRQ.
1376 Ready_queue_request rq;
1378 rq.state_add = Thread_transfer_in_progress;
1379 if (Receiver::prepared())
1380 { // same as in Receiver::prepare_receive_dirty_2
1381 rq.state_del = Thread_ipc_sending_mask;
1382 rq.state_add |= Thread_receiving;
1387 drq(handle_remote_ready_enqueue, &rq);
1388 schedule_if(current()->handle_drq());
1389 //printf(" wakeup sender done\n");
1394 PRIVATE inline NOEXPORT
// DRQ body executed on the partner's CPU: run check_sender there and
// either fail, queue, defer (items present -> pull path), or transfer
// the message immediately and finish the receiver.
1396 Thread::remote_ipc_send(Context *src, Ipc_remote_request *rq)
1399 //LOG_MSG_3VAL(this, "rse", current_cpu(), (Mword)src, 0);
1401 LOG_MSG_3VAL(this, "rsend", (Mword)src, 0, 0);
1402 printf("CPU[%u]: remote IPC send ...\n"
1403 " partner=%p [%u]\n"
1404 " sender =%p [%u] regs=%p\n"
1407 rq->partner, rq->partner->cpu(),
1414 switch (__builtin_expect(rq->partner->check_sender(this, rq->timeout), Ok))
1417 rq->result = Failed;
1420 rq->result = Queued;
// Messages with typed items must not be transferred inside a DRQ:
1426 // trigger remote_ipc_receiver_ready path, because we may need to grab locks
1427 // and this is forbidden in a DRQ handler. So transfer the IPC in usual
1428 // thread code. However, this induces a overhead of two extra IPIs.
1429 if (rq->tag.items())
1431 set_receiver(rq->partner);
1432 sender_enqueue(rq->partner->sender_list(), sched_context()->prio());
1433 rq->partner->vcpu_set_irq_pending();
1435 //LOG_MSG_3VAL(rq->partner, "pull", dbg_id(), 0, 0);
1436 rq->result = Queued | Receive_in_progress;
1437 rq->partner->state_add_dirty(Thread_ready);
1438 rq->partner->sched()->deblock(current_cpu());
// Untyped-only message: transfer directly here and wake the receiver.
1441 rq->partner->vcpu_disable_irqs();
1442 bool success = transfer_msg(rq->tag, rq->partner, rq->regs, _ipc_send_rights);
1443 rq->result = success ? Ok : Failed;
1445 if (success && partner() == rq->partner)
1446 rq->partner->set_caller(this, _ipc_send_rights);
1448 rq->partner->state_change_dirty(~(Thread_ipc_receiving_mask | Thread_ipc_in_progress), Thread_ready);
1449 // hm, should be done by lazy queueing: rq->partner->ready_enqueue();
1449 // hm, should be done by lazy queueing: rq->partner->ready_enqueue();
1455 Thread::handle_remote_ipc_send(Drq *src, Context *, void *_rq)
1457 Ipc_remote_request *rq = (Ipc_remote_request*)_rq;
1458 bool r = nonull_static_cast<Thread*>(src->context())->remote_ipc_send(src->context(), rq);
1459 //LOG_MSG_3VAL(src, "rse<", current_cpu(), (Mword)src, r);
1460 return r ? Drq::Need_resched : 0;
// DRQ handler on the partner's CPU for an aborted send: if the sender is
// still queued there, dequeue it and flag the abort in rq->tag so the
// sender-side code records the error.
1465 Thread::handle_remote_abort_send(Drq *src, Context *, void *_rq)
1467 Ipc_remote_request *rq = (Ipc_remote_request*)_rq;
1468 Thread *sender = nonull_static_cast<Thread*>(src->context());
1469 if (sender->in_sender_list())
1471 // really cancelled IPC
1472 rq->tag.set_error(true);
1473 sender->sender_dequeue(rq->partner->sender_list());
1474 rq->partner->vcpu_update_state();
// DRQ handler on the sender's CPU: apply the state transition computed
// by ipc_remote_receiver_ready and make the sender ready if it was not.
1487 Thread::handle_remote_ready_enqueue(Drq *, Context *self, void *_rq)
1489 Ready_queue_request *rq = (Ready_queue_request*)_rq;
1491 //LOG_MSG_3VAL(current(), "rre", rq->state_add, rq->state_del, c->state());
1493 c->state_add_dirty(rq->state_add);
1494 c->state_del_dirty(rq->state_del);
1495 rq->result = Ready_queue_request::Done;
// Already ready: no extra state change, but still request a reschedule.
1497 if (EXPECT_FALSE(c->state() & Thread_ready))
1498 return Drq::Need_resched;
1500 c->state_add_dirty(Thread_ready);
1501 // hm, should be done by our lazy queueing: c->ready_enqueue();
1502 return Drq::Need_resched;
1509 * \pre Runs on the sender CPU
1511 PRIVATE //inline NEEDS ["mp_request.h"]
// Cross-CPU send handshake: package the send parameters into an
// Ipc_remote_request and execute it on the partner's CPU via DRQ;
// remote_prepare_receive runs back on this CPU afterwards.
1513 Thread::remote_handshake_receiver(L4_msg_tag const &tag, Thread *partner,
1515 L4_timeout snd_t, Syscall_frame *regs,
1516 unsigned char rights)
1518 // Flag that there must be no switch in the receive path.
1519 // This flag also prevents the receive path from accessing
1520 // the thread state of a remote sender.
1521 Ipc_remote_request rq;
1523 rq.have_rcv = have_receive;
1524 rq.partner = partner;
1525 rq.timeout = !snd_t.is_zero();
1530 set_receiver(partner);
1532 state_add_dirty(Thread_send_in_progress | Thread_ipc_in_progress);
1534 partner->drq(handle_remote_ipc_send, &rq,
1535 remote_prepare_receive);
// DRQ completion callback on the sender's CPU: unless the remote side
// queued us or the send failed (or there is no receive phase), leave the
// send phase and prepare the receive phase.
1543 Thread::remote_prepare_receive(Drq *src, Context *, void *arg)
1545 Context *c = src->context();
1546 Ipc_remote_request *rq = (Ipc_remote_request*)arg;
1547 //printf("CPU[%2u:%p]: remote_prepare_receive (err=%x)\n", current_cpu(), c, rq->err.error());
1549 if (EXPECT_FALSE(rq->result & Queued))
1552 c->state_del(Thread_send_in_progress);
1553 if (EXPECT_FALSE((rq->result & Failed) || !rq->have_rcv))
1556 Thread *t = nonull_static_cast<Thread*>(c);
1557 t->prepare_receive_dirty_2();
1561 //---------------------------------------------------------------------
1562 IMPLEMENTATION [debug]:
// Tracebuffer formatter for "page fault to invalid pager" entries;
// returns the number of characters snprintf would have written.
1566 Thread::log_fmt_pf_invalid(Tb_entry *e, int max, char *buf)
1568 Log_pf_invalid *l = e->payload<Log_pf_invalid>();
1569 return snprintf(buf, max, "InvCap C:%lx pfa=%lx err=%lx", l->cap_idx, l->pfa, l->err);
// Tracebuffer formatter for "exception to invalid handler" entries.
1574 Thread::log_fmt_exc_invalid(Tb_entry *e, int max, char *buf)
1576 Log_exc_invalid *l = e->payload<Log_exc_invalid>();
1577 return snprintf(buf, max, "InvCap C:%lx", l->cap_idx);