3 #include "l4_buf_iter.h"
18 struct Log_exc_invalid
23 enum Check_sender_result
27 Receive_in_progress = 4,
31 Syscall_frame *_snd_regs;
37 Buf_utcb_saver(Utcb const *u);
38 void restore(Utcb *u);
45 * Save critical contents of UTCB during nested IPC.
47 class Pf_msg_utcb_saver : public Buf_utcb_saver
50 Pf_msg_utcb_saver(Utcb const *u);
51 void restore(Utcb *u);
56 // ------------------------------------------------------------------------
61 EXTENSION class Thread
64 static unsigned log_fmt_pf_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_page_fault_invalid_pager");
65 static unsigned log_fmt_exc_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_exception_invalid_handler");
68 // ------------------------------------------------------------------------
71 // IPC setup, and handling of ``short IPC'' and page-fault IPC
73 // IDEAS for enhancing this implementation:
75 // Volkmar has suggested a possible optimization for
76 // short-flexpage-to-long-message-buffer transfers: Currently, we have
77 // to resort to long IPC in that case because the message buffer might
78 // contain a receive-flexpage option. An easy optimization would be
79 // to cache the receive-flexpage option in the TCB for that case.
80 // This would save us the long-IPC setup because we wouldn't have to
81 // touch the receiver's user memory in that case. Volkmar argues that
82 // cases like that are quite common -- for example, imagine a pager
83 // which at the same time is also a server for ``normal'' requests.
85 // The handling of cancel and timeout conditions could be improved as
86 // follows: Cancel and Timeout should not reset the ipc_in_progress
87 // flag. Instead, they should just set and/or reset a flag of their
88 // own that is checked every time an (IPC) system call wants to go to
89 // sleep. That would mean that IPCs that do not block are not
90 // cancelled or aborted.
93 #include <cstdlib> // panic()
96 #include "l4_msg_item.h"
100 #include "ipc_timeout.h"
101 #include "lock_guard.h"
103 #include "map_util.h"
104 #include "processor.h"
111 Thread::ipc_receiver_aborted()
113 assert_kdb (receiver());
115 sender_dequeue(receiver()->sender_list());
116 receiver()->vcpu_update_state();
119 if (!(state() & Thread_ipc_in_progress))
122 state_add_dirty(Thread_ready);
123 sched()->deblock(cpu());
126 /** Receiver-ready callback.
127 Receivers make sure to call this function on waiting senders when
128 they get ready to receive a message from that sender. Senders need
129 to overwrite this interface.
131 Class Thread's implementation wakes up the sender if it is still in
136 Thread::ipc_receiver_ready(Receiver *recv)
138 if (cpu() == current_cpu())
139 return ipc_local_receiver_ready(recv);
141 return ipc_remote_receiver_ready(recv);
// Rewrite the sender label stored in _snd_regs according to a rule table.
// 'todo' holds 'cnt' rules of four Mwords each: {test_mask, test_value,
// del_mask, add_mask}. For every rule whose masked label matches the test
// value, the del_mask bits are cleared and the add_mask bits are set.
// NOTE(review): the lines committing 'l' back to _snd_regs are outside this
// excerpt — confirm against the full source.
146 Thread::modify_label(Mword const *todo, int cnt)
148 assert_kdb (_snd_regs);
149 Mword l = _snd_regs->from_spec();
// Step by 4: each rule occupies four consecutive table entries.
150 for (int i = 0; i < cnt*4; i += 4)
152 Mword const test_mask = todo[i];
153 Mword const test = todo[i+1];
154 if ((l & test_mask) == test)
156 Mword const del_mask = todo[i+2];
157 Mword const add_mask = todo[i+3];
// Clear the delete bits, then set the add bits.
159 l = (l & ~del_mask) | add_mask;
168 Thread::ipc_local_receiver_ready(Receiver *recv)
170 assert_kdb (receiver());
171 assert_kdb (receiver() == recv);
172 assert_kdb (receiver() == current());
174 if (!(state() & Thread_ipc_in_progress))
177 if (!recv->sender_ok(this))
180 recv->ipc_init(this);
182 state_add_dirty(Thread_ready | Thread_transfer_in_progress);
184 sched()->deblock(cpu());
185 sender_dequeue(recv->sender_list());
186 recv->vcpu_update_state();
188 // put receiver into sleep
189 receiver()->state_del_dirty(Thread_ready);
196 Thread::snd_regs(Syscall_frame *r)
200 /** Page fault handler.
201 This handler suspends any ongoing IPC, then sets up page-fault IPC.
202 Finally, the ongoing IPC's state (if any) is restored.
203 @param pfa page-fault virtual address
204 @param error_code page-fault error code.
208 Thread::handle_page_fault_pager(Thread_ptr const &_pager,
209 Address pfa, Mword error_code,
210 L4_msg_tag::Protocol protocol)
213 // do not handle user space page faults from kernel mode if we're
214 // already handling a request
215 if (EXPECT_FALSE(!PF::is_usermode_error(error_code)
216 && thread_lock()->test() == Thread_lock::Locked))
218 kdb_ke("Fiasco BUG: page fault, under lock");
219 panic("page fault in locked operation");
223 if (EXPECT_FALSE((state() & Thread_alien)
224 && !(state() & Thread_ipc_in_progress)))
227 Lock_guard<Cpu_lock> guard(&cpu_lock);
229 unsigned char rights;
230 Kobject_iface *pager = _pager.ptr(space(), &rights);
234 WARN ("CPU%d: Pager of %lx is invalid (pfa=" L4_PTR_FMT
235 ", errorcode=" L4_PTR_FMT ") to %lx (pc=%lx)\n",
236 current_cpu(), dbg_id(), pfa, error_code,
237 _pager.raw(), regs()->ip());
240 LOG_TRACE("Page fault invalid pager", "pf", this,
241 __fmt_page_fault_invalid_pager,
242 Log_pf_invalid *l = tbe->payload<Log_pf_invalid>();
243 l->cap_idx = _pager.raw();
247 pager = this; // block on ourselves
250 // set up a register block used as an IPC parameter block for the
253 Utcb *utcb = this->utcb().access(true);
255 // save the UTCB fields affected by PF IPC
256 Pf_msg_utcb_saver saved_utcb_fields(utcb);
259 utcb->buf_desc = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
260 utcb->buffers[0] = L4_msg_item::map(0).raw();
261 utcb->buffers[1] = L4_fpage::all_spaces().raw();
263 utcb->values[0] = PF::addr_to_msgword0 (pfa, error_code);
264 utcb->values[1] = regs()->ip(); //PF::pc_to_msgword1 (regs()->ip(), error_code));
266 L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);
268 // This might be a page fault in midst of a long-message IPC operation.
269 // Save the current IPC state and restore it later.
270 Sender *orig_partner;
271 Syscall_frame *orig_rcv_regs;
272 save_receiver_state (&orig_partner, &orig_rcv_regs);
274 Receiver *orig_snd_partner = receiver();
275 Timeout *orig_timeout = _timeout;
277 orig_timeout->reset();
279 unsigned orig_ipc_state = state() & Thread_ipc_mask;
281 state_del(orig_ipc_state);
283 timeout = utcb->xfer; // in long IPC -- use pagefault timeout
285 L4_msg_tag tag(2, 0, 0, protocol);
290 r.ref(L4_obj_ref(_pager.raw() << L4_obj_ref::Cap_shift, L4_obj_ref::Ipc_call_ipc));
291 pager->invoke(r.ref(), rights, &r, utcb);
296 if (EXPECT_FALSE(r.tag().has_error()))
298 if (Config::conservative)
300 printf(" page fault %s error = 0x%lx\n",
301 utcb->error.snd_phase() ? "send" : "rcv",
303 kdb_ke("ipc to pager failed");
306 if (utcb->error.snd_phase()
307 && (utcb->error.error() == L4_error::Not_existent)
308 && PF::is_usermode_error(error_code)
309 && !(state() & Thread_cancel))
316 // If the pager rejects the mapping, it replies -1 in msg.w0
317 if (EXPECT_FALSE (utcb->values[0] == Mword(-1)))
321 // restore previous IPC state
323 saved_utcb_fields.restore(utcb);
325 set_receiver(orig_snd_partner);
326 restore_receiver_state(orig_partner, orig_rcv_regs);
327 state_add(orig_ipc_state);
330 orig_timeout->set_again(cpu());
// Receiver-side admission check for a sender wanting to start IPC.
// On an invalid (destroyed) receiver the sender gets Not_existent; if the
// receiver is not ready (sender_ok fails) the sender gets Timeout
// (presumably only on the zero-timeout path — the intervening lines are not
// visible here, verify against the full source) or is enqueued on the
// receiver's sender list, ordered by the sender's scheduling priority.
337 Thread::check_sender(Thread *sender, bool timeout)
339 if (EXPECT_FALSE(is_invalid()))
// Report the dead partner through the sender's UTCB error word.
341 sender->utcb().access()->error = L4_error::Not_existent;
345 if (EXPECT_FALSE(!sender_ok(sender)))
349 sender->utcb().access()->error = L4_error::Timeout;
// Receiver is valid but busy: park the sender in the wait queue.
353 sender->set_receiver(this);
354 sender->sender_enqueue(sender_list(), sender->sched_context()->prio());
// Make a pending vCPU IRQ visible to the (vCPU-mode) receiver.
355 vcpu_set_irq_pending();
363 PRIVATE inline NEEDS["timer.h"]
364 void Thread::goto_sleep(L4_timeout const &t, Sender *sender, Utcb *utcb)
367 ((state() & (Thread_receiving | Thread_ipc_in_progress | Thread_cancel))
368 != (Thread_receiving | Thread_ipc_in_progress)))
373 if (EXPECT_FALSE(t.is_finite() && !_timeout))
376 state_del_dirty(Thread_ready);
378 Unsigned64 tval = t.microsecs(Timer::system_clock(), utcb);
380 if (EXPECT_TRUE((tval != 0)))
382 set_timeout(&timeout);
383 timeout.set(tval, cpu());
385 else // timeout already hit
386 state_change_dirty(~Thread_ipc_in_progress, Thread_ready);
391 if (EXPECT_TRUE(t.is_never()))
392 state_del_dirty(Thread_ready);
394 state_change_dirty(~Thread_ipc_in_progress, Thread_ready);
398 switch_sched(sched());
402 if (EXPECT_FALSE((long)_timeout))
408 assert_kdb (state() & Thread_ready);
415 * @pre cpu_lock must be held
417 PRIVATE inline NEEDS["logdefs.h"]
419 Thread::handshake_receiver(Thread *partner, L4_timeout snd_t)
421 assert_kdb(cpu_lock.test());
423 switch (__builtin_expect(partner->check_sender(this, !snd_t.is_zero()), Ok))
428 state_add_dirty(Thread_send_in_progress | Thread_ipc_in_progress);
// Wake up the IPC partner after a successful transfer: clear its receiving /
// in-progress state bits. The real-time (delayed-IPC) handling below the
// 'if (1)' is currently dead code, kept for the rt configuration (see the
// commented-out EXPECT_TRUE condition).
438 Thread::wake_receiver(Thread *receiver)
440 // If neither IPC partner is delayed, just update the receiver's state
441 if (1) // rt:EXPECT_TRUE(!((state() | receiver->state()) & Thread_delayed_ipc)))
443 receiver->state_change_dirty(~(Thread_ipc_receiving_mask
444 | Thread_ipc_in_progress),
449 // Critical section if either IPC partner is delayed until its next period
450 assert_kdb (cpu_lock.test());
452 // Sender has no receive phase and deadline timeout already hit
453 if ( (state() & (Thread_receiving |
454 Thread_delayed_deadline | Thread_delayed_ipc)) ==
457 state_change_dirty (~Thread_delayed_ipc, 0);
458 switch_sched (sched_context()->next());
459 _deadline_timeout.set (Timer::system_clock() + period(), cpu());
462 // Receiver's deadline timeout already hit
// NOTE(review): operator-precedence bug in the condition below. '==' binds
// tighter than bitwise '&', so this parses as
//   receiver->state() & ((Thread_delayed_deadline | Thread_delayed_ipc) == ...)
// instead of the intended
//   (receiver->state() & (Thread_delayed_deadline | Thread_delayed_ipc)) == ...
// Compare with the correctly parenthesized sender-side test above (orig
// lines 453-454). A closing parenthesis is missing before '=='; the fix
// needs the comparison's right-hand side (not visible in this excerpt).
463 if ( (receiver->state() & (Thread_delayed_deadline |
464 Thread_delayed_ipc) ==
467 receiver->state_change_dirty (~Thread_delayed_ipc, 0);
468 receiver->switch_sched (receiver->sched_context()->next());
469 receiver->_deadline_timeout.set (Timer::system_clock() +
470 receiver->period(), receiver->cpu());
473 receiver->state_change_dirty(~(Thread_ipc_mask | Thread_delayed_ipc), Thread_ready);
// Record an IPC error on both partners: the plain error in this (sender)
// thread's UTCB, and the receive-phase variant of the same error in the
// receiver's UTCB.
478 Thread::set_ipc_error(L4_error const &e, Thread *rcv)
480 utcb().access()->error = e;
481 rcv->utcb().access()->error = L4_error(e, L4_error::Rcv);
484 PRIVATE inline NEEDS [Thread::do_send_wait]
486 Thread::do_ipc_send(L4_msg_tag const &tag, Thread *partner,
488 L4_timeout_pair t, Syscall_frame *regs,
489 bool *do_switch, unsigned char rights)
493 state_add_dirty(Thread_send_in_progress);
494 set_ipc_send_rights(rights);
496 if (EXPECT_FALSE(partner->cpu() != current_cpu()) ||
497 ((result = handshake_receiver(partner, t.snd)) == Failed
498 && partner->drq_pending()))
501 result = remote_handshake_receiver(tag, partner, have_receive, t.snd,
505 if (EXPECT_FALSE(result & Queued))
508 if (result & Receive_in_progress)
509 snd_t = L4_timeout::Never;
513 // set _snd_regs, we may become a remote IPC while waiting
516 if (!do_send_wait(partner, snd_t))
519 else if (EXPECT_FALSE(result == Failed))
521 state_del_dirty(Thread_ipc_sending_mask
522 | Thread_transfer_in_progress
523 | Thread_ipc_in_progress);
527 // Case 1: The handshake told us it was Ok
528 // Case 2: The send_wait told us it had finished w/o error
530 // in The X-CPU IPC case the IPC has been already finished here
531 if (EXPECT_FALSE(partner->cpu() != current_cpu()
532 || (!(state() & Thread_send_in_progress))))
534 state_del_dirty(Thread_ipc_sending_mask | Thread_transfer_in_progress);
538 assert_kdb (!(state() & Thread_polling));
540 partner->ipc_init(this);
542 // mmh, we can reset the receivers timeout
543 // ping pong with timeouts will profit from it, because
544 // it will require much less sorting overhead
545 // if we dont reset the timeout, the possibility is very high
546 // that the receiver timeout is in the timeout queue
547 partner->reset_timeout();
549 bool success = transfer_msg(tag, partner, regs, rights);
551 if (success && this->partner() == partner)
552 partner->set_caller(this, rights);
554 if (!tag.do_switch() || partner->state() & Thread_suspended)
557 if (EXPECT_FALSE(!success || !have_receive))
559 bool do_direct_switch = false;
560 // make the ipc partner ready if still engaged in ipc with us
561 if (partner->in_ipc(this))
563 wake_receiver(partner);
564 do_direct_switch = *do_switch;
567 if (do_direct_switch)
568 check (!switch_exec_locked(partner, Context::Not_Helping));
569 else if (partner->sched()->deblock(current_cpu(), sched(), true))
570 switch_to_locked(partner);
572 state_del(Thread_ipc_sending_mask
573 | Thread_transfer_in_progress
574 | Thread_ipc_in_progress);
579 // possible preemption point
581 if (EXPECT_TRUE(!partner->in_ipc(this)))
583 state_del(Thread_ipc_sending_mask
584 | Thread_transfer_in_progress
585 | Thread_ipc_in_progress);
586 sender_dequeue(partner->sender_list());
587 partner->vcpu_update_state();
588 utcb().access()->error = L4_error::Aborted;
592 wake_receiver(partner);
593 prepare_receive_dirty_2();
// Commit the proper receive-phase error code into the syscall frame when an
// IPC did not complete normally: R_aborted if a transfer was already under
// way, R_canceled on a plain cancel, R_timeout otherwise. Does nothing when
// no receive was in progress.
597 PRIVATE inline NOEXPORT
599 Thread::handle_abnormal_termination(Syscall_frame *regs)
601 if (EXPECT_TRUE (!(state() & Thread_ipc_receiving_mask)))
604 Utcb *utcb = this->utcb().access(true);
605 // the IPC has not been finished. could be timeout or cancel
606 // XXX should only modify the error-code part of the status code
608 if (EXPECT_FALSE(state() & Thread_cancel))
610 // we've presumably been reset!
611 if (state() & Thread_transfer_in_progress)
612 regs->tag(commit_error(utcb, L4_error::R_aborted, regs->tag()));
614 regs->tag(commit_error(utcb, L4_error::R_canceled, regs->tag()));
617 regs->tag(commit_error(utcb, L4_error::R_timeout, regs->tag()));
622 * Send an IPC message.
623 * Block until we can send the message or the timeout hits.
624 * @param partner the receiver of our message
625 * @param t a timeout specifier
626 * @param regs sender's IPC registers
627 * @pre cpu_lock must be held
628 * @return sender's IPC error code
632 Thread::do_ipc(L4_msg_tag const &tag, bool have_send, Thread *partner,
633 bool have_receive, Sender *sender,
634 L4_timeout_pair t, Syscall_frame *regs,
635 unsigned char rights)
637 assert_kdb (cpu_lock.test());
638 assert_kdb (this == current());
640 bool do_switch = true;
641 //LOG_MSG_3VAL(this, "ipc", (Mword) partner, (Mword) sender, cpu());
642 assert_kdb (!(state() & Thread_ipc_sending_mask));
644 prepare_receive_dirty_1(sender, have_receive ? regs : 0);
648 assert_kdb(!in_sender_list());
649 bool ok = do_ipc_send(tag, partner, have_receive, t, regs, &do_switch, rights);
650 if (EXPECT_FALSE(!ok))
652 regs->tag(L4_msg_tag(0, 0, L4_msg_tag::Error, 0));
653 assert_kdb (!in_sender_list());
659 regs->tag(L4_msg_tag(0,0,0,0));
660 assert_kdb (!in_sender_list());
666 assert_kdb (have_receive);
667 prepare_receive_dirty_2();
670 assert_kdb (!in_sender_list());
671 assert_kdb (!(state() & Thread_ipc_sending_mask));
674 ((state() & (Thread_receiving | Thread_ipc_in_progress | Thread_cancel))
675 == (Thread_receiving | Thread_ipc_in_progress)) )
679 if (EXPECT_FALSE((long)sender_list()->head()))
681 if (sender) // closed wait
683 if (sender->in_sender_list()
684 && this == sender->receiver()
685 && sender->ipc_receiver_ready(this))
691 next = Sender::cast(sender_list()->head());
693 assert_kdb (next->in_sender_list());
695 if (!next->ipc_receiver_ready(this))
697 next->sender_dequeue_head(sender_list());
699 Proc::preemption_point();
705 assert_kdb (cpu_lock.test());
707 // XXX: I'm not sure that EXPECT_FALSE ist the right here
708 if (EXPECT_FALSE((long) next))
711 assert_kdb (!(state() & Thread_ipc_in_progress)
712 || !(state() & Thread_ready));
714 // maybe switch_exec should return an bool to avoid testing the
718 assert_kdb (partner);
719 assert_kdb (partner->sched());
721 /* do_switch == false for xCPU */
722 if (EXPECT_TRUE(have_send && do_switch
723 && (partner->state() & Thread_ready)
724 && (next->sender_prio() <= partner->sched()->prio())))
725 switch_exec_schedule_locked(partner, Context::Not_Helping);
728 if (have_send && partner->cpu() == cpu()
729 && (partner->state() & Thread_ready))
730 partner->sched()->deblock(cpu());
734 assert_kdb (state() & Thread_ready);
738 if (EXPECT_TRUE(have_send && partner->cpu() == cpu()
739 && (partner->state() & Thread_ready)))
744 switch_exec_locked(partner, Context::Not_Helping);
745 // We have to retry if there are possible senders in our
746 // sender queue, because a sender from a remote CPU may
747 // have been enqueued in handle_drq, in switch_exec_locked
751 partner->sched()->deblock(cpu());
754 goto_sleep(t.rcv, sender, utcb().access(true));
756 // LOG_MSG_3VAL(this, "ipcrw", Mword(sender), state(), 0);
760 assert_kdb (!(state() & Thread_ipc_sending_mask));
762 // if the receive operation was canceled/finished before we
763 // switched to the old receiver, finish the send
764 if (have_send && partner->cpu() == cpu()
765 && (partner->state() & Thread_ready))
767 if (do_switch && EXPECT_TRUE(partner != this))
768 switch_exec_schedule_locked(partner, Context::Not_Helping);
770 partner->sched()->deblock(cpu());
773 // fast out if ipc is already finished
774 if (EXPECT_TRUE((state() & ~(Thread_transfer_in_progress | Thread_fpu_owner|Thread_cancel)) == Thread_ready))
776 state_del(Thread_transfer_in_progress);
779 assert_kdb (!(state() & (Thread_ipc_sending_mask)));
781 // abnormal termination?
782 handle_abnormal_termination(regs);
784 state_del(Thread_ipc_mask);
// Copy an IPC message from this (sender) thread into 'receiver':
// transfers the UTCB payload with the given mapping rights, records a
// transfer failure in the tag's error bit, and propagates the sender's
// label into the receiver's syscall frame.
788 PRIVATE inline NEEDS ["map_util.h", Thread::copy_utcb_to]
790 Thread::transfer_msg(L4_msg_tag tag, Thread *receiver,
791 Syscall_frame *sender_regs, unsigned char rights)
793 Syscall_frame* dst_regs = receiver->rcv_regs();
795 bool success = copy_utcb_to(tag, receiver, rights);
796 tag.set_error(!success);
// Receiver sees who sent the message (label/badge from the sender frame).
798 dst_regs->from(sender_regs->from_spec());
// Snapshot the UTCB fields clobbered by an in-kernel buffer setup:
// the buffer descriptor and the first two buffer registers.
805 Buf_utcb_saver::Buf_utcb_saver(const Utcb *u)
807 buf_desc = u->buf_desc;
808 buf[0] = u->buffers[0];
809 buf[1] = u->buffers[1];
// Write the saved buffer descriptor and buffer registers back into the UTCB.
814 Buf_utcb_saver::restore(Utcb *u)
816 u->buf_desc = buf_desc;
817 u->buffers[0] = buf[0];
818 u->buffers[1] = buf[1];
// Additionally snapshot the first two message words, which page-fault IPC
// overwrites (fault address and faulting IP — see handle_page_fault_pager).
822 Pf_msg_utcb_saver::Pf_msg_utcb_saver(Utcb const *u) : Buf_utcb_saver(u)
824 msg[0] = u->values[0];
825 msg[1] = u->values[1];
// Restore the buffer state (base class) and the two saved message words.
830 Pf_msg_utcb_saver::restore(Utcb *u)
832 Buf_utcb_saver::restore(u);
833 u->values[0] = msg[0];
834 u->values[1] = msg[1];
839 * \pre must run with local IRQs disabled (CPU lock held)
840 * to ensure that handler does not dissapear meanwhile.
844 Thread::exception(Kobject_iface *handler, Trap_state *ts, Mword rights)
847 L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);
851 void *old_utcb_handler = _utcb_handler;
854 // fill registers for IPC
855 Utcb *utcb = this->utcb().access(true);
856 Buf_utcb_saver saved_state(utcb);
858 utcb->buf_desc = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
859 utcb->buffers[0] = L4_msg_item::map(0).raw();
860 utcb->buffers[1] = L4_fpage::all_spaces().raw();
863 L4_msg_tag tag(L4_exception_ipc::Msg_size, 0, L4_msg_tag::Transfer_fpu,
864 L4_msg_tag::Label_exception);
869 r.ref(L4_obj_ref(_exc_handler.raw() << L4_obj_ref::Cap_shift, L4_obj_ref::Ipc_call_ipc));
871 handler->invoke(r.ref(), rights, &r, utcb);
874 saved_state.restore(utcb);
876 if (EXPECT_FALSE(r.tag().has_error()))
878 if (Config::conservative)
880 printf(" exception fault %s error = 0x%lx\n",
881 utcb->error.snd_phase() ? "send" : "rcv",
883 kdb_ke("ipc to pager failed");
886 state_del(Thread_in_exception);
888 else if (r.tag().proto() == L4_msg_tag::Label_allow_syscall)
889 state_add(Thread_dis_alien);
891 // restore original utcb_handler
892 _utcb_handler = old_utcb_handler;
894 // FIXME: handle not existing pager properly
895 // for now, just ignore any errors
899 /* return 1 if exception could be handled
900 * return 0 if not for send_exception and halt thread
902 PUBLIC inline NEEDS["task.h", "trap_state.h",
903 Thread::fast_return_to_user,
904 Thread::save_fpu_state_to_utcb]
906 Thread::send_exception(Trap_state *ts)
908 assert(cpu_lock.test());
910 Vcpu_state *vcpu = vcpu_state().access();
912 if (vcpu_exceptions_enabled(vcpu))
914 // do not reflect debug exceptions to the VCPU but handle them in
916 if (EXPECT_FALSE(ts->is_debug_exception()
917 && !(vcpu->state & Vcpu_state::F_debug_exc)))
920 if (_exc_cont.valid())
922 vcpu_enter_kernel_mode(vcpu);
924 LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
925 Vcpu_log *l = tbe->payload<Vcpu_log>();
927 l->state = vcpu->_saved_state;
930 l->trap = ts->trapno();
931 l->err = ts->error();
932 l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
934 memcpy(&vcpu->_ts, ts, sizeof(Trap_state));
935 save_fpu_state_to_utcb(ts, utcb().access());
936 fast_return_to_user(vcpu->_entry_ip, vcpu->_sp, vcpu_state().usr().get());
939 // local IRQs must be disabled because we dereference a Thread_ptr
940 if (EXPECT_FALSE(_exc_handler.is_kernel()))
943 if (!send_exception_arch(ts))
944 return 0; // do not send exception
946 unsigned char rights = 0;
947 Kobject_iface *pager = _exc_handler.ptr(space(), &rights);
949 if (EXPECT_FALSE(!pager))
951 /* no pager (anymore), just ignore the exception, return success */
952 LOG_TRACE("Exception invalid handler", "exc", this,
953 __fmt_exception_invalid_handler,
954 Log_exc_invalid *l = tbe->payload<Log_exc_invalid>();
955 l->cap_idx = _exc_handler.raw());
956 if (EXPECT_FALSE(space() == sigma0_task))
958 WARNX(Error, "Sigma0 raised an exception --> HALT\n");
962 pager = this; // block on ourselves
965 state_change(~Thread_cancel, Thread_in_exception);
967 return exception(pager, ts, rights);
// Attempt to satisfy an object-capability transfer without a real mapping
// when the receive buffer asked for an ID (is_rcv_id): same address space
// gets the sender's raw flexpage back; otherwise, if the capability's
// object is local to the receiver's space, its object ID (tagged with the
// transfer rights) is written. rcv_word[-1] is the receive slot filled just
// before this call in transfer_msg_items.
972 Thread::try_transfer_local_id(L4_buf_iter::Item const *const buf,
973 L4_fpage sfp, Mword *rcv_word, Thread* snd,
976 if (buf->b.is_rcv_id())
978 if (snd->space() == rcv->space())
981 rcv_word[-1] = sfp.raw();
986 unsigned char rights = 0;
// Look the capability up in the sender's object space.
987 Obj_space::Capability cap = snd->space()->obj_space()->lookup(sfp.obj_index());
988 Kobject_iface *o = cap.obj();
989 rights = cap.rights();
990 if (EXPECT_TRUE(o && o->is_local(rcv->space())))
// Deliver the object ID plus the capability rights bits.
993 rcv_word[-1] = o->obj_id() | Mword(rights);
1004 Thread::transfer_msg_items(L4_msg_tag const &tag, Thread* snd, Utcb *snd_utcb,
1005 Thread *rcv, Utcb *rcv_utcb,
1006 unsigned char rights)
1008 // LOG_MSG_3VAL(current(), "map bd=", rcv_utcb->buf_desc.raw(), 0, 0);
1009 L4_buf_iter mem_buffer(rcv_utcb, rcv_utcb->buf_desc.mem());
1010 L4_buf_iter io_buffer(rcv_utcb, rcv_utcb->buf_desc.io());
1011 L4_buf_iter obj_buffer(rcv_utcb, rcv_utcb->buf_desc.obj());
1012 L4_snd_item_iter snd_item(snd_utcb, tag.words());
1013 register int items = tag.items();
1014 Mword *rcv_word = rcv_utcb->values + tag.words();
1016 // XXX: damn X-CPU state modification
1017 // snd->prepare_long_ipc(rcv);
1020 for (;items > 0 && snd_item.more();)
1022 if (EXPECT_FALSE(!snd_item.next()))
1024 snd->set_ipc_error(L4_error::Overflow, rcv);
1028 L4_snd_item_iter::Item const *const item = snd_item.get();
1030 if (item->b.is_void())
1031 { // XXX: not sure if void fpages are needed
1032 // skip send item and current rcv_buffer
1037 L4_buf_iter *buf_iter = 0;
1039 switch (item->b.type())
1041 case L4_msg_item::Map:
1042 switch (L4_fpage(item->d).type())
1044 case L4_fpage::Memory: buf_iter = &mem_buffer; break;
1045 case L4_fpage::Io: buf_iter = &io_buffer; break;
1046 case L4_fpage::Obj: buf_iter = &obj_buffer; break;
1054 if (EXPECT_FALSE(!buf_iter))
1056 // LOG_MSG_3VAL(snd, "lIPCm0", 0, 0, 0);
1057 snd->set_ipc_error(L4_error::Overflow, rcv);
1061 L4_buf_iter::Item const *const buf = buf_iter->get();
1063 if (EXPECT_FALSE(buf->b.is_void() || buf->b.type() != item->b.type()))
1065 // LOG_MSG_3VAL(snd, "lIPCm1", buf->b.raw(), item->b.raw(), 0);
1066 snd->set_ipc_error(L4_error::Overflow, rcv);
1071 assert_kdb (item->b.type() == L4_msg_item::Map);
1072 L4_fpage sfp(item->d);
1073 *rcv_word = (item->b.raw() & ~0x0ff7) | (sfp.raw() & 0x0ff0);
1077 if (!try_transfer_local_id(buf, sfp, rcv_word, snd, rcv))
1079 // we need to do a real mapping¿
1081 // diminish when sending via restricted ipc gates
1082 if (sfp.type() == L4_fpage::Obj)
1083 sfp.mask_rights(L4_fpage::Rights(rights | L4_fpage::RX));
1085 L4_error err = fpage_map(snd->space(), sfp,
1086 rcv->space(), L4_fpage(buf->d), item->b.raw(), &rl);
1088 if (EXPECT_FALSE(!err.ok()))
1090 snd->set_ipc_error(err, rcv);
1098 if (!item->b.compund())
1102 if (EXPECT_FALSE(items))
1104 snd->set_ipc_error(L4_error::Overflow, rcv);
1113 * \pre Runs on the sender CPU
1115 PRIVATE inline NEEDS[Thread::do_remote_abort_send]
1117 Thread::abort_send(L4_error const &e, Thread *partner)
1119 state_del_dirty(Thread_send_in_progress | Thread_polling | Thread_ipc_in_progress
1120 | Thread_transfer_in_progress);
1122 if (_timeout && _timeout->is_set())
1127 if (partner->cpu() == current_cpu())
1129 if (in_sender_list())
1131 sender_dequeue(partner->sender_list());
1132 partner->vcpu_update_state();
1135 utcb().access()->error = e;
1139 return do_remote_abort_send(e, partner);
1145 * \pre Runs on the sender CPU
1149 Thread::do_send_wait(Thread *partner, L4_timeout snd_t)
1151 state_add_dirty(Thread_polling);
1153 IPC_timeout timeout;
1155 if (EXPECT_FALSE(snd_t.is_finite()))
1157 Unsigned64 tval = snd_t.microsecs(Timer::system_clock(), utcb().access(true));
1158 // Zero timeout or timeout expired already -- give up
1160 return abort_send(L4_error::Timeout, partner);
1162 set_timeout(&timeout);
1163 timeout.set(tval, cpu());
1168 if ((state() & (Thread_ipc_in_progress | Thread_polling
1169 | Thread_cancel | Thread_transfer_in_progress))
1170 == (Thread_ipc_in_progress | Thread_polling))
1172 state_del_dirty(Thread_ready);
1176 // ipc handshake bit is set
1177 if ((state() & (Thread_transfer_in_progress | Thread_receiving
1178 | Thread_ipc_in_progress))
1179 != Thread_ipc_in_progress)
1182 if (EXPECT_FALSE(state() & Thread_cancel))
1183 return abort_send(L4_error::Canceled, partner);
1185 // FIXME: existence check
1187 if (EXPECT_FALSE(0 && partner->is_invalid()))
1189 state_del_dirty(Thread_send_in_progress | Thread_polling
1190 | Thread_ipc_in_progress | Thread_transfer_in_progress);
1192 if (_timeout && _timeout->is_set())
1197 utcb().access()->error = L4_error::Not_existent;
1202 // Make sure we're really still in IPC
1203 assert_kdb (state() & Thread_ipc_in_progress);
1205 state_add_dirty(Thread_polling);
1208 state_del_dirty(Thread_polling);
1210 if (EXPECT_FALSE((state() & (Thread_send_in_progress | Thread_cancel))
1211 == (Thread_send_in_progress | Thread_cancel)))
1212 return abort_send(L4_error::Canceled, partner);
1214 // reset is only an simple dequeing operation from an double
1215 // linked list, so we dont need an extra preemption point for this
1217 if (EXPECT_FALSE(timeout.has_hit() && (state() & (Thread_send_in_progress
1218 | Thread_ipc_in_progress)) ==
1219 Thread_send_in_progress))
1220 return abort_send(L4_error::Timeout, partner);
1229 //---------------------------------------------------------------------
1230 IMPLEMENTATION [!mp]:
1234 Thread::set_ipc_send_rights(unsigned char)
1237 PRIVATE inline NEEDS ["l4_types.h"]
1239 Thread::remote_handshake_receiver(L4_msg_tag const &, Thread *,
1240 bool, L4_timeout, Syscall_frame *, unsigned char)
1242 kdb_ke("Remote IPC in UP kernel");
1248 Thread::ipc_remote_receiver_ready(Receiver *)
1249 { kdb_ke("Remote IPC in UP kernel"); return false; }
1254 Thread::do_remote_abort_send(L4_error const &, Thread *)
1255 { kdb_ke("Remote abort send on UP kernel"); return false; }
1257 //---------------------------------------------------------------------
1260 EXTENSION class Thread
1263 unsigned char _ipc_send_rights;
1266 struct Ipc_remote_request;
1268 struct Ipc_remote_request
1272 Syscall_frame *regs;
1273 unsigned char rights;
1280 struct Ready_queue_request;
1282 struct Ready_queue_request
1288 enum Result { Done, Wrong_cpu, Not_existent };
1292 //---------------------------------------------------------------------
1293 IMPLEMENTATION [mp]:
1298 Thread::set_ipc_send_rights(unsigned char c)
1300 _ipc_send_rights = c;
1306 Thread::do_remote_abort_send(L4_error const &e, Thread *partner)
1308 if (partner->Receiver::abort_send(current_thread()))
1311 utcb().access()->error = e;
1312 schedule_if(handle_drq());
1318 * Runs on the receiver CPU in the context of recv.
1319 * The 'this' pointer is the sender.
1323 Thread::ipc_remote_receiver_ready(Receiver *recv)
1325 //printf(" remote ready: %x.%x \n", id().task(), id().lthread());
1326 //LOG_MSG_3VAL(this, "recvr", Mword(recv), 0, 0);
1327 assert_kdb (recv->cpu() == current_cpu());
1329 recv->ipc_init(this);
1331 Syscall_frame *regs = _snd_regs;
1333 recv->vcpu_disable_irqs();
1334 //printf(" transfer to %p\n", recv);
1335 bool success = transfer_msg(regs->tag(), nonull_static_cast<Thread*>(recv), regs, _ipc_send_rights);
1336 //printf(" done\n");
1337 regs->tag(L4_msg_tag(regs->tag(), success ? 0 : L4_msg_tag::Error));
1338 if (success && partner() == nonull_static_cast<Thread*>(recv))
1339 nonull_static_cast<Thread*>(recv)->set_caller(this, _ipc_send_rights);
1342 recv->state_del_dirty(Thread_ipc_receiving_mask | Thread_ipc_in_progress);
1344 // dequeue sender from receiver's sending queue
1345 sender_dequeue(recv->sender_list());
1346 recv->vcpu_update_state();
1348 Ready_queue_request rq;
1350 rq.state_add = Thread_transfer_in_progress;
1351 if (Receiver::prepared())
1352 { // same as in Receiver::prepare_receive_dirty_2
1353 rq.state_del = Thread_ipc_sending_mask;
1354 rq.state_add |= Thread_receiving;
1359 drq(handle_remote_ready_enqueue, &rq);
1360 current()->schedule_if(current()->handle_drq());
1361 //printf(" wakeup sender done\n");
1366 PRIVATE inline NOEXPORT
1368 Thread::remote_ipc_send(Context *src, Ipc_remote_request *rq)
1371 //LOG_MSG_3VAL(this, "rse", current_cpu(), (Mword)src, 0);
1373 LOG_MSG_3VAL(this, "rsend", (Mword)src, 0, 0);
1374 printf("CPU[%u]: remote IPC send ...\n"
1375 " partner=%p [%u]\n"
1376 " sender =%p [%u] regs=%p\n"
1379 rq->partner, rq->partner->cpu(),
1386 switch (__builtin_expect(rq->partner->check_sender(this, rq->timeout), Ok))
1389 rq->result = Failed;
1392 rq->result = Queued;
1398 // trigger remote_ipc_receiver_ready path, because we may need to grab locks
1399 // and this is forbidden in a DRQ handler. So transfer the IPC in usual
1400 // thread code. However, this induces a overhead of two extra IPIs.
1401 if (rq->tag.items())
1403 set_receiver(rq->partner);
1404 sender_enqueue(rq->partner->sender_list(), sched_context()->prio());
1405 rq->partner->vcpu_set_irq_pending();
1407 //LOG_MSG_3VAL(rq->partner, "pull", dbg_id(), 0, 0);
1408 rq->result = Queued | Receive_in_progress;
1409 rq->partner->state_add_dirty(Thread_ready);
1410 rq->partner->sched()->deblock(current_cpu());
1413 rq->partner->vcpu_disable_irqs();
1414 bool success = transfer_msg(rq->tag, rq->partner, rq->regs, _ipc_send_rights);
1415 rq->result = success ? Ok : Failed;
1417 if (success && partner() == rq->partner)
1418 rq->partner->set_caller(this, _ipc_send_rights);
1420 rq->partner->state_change_dirty(~(Thread_ipc_receiving_mask | Thread_ipc_in_progress), Thread_ready);
1421 // hm, should be done by lazy queueing: rq->partner->ready_enqueue();
1427 Thread::handle_remote_ipc_send(Drq *src, Context *, void *_rq)
1429 Ipc_remote_request *rq = (Ipc_remote_request*)_rq;
1430 bool r = nonull_static_cast<Thread*>(src->context())->remote_ipc_send(src->context(), rq);
1431 //LOG_MSG_3VAL(src, "rse<", current_cpu(), (Mword)src, r);
1432 return r ? Drq::Need_resched : 0;
1438 Thread::handle_remote_ready_enqueue(Drq *, Context *self, void *_rq)
1440 Ready_queue_request *rq = (Ready_queue_request*)_rq;
1442 //LOG_MSG_3VAL(current(), "rre", rq->state_add, rq->state_del, c->state());
1444 c->state_add_dirty(rq->state_add);
1445 c->state_del_dirty(rq->state_del);
1446 rq->result = Ready_queue_request::Done;
1448 if (EXPECT_FALSE(c->state() & Thread_ready))
1449 return Drq::Need_resched;
1451 c->state_add_dirty(Thread_ready);
1452 // hm, should be done by our lazy queueing: c->ready_enqueue();
1453 return Drq::Need_resched;
1460 * \pre Runs on the sender CPU
1462 PRIVATE //inline NEEDS ["mp_request.h"]
1464 Thread::remote_handshake_receiver(L4_msg_tag const &tag, Thread *partner,
1466 L4_timeout snd_t, Syscall_frame *regs,
1467 unsigned char rights)
1469 // Flag that there must be no switch in the receive path.
1470 // This flag also prevents the receive path from accessing
1471 // the thread state of a remote sender.
1472 Ipc_remote_request rq;
1474 rq.have_rcv = have_receive;
1475 rq.partner = partner;
1476 rq.timeout = !snd_t.is_zero();
1481 set_receiver(partner);
1483 state_add_dirty(Thread_send_in_progress | Thread_ipc_in_progress);
1485 partner->drq(handle_remote_ipc_send, &rq,
1486 remote_prepare_receive);
1494 Thread::remote_prepare_receive(Drq *src, Context *, void *arg)
1496 Context *c = src->context();
1497 Ipc_remote_request *rq = (Ipc_remote_request*)arg;
1498 //printf("CPU[%2u:%p]: remote_prepare_receive (err=%x)\n", current_cpu(), c, rq->err.error());
1500 if (EXPECT_FALSE(rq->result & Queued))
1503 c->state_del(Thread_send_in_progress);
1504 if (EXPECT_FALSE((rq->result & Failed) || !rq->have_rcv))
1507 Thread *t = nonull_static_cast<Thread*>(c);
1508 t->prepare_receive_dirty_2();
1512 //---------------------------------------------------------------------------
1513 IMPLEMENTATION [debug]:
// Trace-buffer formatter for the "page fault, invalid pager" event
// (bound to symbol __fmt_page_fault_invalid_pager); prints the capability
// index, fault address, and error code. Returns the snprintf length.
1517 Thread::log_fmt_pf_invalid(Tb_entry *e, int max, char *buf)
1519 Log_pf_invalid *l = e->payload<Log_pf_invalid>();
1520 return snprintf(buf, max, "InvCap C:%lx pfa=%lx err=%lx", l->cap_idx, l->pfa, l->err);
// Trace-buffer formatter for the "exception, invalid handler" event
// (bound to symbol __fmt_exception_invalid_handler); prints the capability
// index. Returns the snprintf length.
1525 Thread::log_fmt_exc_invalid(Tb_entry *e, int max, char *buf)
1527 Log_exc_invalid *l = e->payload<Log_exc_invalid>();
1528 return snprintf(buf, max, "InvCap C:%lx", l->cap_idx);