// Trace-buffer record logged when a page fault hits an invalid pager cap.
8 struct Log_pf_invalid : public Tb_entry
// Render this trace entry as text (implemented in the [debug] section below).
13 void print(String_buffer *buf) const;
// Trace-buffer record logged when an exception hits an invalid handler cap.
16 struct Log_exc_invalid : public Tb_entry
// Render this trace entry as text (implemented in the [debug] section below).
19 void print(String_buffer *buf) const;
25 #include "l4_buf_iter.h"
// IPC-related extension of class Thread (interface part).
30 EXTENSION class Thread
// Outcome of the sender/receiver rendezvous handshake; see check_sender().
33 enum class Check_sender
// Request descriptor shipped to a remote CPU for cross-CPU IPC
// (used as the payload of a DRQ; see remote_handshake_receiver()).
41 struct Ipc_remote_request
// Rights the sender attaches to the mapped/sent flexpages.
46 L4_fpage::Rights rights;
// Filled in by the remote side: outcome of the handshake/transfer.
50 Thread::Check_sender result;
// Sender's syscall frame, kept while this thread is queued as a sender.
53 Syscall_frame *_snd_regs;
// Rights attached to the current send operation (set via set_ipc_send_rights()).
54 L4_fpage::Rights _ipc_send_rights;
// Saver for the UTCB receive-buffer words (buf_desc, buffers[0..1]) that
// nested IPC overwrites; see the constructor/restore() implementations below.
60 Buf_utcb_saver(Utcb const *u);
// Write the saved buffer words back into `u`.
61 void restore(Utcb *u);
68 * Save critical contents of UTCB during nested IPC.
// Extends Buf_utcb_saver: additionally saves values[0..1], the message
// words clobbered by page-fault IPC (see handle_page_fault_pager()).
70 class Pf_msg_utcb_saver : public Buf_utcb_saver
73 Pf_msg_utcb_saver(Utcb const *u);
74 void restore(Utcb *u);
80 // ------------------------------------------------------------------------
// Debug extension: trace-entry formatters exported under fixed asm symbol
// names so the tracing subsystem can reference them by name.
85 EXTENSION class Thread
88 static unsigned log_fmt_pf_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_page_fault_invalid_pager");
89 static unsigned log_fmt_exc_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_exception_invalid_handler");
92 // ------------------------------------------------------------------------
95 // IPC setup, and handling of ``short IPC'' and page-fault IPC
97 // IDEAS for enhancing this implementation:
99 // Volkmar has suggested a possible optimization for
100 // short-flexpage-to-long-message-buffer transfers: Currently, we have
101 // to resort to long IPC in that case because the message buffer might
102 // contain a receive-flexpage option. An easy optimization would be
103 // to cache the receive-flexpage option in the TCB for that case.
104 // This would save us the long-IPC setup because we wouldn't have to
105 // touch the receiver's user memory in that case. Volkmar argues that
106 // cases like that are quite common -- for example, imagine a pager
107 // which at the same time is also a server for ``normal'' requests.
109 // The handling of cancel and timeout conditions could be improved as
110 // follows: Cancel and Timeout should not reset the ipc_in_progress
111 // flag. Instead, they should just set and/or reset a flag of their
112 // own that is checked every time an (IPC) system call wants to go to
113 // sleep. That would mean that IPCs that do not block are not
114 // cancelled or aborted.
117 #include <cstdlib> // panic()
119 #include "l4_types.h"
120 #include "l4_msg_item.h"
123 #include "cpu_lock.h"
124 #include "ipc_timeout.h"
125 #include "lock_guard.h"
127 #include "map_util.h"
128 #include "processor.h"
// Called on a queued sender when its receiver aborts the rendezvous.
134 Thread::ipc_receiver_aborted()
// We must still be enqueued in a receiver's sender list at this point.
136 assert (wait_queue());
// Deliver the queued message to `recv` once the receiver became ready.
// Runs with the sender's saved syscall frame (_snd_regs).
144 Thread::ipc_send_msg(Receiver *recv)
146 Syscall_frame *regs = _snd_regs;
147 bool success = transfer_msg(regs->tag(), nonull_static_cast<Thread*>(recv), regs,
// Rendezvous finished: leave the receiver's sender queue.
149 sender_dequeue(recv->sender_list());
150 recv->vcpu_update_state();
// Report transfer failure to the sender via the tag's error bit.
152 regs->tag(L4_msg_tag(regs->tag(), success ? 0 : L4_msg_tag::Error));
154 Mword state_del = Thread_ipc_mask | Thread_ipc_transfer;
155 Mword state_add = Thread_ready;
156 if (Receiver::prepared())
157 // same as in Receiver::prepare_receive_dirty_2
158 state_add |= Thread_receive_wait;
// Possibly cross-CPU state change; on local success hand the CPU to the receiver.
160 if (xcpu_state_change(~state_del, state_add, true))
161 recv->switch_to_locked(this);
// Rewrite this sender's label according to `cnt` rules, each a quadruple of
// (test_mask, test_value, del_mask, add_mask) words in `todo`.
166 Thread::modify_label(Mword const *todo, int cnt)
169 Mword l = _snd_regs->from_spec();
170 for (int i = 0; i < cnt*4; i += 4)
172 Mword const test_mask = todo[i];
173 Mword const test = todo[i+1];
// Rule matches when the masked label equals the test value.
174 if ((l & test_mask) == test)
176 Mword const del_mask = todo[i+2];
177 Mword const add_mask = todo[i+3];
// Rewrite: clear del_mask bits, then set add_mask bits.
179 l = (l & ~del_mask) | add_mask;
// Setter: install the syscall frame used for the pending send phase.
188 Thread::snd_regs(Syscall_frame *r)
192 /** Page fault handler.
193 This handler suspends any ongoing IPC, then sets up page-fault IPC.
194 Finally, the ongoing IPC's state (if any) is restored.
195 @param pfa page-fault virtual address
196 @param error_code page-fault error code.
200 Thread::handle_page_fault_pager(Thread_ptr const &_pager,
201 Address pfa, Mword error_code,
202 L4_msg_tag::Protocol protocol)
// Alien threads do not get page faults reflected to their pager.
204 if (EXPECT_FALSE((state() & Thread_alien)))
207 auto guard = lock_guard(cpu_lock);
// Resolve the pager capability; `rights` receives the capability's rights.
209 L4_fpage::Rights rights;
210 Kobject_iface *pager = _pager.ptr(space(), &rights);
214 WARN("CPU%u: Pager of %lx is invalid (pfa=" L4_PTR_FMT
215 ", errorcode=" L4_PTR_FMT ") to %lx (pc=%lx)\n",
216 cxx::int_value<Cpu_number>(current_cpu()), dbg_id(), pfa,
217 error_code, cxx::int_value<Cap_index>(_pager.raw()), regs()->ip());
220 LOG_TRACE("Page fault invalid pager", "ipfh", this, Log_pf_invalid,
221 l->cap_idx = _pager.raw();
225 pager = this; // block on ourselves
228 // set up a register block used as an IPC parameter block for the
232 // save the UTCB fields affected by PF IPC
233 Mword vcpu_irqs = vcpu_disable_irqs();
235 Utcb *utcb = this->utcb().access(true);
236 Pf_msg_utcb_saver saved_utcb_fields(utcb);
// Receive window: one map item accepting mappings anywhere in the space.
239 utcb->buf_desc = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
240 utcb->buffers[0] = L4_msg_item::map(0).raw();
241 utcb->buffers[1] = L4_fpage::all_spaces().raw();
// Message payload: word 0 encodes pfa+error code, word 1 is the faulting IP.
243 utcb->values[0] = PF::addr_to_msgword0(pfa, error_code);
244 utcb->values[1] = regs()->ip(); //PF::pc_to_msgword1 (regs()->ip(), error_code));
246 L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);
// Tag: two untyped words, no items, caller-supplied page-fault protocol.
248 L4_msg_tag tag(2, 0, 0, protocol);
// Perform a call (send + closed receive) to the pager.
253 r.ref(L4_obj_ref(_pager.raw(), L4_obj_ref::Ipc_call_ipc));
254 pager->invoke(r.ref(), rights, &r, utcb);
259 if (EXPECT_FALSE(r.tag().has_error()))
// A vanished pager during the send phase of a genuine user-mode fault is
// handled specially (unless we were cancelled meanwhile).
261 if (utcb->error.snd_phase()
262 && (utcb->error.error() == L4_error::Not_existent)
263 && PF::is_usermode_error(error_code)
264 && !(state() & Thread_cancel))
271 // If the pager rejects the mapping, it replies -1 in msg.w0
272 if (EXPECT_FALSE (utcb->values[0] == Mword(-1)))
276 // restore previous IPC state
278 saved_utcb_fields.restore(utcb);
280 vcpu_restore_irqs(vcpu_irqs);
// Receiver-side handshake: decide whether `sender` may transfer immediately
// (Ok), must enqueue and wait (Queued), or fails outright (Failed).
// `timeout` is false for a zero send timeout, i.e. "do not wait".
286 Thread::check_sender(Thread *sender, bool timeout)
288 if (EXPECT_FALSE(is_invalid()))
290 sender->utcb().access()->error = L4_error::Not_existent;
291 return Check_sender::Failed;
// Receiver not ready for this sender; without a timeout this is an
// immediate timeout error.
294 if (EXPECT_FALSE(!sender_ok(sender)))
298 sender->utcb().access()->error = L4_error::Timeout;
299 return Check_sender::Failed;
// Enqueue the sender ordered by scheduling priority and flag a pending
// vCPU IRQ for the receiver.
302 sender->set_wait_queue(sender_list());
303 sender->sender_enqueue(sender_list(), sender->sched_context()->prio());
304 vcpu_set_irq_pending();
305 return Check_sender::Queued;
// Receiver is ready: the transfer may proceed right away.
308 return Check_sender::Ok;
312 * Setup a IPC-like timer for the given timeout.
313 * \param timeout The L4 ABI timeout value that shall be used
314 * \param utcb The UTCB that might contain an absolute timeout
315 * \param timer The timeout/timer object that shall be queued.
317 * This function does nothing if the timeout is *never*.
318 * Sets Thread_ready and Thread_timeout in the thread state
319 * if the timeout is zero or has already hit (is in the past).
320 * Or enqueues the given timer object with the finite timeout calculated
323 PUBLIC inline NEEDS["timer.h"]
325 Thread::setup_timer(L4_timeout timeout, Utcb const *utcb, Timeout *timer)
// Infinite timeout: nothing to arm.
327 if (EXPECT_TRUE(timeout.is_never()))
// Zero timeout: treat as already expired.
330 if (EXPECT_FALSE(timeout.is_zero()))
332 state_add_dirty(Thread_ready | Thread_timeout);
// Convert the (possibly absolute, UTCB-held) timeout into an absolute tick.
338 Unsigned64 sysclock = Timer::system_clock();
339 Unsigned64 tval = timeout.microsecs(sysclock, utcb);
341 if (EXPECT_TRUE((tval > sysclock)))
342 set_timeout(timer, tval);
343 else // timeout already hit
344 state_add_dirty(Thread_ready | Thread_timeout);
// Block the current thread until the timeout `t` fires or it is woken.
// Arms the timer, drops Thread_ready, and yields the CPU.
348 PRIVATE inline NEEDS["timer.h"]
349 void Thread::goto_sleep(L4_timeout const &t, Sender *sender, Utcb *utcb)
353 state_del_dirty(Thread_ready);
354 setup_timer(t, utcb, &timeout);
357 switch_sched(sched(), &Sched_context::rq.current());
// On return we must have been made ready again (wakeup or timeout).
363 assert (state() & Thread_ready);
369 * @pre cpu_lock must be held
371 PRIVATE inline NEEDS["logdefs.h"]
// Same-CPU sender-side handshake: ask the partner whether we may transfer.
// A zero send timeout (snd_t.is_zero()) means "fail instead of queueing".
373 Thread::handshake_receiver(Thread *partner, L4_timeout snd_t)
375 assert(cpu_lock.test());
377 switch (expect(partner->check_sender(this, !snd_t.is_zero()), Check_sender::Ok))
379 case Check_sender::Failed:
380 return Check_sender::Failed;
// Queued: we must wait for the receiver; mark ourselves send-waiting.
381 case Check_sender::Queued:
382 state_add_dirty(Thread_send_wait);
383 return Check_sender::Queued;
// Ok: put the partner into the in-transfer state so we can copy directly.
385 partner->state_change_dirty(~(Thread_ipc_mask | Thread_ready), Thread_ipc_transfer);
386 return Check_sender::Ok;
// Record an IPC error on both ends: `e` on this (sender) thread and the
// receive-phase variant of `e` on `rcv`.
392 Thread::set_ipc_error(L4_error const &e, Thread *rcv)
394 utcb().access()->error = e;
395 rcv->utcb().access()->error = L4_error(e, L4_error::Rcv);
// Pick the next sender eligible for our receive phase.
// For a closed wait (`sender` != 0) only that sender qualifies; for an open
// wait the head of the priority-ordered sender list is taken.
401 Thread::get_next_sender(Sender *sender)
403 if (!sender_list()->empty())
405 if (sender) // closed wait
// The designated sender counts only if it is actually queued on us.
407 if (EXPECT_TRUE(sender->in_sender_list())
408 && EXPECT_TRUE(sender_list() == sender->wait_queue()))
// Open wait: take the highest-priority queued sender.
414 Sender *next = Sender::cast(sender_list()->first());
415 assert (next->in_sender_list());
// Make the IPC partner runnable after a transfer and possibly switch to it
// directly. Returns via deblock_and_schedule() when a reschedule happened
// (so the caller knows it may have blocked); remote partners are woken via
// a cross-CPU state change instead.
425 Thread::activate_ipc_partner(Thread *partner, Cpu_number current_cpu,
426 bool do_switch, bool closed_wait)
428 if (partner->home_cpu() == current_cpu)
430 auto &rq = Sched_context::rq.current();
431 Sched_context *cs = rq.current_sched();
// Only switch directly on a closed wait (call) or when we currently run
// on a scheduling context that is not our own.
432 do_switch = do_switch && (closed_wait || cs != sched());
433 partner->state_change_dirty(~Thread_ipc_transfer, Thread_ready);
436 schedule_if(switch_exec_locked(partner, Not_Helping) != Switch::Ok);
440 return deblock_and_schedule(partner);
// Partner lives on another CPU: wake it remotely.
443 partner->xcpu_state_change(~Thread_ipc_transfer, Thread_ready);
448 * Send an IPC message.
449 * Block until we can send the message or the timeout hits.
450 * @param partner the receiver of our message
451 * @param t a timeout specifier
452 * @param regs sender's IPC registers
453 * @pre cpu_lock must be held
454 * @return sender's IPC error code
456 * @todo review closed wait handling of sender during possible
457 * quiescent states and blocking.
461 Thread::do_ipc(L4_msg_tag const &tag, bool have_send, Thread *partner,
462 bool have_receive, Sender *sender,
463 L4_timeout_pair t, Syscall_frame *regs,
464 L4_fpage::Rights rights)
466 assert (cpu_lock.test());
467 assert (this == current());
469 bool do_switch = false;
471 assert (!(state() & Thread_ipc_mask));
// Prepare the receive phase first so a combined send+receive is atomic.
473 prepare_receive(sender, have_receive ? regs : 0);
474 bool activate_partner = false;
475 Cpu_number current_cpu = ::current_cpu();
// ---- send phase -------------------------------------------------------
479 assert(!in_sender_list());
480 do_switch = tag.do_switch();
485 set_ipc_send_rights(rights);
// Same-CPU partner: fast local handshake; otherwise go through the
// cross-CPU (DRQ-based) handshake.
487 if (EXPECT_TRUE(current_cpu == partner->home_cpu()))
488 result = handshake_receiver(partner, t.snd);
491 // we have either per se X-CPU IPC or we ran into a
492 // IPC during migration (indicated by the pending DRQ)
494 result = remote_handshake_receiver(tag, partner, have_receive, t.snd,
497 // this may block, so we could have been migrated here
498 current_cpu = ::current_cpu();
501 switch (expect(result, Check_sender::Ok))
// Done: the remote path already completed the whole transfer.
503 case Check_sender::Done:
507 case Check_sender::Queued:
508 // set _snd_regs, to enable active receiving
510 ok = do_send_wait(partner, t.snd); // --- blocking point ---
511 current_cpu = ::current_cpu();
514 case Check_sender::Failed:
515 state_del_dirty(Thread_ipc_mask);
520 // mmh, we can reset the receivers timeout
521 // ping pong with timeouts will profit from it, because
522 // it will require much less sorting overhead
523 // if we don't reset the timeout, the possibility is very high
524 // that the receiver timeout is in the timeout queue
525 if (EXPECT_TRUE(current_cpu == partner->home_cpu()))
526 partner->reset_timeout();
// Direct transfer on the Ok path.
528 ok = transfer_msg(tag, partner, regs, rights);
530 // transfer is also a possible migration point
531 current_cpu = ::current_cpu();
533 // switch to receiving state
534 state_del_dirty(Thread_ipc_mask);
535 if (ok && have_receive)
536 state_add_dirty(Thread_receive_wait);
// Don't try to activate ourselves when sending to ourselves.
538 activate_partner = partner != this;
542 if (EXPECT_FALSE(!ok))
544 // send failed, so do not switch to receiver directly and skip receive phase
545 have_receive = false;
546 regs->tag(L4_msg_tag(0, 0, L4_msg_tag::Error, 0));
// ---- receive phase ----------------------------------------------------
551 assert (have_receive);
552 state_add_dirty(Thread_receive_wait);
555 // only do direct switch on closed wait (call) or if we run on a foreign
556 // scheduling context
559 have_receive = state() & Thread_receive_wait;
563 assert (!in_sender_list());
564 assert (!(state() & Thread_send_wait));
565 next = get_next_sender(sender);
// Activating the partner may block/reschedule; afterwards re-check for a
// sender that was queued meanwhile.
569 && activate_ipc_partner(partner, current_cpu, do_switch && !next,
570 have_receive && sender))
572 // blocked so might have a new sender queued
573 have_receive = state() & Thread_receive_wait;
574 if (have_receive && !next)
575 next = get_next_sender(sender);
// A sender is ready: actively pull its message in.
580 state_change_dirty(~Thread_ipc_mask, Thread_receive_in_progress);
581 next->ipc_send_msg(this);
582 state_del_dirty(Thread_ipc_mask);
584 else if (have_receive)
// No sender yet: sleep until the receive timeout or a wakeup.
586 if ((state() & Thread_full_ipc_mask) == Thread_receive_wait)
587 goto_sleep(t.rcv, sender, utcb().access(true));
// A call that got no reply must drop the partner's reply (caller) binding.
589 if (sender && sender == partner && partner->caller() == this)
590 partner->reset_caller();
// ---- epilogue: map remaining IPC state to the result tag --------------
593 Mword state = this->state();
595 if (EXPECT_TRUE (!(state & Thread_full_ipc_mask)))
// An in-flight transfer into us must finish before we can return; keep
// sleeping until Thread_ipc_transfer clears.
598 while (EXPECT_FALSE(state & Thread_ipc_transfer))
600 state_del_dirty(Thread_ready);
602 state = this->state();
605 if (EXPECT_TRUE (!(state & Thread_full_ipc_mask)))
608 if (state & Thread_ipc_mask)
610 Utcb *utcb = this->utcb().access(true);
611 // the IPC has not been finished. could be timeout or cancel
612 // XXX should only modify the error-code part of the status code
614 if (EXPECT_FALSE(state & Thread_cancel))
616 // we've presumably been reset!
617 regs->tag(commit_error(utcb, L4_error::R_canceled, regs->tag()));
620 regs->tag(commit_error(utcb, L4_error::R_timeout, regs->tag()));
622 state_del(Thread_full_ipc_mask);
// Copy the message described by `tag` from this thread to `receiver`,
// propagating any copy error through the receiver's result tag, and set up
// the reply (caller) capability when this send is part of a call.
626 PRIVATE inline NEEDS [Thread::copy_utcb_to]
628 Thread::transfer_msg(L4_msg_tag tag, Thread *receiver,
629 Syscall_frame *sender_regs, L4_fpage::Rights rights)
631 Syscall_frame* dst_regs = receiver->rcv_regs();
633 bool success = copy_utcb_to(tag, receiver, rights);
634 tag.set_error(!success);
// Hand the sender's label through to the receiver.
636 dst_regs->from(sender_regs->from_spec());
638 // setup the reply capability in case of a call
639 if (success && partner() == receiver)
640 receiver->set_caller(this, rights);
// Snapshot the UTCB receive-buffer words that nested IPC overwrites.
648 Buf_utcb_saver::Buf_utcb_saver(const Utcb *u)
650 buf_desc = u->buf_desc;
651 buf[0] = u->buffers[0];
652 buf[1] = u->buffers[1];
// Write the snapshot back into the UTCB.
657 Buf_utcb_saver::restore(Utcb *u)
659 u->buf_desc = buf_desc;
660 u->buffers[0] = buf[0];
661 u->buffers[1] = buf[1];
// Page-fault variant: additionally snapshot message words 0 and 1.
665 Pf_msg_utcb_saver::Pf_msg_utcb_saver(Utcb const *u) : Buf_utcb_saver(u)
667 msg[0] = u->values[0];
668 msg[1] = u->values[1];
// Restore buffers (base class) plus the two saved message words.
673 Pf_msg_utcb_saver::restore(Utcb *u)
675 Buf_utcb_saver::restore(u);
676 u->values[0] = msg[0];
677 u->values[1] = msg[1];
682 * \pre must run with local IRQs disabled (CPU lock held)
683 * to ensure that handler does not disappear meanwhile.
// Perform exception IPC: ship the trap state `ts` to `handler` as an
// exception message and apply the handler's reply.
687 Thread::exception(Kobject_iface *handler, Trap_state *ts, L4_fpage::Rights rights)
690 L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);
694 Mword vcpu_irqs = vcpu_disable_irqs();
// Remember the current UTCB handler so it can be restored after the IPC.
697 void *old_utcb_handler = _utcb_handler;
700 // fill registers for IPC
701 Utcb *utcb = this->utcb().access(true);
702 Buf_utcb_saver saved_state(utcb);
// Receive window: accept mappings anywhere (same setup as page-fault IPC).
704 utcb->buf_desc = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
705 utcb->buffers[0] = L4_msg_item::map(0).raw();
706 utcb->buffers[1] = L4_fpage::all_spaces().raw();
709 L4_msg_tag tag(L4_exception_ipc::Msg_size, 0, L4_msg_tag::Transfer_fpu,
710 L4_msg_tag::Label_exception);
// Call the exception handler through the generic invoke path.
715 r.ref(L4_obj_ref(_exc_handler.raw(), L4_obj_ref::Ipc_call_ipc));
717 handler->invoke(r.ref(), rights, &r, utcb);
720 saved_state.restore(utcb);
722 if (EXPECT_FALSE(r.tag().has_error()))
723 state_del(Thread_in_exception);
// Handler may allow the alien thread's pending syscall to proceed.
724 else if (r.tag().proto() == L4_msg_tag::Label_allow_syscall)
725 state_add(Thread_dis_alien);
727 // restore original utcb_handler
728 _utcb_handler = old_utcb_handler;
730 vcpu_restore_irqs(vcpu_irqs);
732 // FIXME: handle not existing pager properly
733 // for now, just ignore any errors
737 /* return 1 if exception could be handled
738 * return 0 if not for send_exception and halt thread
740 PUBLIC inline NEEDS["task.h", "trap_state.h",
741 Thread::fast_return_to_user,
742 Thread::save_fpu_state_to_utcb]
// Dispatch trap `ts`: either reflect it into vCPU mode or send exception
// IPC to the thread's exception handler.
744 Thread::send_exception(Trap_state *ts)
746 assert(cpu_lock.test());
748 Vcpu_state *vcpu = vcpu_state().access();
// vCPU path: deliver the exception by entering the vCPU kernel entry point.
750 if (vcpu_exceptions_enabled(vcpu))
752 // do not reflect debug exceptions to the VCPU but handle them in
754 if (EXPECT_FALSE(ts->is_debug_exception()
755 && !(vcpu->state & Vcpu_state::F_debug_exc)))
758 if (_exc_cont.valid(ts))
761 // before entering kernel mode to have original fpu state before
763 save_fpu_state_to_utcb(ts, utcb().access());
767 if (vcpu_enter_kernel_mode(vcpu))
769 // enter_kernel_mode has switched the address space from user to
770 // kernel space, so reevaluate the address of the VCPU state area
771 vcpu = vcpu_state().access();
774 LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log,
776 l->state = vcpu->_saved_state;
779 l->trap = ts->trapno();
780 l->err = ts->error();
781 l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
// Never returns: resumes user mode at the vCPU entry point.
784 fast_return_to_user(vcpu->_entry_ip, vcpu->_sp, vcpu_state().usr().get());
787 // local IRQs must be disabled because we dereference a Thread_ptr
788 if (EXPECT_FALSE(_exc_handler.is_kernel()))
// Give the architecture code first shot at handling/translating the trap.
791 if (!send_exception_arch(ts))
792 return 0; // do not send exception
794 L4_fpage::Rights rights = L4_fpage::Rights(0);
795 Kobject_iface *pager = _exc_handler.ptr(space(), &rights);
797 if (EXPECT_FALSE(!pager))
799 /* no pager (anymore), just ignore the exception, return success */
800 LOG_TRACE("Exception invalid handler", "ieh", this, Log_exc_invalid,
801 l->cap_idx = _exc_handler.raw());
// Sigma0 must never fault: halt instead of silently blocking.
802 if (EXPECT_FALSE(space()->is_sigma0()))
805 WARNX(Error, "Sigma0 raised an exception --> HALT\n");
809 pager = this; // block on ourselves
812 state_change(~Thread_cancel, Thread_in_exception);
814 return exception(pager, ts, rights);
// Try to satisfy a receive-id buffer without performing a real mapping:
// same-space transfers pass the raw fpage, and objects already present in
// the receiver's space pass their local object id plus diminished rights.
// Returns via the surrounding protocol whether a real map is still needed.
819 Thread::try_transfer_local_id(L4_buf_iter::Item const *const buf,
820 L4_fpage sfp, Mword *rcv_word, Thread* snd,
823 if (buf->b.is_rcv_id())
// Same address space: the sender's fpage is directly meaningful.
825 if (snd->space() == rcv->space())
828 rcv_word[-1] = sfp.raw();
// Different space: look the capability up and check whether the object is
// already local to the receiver.
833 Obj_space::Capability cap = snd->space()->lookup(sfp.obj_index());
834 Kobject_iface *o = cap.obj();
835 if (EXPECT_TRUE(o && o->is_local(rcv->space())))
// Effective rights: capability rights masked with the sent fpage rights.
837 Mword rights = cap.rights()
838 & cxx::int_value<L4_fpage::Rights>(sfp.rights());
840 rcv_word[-1] = o->obj_id() | rights;
// Copy an IPC message UTCB-to-UTCB: untyped words, then typed items, then
// optionally the FPU state (when both sides agree and rights permit).
848 PRIVATE static inline
849 bool FIASCO_WARN_RESULT
850 Thread::copy_utcb_to_utcb(L4_msg_tag const &tag, Thread *snd, Thread *rcv,
851 L4_fpage::Rights rights)
853 assert (cpu_lock.test())
855 Utcb *snd_utcb = snd->utcb().access();
856 Utcb *rcv_utcb = rcv->utcb().access();
857 Mword s = tag.words();
858 Mword r = Utcb::Max_words;
// Copy min(words, Max_words) untyped message words.
860 Mem::memcpy_mwords(rcv_utcb->values, snd_utcb->values, r < s ? r : s);
864 success = transfer_msg_items(tag, snd, snd_utcb, rcv, rcv_utcb, rights);
// FPU transfer only when requested, accepted by the receiver's buffer
// descriptor, and allowed by the send rights (write right required).
867 && tag.transfer_fpu()
868 && rcv_utcb->inherit_fpu()
869 && (rights & L4_fpage::Rights::W()))
870 snd->transfer_fpu(rcv);
// Dispatch the message copy to the right routine depending on which side
// currently has a trap-state handler installed (exception IPC) — a thread
// in exception IPC exposes its register state instead of its UTCB.
876 PUBLIC inline NEEDS[Thread::copy_utcb_to_ts, Thread::copy_utcb_to_utcb,
877 Thread::copy_ts_to_utcb]
878 bool FIASCO_WARN_RESULT
879 Thread::copy_utcb_to(L4_msg_tag tag, Thread* receiver,
880 L4_fpage::Rights rights)
882 // we cannot copy trap state to trap state!
883 assert (!this->_utcb_handler || !receiver->_utcb_handler);
// Sender in exception IPC: its trap state is the message source.
884 if (EXPECT_FALSE(this->_utcb_handler != 0))
885 return copy_ts_to_utcb(tag, this, receiver, rights);
// Receiver in exception IPC: its trap state is the message destination.
886 else if (EXPECT_FALSE(receiver->_utcb_handler != 0))
887 return copy_utcb_to_ts(tag, this, receiver, rights);
// Common case: plain UTCB-to-UTCB copy.
889 return copy_utcb_to_utcb(tag, this, receiver, rights);
// Walk the sender's typed items and match each against the receiver's
// buffer of the same kind (memory / I/O / object), performing real mappings
// via fpage_map() where a local-id shortcut is not possible.
// Any mismatch or exhaustion raises L4_error::Overflow on both sides.
894 Thread::transfer_msg_items(L4_msg_tag const &tag, Thread* snd, Utcb *snd_utcb,
895 Thread *rcv, Utcb *rcv_utcb,
896 L4_fpage::Rights rights)
898 // LOG_MSG_3VAL(current(), "map bd=", rcv_utcb->buf_desc.raw(), 0, 0);
899 Task *const rcv_t = nonull_static_cast<Task*>(rcv->space());
// One receive-buffer iterator per item kind, positioned by the buf_desc.
900 L4_buf_iter mem_buffer(rcv_utcb, rcv_utcb->buf_desc.mem());
901 L4_buf_iter io_buffer(rcv_utcb, rcv_utcb->buf_desc.io());
902 L4_buf_iter obj_buffer(rcv_utcb, rcv_utcb->buf_desc.obj());
903 L4_snd_item_iter snd_item(snd_utcb, tag.words());
904 int items = tag.items();
// Typed-item results are written after the untyped words.
905 Mword *rcv_word = rcv_utcb->values + tag.words();
907 // XXX: damn X-CPU state modification
908 // snd->prepare_long_ipc(rcv);
911 for (;items > 0 && snd_item.more();)
913 if (EXPECT_FALSE(!snd_item.next()))
915 snd->set_ipc_error(L4_error::Overflow, rcv);
919 L4_snd_item_iter::Item const *const item = snd_item.get();
921 if (item->b.is_void())
922 { // XXX: not sure if void fpages are needed
923 // skip send item and current rcv_buffer
// Select the receive-buffer iterator matching the item's fpage type.
928 L4_buf_iter *buf_iter = 0;
930 switch (item->b.type())
932 case L4_msg_item::Map:
933 switch (L4_fpage(item->d).type())
935 case L4_fpage::Memory: buf_iter = &mem_buffer; break;
936 case L4_fpage::Io: buf_iter = &io_buffer; break;
937 case L4_fpage::Obj: buf_iter = &obj_buffer; break;
// No buffer of the required kind left: overflow.
945 if (EXPECT_FALSE(!buf_iter))
947 // LOG_MSG_3VAL(snd, "lIPCm0", 0, 0, 0);
948 snd->set_ipc_error(L4_error::Overflow, rcv);
952 L4_buf_iter::Item const *const buf = buf_iter->get();
954 if (EXPECT_FALSE(buf->b.is_void() || buf->b.type() != item->b.type()))
956 // LOG_MSG_3VAL(snd, "lIPCm1", buf->b.raw(), item->b.raw(), 0);
957 snd->set_ipc_error(L4_error::Overflow, rcv);
962 assert (item->b.type() == L4_msg_item::Map);
963 L4_fpage sfp(item->d);
// Report the (masked) send item descriptor to the receiver.
964 *rcv_word = (item->b.raw() & ~0x0ff7) | (sfp.raw() & 0x0ff0);
968 // diminish when sending via restricted ipc gates
969 if (sfp.type() == L4_fpage::Obj)
970 sfp.mask_rights(rights | L4_fpage::Rights::CRW() | L4_fpage::Rights::CD());
972 if (!try_transfer_local_id(buf, sfp, rcv_word, snd, rcv))
974 // we need to do a real mapping
978 // We take the existence_lock for synchronizing maps...
979 // This is kind of coarse grained
980 auto sp_lock = lock_guard_dont_lock(rcv_t->existence_lock);
981 if (!sp_lock.check_and_lock(&rcv_t->existence_lock))
983 snd->set_ipc_error(L4_error::Overflow, rcv);
// Drop the CPU lock around the (potentially long) map operation.
987 auto c_lock = lock_guard<Lock_guard_inverse_policy>(cpu_lock);
988 err = fpage_map(snd->space(), sfp,
989 rcv->space(), L4_fpage(buf->d), item->b, &rl);
992 if (EXPECT_FALSE(!err.ok()))
994 snd->set_ipc_error(err, rcv);
// A non-compound item consumes its receive buffer.
1002 if (!item->b.compound())
// Items left but send iterator exhausted: message was malformed.
1006 if (EXPECT_FALSE(items))
1008 snd->set_ipc_error(L4_error::Overflow, rcv);
1017 * \pre Runs on the sender CPU
// Abort an in-flight send with error `e`: dequeue from the partner (or ask
// the partner's CPU to do it), and wait out a transfer that already started.
1021 Thread::abort_send(L4_error const &e, Thread *partner)
1023 state_del_dirty(Thread_full_ipc_mask)
// Disarm our send timeout if it is queued.
1025 if (_timeout && _timeout->is_set())
1029 Abort_state abt = Abt_ipc_done;
// Same-CPU partner: we may inspect and fix up its queues directly.
1031 if (partner->home_cpu() == current_cpu())
1033 if (in_sender_list())
1035 sender_dequeue(partner->sender_list());
1036 partner->vcpu_update_state();
1037 abt = Abt_ipc_cancel;
1039 else if (partner->in_ipc(this))
1040 abt = Abt_ipc_in_progress;
// Remote partner: delegate the abort decision to the receiver side.
1043 abt = partner->Receiver::abort_send(this);
1050 case Abt_ipc_cancel:
1051 utcb().access()->error = e;
// Transfer already running: block until the partner finishes it.
1053 case Abt_ipc_in_progress:
1054 state_add_dirty(Thread_ipc_transfer);
1055 while (state() & Thread_ipc_transfer)
1057 state_del_dirty(Thread_ready);
1067 * \pre Runs on the sender CPU
// Block in the send phase until the message is taken, the timeout fires,
// or the IPC is cancelled. Returns false (via abort_send) on failure.
1071 Thread::do_send_wait(Thread *partner, L4_timeout snd_t)
1073 IPC_timeout timeout;
1075 if (EXPECT_FALSE(snd_t.is_finite()))
1077 Unsigned64 tval = snd_t.microsecs(Timer::system_clock(), utcb().access(true));
1078 // Zero timeout or timeout expired already -- give up
1080 return abort_send(L4_error::Timeout, partner);
1082 set_timeout(&timeout, tval);
// Sleep until something other than plain send-wait is set in our state.
1087 while (((ipc_state = state() & (Thread_send_wait | Thread_ipc_abort_mask))) == Thread_send_wait)
1089 state_del_dirty(Thread_ready);
// Map the wakeup reason onto the corresponding abort error.
1093 if (EXPECT_FALSE(ipc_state == (Thread_cancel | Thread_send_wait)))
1094 return abort_send(L4_error::Canceled, partner);
1096 if (EXPECT_FALSE(ipc_state == (Thread_timeout | Thread_send_wait)))
1097 return abort_send(L4_error::Timeout, partner);
// Record the rights to apply to the current send operation.
1106 Thread::set_ipc_send_rights(L4_fpage::Rights c)
1108 _ipc_send_rights = c;
1111 PRIVATE inline NOEXPORT
// Executed on the partner's CPU (DRQ context): perform the receiver-side
// handshake and, for simple messages, the transfer itself. Messages with
// typed items are deferred to normal thread context (see comment below).
1113 Thread::remote_ipc_send(Ipc_remote_request *rq)
1117 LOG_MSG_3VAL(this, "rsend", (Mword)src, 0, 0);
1118 printf("CPU[%u]: remote IPC send ...\n"
1119 " partner=%p [%u]\n"
1120 " sender =%p [%u] regs=%p\n"
1123 rq->partner, rq->partner->cpu(),
1129 switch (expect(rq->partner->check_sender(this, rq->timeout), Check_sender::Ok))
1131 case Check_sender::Failed:
1132 xcpu_state_change(~Thread_ipc_mask, 0);
1133 rq->result = Check_sender::Failed;
1135 case Check_sender::Queued:
1136 rq->result = Check_sender::Queued;
// NOTE(review): `a && b || c` parses as `(a && b) || c`, so inherit_fpu()
// alone triggers the FPU spill even without transfer_fpu(). Upstream code
// groups this as `a && (b || c)` — looks like missing parentheses; verify
// against the canonical source before changing.
1142 if (rq->tag.transfer_fpu() && rq->partner->_utcb_handler || rq->partner->utcb().access()->inherit_fpu())
1143 rq->partner->spill_fpu_if_owner();
1145 // trigger remote_ipc_receiver_ready path, because we may need to grab locks
1146 // and this is forbidden in a DRQ handler. So transfer the IPC in usual
1147 // thread code. However, this induces a overhead of two extra IPIs.
1148 if (rq->tag.items())
1150 //LOG_MSG_3VAL(rq->partner, "pull", dbg_id(), 0, 0);
1151 xcpu_state_change(~Thread_send_wait, Thread_ready);
1152 rq->partner->state_change_dirty(~(Thread_ipc_mask | Thread_ready), Thread_ipc_transfer);
1153 rq->result = Check_sender::Ok;
// Simple message: copy it right here in DRQ context.
1156 bool success = transfer_msg(rq->tag, rq->partner, rq->regs, _ipc_send_rights);
1157 if (success && rq->have_rcv)
1158 xcpu_state_change(~Thread_send_wait, Thread_receive_wait);
1160 xcpu_state_change(~Thread_ipc_mask, 0);
1162 rq->result = success ? Check_sender::Done : Check_sender::Failed;
1163 rq->partner->state_change_dirty(~Thread_ipc_mask, Thread_ready);
// Wake the partner locally unless it is the currently running thread.
1164 if (rq->partner->home_cpu() == current_cpu() && current() != rq->partner)
1165 Sched_context::rq.current().ready_enqueue(rq->partner->sched());
// DRQ entry point: unpack the request and run remote_ipc_send() on behalf
// of the sending thread (`src->context()`).
1171 Context::Drq::Result
1172 Thread::handle_remote_ipc_send(Drq *src, Context *, void *_rq)
1174 Ipc_remote_request *rq = (Ipc_remote_request*)_rq;
1175 bool r = nonull_static_cast<Thread*>(src->context())->remote_ipc_send(rq);
1176 //LOG_MSG_3VAL(src, "rse<", current_cpu(), (Mword)src, r);
// Propagate whether the DRQ handler made a thread ready (needs reschedule).
1177 return r ? Drq::need_resched() : Drq::done();
1181 * \pre Runs on the sender CPU
1183 PRIVATE //inline NEEDS ["mp_request.h"]
1184 Thread::Check_sender
// Cross-CPU handshake: package the send parameters into an Ipc_remote_request
// and execute it on the partner's CPU via a DRQ; this call may block.
1185 Thread::remote_handshake_receiver(L4_msg_tag const &tag, Thread *partner,
1187 L4_timeout snd_t, Syscall_frame *regs,
1188 L4_fpage::Rights rights)
1190 // Flag that there must be no switch in the receive path.
1191 // This flag also prevents the receive path from accessing
1192 // the thread state of a remote sender.
1193 Ipc_remote_request rq;
1195 rq.have_rcv = have_receive;
1196 rq.partner = partner;
// A zero send timeout means "fail instead of queueing" on the remote side.
1197 rq.timeout = !snd_t.is_zero();
// Pre-enqueue ourselves before shipping the request.
1202 set_wait_queue(partner->sender_list());
1204 state_add_dirty(Thread_send_wait);
// Spill our FPU state first if it travels with the message.
1206 if (tag.transfer_fpu())
1207 spill_fpu_if_owner();
// Execute handle_remote_ipc_send() on the partner's CPU.
1209 partner->drq(handle_remote_ipc_send, &rq);
1214 //---------------------------------------------------------------------------
1215 IMPLEMENTATION [debug]:
1217 #include "string_buffer.h"
// Format an invalid-pager trace entry: offending cap, fault address, error.
1221 Thread::Log_pf_invalid::print(String_buffer *buf) const
1223 buf->printf("InvCap C:%lx pfa=%lx err=%lx",
1224 cxx::int_value<Cap_index>(cap_idx), pfa, err);
// Format an invalid-exception-handler trace entry: the offending capability.
1229 Thread::Log_exc_invalid::print(String_buffer *buf) const
1231 buf->printf("InvCap C:%lx", cxx::int_value<Cap_index>(cap_idx));