struct Log_pf_invalid : public Tb_entry
{
  Cap_index cap_idx;
  Mword pfa;
  Mword err;
  void print(String_buffer *buf) const;
};

struct Log_exc_invalid : public Tb_entry
{
  Cap_index cap_idx;
  void print(String_buffer *buf) const;
};
#include "l4_buf_iter.h"
EXTENSION class Thread
{
protected:
  enum Check_sender_result
  {
    Ok     = 0,
    Queued = 2,
    Done   = 4,
    Failed = 1,
  };

  Syscall_frame *_snd_regs;
  L4_fpage::Rights _ipc_send_rights;
};
class Buf_utcb_saver
{
public:
  Buf_utcb_saver(Utcb const *u);
  void restore(Utcb *u);
private:
  L4_buf_desc buf_desc;
  Mword buf[2];
};

/**
 * Save critical contents of UTCB during nested IPC.
 */
class Pf_msg_utcb_saver : public Buf_utcb_saver
{
public:
  Pf_msg_utcb_saver(Utcb const *u);
  void restore(Utcb *u);
private:
  Mword msg[2];
};
struct Ipc_remote_request;

struct Ipc_remote_request
{
  L4_msg_tag tag;
  Thread *partner;
  Syscall_frame *regs;
  L4_fpage::Rights rights;
  bool timeout;
  bool have_rcv;

  unsigned result;
};

struct Ready_queue_request
{
  enum Result { Done, Wrong_cpu, Not_existent };
};
// ------------------------------------------------------------------------
INTERFACE [debug]:

EXTENSION class Thread
{
protected:
  static unsigned log_fmt_pf_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_page_fault_invalid_pager");
  static unsigned log_fmt_exc_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_exception_invalid_handler");
};
// ------------------------------------------------------------------------
IMPLEMENTATION:

// IPC setup, and handling of ``short IPC'' and page-fault IPC

// IDEAS for enhancing this implementation:

// Volkmar has suggested a possible optimization for
// short-flexpage-to-long-message-buffer transfers: Currently, we have
// to resort to long IPC in that case because the message buffer might
// contain a receive-flexpage option. An easy optimization would be
// to cache the receive-flexpage option in the TCB for that case.
// This would save us the long-IPC setup because we wouldn't have to
// touch the receiver's user memory in that case. Volkmar argues that
// cases like that are quite common -- for example, imagine a pager
// which at the same time is also a server for ``normal'' requests.
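//
// A rough sketch of that idea (hypothetical: neither _cached_rcv_fpage
// nor the fast path below exist in this implementation):
//
//   // cached in the TCB whenever the receiver publishes its buffers
//   L4_fpage _cached_rcv_fpage;
//   bool     _cached_rcv_fpage_valid;
//
//   // sender-side fast path: match a single send flexpage against the
//   // cached option without touching the receiver's user memory
//   if (snd_is_single_fpage && rcv->_cached_rcv_fpage_valid)
//     return map_fpage(snd_fpage, rcv->_cached_rcv_fpage);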

// The handling of cancel and timeout conditions could be improved as
// follows: Cancel and Timeout should not reset the ipc_in_progress
// flag. Instead, they should just set and/or reset a flag of their
// own that is checked every time an (IPC) system call wants to go to
// sleep. That would mean that IPCs that do not block are not
// cancelled or aborted.
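//
// Sketched (equally hypothetical; Thread has no such flags today):
//
//   // set by cancel/timeout handlers instead of clearing ipc_in_progress
//   Mword _ipc_abort_flags;
//
//   // consulted only at the moment an IPC path actually wants to block,
//   // so an IPC that never sleeps also never gets cancelled or aborted
//   bool Thread::may_block() const
//   { return !_ipc_abort_flags; }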

#include <cstdlib>              // panic()

#include "l4_types.h"
#include "l4_msg_item.h"

#include "cpu_lock.h"
#include "ipc_timeout.h"
#include "lock_guard.h"

#include "map_util.h"
#include "processor.h"
Thread::ipc_receiver_aborted()
{
  assert_kdb (wait_queue());
  set_wait_queue(0);

  // remote_ready_enqueue() is only for MP
  activate();
}
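
// Sender interface: the receiver side calls this once the partner becomes
// ready, to carry out the message transfer this sender has queued.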
Thread::ipc_send_msg(Receiver *recv)
{
  Syscall_frame *regs = _snd_regs;
  bool success = transfer_msg(regs->tag(), nonull_static_cast<Thread*>(recv),
                              regs, _ipc_send_rights);
  sender_dequeue(recv->sender_list());
  recv->vcpu_update_state();

  regs->tag(L4_msg_tag(regs->tag(), success ? 0 : L4_msg_tag::Error));

  Mword state_del = Thread_ipc_mask | Thread_ipc_transfer;
  Mword state_add = Thread_ready;
  if (Receiver::prepared())
    // same as in Receiver::prepare_receive_dirty_2
    state_add |= Thread_receive_wait;

  if (home_cpu() == current_cpu())
    {
      state_change_dirty(~state_del, state_add);
      auto &rq = Sched_context::rq.current();
      Sched_context *cs = rq.current_sched();
      if (rq.deblock(cs, cs, true))
        recv->switch_to_locked(this);
    }
  else
    {
      drq_state_change(~state_del, state_add);
      current()->schedule_if(current()->handle_drq());
    }
}

Thread::modify_label(Mword const *todo, int cnt)
{
  assert_kdb (_snd_regs);
  Mword l = _snd_regs->from_spec();
  for (int i = 0; i < cnt*4; i += 4)
    {
      Mword const test_mask = todo[i];
      Mword const test      = todo[i+1];
      if ((l & test_mask) == test)
        {
          Mword const del_mask = todo[i+2];
          Mword const add_mask = todo[i+3];

          l = (l & ~del_mask) | add_mask;
          _snd_regs->from(l);
          return;
        }
    }
}
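
// Usage sketch (hypothetical values): each quadruple in todo[] is
// { test_mask, test, del_mask, add_mask }, and the first matching entry
// rewrites the label. E.g., to relabel senders whose low label byte is
// 0x42 to 0x43:
//
//   Mword const todo[4] = { 0xff, 0x42, 0xff, 0x43 };
//   sender->modify_label(todo, 1);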

Thread::snd_regs(Syscall_frame *r)
{ _snd_regs = r; }

/** Page fault handler.
    This handler suspends any ongoing IPC, then sets up page-fault IPC.
    Finally, the ongoing IPC's state (if any) is restored.
    @param pfa page-fault virtual address
    @param error_code page-fault error code
 */
Thread::handle_page_fault_pager(Thread_ptr const &_pager,
                                Address pfa, Mword error_code,
                                L4_msg_tag::Protocol protocol)
{
  if (EXPECT_FALSE((state() & Thread_alien)))
    return false;

  auto guard = lock_guard(cpu_lock);

  L4_fpage::Rights rights;
  Kobject_iface *pager = _pager.ptr(space(), &rights);

  if (!pager)
    {
      WARN("CPU%d: Pager of %lx is invalid (pfa=" L4_PTR_FMT
           ", errorcode=" L4_PTR_FMT ") to %lx (pc=%lx)\n",
           cxx::int_value<Cpu_number>(current_cpu()), dbg_id(), pfa,
           error_code, cxx::int_value<Cap_index>(_pager.raw()), regs()->ip());

      LOG_TRACE("Page fault invalid pager", "pf", this, Log_pf_invalid,
                l->cap_idx = _pager.raw();
                l->pfa     = pfa;
                l->err     = error_code);

      pager = this; // block on ourselves
    }

  // set up a register block used as an IPC parameter block for the
  // page-fault IPC
  Syscall_frame r;

  // save the UTCB fields affected by PF IPC
  Mword vcpu_irqs = vcpu_disable_irqs();
  Utcb *utcb = this->utcb().access(true);
  Pf_msg_utcb_saver saved_utcb_fields(utcb);

  utcb->buf_desc   = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
  utcb->buffers[0] = L4_msg_item::map(0).raw();
  utcb->buffers[1] = L4_fpage::all_spaces().raw();

  utcb->values[0] = PF::addr_to_msgword0 (pfa, error_code);
  utcb->values[1] = regs()->ip(); //PF::pc_to_msgword1 (regs()->ip(), error_code));
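
  // Per the L4 page-fault protocol, word 0 carries the fault address with
  // the access type encoded in its low bits, and word 1 carries the
  // faulting instruction pointer -- the two untyped words announced by
  // the message tag below.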

  L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);

  L4_msg_tag tag(2, 0, 0, protocol);

  r.timeout(timeout);
  r.tag(tag);
  r.ref(L4_obj_ref(_pager.raw(), L4_obj_ref::Ipc_call_ipc));
  pager->invoke(r.ref(), rights, &r, utcb);

  bool success = true;

  if (EXPECT_FALSE(r.tag().has_error()))
    {
      if (utcb->error.snd_phase()
          && (utcb->error.error() == L4_error::Not_existent)
          && PF::is_usermode_error(error_code)
          && !(state() & Thread_cancel))
        success = false;
    }
  else // no IPC error
    {
      // If the pager rejects the mapping, it replies -1 in msg.w0
      if (EXPECT_FALSE (utcb->values[0] == Mword(-1)))
        success = false;
    }

  // restore previous IPC state
  saved_utcb_fields.restore(utcb);

  vcpu_restore_irqs(vcpu_irqs);

  return success;
}

Thread::check_sender(Thread *sender, bool timeout)
{
  if (EXPECT_FALSE(is_invalid()))
    {
      sender->utcb().access()->error = L4_error::Not_existent;
      return Failed;
    }

  if (EXPECT_FALSE(!sender_ok(sender)))
    {
      if (!timeout)
        {
          sender->utcb().access()->error = L4_error::Timeout;
          return Failed;
        }

      sender->set_wait_queue(sender_list());
      sender->sender_enqueue(sender_list(), sender->sched_context()->prio());
      vcpu_set_irq_pending();
      return Queued;
    }

  return Ok;
}
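
// To summarize check_sender(): Failed for an invalid partner, or a busy
// one when no send timeout is allowed; Queued once the sender has been
// enqueued by priority; Ok when the transfer can start immediately.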

PRIVATE inline NEEDS["timer.h"]
void Thread::goto_sleep(L4_timeout const &t, Sender *sender, Utcb *utcb)
{
  IPC_timeout timeout;

  if (EXPECT_FALSE(t.is_finite() && !_timeout))
    {
      state_del_dirty(Thread_ready);

      Unsigned64 sysclock = Timer::system_clock();
      Unsigned64 tval = t.microsecs(sysclock, utcb);

      if (EXPECT_TRUE((tval > sysclock)))
        {
          set_timeout(&timeout);
          timeout.set(tval, current_cpu());
        }
      else // timeout already hit
        state_change_dirty(~Thread_ipc_mask, Thread_ready | Thread_timeout);
    }
  else
    {
      if (EXPECT_TRUE(t.is_never()))
        state_del_dirty(Thread_ready);
      else
        state_change_dirty(~Thread_ipc_mask, Thread_ready | Thread_timeout);
    }

  switch_sched(sched(), &Sched_context::rq.current());

  schedule();

  if (EXPECT_FALSE((long)_timeout))
    {
      timeout.reset();
      set_timeout(0);
    }

  assert_kdb (state() & Thread_ready);
}

/**
 * @pre cpu_lock must be held
 */
PRIVATE inline NEEDS["logdefs.h"]
Thread::Check_sender_result
Thread::handshake_receiver(Thread *partner, L4_timeout snd_t)
{
  assert_kdb(cpu_lock.test());

  switch (__builtin_expect(partner->check_sender(this, !snd_t.is_zero()), Ok))
    {
    case Failed:
      return Failed;
    case Queued:
      state_add_dirty(Thread_send_wait);
      return Queued;
    default:
      partner->state_change_dirty(~(Thread_ipc_mask | Thread_ready), Thread_ipc_transfer);
      return Ok;
    }
}

Thread::set_ipc_error(L4_error const &e, Thread *rcv)
{
  utcb().access()->error = e;
  rcv->utcb().access()->error = L4_error(e, L4_error::Rcv);
}

Thread::get_next_sender(Sender *sender)
{
  if (!sender_list()->empty())
    {
      if (sender) // closed wait
        {
          if (sender->in_sender_list() && sender_list() == sender->wait_queue())
            return sender;
        }
      else // open wait
        {
          Sender *next = Sender::cast(sender_list()->first());
          assert_kdb (next->in_sender_list());
          return next;
        }
    }
  return 0;
}
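
// A closed wait (sender != 0) only ever yields that designated sender; an
// open wait takes the head of the sender list, which sender_enqueue()
// keeps sorted by scheduling priority.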

/**
 * Send an IPC message.
 * Block until we can send the message or the timeout hits.
 * @param partner the receiver of our message
 * @param t a timeout specifier
 * @param regs sender's IPC registers
 * @pre cpu_lock must be held
 * @return sender's IPC error code
 */
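// Note: have_send/have_receive select the operation -- a plain send, a
// call (send plus closed receive from the same partner), an open wait
// (receive with sender == 0), or a closed wait (sender != 0).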
Thread::do_ipc(L4_msg_tag const &tag, bool have_send, Thread *partner,
               bool have_receive, Sender *sender,
               L4_timeout_pair t, Syscall_frame *regs,
               L4_fpage::Rights rights)
{
  assert_kdb (cpu_lock.test());
  assert_kdb (this == current());

  bool do_switch = false;

  assert_kdb (!(state() & Thread_ipc_mask));

  prepare_receive(sender, have_receive ? regs : 0);
  bool activate_partner = false;

  if (have_send)
    {
      assert_kdb(!in_sender_list());
      do_switch = tag.do_switch();

      bool ok;
      Check_sender_result result;

      set_ipc_send_rights(rights);

      if (EXPECT_FALSE(partner->home_cpu() != current_cpu()) ||
          ((result = handshake_receiver(partner, t.snd)) == Failed
           && partner->drq_pending()))
        {
          // either a cross-CPU IPC from the start, or we ran into an
          // IPC during migration (indicated by the pending DRQ)
          result = remote_handshake_receiver(tag, partner, have_receive, t.snd,
                                             regs, rights);
        }

      switch (__builtin_expect(result, Ok))
        {
        case Done:
          ok = true;
          break;

        case Queued:
          // set _snd_regs, to enable active receiving
          snd_regs(regs);
          ok = do_send_wait(partner, t.snd); // --- blocking point ---
          break;

        case Failed:
          state_del_dirty(Thread_ipc_mask);
          ok = false;
          break;

        default:
          // We can reset the receiver's timeout here: ping-pong IPC with
          // timeouts profits because it avoids most of the timeout-queue
          // sorting overhead. If we did not reset the timeout, the
          // receiver's timeout would very likely still sit in the
          // timeout queue.
          partner->reset_timeout();

          ok = transfer_msg(tag, partner, regs, rights);

          // switch to receiving state
          state_del_dirty(Thread_ipc_mask);
          if (ok && have_receive)
            state_add_dirty(Thread_receive_wait);

          activate_partner = partner != this;
          break;
        }

      if (EXPECT_FALSE(!ok))
        {
          // send failed, so do not switch to the receiver directly and
          // skip the receive phase
          have_receive = false;
          regs->tag(L4_msg_tag(0, 0, L4_msg_tag::Error, 0));
        }
    }
  else
    {
      assert_kdb (have_receive);
      state_add_dirty(Thread_receive_wait);
    }

  // only do a direct switch on a closed wait (call), or if we run on a
  // foreign scheduling context
  Sender *next = 0;

  have_receive = state() & Thread_receive_wait;

  if (have_receive)
    {
      assert_kdb (!in_sender_list());
      assert_kdb (!(state() & Thread_send_wait));
      next = get_next_sender(sender);
    }

  if (activate_partner)
    {
      if (partner->home_cpu() == current_cpu())
        {
          auto &rq = Sched_context::rq.current();
          Sched_context *cs = rq.current_sched();
          do_switch = do_switch && ((have_receive && sender) || cs->context() != this) && !next;
          partner->state_change_dirty(~Thread_ipc_transfer, Thread_ready);

          if (do_switch)
            {
              rq.deblock(partner->sched(), cs, false);

              schedule_if(switch_exec_locked(partner, Context::Not_Helping));
            }
          else if (rq.deblock(partner->sched(), cs, true))
            switch_to_locked(partner);
        }
      else
        partner->drq_state_change(~Thread_ipc_transfer, Thread_ready);
    }

  if (next)
    {
      state_change_dirty(~Thread_ipc_mask, Thread_receive_in_progress);
      next->ipc_send_msg(this);
      state_del_dirty(Thread_ipc_mask);
    }
  else if (have_receive)
    {
      if ((state() & Thread_full_ipc_mask) == Thread_receive_wait)
        goto_sleep(t.rcv, sender, utcb().access(true));
    }

  if (EXPECT_TRUE (!(state() & Thread_full_ipc_mask)))
    return;

  while (EXPECT_FALSE(state() & Thread_ipc_transfer))
    {
      state_del_dirty(Thread_ready);
      schedule();
    }

  if (EXPECT_TRUE (!(state() & Thread_full_ipc_mask)))
    return;

  Utcb *utcb = this->utcb().access(true);
  // the IPC has not finished -- this can be a timeout or a cancel
  // XXX should only modify the error-code part of the status code
  if (EXPECT_FALSE(state() & Thread_cancel))
    {
      // we've presumably been reset!
      regs->tag(commit_error(utcb, L4_error::R_canceled, regs->tag()));
    }
  else
    regs->tag(commit_error(utcb, L4_error::R_timeout, regs->tag()));

  state_del(Thread_full_ipc_mask);
}

PRIVATE inline NEEDS [Thread::copy_utcb_to]
bool
Thread::transfer_msg(L4_msg_tag tag, Thread *receiver,
                     Syscall_frame *sender_regs, L4_fpage::Rights rights)
{
  Syscall_frame *dst_regs = receiver->rcv_regs();

  bool success = copy_utcb_to(tag, receiver, rights);
  tag.set_error(!success);
  dst_regs->tag(tag);
  dst_regs->from(sender_regs->from_spec());

  // set up the reply capability in case of a call
  if (success && partner() == receiver)
    receiver->set_caller(this, rights);

  return success;
}

Buf_utcb_saver::Buf_utcb_saver(const Utcb *u)
{
  buf_desc = u->buf_desc;
  buf[0] = u->buffers[0];
  buf[1] = u->buffers[1];
}

void
Buf_utcb_saver::restore(Utcb *u)
{
  u->buf_desc = buf_desc;
  u->buffers[0] = buf[0];
  u->buffers[1] = buf[1];
}

Pf_msg_utcb_saver::Pf_msg_utcb_saver(Utcb const *u) : Buf_utcb_saver(u)
{
  msg[0] = u->values[0];
  msg[1] = u->values[1];
}

void
Pf_msg_utcb_saver::restore(Utcb *u)
{
  Buf_utcb_saver::restore(u);
  u->values[0] = msg[0];
  u->values[1] = msg[1];
}
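
// Typical nesting, as in handle_page_fault_pager() above:
//
//   Pf_msg_utcb_saver saved(utcb); // snapshot buf_desc, buffers[0..1],
//                                  // and values[0..1] of the interrupted IPC
//   /* ... perform the kernel-generated page-fault IPC ... */
//   saved.restore(utcb);           // put the user's IPC state back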

/**
 * \pre must run with local IRQs disabled (CPU lock held)
 * to ensure that the handler does not disappear meanwhile.
 */
Thread::exception(Kobject_iface *handler, Trap_state *ts, L4_fpage::Rights rights)
{
  Syscall_frame r;
  L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);

  Mword vcpu_irqs = vcpu_disable_irqs();

  void *old_utcb_handler = _utcb_handler;
  _utcb_handler = ts;

  // fill registers for IPC
  Utcb *utcb = this->utcb().access(true);
  Buf_utcb_saver saved_state(utcb);

  utcb->buf_desc   = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
  utcb->buffers[0] = L4_msg_item::map(0).raw();
  utcb->buffers[1] = L4_fpage::all_spaces().raw();

  L4_msg_tag tag(L4_exception_ipc::Msg_size, 0, L4_msg_tag::Transfer_fpu,
                 L4_msg_tag::Label_exception);

  r.tag(tag);
  r.timeout(timeout);
  r.ref(L4_obj_ref(_exc_handler.raw(), L4_obj_ref::Ipc_call_ipc));

  handler->invoke(r.ref(), rights, &r, utcb);

  saved_state.restore(utcb);

  if (EXPECT_FALSE(r.tag().has_error()))
    state_del(Thread_in_exception);
  else if (r.tag().proto() == L4_msg_tag::Label_allow_syscall)
    state_add(Thread_dis_alien);

  // restore the original utcb_handler
  _utcb_handler = old_utcb_handler;

  vcpu_restore_irqs(vcpu_irqs);

  // FIXME: handle a non-existing pager properly
  // for now, just ignore any errors
  return 1;
}

/* Return 1 if the exception could be handled;
 * return 0 if not -- send_exception() then halts the thread.
 */
PUBLIC inline NEEDS["task.h", "trap_state.h",
                    Thread::fast_return_to_user,
                    Thread::save_fpu_state_to_utcb]
int
Thread::send_exception(Trap_state *ts)
{
  assert(cpu_lock.test());

  Vcpu_state *vcpu = vcpu_state().access();

  if (vcpu_exceptions_enabled(vcpu))
    {
      // do not reflect debug exceptions to the vCPU but handle them in
      // the kernel
      if (EXPECT_FALSE(ts->is_debug_exception()
                       && !(vcpu->state & Vcpu_state::F_debug_exc)))
        return 0;

      if (_exc_cont.valid())
        return 1;

      // save the FPU state before entering kernel mode to have the
      // original FPU state from before the exception available
      save_fpu_state_to_utcb(ts, utcb().access());

      if (vcpu_enter_kernel_mode(vcpu))
        {
          // enter_kernel_mode has switched the address space from user to
          // kernel space, so reevaluate the address of the vCPU state area
          vcpu = vcpu_state().access();
        }

      LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log,
                l->state = vcpu->_saved_state;
                l->trap = ts->trapno();
                l->err = ts->error();
                l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
                );

      memcpy(&vcpu->_ts, ts, sizeof(Trap_state));
      fast_return_to_user(vcpu->_entry_ip, vcpu->_sp, vcpu_state().usr().get());
    }

  // local IRQs must be disabled because we dereference a Thread_ptr
  if (EXPECT_FALSE(_exc_handler.is_kernel()))
    return 0;

  if (!send_exception_arch(ts))
    return 0; // do not send exception

  L4_fpage::Rights rights = L4_fpage::Rights(0);
  Kobject_iface *pager = _exc_handler.ptr(space(), &rights);

  if (EXPECT_FALSE(!pager))
    {
      /* no pager (anymore), just ignore the exception, return success */
      LOG_TRACE("Exception invalid handler", "exc", this, Log_exc_invalid,
                l->cap_idx = _exc_handler.raw());
      if (EXPECT_FALSE(space()->is_sigma0()))
        {
          WARNX(Error, "Sigma0 raised an exception --> HALT\n");
          return 0;
        }

      pager = this; // block on ourselves
    }

  state_change(~Thread_cancel, Thread_in_exception);

  return exception(pager, ts, rights);
}

Thread::try_transfer_local_id(L4_buf_iter::Item const *const buf,
                              L4_fpage sfp, Mword *rcv_word, Thread* snd,
                              Thread *rcv)
{
  if (buf->b.is_rcv_id())
    {
      if (snd->space() == rcv->space())
        {
          rcv_word[-1] = sfp.raw();
          return true;
        }
      else
        {
          unsigned char rights = 0;
          Obj_space::Capability cap = snd->space()->lookup(sfp.obj_index());
          Kobject_iface *o = cap.obj();
          rights = cap.rights();
          if (EXPECT_TRUE(o && o->is_local(rcv->space())))
            {
              rcv_word[-1] = o->obj_id() | Mword(rights);
              return true;
            }
        }
    }
  return false;
}
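
// A successful local ID transfer bypasses the mapping database entirely:
// when the receive buffer asks for a raw ID and the object is already
// visible in the receiver's space, only a plain word is copied and no
// capability mapping is established.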

PRIVATE static inline
bool FIASCO_WARN_RESULT
Thread::copy_utcb_to_utcb(L4_msg_tag const &tag, Thread *snd, Thread *rcv,
                          L4_fpage::Rights rights)
{
  assert (cpu_lock.test());

  Utcb *snd_utcb = snd->utcb().access();
  Utcb *rcv_utcb = rcv->utcb().access();
  Mword s = tag.words();
  Mword r = Utcb::Max_words;

  Mem::memcpy_mwords(rcv_utcb->values, snd_utcb->values, r < s ? r : s);

  bool success = true;
  if (tag.items())
    success = transfer_msg_items(tag, snd, snd_utcb, rcv, rcv_utcb, rights);

  if (tag.transfer_fpu() && rcv_utcb->inherit_fpu() && (rights & L4_fpage::Rights::W()))
    snd->transfer_fpu(rcv);

  return success;
}

PUBLIC inline NEEDS[Thread::copy_utcb_to_ts, Thread::copy_utcb_to_utcb,
                    Thread::copy_ts_to_utcb]
bool FIASCO_WARN_RESULT
Thread::copy_utcb_to(L4_msg_tag const &tag, Thread* receiver,
                     L4_fpage::Rights rights)
{
  // we cannot copy trap state to trap state!
  assert_kdb (!this->_utcb_handler || !receiver->_utcb_handler);
  if (EXPECT_FALSE(this->_utcb_handler != 0))
    return copy_ts_to_utcb(tag, this, receiver, rights);
  else if (EXPECT_FALSE(receiver->_utcb_handler != 0))
    return copy_utcb_to_ts(tag, this, receiver, rights);
  else
    return copy_utcb_to_utcb(tag, this, receiver, rights);
}
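
// I.e. a sender that is currently handling a trap sends out of its
// Trap_state (exception IPC), a receiver in that situation gets the
// message written into its Trap_state, and plain IPC copies UTCB to UTCB.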

Thread::transfer_msg_items(L4_msg_tag const &tag, Thread* snd, Utcb *snd_utcb,
                           Thread *rcv, Utcb *rcv_utcb,
                           L4_fpage::Rights rights)
{
  // LOG_MSG_3VAL(current(), "map bd=", rcv_utcb->buf_desc.raw(), 0, 0);
  Task *const rcv_t = nonull_static_cast<Task*>(rcv->space());
  L4_buf_iter mem_buffer(rcv_utcb, rcv_utcb->buf_desc.mem());
  L4_buf_iter io_buffer(rcv_utcb, rcv_utcb->buf_desc.io());
  L4_buf_iter obj_buffer(rcv_utcb, rcv_utcb->buf_desc.obj());
  L4_snd_item_iter snd_item(snd_utcb, tag.words());
  int items = tag.items();
  Mword *rcv_word = rcv_utcb->values + tag.words();
  Reap_list rl;

  // XXX: damn X-CPU state modification
  // snd->prepare_long_ipc(rcv);

  for (; items > 0 && snd_item.more();)
    {
      if (EXPECT_FALSE(!snd_item.next()))
        {
          snd->set_ipc_error(L4_error::Overflow, rcv);
          return false;
        }

      L4_snd_item_iter::Item const *const item = snd_item.get();

      if (item->b.is_void())
        { // XXX: not sure if void fpages are needed
          // skip send item and current receive buffer
          rcv_word += 2;
          items--;
          continue;
        }

      L4_buf_iter *buf_iter = 0;

      switch (item->b.type())
        {
        case L4_msg_item::Map:
          switch (L4_fpage(item->d).type())
            {
            case L4_fpage::Memory: buf_iter = &mem_buffer; break;
            case L4_fpage::Io:     buf_iter = &io_buffer; break;
            case L4_fpage::Obj:    buf_iter = &obj_buffer; break;
            default:
              break;
            }
          break;
        default:
          break;
        }

      if (EXPECT_FALSE(!buf_iter))
        {
          // LOG_MSG_3VAL(snd, "lIPCm0", 0, 0, 0);
          snd->set_ipc_error(L4_error::Overflow, rcv);
          return false;
        }

      L4_buf_iter::Item const *const buf = buf_iter->get();

      if (EXPECT_FALSE(buf->b.is_void() || buf->b.type() != item->b.type()))
        {
          // LOG_MSG_3VAL(snd, "lIPCm1", buf->b.raw(), item->b.raw(), 0);
          snd->set_ipc_error(L4_error::Overflow, rcv);
          return false;
        }

      assert_kdb (item->b.type() == L4_msg_item::Map);
      L4_fpage sfp(item->d);
      *rcv_word = (item->b.raw() & ~0x0ff7) | (sfp.raw() & 0x0ff0);

      rcv_word += 2;

      if (!try_transfer_local_id(buf, sfp, rcv_word, snd, rcv))
        {
          // we need to do a real mapping

          // diminish rights when sending via restricted IPC gates
          if (sfp.type() == L4_fpage::Obj)
            sfp.mask_rights(rights | L4_fpage::Rights::CRW() | L4_fpage::Rights::CD());

          L4_error err;

            {
              // We take the existence_lock for synchronizing maps...
              // This is kind of coarse-grained.
              Lock_guard<decltype(rcv_t->existence_lock)> sp_lock;
              if (!sp_lock.check_and_lock(&rcv_t->existence_lock))
                {
                  snd->set_ipc_error(L4_error::Overflow, rcv);
                  return false;
                }

              auto c_lock = lock_guard<Lock_guard_inverse_policy>(cpu_lock);
              err = fpage_map(snd->space(), sfp,
                              rcv->space(), L4_fpage(buf->d), item->b, &rl);
            }

          if (EXPECT_FALSE(!err.ok()))
            {
              snd->set_ipc_error(err, rcv);
              return false;
            }
        }

      if (!item->b.compound())
        buf_iter->next();

      items--;
    }

  if (EXPECT_FALSE(items))
    {
      snd->set_ipc_error(L4_error::Overflow, rcv);
      return false;
    }

  return true;
}
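
// Note the lock-step consumption above: every typed send item claims the
// next receive buffer of its own kind (memory, I/O port, or object);
// only a compound send item leaves the current buffer in place for the
// item that follows.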

/**
 * \pre Runs on the sender CPU
 */
Thread::abort_send(L4_error const &e, Thread *partner)
{
  state_del_dirty(Thread_full_ipc_mask);

  if (_timeout && _timeout->is_set())
    _timeout->reset();

  set_timeout(0);

  Abort_state abt = Abt_ipc_done;

  if (partner->home_cpu() == current_cpu())
    {
      if (in_sender_list())
        {
          sender_dequeue(partner->sender_list());
          partner->vcpu_update_state();
          abt = Abt_ipc_cancel;
        }
      else if (partner->in_ipc(this))
        abt = Abt_ipc_in_progress;
    }
  else
    abt = partner->Receiver::abort_send(this);

  switch (abt)
    {
    default:
    case Abt_ipc_done:
      return true;

    case Abt_ipc_cancel:
      utcb().access()->error = e;
      return false;

    case Abt_ipc_in_progress:
      state_add_dirty(Thread_ipc_transfer);
      while (state() & Thread_ipc_transfer)
        {
          state_del_dirty(Thread_ready);
          schedule();
        }
      return false;
    }
}

/**
 * \pre Runs on the sender CPU
 */
Thread::do_send_wait(Thread *partner, L4_timeout snd_t)
{
  IPC_timeout timeout;

  if (EXPECT_FALSE(snd_t.is_finite()))
    {
      Unsigned64 tval = snd_t.microsecs(Timer::system_clock(), utcb().access(true));
      // Zero timeout or timeout expired already -- give up
      if (EXPECT_FALSE(tval == 0))
        return abort_send(L4_error::Timeout, partner);

      set_timeout(&timeout);
      timeout.set(tval, current_cpu());
    }

  Mword ipc_state;

  while (((ipc_state = state() & (Thread_send_wait | Thread_ipc_abort_mask))) == Thread_send_wait)
    {
      state_del_dirty(Thread_ready);
      schedule();
    }

  if (EXPECT_FALSE(ipc_state == (Thread_cancel | Thread_send_wait)))
    return abort_send(L4_error::Canceled, partner);

  if (EXPECT_FALSE(ipc_state == (Thread_timeout | Thread_send_wait)))
    return abort_send(L4_error::Timeout, partner);

  timeout.reset();
  set_timeout(0);

  return true;
}

Thread::set_ipc_send_rights(L4_fpage::Rights c)
{
  _ipc_send_rights = c;
}

PRIVATE inline NOEXPORT
bool
Thread::remote_ipc_send(Context *src, Ipc_remote_request *rq)
{
  // LOG_MSG_3VAL(this, "rse", current_cpu(), (Mword)src, (Mword)this);
#if 0
  LOG_MSG_3VAL(this, "rsend", (Mword)src, 0, 0);
  printf("CPU[%u]: remote IPC send ...\n"
         "  partner=%p [%u]\n"
         "  sender =%p [%u] regs=%p\n",
         cxx::int_value<Cpu_number>(current_cpu()),
         rq->partner, rq->partner->cpu(),
         this, this->cpu(), rq->regs);
#endif

  switch (__builtin_expect(rq->partner->check_sender(this, rq->timeout), Ok))
    {
    case Failed:
      rq->result = Failed;
      return false;
    case Queued:
      rq->result = Queued;
      return false;
    default:
      break;
    }

  // If the message carries typed items, trigger the
  // remote_ipc_receiver_ready path, because we may need to grab locks,
  // and that is forbidden in a DRQ handler. So transfer the IPC in the
  // usual thread code instead. However, this induces an overhead of two
  // extra IPIs.
  if (rq->tag.items())
    {
      //LOG_MSG_3VAL(rq->partner, "pull", dbg_id(), 0, 0);
      rq->partner->state_change_dirty(~(Thread_ipc_mask | Thread_ready), Thread_ipc_transfer);
      rq->result = Ok;
      return true;
    }

  bool success = transfer_msg(rq->tag, rq->partner, rq->regs, _ipc_send_rights);
  rq->result = success ? Done : Failed;

  rq->partner->state_change_dirty(~Thread_ipc_mask, Thread_ready);
  // hm, should be done by lazy queueing: rq->partner->ready_enqueue();
  return true;
}

Context::Drq::Result
Thread::handle_remote_ipc_send(Drq *src, Context *, void *_rq)
{
  Ipc_remote_request *rq = (Ipc_remote_request*)_rq;
  bool r = nonull_static_cast<Thread*>(src->context())->remote_ipc_send(src->context(), rq);
  //LOG_MSG_3VAL(src, "rse<", current_cpu(), (Mword)src, r);
  return r ? Drq::need_resched() : Drq::done();
}
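
// Cross-CPU sends are thus funneled through the partner's DRQ queue: the
// sender posts handle_remote_ipc_send() to the partner's home CPU, and
// remote_ipc_send() runs there, where the partner's thread state can be
// accessed directly.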

/**
 * \pre Runs on the sender CPU
 */
PRIVATE //inline NEEDS ["mp_request.h"]
Thread::Check_sender_result
Thread::remote_handshake_receiver(L4_msg_tag const &tag, Thread *partner,
                                  bool have_receive,
                                  L4_timeout snd_t, Syscall_frame *regs,
                                  L4_fpage::Rights rights)
{
  // Flag that there must be no switch in the receive path.
  // This flag also prevents the receive path from accessing
  // the thread state of a remote sender.
  Ipc_remote_request rq;
  rq.have_rcv = have_receive;
  rq.partner = partner;
  rq.timeout = !snd_t.is_zero();
  rq.tag = tag;
  rq.regs = regs;
  rq.rights = rights;
  _snd_regs = regs;

  set_wait_queue(partner->sender_list());

  state_add_dirty(Thread_send_wait);

  partner->drq(handle_remote_ipc_send, &rq,
               remote_prepare_receive);

  return Check_sender_result(rq.result);
}

Context::Drq::Result
Thread::remote_prepare_receive(Drq *src, Context *, void *arg)
{
  Context *c = src->context();
  Ipc_remote_request *rq = (Ipc_remote_request*)arg;
  //printf("CPU[%2u:%p]: remote_prepare_receive (err=%x)\n", current_cpu(), c, rq->err.error());

  // No atomic switch to the receive state if we are queued, or if the IPC
  // must be finished by the sender's thread code
  if (EXPECT_FALSE(rq->result == Queued || rq->result == Ok))
    return Drq::done();

  c->state_del(Thread_ipc_mask);
  if (EXPECT_FALSE((rq->result & Failed) || !rq->have_rcv))
    return Drq::done();

  c->state_add_dirty(Thread_receive_wait);
  return Drq::done();
}

//---------------------------------------------------------------------------
IMPLEMENTATION [debug]:

#include "string_buffer.h"

IMPLEMENT
void
Thread::Log_pf_invalid::print(String_buffer *buf) const
{
  buf->printf("InvCap C:%lx pfa=%lx err=%lx",
              cxx::int_value<Cap_index>(cap_idx), pfa, err);
}

IMPLEMENT
void
Thread::Log_exc_invalid::print(String_buffer *buf) const
{
  buf->printf("InvCap C:%lx", cxx::int_value<Cap_index>(cap_idx));
}