#include "l4_buf_iter.h"

struct Log_exc_invalid

enum Check_sender_result

  Syscall_frame *_snd_regs;
  unsigned char _ipc_send_rights;

  Buf_utcb_saver(Utcb const *u);
  void restore(Utcb *u);

 * Save critical contents of UTCB during nested IPC.

class Pf_msg_utcb_saver : public Buf_utcb_saver

  Pf_msg_utcb_saver(Utcb const *u);
  void restore(Utcb *u);

// ------------------------------------------------------------------------

EXTENSION class Thread

  static unsigned log_fmt_pf_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_page_fault_invalid_pager");
  static unsigned log_fmt_exc_invalid(Tb_entry *, int max, char *buf) asm ("__fmt_exception_invalid_handler");

// ------------------------------------------------------------------------
// IPC setup, and handling of ``short IPC'' and page-fault IPC

// IDEAS for enhancing this implementation:

// Volkmar has suggested a possible optimization for
// short-flexpage-to-long-message-buffer transfers: Currently, we have
// to resort to long IPC in that case because the message buffer might
// contain a receive-flexpage option. An easy optimization would be
// to cache the receive-flexpage option in the TCB for that case.
// This would save us the long-IPC setup because we wouldn't have to
// touch the receiver's user memory in that case. Volkmar argues that
// cases like that are quite common -- for example, imagine a pager
// which at the same time is also a server for ``normal'' requests.
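//
// A minimal sketch of that idea (all names below are hypothetical;
// nothing of this exists in the current implementation): the receive
// setup would mirror the receive-flexpage option into the receiver's
// TCB, so the short-IPC send path never has to touch the receiver's
// user memory:
//
//   // cached in the TCB whenever a receive is set up:
//   L4_fpage _cached_rcv_fpage;   // receive window copied from the UTCB
//   bool     _rcv_fpage_valid;    // cleared when the IPC finishes
//
//   // short-flexpage send path -- no long-IPC setup needed:
//   if (rcv->_rcv_fpage_valid)
//     map_flexpage(snd_fpage, rcv->_cached_rcv_fpage);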
// The handling of cancel and timeout conditions could be improved as
// follows: Cancel and Timeout should not reset the ipc_in_progress
// flag. Instead, they should just set and/or reset a flag of their
// own that is checked every time an (IPC) system call wants to go to
// sleep. That would mean that IPCs that do not block are not
// cancelled or aborted. A sketch of that scheme follows.
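//
// Sketch (the flag name is hypothetical): Cancel and Timeout would only
// set Thread_abort_pending instead of clearing ipc_in_progress, and
// every blocking point would test it right before sleeping, so an IPC
// that never blocks is never aborted:
//
//   // at each blocking point, e.g. in do_send_wait():
//   if (EXPECT_FALSE(state() & Thread_abort_pending))
//     return abort_send(L4_error::Canceled, partner);
//   state_del_dirty(Thread_ready);
//   schedule();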
#include <cstdlib>          // panic()

#include "l4_msg_item.h"

#include "cpu_lock.h"
#include "ipc_timeout.h"
#include "lock_guard.h"

#include "map_util.h"
#include "processor.h"

Thread::ipc_receiver_aborted()

  assert_kdb (receiver());

  sender_dequeue(receiver()->sender_list());
  receiver()->vcpu_update_state();

  remote_ready_enqueue();

Thread::ipc_receiver_ready()

  state_change_dirty(~Thread_ipc_mask, Thread_receive_in_progress);
Thread::ipc_send_msg(Receiver *recv)

  Syscall_frame *regs = _snd_regs;
  bool success = transfer_msg(regs->tag(), nonull_static_cast<Thread*>(recv), regs,
                              _ipc_send_rights);
  sender_dequeue(recv->sender_list());
  recv->vcpu_update_state();

  regs->tag(L4_msg_tag(regs->tag(), success ? 0 : L4_msg_tag::Error));

  Mword state_del = Thread_ipc_mask | Thread_ipc_transfer;
  Mword state_add = Thread_ready;
  if (Receiver::prepared())
    // same as in Receiver::prepare_receive_dirty_2
    state_add |= Thread_receive_wait;

  if (cpu() == current_cpu())

      state_change_dirty(~state_del, state_add);
      if (current_sched()->deblock(cpu(), current_sched(), true))
        recv->switch_to_locked(this);

      drq_state_change(~state_del, state_add);
      current()->schedule_if(current()->handle_drq());
Thread::modify_label(Mword const *todo, int cnt)

  assert_kdb (_snd_regs);
  Mword l = _snd_regs->from_spec();
  for (int i = 0; i < cnt*4; i += 4)

      Mword const test_mask = todo[i];
      Mword const test      = todo[i+1];
      if ((l & test_mask) == test)

          Mword const del_mask = todo[i+2];
          Mword const add_mask = todo[i+3];

          l = (l & ~del_mask) | add_mask;

Thread::snd_regs(Syscall_frame *r)
/** Page fault handler.
    This handler suspends any ongoing IPC, then sets up page-fault IPC.
    Finally, the ongoing IPC's state (if any) is restored.
    @param pfa page-fault virtual address
    @param error_code page-fault error code.

Thread::handle_page_fault_pager(Thread_ptr const &_pager,
                                Address pfa, Mword error_code,
                                L4_msg_tag::Protocol protocol)

  // do not handle user space page faults from kernel mode if we're
  // already handling a request
  if (EXPECT_FALSE(!PF::is_usermode_error(error_code)
                   && thread_lock()->test() == Thread_lock::Locked))

      kdb_ke("Fiasco BUG: page fault, under lock");
      panic("page fault in locked operation");

  if (EXPECT_FALSE((state() & Thread_alien)))

  Lock_guard<Cpu_lock> guard(&cpu_lock);

  unsigned char rights;
  Kobject_iface *pager = _pager.ptr(space(), &rights);

      WARN("CPU%d: Pager of %lx is invalid (pfa=" L4_PTR_FMT
           ", errorcode=" L4_PTR_FMT ") to %lx (pc=%lx)\n",
           current_cpu(), dbg_id(), pfa, error_code,
           _pager.raw(), regs()->ip());

      LOG_TRACE("Page fault invalid pager", "pf", this,
                __fmt_page_fault_invalid_pager,
                Log_pf_invalid *l = tbe->payload<Log_pf_invalid>();
                l->cap_idx = _pager.raw();

      pager = this; // block on ourselves
  // set up a register block used as an IPC parameter block for the
  // page-fault IPC

  Utcb *utcb = this->utcb().access(true);

  // save the UTCB fields affected by PF IPC
  Pf_msg_utcb_saver saved_utcb_fields(utcb);

  utcb->buf_desc   = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
  utcb->buffers[0] = L4_msg_item::map(0).raw();
  utcb->buffers[1] = L4_fpage::all_spaces().raw();

  utcb->values[0] = PF::addr_to_msgword0(pfa, error_code);
  utcb->values[1] = regs()->ip(); //PF::pc_to_msgword1 (regs()->ip(), error_code));

  L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);

  L4_msg_tag tag(2, 0, 0, protocol);

  r.ref(L4_obj_ref(_pager.raw() << L4_obj_ref::Cap_shift, L4_obj_ref::Ipc_call_ipc));
  pager->invoke(r.ref(), rights, &r, utcb);

  if (EXPECT_FALSE(r.tag().has_error()))

      if (Config::conservative)

          printf(" page fault %s error = 0x%lx\n",
                 utcb->error.snd_phase() ? "send" : "rcv",
                 utcb->error.error());
          kdb_ke("ipc to pager failed");

      if (utcb->error.snd_phase()
          && (utcb->error.error() == L4_error::Not_existent)
          && PF::is_usermode_error(error_code)
          && !(state() & Thread_cancel))

  // If the pager rejects the mapping, it replies -1 in msg.w0
  if (EXPECT_FALSE (utcb->values[0] == Mword(-1)))

  // restore previous IPC state

  saved_utcb_fields.restore(utcb);
Thread::check_sender(Thread *sender, bool timeout)

  if (EXPECT_FALSE(is_invalid()))

      sender->utcb().access()->error = L4_error::Not_existent;

  if (EXPECT_FALSE(!sender_ok(sender)))

      sender->utcb().access()->error = L4_error::Timeout;

  sender->set_receiver(this);
  sender->sender_enqueue(sender_list(), sender->sched_context()->prio());
  vcpu_set_irq_pending();
PRIVATE inline NEEDS["timer.h"]
void Thread::goto_sleep(L4_timeout const &t, Sender *sender, Utcb *utcb)

  if (EXPECT_FALSE(t.is_finite() && !_timeout))

      state_del_dirty(Thread_ready);

      Unsigned64 sysclock = Timer::system_clock();
      Unsigned64 tval = t.microsecs(sysclock, utcb);

      if (EXPECT_TRUE((tval > sysclock)))

          set_timeout(&timeout);
          timeout.set(tval, cpu());

      else // timeout already hit
        state_change_dirty(~Thread_ipc_mask, Thread_ready | Thread_timeout);

  if (EXPECT_TRUE(t.is_never()))
    state_del_dirty(Thread_ready);

  state_change_dirty(~Thread_ipc_mask, Thread_ready | Thread_timeout);

  switch_sched(sched());

  if (EXPECT_FALSE((long)_timeout))

  assert_kdb (state() & Thread_ready);
 * @pre cpu_lock must be held

PRIVATE inline NEEDS["logdefs.h"]

Thread::handshake_receiver(Thread *partner, L4_timeout snd_t)

  assert_kdb(cpu_lock.test());

  switch (__builtin_expect(partner->check_sender(this, !snd_t.is_zero()), Ok))

      state_add_dirty(Thread_send_wait);

  partner->state_change_dirty(~(Thread_ipc_mask | Thread_ready), Thread_ipc_transfer);

Thread::set_ipc_error(L4_error const &e, Thread *rcv)

  utcb().access()->error = e;
  rcv->utcb().access()->error = L4_error(e, L4_error::Rcv);

Thread::get_next_sender(Sender *sender)

  if (sender_list()->head())

      if (sender) // closed wait

          if (sender->in_sender_list() && this == sender->receiver())

      Sender *next = Sender::cast(sender_list()->head());
      assert_kdb (next->in_sender_list());
 * Send an IPC message.
 * Block until we can send the message or the timeout hits.
 * @param partner the receiver of our message
 * @param t a timeout specifier
 * @param regs sender's IPC registers
 * @pre cpu_lock must be held
 * @return sender's IPC error code

Thread::do_ipc(L4_msg_tag const &tag, bool have_send, Thread *partner,
               bool have_receive, Sender *sender,
               L4_timeout_pair t, Syscall_frame *regs,
               unsigned char rights)

  assert_kdb (cpu_lock.test());
  assert_kdb (this == current());

  bool do_switch = false;

  assert_kdb (!(state() & Thread_ipc_mask));

  prepare_receive(sender, have_receive ? regs : 0);
  bool activate_partner = false;

      assert_kdb(!in_sender_list());
      do_switch = tag.do_switch();

      set_ipc_send_rights(rights);

      if (EXPECT_FALSE(partner->cpu() != current_cpu()) ||
          ((result = handshake_receiver(partner, t.snd)) == Failed
           && partner->drq_pending()))

          // we either have genuine cross-CPU IPC, or we ran into an
          // IPC during migration (indicated by the pending DRQ)
          result = remote_handshake_receiver(tag, partner, have_receive, t.snd,
                                             regs, rights);

      switch (__builtin_expect(result, Ok))

          // set _snd_regs to enable active receiving

          ok = do_send_wait(partner, t.snd); // --- blocking point ---

          state_del_dirty(Thread_ipc_mask);
          // We can reset the receiver's timeout here. Ping-pong with
          // timeouts profits from this because it avoids most of the
          // sorting overhead: if we don't reset the timeout, it is very
          // likely that the receiver's timeout is still sitting in the
          // timeout queue.
          partner->reset_timeout();

          ok = transfer_msg(tag, partner, regs, rights);

          // switch to receiving state
          state_del_dirty(Thread_ipc_mask);
          if (ok && have_receive)
            state_add_dirty(Thread_receive_wait);

          activate_partner = partner != this;
  if (EXPECT_FALSE(!ok))

      // send failed, so do not switch to receiver directly and skip receive phase
      have_receive = false;
      regs->tag(L4_msg_tag(0, 0, L4_msg_tag::Error, 0));

  assert_kdb (have_receive);
  state_add_dirty(Thread_receive_wait);

  // only do direct switch on closed wait (call) or if we run on a foreign
  // scheduling context

  have_receive = state() & Thread_receive_wait;

  assert_kdb (!in_sender_list());
  assert_kdb (!(state() & Thread_send_wait));
  next = get_next_sender(sender);

  if (activate_partner)

      if (partner->cpu() == current_cpu())

          Sched_context *cs = Sched_context::rq(cpu()).current_sched();
          do_switch = do_switch && ((have_receive && sender) || cs->context() != this)
                      && !(next && current_sched()->dominates(cs));
          partner->state_change_dirty(~Thread_ipc_transfer, Thread_ready);
          if (do_switch)
            schedule_if(handle_drq() || switch_exec_locked(partner, Context::Not_Helping));
          else if (partner->current_sched()->deblock(current_cpu(), current_sched(), true))
            switch_to_locked(partner);

      partner->drq_state_change(~Thread_ipc_transfer, Thread_ready);

      ipc_receiver_ready();
      next->ipc_send_msg(this);
      state_del_dirty(Thread_ipc_mask);
  else if (have_receive)

      if ((state() & Thread_full_ipc_mask) == Thread_receive_wait)
        goto_sleep(t.rcv, sender, utcb().access(true));

  if (EXPECT_TRUE (!(state() & Thread_full_ipc_mask)))

  while (EXPECT_FALSE(state() & Thread_ipc_transfer))

      state_del_dirty(Thread_ready);

  if (EXPECT_TRUE (!(state() & Thread_full_ipc_mask)))

  Utcb *utcb = this->utcb().access(true);
  // The IPC has not finished -- this can be a timeout or a cancel.
  // XXX: should only modify the error-code part of the status code

  if (EXPECT_FALSE(state() & Thread_cancel))

      // we've presumably been reset!
      regs->tag(commit_error(utcb, L4_error::R_canceled, regs->tag()));

  regs->tag(commit_error(utcb, L4_error::R_timeout, regs->tag()));
  state_del(Thread_full_ipc_mask);
PRIVATE inline NEEDS ["map_util.h", Thread::copy_utcb_to]

Thread::transfer_msg(L4_msg_tag tag, Thread *receiver,
                     Syscall_frame *sender_regs, unsigned char rights)

  Syscall_frame *dst_regs = receiver->rcv_regs();

  bool success = copy_utcb_to(tag, receiver, rights);
  tag.set_error(!success);

  dst_regs->from(sender_regs->from_spec());

  // set up the reply capability in case of a call
  if (success && partner() == receiver)
    receiver->set_caller(this, rights);

Buf_utcb_saver::Buf_utcb_saver(const Utcb *u)

  buf_desc = u->buf_desc;
  buf[0]   = u->buffers[0];
  buf[1]   = u->buffers[1];

Buf_utcb_saver::restore(Utcb *u)

  u->buf_desc   = buf_desc;
  u->buffers[0] = buf[0];
  u->buffers[1] = buf[1];

Pf_msg_utcb_saver::Pf_msg_utcb_saver(Utcb const *u) : Buf_utcb_saver(u)

  msg[0] = u->values[0];
  msg[1] = u->values[1];

Pf_msg_utcb_saver::restore(Utcb *u)

  Buf_utcb_saver::restore(u);
  u->values[0] = msg[0];
  u->values[1] = msg[1];
 * \pre Must run with local IRQs disabled (CPU lock held) to ensure
 *      that the handler does not disappear in the meantime.

Thread::exception(Kobject_iface *handler, Trap_state *ts, Mword rights)

  L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never);

  void *old_utcb_handler = _utcb_handler;

  // fill registers for IPC
  Utcb *utcb = this->utcb().access(true);
  Buf_utcb_saver saved_state(utcb);

  utcb->buf_desc   = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu);
  utcb->buffers[0] = L4_msg_item::map(0).raw();
  utcb->buffers[1] = L4_fpage::all_spaces().raw();

  L4_msg_tag tag(L4_exception_ipc::Msg_size, 0, L4_msg_tag::Transfer_fpu,
                 L4_msg_tag::Label_exception);

  r.ref(L4_obj_ref(_exc_handler.raw() << L4_obj_ref::Cap_shift, L4_obj_ref::Ipc_call_ipc));

  handler->invoke(r.ref(), rights, &r, utcb);

  saved_state.restore(utcb);

  if (EXPECT_FALSE(r.tag().has_error()))

      if (Config::conservative)

          printf(" exception fault %s error = 0x%lx\n",
                 utcb->error.snd_phase() ? "send" : "rcv",
                 utcb->error.error());
          kdb_ke("ipc to pager failed");

  state_del(Thread_in_exception);

  else if (r.tag().proto() == L4_msg_tag::Label_allow_syscall)
    state_add(Thread_dis_alien);

  // restore the original utcb_handler
  _utcb_handler = old_utcb_handler;

  // FIXME: handle a non-existent pager properly;
  // for now, just ignore any errors

/* Return 1 if the exception could be handled.
 * Return 0 if it could not; the caller of send_exception() then halts
 * the thread.
PUBLIC inline NEEDS["task.h", "trap_state.h",
                    Thread::fast_return_to_user,
                    Thread::save_fpu_state_to_utcb]

Thread::send_exception(Trap_state *ts)

  assert(cpu_lock.test());

  Vcpu_state *vcpu = vcpu_state().access();

  if (vcpu_exceptions_enabled(vcpu))

      // do not reflect debug exceptions to the VCPU but handle them in
      // the kernel
      if (EXPECT_FALSE(ts->is_debug_exception()
                       && !(vcpu->state & Vcpu_state::F_debug_exc)))

      if (_exc_cont.valid())

      if (vcpu_enter_kernel_mode(vcpu))

          // enter_kernel_mode has switched the address space from user to
          // kernel space, so reevaluate the address of the VCPU state area
          vcpu = vcpu_state().access();

      LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
                Vcpu_log *l = tbe->payload<Vcpu_log>();

                l->state = vcpu->_saved_state;

                l->trap = ts->trapno();
                l->err = ts->error();
                l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;

      memcpy(&vcpu->_ts, ts, sizeof(Trap_state));
      save_fpu_state_to_utcb(ts, utcb().access());
      fast_return_to_user(vcpu->_entry_ip, vcpu->_sp, vcpu_state().usr().get());

  // local IRQs must be disabled because we dereference a Thread_ptr
  if (EXPECT_FALSE(_exc_handler.is_kernel()))

  if (!send_exception_arch(ts))
    return 0; // do not send exception

  unsigned char rights = 0;
  Kobject_iface *pager = _exc_handler.ptr(space(), &rights);

  if (EXPECT_FALSE(!pager))

      /* no pager (anymore), just ignore the exception, return success */
      LOG_TRACE("Exception invalid handler", "exc", this,
                __fmt_exception_invalid_handler,
                Log_exc_invalid *l = tbe->payload<Log_exc_invalid>();
                l->cap_idx = _exc_handler.raw());
      if (EXPECT_FALSE(space() == sigma0_task))

          WARNX(Error, "Sigma0 raised an exception --> HALT\n");

      pager = this; // block on ourselves

  state_change(~Thread_cancel, Thread_in_exception);

  return exception(pager, ts, rights);
Thread::try_transfer_local_id(L4_buf_iter::Item const *const buf,
                              L4_fpage sfp, Mword *rcv_word, Thread* snd,
                              Thread *rcv)

  if (buf->b.is_rcv_id())

      if (snd->space() == rcv->space())

          rcv_word[-1] = sfp.raw();

      unsigned char rights = 0;
      Obj_space::Capability cap = snd->space()->obj_space()->lookup(sfp.obj_index());
      Kobject_iface *o = cap.obj();
      rights = cap.rights();
      if (EXPECT_TRUE(o && o->is_local(rcv->space())))

          rcv_word[-1] = o->obj_id() | Mword(rights);
PRIVATE static inline
bool FIASCO_WARN_RESULT
Thread::copy_utcb_to_utcb(L4_msg_tag const &tag, Thread *snd, Thread *rcv,
                          unsigned char rights)

  assert (cpu_lock.test());

  Utcb *snd_utcb = snd->utcb().access();
  Utcb *rcv_utcb = rcv->utcb().access();
  Mword s = tag.words();
  Mword r = Utcb::Max_words;
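  // copy the untyped message words, clamped to the UTCB's word capacity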
  Mem::memcpy_mwords(rcv_utcb->values, snd_utcb->values, r < s ? r : s);

  success = transfer_msg_items(tag, snd, snd_utcb, rcv, rcv_utcb, rights);

  if (tag.transfer_fpu() && rcv_utcb->inherit_fpu() && (rights & L4_fpage::W))
    snd->transfer_fpu(rcv);
PUBLIC inline NEEDS[Thread::copy_utcb_to_ts, Thread::copy_utcb_to_utcb,
                    Thread::copy_ts_to_utcb]
bool FIASCO_WARN_RESULT
Thread::copy_utcb_to(L4_msg_tag const &tag, Thread* receiver,
                     unsigned char rights)

  // we cannot copy trap state to trap state!
  assert_kdb (!this->_utcb_handler || !receiver->_utcb_handler);
  if (EXPECT_FALSE(this->_utcb_handler != 0))
    return copy_ts_to_utcb(tag, this, receiver, rights);
  else if (EXPECT_FALSE(receiver->_utcb_handler != 0))
    return copy_utcb_to_ts(tag, this, receiver, rights);

  return copy_utcb_to_utcb(tag, this, receiver, rights);
Thread::transfer_msg_items(L4_msg_tag const &tag, Thread* snd, Utcb *snd_utcb,
                           Thread *rcv, Utcb *rcv_utcb,
                           unsigned char rights)

  // LOG_MSG_3VAL(current(), "map bd=", rcv_utcb->buf_desc.raw(), 0, 0);
  L4_buf_iter mem_buffer(rcv_utcb, rcv_utcb->buf_desc.mem());
  L4_buf_iter io_buffer(rcv_utcb, rcv_utcb->buf_desc.io());
  L4_buf_iter obj_buffer(rcv_utcb, rcv_utcb->buf_desc.obj());
  L4_snd_item_iter snd_item(snd_utcb, tag.words());
  register int items = tag.items();
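  // typed items are decoded into the receiver's UTCB directly behind the
  // untyped message words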
  Mword *rcv_word = rcv_utcb->values + tag.words();

  // XXX: damn X-CPU state modification
  // snd->prepare_long_ipc(rcv);

  for (; items > 0 && snd_item.more();)

      if (EXPECT_FALSE(!snd_item.next()))

          snd->set_ipc_error(L4_error::Overflow, rcv);

      L4_snd_item_iter::Item const *const item = snd_item.get();

      if (item->b.is_void())
        { // XXX: not sure if void fpages are needed
          // skip send item and current rcv_buffer

      L4_buf_iter *buf_iter = 0;

      switch (item->b.type())

          case L4_msg_item::Map:
            switch (L4_fpage(item->d).type())

                case L4_fpage::Memory: buf_iter = &mem_buffer; break;
                case L4_fpage::Io:     buf_iter = &io_buffer; break;
                case L4_fpage::Obj:    buf_iter = &obj_buffer; break;

      if (EXPECT_FALSE(!buf_iter))

          // LOG_MSG_3VAL(snd, "lIPCm0", 0, 0, 0);
          snd->set_ipc_error(L4_error::Overflow, rcv);

      L4_buf_iter::Item const *const buf = buf_iter->get();

      if (EXPECT_FALSE(buf->b.is_void() || buf->b.type() != item->b.type()))

          // LOG_MSG_3VAL(snd, "lIPCm1", buf->b.raw(), item->b.raw(), 0);
          snd->set_ipc_error(L4_error::Overflow, rcv);

      assert_kdb (item->b.type() == L4_msg_item::Map);
      L4_fpage sfp(item->d);
      *rcv_word = (item->b.raw() & ~0x0ff7) | (sfp.raw() & 0x0ff0);

      if (!try_transfer_local_id(buf, sfp, rcv_word, snd, rcv))

          // we need to do a real mapping

          // diminish rights when sending via restricted IPC gates
          if (sfp.type() == L4_fpage::Obj)
            sfp.mask_rights(L4_fpage::Rights(rights | L4_fpage::RX));

          L4_error err = fpage_map(snd->space(), sfp,
                                   rcv->space(), L4_fpage(buf->d), item->b.raw(), &rl);

          if (EXPECT_FALSE(!err.ok()))

              snd->set_ipc_error(err, rcv);

      if (!item->b.compund())

  if (EXPECT_FALSE(items))

      snd->set_ipc_error(L4_error::Overflow, rcv);
 * \pre Runs on the sender CPU

Thread::abort_send(L4_error const &e, Thread *partner)

  state_del_dirty(Thread_full_ipc_mask);

  if (_timeout && _timeout->is_set())

  Abort_state abt = Abt_ipc_done;
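  // Determine how far the send has already progressed: still queued in the
  // partner's sender list (cancel), already being pulled by the receiver
  // (in progress), or nothing left to abort (done).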
  if (partner->cpu() == current_cpu())

      if (in_sender_list())

          sender_dequeue(partner->sender_list());
          partner->vcpu_update_state();
          abt = Abt_ipc_cancel;

      else if (partner->in_ipc(this))
        abt = Abt_ipc_in_progress;

      abt = partner->Receiver::abort_send(this);

      case Abt_ipc_cancel:
        utcb().access()->error = e;

      case Abt_ipc_in_progress:
        state_add_dirty(Thread_ipc_transfer);
        while (state() & Thread_ipc_transfer)

            state_del_dirty(Thread_ready);
 * \pre Runs on the sender CPU

Thread::do_send_wait(Thread *partner, L4_timeout snd_t)

  IPC_timeout timeout;

  if (EXPECT_FALSE(snd_t.is_finite()))

      Unsigned64 tval = snd_t.microsecs(Timer::system_clock(), utcb().access(true));
      // Zero timeout or timeout expired already -- give up
      if (tval == 0)
        return abort_send(L4_error::Timeout, partner);

      set_timeout(&timeout);
      timeout.set(tval, cpu());

  register Mword ipc_state;

  while (((ipc_state = state() & (Thread_send_wait | Thread_ipc_abort_mask))) == Thread_send_wait)

      state_del_dirty(Thread_ready);

  if (EXPECT_FALSE(ipc_state == (Thread_cancel | Thread_send_wait)))
    return abort_send(L4_error::Canceled, partner);

  if (EXPECT_FALSE(ipc_state == (Thread_timeout | Thread_send_wait)))
    return abort_send(L4_error::Timeout, partner);

Thread::set_ipc_send_rights(unsigned char c)

  _ipc_send_rights = c;
//---------------------------------------------------------------------
IMPLEMENTATION [!mp]:

PRIVATE inline NEEDS ["l4_types.h"]

Thread::remote_handshake_receiver(L4_msg_tag const &, Thread *,
                                  bool, L4_timeout, Syscall_frame *, unsigned char)

  kdb_ke("Remote IPC in UP kernel");

//---------------------------------------------------------------------

struct Ipc_remote_request;

struct Ipc_remote_request

  Syscall_frame *regs;
  unsigned char rights;

struct Ready_queue_request

  enum Result { Done, Wrong_cpu, Not_existent };

//---------------------------------------------------------------------
IMPLEMENTATION [mp]:
PRIVATE inline NOEXPORT

Thread::remote_ipc_send(Context *src, Ipc_remote_request *rq)

  // LOG_MSG_3VAL(this, "rse", current_cpu(), (Mword)src, (Mword)this);

  LOG_MSG_3VAL(this, "rsend", (Mword)src, 0, 0);
  printf("CPU[%u]: remote IPC send ...\n"
         "  partner=%p [%u]\n"
         "  sender =%p [%u] regs=%p\n"

         rq->partner, rq->partner->cpu(),

  switch (__builtin_expect(rq->partner->check_sender(this, rq->timeout), Ok))

      rq->result = Failed;

      rq->result = Queued;

  // Trigger the remote_ipc_receiver_ready path, because we may need to grab
  // locks and this is forbidden in a DRQ handler. So transfer the IPC in the
  // usual thread code. However, this induces an overhead of two extra IPIs.
  if (rq->tag.items())

      //LOG_MSG_3VAL(rq->partner, "pull", dbg_id(), 0, 0);
      rq->partner->state_change_dirty(~(Thread_ipc_mask | Thread_ready), Thread_ipc_transfer);

  rq->partner->vcpu_disable_irqs();
  bool success = transfer_msg(rq->tag, rq->partner, rq->regs, _ipc_send_rights);
  rq->result = success ? Done : Failed;

  rq->partner->state_change_dirty(~Thread_ipc_mask, Thread_ready);
  // hm, should be done by lazy queueing: rq->partner->ready_enqueue();

Thread::handle_remote_ipc_send(Drq *src, Context *, void *_rq)

  Ipc_remote_request *rq = (Ipc_remote_request*)_rq;
  bool r = nonull_static_cast<Thread*>(src->context())->remote_ipc_send(src->context(), rq);
  //LOG_MSG_3VAL(src, "rse<", current_cpu(), (Mword)src, r);
  return r ? Drq::Need_resched : 0;
 * \pre Runs on the sender CPU

PRIVATE //inline NEEDS ["mp_request.h"]

Thread::remote_handshake_receiver(L4_msg_tag const &tag, Thread *partner,
                                  bool have_receive,
                                  L4_timeout snd_t, Syscall_frame *regs,
                                  unsigned char rights)

  // Flag that there must be no switch in the receive path.
  // This flag also prevents the receive path from accessing
  // the thread state of a remote sender.
  Ipc_remote_request rq;

  rq.have_rcv = have_receive;
  rq.partner = partner;
  rq.timeout = !snd_t.is_zero();

  set_receiver(partner);

  state_add_dirty(Thread_send_wait);

  partner->drq(handle_remote_ipc_send, &rq,
               remote_prepare_receive);

Thread::remote_prepare_receive(Drq *src, Context *, void *arg)

  Context *c = src->context();
  Ipc_remote_request *rq = (Ipc_remote_request*)arg;
  //printf("CPU[%2u:%p]: remote_prepare_receive (err=%x)\n", current_cpu(), c, rq->err.error());

  // No atomic switch to receive state if we are queued, or if the IPC must
  // be done by the sender.
  if (EXPECT_FALSE(rq->result == Queued || rq->result == Ok))

      c->state_del(Thread_ipc_mask);
      if (EXPECT_FALSE((rq->result & Failed) || !rq->have_rcv))

      c->state_add_dirty(Thread_receive_wait);
//---------------------------------------------------------------------------
IMPLEMENTATION [debug]:

Thread::log_fmt_pf_invalid(Tb_entry *e, int max, char *buf)

  Log_pf_invalid *l = e->payload<Log_pf_invalid>();
  return snprintf(buf, max, "InvCap C:%lx pfa=%lx err=%lx", l->cap_idx, l->pfa, l->err);

Thread::log_fmt_exc_invalid(Tb_entry *e, int max, char *buf)

  Log_exc_invalid *l = e->payload<Log_exc_invalid>();
  return snprintf(buf, max, "InvCap C:%lx", l->cap_idx);