#include "ipc_sender.h"
#include "kobject_helper.h"
#include "member_offs.h"
/** Hardware interrupts. This class encapsulates hardware IRQs and
    provides a registry that ensures that only one receiver can sign up
    to receive interrupt IPC messages.
 */
class Irq : public Irq_base, public cxx::Dyn_castable<Irq, Kobject>
typedef Slab_cache Allocator;

Op_eoi_1   = 0, // Irq_sender + Irq_semaphore
Op_trigger = 2, // Irq_sender + Irq_mux + Irq_semaphore
Op_eoi_2   = 4, // Icu + Irq_sender + Irq_semaphore
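// These opcodes are carried in the low 16 bits of the first UTCB word
// of an invocation; see dispatch_irq_proto() and the kinvoke()
// implementations below.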
 * IRQ Kobject to send IPC messages to a receiving thread.
 */
class Irq_sender
: public Kobject_h<Irq_sender, Irq>,
  public Ipc_sender<Irq_sender>
 * IRQ Kobject to broadcast IRQs to multiple other IRQ objects.
 *
 * This is useful for PCI shared IRQs.
 */
class Irq_muxer : public Kobject_h<Irq_muxer, Irq>, private Irq_chip
int set_mode(Mword, Irq_chip::Mode) { return 0; }
bool is_edge_triggered(Mword) const { return false; }
void switch_mode(bool)
{
  // the irq object is assumed to be always handled as
  // level triggered
}

void set_cpu(Mword, Cpu_number)
{
  // don't know what to do here, may be multiple targets on different
  // CPUs
}

char const *chip_type() const { return "Bcast"; }

Spin_lock<> _mux_lock;
//-----------------------------------------------------------------------------
#include "assert_opt.h"
#include "cpu_lock.h"
#include "entry_frame.h"
#include "ipc_sender.h"
#include "kmem_slab.h"
#include "lock_guard.h"
#include "std_macros.h"
#include "thread_object.h"
#include "thread_state.h"
#include "l4_buf_iter.h"
static Irq_base *irq_base_dcast(Kobject_iface *o)
{ return cxx::dyn_cast<Irq*>(o); }

struct Irq_base_cast
{
  Irq_base_cast()
  { Irq_base::dcast = &irq_base_dcast; }
};

static Irq_base_cast register_irq_base_cast;
Irq::dispatch_irq_proto(Unsigned16 op, bool may_unmask)

      return L4_msg_tag(L4_msg_tag::Schedule); // no reply

      return L4_msg_tag(L4_msg_tag::Schedule); // no reply

      return commit_result(-L4_err::ENosys);
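// Operations that elicit no IPC reply (EOI, trigger) return the
// Schedule tag rather than a normal reply tag, per the "no reply"
// convention noted above.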
Irq_muxer::unmask(Mword)

  while (!mp_cas(&_mask_cnt, old, old - 1));


Irq_muxer::mask(Mword)

  while (!mp_cas(&_mask_cnt, old, old + 1));
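// _mask_cnt counts muxed child IRQs that are currently masked: mask()
// and sys_attach() raise it, unmask() lowers it. Presumably the shared
// physical line is only unmasked once the count drops back to zero, so
// one masked child keeps the line masked for all of them.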
Irq_muxer::unbind(Irq_base *irq)

  Irq_base *n;
  auto g = lock_guard(_mux_lock);
  for (n = this; n->_next && n->_next != irq; n = n->_next)
    ;

  if (!n->_next)
    return; // someone else was faster

  n->_next = n->_next->_next;

  static_cast<Irq_chip&>(*this).unmask(0);

  Irq_chip::unbind(irq);
Irq_muxer::mask_and_ack(Mword)


Irq_muxer::handle(Upstream_irq const *ui)

  assert (cpu_lock.test());
  Irq_base::mask_and_ack();

  if (EXPECT_FALSE(!Irq_base::_next))

  for (Irq_base *n = Irq_base::_next; n;)

      n = n->Irq_base::_next;

  while (!mp_cas(&_mask_cnt, old, old + irqs));

  for (Irq_base *n = Irq_base::_next; n;)

      Irq *i = nonull_static_cast<Irq*>(n);

      n = i->Irq_base::_next;
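// Broadcast: the first loop counts the children and adds that number to
// _mask_cnt; the second presumably delivers the IRQ to every child. The
// shared line thus stays masked until each child has signalled EOI,
// since every EOI decrements the count via unmask().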
Irq_muxer::Irq_muxer(Ram_quota *q = 0)
: Kobject_h<Irq_muxer, Irq>(q), _mask_cnt(0),
  _mux_lock(Spin_lock<>::Unlocked)
{
  hit_func = &handler_wrapper<Irq_muxer>;
}
Irq_muxer::destroy(Kobject ***rl)

  while (Irq_base *n = Irq_base::_next)
    {
      auto g = lock_guard(n->irq_lock());
      if (n->chip() == this)
Irq_muxer::sys_attach(L4_msg_tag tag, Utcb const *utcb, Syscall_frame *)

  Irq *irq = Ko::deref<Irq>(&tag, utcb, &rights);

  auto g = lock_guard(irq->irq_lock());

  while (!mp_cas(&_mask_cnt, old, old + 1));

  auto mg = lock_guard(_mux_lock);
  irq->Irq_base::_next = Irq_base::_next;
  Irq_base::_next = irq;

  return commit_result(0);
Irq_muxer::kinvoke(L4_obj_ref, L4_fpage::Rights /*rights*/, Syscall_frame *f,
                   Utcb const *utcb, Utcb *)

  L4_msg_tag tag = f->tag();

  if (EXPECT_FALSE(tag.words() < 1))
    return commit_result(-L4_err::EInval);

  Unsigned16 op = access_once(utcb->values + 0) & 0xffff;

    case L4_msg_tag::Label_irq:
      // start BACKWARD COMPAT

        case Op_compat_chain:
          printf("KERNEL: backward compat IRQ-MUX chain, recompile your user code\n");
          return sys_attach(tag, utcb, f);

      // end BACKWARD COMPAT
      return dispatch_irq_proto(op, false);

    case L4_msg_tag::Label_irq_mux:

      return sys_attach(tag, utcb, f);

      return commit_result(-L4_err::ENosys);

      return commit_result(-L4_err::EBadproto);
/** Bind a receiver to this device interrupt.
    @param t the receiver that wants to receive IPC messages for this IRQ
    @return true if the binding could be established
 */
PUBLIC inline NEEDS ["atomic.h", "cpu_lock.h", "lock_guard.h"]
bool
Irq_sender::alloc(Thread *t)

  bool ret = mp_cas(&_irq_thread, reinterpret_cast<Thread*>(0), t);

  if (EXPECT_TRUE(t != 0))

      if (Cpu::online(t->home_cpu()))
        _chip->set_cpu(pin(), t->home_cpu());
Irq_sender::owner() const { return _irq_thread; }
/** Release a device interrupt.
    @param t the receiver that owns the IRQ
    @return true if t really was the owner of the IRQ and the operation
            was performed
 */

Irq_sender::free(Thread *t, Kobject ***rl)

  bool ret = mp_cas(&_irq_thread, t, reinterpret_cast<Thread*>(0));

  auto guard = lock_guard(cpu_lock);

  if (EXPECT_TRUE(t != 0))

      t->Receiver::abort_send(this);

      // release cpu-lock early, actually before delete
Irq_sender::Irq_sender(Ram_quota *q = 0)
: Kobject_h<Irq_sender, Irq>(q), _queued(0), _irq_thread(0), _irq_id(~0UL)
{
  hit_func = &hit_level_irq;
}


Irq_sender::switch_mode(bool is_edge_triggered)
{
  hit_func = is_edge_triggered ? &hit_edge_irq : &hit_level_irq;
}
Irq_sender::destroy(Kobject ***rl)

  auto g = lock_guard(cpu_lock);
  auto t = access_once(&_irq_thread);
/** Consume one interrupt.
    @return number of IRQs that are still pending.
 */
PRIVATE inline NEEDS ["atomic.h"]

Irq_sender::consume()

  while (!mp_cas(&_queued, old, old - 1));
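// An edge-triggered source gets masked by _hit_edge_irq() below when a
// second edge arrives before the first one was handled; once the queue
// drains back to a single pending IRQ, the line may be unmasked again.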
if (old == 2 && hit_func == &hit_edge_irq)
/**
 * Predicate used to figure out if the sender shall be enqueued
 * for sending a second message after sending the first.
 */
PUBLIC inline NEEDS[Irq_sender::consume]
bool
Irq_sender::requeue_sender()
{ return consume() > 0; }

/**
 * Predicate used to figure out if the sender shall be dequeued after
 * sending the request.
 */
PUBLIC inline NEEDS[Irq_sender::consume]
bool
Irq_sender::dequeue_sender()
{ return consume() < 1; }
Irq_sender::transfer_msg(Receiver *recv)

  Syscall_frame* dst_regs = recv->rcv_regs();

  // set ipc return value: OK
  dst_regs->tag(L4_msg_tag(0));

  // set ipc source thread id
  dst_regs->from(_irq_id);
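// modify_label() below rewrites the IRQ label (_irq_id) using an array
// of 4-word rules (test_mask, test, set_mask, set): when the masked
// label matches a rule's test value, the label is rewritten to
// (_irq_id & ~set_mask) | set.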
Irq_sender::modify_label(Mword const *todo, int cnt)

  for (int i = 0; i < cnt * 4; i += 4)
    {
      Mword const test_mask = todo[i];
      Mword const test      = todo[i + 1];
      if ((_irq_id & test_mask) == test)
        {
          Mword const set_mask = todo[i + 2];
          Mword const set      = todo[i + 3];

          _irq_id = (_irq_id & ~set_mask) | set;
Irq_sender::handle_remote_hit(Context::Drq *, Context *, void *arg)
{
  Irq_sender *irq = static_cast<Irq_sender*>(arg);
  irq->set_cpu(current_cpu());
  if (EXPECT_TRUE(irq->send_msg(irq->_irq_thread, false)))
    return Context::Drq::no_answer_resched();
  return Context::Drq::no_answer();
}
while (!mp_cas(&_queued, old, old + 1));
Irq_sender::count_and_send(Smword queued)

  if (EXPECT_TRUE(queued == 0) && EXPECT_TRUE(_irq_thread != 0)) // increase hit counter
    {
      if (EXPECT_FALSE(_irq_thread->home_cpu() != current_cpu()))
        _irq_thread->drq(&_drq, handle_remote_hit, this,
                         Context::Drq::Target_ctxt, Context::Drq::No_wait);
      else
        send_msg(_irq_thread, true);
    }
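// Note: if the bound thread lives on a different CPU, delivery above is
// deferred to that CPU via a DRQ (handle_remote_hit); otherwise the
// message is sent directly from the interrupt path.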
PUBLIC inline NEEDS[Irq_sender::count_and_send, Irq_sender::queue]
void
Irq_sender::_hit_level_irq(Upstream_irq const *ui)

  // We're entered holding the kernel lock, which also means irqs are
  // disabled on this CPU (XXX always correct?). We never enable irqs
  // in this stack frame (except maybe in a nonnested invocation of
  // switch_exec() -> switchin_context()) -- they will be re-enabled
  // once we return from it (iret in entry.S:all_irqs) or we switch to
  // a different thread.

  // LOG_MSG_3VAL(current(), "IRQ", dbg_id(), 0, _queued);

  assert (cpu_lock.test());

  count_and_send(queue());


Irq_sender::hit_level_irq(Irq_base *i, Upstream_irq const *ui)
{ nonull_static_cast<Irq_sender*>(i)->_hit_level_irq(ui); }
PUBLIC inline NEEDS[Irq_sender::count_and_send, Irq_sender::queue]
void
Irq_sender::_hit_edge_irq(Upstream_irq const *ui)

  // We're entered holding the kernel lock, which also means irqs are
  // disabled on this CPU (XXX always correct?). We never enable irqs
  // in this stack frame (except maybe in a nonnested invocation of
  // switch_exec() -> switchin_context()) -- they will be re-enabled
  // once we return from it (iret in entry.S:all_irqs) or we switch to
  // a different thread.

  // LOG_MSG_3VAL(current(), "IRQ", dbg_id(), 0, _queued);

  assert (cpu_lock.test());

  // If we get a second edge-triggered IRQ before the first is
  // handled, we mask the IRQ. The consume function will unmask
  // the IRQ when the last IRQ is dequeued.


Irq_sender::hit_edge_irq(Irq_base *i, Upstream_irq const *ui)
{ nonull_static_cast<Irq_sender*>(i)->_hit_edge_irq(ui); }
Irq_sender::sys_attach(L4_msg_tag tag, Utcb const *utcb, Syscall_frame *)

  thread = Ko::deref<Thread>(&tag, utcb, &rights);

  thread = current_thread();

  _irq_id = utcb->values[1];
  return commit_result(0);

  return commit_result(-L4_err::EInval);
Irq_sender::sys_detach()

  free(_irq_thread, rl.list());

  return commit_result(0);
Irq_sender::kinvoke(L4_obj_ref, L4_fpage::Rights /*rights*/, Syscall_frame *f,
                    Utcb const *utcb, Utcb *)

  L4_msg_tag tag = f->tag();

  if (EXPECT_FALSE(tag.words() < 1))
    return commit_result(-L4_err::EInval);

  Unsigned16 op = access_once(utcb->values + 0);

    case L4_msg_tag::Label_irq:
      // start BACKWARD COMPAT

        case Op_compat_attach:
          printf("KERNEL: backward compat IRQ attach, recompile your user code\n");
          return sys_attach(tag, utcb, f);
        case Op_compat_detach:
          printf("KERNEL: backward compat IRQ detach, recompile your user code\n");

      // end BACKWARD COMPAT
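      // Generic IRQ protocol: permit an unmask on EOI only while no
      // further IRQ is queued (_queued < 1).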
      return dispatch_irq_proto(op, _queued < 1);

    case L4_msg_tag::Label_irq_sender:

      return sys_attach(tag, utcb, f);

      return commit_result(-L4_err::ENosys);

      return commit_result(-L4_err::EBadproto);
Irq_sender::obj_id() const
// Irq implementation

static Kmem_slab _irq_allocator(max(sizeof(Irq_sender), sizeof(Irq_muxer)),
                                __alignof__ (Irq), "Irq");

{ return &_irq_allocator; }
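// A single slab cache backs both concrete Irq types; each slot is sized
// and aligned to fit the larger of Irq_sender and Irq_muxer.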
Irq::operator new (size_t, void *p)


Irq::operator delete (void *_l)

  Irq *l = reinterpret_cast<Irq*>(_l);

  l->_q->free(sizeof(Irq));

  allocator()->free(l);
PUBLIC template<typename T> inline NEEDS[Irq::allocator, Irq::operator new]

Irq::allocate(Ram_quota *q)

  void *nq = allocator()->q_alloc(q);

  return new (nq) T(q);
PUBLIC explicit inline
Irq::Irq(Ram_quota *q = 0) : _q(q) {}
Irq::destroy(Kobject ***rl)

  Kobject::destroy(rl);
static Kobject_iface * FIASCO_FLATTEN
irq_sender_factory(Ram_quota *q, Space *,
                   L4_msg_tag, Utcb const *,
                   int *err)
{
  *err = L4_err::ENomem;
  return Irq::allocate<Irq_sender>(q);
}

static Kobject_iface * FIASCO_FLATTEN
irq_mux_factory(Ram_quota *q, Space *,
                L4_msg_tag, Utcb const *,
                int *err)
{
  *err = L4_err::ENomem;
  return Irq::allocate<Irq_muxer>(q);
}
static inline void __attribute__((constructor)) FIASCO_INIT

  Kobject_iface::set_factory(L4_msg_tag::Label_irq_sender, irq_sender_factory);
  Kobject_iface::set_factory(L4_msg_tag::Label_irq_mux, irq_mux_factory);