3 #include "ipc_sender.h"
5 #include "kobject_helper.h"
6 #include "member_offs.h"
15 /** Hardware interrupts. This class encapsulates hardware IRQs. Also,
16 it provides a registry that ensures that only one receiver can sign up
17 to receive interrupt IPC messages.
// Irq: kernel object wrapping a hardware interrupt (see the class
// comment above); instances come from a dedicated slab cache.
19 class Irq : public Irq_base, public Kobject
22 FIASCO_DECLARE_KOBJ();
25 typedef slab_cache_anon Allocator;
// Irq_sender: Irq flavor that converts interrupt hits into IPC to one
// bound receiver (base-list fragment; class head elided in this view).
59 : public Kobject_h<Irq_sender, Irq>,
60 public Ipc_sender<Irq_sender>
63 Mword kobject_size() const { return sizeof(*this); }
// Copy constructor only declared -- presumably to forbid copying; the
// surrounding access specifier is elided here. TODO confirm.
66 Irq_sender(Irq_sender &);
// Receiver currently bound to this IRQ; 0 = unbound, and the hit path
// below treats (void*)-1 as "kernel debugger attached".
70 Receiver *_irq_thread;
// Irq_muxer: shares one hardware IRQ line among several chained Irqs.
76 class Irq_muxer : public Kobject_h<Irq_muxer, Irq>
78 friend class Chain_irq_pin;
// Irq_debugger: IRQ variant handled by the in-kernel debugger hooks.
84 class Irq_debugger : public Irq_base
// Chain_irq_pin: pin installed in Irqs chained behind an Irq_muxer.
89 class Chain_irq_pin : public Sw_irq_pin
92 //-----------------------------------------------------------------------------
// Trace-buffer format hook; the asm label pins a stable symbol name
// ("__irq_log_fmt") that LOG_TRACE call sites reference.
105 static unsigned irq_log_fmt(Tb_entry *, int, char *)
106 asm ("__irq_log_fmt");
111 //-----------------------------------------------------------------------------
116 #include "cpu_lock.h"
117 #include "entry_frame.h"
119 #include "ipc_sender.h"
121 #include "kmem_slab.h"
122 #include "lock_guard.h"
124 #include "receiver.h"
125 #include "std_macros.h"
126 #include "thread_object.h"
127 #include "thread_state.h"
128 #include "l4_buf_iter.h"
// Register the Irq kobject type with the kernel object infrastructure.
130 FIASCO_DEFINE_KOBJ(Irq);
// Downcast helper: yields the Irq view of `o` when it is an Irq, else 0.
133 static Irq_base *irq_base_dcast(Kobject_iface *o)
134 { return Kobject::dcast<Irq*>(o); }
// Constructor body of a small registration type (its declaration is
// elided from this view): installs irq_base_dcast into Irq_base::dcast
// at static-initialization time via the object below.
139 { Irq_base::dcast = &irq_base_dcast; }
142 static Irq_base_cast register_irq_base_cast;
// Map an embedded Irq_pin back to its owning Irq (container-of idiom).
147 Irq::self(Irq_pin const *pin)
// Hand-rolled offsetof: uses a fake object address of 10 instead of 0,
// presumably to sidestep compiler diagnostics about offsetof on
// non-POD types -- TODO confirm rationale.
149 #define MYoffsetof(TYPE, MEMBER) (((size_t) &((TYPE *)10)->MEMBER) - 10)
150 return reinterpret_cast<Irq*>(reinterpret_cast<Mword>(pin)
151 - MYoffsetof(Irq, _pin));
// Build a chain pin for a multiplexed IRQ: remember the owning
// Irq_muxer in the pin's first payload word (read back via irq()).
157 PUBLIC inline explicit
158 Chain_irq_pin::Chain_irq_pin(Irq_muxer *i)
159 { payload()[0] = reinterpret_cast<Mword>(i); }
// Fetch the owning muxer pointer stored in payload word 0.
163 Chain_irq_pin::irq() const
164 { return (Irq_muxer*)payload()[0]; }
// Unmask: atomically decrement the muxer's shared mask count; the
// elided lines presumably unmask the real hardware pin only when the
// count drops to zero -- TODO confirm against the full source.
168 Chain_irq_pin::do_unmask()
172 old = irq()->_mask_cnt;
173 while (!mp_cas(&irq()->_mask_cnt, old, old - 1));
176 irq()->pin()->unmask();
// Mask: mirror image of do_unmask() -- atomically raise the shared
// mask count and mask the underlying pin (guard condition elided).
182 Chain_irq_pin::do_mask()
186 old = irq()->_mask_cnt;
187 while (!mp_cas(&irq()->_mask_cnt, old, old + 1));
190 irq()->pin()->mask();
// Remove this pin's Irq from the muxer's singly linked chain and
// revert the pin to a plain software IRQ pin.
196 Chain_irq_pin::unbind_irq()
198 Irq_base *self = Irq::self(this);
// Walk the chain until n->_next is the Irq that owns this pin.
200 for (n = irq(); n->_next && n->_next != self; n = n->_next)
203 assert (n->_next == self);
// Splice our Irq out of the list.
204 n->_next = n->_next->_next;
// In-place pin replacement: this object must not be used as a
// Chain_irq_pin afterwards.
208 replace<Sw_irq_pin>();
// Mask-and-ack hook for chained pins (body elided in this view).
214 Chain_irq_pin::do_mask_and_ack()
// Placement new: Irqs are constructed into slab storage obtained by
// allocate() below; body elided, presumably returns p unchanged.
221 Irq::operator new (size_t, void *p)
// Matching delete: return the quota charge to the object's Ram_quota
// (guard for a null _q presumably in the elided line) and hand the
// slab chunk back to the shared allocator.
226 Irq::operator delete (void *_l)
228 Irq *l = reinterpret_cast<Irq*>(_l);
230 l->_q->free(sizeof(Irq));
232 allocator()->free(l);
// Quota-charged factory: draw storage for a T (Irq_sender or
// Irq_muxer) from the shared slab, charged against Ram_quota q; the
// failure check on nq is presumably in the elided line -- TODO confirm.
235 PUBLIC template<typename T> inline NEEDS[Irq::allocator, Irq::operator new]
238 Irq::allocate(Ram_quota *q)
240 void *nq =allocator()->q_alloc(q);
242 return new (nq) T(q);
// One slab serves both concrete Irq flavors, so the chunk size is the
// larger of the two object sizes.
247 static Kmem_slab _irq_allocator(max(sizeof (Irq_sender), sizeof(Irq_muxer)),
248 __alignof__ (Irq), "Irq");
253 { return &_irq_allocator; }
256 /** Bind a receiver to this device interrupt.
257 @param t the receiver that wants to receive IPC messages for this IRQ
258 @return true if the binding could be established
// Atomically claim the IRQ for receiver t: the CAS succeeds only when
// no receiver is currently bound (0 -> t transition).
260 PUBLIC inline NEEDS ["atomic.h", "cpu_lock.h", "lock_guard.h"]
262 Irq_sender::alloc(Receiver *t)
264 bool ret = mp_cas(&_irq_thread, reinterpret_cast<Receiver*>(0), t);
268 if (EXPECT_TRUE(t != 0))
// Re-target interrupt delivery to the CPU the new receiver runs on.
271 pin()->set_cpu(t->cpu());
// Current owner, or 0 when the IRQ is unbound.
282 Irq_sender::owner() const { return _irq_thread; }
284 /** Release a device interrupt.
285 @param t the receiver that owns the IRQ
286 @return true if t really was the owner of the IRQ and operation was
// Atomically unbind receiver t (CAS t -> 0); succeeds only when t
// really is the current owner.
291 Irq_sender::free(Receiver *t)
293 bool ret = mp_cas(&_irq_thread, t, reinterpret_cast<Receiver*>(0));
297 Lock_guard<Cpu_lock> guard(&cpu_lock);
300 if (EXPECT_TRUE(t != 0))
304 // release cpu-lock early, actually before delete
// Drop a reference on the receiver; the elided lines presumably
// delete it when this was the last reference -- TODO confirm.
307 if (t->dec_ref() == 0)
// Construct an Irq charged to quota q (default: none); it starts out
// with a plain software pin, i.e. not yet bound to real hardware.
316 PUBLIC explicit inline
317 Irq::Irq(Ram_quota *q = 0)
320 new (pin()) Sw_irq_pin();
// Kobject teardown; additional cleanup lines are elided in this view.
326 Irq::destroy(Kobject ***rl)
329 Kobject::destroy(rl);
// An Irq_sender starts with no pending hits, no bound receiver, and
// an all-ones IPC label (~0UL) until sys_attach() sets a real one.
333 Irq_sender::Irq_sender(Ram_quota *q = 0)
334 : Kobject_h<Irq_sender, Irq>(q), _queued(0), _irq_thread(0), _irq_id(~0UL)
// Teardown runs under the CPU lock (remaining statements elided here).
339 Irq_sender::destroy(Kobject ***rl)
341 Lock_guard<Cpu_lock> g(&cpu_lock);
// Accessor body (signature elided): returns the pin's payload word 0.
351 { return pin()->payload()[0]; }
353 /** Consume one interrupt.
354 @return number of IRQs that are still pending.
356 PRIVATE inline NEEDS ["atomic.h"]
358 Irq_sender::consume()
// Atomically decrement the pending-hit counter (the load of `old` is
// elided); per the doc comment above, the caller learns how many IRQs
// remain pending.
366 while (!mp_cas (&_queued, old, old - 1));
380 * Predicate used to figure out if the sender shall be enqueued
381 * for sending a second message after sending the first.
383 PUBLIC inline NEEDS[Irq_sender::consume]
385 Irq_sender::requeue_sender()
// More hits still pending after this one: stay enqueued as a sender.
386 { return consume() > 0; }
389 * Predicate used to figure out if the sender shall be dequeued after
390 * sending the request.
392 PUBLIC inline NEEDS[Irq_sender::consume]
394 Irq_sender::dequeue_sender()
// Nothing left pending after this hit: leave the sender queue.
395 { return consume() < 1; }
// Deliver the IRQ IPC into the receiver's message registers: a
// success tag plus the configured sender label (_irq_id).
399 Irq_sender::transfer_msg(Receiver *recv)
401 Syscall_frame* dst_regs = recv->rcv_regs();
403 // set ipc return value: OK
404 dst_regs->tag(L4_msg_tag(0));
406 // set ipc source thread id
407 dst_regs->from(_irq_id);
// Rewrite _irq_id during capability-label modification: `todo` holds
// cnt quadruples (test_mask, test, set_mask, set); a quadruple whose
// test matches patches the label (loop exit details elided).
413 Irq_sender::modify_label(Mword const *todo, int cnt)
415 for (int i = 0; i < cnt*4; i += 4)
417 Mword const test_mask = todo[i];
418 Mword const test = todo[i+1];
419 if ((_irq_id & test_mask) == test)
421 Mword const set_mask = todo[i+2];
422 Mword const set = todo[i+3];
// Keep the bits outside set_mask, overlay the new ones.
424 _irq_id = (_irq_id & ~set_mask) | set;
// DRQ handler executed on the receiver's CPU when the IRQ fired on a
// different one: re-target the pin to this CPU and deliver locally.
433 Irq_sender::handle_remote_hit(Context::Drq *, Context *, void *arg)
435 Irq_sender *irq = (Irq_sender*)arg;
436 irq->pin()->set_cpu(current_cpu());
437 irq->send_msg(irq->_irq_thread);
438 return Context::Drq::No_answer;
// Hit-handler fragment (signature elided; given the use of _mask_cnt
// and the _next chain this is presumably Irq_muxer::hit -- TODO
// confirm). Runs with the CPU lock held: ack the shared pin, then fan
// the hit out to the chained Irqs.
445 assert (cpu_lock.test());
446 pin()->mask_and_ack();
// Empty chain: nothing to deliver to (action elided).
448 if (EXPECT_FALSE (!Irq_base::_next))
// First pass over the chain (per-element work elided in this view).
452 for (Irq_base *n = Irq_base::_next; n;)
454 Irq *i = nonull_static_cast<Irq*>(n);
457 n = i->Irq_base::_next;
// Fold the number of affected children into the shared mask count.
464 while (!mp_cas(&_mask_cnt, old, old + irqs));
// Second pass: presumably delivers the hit to each chained Irq.
467 for (Irq_base *n = Irq_base::_next; n;)
469 Irq *i = nonull_static_cast<Irq*>(n);
471 n = i->Irq_base::_next;
// Debug-IRQ hit fragment (signature elided; presumably
// Irq_debugger::hit): ack the pin and hand the interrupt to the
// kernel debugger when it claims it (KDB/JDB builds only).
480 assert (cpu_lock.test());
481 pin()->mask_and_ack();
483 #if defined(CONFIG_KDB) || defined(CONFIG_JDB)
484 if (pin()->check_debug_irq())
// Irq_sender hit-path fragment (signature elided): count the hit and
// deliver an IPC to the bound receiver, cross-CPU via DRQ if needed.
495 // We're entered holding the kernel lock, which also means irqs are
496 // disabled on this CPU (XXX always correct?). We never enable irqs
497 // in this stack frame (except maybe in a nonnested invocation of
498 // switch_exec() -> switchin_context()) -- they will be re-enabled
499 // once we return from it (iret in entry.S:all_irqs) or we switch to
500 // a different thread.
502 // LOG_MSG_3VAL(current(), "IRQ", dbg_id(), 0, _queued);
504 assert (cpu_lock.test());
505 pin()->mask_and_ack();
// No receiver bound: nothing to deliver (action elided).
507 if (EXPECT_FALSE (!_irq_thread))
// Sentinel owner (void*)-1 marks an attached kernel debugger.
509 else if (EXPECT_FALSE (_irq_thread == (void*)-1))
511 // debugger attached to IRQ
512 #if defined(CONFIG_KDB) || defined(CONFIG_JDB)
513 if (pin()->check_debug_irq())
// Atomically bump the pending counter; only the 0 -> 1 transition
// triggers a message below, later hits merely accumulate.
524 while (!mp_cas(&_queued, old, old + 1));
526 if (EXPECT_TRUE (old == 0)) // increase hit counter
// Receiver lives on another CPU: hand delivery over via a DRQ that
// runs handle_remote_hit in the receiver's context.
528 if (EXPECT_FALSE(_irq_thread->cpu() != current_cpu()))
529 _irq_thread->drq(&_drq, handle_remote_hit, this, 0,
530 Context::Drq::Target_ctxt, Context::Drq::No_wait);
532 send_msg(_irq_thread);
// IRQ attach/detach syscall handler for Irq_sender. Word 0 carries
// mode flags in its high 16 bits (opcode already dispatched by the
// caller); an optional object send item names the thread to bind;
// word 1 is the IPC label to deliver. Several branch bodies are
// elided in this view.
539 Irq_sender::sys_attach(L4_msg_tag const &tag, Utcb const *utcb, Syscall_frame * /*f*/,
542 L4_snd_item_iter snd_items(utcb, tag.words());
544 Receiver *thread = 0;
545 unsigned mode = utcb->values[0] >> 16;
// No send item: detach-style invocation; the elided lines presumably
// release the current binding -- TODO confirm.
547 if (tag.items() == 0)
550 if (mode & Set_irq_mode)
551 printf("DEPRECATED SET IRQ MODE\n");
552 //pin()->set_mode(mode);
558 return commit_result(0);
561 if (tag.items() && snd_items.next())
563 L4_fpage bind_thread(snd_items.get()->d);
564 if (EXPECT_FALSE(!bind_thread.is_objpage()))
565 return commit_error(utcb, L4_error::Overflow);
// Resolve the capability to a Thread_object in the caller's space.
567 thread = Kobject::dcast<Thread_object*>(o_space->lookup_local(bind_thread.obj_index()));
// Fallback (guard condition elided): bind the calling thread itself.
571 thread = current_thread();
575 if (mode & Set_irq_mode)
576 printf("DEPRECATED SET IRQ MODE\n");
// New IPC label used for messages generated by this IRQ.
577 _irq_id = utcb->values[1];
578 return commit_result(0);
581 return commit_result(-L4_err::EInval);
// A muxer starts with an empty chain and a zero shared mask count.
585 Irq_muxer::Irq_muxer(Ram_quota *q = 0)
586 : Kobject_h<Irq_muxer, Irq>(q), _mask_cnt(0)
// Teardown (body largely elided); chained IRQs are knowingly leaked.
591 Irq_muxer::destroy(Kobject ***rl)
593 // FIXME: unchain IRQs
// Chain an existing Irq object into this muxer. The send item names
// the Irq capability; on success the Irq's pin is replaced by a
// Chain_irq_pin pointing back at this muxer, and the Irq is pushed
// onto the front of the _next chain.
600 Irq_muxer::sys_attach(L4_msg_tag const &tag, Utcb const *utcb, Syscall_frame * /*f*/,
603 L4_snd_item_iter snd_items(utcb, tag.words());
606 unsigned mode = utcb->values[0] >> 16;
// A send item carrying the Irq capability is mandatory here.
608 if (tag.items() == 0)
609 return commit_result(-L4_err::EInval);
611 if (tag.items() && snd_items.next())
613 L4_fpage bind_irq(snd_items.get()->d);
614 if (EXPECT_FALSE(!bind_irq.is_objpage()))
615 return commit_error(utcb, L4_error::Overflow);
617 irq = Kobject::dcast<Irq*>(o_space->lookup_local(bind_irq.obj_index()));
// No valid Irq capability resolved: reject (reached via elided flow).
621 return commit_result(-L4_err::EInval);
623 if (mode & Set_irq_mode)
624 printf("DEPRECATED SET IRQ MODE\n");
625 //pin()->set_mode(mode);
// Detach the Irq from whatever it was chained to before.
627 irq->pin()->unbind_irq();
// An unmasked child raises the shared mask count (details elided).
629 if (!irq->pin()->masked())
634 while (!mp_cas(&_mask_cnt, old, old + 1));
637 irq->pin()->replace<Chain_irq_pin>(this);
// Push onto the head of the singly linked chain.
639 irq->Irq_base::_next = Irq_base::_next;
640 Irq_base::_next = irq;
642 return commit_result(0);
// Kernel-object invocation entry for Irq_muxer: validate the IRQ
// protocol and word count, then dispatch on the opcode in the low
// 16 bits of word 0 (case labels elided in this view).
647 Irq_muxer::kinvoke(L4_obj_ref, Mword /*rights*/, Syscall_frame *f,
648 Utcb const *utcb, Utcb *)
650 register Context *const c_thread = ::current();
651 register Space *const c_space = c_thread->space();
652 register Obj_space *const o_space = c_space->obj_space();
654 L4_msg_tag tag = f->tag();
656 if (EXPECT_FALSE(tag.proto() != L4_msg_tag::Label_irq))
657 return commit_result(-L4_err::EBadproto);
659 if (EXPECT_FALSE(tag.words() < 1))
660 return commit_result(-L4_err::EInval);
662 switch ((utcb->values[0] & 0xffff))
665 return sys_attach(tag, utcb, f, o_space);
668 Irq::log_irq(this, 0);
671 return commit_result(-L4_err::EInval);
// Kernel-object invocation entry for Irq_sender; same pattern as the
// muxer variant: protocol check, word-count check, opcode dispatch
// (several case labels elided in this view).
677 Irq_sender::kinvoke(L4_obj_ref, Mword /*rights*/, Syscall_frame *f,
678 Utcb const *utcb, Utcb *)
680 register Context *const c_thread = ::current();
681 register Space *const c_space = c_thread->space();
682 register Obj_space *const o_space = c_space->obj_space();
684 L4_msg_tag tag = f->tag();
686 if (EXPECT_FALSE(tag.proto() != L4_msg_tag::Label_irq))
687 return commit_result(-L4_err::EBadproto);
689 if (EXPECT_FALSE(tag.words() < 1))
690 return commit_result(-L4_err::EInval);
692 switch ((utcb->values[0] & 0xffff))
698 case Op_attach: /* ATTACH, DETACH */
699 return sys_attach(tag, utcb, f, o_space);
702 Irq::log_irq(this, 0);
705 return commit_result(-L4_err::EInval);
// Object id accessor (return type and body elided in this view).
711 Irq_sender::obj_id() const
715 // --------------------------------------------------------------------------
716 IMPLEMENTATION [debug]:
// Human-readable pin type shown by the debugger.
722 Chain_irq_pin::pin_type() const
723 { return "CHAIN IRQ"; }
// Trace-buffer formatter (bound to the __irq_log_fmt asm label
// declared in the interface section): renders an Irq_log payload.
727 Irq::irq_log_fmt(Tb_entry *e, int maxlen, char *buf)
729 Irq_log *l = e->payload<Irq_log>();
730 return snprintf(buf, maxlen, "0x%x/%u D:%lx userip=%lx",
731 l->irq_number, l->irq_number,
732 l->irq_obj, l->user_ip);
// Record an IRQ-object trigger in the trace buffer (debug builds).
737 Irq::log_irq(Irq *irq, int nr)
739 Context *c = current();
740 LOG_TRACE("IRQ-Object triggers", "irq", c, __irq_log_fmt,
741 Irq::Irq_log *l = tbe->payload<Irq::Irq_log>();
743 l->user_ip = c->regs()->ip();
// ~0UL marks "no IRQ object" in the trace record.
744 l->irq_obj = irq ? irq->dbg_id() : ~0UL;
// Record a timer IRQ in the trace buffer (debug builds).
748 PUBLIC static inline NEEDS["config.h"]
750 Irq::log_timer_irq(int nr)
752 Context *c = current();
753 LOG_TRACE("Timer IRQs (kernel scheduling)", "timer", c, __irq_log_fmt,
754 Irq::Irq_log *l = tbe->payload<Irq::Irq_log>();
756 l->user_ip = c->regs()->ip(),
761 // --------------------------------------------------------------------------
762 IMPLEMENTATION [!debug]:
// Non-debug builds: the logging hooks compile to nothing (empty
// bodies elided in this view).
766 Irq::log_irq(Irq *, int)
771 Irq::log_timer_irq(int)