INTERFACE:

#include "ipc_sender.h"
#include "kobject_helper.h"
#include "member_offs.h"
/** Hardware interrupts. This class encapsulates hardware IRQs. Also,
    it provides a registry that ensures that only one receiver can sign up
    to receive interrupt IPC messages.
 */
class Irq : public Irq_base, public Kobject
{
  FIASCO_DECLARE_KOBJ();

  typedef Slab_cache Allocator;
/**
 * IRQ Kobject to send IPC messages to a receiving thread.
 */
class Irq_sender
: public Kobject_h<Irq_sender, Irq>,
  public Ipc_sender<Irq_sender>
{
  Mword kobject_size() const { return sizeof(*this); }

  Irq_sender(Irq_sender &);
/**
 * IRQ Kobject to broadcast IRQs to multiple other IRQ objects.
 *
 * This is useful for PCI shared IRQs.
 */
class Irq_muxer : public Kobject_h<Irq_muxer, Irq>, private Irq_chip
{
  int set_mode(Mword, Irq_chip::Mode) { return 0; }

  bool is_edge_triggered(Mword) const { return false; }

  void switch_mode(bool)
  {
    // the IRQ object is assumed to be always handled as
    // level triggered
  }

  void set_cpu(Mword, Cpu_number)
  {
    // don't know what to do here, may be multiple targets on different
    // CPUs!
  }

  char const *chip_type() const { return "Bcast"; }
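
// Illustrative sketch (not part of the kernel sources; names are
// hypothetical): how a muxer is meant to be used for a shared PCI line.
//
//   Irq_muxer *mux = Irq::allocate<Irq_muxer>(quota);  // bound to the line
//   Irq_sender *a = Irq::allocate<Irq_sender>(quota);  // one per driver
//   Irq_sender *b = Irq::allocate<Irq_sender>(quota);
//
// Each Irq_sender is chained to the muxer via its Op_attach operation; a
// hardware hit on the shared line is then broadcast to both a and b.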
//-----------------------------------------------------------------------------
IMPLEMENTATION:
#include "assert_opt.h"
#include "atomic.h"
#include "cpu_lock.h"
#include "entry_frame.h"
#include "ipc_sender.h"
#include "kmem_slab.h"
#include "lock_guard.h"
#include "std_macros.h"
#include "thread_object.h"
#include "thread_state.h"
#include "l4_buf_iter.h"
FIASCO_DEFINE_KOBJ(Irq);

static Irq_base *irq_base_dcast(Kobject_iface *o)
{ return Kobject::dcast<Irq*>(o); }
struct Irq_base_cast
{
  Irq_base_cast()
  { Irq_base::dcast = &irq_base_dcast; }
};

static Irq_base_cast register_irq_base_cast;
Irq_muxer::unmask(Mword)
{
  Smword old;
  do old = _mask_cnt;
  while (!mp_cas(&_mask_cnt, old, old - 1));
}
Irq_muxer::mask(Mword)
{
  Smword old;
  do old = _mask_cnt;
  while (!mp_cas(&_mask_cnt, old, old + 1));
}
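
// Note on the two functions above: _mask_cnt counts how many of the
// chained IRQs are currently masked; mask() increments it and unmask()
// decrements it. The CAS retry loop is used instead of a lock so that
// concurrent mask/unmask calls from different CPUs cannot lose updates.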
Irq_muxer::unbind(Irq_base *irq)
{
  Irq_base *n;
  for (n = this; n->_next && n->_next != irq; n = n->_next)
    ;

  assert (n->_next == irq);
  n->_next = n->_next->_next;

  static_cast<Irq_chip&>(*this).unmask(0);

  Irq_chip::unbind(irq);
}
Irq_muxer::mask_and_ack(Mword)
{}
Irq_muxer::handle(Upstream_irq const *ui)
{
  assert (cpu_lock.test());
  Irq_base::mask_and_ack();

  if (EXPECT_FALSE (!Irq_base::_next))
    return;

  int irqs = 0;
  for (Irq_base *n = Irq_base::_next; n;)
    {
      ++irqs;
      n = n->Irq_base::_next;
    }

  Smword old;
  do old = _mask_cnt;
  while (!mp_cas(&_mask_cnt, old, old + irqs));

  for (Irq_base *n = Irq_base::_next; n;)
    {
      Irq *i = nonull_static_cast<Irq*>(n);
      n = i->Irq_base::_next;
      i->hit(0);
    }
}
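
// The broadcast above is two-phase: the first loop counts the chained
// IRQs so that _mask_cnt can be raised by the whole batch in a single
// CAS (presumably because each chained IRQ stays masked until its
// handler has consumed the hit), and the second loop then forwards the
// hit to every chained Irq. The successor pointer is saved before hit()
// is called, so the walk stays intact even if handling rechains the
// current object.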
Irq_muxer::Irq_muxer(Ram_quota *q = 0)
: Kobject_h<Irq_muxer, Irq>(q), _mask_cnt(0)
{
  hit_func = &handler_wrapper<Irq_muxer>;
}
Irq_muxer::destroy(Kobject ***rl)
{
  // FIXME: unchain IRQs

  Irq::destroy(rl);
}
PRIVATE
L4_msg_tag
Irq_muxer::sys_attach(L4_msg_tag const &tag, Utcb const *utcb, Syscall_frame * /*f*/,
                      Obj_space *o_space)
{
  L4_snd_item_iter snd_items(utcb, tag.words());
  Irq *irq = 0;

  if (tag.items() == 0)
    return commit_result(-L4_err::EInval);

  if (tag.items() && snd_items.next())
    {
      L4_fpage bind_irq(snd_items.get()->d);
      if (EXPECT_FALSE(!bind_irq.is_objpage()))
        return commit_error(utcb, L4_error::Overflow);

      irq = Kobject::dcast<Irq*>(o_space->lookup_local(bind_irq.obj_index()));
    }

  if (!irq)
    return commit_result(-L4_err::EInval);

  Smword old;
  do old = _mask_cnt;
  while (!mp_cas(&_mask_cnt, old, old + 1));

  irq->Irq_base::_next = Irq_base::_next;
  Irq_base::_next = irq;

  return commit_result(0);
}
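
// Hedged sketch of the attach protocol implemented above (the user-level
// binding and the remaining opcode constants are not shown in this file):
//
//   utcb->values[0] = Op_attach;  // opcode in the low 16 bits of word 0
//   // first send item: an object flexpage naming the Irq to be chained
//
// On success the named Irq is accounted as masked (_mask_cnt + 1) and
// linked at the head of the muxer's chain.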
PUBLIC
L4_msg_tag
Irq_muxer::kinvoke(L4_obj_ref, L4_fpage::Rights /*rights*/, Syscall_frame *f,
                   Utcb const *utcb, Utcb *)
{
  register Context *const c_thread = ::current();
  assert_opt (c_thread);
  register Space *const c_space = c_thread->space();
  assert_opt (c_space);

  L4_msg_tag tag = f->tag();

  if (EXPECT_FALSE(tag.proto() != L4_msg_tag::Label_irq))
    return commit_result(-L4_err::EBadproto);

  if (EXPECT_FALSE(tag.words() < 1))
    return commit_result(-L4_err::EInval);

  switch (utcb->values[0] & 0xffff)
    {
    case Op_attach:
      return sys_attach(tag, utcb, f, c_space);
    default:
      return commit_result(-L4_err::EInval);
    }
}
/** Bind a receiver to this device interrupt.
    @param t the receiver that wants to receive IPC messages for this IRQ
    @return true if the binding could be established
 */
PUBLIC inline NEEDS ["atomic.h", "cpu_lock.h", "lock_guard.h"]
bool
Irq_sender::alloc(Thread *t)
{
  bool ret = mp_cas(&_irq_thread, reinterpret_cast<Thread*>(0), t);

  if (ret && EXPECT_TRUE(t != 0))
    {
      t->inc_ref();
      if (Cpu::online(t->home_cpu()))
        _chip->set_cpu(pin(), t->home_cpu());
    }

  return ret;
}
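
// The compare-and-swap from a null _irq_thread above is what implements
// the single-receiver registry promised in the class description: only
// the first alloc() can move the owner from 0 to t, and every further
// attempt fails until free() resets the owner to 0.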
Irq_sender::owner() const { return _irq_thread; }
/** Release a device interrupt.
    @param t the receiver that owns the IRQ
    @return true if t really was the owner of the IRQ and the operation
            was successful
 */
Irq_sender::free(Thread *t, Kobject ***rl)
{
  bool ret = mp_cas(&_irq_thread, t, reinterpret_cast<Thread*>(0));

  if (ret)
    {
      auto guard = lock_guard(cpu_lock);

      if (EXPECT_TRUE(t != 0))
        {
          t->Receiver::abort_send(this);

          // release the CPU lock early, actually before the delete
          guard.reset();

          if (t->dec_ref() == 0)
            t->initiate_deletion(rl);
        }
    }

  return ret;
}
Irq_sender::Irq_sender(Ram_quota *q = 0)
: Kobject_h<Irq_sender, Irq>(q), _queued(0), _irq_thread(0), _irq_id(~0UL)
{
  hit_func = &hit_level_irq;
}
Irq_sender::switch_mode(bool is_edge_triggered)
{
  hit_func = is_edge_triggered ? &hit_edge_irq : &hit_level_irq;
}
Irq_sender::destroy(Kobject ***rl)
{
  auto g = lock_guard(cpu_lock);
  auto t = access_once(&_irq_thread);
  if (t)
    free(t, rl);

  Irq::destroy(rl);
}
/** Consume one interrupt.
    @return number of IRQs that are still pending.
 */
PRIVATE inline NEEDS ["atomic.h"]
Smword
Irq_sender::consume()
{
  Smword old;
  do old = _queued;
  while (!mp_cas (&_queued, old, old - 1));

  if (old == 2 && hit_func == &hit_edge_irq)
    unmask();

  return old - 1;
}
/**
 * Predicate used to figure out if the sender shall be enqueued
 * for sending a second message after sending the first.
 */
PUBLIC inline NEEDS[Irq_sender::consume]
bool
Irq_sender::requeue_sender()
{ return consume() > 0; }

/**
 * Predicate used to figure out if the sender shall be dequeued after
 * sending the request.
 */
PUBLIC inline NEEDS[Irq_sender::consume]
bool
Irq_sender::dequeue_sender()
{ return consume() < 1; }
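
// Note on the two predicates (the generic Ipc_sender machinery itself is
// not part of this file): after a message transfer exactly one of them is
// called, and each consumes one queued IRQ. requeue_sender() == true
// (consume() > 0) keeps the sender enqueued for a second message, while
// dequeue_sender() == true (consume() < 1) removes it because the queue
// ran empty.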
Irq_sender::transfer_msg(Receiver *recv)
{
  Syscall_frame *dst_regs = recv->rcv_regs();

  // set IPC return value: OK
  dst_regs->tag(L4_msg_tag(0));

  // set IPC source thread ID
  dst_regs->from(_irq_id);
}
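
// What the bound receiver observes, assuming the usual L4 IPC wait path
// (the user-level fragment below is hypothetical): a zero-word message
// tag without error bits and the configured _irq_id as the sender label.
//
//   l4_umword_t label;
//   l4_ipc_wait(l4_utcb(), &label, L4_IPC_NEVER);  // label == _irq_id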
Irq_sender::modify_label(Mword const *todo, int cnt)
{
  for (int i = 0; i < cnt*4; i += 4)
    {
      Mword const test_mask = todo[i];
      Mword const test = todo[i+1];
      if ((_irq_id & test_mask) == test)
        {
          Mword const set_mask = todo[i+2];
          Mword const set = todo[i+3];

          _irq_id = (_irq_id & ~set_mask) | set;
          return;
        }
    }
}
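
// The todo array is read as cnt quadruples of
// (test_mask, test, set_mask, set): the first rule whose masked compare
// matches rewrites the label. Hypothetical example, one rule that tags
// any label whose low byte is 0x42 with bit 8:
//
//   Mword rules[4] = { 0xff, 0x42,         // if ((_irq_id & 0xff) == 0x42)
//                      ~0UL << 8, 0x100 }; //   _irq_id = (_irq_id & 0xff) | 0x100
//   sender->modify_label(rules, 1);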
Irq_sender::handle_remote_hit(Context::Drq *, Context *, void *arg)
{
  Irq_sender *irq = static_cast<Irq_sender*>(arg);
  irq->set_cpu(current_cpu());
  irq->send_msg(irq->_irq_thread);
  return Context::Drq::no_answer();
}
Irq_sender::queue()
{
  Smword old;
  do old = _queued;
  while (!mp_cas(&_queued, old, old + 1));
  return old;
}
Irq_sender::count_and_send(Smword queued)
{
  if (EXPECT_TRUE (queued == 0) && EXPECT_TRUE(_irq_thread != 0)) // increase hit counter
    {
      if (EXPECT_FALSE(_irq_thread->home_cpu() != current_cpu()))
        _irq_thread->drq(&_drq, handle_remote_hit, this, 0,
                         Context::Drq::Target_ctxt, Context::Drq::No_wait);
      else
        send_msg(_irq_thread);
    }
}
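
// Only the 0 -> 1 transition of _queued triggers an actual message; later
// hits are merely counted and drained one by one via consume(). If the
// bound thread lives on another CPU, the send is shipped there as a DRQ
// (handle_remote_hit) instead of being performed locally.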
PUBLIC inline NEEDS[Irq_sender::count_and_send, Irq_sender::queue]
void
Irq_sender::_hit_level_irq(Upstream_irq const *ui)
{
  // We're entered holding the kernel lock, which also means irqs are
  // disabled on this CPU (XXX always correct?). We never enable irqs
  // in this stack frame (except maybe in a non-nested invocation of
  // switch_exec() -> switchin_context()) -- they will be re-enabled
  // once we return from it (iret in entry.S:all_irqs) or we switch to
  // a different thread.

  // LOG_MSG_3VAL(current(), "IRQ", dbg_id(), 0, _queued);

  assert (cpu_lock.test());
  count_and_send(queue());
}

PRIVATE static
void
Irq_sender::hit_level_irq(Irq_base *i, Upstream_irq const *ui)
{ nonull_static_cast<Irq_sender*>(i)->_hit_level_irq(ui); }
PUBLIC inline NEEDS[Irq_sender::count_and_send, Irq_sender::queue]
void
Irq_sender::_hit_edge_irq(Upstream_irq const *ui)
{
  // Same locking considerations as in _hit_level_irq(): we're entered
  // holding the kernel lock, i.e. irqs are disabled on this CPU, and we
  // never enable irqs in this stack frame.

  // LOG_MSG_3VAL(current(), "IRQ", dbg_id(), 0, _queued);

  assert (cpu_lock.test());

  Smword q = queue();

  // If we get a second edge-triggered IRQ before the first is
  // handled, we can mask the IRQ. The consume function will
  // unmask the IRQ when the last IRQ is dequeued.
  if (q)
    mask();

  count_and_send(q);
}

PRIVATE static
void
Irq_sender::hit_edge_irq(Irq_base *i, Upstream_irq const *ui)
{ nonull_static_cast<Irq_sender*>(i)->_hit_edge_irq(ui); }
PRIVATE
L4_msg_tag
Irq_sender::sys_attach(L4_msg_tag const &tag, Utcb const *utcb, Syscall_frame * /*f*/,
                       Obj_space *o_space)
{
  L4_snd_item_iter snd_items(utcb, tag.words());
  Thread *thread = 0;

  if (tag.items() == 0)
    {
      // detach
      Reap_list rl;
      free(_irq_thread, rl.list());
      return commit_result(0);
    }

  if (tag.items() && snd_items.next())
    {
      L4_fpage bind_thread(snd_items.get()->d);
      if (EXPECT_FALSE(!bind_thread.is_objpage()))
        return commit_error(utcb, L4_error::Overflow);

      thread = Kobject::dcast<Thread_object*>(o_space->lookup_local(bind_thread.obj_index()));
    }
  else
    thread = current_thread();

  if (thread && alloc(thread))
    {
      _irq_id = utcb->values[1];
      return commit_result(0);
    }

  return commit_result(-L4_err::EInval);
}
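
// Summary of the protocol implemented above: a message without send items
// detaches (and possibly reaps) the current receiver; a message with an
// object flexpage item binds the named thread; without a usable flexpage
// the calling thread is bound. In the bind cases utcb->values[1] supplies
// the new IRQ label stored in _irq_id.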
PUBLIC
L4_msg_tag
Irq_sender::kinvoke(L4_obj_ref, L4_fpage::Rights /*rights*/, Syscall_frame *f,
                    Utcb const *utcb, Utcb *)
{
  register Context *const c_thread = ::current();
  assert_opt (c_thread);
  register Space *const c_space = c_thread->space();
  assert_opt (c_space);

  L4_msg_tag tag = f->tag();

  if (EXPECT_FALSE(tag.proto() != L4_msg_tag::Label_irq))
    return commit_result(-L4_err::EBadproto);

  if (EXPECT_FALSE(tag.words() < 1))
    return commit_result(-L4_err::EInval);

  switch (utcb->values[0] & 0xffff)
    {
    case Op_attach: /* ATTACH, DETACH */
      return sys_attach(tag, utcb, f, c_space);
    default:
      return commit_result(-L4_err::EInval);
    }
}
Irq_sender::obj_id() const { return _irq_id; }
// Irq implementation

static Kmem_slab _irq_allocator(max(sizeof (Irq_sender), sizeof(Irq_muxer)),
                                __alignof__ (Irq), "Irq");

PRIVATE static
Irq::Allocator *
Irq::allocator()
{ return &_irq_allocator; }
Irq::operator new (size_t, void *p)
{ return p; }

Irq::operator delete (void *_l)
{
  Irq *l = reinterpret_cast<Irq*>(_l);
  if (l->_q)
    l->_q->free(sizeof(Irq));

  allocator()->free(l);
}
PUBLIC template<typename T> inline NEEDS[Irq::allocator, Irq::operator new]
T *
Irq::allocate(Ram_quota *q)
{
  void *nq = allocator()->q_alloc(q);
  if (nq)
    return new (nq) T(q);

  return 0;
}
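
// Typical (assumed) factory use of this helper; the quota object name is
// hypothetical:
//
//   Irq_sender *s = Irq::allocate<Irq_sender>(quota);
//   Irq_muxer *m = Irq::allocate<Irq_muxer>(quota);
//
// q_alloc() charges the Ram_quota for the slab object, and operator
// delete above refunds that charge via l->_q->free(sizeof(Irq)).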
PUBLIC explicit inline
Irq::Irq(Ram_quota *q = 0) : _q(q) {}

Irq::destroy(Kobject ***rl)
{
  Kobject::destroy(rl);
}