3 #include "ipc_sender.h"
5 #include "kobject_helper.h"
6 #include "member_offs.h"
14 /** Hardware interrupts. This class encapsulates hardware IRQs. Also,
15 it provides a registry that ensures that only one receiver can sign up
16 to receive interrupt IPC messages.
18 class Irq : public Irq_base, public Kobject
// Hooks this type into the kobject debug/dcast machinery (paired with
// FIASCO_DEFINE_KOBJ(Irq) in the implementation section below).
21 FIASCO_DECLARE_KOBJ();
// Irq objects come from a dedicated slab cache, not a general heap.
24 typedef Slab_cache Allocator;
46 * IRQ Kobject to send IPC messages to a receiving thread.
49 : public Kobject_h<Irq_sender, Irq>,
50 public Ipc_sender<Irq_sender>
// Object size reported to the generic kobject allocator/debugger.
53 Mword kobject_size() const { return sizeof(*this); }
// Copy constructor declared but (presumably) never defined, to forbid
// copying — TODO confirm it sits in a private section in the full file.
56 Irq_sender(Irq_sender &);
// Receiver currently bound to this IRQ; 0 while unbound (see alloc/free).
60 Receiver *_irq_thread;
68 * IRQ Kobject to broadcast IRQs to multiple other IRQ objects.
70 * This is useful for PCI shared IRQs.
72 class Irq_muxer : public Kobject_h<Irq_muxer, Irq>, private Irq_chip
// The muxer has no hardware mode of its own; the requested mode is
// simply echoed back.
75 unsigned set_mode(Mword, unsigned mode) { return mode; }
76 void switch_mode(unsigned)
78 // the irq object is assumed to be always handled as
82 void set_cpu(Mword, unsigned)
84 // don't know what to do here, may be multiple targets on different
// Debugger-visible name of this pseudo IRQ chip.
90 char const *chip_type() const { return "Bcast"; }
96 //-----------------------------------------------------------------------------
99 #include "assert_opt.h"
102 #include "cpu_lock.h"
103 #include "entry_frame.h"
105 #include "ipc_sender.h"
107 #include "kmem_slab.h"
108 #include "lock_guard.h"
110 #include "receiver.h"
111 #include "std_macros.h"
112 #include "thread_object.h"
113 #include "thread_state.h"
114 #include "l4_buf_iter.h"
117 FIASCO_DEFINE_KOBJ(Irq);
// Downcast helper: resolve a generic kobject interface to an Irq (and
// thereby to Irq_base) via the kobject dcast machinery; yields 0 for
// objects that are not Irqs.
120 static Irq_base *irq_base_dcast(Kobject_iface *o)
121 { return Kobject::dcast<Irq*>(o); }
// Static-constructor object: installs the helper as the global
// Irq_base::dcast hook at startup.
126 { Irq_base::dcast = &irq_base_dcast; }
129 static Irq_base_cast register_irq_base_cast;
135 Irq_muxer::unmask(Mword)
// Atomically decrement the nested-mask counter with a CAS retry loop.
// Body partially elided here — presumably unmasks the upstream IRQ when
// the counter reaches zero; confirm against the full source.
140 while (!mp_cas(&_mask_cnt, old, old - 1));
149 Irq_muxer::mask(Mword)
// Atomically increment the nested-mask counter (counterpart to
// unmask() above); surrounding lines elided in this excerpt.
154 while (!mp_cas(&_mask_cnt, old, old + 1));
163 Irq_muxer::unbind(Irq_base *irq)
// Walk the singly-linked child chain until n->_next is the IRQ to remove.
166 for (n = this; n->_next && n->_next != irq; n = n->_next)
169 assert (n->_next == irq);
// Unlink irq from the chain.
170 n->_next = n->_next->_next;
// NOTE(review): unconditional unmask(0) — appears to drop the mask
// reference the removed child held; verify against the mask/unmask
// accounting in handle().
172 static_cast<Irq_chip&>(*this).unmask(0);
174 Irq_chip::unbind(irq);
// Body elided in this excerpt — presumably a no-op for the broadcast
// pseudo chip; confirm in the full source.
180 Irq_muxer::mask_and_ack(Mword)
185 Irq_muxer::handle(Upstream_irq const *ui)
// Must be entered with the CPU lock held (IRQs disabled on this CPU).
187 assert (cpu_lock.test());
188 Irq_base::mask_and_ack();
// No children attached: nothing to broadcast to.
191 if (EXPECT_FALSE (!Irq_base::_next))
// First pass (partially elided): walk the children, presumably counting
// them into `irqs`.
195 for (Irq_base *n = Irq_base::_next; n;)
199 n = n->Irq_base::_next;
// Account one pending mask reference per child before delivery.
206 while (!mp_cas(&_mask_cnt, old, old + irqs));
// Second pass: deliver to each child IRQ object.
209 for (Irq_base *n = Irq_base::_next; n;)
211 Irq *i = nonull_static_cast<Irq*>(n);
// Advance first; the (elided) delivery to `i` follows.
213 n = i->Irq_base::_next;
218 Irq_muxer::Irq_muxer(Ram_quota *q = 0)
219 : Kobject_h<Irq_muxer, Irq>(q), _mask_cnt(0)
// Route incoming hits to Irq_muxer::handle() via the generic wrapper.
221 hit_func = &handler_wrapper<Irq_muxer>;
// Kobject teardown; note the child IRQ chain is currently left intact
// (see the FIXME), so children may dangle after the muxer dies.
226 Irq_muxer::destroy(Kobject ***rl)
228 // FIXME: unchain IRQs
235 Irq_muxer::sys_attach(L4_msg_tag const &tag, Utcb const *utcb, Syscall_frame * /*f*/,
238 L4_snd_item_iter snd_items(utcb, tag.words());
// Mode bits travel in the upper 16 bits of the first message word.
241 unsigned mode = utcb->values[0] >> 16;
// Attaching to a muxer requires an IRQ capability to be sent along.
243 if (tag.items() == 0)
244 return commit_result(-L4_err::EInval);
246 if (tag.items() && snd_items.next())
248 L4_fpage bind_irq(snd_items.get()->d);
249 if (EXPECT_FALSE(!bind_irq.is_objpage()))
250 return commit_error(utcb, L4_error::Overflow);
// Resolve the sent capability to an Irq in the caller's object space.
252 irq = Kobject::dcast<Irq*>(o_space->lookup_local(bind_irq.obj_index()));
256 return commit_result(-L4_err::EInval);
258 if (mode & Set_irq_mode)
259 printf("DEPRECATED SET IRQ MODE\n");
260 //pin()->set_mode(mode);
// New child holds one mask reference: bump the counter atomically.
269 while (!mp_cas(&_mask_cnt, old, old + 1));
// Push the new IRQ onto the head of the child chain.
// NOTE(review): no lock visible in this excerpt — presumably the elided
// lines serialize this; confirm.
274 irq->Irq_base::_next = Irq_base::_next;
275 Irq_base::_next = irq;
277 return commit_result(0);
282 Irq_muxer::kinvoke(L4_obj_ref, Mword /*rights*/, Syscall_frame *f,
283 Utcb const *utcb, Utcb *)
285 register Context *const c_thread = ::current();
286 assert_opt (c_thread);
287 register Space *const c_space = c_thread->space();
288 assert_opt (c_space);
290 L4_msg_tag tag = f->tag();
// Only the IRQ protocol is served by this object.
292 if (EXPECT_FALSE(tag.proto() != L4_msg_tag::Label_irq))
293 return commit_result(-L4_err::EBadproto);
295 if (EXPECT_FALSE(tag.words() < 1))
296 return commit_result(-L4_err::EInval);
// Opcode is in the low 16 bits of the first message word.
298 switch ((utcb->values[0] & 0xffff))
301 return sys_attach(tag, utcb, f, c_space);
// Unknown opcode (default case lives in the elided lines).
307 return commit_result(-L4_err::EInval);
313 /** Bind a receiver to this device interrupt.
314 @param t the receiver that wants to receive IPC messages for this IRQ
315 @return true if the binding could be established
317 PUBLIC inline NEEDS ["atomic.h", "cpu_lock.h", "lock_guard.h"]
319 Irq_sender::alloc(Receiver *t)
// Atomically claim ownership: succeeds only if no receiver was bound.
321 bool ret = mp_cas(&_irq_thread, reinterpret_cast<Receiver*>(0), t);
325 if (EXPECT_TRUE(t != 0))
// Route the hardware IRQ to the new receiver's CPU.
328 _chip->set_cpu(pin(), t->cpu());
// Currently bound receiver, or 0 while the IRQ is unbound.
339 Irq_sender::owner() const { return _irq_thread; }
341 /** Release a device interrupt.
342 @param t the receiver that owns the IRQ
343 @return true if t really was the owner of the IRQ and operation was
348 Irq_sender::free(Receiver *t)
// Only the current owner may detach: CAS the binding back to 0.
350 bool ret = mp_cas(&_irq_thread, t, reinterpret_cast<Receiver*>(0));
354 auto guard = lock_guard(cpu_lock);
357 if (EXPECT_TRUE(t != 0))
361 // release cpu-lock early, actually before delete
// Drop our reference on the receiver; the (elided) code after this
// presumably deletes it when this was the last reference — confirm.
364 if (t->dec_ref() == 0)
373 Irq_sender::Irq_sender(Ram_quota *q = 0)
374 : Kobject_h<Irq_sender, Irq>(q), _queued(0), _irq_thread(0), _irq_id(~0UL)
// Default to level-triggered delivery until switch_mode() says otherwise.
376 hit_func = &hit_level_irq;
381 Irq_sender::switch_mode(unsigned mode)
// Select the hit handler matching the trigger mode: edge-triggered IRQs
// use the masking variant, everything else is treated as level-triggered.
383 if ((mode & Trigger_mask) == Trigger_edge)
384 hit_func = &hit_edge_irq;
386 hit_func = &hit_level_irq;
391 Irq_sender::destroy(Kobject ***rl)
// Teardown runs with the CPU lock held (IRQs disabled); the rest of the
// body is elided in this excerpt.
393 auto g = lock_guard(cpu_lock);
401 /** Consume one interrupt.
402 @return number of IRQs that are still pending.
404 PRIVATE inline NEEDS ["atomic.h"]
406 Irq_sender::consume()
// Atomically decrement the pending-IRQ counter.
414 while (!mp_cas (&_queued, old, old - 1));
// Last extra edge IRQ consumed (count 2 -> 1): the elided branch body
// presumably unmasks the line again — confirm in the full source.
416 if (old == 2 && hit_func == &hit_edge_irq)
431 * Predicate used to figure out if the sender shall be enqueued
432 * for sending a second message after sending the first.
434 PUBLIC inline NEEDS[Irq_sender::consume]
436 Irq_sender::requeue_sender()
// More IRQs still pending after consuming this one -> send again.
437 { return consume() > 0; }
440 * Predicate used to figure out if the sender shall be dequeued after
441 * sending the request.
443 PUBLIC inline NEEDS[Irq_sender::consume]
445 Irq_sender::dequeue_sender()
// Nothing pending anymore -> drop the sender from the queue.
446 { return consume() < 1; }
450 Irq_sender::transfer_msg(Receiver *recv)
// Write the IRQ IPC directly into the receiver's message registers.
452 Syscall_frame* dst_regs = recv->rcv_regs();
454 // set ipc return value: OK
455 dst_regs->tag(L4_msg_tag(0));
457 // set ipc source thread id
458 dst_regs->from(_irq_id);
464 Irq_sender::modify_label(Mword const *todo, int cnt)
// Each rule occupies 4 words: test-mask, test-value, set-mask, set-value.
466 for (int i = 0; i < cnt*4; i += 4)
468 Mword const test_mask = todo[i];
469 Mword const test = todo[i+1];
// Rule matches when the masked label equals the test value.
470 if ((_irq_id & test_mask) == test)
472 Mword const set_mask = todo[i+2];
473 Mword const set = todo[i+3];
// Rewrite the masked bits of the label; the elided code presumably
// stops at the first matching rule — confirm.
475 _irq_id = (_irq_id & ~set_mask) | set;
// DRQ handler: executed on the receiver's CPU for cross-CPU delivery
// (queued by count_and_send() below).
484 Irq_sender::handle_remote_hit(Context::Drq *, Context *, void *arg)
486 Irq_sender *irq = (Irq_sender*)arg;
// Re-route the hardware IRQ to this CPU, then deliver the message.
487 irq->set_cpu(current_cpu());
488 irq->send_msg(irq->_irq_thread);
489 return Context::Drq::No_answer;
// Fragment of the (elided) queue() helper: atomically increments the
// pending-IRQ counter; presumably returns the previous count, which
// count_and_send() consumes — confirm in the full source.
499 while (!mp_cas(&_queued, old, old + 1));
506 Irq_sender::count_and_send(Smword queued)
// Only the 0 -> 1 transition triggers a send; later hits just count up
// and are drained by consume().
508 if (EXPECT_TRUE (queued == 0) && EXPECT_TRUE(_irq_thread != 0)) // increase hit counter
// Receiver lives on another CPU: deliver via an asynchronous DRQ.
510 if (EXPECT_FALSE(_irq_thread->cpu() != current_cpu()))
511 _irq_thread->drq(&_drq, handle_remote_hit, this, 0,
512 Context::Drq::Target_ctxt, Context::Drq::No_wait);
514 send_msg(_irq_thread);
519 PUBLIC inline NEEDS[Irq_sender::count_and_send, Irq_sender::queue]
521 Irq_sender::_hit_level_irq(Upstream_irq const *ui)
523 // We're entered holding the kernel lock, which also means irqs are
524 // disabled on this CPU (XXX always correct?). We never enable irqs
525 // in this stack frame (except maybe in a nonnested invocation of
526 // switch_exec() -> switchin_context()) -- they will be re-enabled
527 // once we return from it (iret in entry.S:all_irqs) or we switch to
528 // a different thread.
530 // LOG_MSG_3VAL(current(), "IRQ", dbg_id(), 0, _queued);
532 assert (cpu_lock.test());
// Count this hit and deliver if it was the first pending IRQ.
535 count_and_send(queue());
// Static trampoline installed as hit_func for level-triggered mode.
540 Irq_sender::hit_level_irq(Irq_base *i, Upstream_irq const *ui)
541 { nonull_static_cast<Irq_sender*>(i)->_hit_level_irq(ui); }
543 PUBLIC inline NEEDS[Irq_sender::count_and_send, Irq_sender::queue]
545 Irq_sender::_hit_edge_irq(Upstream_irq const *ui)
547 // We're entered holding the kernel lock, which also means irqs are
548 // disabled on this CPU (XXX always correct?). We never enable irqs
549 // in this stack frame (except maybe in a nonnested invocation of
550 // switch_exec() -> switchin_context()) -- they will be re-enabled
551 // once we return from it (iret in entry.S:all_irqs) or we switch to
552 // a different thread.
554 // LOG_MSG_3VAL(current(), "IRQ", dbg_id(), 0, _queued);
556 assert (cpu_lock.test());
559 // if we get a second edge triggered IRQ before the first is
560 // handled we can mask the IRQ. The consume function will
561 // unmask the IRQ when the last IRQ is dequeued.
// Static trampoline installed as hit_func for edge-triggered mode.
573 Irq_sender::hit_edge_irq(Irq_base *i, Upstream_irq const *ui)
574 { nonull_static_cast<Irq_sender*>(i)->_hit_edge_irq(ui); }
579 Irq_sender::sys_attach(L4_msg_tag const &tag, Utcb const *utcb, Syscall_frame * /*f*/,
582 L4_snd_item_iter snd_items(utcb, tag.words());
584 Receiver *thread = 0;
// Mode bits travel in the upper 16 bits of the first message word.
585 unsigned mode = utcb->values[0] >> 16;
// No object item sent: only the (deprecated) mode handling applies, the
// receiver binding is left as-is.
587 if (tag.items() == 0)
590 if (mode & Set_irq_mode)
591 printf("DEPRECATED SET IRQ MODE\n");
592 //pin()->set_mode(mode);
598 return commit_result(0);
// A thread capability was sent: resolve it in the caller's object space.
601 if (tag.items() && snd_items.next())
603 L4_fpage bind_thread(snd_items.get()->d);
604 if (EXPECT_FALSE(!bind_thread.is_objpage()))
605 return commit_error(utcb, L4_error::Overflow);
607 thread = Kobject::dcast<Thread_object*>(o_space->lookup_local(bind_thread.obj_index()));
// (Elided path) No valid thread resolved: fall back to the caller itself.
611 thread = current_thread();
615 if (mode & Set_irq_mode)
616 printf("DEPRECATED SET IRQ MODE\n");
// Second message word carries the IPC label delivered with each IRQ.
617 _irq_id = utcb->values[1];
618 return commit_result(0);
621 return commit_result(-L4_err::EInval);
627 Irq_sender::kinvoke(L4_obj_ref, Mword /*rights*/, Syscall_frame *f,
628 Utcb const *utcb, Utcb *)
630 register Context *const c_thread = ::current();
631 assert_opt (c_thread);
632 register Space *const c_space = c_thread->space();
633 assert_opt (c_space);
635 L4_msg_tag tag = f->tag();
// Only the IRQ protocol is served by this object.
637 if (EXPECT_FALSE(tag.proto() != L4_msg_tag::Label_irq))
638 return commit_result(-L4_err::EBadproto);
640 if (EXPECT_FALSE(tag.words() < 1))
641 return commit_result(-L4_err::EInval);
// Opcode is in the low 16 bits of the first message word.
643 switch ((utcb->values[0] & 0xffff))
651 case Op_attach: /* ATTACH, DETACH */
652 return sys_attach(tag, utcb, f, c_space);
// Unknown opcode (default case lives in the elided lines).
658 return commit_result(-L4_err::EInval);
// Body elided in this excerpt — presumably returns _irq_id; confirm.
664 Irq_sender::obj_id() const
669 // Irq implementation
// One shared slab sized for the larger of the two concrete Irq types,
// aligned for Irq, so both Irq_sender and Irq_muxer fit.
671 static Kmem_slab _irq_allocator(max(sizeof (Irq_sender), sizeof(Irq_muxer)),
672 __alignof__ (Irq), "Irq");
// Accessor (signature elided above): hands out the shared slab cache.
677 { return &_irq_allocator; }
// Placement new: memory comes from allocate() below, not from here;
// body elided in this excerpt (presumably just returns p).
681 Irq::operator new (size_t, void *p)
686 Irq::operator delete (void *_l)
688 Irq *l = reinterpret_cast<Irq*>(_l);
// Return the quota charged at allocation time; the elided line above
// presumably guards against a null _q — confirm.
690 l->_q->free(sizeof(Irq));
// Hand the memory back to the shared slab cache.
692 allocator()->free(l);
695 PUBLIC template<typename T> inline NEEDS[Irq::allocator, Irq::operator new]
698 Irq::allocate(Ram_quota *q)
// Quota-charged slab allocation followed by placement-new construction;
// the elided lines presumably handle q_alloc() returning 0 — confirm.
700 void *nq =allocator()->q_alloc(q);
702 return new (nq) T(q);
708 PUBLIC explicit inline
// Remember the quota so operator delete can return the memory charge.
709 Irq::Irq(Ram_quota *q = 0) : _q(q) {}
713 Irq::destroy(Kobject ***rl)
// Delegate to the generic kobject teardown (reaper-list based); any
// IRQ-specific cleanup lives in the elided lines above this call.
716 Kobject::destroy(rl);