// kernel/fiasco/src/kern/ipc_sender.cpp
// (from the L4/Fiasco microkernel source tree, l4.git)
1 INTERFACE:
2
3 #include "sender.h"
4 #include "receiver.h"
5
// Non-template base class for kernel objects (e.g. IRQs) that deliver
// messages to a Receiver through the generic Sender queueing protocol.
// Carries no state of its own; it exists so that the non-template parts
// (ipc_receiver_aborted(), handle_shortcut()) are shared by all
// Ipc_sender<> instantiations instead of being duplicated per template.
class Ipc_sender_base : public Sender
{
};
9
// CRTP helper: the concrete sender type `Derived` supplies
// transfer_msg() and may shadow the queueing-policy predicates below.
template< typename Derived >
class Ipc_sender : public Ipc_sender_base
{
private:
  // CRTP downcast to the concrete sender — static, no virtual dispatch.
  Derived *derived() { return static_cast<Derived*>(this); }
  // Default policy: after a successful transfer, remove this sender
  // from the receiver's queue ...
  static bool dequeue_sender() { return true; }
  // ... and do not put it back afterwards.  Derived classes can shadow
  // both to keep the sender queued (e.g. while interrupts are pending).
  static bool requeue_sender() { return false; }
};
18
// Assembler entry point used by the IRQ shortcut: returns straight to
// user mode from the interrupt path, bypassing the full IPC handshake.
extern "C" void fast_ret_from_irq(void);
20
21 IMPLEMENTATION:
22
23 #include "config.h"
24 #include "entry_frame.h"
25 #include "globals.h"
26 #include "kdb_ke.h"
27 #include "thread_state.h"
28 #include <cassert>
29
/** Undo a pending send when the receiver aborts the IPC
    (e.g. it is killed or cancels its receive): remove this sender
    from the receiver's queue and forget the rendezvous partner.
 */
PUBLIC
virtual void
Ipc_sender_base::ipc_receiver_aborted()
{
  assert (receiver());

  sender_dequeue(receiver()->sender_list());
  // the pending-sender situation changed, so refresh the vCPU
  // pending-IRQ state before dropping the partner link
  receiver()->vcpu_update_state();
  set_receiver(0);
}
40
/** Sender-activation function called when receiver gets ready.
    Irq::hit() actually ensures that this method is always called
    when an interrupt occurs, even when the receiver was already
    waiting.

    @param recv  the receiver that became ready; must be the current
                 thread, running with interrupts disabled.
    @return true if the message was transferred, false if the receiver
            is not (or no longer) willing to accept it.
 */
PUBLIC template< typename Derived >
virtual bool
Ipc_sender<Derived>::ipc_receiver_ready(Receiver *recv)
{
  // we are running with ints off
  assert_kdb(current()->state() & Thread_ready);
  assert_kdb(current() == recv);

  // re-check the rendezvous: the receiver may have stopped waiting
  // for this sender in the meantime
  if(!recv->sender_ok(this))
    return false;

  recv->vcpu_disable_irqs();

  recv->ipc_init(this);

  // let the concrete sender (CRTP Derived, e.g. an Irq) copy its message
  derived()->transfer_msg(recv);

  // IPC complete: clear all receive-phase bits and make the receiver
  // runnable again
  recv->state_change(~(Thread_receiving
                       | Thread_transfer_in_progress
                       | Thread_ipc_in_progress),
                     Thread_ready);

  if (derived()->dequeue_sender())    // last interrupt in queue?
    {
      sender_dequeue(recv->sender_list());
      recv->vcpu_update_state();
    }

  // else remain queued if more interrupts are left
  return true;
}
77
/** Try the fast-path activation of the receiver after a message was
    already transferred into its syscall frame.

    Runs with interrupts disabled.  If the receiver can be deblocked
    safely (it is a different thread, wins the scheduling decision, is
    not alien, and no schedule is in progress), either switch to it
    directly or — with Config::Irq_shortcut — fake a return stack so it
    resumes via fast_ret_from_irq without the full IPC handshake.

    @param dst_regs  the receiver's syscall frame (already filled);
                     used as the anchor for the faked return stack
    @param receiver  the thread to activate
    @return true if we switched to the receiver (caller must not touch
            it anymore), false if the caller has to do the wakeup itself.
 */
PROTECTED inline NEEDS["config.h", "globals.h", "thread_state.h"]
bool
Ipc_sender_base::handle_shortcut(Syscall_frame *dst_regs,
                                 Receiver *receiver)
{
  if (EXPECT_TRUE
      ((current() != receiver
        && receiver->sched()->deblock(current_cpu(), current()->sched(), true)
        // avoid race in do_ipc() after Thread_send_in_progress
        // flag was deleted from receiver's thread state
        // also: no shortcut for alien threads, they need to see the
        // after-syscall exception
        && !(receiver->state()
          & (Thread_ready_mask | Thread_delayed_deadline | Thread_alien))
        && !current()->schedule_in_progress()))) // no schedule in progress
    {
      // we don't need to manipulate the state in a safe way
      // because we are still running with interrupts turned off
      receiver->state_add_dirty(Thread_ready);

      if (!Config::Irq_shortcut)
        {
          // no shortcut: switch to the interrupt thread which will
          // calls Irq::ipc_receiver_ready
          current()->switch_to_locked(receiver);
          return true;
        }

      // The following shortcut optimization does not work if PROFILE
      // is defined because fast_ret_from_irq does not handle the
      // different implementation of the kernel lock in profiling mode

      // At this point we are sure that the connected interrupt
      // thread is waiting for the next interrupt and that its
      // thread priority is higher than the current one. So we
      // choose a short cut: Instead of doing the full ipc handshake
      // we simply build up the return stack frame and go out as
      // quick as possible.
      //
      // XXX We must own the kernel lock for this optimization!
      //

      Mword *esp = reinterpret_cast<Mword*>(dst_regs);

      // set return address of irq_thread
      *--esp = reinterpret_cast<Mword>(fast_ret_from_irq);

      // XXX set stack pointer of irq_thread
      receiver->set_kernel_sp(esp);

      // directly switch to the interrupt thread context and go out
      // fast using fast_ret_from_irq (implemented in assembler).
      // kernel-unlock is done in switch_exec() (on switchee's side).

      // no shortcut if profiling: switch to the interrupt thread
      current()->switch_to_locked (receiver);
      return true;
    }
  return false;
}
138
139
/** Deliver this sender's message to @a receiver.

    If the receiver is already waiting for this sender, the message is
    transferred immediately and the receiver is activated — via
    handle_shortcut() when possible, otherwise by a plain deblock.
    If the receiver is not ready, the sender is enqueued on the
    receiver's sender list and delivery happens later through
    ipc_receiver_ready().

    Note the enqueue placement: without Config::Irq_shortcut we enqueue
    up front; with the shortcut enabled we enqueue only after the fast
    path failed, to keep the common case cheap.

    @param receiver  the destination thread.
 */
PROTECTED template< typename Derived >
inline  NEEDS["config.h","globals.h", "thread_state.h",
              Ipc_sender_base::handle_shortcut]
void
Ipc_sender<Derived>::send_msg(Receiver *receiver)
{
  set_receiver(receiver);

  if (!Config::Irq_shortcut)
    {
      // in profile mode, don't optimize
      // in non-profile mode, enqueue _after_ shortcut if still necessary
      sender_enqueue(receiver->sender_list(), 255);
      receiver->vcpu_set_irq_pending();
    }

  // if the thread is waiting for this interrupt, make it ready;
  // this will cause it to run irq->receiver_ready(), which
  // handles the rest

  // XXX careful!  This code may run in midst of an do_ipc()
  // operation (or similar)!
  if (Receiver::Rcv_state s = receiver->sender_ok(this))
    {
      Syscall_frame *dst_regs = derived()->transfer_msg(receiver);

      // policy hook: e.g. more interrupts pending -> stay queued
      if (derived()->requeue_sender())
        {
          sender_enqueue(receiver->sender_list(), 255);
          receiver->vcpu_set_irq_pending();
        }

      // ipc completed
      receiver->state_change_dirty(~Thread_ipc_mask, 0);

      // in case a timeout was set
      receiver->reset_timeout();

      if (s == Receiver::Rs_ipc_receive)
        {
          if (handle_shortcut(dst_regs, receiver))
            return;
        }
      // we don't need to manipulate the state in a safe way
      // because we are still running with interrupts turned off
      receiver->state_add_dirty(Thread_ready);
      receiver->sched()->deblock(receiver->cpu());
      return;
    }

  if (Config::Irq_shortcut)
    {
      // in profile mode, don't optimize
      // in non-profile mode, enqueue after shortcut if still necessary
      sender_enqueue(receiver->sender_list(), 255);
      receiver->vcpu_set_irq_pending();
    }
}
198