]> rtime.felk.cvut.cz Git - l4.git/blob - l4/pkg/libvcpu/include/vcpu.h
update
[l4.git] / l4 / pkg / libvcpu / include / vcpu.h
1 /*
2  * (c) 2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>
3  *     economic rights: Technische Universität Dresden (Germany)
4  *
5  * This file is part of TUD:OS and distributed under the terms of the
6  * GNU General Public License 2.
7  * Please see the COPYING-GPL-2 file for details.
8  *
9  * As a special exception, you may use this file as part of a free software
10  * library without restriction.  Specifically, if other files instantiate
11  * templates or use macros or inline functions from this file, or you compile
12  * this file and link it with other files to produce an executable, this
13  * file does not by itself cause the resulting executable to be covered by
14  * the GNU General Public License.  This exception does not however
15  * invalidate any other reasons why the executable file might be covered by
16  * the GNU General Public License.
17  */
18 #pragma once
19
20 #include <l4/sys/vcpu.h>
21 #include <l4/sys/utcb.h>
22
23 __BEGIN_DECLS
24
25 /**
26  * \defgroup api_libvcpu vCPU Support Library
27  * \brief vCPU handling functionality.
28  *
29  * This library provides convenience functionality on top of the l4sys vCPU
30  * interface to ease programming. It wraps commonly used code and abstracts
 * architecture-dependent parts as far as is reasonable.
32  */
33
34 /**
35  * \defgroup api_libvcpu_ext Extended vCPU support
36  * \ingroup api_libvcpu
37  * \brief extended vCPU handling functionality.
38  */
39
40 /**
41  * \brief IRQ/Event enable and disable flags.
42  * \ingroup api_libvcpu
43  */
44 typedef enum l4vcpu_irq_state_t
45 {
46   L4VCPU_IRQ_STATE_DISABLED = 0,             ///< IRQ/Event delivery disabled
47   L4VCPU_IRQ_STATE_ENABLED  = L4_VCPU_F_IRQ, ///< IRQ/Event delivery enabled
48 } l4vcpu_irq_state_t;
49
50 typedef l4_umword_t l4vcpu_state_t;
51 typedef void (*l4vcpu_event_hndl_t)(l4_vcpu_state_t *vcpu);
52 typedef void (*l4vcpu_setup_ipc_t)(l4_utcb_t *utcb);
53
54 /**
55  * \brief Return the state flags of a vCPU.
56  * \ingroup api_libvcpu
57  *
58  * \param vcpu  Pointer to vCPU area.
59  */
60 L4_CV L4_INLINE
61 l4vcpu_state_t
62 l4vcpu_state(l4_vcpu_state_t const *vcpu) L4_NOTHROW;
63
64 /**
65  * \brief Disable a vCPU for event delivery.
66  * \ingroup api_libvcpu
67  *
68  * \param vcpu  Pointer to vCPU area.
69  */
70 L4_CV L4_INLINE
71 void
72 l4vcpu_irq_disable(l4_vcpu_state_t *vcpu) L4_NOTHROW;
73
74 /**
75  * \brief Disable a vCPU for event delivery and return previous state.
76  * \ingroup api_libvcpu
77  *
78  * \param vcpu  Pointer to vCPU area.
79  *
80  * \return IRQ state before disabling IRQs.
81  */
82 L4_CV L4_INLINE
83 l4vcpu_irq_state_t
84 l4vcpu_irq_disable_save(l4_vcpu_state_t *vcpu) L4_NOTHROW;
85
86 /**
87  * \brief Enable a vCPU for event delivery.
88  * \ingroup api_libvcpu
89  *
90  * \param vcpu             Pointer to vCPU area.
91  * \param utcb             Utcb pointer of the calling vCPU.
92  * \param do_event_work_cb Call-back function that is called in case an
93  *                         event (such as an interrupt) is pending.
94  * \param setup_ipc        Function call-back that is called right before
95  *                         any IPC operation, and before event delivery is
96  *                         enabled.
97  */
98 L4_CV L4_INLINE
99 void
100 l4vcpu_irq_enable(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
101                   l4vcpu_event_hndl_t do_event_work_cb,
102                   l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
103
104 /**
105  * \brief Restore a previously saved IRQ/event state.
106  * \ingroup api_libvcpu
107  *
108  * \param vcpu             Pointer to vCPU area.
109  * \param s                IRQ state to be restored.
110  * \param utcb             Utcb pointer of the calling vCPU.
111  * \param do_event_work_cb Call-back function that is called in case an
112  *                         event (such as an interrupt) is pending after
113  *                         enabling.
114  * \param setup_ipc        Function call-back that is called right before
115  *                         any IPC operation, and before event delivery is
116  *                         enabled.
117  */
118 L4_CV L4_INLINE
119 void
120 l4vcpu_irq_restore(l4_vcpu_state_t *vcpu, l4vcpu_irq_state_t s,
121                    l4_utcb_t *utcb,
122                    l4vcpu_event_hndl_t do_event_work_cb,
123                    l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
124
125 /**
126  * \internal
127  * \ingroup api_libvcpu
128  *
129  * \param vcpu             Pointer to vCPU area.
130  * \param utcb             Utcb pointer of the calling vCPU.
131  * \param to               Timeout to do IPC operation with.
132  * \param do_event_work_cb Call-back function that is called in case an
133  *                         event (such as an interrupt) is pending after
134  *                         enabling.
135  * \param setup_ipc        Function call-back that is called right before
136  *                         any IPC operation.
137  */
138 L4_CV L4_INLINE
139 void
140 l4vcpu_wait(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
141             l4_timeout_t to,
142             l4vcpu_event_hndl_t do_event_work_cb,
143             l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
144
145 /**
146  * \brief Wait for event.
147  * \ingroup api_libvcpu
148  *
149  * \param vcpu             Pointer to vCPU area.
150  * \param utcb             Utcb pointer of the calling vCPU.
151  * \param do_event_work_cb Call-back function that is called when the vCPU
152  *                         awakes and needs to handle an event/IRQ.
153  * \param setup_ipc        Function call-back that is called right before
154  *                         any IPC operation.
155  *
156  * Note that event delivery remains disabled after this function returns.
157  */
158 L4_CV L4_INLINE
159 void
160 l4vcpu_wait_for_event(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
161                       l4vcpu_event_hndl_t do_event_work_cb,
162                       l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
163
164
165 /**
166  * \brief Print the state of a vCPU.
167  * \ingroup api_libvcpu
168  *
169  * \param vcpu   Pointer to vCPU area.
170  * \param prefix A prefix for each line printed.
171  */
172 L4_CV void
173 l4vcpu_print_state(l4_vcpu_state_t *vcpu, const char *prefix) L4_NOTHROW;
174
175 /**
176  * \internal
177  */
178 L4_CV void
179 l4vcpu_print_state_arch(l4_vcpu_state_t *vcpu, const char *prefix) L4_NOTHROW;
180
181
182 /**
183  * \brief Return whether the entry reason was an IRQ/IPC message.
184  * \ingroup api_libvcpu
185  *
186  * \param vcpu Pointer to vCPU area.
187  *
 * \return 0 if not, != 0 otherwise.
189  */
190 L4_CV L4_INLINE
191 int
192 l4vcpu_is_irq_entry(l4_vcpu_state_t *vcpu) L4_NOTHROW;
193
194 /**
195  * \brief Return whether the entry reason was a page fault.
196  * \ingroup api_libvcpu
197  *
198  * \param vcpu Pointer to vCPU area.
199  *
 * \return 0 if not, != 0 otherwise.
201  */
202 L4_CV L4_INLINE
203 int
204 l4vcpu_is_page_fault_entry(l4_vcpu_state_t *vcpu) L4_NOTHROW;
205
206 /**
207  * \brief Allocate state area for an extended vCPU.
208  * \ingroup api_libvcpu_ext
209  *
210  * \retval vcpu      Allocated vcpu-state area.
211  * \retval ext_state Allocated extended vcpu-state area.
212  * \param  task      Task to use for allocation.
213  * \param  regmgr    Region manager to use for allocation.
214  *
215  * \return 0 for success, error code otherwise
216  */
217 L4_CV int
218 l4vcpu_ext_alloc(l4_vcpu_state_t **vcpu, l4_addr_t *ext_state,
219                  l4_cap_idx_t task, l4_cap_idx_t regmgr) L4_NOTHROW;
220
221 /* ===================================================================== */
222 /* Implementations */
223
224 #include <l4/sys/ipc.h>
225 #include <l4/vcpu/vcpu_arch.h>
226
227 L4_CV L4_INLINE
228 l4vcpu_state_t
229 l4vcpu_state(l4_vcpu_state_t const *vcpu) L4_NOTHROW
230 {
231   return vcpu->state;
232 }
233
234 L4_CV L4_INLINE
235 void
236 l4vcpu_irq_disable(l4_vcpu_state_t *vcpu) L4_NOTHROW
237 {
238   vcpu->state &= ~L4_VCPU_F_IRQ;
239   l4_barrier();
240 }
241
242 L4_CV L4_INLINE
243 l4vcpu_irq_state_t
244 l4vcpu_irq_disable_save(l4_vcpu_state_t *vcpu) L4_NOTHROW
245 {
246   l4vcpu_irq_state_t s = (l4vcpu_irq_state_t)l4vcpu_state(vcpu);
247   l4vcpu_irq_disable(vcpu);
248   return s;
249 }
250
251 L4_CV L4_INLINE
252 void
253 l4vcpu_wait(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
254             l4_timeout_t to,
255             l4vcpu_event_hndl_t do_event_work_cb,
256             l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
257 {
258   l4vcpu_irq_disable(vcpu);
259   setup_ipc(utcb);
260   vcpu->i.tag = l4_ipc_wait(utcb, &vcpu->i.label, to);
261   if (EXPECT_TRUE(!l4_msgtag_has_error(vcpu->i.tag)))
262     do_event_work_cb(vcpu);
263 }
264
265 L4_CV L4_INLINE
266 void
267 l4vcpu_irq_enable(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
268                   l4vcpu_event_hndl_t do_event_work_cb,
269                   l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
270 {
271   if (!(vcpu->state & L4_VCPU_F_IRQ))
272     {
273       setup_ipc(utcb);
274       l4_barrier();
275     }
276
277   while (1)
278     {
279       vcpu->state |= L4_VCPU_F_IRQ;
280       l4_barrier();
281
282       if (EXPECT_TRUE(!(vcpu->sticky_flags & L4_VCPU_SF_IRQ_PENDING)))
283         break;
284
285       l4vcpu_wait(vcpu, utcb, L4_IPC_BOTH_TIMEOUT_0,
286                   do_event_work_cb, setup_ipc);
287     }
288 }
289
290 L4_CV L4_INLINE
291 void
292 l4vcpu_irq_restore(l4_vcpu_state_t *vcpu, l4vcpu_irq_state_t s,
293                    l4_utcb_t *utcb,
294                    l4vcpu_event_hndl_t do_event_work_cb,
295                    l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
296 {
297   if (s & L4_VCPU_F_IRQ)
298     l4vcpu_irq_enable(vcpu, utcb, do_event_work_cb, setup_ipc);
299 }
300
L4_CV L4_INLINE
void
l4vcpu_wait_for_event(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
                      l4vcpu_event_hndl_t do_event_work_cb,
                      l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
{
  /* Block with an infinite timeout until an event/IRQ IPC arrives.
   * l4vcpu_wait() disables event delivery before waiting and leaves it
   * disabled when this function returns. */
  l4vcpu_wait(vcpu, utcb, L4_IPC_NEVER, do_event_work_cb, setup_ipc);
}
309
310 __END_DECLS