]> rtime.felk.cvut.cz Git - l4.git/blob - l4/pkg/libvcpu/include/vcpu.h
update: sync
[l4.git] / l4 / pkg / libvcpu / include / vcpu.h
1 /*
2  * (c) 2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>
3  *     economic rights: Technische Universität Dresden (Germany)
4  *
5  * This file is part of TUD:OS and distributed under the terms of the
6  * GNU General Public License 2.
7  * Please see the COPYING-GPL-2 file for details.
8  *
9  * As a special exception, you may use this file as part of a free software
10  * library without restriction.  Specifically, if other files instantiate
11  * templates or use macros or inline functions from this file, or you compile
12  * this file and link it with other files to produce an executable, this
13  * file does not by itself cause the resulting executable to be covered by
14  * the GNU General Public License.  This exception does not however
15  * invalidate any other reasons why the executable file might be covered by
16  * the GNU General Public License.
17  */
18 #pragma once
19
20 #include <l4/sys/vcpu.h>
21 #include <l4/sys/utcb.h>
22
23 __BEGIN_DECLS
24
/**
 * \defgroup api_libvcpu vCPU Support Library
 * \brief vCPU handling functionality.
 *
 * This library provides convenience functionality on top of the l4sys vCPU
 * interface to ease programming. It wraps commonly used code and abstracts
 * architecture-dependent parts as far as is reasonable.
 */
33
/**
 * \defgroup api_libvcpu_ext Extended vCPU support
 * \ingroup api_libvcpu
 * \brief Extended vCPU handling functionality.
 */
39
/**
 * \brief IRQ/Event enable and disable flags.
 * \ingroup api_libvcpu
 *
 * Values mirror the #L4_VCPU_F_IRQ bit of the vCPU state word, so a saved
 * state can be restored with l4vcpu_irq_restore().
 */
typedef enum l4vcpu_irq_state_t
{
  L4VCPU_IRQ_STATE_DISABLED = 0,             ///< IRQ/Event delivery disabled
  L4VCPU_IRQ_STATE_ENABLED  = L4_VCPU_F_IRQ, ///< IRQ/Event delivery enabled
} l4vcpu_irq_state_t;
49
/** \brief vCPU state word type. \ingroup api_libvcpu */
typedef l4_umword_t l4vcpu_state_t;
/** \brief Call-back type used to handle a pending vCPU event/IRQ.
 *  \ingroup api_libvcpu */
typedef void (*l4vcpu_event_hndl_t)(l4_vcpu_state_t *vcpu);
/** \brief Call-back type invoked right before any IPC operation, to set up
 *  the UTCB. \ingroup api_libvcpu */
typedef void (*l4vcpu_setup_ipc_t)(l4_utcb_t *utcb);
53
/**
 * \brief Return the state flags of a vCPU.
 * \ingroup api_libvcpu
 *
 * \param vcpu  Pointer to vCPU area.
 *
 * \return Current state word of the vCPU (flags such as #L4_VCPU_F_IRQ).
 */
L4_CV L4_INLINE
l4vcpu_state_t
l4vcpu_state(l4_vcpu_state_t const *vcpu) L4_NOTHROW;
63
/**
 * \brief Disable a vCPU for event delivery.
 * \ingroup api_libvcpu
 *
 * Delivery is turned off by clearing #L4_VCPU_F_IRQ in the vCPU state
 * word; the call does not block.
 *
 * \param vcpu  Pointer to vCPU area.
 */
L4_CV L4_INLINE
void
l4vcpu_irq_disable(l4_vcpu_state_t *vcpu) L4_NOTHROW;
73
/**
 * \brief Disable a vCPU for event delivery and return previous state.
 * \ingroup api_libvcpu
 *
 * Use l4vcpu_irq_restore() to bring the vCPU back to the returned state.
 *
 * \param vcpu  Pointer to vCPU area.
 *
 * \return IRQ state before disabling IRQs.
 */
L4_CV L4_INLINE
l4vcpu_irq_state_t
l4vcpu_irq_disable_save(l4_vcpu_state_t *vcpu) L4_NOTHROW;
85
/**
 * \brief Enable a vCPU for event delivery.
 * \ingroup api_libvcpu
 *
 * If events are already pending when enabling, they are handled via
 * \a do_event_work_cb before this function returns.
 *
 * \param vcpu             Pointer to vCPU area.
 * \param utcb             Utcb pointer of the calling vCPU.
 * \param do_event_work_cb Call-back function that is called in case an
 *                         event (such as an interrupt) is pending.
 * \param setup_ipc        Function call-back that is called right before
 *                         any IPC operation.
 */
L4_CV L4_INLINE
void
l4vcpu_irq_enable(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
                  l4vcpu_event_hndl_t do_event_work_cb,
                  l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
102
/**
 * \brief Restore a previously saved IRQ/event state.
 * \ingroup api_libvcpu
 *
 * Only re-enables event delivery when \a s indicates delivery was enabled;
 * for a saved disabled state this call is a no-op.
 *
 * \param vcpu             Pointer to vCPU area.
 * \param s                IRQ state to be restored.
 * \param utcb             Utcb pointer of the calling vCPU.
 * \param do_event_work_cb Call-back function that is called in case an
 *                         event (such as an interrupt) is pending after
 *                         enabling.
 * \param setup_ipc        Function call-back that is called right before
 *                         any IPC operation.
 */
L4_CV L4_INLINE
void
l4vcpu_irq_restore(l4_vcpu_state_t *vcpu, l4vcpu_irq_state_t s,
                   l4_utcb_t *utcb,
                   l4vcpu_event_hndl_t do_event_work_cb,
                   l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
122
/**
 * \internal
 * \brief Wait for an incoming message and handle it.
 * \ingroup api_libvcpu
 *
 * Event delivery is disabled before the IPC operation; the received
 * message tag and sender label are stored in the vCPU state area.
 *
 * \param vcpu             Pointer to vCPU area.
 * \param utcb             Utcb pointer of the calling vCPU.
 * \param to               Timeout to do IPC operation with.
 * \param do_event_work_cb Call-back function that is called in case an
 *                         event (such as an interrupt) is pending after
 *                         enabling.
 * \param setup_ipc        Function call-back that is called right before
 *                         any IPC operation.
 */
L4_CV L4_INLINE
void
l4vcpu_wait(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
            l4_timeout_t to,
            l4vcpu_event_hndl_t do_event_work_cb,
            l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
142
/**
 * \brief Halt the vCPU (sleep).
 * \ingroup api_libvcpu
 *
 * Sleeps until the next event/IRQ arrives; event delivery is enabled
 * again when this function returns.
 *
 * \param vcpu             Pointer to vCPU area.
 * \param utcb             Utcb pointer of the calling vCPU.
 * \param do_event_work_cb Call-back function that is called when the vCPU
 *                         awakes and needs to handle an event/IRQ.
 * \param setup_ipc        Function call-back that is called right before
 *                         any IPC operation.
 */
L4_CV L4_INLINE
void
l4vcpu_halt(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
            l4vcpu_event_hndl_t do_event_work_cb,
            l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
159
160
/**
 * \brief Print the state of a vCPU.
 * \ingroup api_libvcpu
 *
 * \param vcpu   Pointer to vCPU area.
 * \param prefix String prepended to each line printed.
 */
L4_CV void
l4vcpu_print_state(l4_vcpu_state_t *vcpu, const char *prefix) L4_NOTHROW;
170
/**
 * \internal
 * Architecture-specific companion to l4vcpu_print_state() — implementation
 * lives in the per-architecture source, presumably printing arch registers.
 */
L4_CV void
l4vcpu_print_state_arch(l4_vcpu_state_t *vcpu, const char *prefix) L4_NOTHROW;
176
177
/**
 * \brief Return whether the entry reason was an IRQ/IPC message.
 * \ingroup api_libvcpu
 *
 * \param vcpu Pointer to vCPU area.
 *
 * \return 0 if not, != 0 otherwise.
 */
L4_CV L4_INLINE
int
l4vcpu_is_irq_entry(l4_vcpu_state_t *vcpu) L4_NOTHROW;
189
/**
 * \brief Return whether the entry reason was a page fault.
 * \ingroup api_libvcpu
 *
 * \param vcpu Pointer to vCPU area.
 *
 * \return 0 if not, != 0 otherwise.
 */
L4_CV L4_INLINE
int
l4vcpu_is_page_fault_entry(l4_vcpu_state_t *vcpu) L4_NOTHROW;
201
/**
 * \brief Allocate state area for an extended vCPU.
 * \ingroup api_libvcpu_ext
 *
 * \retval vcpu      Allocated vcpu-state area.
 * \retval ext_state Allocated extended vcpu-state area.
 * \param  task      Task to use for allocation.
 * \param  regmgr    Region manager to use for allocation.
 *
 * \return 0 for success, error code otherwise. On error the output
 *         parameters should be considered invalid.
 */
L4_CV int
l4vcpu_ext_alloc(l4_vcpu_state_t **vcpu, l4_addr_t *ext_state,
                 l4_cap_idx_t task, l4_cap_idx_t regmgr) L4_NOTHROW;
216
217 /* ===================================================================== */
218 /* Implementations */
219
220 #include <l4/sys/ipc.h>
221 #include <l4/vcpu/vcpu_arch.h>
222
223 L4_CV L4_INLINE
224 l4vcpu_state_t
225 l4vcpu_state(l4_vcpu_state_t const *vcpu) L4_NOTHROW
226 {
227   return vcpu->state;
228 }
229
230 L4_CV L4_INLINE
231 void
232 l4vcpu_irq_disable(l4_vcpu_state_t *vcpu) L4_NOTHROW
233 {
234   vcpu->state &= ~L4_VCPU_F_IRQ;
235   l4_barrier();
236 }
237
238 L4_CV L4_INLINE
239 l4vcpu_irq_state_t
240 l4vcpu_irq_disable_save(l4_vcpu_state_t *vcpu) L4_NOTHROW
241 {
242   l4vcpu_irq_state_t s = (l4vcpu_irq_state_t)l4vcpu_state(vcpu);
243   l4vcpu_irq_disable(vcpu);
244   return s;
245 }
246
247 L4_CV L4_INLINE
248 void
249 l4vcpu_wait(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
250             l4_timeout_t to,
251             l4vcpu_event_hndl_t do_event_work_cb,
252             l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
253 {
254   l4vcpu_irq_disable(vcpu);
255   setup_ipc(utcb);
256   vcpu->i.tag = l4_ipc_wait(utcb, &vcpu->i.label, to);
257   if (EXPECT_TRUE(!l4_msgtag_has_error(vcpu->i.tag)))
258     do_event_work_cb(vcpu);
259 }
260
261 L4_CV L4_INLINE
262 void
263 l4vcpu_irq_enable(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
264                   l4vcpu_event_hndl_t do_event_work_cb,
265                   l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
266 {
267   while (1)
268     {
269       vcpu->state |= L4_VCPU_F_IRQ;
270       l4_barrier();
271
272       if (EXPECT_TRUE(!(vcpu->sticky_flags & L4_VCPU_SF_IRQ_PENDING)))
273         break;
274
275       l4vcpu_wait(vcpu, utcb, L4_IPC_BOTH_TIMEOUT_0,
276                   do_event_work_cb, setup_ipc);
277     }
278 }
279
280 L4_CV L4_INLINE
281 void
282 l4vcpu_irq_restore(l4_vcpu_state_t *vcpu, l4vcpu_irq_state_t s,
283                    l4_utcb_t *utcb,
284                    l4vcpu_event_hndl_t do_event_work_cb,
285                    l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
286 {
287   if (s & L4_VCPU_F_IRQ)
288     l4vcpu_irq_enable(vcpu, utcb, do_event_work_cb, setup_ipc);
289 }
290
L4_CV L4_INLINE
void
l4vcpu_halt(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
            l4vcpu_event_hndl_t do_event_work_cb,
            l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
{
  /* Open wait with infinite timeout: sleep until a message/event arrives
   * and let do_event_work_cb handle it. */
  l4vcpu_wait(vcpu, utcb, L4_IPC_NEVER, do_event_work_cb, setup_ipc);
  /* Resume with event delivery enabled again (handles anything that
   * became pending meanwhile). */
  l4vcpu_irq_enable(vcpu, utcb, do_event_work_cb, setup_ipc);
}
300
301 __END_DECLS