/*
 * (c) 2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>
 *     economic rights: Technische Universität Dresden (Germany)
 *
 * This file is part of TUD:OS and distributed under the terms of the
 * GNU General Public License 2.
 * Please see the COPYING-GPL-2 file for details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction.  Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License.  This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 */
#pragma once

#include <l4/sys/vcpu.h>
#include <l4/sys/utcb.h>

__BEGIN_DECLS

/**
 * \defgroup api_libvcpu vCPU Support Library
 * \brief vCPU handling functionality.
 *
 * This library provides convenience functionality on top of the l4sys vCPU
 * interface to ease programming. It wraps commonly used code and abstracts
 * architecture-dependent parts as far as reasonable.
 */

/**
 * \defgroup api_libvcpu_ext Extended vCPU support
 * \ingroup api_libvcpu
 * \brief Extended vCPU handling functionality.
 */

typedef void (*l4vcpu_event_hndl_t)(l4_vcpu_state_t *vcpu);
typedef void (*l4vcpu_setup_ipc_t)(l4_utcb_t *utcb);
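
/*
 * Usage sketch (illustrative only, not part of the library): applications
 * provide callbacks of the two types above. The names my_event_handler()
 * and my_setup_ipc() are hypothetical placeholders; a real event handler
 * would typically dispatch on the entry reason, e.g. via
 * l4vcpu_is_irq_entry() declared below.
 *
 *   static void my_event_handler(l4_vcpu_state_t *vcpu)
 *   {
 *     // Handle the pending event, e.g. the IPC/IRQ described by vcpu->i.
 *     (void)vcpu;
 *   }
 *
 *   static void my_setup_ipc(l4_utcb_t *utcb)
 *   {
 *     // Prepare the UTCB for the upcoming receive; often a no-op.
 *     (void)utcb;
 *   }
 */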

/**
 * \brief Disable a vCPU for event delivery.
 * \ingroup api_libvcpu
 *
 * \param vcpu  Pointer to vCPU area.
 */
L4_CV L4_INLINE
void
l4vcpu_irq_disable(l4_vcpu_state_t *vcpu) L4_NOTHROW;

/**
 * \brief Disable a vCPU for event delivery and return previous state.
 * \ingroup api_libvcpu
 *
 * \param vcpu  Pointer to vCPU area.
 *
 * \return IRQ state before disabling IRQs.
 */
L4_CV L4_INLINE
unsigned
l4vcpu_irq_disable_save(l4_vcpu_state_t *vcpu) L4_NOTHROW;

/**
 * \brief Enable a vCPU for event delivery.
 * \ingroup api_libvcpu
 *
 * \param vcpu             Pointer to vCPU area.
 * \param utcb             Utcb pointer of the calling vCPU.
 * \param do_event_work_cb Call-back function that is called in case an
 *                         event (such as an interrupt) is pending.
 * \param setup_ipc        Function call-back that is called right before
 *                         any IPC operation, and before event delivery is
 *                         enabled.
 */
L4_CV L4_INLINE
void
l4vcpu_irq_enable(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
                  l4vcpu_event_hndl_t do_event_work_cb,
                  l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;

/**
 * \brief Restore a previously saved IRQ/event state.
 * \ingroup api_libvcpu
 *
 * \param vcpu             Pointer to vCPU area.
 * \param s                IRQ state to be restored.
 * \param utcb             Utcb pointer of the calling vCPU.
 * \param do_event_work_cb Call-back function that is called in case an
 *                         event (such as an interrupt) is pending after
 *                         enabling.
 * \param setup_ipc        Function call-back that is called right before
 *                         any IPC operation, and before event delivery is
 *                         enabled.
 */
L4_CV L4_INLINE
void
l4vcpu_irq_restore(l4_vcpu_state_t *vcpu, unsigned s,
                   l4_utcb_t *utcb,
                   l4vcpu_event_hndl_t do_event_work_cb,
                   l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
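
/*
 * Usage sketch (illustrative only): the save/restore pair protects a
 * critical section against vCPU event delivery. my_event_handler() and
 * my_setup_ipc() are the hypothetical callbacks sketched above.
 *
 *   unsigned irq_state = l4vcpu_irq_disable_save(vcpu);
 *   // ... critical section, no events are delivered to this vCPU ...
 *   l4vcpu_irq_restore(vcpu, irq_state, l4_utcb(),
 *                      my_event_handler, my_setup_ipc);
 */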

/**
 * \internal
 * \ingroup api_libvcpu
 *
 * \param vcpu             Pointer to vCPU area.
 * \param utcb             Utcb pointer of the calling vCPU.
 * \param to               Timeout to do IPC operation with.
 * \param do_event_work_cb Call-back function that is called in case an
 *                         event (such as an interrupt) is pending after
 *                         enabling.
 * \param setup_ipc        Function call-back that is called right before
 *                         any IPC operation.
 */
L4_CV L4_INLINE
void
l4vcpu_wait(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
            l4_timeout_t to,
            l4vcpu_event_hndl_t do_event_work_cb,
            l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;

/**
 * \brief Wait for event.
 * \ingroup api_libvcpu
 *
 * \param vcpu             Pointer to vCPU area.
 * \param utcb             Utcb pointer of the calling vCPU.
 * \param do_event_work_cb Call-back function that is called when the vCPU
 *                         awakes and needs to handle an event/IRQ.
 * \param setup_ipc        Function call-back that is called right before
 *                         any IPC operation.
 *
 * Note that event delivery remains disabled after this function returns.
 */
L4_CV L4_INLINE
void
l4vcpu_wait_for_event(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
                      l4vcpu_event_hndl_t do_event_work_cb,
                      l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
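
/*
 * Usage sketch (illustrative only): block until the next event arrives,
 * for example in an idle loop. Event delivery stays disabled after the
 * call returns, so the caller re-enables it explicitly. nothing_to_do()
 * and the callback names are hypothetical.
 *
 *   while (nothing_to_do())
 *     l4vcpu_wait_for_event(vcpu, l4_utcb(),
 *                           my_event_handler, my_setup_ipc);
 *   l4vcpu_irq_enable(vcpu, l4_utcb(), my_event_handler, my_setup_ipc);
 */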


/**
 * \brief Print the state of a vCPU.
 * \ingroup api_libvcpu
 *
 * \param vcpu   Pointer to vCPU area.
 * \param prefix A prefix for each line printed.
 */
L4_CV void
l4vcpu_print_state(const l4_vcpu_state_t *vcpu, const char *prefix) L4_NOTHROW;

/**
 * \internal
 */
L4_CV void
l4vcpu_print_state_arch(const l4_vcpu_state_t *vcpu, const char *prefix) L4_NOTHROW;

/**
 * \brief Return whether the entry reason was an IRQ/IPC message.
 * \ingroup api_libvcpu
 *
 * \param vcpu Pointer to vCPU area.
 *
 * \return 0 if not, !=0 otherwise.
 */
L4_CV L4_INLINE
int
l4vcpu_is_irq_entry(l4_vcpu_state_t const *vcpu) L4_NOTHROW;

/**
 * \brief Return whether the entry reason was a page fault.
 * \ingroup api_libvcpu
 *
 * \param vcpu Pointer to vCPU area.
 *
 * \return 0 if not, !=0 otherwise.
 */
L4_CV L4_INLINE
int
l4vcpu_is_page_fault_entry(l4_vcpu_state_t const *vcpu) L4_NOTHROW;
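
/*
 * Usage sketch (illustrative only): distinguishing entry reasons at the
 * top of a vCPU entry handler. handle_page_fault() and handle_irq() are
 * hypothetical application functions.
 *
 *   if (l4vcpu_is_page_fault_entry(vcpu))
 *     handle_page_fault(vcpu);
 *   else if (l4vcpu_is_irq_entry(vcpu))
 *     handle_irq(vcpu);
 */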

/**
 * \brief Allocate state area for an extended vCPU.
 * \ingroup api_libvcpu_ext
 *
 * \param[out] vcpu      Allocated vCPU state area.
 * \param[out] ext_state Allocated extended vCPU state area.
 * \param      task      Task to use for allocation.
 * \param      regmgr    Region manager to use for allocation.
 *
 * \return 0 for success, error code otherwise.
 */
L4_CV int
l4vcpu_ext_alloc(l4_vcpu_state_t **vcpu, l4_addr_t *ext_state,
                 l4_cap_idx_t task, l4_cap_idx_t regmgr) L4_NOTHROW;
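
/*
 * Usage sketch (illustrative only): allocating the state area for an
 * extended vCPU. Which task and region-manager capabilities to pass
 * depends on the environment, so they are left as placeholders here.
 *
 *   l4_vcpu_state_t *vcpu;
 *   l4_addr_t ext_state;
 *   l4_cap_idx_t task_cap = ...;  // capability of the task to allocate in
 *   l4_cap_idx_t rm_cap   = ...;  // region-manager capability
 *   if (l4vcpu_ext_alloc(&vcpu, &ext_state, task_cap, rm_cap))
 *     ;  // handle allocation failure
 */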

/* ===================================================================== */
/* Implementations */

#include <l4/sys/ipc.h>
#include <l4/vcpu/vcpu_arch.h>

L4_CV L4_INLINE
void
l4vcpu_irq_disable(l4_vcpu_state_t *vcpu) L4_NOTHROW
{
  vcpu->state &= ~L4_VCPU_F_IRQ;
  l4_barrier();
}

L4_CV L4_INLINE
unsigned
l4vcpu_irq_disable_save(l4_vcpu_state_t *vcpu) L4_NOTHROW
{
  unsigned s = vcpu->state;
  l4vcpu_irq_disable(vcpu);
  return s;
}

L4_CV L4_INLINE
void
l4vcpu_wait(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
            l4_timeout_t to,
            l4vcpu_event_hndl_t do_event_work_cb,
            l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
{
  l4vcpu_irq_disable(vcpu);
  setup_ipc(utcb);
  vcpu->i.tag = l4_ipc_wait(utcb, &vcpu->i.label, to);
  if (L4_LIKELY(!l4_msgtag_has_error(vcpu->i.tag)))
    do_event_work_cb(vcpu);
}

L4_CV L4_INLINE
void
l4vcpu_irq_enable(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
                  l4vcpu_event_hndl_t do_event_work_cb,
                  l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
{
  if (!(vcpu->state & L4_VCPU_F_IRQ))
    {
      setup_ipc(utcb);
      l4_barrier();
    }

  while (1)
    {
      /* Announce that event delivery is enabled again. */
      vcpu->state |= L4_VCPU_F_IRQ;
      l4_barrier();

      if (L4_LIKELY(!(vcpu->sticky_flags & L4_VCPU_SF_IRQ_PENDING)))
        break;

      /* An event became pending while delivery was disabled: fetch and
         handle it with a non-blocking wait, then retry. */
      l4vcpu_wait(vcpu, utcb, L4_IPC_BOTH_TIMEOUT_0,
                  do_event_work_cb, setup_ipc);
    }
}

L4_CV L4_INLINE
void
l4vcpu_irq_restore(l4_vcpu_state_t *vcpu, unsigned s,
                   l4_utcb_t *utcb,
                   l4vcpu_event_hndl_t do_event_work_cb,
                   l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
{
  if (s & L4_VCPU_F_IRQ)
    l4vcpu_irq_enable(vcpu, utcb, do_event_work_cb, setup_ipc);
  else if (vcpu->state & L4_VCPU_F_IRQ)
    l4vcpu_irq_disable(vcpu);
}

L4_CV L4_INLINE
void
l4vcpu_wait_for_event(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
                      l4vcpu_event_hndl_t do_event_work_cb,
                      l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
{
  l4vcpu_wait(vcpu, utcb, L4_IPC_NEVER, do_event_work_cb, setup_ipc);
}

__END_DECLS