/*
 * (c) 2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>
 *     economic rights: Technische Universität Dresden (Germany)
 *
 * This file is part of TUD:OS and distributed under the terms of the
 * GNU General Public License 2.
 * Please see the COPYING-GPL-2 file for details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License. This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 */
20 #include <l4/sys/vcpu.h>
21 #include <l4/sys/utcb.h>
/**
 * \defgroup api_libvcpu vCPU Support Library
 * \brief vCPU handling functionality.
 *
 * This library provides convenience functionality on top of the l4sys vCPU
 * interface to ease programming. It wraps commonly used code and abstracts
 * architecture-dependent parts as far as reasonable.
 */

/**
 * \defgroup api_libvcpu_ext Extended vCPU support
 * \ingroup api_libvcpu
 * \brief Extended vCPU handling functionality.
 */
41 * \brief IRQ/Event enable and disable flags.
42 * \ingroup api_libvcpu
44 typedef enum l4vcpu_irq_state_t
46 L4VCPU_IRQ_STATE_DISABLED = 0, ///< IRQ/Event delivery disabled
47 L4VCPU_IRQ_STATE_ENABLED = L4_VCPU_F_IRQ, ///< IRQ/Event delivery enabled
50 typedef l4_umword_t l4vcpu_state_t;
51 typedef void (*l4vcpu_event_hndl_t)(l4_vcpu_state_t *vcpu);
52 typedef void (*l4vcpu_setup_ipc_t)(l4_utcb_t *utcb);
55 * \brief Return the state flags of a vCPU.
56 * \ingroup api_libvcpu
58 * \param vcpu Pointer to vCPU area.
62 l4vcpu_state(l4_vcpu_state_t const *vcpu) L4_NOTHROW;
65 * \brief Disable a vCPU for event delivery.
66 * \ingroup api_libvcpu
68 * \param vcpu Pointer to vCPU area.
72 l4vcpu_irq_disable(l4_vcpu_state_t *vcpu) L4_NOTHROW;
75 * \brief Disable a vCPU for event delivery and return previous state.
76 * \ingroup api_libvcpu
78 * \param vcpu Pointer to vCPU area.
80 * \return IRQ state before disabling IRQs.
84 l4vcpu_irq_disable_save(l4_vcpu_state_t *vcpu) L4_NOTHROW;
87 * \brief Enable a vCPU for event delivery.
88 * \ingroup api_libvcpu
90 * \param vcpu Pointer to vCPU area.
91 * \param utcb Utcb pointer of the calling vCPU.
92 * \param do_event_work_cb Call-back function that is called in case an
93 * event (such as an interrupt) is pending.
94 * \param setup_ipc Function call-back that is called right before
99 l4vcpu_irq_enable(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
100 l4vcpu_event_hndl_t do_event_work_cb,
101 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
104 * \brief Restore a previously saved IRQ/event state.
105 * \ingroup api_libvcpu
107 * \param vcpu Pointer to vCPU area.
108 * \param s IRQ state to be restored.
109 * \param utcb Utcb pointer of the calling vCPU.
110 * \param do_event_work_cb Call-back function that is called in case an
111 * event (such as an interrupt) is pending after
113 * \param setup_ipc Function call-back that is called right before
118 l4vcpu_irq_restore(l4_vcpu_state_t *vcpu, l4vcpu_irq_state_t s,
120 l4vcpu_event_hndl_t do_event_work_cb,
121 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
125 * \ingroup api_libvcpu
127 * \param vcpu Pointer to vCPU area.
128 * \param utcb Utcb pointer of the calling vCPU.
129 * \param to Timeout to do IPC operation with.
130 * \param do_event_work_cb Call-back function that is called in case an
131 * event (such as an interrupt) is pending after
133 * \param setup_ipc Function call-back that is called right before
138 l4vcpu_wait(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
140 l4vcpu_event_hndl_t do_event_work_cb,
141 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
144 * \brief Halt the vCPU (sleep).
145 * \ingroup api_libvcpu
147 * \param vcpu Pointer to vCPU area.
148 * \param utcb Utcb pointer of the calling vCPU.
149 * \param do_event_work_cb Call-back function that is called when the vCPU
150 * awakes and needs to handle an event/IRQ.
151 * \param setup_ipc Function call-back that is called right before
156 l4vcpu_halt(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
157 l4vcpu_event_hndl_t do_event_work_cb,
158 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
162 * \brief Print the state of a vCPU.
163 * \ingroup api_libvcpu
165 * \param vcpu Pointer to vCPU area.
166 * \param prefix A prefix for each line printed.
169 l4vcpu_print_state(l4_vcpu_state_t *vcpu, const char *prefix) L4_NOTHROW;
175 l4vcpu_print_state_arch(l4_vcpu_state_t *vcpu, const char *prefix) L4_NOTHROW;
179 * \brief Return whether the entry reason was an IRQ/IPC message.
180 * \ingroup api_libvcpu
182 * \param vcpu Pointer to vCPU area.
184 * return 0 if not, !=0 otherwise.
188 l4vcpu_is_irq_entry(l4_vcpu_state_t *vcpu) L4_NOTHROW;
191 * \brief Return whether the entry reason was a page fault.
192 * \ingroup api_libvcpu
194 * \param vcpu Pointer to vCPU area.
196 * return 0 if not, !=0 otherwise.
200 l4vcpu_is_page_fault_entry(l4_vcpu_state_t *vcpu) L4_NOTHROW;
203 * \brief Allocate state area for an extended vCPU.
204 * \ingroup api_libvcpu_ext
206 * \retval vcpu Allocated vcpu-state area.
207 * \retval ext_state Allocated extended vcpu-state area.
208 * \param task Task to use for allocation.
209 * \param regmgr Region manager to use for allocation.
211 * \return 0 for success, error code otherwise
214 l4vcpu_ext_alloc(l4_vcpu_state_t **vcpu, l4_addr_t *ext_state,
215 l4_cap_idx_t task, l4_cap_idx_t regmgr) L4_NOTHROW;
/* ===================================================================== */
/* Implementations */
220 #include <l4/sys/ipc.h>
221 #include <l4/vcpu/vcpu_arch.h>
225 l4vcpu_state(l4_vcpu_state_t const *vcpu) L4_NOTHROW
232 l4vcpu_irq_disable(l4_vcpu_state_t *vcpu) L4_NOTHROW
234 vcpu->state &= ~L4_VCPU_F_IRQ;
240 l4vcpu_irq_disable_save(l4_vcpu_state_t *vcpu) L4_NOTHROW
242 l4vcpu_irq_state_t s = (l4vcpu_irq_state_t)l4vcpu_state(vcpu);
243 l4vcpu_irq_disable(vcpu);
249 l4vcpu_wait(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
251 l4vcpu_event_hndl_t do_event_work_cb,
252 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
254 l4vcpu_irq_disable(vcpu);
256 vcpu->i.tag = l4_ipc_wait(utcb, &vcpu->i.label, to);
257 if (EXPECT_TRUE(!l4_msgtag_has_error(vcpu->i.tag)))
258 do_event_work_cb(vcpu);
263 l4vcpu_irq_enable(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
264 l4vcpu_event_hndl_t do_event_work_cb,
265 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
269 vcpu->state |= L4_VCPU_F_IRQ;
272 if (EXPECT_TRUE(!(vcpu->sticky_flags & L4_VCPU_SF_IRQ_PENDING)))
275 l4vcpu_wait(vcpu, utcb, L4_IPC_BOTH_TIMEOUT_0,
276 do_event_work_cb, setup_ipc);
282 l4vcpu_irq_restore(l4_vcpu_state_t *vcpu, l4vcpu_irq_state_t s,
284 l4vcpu_event_hndl_t do_event_work_cb,
285 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
287 if (s & L4_VCPU_F_IRQ)
288 l4vcpu_irq_enable(vcpu, utcb, do_event_work_cb, setup_ipc);
293 l4vcpu_halt(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
294 l4vcpu_event_hndl_t do_event_work_cb,
295 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
297 l4vcpu_wait(vcpu, utcb, L4_IPC_NEVER, do_event_work_cb, setup_ipc);
298 l4vcpu_irq_enable(vcpu, utcb, do_event_work_cb, setup_ipc);