/*
 * (c) 2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>
 *     economic rights: Technische Universität Dresden (Germany)
 *
 * This file is part of TUD:OS and distributed under the terms of the
 * GNU General Public License 2.
 * Please see the COPYING-GPL-2 file for details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License. This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 */
#include <l4/sys/vcpu.h>
#include <l4/sys/utcb.h>
/**
 * \defgroup api_libvcpu vCPU Support Library
 * \brief vCPU handling functionality.
 *
 * This library provides convenience functionality on top of the l4sys vCPU
 * interface to ease programming. It wraps commonly used code and abstracts
 * architecture-dependent parts as far as reasonable.
 */

/**
 * \defgroup api_libvcpu_ext Extended vCPU support
 * \ingroup api_libvcpu
 * \brief Extended vCPU handling functionality.
 */
41 * \brief IRQ/Event enable and disable flags.
42 * \ingroup api_libvcpu
44 typedef enum l4vcpu_irq_state_t
46 L4VCPU_IRQ_STATE_DISABLED = 0, ///< IRQ/Event delivery disabled
47 L4VCPU_IRQ_STATE_ENABLED = L4_VCPU_F_IRQ, ///< IRQ/Event delivery enabled
50 typedef l4_umword_t l4vcpu_state_t;
51 typedef void (*l4vcpu_event_hndl_t)(l4_vcpu_state_t *vcpu);
52 typedef void (*l4vcpu_setup_ipc_t)(l4_utcb_t *utcb);
55 * \brief Return the state flags of a vCPU.
56 * \ingroup api_libvcpu
58 * \param vcpu Pointer to vCPU area.
62 l4vcpu_state(l4_vcpu_state_t const *vcpu) L4_NOTHROW;
65 * \brief Disable a vCPU for event delivery.
66 * \ingroup api_libvcpu
68 * \param vcpu Pointer to vCPU area.
72 l4vcpu_irq_disable(l4_vcpu_state_t *vcpu) L4_NOTHROW;
75 * \brief Disable a vCPU for event delivery and return previous state.
76 * \ingroup api_libvcpu
78 * \param vcpu Pointer to vCPU area.
80 * \return IRQ state before disabling IRQs.
84 l4vcpu_irq_disable_save(l4_vcpu_state_t *vcpu) L4_NOTHROW;
87 * \brief Enable a vCPU for event delivery.
88 * \ingroup api_libvcpu
90 * \param vcpu Pointer to vCPU area.
91 * \param utcb Utcb pointer of the calling vCPU.
92 * \param do_event_work_cb Call-back function that is called in case an
93 * event (such as an interrupt) is pending.
94 * \param setup_ipc Function call-back that is called right before
95 * any IPC operation, and before event delivery is
100 l4vcpu_irq_enable(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
101 l4vcpu_event_hndl_t do_event_work_cb,
102 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
105 * \brief Restore a previously saved IRQ/event state.
106 * \ingroup api_libvcpu
108 * \param vcpu Pointer to vCPU area.
109 * \param s IRQ state to be restored.
110 * \param utcb Utcb pointer of the calling vCPU.
111 * \param do_event_work_cb Call-back function that is called in case an
112 * event (such as an interrupt) is pending after
114 * \param setup_ipc Function call-back that is called right before
115 * any IPC operation, and before event delivery is
120 l4vcpu_irq_restore(l4_vcpu_state_t *vcpu, l4vcpu_irq_state_t s,
122 l4vcpu_event_hndl_t do_event_work_cb,
123 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
127 * \ingroup api_libvcpu
129 * \param vcpu Pointer to vCPU area.
130 * \param utcb Utcb pointer of the calling vCPU.
131 * \param to Timeout to do IPC operation with.
132 * \param do_event_work_cb Call-back function that is called in case an
133 * event (such as an interrupt) is pending after
135 * \param setup_ipc Function call-back that is called right before
140 l4vcpu_wait(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
142 l4vcpu_event_hndl_t do_event_work_cb,
143 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
146 * \brief Wait for event.
147 * \ingroup api_libvcpu
149 * \param vcpu Pointer to vCPU area.
150 * \param utcb Utcb pointer of the calling vCPU.
151 * \param do_event_work_cb Call-back function that is called when the vCPU
152 * awakes and needs to handle an event/IRQ.
153 * \param setup_ipc Function call-back that is called right before
156 * Note that event delivery remains disabled after this function returns.
160 l4vcpu_wait_for_event(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
161 l4vcpu_event_hndl_t do_event_work_cb,
162 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW;
166 * \brief Print the state of a vCPU.
167 * \ingroup api_libvcpu
169 * \param vcpu Pointer to vCPU area.
170 * \param prefix A prefix for each line printed.
173 l4vcpu_print_state(l4_vcpu_state_t *vcpu, const char *prefix) L4_NOTHROW;
179 l4vcpu_print_state_arch(l4_vcpu_state_t *vcpu, const char *prefix) L4_NOTHROW;
183 * \brief Return whether the entry reason was an IRQ/IPC message.
184 * \ingroup api_libvcpu
186 * \param vcpu Pointer to vCPU area.
188 * return 0 if not, !=0 otherwise.
192 l4vcpu_is_irq_entry(l4_vcpu_state_t *vcpu) L4_NOTHROW;
195 * \brief Return whether the entry reason was a page fault.
196 * \ingroup api_libvcpu
198 * \param vcpu Pointer to vCPU area.
200 * return 0 if not, !=0 otherwise.
204 l4vcpu_is_page_fault_entry(l4_vcpu_state_t *vcpu) L4_NOTHROW;
207 * \brief Allocate state area for an extended vCPU.
208 * \ingroup api_libvcpu_ext
210 * \retval vcpu Allocated vcpu-state area.
211 * \retval ext_state Allocated extended vcpu-state area.
212 * \param task Task to use for allocation.
213 * \param regmgr Region manager to use for allocation.
215 * \return 0 for success, error code otherwise
218 l4vcpu_ext_alloc(l4_vcpu_state_t **vcpu, l4_addr_t *ext_state,
219 l4_cap_idx_t task, l4_cap_idx_t regmgr) L4_NOTHROW;
221 /* ===================================================================== */
222 /* Implementations */
224 #include <l4/sys/ipc.h>
225 #include <l4/vcpu/vcpu_arch.h>
229 l4vcpu_state(l4_vcpu_state_t const *vcpu) L4_NOTHROW
236 l4vcpu_irq_disable(l4_vcpu_state_t *vcpu) L4_NOTHROW
238 vcpu->state &= ~L4_VCPU_F_IRQ;
244 l4vcpu_irq_disable_save(l4_vcpu_state_t *vcpu) L4_NOTHROW
246 l4vcpu_irq_state_t s = (l4vcpu_irq_state_t)l4vcpu_state(vcpu);
247 l4vcpu_irq_disable(vcpu);
253 l4vcpu_wait(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
255 l4vcpu_event_hndl_t do_event_work_cb,
256 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
258 l4vcpu_irq_disable(vcpu);
260 vcpu->i.tag = l4_ipc_wait(utcb, &vcpu->i.label, to);
261 if (EXPECT_TRUE(!l4_msgtag_has_error(vcpu->i.tag)))
262 do_event_work_cb(vcpu);
267 l4vcpu_irq_enable(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
268 l4vcpu_event_hndl_t do_event_work_cb,
269 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
271 if (!(vcpu->state & L4_VCPU_F_IRQ))
279 vcpu->state |= L4_VCPU_F_IRQ;
282 if (EXPECT_TRUE(!(vcpu->sticky_flags & L4_VCPU_SF_IRQ_PENDING)))
285 l4vcpu_wait(vcpu, utcb, L4_IPC_BOTH_TIMEOUT_0,
286 do_event_work_cb, setup_ipc);
292 l4vcpu_irq_restore(l4_vcpu_state_t *vcpu, l4vcpu_irq_state_t s,
294 l4vcpu_event_hndl_t do_event_work_cb,
295 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
297 if (s & L4_VCPU_F_IRQ)
298 l4vcpu_irq_enable(vcpu, utcb, do_event_work_cb, setup_ipc);
303 l4vcpu_wait_for_event(l4_vcpu_state_t *vcpu, l4_utcb_t *utcb,
304 l4vcpu_event_hndl_t do_event_work_cb,
305 l4vcpu_setup_ipc_t setup_ipc) L4_NOTHROW
307 l4vcpu_wait(vcpu, utcb, L4_IPC_NEVER, do_event_work_cb, setup_ipc);