4 * App_thread functions for creating and preparing a new VCPU
6 * (c) 2011-2013 Björn Döbel <doebel@os.inf.tu-dresden.de>,
7 * economic rights: Technische Universität Dresden (Germany)
8 * This file is part of TUD:OS and distributed under the terms of the
9 * GNU General Public License 2.
10 * Please see the COPYING-GPL-2 file for details.
14 #include "app_loading"
15 #include "thread_group.h"
17 #include <l4/libloader/remote_app_model>
18 #include <pthread-l4.h>
/*
 * Allocate kernel-user memory (kumem) backing this vCPU and initialize
 * its register state.
 *
 * Layout of the kumem area: the vCPU's UTCB sits at offset 0, the vCPU
 * state page right behind it at L4_UTCB_OFFSET.
 */
void Romain::App_thread::alloc_vcpu_mem()
	L4Re::Util::kumem_alloc(&kumem, 0);
	_check(kumem == 0, "out of memory in kumem_alloc");
	// UTCB first, vCPU state page directly after it.
	_vcpu_utcb = (l4_utcb_t *)kumem;
	_vcpu = L4vcpu::Vcpu::cast(kumem + L4_UTCB_OFFSET);
	/* store segment registers - stolen from the example */
	//_vcpu->r()->gs = _master_ds;
	_vcpu->r()->fs = _master_ds;
	_vcpu->r()->es = _master_ds;
	_vcpu->r()->ds = _master_ds;
	_vcpu->r()->ss = _master_ds;
	/* We want to catch ALL exceptions for this vCPU. */
	_vcpu->saved_state()->set(
	| L4_VCPU_F_PAGE_FAULTS
	| L4_VCPU_F_EXCEPTIONS
	| L4_VCPU_F_FPU_ENABLED
	DEBUG() << "VCPU: utcb = " << (void*)vcpu_utcb()
	        << " vcpu @ " << (void*)vcpu();
/*
 * Pre-fault the vCPU handler stack so no page fault is raised on it
 * while the vCPU runs with interrupts enabled.
 */
void Romain::App_thread::touch_stacks()
	/* We need to touch at least the handler stack, because upon entry, the vCPU
	 * still has interrupts enabled and we must not raise one by causing a page
	 * fault on the stack area.
	 */
	DEBUG() << "Stack info:";
	DEBUG() << "   handler stack @ " << (void*)_handler_stack
	        << " - " << (void*)(_handler_stack + sizeof(_handler_stack));
	// Touch every page read/write so it is mapped before first use.
	l4_touch_rw(_handler_stack, sizeof(_handler_stack));
/*
 * Allocate a capability slot and create the kernel thread object that
 * will execute this vCPU.
 */
void Romain::App_thread::alloc_vcpu_cap()
	_vcpu_cap = chkcap(L4Re::Util::cap_alloc.alloc<L4::Thread>(),
	chksys(L4Re::Env::env()->factory()->create_thread(_vcpu_cap),
	// Name the thread object so it is identifiable in the kernel debugger.
	l4_debugger_set_object_name(_vcpu_cap.cap(), "vcpu thread");
78 extern "C" void* pthread_fn(void *);
81 * Create a replica thread
/*
 * Create and launch the replica thread. The vCPU entry point is set to
 * the (previously configured) handler function/stack, and the replica
 * itself runs inside a dedicated pthread executing pthread_fn(this).
 */
void Romain::App_thread::start()
	/*
	 * We only set handler IP and SP here, because beforehand our creator
	 * may have modified them.
	 */
	_vcpu->entry_sp(handler_sp());
	_vcpu->entry_ip((l4_umword_t)_handler_fn);
	// pthread_fn receives this App_thread object as its sole argument.
	int err = pthread_create(&_pthread, NULL, pthread_fn, this);
	_check(err != 0, "pthread_create");
	//_vcpu_cap = L4::Cap<L4::Thread>(pthread_getl4cap(_pthread));
/*
 * Calculate checksum of the replica's state
 *
 * This checksum is used to compare replica states. It currently folds in
 * the vCPU register state starting with the instruction pointer.
 */
Romain::App_thread::csum_state()
	// XXX: this should also include the UTCB, which
	//      means the _used_ part of the UTCB
	return _vcpu->r()->ip
/*
 * Handle an ex_regs() invocation on the replicated thread group:
 * stop all replicas, force them into one common state (IP/SP taken from
 * the caller's message registers), then resume them.
 */
Romain::Thread_group::ex_regs(Romain::App_thread *caller)
	/*
	 * Thoughts on ex_regs
	 *
	 * One use case of ex_regs is for an external thread to halt execution
	 * of a thread, store the halted thread's state and later continue from
	 * this point. As replicated threads execute independently, we will not
	 * stop all replicas in the same state. However, we require the replicas
	 * to be in the same state on resumption.
	 *
	 * To make things deterministic, we stop all replicas. As a heuristic, we
	 * then assume that the replicas are all correct (as all state
	 * difference can be attributed to diverging execution). We select the last
	 * stopped replica (potentially being the most advanced) as good copy and
	 * copy its state over to all other replicas, thereby forcing them to the
	 *
	 * 1) This may shadow certain errors that occurred prior to the ex_regs.
	 *    -> we need to evaluate how often that happens
	 *
	 * 2) HP NonStop instead executes replicas in lock-step until it can
	 *    figure out which is the most advanced. Then they execute everyone
	 *    else up to this point.
	 *    -> no lack in error detection
	 *
	 * 3) Can we detect situations where this happens as hard overwrite of
	 *    the replicated thread's state -> hence we would not need to first
	 *    enforce identical states? -> heuristics only, cannot determine
	 *    upfront if the caller will later reuse the state.
	 */
	// Only stop the group if it is not already stopped by someone else.
	bool need_to_stop = !this->stopped;
	enter_kdebug("stop thread magic goes here");
	// The caller's message registers live in its replica-local UTCB copy;
	// per the ex_regs protocol, mr[1] carries the new IP, mr[2] the new SP.
	l4_msg_regs_t *buf = l4_utcb_mr_u(reinterpret_cast<l4_utcb_t*>(caller->remote_utcb()));
	DEBUG() << std::hex << buf->mr[0];
	DEBUG() << std::hex << buf->mr[1];
	DEBUG() << std::hex << buf->mr[2];
	// NOTE(review): eip/esp capture the pre-ex_regs state of replica 0 but
	// are not visibly used below — presumably meant as the "old" values to
	// return to the caller; confirm against the missing lines.
	Romain::App_thread* thread = threads[0];
	l4_umword_t eip = thread->vcpu()->r()->ip;
	l4_umword_t esp = thread->vcpu()->r()->sp;
	DEBUG() << (void*)thread->vcpu();
	// Force every replica to the caller-requested IP/SP.
	for (unsigned i = 0; i < threads.size(); ++i) {
		threads[i]->vcpu()->r()->ip = buf->mr[1];
		threads[i]->vcpu()->r()->sp = buf->mr[2];
	// Fake a successful reply tag (3 untyped words) in the caller's EAX.
	caller->vcpu()->r()->ax = l4_msgtag(0,3,0,0).raw;
	/*
	 * If we stopped the threads before,
	 * we need to reactivate them now.
	 */
	enter_kdebug("reactivation magic goes here");
	//enter_kdebug("Thread_group::ex_regs");
/*
 * Handle a Scheduler::run_thread() request issued by a replica.
 * Currently this only logs the scheduling parameters found in the
 * caller's message registers; no actual scheduling change is applied.
 */
Romain::Thread_group::scheduler_run(Romain::App_thread *caller)
	// Message registers as stored in the caller's replica-local UTCB.
	l4_msg_regs_t *buf = l4_utcb_mr_u(reinterpret_cast<l4_utcb_t*>(caller->remote_utcb()));
	DEBUG() << "Thread_group::scheduler_run("
	        << (void*)threads[0]->vcpu()
	DEBUG() << "granularity | affinity.offs : " << std::hex << buf->mr[1];
	DEBUG() << "affinity.map                : " << std::hex << buf->mr[2];
	DEBUG() << "prio                        : " << std::hex << buf->mr[3];
	DEBUG() << "quantum                     : " << std::hex << buf->mr[4];
	DEBUG() << "obj_control                 : " << std::hex << buf->mr[5];
	DEBUG() << "thread                      : " << std::hex << buf->mr[6];
	//enter_kdebug("Thread_group::schedule()");
/*
 * Sanity-check an l4_thread_control() request against the subset of
 * thread features Romain supports. Unsupported combinations are reported
 * and trap into the kernel debugger; supported ones are merely logged.
 */
Romain::Thread_group::sanity_check_control(unsigned flags, l4_utcb_t *utcb)
	DEBUG() << "Control flags: " << std::hex << l4_utcb_mr_u(utcb)->mr[L4_THREAD_CONTROL_MR_IDX_FLAGS];
	// Alien and UX-native execution modes are not emulated.
	if ((flags & L4_THREAD_CONTROL_ALIEN) ||
	    (flags & L4_THREAD_CONTROL_UX_NATIVE)) {
		ERROR() << "ux_native and alien not supported yet";
	if (flags & L4_THREAD_CONTROL_BIND_TASK) {
		fp.raw = l4_utcb_mr_u(utcb)->mr[L4_THREAD_CONTROL_MR_IDX_BIND_TASK + 1];
		DEBUG() << std::hex << "L4_THREAD_CONTROL_BIND_TASK := ("
		        << l4_utcb_mr_u(utcb)->mr[L4_THREAD_CONTROL_MR_IDX_BIND_UTCB] << ", "
		        << l4_fpage_page(fp) << ")";
		// Replicas may only bind to the task the master is running in.
		if (l4_fpage_page(fp) != (L4Re::This_task >> L4_CAP_SHIFT)) {
			ERROR() << "Binding to different task not supported yet.";
			enter_kdebug("error");
	/* Apart from these checks, don't do anything. The replica vCPUs
	unsigned handler = 0;
	if (flags & L4_THREAD_CONTROL_SET_PAGER) {
		pager = l4_utcb_mr_u(utcb)->mr[L4_THREAD_CONTROL_MR_IDX_PAGER];
		DEBUG() << "pager <- " << std::hex << pager;
	if (flags & L4_THREAD_CONTROL_SET_EXC_HANDLER) {
		handler = l4_utcb_mr_u(utcb)->mr[L4_THREAD_CONTROL_MR_IDX_EXC_HANDLER];
		DEBUG() << "exc handler <- " << std::hex << handler;
	// Pager and exception handler must both be the standard Rm thread cap.
	if (handler && pager) {
		if ((handler != pager) ||
		    (handler != Ldr::Remote_app_std_caps::Rm_thread_cap << L4_CAP_SHIFT) ||
		    (pager != Ldr::Remote_app_std_caps::Rm_thread_cap << L4_CAP_SHIFT)) {
			ERROR() << "setting different pager or exc. handler not supported yet";
			enter_kdebug("error");
	// NOTE(review): the two error strings below appear swapped — the first
	// branch tests 'handler' (the exception handler) but reports "pager",
	// the second tests 'pager' but reports "exc. handler". Verify and swap.
	if (handler && (handler != Ldr::Remote_app_std_caps::Rm_thread_cap << L4_CAP_SHIFT)) {
		ERROR() << "setting non-standard pager not supported yet";
	if (pager && (pager != Ldr::Remote_app_std_caps::Rm_thread_cap << L4_CAP_SHIFT)) {
		ERROR() << "setting non-standard exc. handler not supported yet";
/*
 * Handle an l4_thread_control() call for the replica thread group.
 *
 * After validating the request, a BIND_TASK operation allocates a
 * "thread info page" (TIP) whose first word stores the remote UTCB
 * address, maps it into the replica address space, and propagates the
 * TIP/UTCB locations into every replica. All other supported requests
 * are no-ops that simply report success.
 */
Romain::Thread_group::control(Romain::App_thread *t, l4_utcb_t *utcb, Romain::App_model *am)
	unsigned flags = l4_utcb_mr_u(utcb)->mr[L4_THREAD_CONTROL_MR_IDX_FLAGS];
	// Reject/flag any thread feature we do not emulate.
	sanity_check_control(flags, utcb);
	if (flags & L4_THREAD_CONTROL_BIND_TASK) {
		l4_addr_t utcb_remote = l4_utcb_mr_u(utcb)->mr[L4_THREAD_CONTROL_MR_IDX_BIND_UTCB];
		DEBUG() << "Setting remote UTCB to " << (void*)utcb_remote;
		Romain::App_model::Dataspace ds = am->alloc_ds(L4_PAGESIZE); // thread info page
		l4_addr_t local_addr = am->local_attach_ds(ds, L4_PAGESIZE, 0);
		DEBUG() << "Attached TIP to " << (void*)local_addr;
		// First word of the TIP holds the remote UTCB address.
		*reinterpret_cast<l4_umword_t*>(local_addr) = utcb_remote;
		void* tip_addr = am->prog_attach_ds(0, L4_PAGESIZE, ds, 0,
		                                    L4Re::Rm::Search_addr, "thread info page", local_addr);
		// Install the TIP segment descriptor and the replica-local UTCB
		// address in every replica so they all see identical state.
		for (unsigned i = 0; i < threads.size(); ++i) {
			threads[i]->setup_utcb_segdesc(reinterpret_cast<l4_addr_t>(tip_addr), 4);
			l4_addr_t utcb_local = am->rm()->remote_to_local(utcb_remote, i);
			threads[i]->remote_utcb(utcb_local);
			//threads[i]->commit_client_gdt();
		//enter_kdebug("utcb");
	/*
	 * Our current assumption is that the replicated app uses only default
	 * threading features, e.g. does not change pager, exception handler or
	 * any other thread features. Therefore, after sanity checking for these
	 * assumptions, we simply pretend everything went alright.
	 */
	t->vcpu()->r()->ax = l4_msgtag(0, 3, 0, 0).raw;
/*
 * Emulate the Fiasco GDT system call for the replicas.
 *
 * A 1-word request queries the first user-settable GDT entry index; a
 * longer request installs a new GDT entry, which is mirrored into every
 * replica so their descriptor tables stay identical.
 */
Romain::Thread_group::gdt(Romain::App_thread* t, l4_utcb_t *utcb)
	// Selector base of the first GDT slot handed out to replicas.
	enum { replica_gs_base = 0x58 };
	// The caller's message tag is passed/returned through its vCPU EAX.
	l4_msgtag_t *tag = reinterpret_cast<l4_msgtag_t*>(&t->vcpu()->r()->ax);
	DEBUG() << "\033[34;1mGDT: words\033[0m = " << tag->words();
	// 1 word -> query GDT start
	if (tag->words() == 1) {
		// Return the entry index (selector >> 3) in mr[0].
		l4_utcb_mr_u(utcb)->mr[0] = replica_gs_base >> 3;
		t->vcpu()->r()->ax = l4_msgtag(0, 1, 0, 0).raw;
		enter_kdebug("gdt query");
	} else { // setup new GDT entry
		unsigned idx = l4_utcb_mr_u(utcb)->mr[1];
		// 4 words -> one 8-byte descriptor, otherwise a 16-byte one.
		unsigned numbytes = (tag->words() == 4) ? 8 : 16;
		// Apply the descriptor to every replica.
		for (unsigned i = 0; i < threads.size(); ++i) {
			Romain::App_thread* thread = threads[i];
			if ((idx == 0) and (numbytes == 8)) { // actually, we only support a single entry here
				thread->write_gdt_entry(&l4_utcb_mr_u(utcb)->mr[2], numbytes);
				DEBUG() << "GS: " << std::hex << thread->vcpu()->r()->gs;
				enter_kdebug("GDT??");
		// Return the resulting segment selector (entry offset + RPL 3).
		t->vcpu()->r()->ax = l4_msgtag((idx << 3) + replica_gs_base + 3, 0, 0, 0).raw;
/*
 * GateAgent listener thread.
 *
 * Waits for replica vCPUs ("clients") that need an IPC performed on
 * their behalf, temporarily swaps its own UTCB content with the client's,
 * executes the kernel entry with the client's registers, copies the
 * results back, and wakes the client up again.
 *
 * @param gk  the GateAgent object this thread serves (passed as void*).
 */
void* Romain::GateAgent::listener_function(void *gk)
	GateAgent *agent = reinterpret_cast<GateAgent*>(gk);
	// Scratch buffer used to save this thread's own UTCB while the
	// client's UTCB content is swapped in.
	// NOTE(review): element type is 'char*', so this allocates
	// L4_UTCB_OFFSET * sizeof(char*) bytes although only L4_UTCB_OFFSET
	// bytes are copied below — harmless over-allocation, but this was
	// almost certainly meant to be 'static char utcb_copy[L4_UTCB_OFFSET]'.
	static char* utcb_copy[L4_UTCB_OFFSET];
	// Name this thread after its owning group for the kernel debugger.
	snprintf(namebuf, 16, "GK::%s", agent->owner_group->name.c_str());
	l4_debugger_set_object_name(pthread_getl4cap(pthread_self()), namebuf);
	// Block until the owner finished initializing the agent, then the
	// one-shot init semaphore is no longer needed.
	sem_wait(&agent->init_sem);
	sem_destroy(&agent->init_sem);
	DEBUG() << "starting agent loop";
	l4_utcb_t *my_utcb = l4_utcb();
	// Wait until a client registers itself and triggers the gate IRQ.
	while (agent->current_client == 0) {
		tag = agent->gate_irq->receive();
	DEBUG() << "Keeper activated by replica.";
	DEBUG() << "agent " << (void*)agent;
	DEBUG() << "client " << (void*)agent->current_client;
	DEBUG() << " CLNT: " << (void*)agent->current_client << " remote_utcb: "
	        << (void*)agent->current_client->remote_utcb();
	if (agent->current_client == 0) {
		DEBUG() << "!!!!!" << std::hex << " " << tag.raw;
		DEBUG() << l4sys_errtostr(l4_error(tag));
	_check(agent->current_client == 0, "agent called with client NULL?");
	// Save our UTCB and replace it with the client's message registers
	// so the kernel call below runs with the replica's IPC payload.
	memcpy(utcb_copy, my_utcb, L4_UTCB_OFFSET);
	memcpy(my_utcb, (void*)agent->current_client->remote_utcb(), L4_UTCB_OFFSET);
	//outhex32((unsigned)agent->current_client->vcpu()); outstring(" enter kernel\n");
	// Perform the syscall on the client's behalf; result registers are
	// written straight back into the client's vCPU register state.
	asm volatile( L4_ENTER_KERNEL
	              : "=a" (agent->current_client->vcpu()->r()->ax),
	                "=b" (agent->current_client->vcpu()->r()->bx),
	                /* ECX, EDX are overwritten anyway */
	                "=S" (agent->current_client->vcpu()->r()->si),
	                "=D" (agent->current_client->vcpu()->r()->di)
	              : "a" (agent->current_client->vcpu()->r()->ax),
	                /* EBX and EBP will be overwritten with local
	                 * values in L4_ENTER_KERNEL */
	                "c" (agent->current_client->vcpu()->r()->cx),
	                "d" (agent->current_client->vcpu()->r()->dx),
	                "S" (agent->current_client->vcpu()->r()->si),
	                "D" (agent->current_client->vcpu()->r()->di)
	outhex32((unsigned)agent->current_client->vcpu()); outstring(" ret from kernel ");
	outhex32((unsigned)agent->current_client->vcpu()->r()->ax); outstring("\n");
	// Copy the reply into the client's UTCB and restore our own UTCB.
	memcpy((void*)agent->current_client->remote_utcb(), my_utcb, L4_UTCB_OFFSET);
	memcpy(my_utcb, utcb_copy, L4_UTCB_OFFSET);
	// Clear the client slot before waking the replica vCPU with an
	// (untimed) zero-word IPC.
	l4_cap_idx_t cap = agent->current_client->vcpu_cap().cap();
	agent->current_client = 0;
	tag = l4_ipc_send(cap, l4_utcb(), l4_msgtag(0,0,0,0), L4_IPC_NEVER);
	enter_kdebug("gateagent exited");