10 FIASCO_DECLARE_KOBJ();
13 class Obj_cap : public L4_obj_ref
18 // ---------------------------------------------------------------------------
24 #include "processor.h"
26 #include "thread_state.h"
29 FIASCO_DEFINE_KOBJ(Thread_object);
// Wrap an existing L4 object reference as a capability handle (no lookup yet).
33 Obj_cap::Obj_cap(L4_obj_ref const &o) : L4_obj_ref(o) {}
// Resolve this capability to a kernel object.
// NOTE(review): this extract is line-sampled; statements between the lines
// below are elided, so the exact control flow (braces, returns) is hedged.
35 PUBLIC inline NEEDS["kobject.h"]
// Optionally reports the rights of the capability via *rights.
37 Obj_cap::deref(unsigned char *rights = 0, bool dbg = false)
39 Thread *current = current_thread();
// Special case: a reply capability refers to the current caller thread.
40 if (flags() & L4_obj_ref::Ipc_reply)
42 if (rights) *rights = current->caller_rights();
43 Thread *ca = static_cast<Thread*>(current->caller());
// Reply caps are one-shot: clear the caller link on use.
45 current->set_caller(0,0);
// Invalid cap selector: appears to fall back to the current thread with
// full rights (self-reference convention) — elided branch, confirm.
49 if (EXPECT_FALSE(invalid()))
54 if (rights) *rights = L4_fpage::RWX;
55 return current_thread();
// Normal path: look the capability up in the caller's object space.
58 return current->space()->obj_space()->lookup_local(cap(), rights);
// Re-check that this capability still resolves to the given object.
// NOTE(review): body elided in this extract — presumably compares deref()
// against o; confirm against the full source.
61 PUBLIC inline NEEDS["kobject.h"]
63 Obj_cap::revalidate(Kobject_iface *o)
// Default constructor: delegates fully to Thread.
69 Thread_object::Thread_object() : Thread() {}
// Kernel-mode constructor variant: delegates to the Thread kernel-context ctor.
72 Thread_object::Thread_object(Context_mode_kernel k) : Thread(k) {}
// Drop one reference; true when the count hit zero (object may be freed).
// NOTE(review): the enclosing signature is elided in this extract —
// presumably the Kobject put()/reference-drop hook; confirm.
77 { return dec_ref() == 0; }
81 /** Deallocator. This function currently does nothing: We do not free up
82     space allocated to thread-control blocks.
// NOTE(review): despite the comment above, the visible body DOES return the
// TCB storage to the quota-tracked allocator — the doc comment looks stale.
86 Thread_object::operator delete(void *_t)
88   Thread_object * const t = nonull_static_cast<Thread_object*>(_t);
// Quota is read before the free so the freed bytes are charged back
// to the right Ram_quota.
89   Ram_quota * const q = t->_quota;
90   Mapped_allocator::allocator()->q_unaligned_free(q, Config::thread_block_size, t);
// Trace the destruction (entry payload lines elided in this extract).
92   LOG_TRACE("Kobject delete", "del", current(), __fmt_kobj_destroy,
93             Log_destroy *l = tbe->payload<Log_destroy>();
97             l->ram = q->current());
// Tear down the thread object: run generic Kobject destruction, then
// sanity-check the thread really reached the dead state.
// NOTE(review): lines between the statements below are elided in this extract.
103 Thread_object::destroy(Kobject ***rl)
105   Kobject::destroy(rl);
// Kernel-debugger assertions: the thread must be dead and its TCB magic intact.
108   assert_kdb(state() == Thread_dead);
110   assert_kdb(_magic == magic);
// Syscall dispatch for invocations on a Thread_object capability.
// Plain IPC flags are handled inline; protocol Label_thread messages are
// routed to the sys_* handlers below.
// NOTE(review): this extract is line-sampled — case labels/breaks between the
// visible lines are elided; the mapping of opcodes to handlers is inferred.
116 Thread_object::invoke(L4_obj_ref /*self*/, Mword rights, Syscall_frame *f, Utcb *utcb)
118   register unsigned flags = f->ref().flags();
// Anything that is not a plain send (or is a reply) with the thread protocol
// is treated as ordinary IPC addressed to this thread, not a thread syscall.
119   if (((flags != 0) && !(flags & L4_obj_ref::Ipc_send))
120       || (flags & L4_obj_ref::Ipc_reply)
121       || f->tag().proto() != L4_msg_tag::Label_thread)
124       Thread *ct = current_thread();
127       bool have_rcv = false;
129       if (EXPECT_FALSE(!check_sys_ipc(flags, &partner, &sender, &have_rcv)))
131           utcb->error = L4_error::Not_existent;
// Perform the IPC on behalf of the caller.
135       ct->do_ipc(f->tag(), partner, partner, have_rcv, sender,
136                  f->timeout(), f, rights);
// Thread-protocol invocation: dispatch on the opcode in values[0].
140   switch (utcb->values[0] & Opcode_mask)
143       f->tag(sys_control(rights, f->tag(), utcb));
146       f->tag(sys_ex_regs(f->tag(), utcb));
149       f->tag(sys_thread_switch(f->tag(), utcb));
152       f->tag(sys_thread_stats(f->tag(), utcb));
155       f->tag(sys_vcpu_resume(f->tag(), utcb));
157     case Op_register_del_irq:
158       f->tag(sys_register_delete_irq(f->tag(), utcb, utcb));
160     case Op_modify_senders:
161       f->tag(sys_modify_senders(f->tag(), utcb, utcb));
// Unknown opcode: give the architecture-specific hook a chance, else ENosys.
164       L4_msg_tag tag = f->tag();
165       if (invoke_arch(tag, utcb))
168         f->tag(commit_result(-L4_err::ENosys));
// Resume vCPU execution for this thread: optionally (re)bind the user task,
// map any fpages sent with the message, deliver a pending-IRQ IPC upcall,
// then return to vCPU user mode.
// NOTE(review): line-sampled extract — braces/else branches between visible
// lines are elided; comments hedge where the elided code matters.
175 Thread_object::sys_vcpu_resume(L4_msg_tag const &tag, Utcb *utcb)
// Only the current thread, with vCPU mode enabled, may resume itself.
177   if (this != current() || !(state() & Thread_vcpu_enabled))
178     return commit_result(-L4_err::EInval);
180   Obj_space *s = space()->obj_space();
181   Vcpu_state *vcpu = access_vcpu(true);
// Optionally (re)bind the vCPU user task named in the vCPU state.
183   L4_obj_ref user_task = vcpu->user_task;
184   if (user_task.valid())
186       unsigned char task_rights = 0;
187       Task *task = Kobject::dcast<Task*>(s->lookup_local(user_task.cap(),
// Binding requires write rights on the task capability.
190       if (EXPECT_FALSE(task && !(task_rights & L4_fpage::W)))
191         return commit_result(-L4_err::EPerm);
193       if (task != vcpu_user_space())
194         vcpu_set_user_space(task);
// Consume the request so it is not re-applied on the next resume.
196       vcpu->user_task = L4_obj_ref();
// Reply flag alone appears to mean "unbind the user task" — confirm.
198   else if (user_task.flags() == L4_obj_ref::Ipc_reply)
199     vcpu_set_user_space(0);
// Map all send-items of the message into the vCPU user space.
201   L4_snd_item_iter snd_items(utcb, tag.words());
202   int items = tag.items();
203   for (; items && snd_items.more(); --items)
205       if (EXPECT_FALSE(!snd_items.next()))
210       L4_snd_item_iter::Item const *const item = snd_items.get();
211       L4_fpage sfp(item->d);
214       L4_error err = fpage_map(space(), sfp,
215                                vcpu_user_space(), L4_fpage::all_spaces(),
221       if (EXPECT_FALSE(!err.ok()))
222         return commit_error(utcb, err);
// Re-fetch: access_vcpu() may have been invalidated by the mapping above
// (elided code in between) — presumably why it is re-read here; confirm.
226   vcpu = access_vcpu(true);
// If IRQs are enabled in the saved state and one is pending, synthesize an
// IPC upcall by receiving with zero timeout into the vCPU IPC registers.
228   if ((vcpu->_saved_state & Vcpu_state::F_irqs)
229       && (vcpu->sticky_flags & Vcpu_state::Sf_irq_pending))
231       assert_kdb(cpu_lock.test());
232       do_ipc(L4_msg_tag(), 0, 0, true, 0,
233              L4_timeout_pair(L4_timeout::Zero, L4_timeout::Zero),
234              &vcpu->_ipc_regs, 7);
236       vcpu = access_vcpu(true);
238       if (EXPECT_TRUE(!vcpu->_ipc_regs.tag().has_error()))
240           vcpu->_ts.set_ipc_upcall();
// Entry stack depends on whether the vCPU was running in user mode.
244           if (vcpu->_saved_state & Vcpu_state::F_user_mode)
245             sp = vcpu->_entry_sp;
249           LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
250                     Vcpu_log *l = tbe->payload<Vcpu_log>();
252                     l->state = vcpu->state;
253                     l->ip = vcpu->_entry_ip;
255                     l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
// Deliver the upcall: jump straight to the vCPU entry point.
258           fast_return_to_user(vcpu->_entry_ip, sp);
262   // --- CUT here for VM stuff
// Normal resume: restore the saved state and trap frame and return to user.
263   vcpu->state = vcpu->_saved_state;
265   memcpy(&ts, &vcpu->_ts, sizeof(Trap_state));
268   assert_kdb(cpu_lock.test());
// Prevent the user from smuggling privileged bits into the trap frame.
272   ts.sanitize_user_state();
274   if (vcpu->state & Vcpu_state::F_user_mode)
// User mode requires a bound user task.
276       if (!vcpu_user_space())
277         return commit_result(-L4_err::EInval);
// User-mode execution forces trap/exception interception back to the vCPU.
279       vcpu->state |= Vcpu_state::F_traps | Vcpu_state::F_exceptions
280                      | Vcpu_state::F_debug_exc;
281       state_add_dirty(Thread_vcpu_user_mode);
// Track whether FPU use should fault back to the vCPU handler.
283       if (!(vcpu->state & Vcpu_state::F_fpu_enabled))
285           state_add_dirty(Thread_vcpu_fpu_disabled);
289         state_del_dirty(Thread_vcpu_fpu_disabled);
291       vcpu_resume_user_arch();
// Switch the address space to the vCPU user task.
293       vcpu_user_space()->switchin_context(space());
296   LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
297             Vcpu_log *l = tbe->payload<Vcpu_log>();
299             l->state = vcpu->state;
302             l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
// Does not return on success: continues execution in the restored frame.
305   vcpu_resume(&ts, regs());
// Rewrite the labels of senders queued on this thread, preemptibly.
// The sender list cursor persists the walk position across preemption points.
// NOTE(review): line-sampled extract — loop braces and some statements are
// elided between the visible lines.
311 PRIVATE inline NOEXPORT NEEDS["processor.h"]
313 Thread_object::sys_modify_senders(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
// A non-null cursor means another modify pass is still in progress.
315   if (sender_list()->cursor())
316     return Kobject_iface::commit_result(-L4_err::EBusy);
// Debug output of the requested label rewrite rules.
319   printf("MODIFY ID (%08lx:%08lx->%08lx:%08lx\n",
320          in->values[1], in->values[2],
321          in->values[3], in->values[4]);
324   int elems = tag.words();
327     return Kobject_iface::commit_result(0);
333   ::Prio_list_elem *c = sender_list()->head();
336   // this is kind of arbitrary
// Process at most 50 senders between preemption points to bound latency.
337   for (int cnt = 50; c && cnt > 0; --cnt)
339       Sender *s = Sender::cast(c);
340       s->modify_label(&in->values[1], elems);
345     return Kobject_iface::commit_result(0);
// List not exhausted: park the cursor, allow preemption, then continue
// from wherever the cursor points afterwards.
347   sender_list()->cursor(c);
348   Proc::preemption_point();
349   c = sender_list()->cursor();
351   return Kobject_iface::commit_result(0);
// Register an IRQ object to be triggered when this thread is deleted.
// The IRQ capability is passed as the first send item of the message.
354 PRIVATE inline NOEXPORT
356 Thread_object::sys_register_delete_irq(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
358   L4_snd_item_iter snd_items(in, tag.words());
// Exactly one send item carrying the IRQ fpage is required.
360   if (!tag.items() || !snd_items.next())
361     return Kobject_iface::commit_result(-L4_err::EInval);
363   L4_fpage bind_irq(snd_items.get()->d);
364   if (EXPECT_FALSE(!bind_irq.is_objpage()))
365     return Kobject_iface::commit_error(in, L4_error::Overflow);
// Resolve the IRQ capability in the *caller's* object space.
367   register Context *const c_thread = ::current();
368   register Space *const c_space = c_thread->space();
369   register Obj_space *const o_space = c_space->obj_space();
370   unsigned char irq_rights = 0;
372     = Irq_base::dcast(o_space->lookup_local(bind_irq.obj_index(), &irq_rights));
// Elided check above presumably rejects a null/non-IRQ object.
375     return Kobject_iface::commit_result(-L4_err::EInval);
// Binding requires execute ("trigger") rights on the IRQ capability.
377   if (EXPECT_FALSE(!(irq_rights & L4_fpage::X)))
378     return Kobject_iface::commit_result(-L4_err::EPerm);
380   register_delete_irq(irq);
381   return Kobject_iface::commit_result(0);
// Thread control syscall: set pager / exception handler, bind to a task,
// configure vCPU and alien-thread state. Returns old pager/exc-handler caps.
// NOTE(review): line-sampled extract — several statements and braces between
// the visible lines are elided.
385 PRIVATE inline NOEXPORT
387 Thread_object::sys_control(unsigned char rights, L4_msg_tag const &tag, Utcb *utcb)
// Caller needs write rights on this thread's capability.
389   if (EXPECT_FALSE(!(rights & L4_fpage::W)))
390     return commit_result(-L4_err::EPerm);
392   if (EXPECT_FALSE(tag.words() < 6))
393     return commit_result(-L4_err::EInval);
395   Context *curr = current();
396   Obj_space *s = curr->space()->obj_space();
397   L4_snd_item_iter snd_items(utcb, tag.words());
401   Mword flags = utcb->values[0];
// Capture old values (as cap selectors) for the reply before overwriting.
403   Mword _old_pager = _pager.raw() << L4_obj_ref::Cap_shift;
404   Mword _old_exc_handler = _exc_handler.raw() << L4_obj_ref::Cap_shift;
// ~0UL sentinel means "leave unchanged" unless the flag selects a new value.
406   Thread_ptr _new_pager(~0UL);
407   Thread_ptr _new_exc_handler(~0UL);
409   if (flags & Ctl_set_pager)
410     _new_pager = Thread_ptr(utcb->values[1] >> L4_obj_ref::Cap_shift);
412   if (flags & Ctl_set_exc_handler)
413     _new_exc_handler = Thread_ptr(utcb->values[2] >> L4_obj_ref::Cap_shift);
// Optional task binding: task cap arrives as the first send item.
415   if (flags & Ctl_bind_task)
417       if (EXPECT_FALSE(!tag.items() || !snd_items.next()))
418         return commit_result(-L4_err::EInval);
420       L4_fpage bind_task(snd_items.get()->d);
422       if (EXPECT_FALSE(!bind_task.is_objpage()))
423         return commit_result(-L4_err::EInval);
425       unsigned char task_rights = 0;
426       task = Kobject::dcast<Task*>(s->lookup_local(bind_task.obj_index(), &task_rights));
428       if (EXPECT_FALSE(!(task_rights & L4_fpage::W)))
429         return commit_result(-L4_err::EPerm);
// Elided condition above — presumably a null/invalid-task check.
432         return commit_result(-L4_err::EInval);
// values[5] carries the user-visible UTCB address for the binding.
434       utcb_addr = (void*)utcb->values[5];
// Apply the accumulated changes in one step.
437   long res = control(_new_pager, _new_exc_handler,
438                      task, utcb_addr, flags & Ctl_vcpu_enabled,
439                      utcb->values[4] & Ctl_vcpu_enabled);
442     return commit_result(res);
444   if ((res = sys_control_arch(utcb)) < 0)
445     return commit_result(res);
448   // FIXME: must be done xcpu safe, may be some parts above too
449   Lock_guard<Cpu_lock> guard(&cpu_lock);
// Toggle alien (syscall-trapping) mode under the CPU lock.
450   if (flags & Ctl_alien_thread)
452       if (utcb->values[4] & Ctl_alien_thread)
453         state_change_dirty (~Thread_dis_alien, Thread_alien, false);
455         state_del_dirty(Thread_alien, false);
// Reply with the previous pager and exception-handler capabilities.
459   utcb->values[1] = _old_pager;
460   utcb->values[2] = _old_exc_handler;
462   return commit_result(0, 3);
465 // -------------------------------------------------------------------
466 // Thread::ex_regs class system calls
// Exchange-registers core: read/modify IP, SP and flags of this thread,
// optionally cancelling in-flight IPC or triggering an exception.
// Old values are written through the optional out-pointers.
// NOTE(review): line-sampled extract — return statements and some branches
// between the visible lines are elided.
470 Thread_object::ex_regs(Address ip, Address sp,
471                        Address *o_ip = 0, Address *o_sp = 0, Mword *o_flags = 0,
// Cannot operate on an uninitialized thread or one with no address space.
474   if (state(false) == Thread_invalid || !space())
// Elided: special handling when the target is the calling thread itself.
477   if (current() == this)
// Report current user-visible register state before any modification.
480   if (o_sp) *o_sp = user_sp();
481   if (o_ip) *o_ip = user_ip();
482   if (o_flags) *o_flags = user_flags();
484   // Changing the run state is only possible when the thread is not in
486   if (!(ops & Exr_cancel) && (state(false) & Thread_in_exception))
487     // XXX Maybe we should return false here.  Previously, we actually
488     // did so, but we also actually didn't do any state modification.
489     // If you change this value, make sure the logic in
490     // sys_thread_ex_regs still works (in particular,
491     // ex_regs_cap_handler and friends should still be called).
494   if (state(false) & Thread_dead)	// resurrect thread
495     state_change_dirty (~Thread_dead, Thread_ready, false);
497   else if (ops & Exr_cancel)
498     // cancel ongoing IPC or other activity
499     state_change_dirty (~(Thread_ipc_in_progress | Thread_delayed_deadline |
500                         Thread_delayed_ipc), Thread_cancel | Thread_ready, false);
502   if (ops & Exr_trigger_exception)
// Redirect the return path so the thread raises an exception on resume.
504       extern char leave_by_trigger_exception[];
505       do_trigger_exception(regs(), leave_by_trigger_exception);
// Elided tail: self-modification presumably needs no remote request.
514   if (current() == this)
// UTCB-based ex_regs wrapper: unpack op/IP/SP from the UTCB, run the core
// ex_regs, and write the previous values back into the UTCB on success.
522 Thread_object::ex_regs(Utcb *utcb)
524   Address ip = utcb->values[1];
525   Address sp = utcb->values[2];
527   Mword ops = utcb->values[0];
529   LOG_TRACE("Ex-regs", "exr", current(), __fmt_thread_exregs,
530             Log_thread_exregs *l = tbe->payload<Log_thread_exregs>();
532             l->ip = ip; l->sp = sp; l->op = ops;);
// ip/sp/flags are overwritten in place with the previous values.
534   if (!ex_regs(ip, sp, &ip, &sp, &flags, ops))
535     return commit_result(-L4_err::EInval);
537   utcb->values[0] = flags;
538   utcb->values[1] = ip;
539   utcb->values[2] = sp;
// Reply carries three output words.
541   return commit_result(0, 3);
// DRQ handler: execute ex_regs on the target thread's CPU on behalf of the
// requesting thread (params->thread), storing the result tag in params.
546 Thread_object::handle_remote_ex_regs(Drq *, Context *self, void *p)
548   Remote_syscall *params = reinterpret_cast<Remote_syscall*>(p);
549   params->result = nonull_static_cast<Thread_object*>(self)->ex_regs(params->thread->access_utcb());
// Request a reschedule only on success (protocol label 0).
550   return params->result.proto() == 0 ? Drq::Need_resched : 0;
// ex_regs syscall entry: validate the message size and forward the request
// to the target thread's CPU via a DRQ (cross-CPU safe execution).
553 PRIVATE inline NOEXPORT
555 Thread_object::sys_ex_regs(L4_msg_tag const &tag, Utcb * /*utcb*/)
// Exactly three words: op, ip, sp.
557   if (tag.words() != 3)
558     return commit_result(-L4_err::EInval);
560   Remote_syscall params;
561   params.thread = current_thread();
// Blocks until handle_remote_ex_regs has run on the target's CPU.
563   drq(handle_remote_ex_regs, &params, 0, Drq::Any_ctxt);
564   return params.result;
// thread_switch syscall: donate the caller's timeslice to this thread if it
// is ready on the caller's CPU; otherwise return without switching.
// NOTE(review): line-sampled extract — the early-return conditions' guards
// are partially elided.
567 PRIVATE inline NOEXPORT NEEDS["timer.h"]
569 Thread_object::sys_thread_switch(L4_msg_tag const &/*tag*/, Utcb *utcb)
571   Context *curr = current();
// Elided condition — presumably switching to oneself is a no-op.
574     return commit_result(0);
// Cross-CPU switch is not performed; silently succeed.
576   if (current_cpu() != cpu())
577     return commit_result(0);
580   Sched_context * const cs = current_sched();
// Only switch if the target is ready and not suspended.
584       && ((state() & (Thread_ready | Thread_suspended)) == Thread_ready))
586       curr->switch_exec_schedule_locked (this, Not_Helping);
587       reinterpret_cast<Utcb::Time_val*>(utcb->values)->t = 0; // Assume timeslice was used up
588       return commit_result(0, Utcb::Time_val::Words);
591 #if 0 // FIXME: provide API for multiple sched contexts
592       // Compute remaining quantum length of timeslice
593       regs->left (timeslice_timeout.cpu(cpu())->get_timeout(Timer::system_clock()));
595       // Yield current global timeslice
596       cs->owner()->switch_sched (cs->id() ? cs->next() : cs);
// Report the remaining time of the (yielded) timeslice to the caller.
598   reinterpret_cast<Utcb::Time_val*>(utcb->values)->t
599     = timeslice_timeout.cpu(current_cpu())->get_timeout(Timer::system_clock());
602   return commit_result(0, Utcb::Time_val::Words);
607 // -------------------------------------------------------------------
608 // Gather statistics information about thread execution
// Runs on the thread's own CPU: refresh and read out the consumed CPU time
// into the caller-provided Clock::Time slot.
612 Thread_object::sys_thread_stats_remote(void *data)
614   update_consumed_time();
615   *(Clock::Time *)data = consumed_time();
// DRQ trampoline: forward to the member function on the target thread.
621 Thread_object::handle_sys_thread_stats_remote(Drq *, Context *self, void *data)
623   return nonull_static_cast<Thread_object*>(self)->sys_thread_stats_remote(data);
// thread_stats syscall: report this thread's consumed CPU time.
// Reads remotely via DRQ when the thread lives on another CPU.
626 PRIVATE inline NOEXPORT
628 Thread_object::sys_thread_stats(L4_msg_tag const &/*tag*/, Utcb *utcb)
// Remote thread: fetch the value on its home CPU (blocks until done).
632   if (cpu() != current_cpu())
633     drq(handle_sys_thread_stats_remote, &value, 0, Drq::Any_ctxt);
636       // Respect the fact that the consumed time is only updated on context switch
637       if (this == current())
638         update_consumed_time();
639       value = consumed_time();
642   reinterpret_cast<Utcb::Time_val *>(utcb->values)->t = value;
644   return commit_result(0, Utcb::Time_val::Words);