// NOTE(review): this chunk is a sampled extract of a Fiasco.OC-style kernel
// source file -- every line carries a stray original-line-number prefix and
// many intervening lines (braces, break statements, declarations) are
// missing.  All annotations below describe only what the visible lines show.
// Thread_object: kernel object wrapper exposing Thread via syscall invoke().
6 class Thread_object : public Thread
// Obj_cap: a capability selector with dereference/revalidate helpers.
16 class Obj_cap : public L4_obj_ref
21 // ---------------------------------------------------------------------------
28 #include "processor.h"
30 #include "thread_state.h"
// Wrap an existing L4 object reference in an Obj_cap (plain copy; no lookup
// or validation happens here).
36 Obj_cap::Obj_cap(L4_obj_ref const &o) : L4_obj_ref(o) {}
// Resolve this capability to a kernel object for the current thread.
// Visible behavior:
//  - Ipc_reply operations: return the recorded caller (rights from
//    caller_rights()) and consume it via set_caller(0, Rights(0)).
//  - special() capabilities: return the current thread itself with RWX.
//  - otherwise: a normal lookup_local() in the current thread's space.
// NOTE(review): several body lines (return type, braces, returns) are
// missing from this view -- do not infer the full control flow from it.
38 PUBLIC inline NEEDS["kobject.h"]
40 Obj_cap::deref(L4_fpage::Rights *rights = 0, bool dbg = false)
42   Thread *current = current_thread();
// Reply capability: hand back the saved caller and clear it (one-shot).
43   if (op() & L4_obj_ref::Ipc_reply)
45       if (rights) *rights = current->caller_rights();
46       Thread *ca = static_cast<Thread*>(current->caller());
48       current->set_caller(0, L4_fpage::Rights(0));
// Special (self) capability: the invoking thread, full rights.
52   if (EXPECT_FALSE(special()))
57       if (rights) *rights = L4_fpage::Rights::RWX();
58       return current_thread();
// Regular capability: look it up in the caller's object space.
61   return current->space()->lookup_local(cap(), rights);
// Re-check that this capability still refers to object 'o'.
// NOTE(review): the body is not visible in this view -- presumably it
// re-runs deref() and compares against 'o'; confirm against the full file.
64 PUBLIC inline NEEDS["kobject.h"]
66 Obj_cap::revalidate(Kobject_iface *o)
// Default constructor: plain user-thread construction via Thread().
72 Thread_object::Thread_object() : Thread() {}
// Kernel-context constructor: forwards the Context_mode_kernel tag.
75 Thread_object::Thread_object(Context_mode_kernel k) : Thread(k) {}
// Body fragment: true when the reference count drops to zero -- presumably
// Thread_object::put(); the signature line is missing from this view.
80 { return dec_ref() == 0; }
// Custom operator delete: return the thread's storage (Thread::Size bytes)
// to the kernel allocator, charged against the owning Ram_quota, and emit a
// "Kobject delete" trace record with the quota's remaining balance.
86 Thread_object::operator delete(void *_t)
88   Thread_object * const t = nonull_static_cast<Thread_object*>(_t);
// Grab the quota before the object memory is freed.
89   Ram_quota * const q = t->_quota;
90   Kmem_alloc::allocator()->q_unaligned_free(q, Thread::Size, t);
92   LOG_TRACE("Kobject delete", "del", current(), Log_destroy,
96       l->ram = q->current());
// Destroy the kobject part first (queuing on the reap list 'rl'), then
// sanity-check the thread's magic marker is still intact.
102 Thread_object::destroy(Kobject ***rl)
104   Kobject::destroy(rl);
106   assert_kdb(_magic == magic);
// Syscall entry point for thread capabilities.
// Two paths are visible: (1) a plain IPC send/call to this thread when the
// tag's protocol is not Label_thread, performed via do_ipc(); (2) a
// thread-protocol opcode dispatch (utcb->values[0] & Opcode_mask) to the
// sys_* handlers below.
// NOTE(review): case labels, break statements and closing braces are
// missing from this view; the dispatch order shown is from the surviving
// f->tag(...) lines only.
111 Thread_object::invoke(L4_obj_ref /*self*/, L4_fpage::Rights rights, Syscall_frame *f, Utcb *utcb)
113   register L4_obj_ref::Operation op = f->ref().op();
// Reject operation/protocol combinations that are neither a valid IPC nor
// a thread-protocol invocation.
114   if (((op != 0) && !(op & L4_obj_ref::Ipc_send))
115       || (op & L4_obj_ref::Ipc_reply)
116       || f->tag().proto() != L4_msg_tag::Label_thread)
119       Thread *ct = current_thread();
122       bool have_rcv = false;
124       if (EXPECT_FALSE(!check_sys_ipc(op, &partner, &sender, &have_rcv)))
126           utcb->error = L4_error::Not_existent;
// Plain IPC path: this thread is the partner.
130       ct->do_ipc(f->tag(), partner, partner, have_rcv, sender,
131                  f->timeout(), f, rights);
// Thread-protocol path: dispatch on the opcode in the first UTCB word.
135   switch (utcb->values[0] & Opcode_mask)
138       f->tag(sys_control(rights, f->tag(), utcb));
141       f->tag(sys_ex_regs(f->tag(), utcb));
144       f->tag(sys_thread_switch(f->tag(), utcb));
147       f->tag(sys_thread_stats(f->tag(), utcb));
150       f->tag(sys_vcpu_resume(f->tag(), utcb));
152     case Op_register_del_irq:
153       f->tag(sys_register_delete_irq(f->tag(), utcb, utcb));
155     case Op_modify_senders:
156       f->tag(sys_modify_senders(f->tag(), utcb, utcb));
158     case Op_vcpu_control:
159       f->tag(sys_vcpu_control(rights, f->tag(), utcb));
// Unknown opcodes fall through to the architecture-specific handler.
162       f->tag(invoke_arch(f->tag(), utcb));
// Resume vCPU execution for this thread (Op_vcpu_resume).
// Visible steps: validate caller identity and vCPU mode; optionally
// (re)bind the user task named in vcpu->user_task; map any send items into
// the vCPU user space; deliver a pending IRQ as an IPC upcall; then either
// fast-return to the vCPU entry point or resume into the target space.
// NOTE(review): many statements (returns, braces, some declarations such
// as 's' and 'sp') are missing from this view.
170 Thread_object::sys_vcpu_resume(L4_msg_tag const &tag, Utcb *utcb)
// Only the thread itself may resume, and only with vCPU mode enabled.
172   if (this != current() || !(state() & Thread_vcpu_enabled))
173     return commit_result(-L4_err::EInval);
176   Vcpu_state *vcpu = vcpu_state().access(true);
178   L4_obj_ref user_task = vcpu->user_task;
179   if (user_task.valid())
181       L4_fpage::Rights task_rights = L4_fpage::Rights(0);
// NOTE(review): 's' here is presumably the caller's Space -- its
// declaration is not visible in this view.
182       Task *task = Kobject::dcast<Task*>(s->lookup_local(user_task.cap(),
// Binding a user task requires write rights on its capability.
185       if (EXPECT_FALSE(task && !(task_rights & L4_fpage::Rights::W())))
186         return commit_result(-L4_err::EPerm);
188       if (task != vcpu_user_space())
189         vcpu_set_user_space(task);
// Consume the reference so the bind is one-shot.
191       vcpu->user_task = L4_obj_ref();
// An Ipc_reply-op reference means "detach the user task".
193   else if (user_task.op() == L4_obj_ref::Ipc_reply)
194     vcpu_set_user_space(0);
// Map the message's send items into the bound vCPU user space.
196   L4_snd_item_iter snd_items(utcb, tag.words());
197   int items = tag.items();
198   if (vcpu_user_space())
199     for (; items && snd_items.more(); --items)
201         if (EXPECT_FALSE(!snd_items.next()))
// Hold the target task's existence lock across the map.
204         Lock_guard<Lock> guard;
205         if (!guard.check_and_lock(&static_cast<Task *>(vcpu_user_space())->existence_lock))
206           return commit_result(-L4_err::ENoent);
210         L4_snd_item_iter::Item const *const item = snd_items.get();
211         L4_fpage sfp(item->d);
214         L4_error err = fpage_map(space(), sfp,
215                                  vcpu_user_space(), L4_fpage::all_spaces(),
221         if (EXPECT_FALSE(!err.ok()))
222           return commit_error(utcb, err);
// If IRQs were enabled in the saved state and one is pending, receive it
// now via a zero-timeout IPC and report it as an upcall.
225   if ((vcpu->_saved_state & Vcpu_state::F_irqs)
226       && (vcpu->sticky_flags & Vcpu_state::Sf_irq_pending))
228       assert_kdb(cpu_lock.test());
229       do_ipc(L4_msg_tag(), 0, 0, true, 0,
230              L4_timeout_pair(L4_timeout::Zero, L4_timeout::Zero),
231              &vcpu->_ipc_regs, L4_fpage::Rights::FULL());
// The UTCB/vcpu mapping may have changed across do_ipc; re-access it.
233       vcpu = vcpu_state().access(true);
235       if (EXPECT_TRUE(!vcpu->_ipc_regs.tag().has_error()
236                       || this->utcb().access(true)->error.error() == L4_error::R_timeout))
238           vcpu->_ts.set_ipc_upcall();
242           // tried to resume to user mode, so an IRQ enters from user mode
243           if (vcpu->_saved_state & Vcpu_state::F_user_mode)
244             sp = vcpu->_entry_sp;
248           arch_load_vcpu_kern_state(vcpu, true);
250           LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log,
252               l->state = vcpu->state;
253               l->ip = vcpu->_entry_ip;
255               l->space = static_cast<Task*>(_space.vcpu_aware())->dbg_id();
// Upcall path: jump straight to the vCPU entry vector.
258           fast_return_to_user(vcpu->_entry_ip, sp, vcpu_state().usr().get());
// Normal resume path: restore the saved state and pick the target space.
262   vcpu->state = vcpu->_saved_state;
263   Task *target_space = nonull_static_cast<Task*>(space());
264   bool user_mode = false;
266   if (vcpu->state & Vcpu_state::F_user_mode)
// User-mode resume requires a bound user task.
268       if (!vcpu_user_space())
269         return commit_result(-L4_err::ENoent);
// Track the vCPU's FPU-disable request in the thread state bits.
273       if (!(vcpu->state & Vcpu_state::F_fpu_enabled))
275           state_add_dirty(Thread_vcpu_fpu_disabled);
279         state_del_dirty(Thread_vcpu_fpu_disabled);
281       target_space = static_cast<Task*>(vcpu_user_space());
283       arch_load_vcpu_user_state(vcpu, true);
286   LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log,
288       l->state = vcpu->state;
289       l->ip = vcpu->_ts.ip();
290       l->sp = vcpu->_ts.sp();
291       l->space = target_space->dbg_id();
294   return commit_result(target_space->resume_vcpu(this, vcpu, user_mode));
// Rewrite the labels of senders queued on this thread (Op_modify_senders).
// Walks the sender list in batches of 50, relabeling via modify_label(),
// and takes a preemption point between batches with the position parked in
// the list cursor; busy if another modification is already in progress.
// NOTE(review): braces and some statements are missing from this view, so
// the exact loop structure cannot be confirmed here.
297 PRIVATE inline NOEXPORT NEEDS["processor.h"]
299 Thread_object::sys_modify_senders(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
// A non-null cursor means a previous modify pass is still in flight.
301   if (sender_list()->cursor())
302     return Kobject_iface::commit_result(-L4_err::EBusy);
305       printf("MODIFY ID (%08lx:%08lx->%08lx:%08lx\n",
306              in->values[1], in->values[2],
307              in->values[3], in->values[4]);
310   int elems = tag.words();
313     return Kobject_iface::commit_result(0);
319       ::Prio_list_elem *c = sender_list()->first();
322       // this is kind of arbitrary
// Bounded batch: relabel at most 50 senders before preempting.
323       for (int cnt = 50; c && cnt > 0; --cnt)
325           Sender *s = Sender::cast(c);
326           s->modify_label(&in->values[1], elems);
327           c = sender_list()->next(c);
331         return Kobject_iface::commit_result(0);
// Park the position in the cursor so concurrent callers see EBusy, then
// allow preemption before the next batch.
333       sender_list()->cursor(c);
334       Proc::preemption_point();
335       c = sender_list()->cursor();
337   return Kobject_iface::commit_result(0);
// Bind an IRQ object to be triggered when this thread is deleted
// (Op_register_del_irq).  The IRQ capability comes as the first send item;
// it must be an object flexpage with X rights in the caller's space.
340 PRIVATE inline NOEXPORT
342 Thread_object::sys_register_delete_irq(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
344   L4_snd_item_iter snd_items(in, tag.words());
346   if (!tag.items() || !snd_items.next())
347     return Kobject_iface::commit_result(-L4_err::EInval);
349   L4_fpage bind_irq(snd_items.get()->d);
350   if (EXPECT_FALSE(!bind_irq.is_objpage()))
351     return Kobject_iface::commit_error(in, L4_error::Overflow);
// Look the IRQ up in the *caller's* space, not this thread's.
353   register Context *const c_thread = ::current();
354   register Space *const c_space = c_thread->space();
355   L4_fpage::Rights irq_rights = L4_fpage::Rights(0);
357     = Irq_base::dcast(c_space->lookup_local(bind_irq.obj_index(), &irq_rights));
360     return Kobject_iface::commit_result(-L4_err::EInval);
// Triggering on delete requires execute rights on the IRQ capability.
362   if (EXPECT_FALSE(!(irq_rights & L4_fpage::Rights::X())))
363     return Kobject_iface::commit_result(-L4_err::EPerm);
365   register_delete_irq(irq);
366   return Kobject_iface::commit_result(0);
// Thread control (Op_control / l4_thread_control): set pager and exception
// handler, optionally bind the thread to a task with a given UTCB address,
// and toggle alien mode.  Requires W rights on the thread capability and at
// least 6 message words.  Returns the old pager/exc-handler caps in
// utcb->values[1]/[2] (3 output words on success).
// NOTE(review): several statements (e.g. the declarations of 'task',
// 'res', 'add_state'/'del_state', and some condition lines) are missing
// from this view.
370 PRIVATE inline NOEXPORT
372 Thread_object::sys_control(L4_fpage::Rights rights, L4_msg_tag const &tag, Utcb *utcb)
374   if (EXPECT_FALSE(!(rights & L4_fpage::Rights::W())))
375     return commit_result(-L4_err::EPerm);
377   if (EXPECT_FALSE(tag.words() < 6))
378     return commit_result(-L4_err::EInval);
380   Context *curr = current();
381   Space *s = curr->space();
382   L4_snd_item_iter snd_items(utcb, tag.words());
384   User<Utcb>::Ptr utcb_addr(0);
386   Mword flags = utcb->values[0];
// Capture the previous pager/exc-handler caps for the reply.
388   Mword _old_pager = cxx::int_value<Cap_index>(_pager.raw()) << L4_obj_ref::Cap_shift;
389   Mword _old_exc_handler = cxx::int_value<Cap_index>(_exc_handler.raw()) << L4_obj_ref::Cap_shift;
391   Thread_ptr _new_pager(Thread_ptr::Invalid);
392   Thread_ptr _new_exc_handler(Thread_ptr::Invalid);
394   if (flags & Ctl_set_pager)
395     _new_pager = Thread_ptr(Cap_index(utcb->values[1] >> L4_obj_ref::Cap_shift));
397   if (flags & Ctl_set_exc_handler)
398     _new_exc_handler = Thread_ptr(Cap_index(utcb->values[2] >> L4_obj_ref::Cap_shift));
// Bind-to-task: the task cap arrives as the first send item and must be an
// object flexpage with W rights in the caller's space.
400   if (flags & Ctl_bind_task)
402       if (EXPECT_FALSE(!tag.items() || !snd_items.next()))
403         return commit_result(-L4_err::EInval);
405       L4_fpage bind_task(snd_items.get()->d);
407       if (EXPECT_FALSE(!bind_task.is_objpage()))
408         return commit_result(-L4_err::EInval);
410       L4_fpage::Rights task_rights = L4_fpage::Rights(0);
411       task = Kobject::dcast<Task*>(s->lookup_local(bind_task.obj_index(), &task_rights));
413       if (EXPECT_FALSE(!(task_rights & L4_fpage::Rights::W())))
414         return commit_result(-L4_err::EPerm);
417         return commit_result(-L4_err::EInval);
// The UTCB address within the bound task comes from word 5.
419       utcb_addr = User<Utcb>::Ptr((Utcb*)utcb->values[5]);
421       if (EXPECT_FALSE(!bind(task, utcb_addr)))
422         return commit_result(-L4_err::EInval); // unbind first !!
428   long res = control(_new_pager, _new_exc_handler);
431     return commit_result(res);
433   if ((res = sys_control_arch(utcb)) < 0)
434     return commit_result(res);
436   // FIXME: must be done xcpu safe, may be some parts above too
437   if (flags & Ctl_alien_thread)
// Word 4 selects alien on/off; adjust the thread state bits accordingly.
439       if (utcb->values[4] & Ctl_alien_thread)
441           add_state |= Thread_alien;
442           del_state |= Thread_dis_alien;
445         del_state |= Thread_alien;
448   if (del_state || add_state)
449     drq_state_change(~del_state, add_state);
// Reply: previous pager and exception-handler capabilities.
451   utcb->values[1] = _old_pager;
452   utcb->values[2] = _old_exc_handler;
454   return commit_result(0, 3);
// Enable vCPU mode for this thread (Op_vcpu_control).  Word 1 names the
// user-visible Vcpu_state location, which must lie in kernel-user memory
// (find_ku_mem); bit 0x10000 of word 0 requests extended vCPU mode, which
// needs a full page of state instead of sizeof(Vcpu_state).
// NOTE(review): missing lines include the declarations of add_state /
// del_state and the conditions guarding the enable/disable branches; the
// comment below confirms disabling is intentionally not supported.
458 PRIVATE inline NOEXPORT
460 Thread_object::sys_vcpu_control(L4_fpage::Rights, L4_msg_tag const &tag,
464     return commit_result(-L4_err::EInval);
466   User<Vcpu_state>::Ptr vcpu(0);
468   if (tag.words() >= 2)
469     vcpu = User<Vcpu_state>::Ptr((Vcpu_state*)utcb->values[1]);
476       Mword size = sizeof(Vcpu_state);
// Extended vCPU state occupies a whole page.
477       if (utcb->values[0] & 0x10000)
479           size = Config::PAGE_SIZE;
480           add_state |= Thread_ext_vcpu_enabled;
// The vCPU state must live in kernel-provided user memory of this space.
483       Space::Ku_mem const *vcpu_m
484         = space()->find_ku_mem(vcpu, size);
487         return commit_result(-L4_err::EInval);
489       add_state |= Thread_vcpu_enabled;
490       _vcpu_state.set(vcpu, vcpu_m->kern_addr(vcpu));
492       Vcpu_state *s = _vcpu_state.access();
493       arch_init_vcpu_state(s, add_state & Thread_ext_vcpu_enabled);
494       arch_update_vcpu_state(s);
497     return commit_result(-L4_err::EInval);
499   /* hm, we do not allow to disable vCPU mode, it's one way enable
502   del_state |= Thread_vcpu_enabled | Thread_vcpu_user_mode
503                | Thread_vcpu_fpu_disabled | Thread_ext_vcpu_enabled;
507   drq_state_change(~del_state, add_state);
509   return commit_result(0);
513 // -------------------------------------------------------------------
514 // Thread::ex_regs class system calls
// Core ex_regs: read back the thread's current user IP/SP/flags, resurrect
// a dead thread, optionally cancel ongoing IPC (Exr_cancel) and/or trigger
// an exception (Exr_trigger_exception).  Defaulted out-pointers make the
// read-back optional.
// NOTE(review): the return-type line, the writes of the *new* ip/sp, and
// several closing braces are missing from this view.
518 Thread_object::ex_regs(Address ip, Address sp,
519                        Address *o_ip = 0, Address *o_sp = 0, Mword *o_flags = 0,
// Invalid or space-less threads cannot be manipulated.
522   if (state(false) == Thread_invalid || !space())
525   if (current() == this)
// Report the previous register values through the out-pointers.
528   if (o_sp) *o_sp = user_sp();
529   if (o_ip) *o_ip = user_ip();
530   if (o_flags) *o_flags = user_flags();
532   // Changing the run state is only possible when the thread is not in
534   if (!(ops & Exr_cancel) && (state() & Thread_in_exception))
535     // XXX Maybe we should return false here.  Previously, we actually
536     // did so, but we also actually didn't do any state modification.
537     // If you change this value, make sure the logic in
538     // sys_thread_ex_regs still works (in particular,
539     // ex_regs_cap_handler and friends should still be called).
542   if (state() & Thread_dead)	// resurrect thread
543     state_change_dirty(~Thread_dead, Thread_ready);
545   else if (ops & Exr_cancel)
546     // cancel ongoing IPC or other activity
547     state_add_dirty(Thread_cancel | Thread_ready);
549   if (ops & Exr_trigger_exception)
551       extern char leave_by_trigger_exception[];
552       do_trigger_exception(regs(), leave_by_trigger_exception);
561   if (current() == this)
// UTCB-level ex_regs wrapper: unpack ops/ip/sp from words 0..2, run the
// core ex_regs, and return the previous flags/ip/sp in the same words
// (3 output words on success).
569 Thread_object::ex_regs(Utcb *utcb)
571   Address ip = utcb->values[1];
572   Address sp = utcb->values[2];
574   Mword ops = utcb->values[0];
576   LOG_TRACE("Ex-regs", "exr", current(), Log_thread_exregs,
578             l->ip = ip; l->sp = sp; l->op = ops;);
// ip/sp are in-out: ex_regs overwrites them with the previous values.
580   if (!ex_regs(ip, sp, &ip, &sp, &flags, ops))
581     return commit_result(-L4_err::EInval);
583   utcb->values[0] = flags;
584   utcb->values[1] = ip;
585   utcb->values[2] = sp;
587   return commit_result(0, 3);
// DRQ handler running ex_regs on the target thread's own CPU; 'self' is
// the remote Thread_object, 'p' carries the caller's parameters/result.
592 Thread_object::handle_remote_ex_regs(Drq *, Context *self, void *p)
594   Remote_syscall *params = reinterpret_cast<Remote_syscall*>(p);
595   params->result = nonull_static_cast<Thread_object*>(self)->ex_regs(params->thread->utcb().access());
// Request a reschedule only when ex_regs succeeded (proto() == 0).
596   return params->result.proto() == 0 ? Drq::Need_resched : 0;
// Op_ex_regs entry: expects exactly 3 message words.  Runs ex_regs locally
// when the target is the current thread, otherwise ships the request to
// the target's CPU via a DRQ (handle_remote_ex_regs).
599 PRIVATE inline NOEXPORT
601 Thread_object::sys_ex_regs(L4_msg_tag const &tag, Utcb *utcb)
603   if (tag.words() != 3)
604     return commit_result(-L4_err::EInval);
606   if (current() == this)
607     return ex_regs(utcb);
609   Remote_syscall params;
610   params.thread = current_thread();
// drq() blocks until the remote handler has filled in params.result.
612   drq(handle_remote_ex_regs, &params, 0, Drq::Any_ctxt);
613   return params.result;
// Op_thread_switch: donate the CPU to this thread if it is ready and on
// the caller's CPU, reporting 0 remaining timeslice; otherwise a no-op
// success.  The multi-sched-context variant is disabled below (#if 0).
// NOTE(review): some early-out conditions and braces are missing from
// this view.
616 PRIVATE inline NOEXPORT NEEDS["timer.h"]
618 Thread_object::sys_thread_switch(L4_msg_tag const &/*tag*/, Utcb *utcb)
620   Context *curr = current();
623     return commit_result(0);
// Cross-CPU switching is not supported; just succeed without switching.
625   if (current_cpu() != cpu())
626     return commit_result(0);
629   Sched_context * const cs = current_sched();
632   if (curr != this && (state() & Thread_ready_mask))
// Direct switch without helping; the donated timeslice is reported as 0.
634       curr->switch_exec_schedule_locked (this, Not_Helping);
635       reinterpret_cast<Utcb::Time_val*>(utcb->values)->t = 0; // Assume timeslice was used up
636       return commit_result(0, Utcb::Time_val::Words);
639 #if 0 // FIXME: provide API for multiple sched contexts
640       // Compute remaining quantum length of timeslice
641       regs->left(timeslice_timeout.cpu(cpu())->get_timeout(Timer::system_clock()));
643       // Yield current global timeslice
644       cs->owner()->switch_sched(cs->id() ? cs->next() : cs);
646   reinterpret_cast<Utcb::Time_val*>(utcb->values)->t
647     = timeslice_timeout.current()->get_timeout(Timer::system_clock());
650   return commit_result(0, Utcb::Time_val::Words);
655 // -------------------------------------------------------------------
656 // Gather statistics information about thread execution
// Runs on the target thread's CPU: refresh and store the consumed time
// into the caller-provided Clock::Time slot.
660 Thread_object::sys_thread_stats_remote(void *data)
662   update_consumed_time();
663   *(Clock::Time *)data = consumed_time();
// DRQ trampoline forwarding to sys_thread_stats_remote on 'self'.
669 Thread_object::handle_sys_thread_stats_remote(Drq *, Context *self, void *data)
671   return nonull_static_cast<Thread_object*>(self)->sys_thread_stats_remote(data);
// Op_thread_stats: report this thread's consumed CPU time.  Cross-CPU
// targets are queried via DRQ; local targets read (and, for the current
// thread, first refresh) the accounting directly.
674 PRIVATE inline NOEXPORT
676 Thread_object::sys_thread_stats(L4_msg_tag const &/*tag*/, Utcb *utcb)
680   if (cpu() != current_cpu())
681     drq(handle_sys_thread_stats_remote, &value, 0, Drq::Any_ctxt);
684       // Respect the fact that the consumed time is only updated on context switch
685       if (this == current())
686         update_consumed_time();
687       value = consumed_time();
// Return the time value in the UTCB (Utcb::Time_val::Words output words).
690   reinterpret_cast<Utcb::Time_val *>(utcb->values)->t = value;
692   return commit_result(0, Utcb::Time_val::Words);