]> rtime.felk.cvut.cz Git - l4.git/blob - kernel/fiasco/src/kern/thread_object.cpp
4c24387417a91c4d818d97c453732022e65a42fa
[l4.git] / kernel / fiasco / src / kern / thread_object.cpp
1 INTERFACE:
2
3 #include "kobject.h"
4 #include "thread.h"
5
// Kernel-object face of a thread: couples the plain Thread with the
// kobject invocation interface (invoke, destroy, operator delete).
class Thread_object : public Thread
{
private:
  // Argument/result package for an ex_regs call that must be executed
  // on the target thread's home CPU via DRQ (see handle_remote_ex_regs()).
  struct Remote_syscall
  {
    Thread *thread;     // calling thread; its UTCB carries the arguments
    L4_msg_tag result;  // result tag filled in by the remote handler
  };
};
15
// Thin wrapper around L4_obj_ref adding capability-resolution helpers
// (deref(), revalidate()) in the implementation section; no own state.
class Obj_cap : public L4_obj_ref
{
};
19
20
21 // ---------------------------------------------------------------------------
22 IMPLEMENTATION:
23
24 #include "context.h"
25 #include "fpu.h"
26 #include "irq_chip.h"
27 #include "map_util.h"
28 #include "processor.h"
29 #include "task.h"
30 #include "thread_state.h"
31 #include "timer.h"
32
33
34
// Construct from a plain object reference; Obj_cap adds no extra state.
PUBLIC inline
Obj_cap::Obj_cap(L4_obj_ref const &o) : L4_obj_ref(o) {}
37
// Resolve this capability reference to a kernel object, relative to the
// current thread.  Three cases:
//  - Ipc_reply operations resolve to the recorded caller; unless `dbg`
//    is set, the caller slot is cleared as a side effect (one-shot).
//  - A special reference resolves to the current thread itself with
//    full (RWX) rights, but only if it is the self reference.
//  - Otherwise the capability index is looked up in the current space.
//
// \param rights  out (optional): rights carried by the capability.
// \param dbg     debug access: do not consume the reply/caller slot.
// \return the designated object, or 0 if the reference is invalid.
PUBLIC inline NEEDS["kobject.h"]
Kobject_iface *
Obj_cap::deref(L4_fpage::Rights *rights = 0, bool dbg = false)
{
  Thread *current = current_thread();
  if (op() & L4_obj_ref::Ipc_reply)
    {
      if (rights) *rights = current->caller_rights();
      Thread *ca = static_cast<Thread*>(current->caller());
      // Reply capabilities are one-shot: consume on real (non-debug) use.
      if (!dbg)
        current->set_caller(0, L4_fpage::Rights(0));
      return ca;
    }

  if (EXPECT_FALSE(special()))
    {
      // The only supported special capability is the self reference.
      if (!self())
        return 0;

      if (rights) *rights = L4_fpage::Rights::RWX();
      return current_thread();
    }

  return current->space()->lookup_local(cap(), rights);
}
63
64 PUBLIC inline NEEDS["kobject.h"]
65 bool
66 Obj_cap::revalidate(Kobject_iface *o)
67 {
68   return deref() == o;
69 }
70
// Default constructor: forwards to the plain Thread constructor.
PUBLIC
Thread_object::Thread_object() : Thread() {}
73
// Kernel-mode constructor: forwards the kernel context mode tag to Thread.
PUBLIC
Thread_object::Thread_object(Context_mode_kernel k) : Thread(k) {}
76
77 PUBLIC virtual
78 bool
79 Thread_object::put()
80 { return dec_ref() == 0; }
81
82
83
84 PUBLIC
85 void
86 Thread_object::operator delete(void *_t)
87 {
88   Thread_object * const t = nonull_static_cast<Thread_object*>(_t);
89   Ram_quota * const q = t->_quota;
90   Kmem_alloc::allocator()->q_unaligned_free(q, Thread::Size, t);
91
92   LOG_TRACE("Kobject delete", "del", current(), Log_destroy,
93       l->id = t->dbg_id();
94       l->obj = t;
95       l->type = "Thread";
96       l->ram = q->current());
97 }
98
99
// Kobject destruction hook: tear down generic kobject state first,
// then kill the thread itself.  kill() must succeed (checked in debug
// builds) and the object must still be intact afterwards (magic check).
PUBLIC
void
Thread_object::destroy(Kobject ***rl)
{
  Kobject::destroy(rl);
  check_kdb(kill());
  assert_kdb(_magic == magic);
}
108
// Kobject invocation entry point for thread capabilities.
//
// Anything that is not a pure send with the thread protocol label —
// i.e. an op with receive/reply parts, or a different protocol — is
// treated as ordinary IPC to this thread.  Otherwise values[0] selects
// one of the thread-control opcodes dispatched below.
PUBLIC
void
Thread_object::invoke(L4_obj_ref /*self*/, L4_fpage::Rights rights, Syscall_frame *f, Utcb *utcb)
{
  register L4_obj_ref::Operation op = f->ref().op();
  // IPC path: non-send op, any reply op, or a non-thread protocol tag.
  if (((op != 0) && !(op & L4_obj_ref::Ipc_send))
      || (op & L4_obj_ref::Ipc_reply)
      || f->tag().proto() != L4_msg_tag::Label_thread)
    {
      /* we do IPC */
      Thread *ct = current_thread();
      Thread *sender = 0;
      Thread *partner = 0;
      bool have_rcv = false;

      if (EXPECT_FALSE(!check_sys_ipc(op, &partner, &sender, &have_rcv)))
        {
          utcb->error = L4_error::Not_existent;
          return;
        }

      ct->do_ipc(f->tag(), partner, partner, have_rcv, sender,
                 f->timeout(), f, rights);
      return;
    }

  // Thread-protocol operation: dispatch on the opcode in values[0].
  switch (utcb->values[0] & Opcode_mask)
    {
    case Op_control:
      f->tag(sys_control(rights, f->tag(), utcb));
      return;
    case Op_ex_regs:
      f->tag(sys_ex_regs(f->tag(), utcb));
      return;
    case Op_switch:
      f->tag(sys_thread_switch(f->tag(), utcb));
      return;
    case Op_stats:
      f->tag(sys_thread_stats(f->tag(), utcb));
      return;
    case Op_vcpu_resume:
      f->tag(sys_vcpu_resume(f->tag(), utcb));
      return;
    case Op_register_del_irq:
      f->tag(sys_register_delete_irq(f->tag(), utcb, utcb));
      return;
    case Op_modify_senders:
      f->tag(sys_modify_senders(f->tag(), utcb, utcb));
      return;
    case Op_vcpu_control:
      f->tag(sys_vcpu_control(rights, f->tag(), utcb));
      return;
    default:
      // Unknown opcodes may still be architecture-specific extensions.
      f->tag(invoke_arch(f->tag(), utcb));
      return;
    }
}
166
167
// vCPU resume: continue (extended) vCPU execution for this thread.
//
// Valid only when invoked by the thread on itself with vCPU mode
// enabled.  In order:
//  1. optionally (re)bind or unbind the vCPU user task named in
//     vcpu->user_task,
//  2. map the UTCB's send items into the bound vCPU user task,
//  3. deliver a pending IRQ as an upcall when the saved state has IRQs
//     enabled,
//  4. resume into kernel or user mode via the target space.
PRIVATE inline
L4_msg_tag
Thread_object::sys_vcpu_resume(L4_msg_tag const &tag, Utcb *utcb)
{
  if (this != current() || !(state() & Thread_vcpu_enabled))
    return commit_result(-L4_err::EInval);

  Space *s = space();
  Vcpu_state *vcpu = vcpu_state().access(true);

  L4_obj_ref user_task = vcpu->user_task;
  if (user_task.valid())
    {
      L4_fpage::Rights task_rights = L4_fpage::Rights(0);
      Task *task = Kobject::dcast<Task*>(s->lookup_local(user_task.cap(),
                                                         &task_rights));

      // A present task capability must carry write rights.
      if (EXPECT_FALSE(task && !(task_rights & L4_fpage::Rights::W())))
        return commit_result(-L4_err::EPerm);

      if (task != vcpu_user_space())
        vcpu_set_user_space(task);

      // Consume the request so it is not re-applied on the next resume.
      vcpu->user_task = L4_obj_ref();
    }
  else if (user_task.op() == L4_obj_ref::Ipc_reply)
    // An invalid cap with a reply op requests detaching the user task.
    vcpu_set_user_space(0);

  // Map all send items into the vCPU user task (if one is bound).
  L4_snd_item_iter snd_items(utcb, tag.words());
  int items = tag.items();
  if (vcpu_user_space())
    for (; items && snd_items.more(); --items)
      {
        if (EXPECT_FALSE(!snd_items.next()))
          break;

        // The target task must keep existing while we map into it.
        Lock_guard<Lock> guard;
        if (!guard.check_and_lock(&static_cast<Task *>(vcpu_user_space())->existence_lock))
          return commit_result(-L4_err::ENoent);

        // Mapping may take a while; open the CPU lock meanwhile.
        cpu_lock.clear();

        L4_snd_item_iter::Item const *const item = snd_items.get();
        L4_fpage sfp(item->d);

        Reap_list rl;
        L4_error err = fpage_map(space(), sfp,
                                 vcpu_user_space(), L4_fpage::all_spaces(),
                                 item->b, &rl);
        rl.del();

        cpu_lock.lock();

        if (EXPECT_FALSE(!err.ok()))
          return commit_error(utcb, err);
      }

  // Deliver a pending IRQ as an upcall before actually resuming.
  if ((vcpu->_saved_state & Vcpu_state::F_irqs)
      && (vcpu->sticky_flags & Vcpu_state::Sf_irq_pending))
    {
      assert_kdb(cpu_lock.test());
      // Zero-timeout open receive to pick up the pending event.
      do_ipc(L4_msg_tag(), 0, 0, true, 0,
             L4_timeout_pair(L4_timeout::Zero, L4_timeout::Zero),
             &vcpu->_ipc_regs, L4_fpage::Rights::FULL());

      // do_ipc() may have blocked/switched; re-fetch the vCPU state.
      vcpu = vcpu_state().access(true);

      if (EXPECT_TRUE(!vcpu->_ipc_regs.tag().has_error()
                      || this->utcb().access(true)->error.error() == L4_error::R_timeout))
        {
          vcpu->_ts.set_ipc_upcall();

          Address sp;

          // tried to resume to user mode, so an IRQ enters from user mode
          if (vcpu->_saved_state & Vcpu_state::F_user_mode)
            sp = vcpu->_entry_sp;
          else
            sp = vcpu->_ts.sp();

          arch_load_vcpu_kern_state(vcpu, true);

          LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log,
              l->type = 4;
              l->state = vcpu->state;
              l->ip = vcpu->_entry_ip;
              l->sp = sp;
              l->space = static_cast<Task*>(_space.vcpu_aware())->dbg_id();
              );

          // Direct return to the vCPU entry point — presumably does not
          // return to this code path; confirm with fast_return_to_user().
          fast_return_to_user(vcpu->_entry_ip, sp, vcpu_state().usr().get());
        }
    }

  vcpu->state = vcpu->_saved_state;
  Task *target_space = nonull_static_cast<Task*>(space());
  bool user_mode = false;

  if (vcpu->state & Vcpu_state::F_user_mode)
    {
      // User-mode resume requires a bound vCPU user task.
      if (!vcpu_user_space())
        return commit_result(-L4_err::ENoent);

      user_mode = true;

      // Lazily disable the FPU so a user-mode FPU access traps first.
      if (!(vcpu->state & Vcpu_state::F_fpu_enabled))
        {
          state_add_dirty(Thread_vcpu_fpu_disabled);
          Fpu::disable();
        }
      else
        state_del_dirty(Thread_vcpu_fpu_disabled);

      target_space = static_cast<Task*>(vcpu_user_space());

      arch_load_vcpu_user_state(vcpu, true);
    }

  LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log,
      l->type = 0;
      l->state = vcpu->state;
      l->ip = vcpu->_ts.ip();
      l->sp = vcpu->_ts.sp();
      l->space = target_space->dbg_id();
      );

  return commit_result(target_space->resume_vcpu(this, vcpu, user_mode));
}
296
// Rewrite the IPC labels of all senders currently enqueued at this
// thread (via each sender's modify_label()).
//
// Fails with EBusy when a previous modification is still in flight
// (queue cursor set).  The sender queue is walked in bounded chunks
// with a preemption point in between; the cursor preserves the
// position across preemption.
PRIVATE inline NOEXPORT NEEDS["processor.h"]
L4_msg_tag
Thread_object::sys_modify_senders(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
{
  if (sender_list()->cursor())
    return Kobject_iface::commit_result(-L4_err::EBusy);

  // Disabled debug output, kept for manual enabling.
  if (0)
    printf("MODIFY ID (%08lx:%08lx->%08lx:%08lx\n",
           in->values[1], in->values[2],
           in->values[3], in->values[4]);


  int elems = tag.words();

  if (elems < 5)
    return Kobject_iface::commit_result(0);

  // values[0] is the opcode; the remaining words are handed to
  // modify_label() in groups of four (elems = number of groups).
  --elems;

  elems = elems / 4;

  ::Prio_list_elem *c = sender_list()->first();
  while (c)
    {
      // this is kind of arbitrary
      for (int cnt = 50; c && cnt > 0; --cnt)
        {
          Sender *s = Sender::cast(c);
          s->modify_label(&in->values[1], elems);
          c = sender_list()->next(c);
        }

      if (!c)
        return Kobject_iface::commit_result(0);

      // Remember position, allow preemption, then re-read the cursor
      // (it may have been adjusted while we were preempted).
      sender_list()->cursor(c);
      Proc::preemption_point();
      c = sender_list()->cursor();
    }
  return Kobject_iface::commit_result(0);
}
339
// Register an IRQ object for this thread's deletion event.
//
// Expects one object send item carrying an IRQ capability with X
// rights in the caller's space.  On success the IRQ is handed to
// register_delete_irq() — presumably triggered when the thread goes
// away; confirm with that function's definition.
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_register_delete_irq(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
{
  L4_snd_item_iter snd_items(in, tag.words());

  if (!tag.items() || !snd_items.next())
    return Kobject_iface::commit_result(-L4_err::EInval);

  L4_fpage bind_irq(snd_items.get()->d);
  if (EXPECT_FALSE(!bind_irq.is_objpage()))
    return Kobject_iface::commit_error(in, L4_error::Overflow);

  // Look up the IRQ capability in the *caller's* space.
  register Context *const c_thread = ::current();
  register Space *const c_space = c_thread->space();
  L4_fpage::Rights irq_rights = L4_fpage::Rights(0);
  Irq_base *irq
    = Irq_base::dcast(c_space->lookup_local(bind_irq.obj_index(), &irq_rights));

  if (!irq)
    return Kobject_iface::commit_result(-L4_err::EInval);

  // Binding requires the X right on the IRQ capability.
  if (EXPECT_FALSE(!(irq_rights & L4_fpage::Rights::X())))
    return Kobject_iface::commit_result(-L4_err::EPerm);

  register_delete_irq(irq);
  return Kobject_iface::commit_result(0);
}
368
369
// Thread control: set pager and/or exception handler, bind to a task,
// and set/clear alien mode, selected by the flag bits in values[0].
//
// Requires write rights on the invoked capability and at least 6
// message words.  On success the previous pager and exception-handler
// capabilities are returned in values[1]/values[2] (3 output words).
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_control(L4_fpage::Rights rights, L4_msg_tag const &tag, Utcb *utcb)
{
  if (EXPECT_FALSE(!(rights & L4_fpage::Rights::W())))
    return commit_result(-L4_err::EPerm);

  if (EXPECT_FALSE(tag.words() < 6))
    return commit_result(-L4_err::EInval);

  Context *curr = current();
  Space *s = curr->space();
  L4_snd_item_iter snd_items(utcb, tag.words());
  Task *task = 0;
  User<Utcb>::Ptr utcb_addr(0);

  Mword flags = utcb->values[0];

  // Capture the old caps before control() may replace them below.
  Mword _old_pager = cxx::int_value<Cap_index>(_pager.raw()) << L4_obj_ref::Cap_shift;
  Mword _old_exc_handler = cxx::int_value<Cap_index>(_exc_handler.raw()) << L4_obj_ref::Cap_shift;

  Thread_ptr _new_pager(Thread_ptr::Invalid);
  Thread_ptr _new_exc_handler(Thread_ptr::Invalid);

  if (flags & Ctl_set_pager)
    _new_pager = Thread_ptr(Cap_index(utcb->values[1] >> L4_obj_ref::Cap_shift));

  if (flags & Ctl_set_exc_handler)
    _new_exc_handler = Thread_ptr(Cap_index(utcb->values[2] >> L4_obj_ref::Cap_shift));

  if (flags & Ctl_bind_task)
    {
      // The task to bind to arrives as an object send item.
      if (EXPECT_FALSE(!tag.items() || !snd_items.next()))
        return commit_result(-L4_err::EInval);

      L4_fpage bind_task(snd_items.get()->d);

      if (EXPECT_FALSE(!bind_task.is_objpage()))
        return commit_result(-L4_err::EInval);

      L4_fpage::Rights task_rights = L4_fpage::Rights(0);
      task = Kobject::dcast<Task*>(s->lookup_local(bind_task.obj_index(), &task_rights));

      // NOTE(review): the rights check precedes the null check, so a
      // failed lookup (rights stay 0) yields EPerm rather than EInval —
      // confirm this error-code ordering is intended.
      if (EXPECT_FALSE(!(task_rights & L4_fpage::Rights::W())))
        return commit_result(-L4_err::EPerm);

      if (!task)
        return commit_result(-L4_err::EInval);

      // values[5] carries the user-virtual UTCB address for the binding.
      utcb_addr = User<Utcb>::Ptr((Utcb*)utcb->values[5]);

      if (EXPECT_FALSE(!bind(task, utcb_addr)))
        return commit_result(-L4_err::EInval); // unbind first !!
    }

  Mword del_state = 0;
  Mword add_state = 0;

  long res = control(_new_pager, _new_exc_handler);

  if (res < 0)
    return commit_result(res);

  if ((res = sys_control_arch(utcb)) < 0)
    return commit_result(res);

  // FIXME: must be done xcpu safe, may be some parts above too
  if (flags & Ctl_alien_thread)
    {
      // values[4] carries the desired alien setting (set vs clear).
      if (utcb->values[4] & Ctl_alien_thread)
        {
          add_state |= Thread_alien;
          del_state |= Thread_dis_alien;
        }
      else
        del_state |= Thread_alien;
    }

  if (del_state || add_state)
    drq_state_change(~del_state, add_state);

  // Report the previous pager/exception-handler caps back to the caller.
  utcb->values[1] = _old_pager;
  utcb->values[2] = _old_exc_handler;

  return commit_result(0, 3);
}
456
457
// Enable (extended) vCPU operation for this thread.
//
// values[1] holds the user-virtual address of the Vcpu_state, which
// must lie entirely within previously established kernel-user memory
// (ku_mem).  Bit 16 of values[0] requests extended vCPU state sized a
// whole page.  Disabling vCPU mode is intentionally unsupported (see
// the commented-out block below).
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_vcpu_control(L4_fpage::Rights, L4_msg_tag const &tag,
                                Utcb *utcb)
{
  if (!space())
    return commit_result(-L4_err::EInval);

  User<Vcpu_state>::Ptr vcpu(0);

  if (tag.words() >= 2)
    vcpu = User<Vcpu_state>::Ptr((Vcpu_state*)utcb->values[1]);

  Mword del_state = 0;
  Mword add_state = 0;

  if (vcpu)
    {
      Mword size = sizeof(Vcpu_state);
      if (utcb->values[0] & 0x10000)
        {
          // Extended vCPU state occupies a full page.
          size = Config::PAGE_SIZE;
          add_state |= Thread_ext_vcpu_enabled;
        }

      // The whole state area must be backed by registered ku_mem.
      Space::Ku_mem const *vcpu_m
        = space()->find_ku_mem(vcpu, size);

      if (!vcpu_m)
        return commit_result(-L4_err::EInval);

      add_state |= Thread_vcpu_enabled;
      _vcpu_state.set(vcpu, vcpu_m->kern_addr(vcpu));

      Vcpu_state *s = _vcpu_state.access();
      arch_init_vcpu_state(s, add_state & Thread_ext_vcpu_enabled);
      arch_update_vcpu_state(s);
    }
  else
    return commit_result(-L4_err::EInval);

  /* hm, we do not allow to disable vCPU mode, it's one way enable
  else
    {
      del_state |= Thread_vcpu_enabled | Thread_vcpu_user_mode
                   | Thread_vcpu_fpu_disabled | Thread_ext_vcpu_enabled;
    }
  */

  // del_state is always 0 here; kept for the (disabled) disable path.
  drq_state_change(~del_state, add_state);

  return commit_result(0);
}
511
512
513 // -------------------------------------------------------------------
514 // Thread::ex_regs class system calls
515
// Core ex_regs: read back and optionally set IP, SP and flags of this
// thread and apply the requested Exr_* operations.
//
// \param ip       new instruction pointer; ~0UL keeps the current one
// \param sp       new stack pointer; ~0UL keeps the current one
// \param o_ip     out (optional): previous instruction pointer
// \param o_sp     out (optional): previous stack pointer
// \param o_flags  out (optional): previous user flags
// \param ops      bitmask of Exr_* operations (cancel, trigger exception)
// \return false if the thread is invalid or has no address space.
PUBLIC
bool
Thread_object::ex_regs(Address ip, Address sp,
                Address *o_ip = 0, Address *o_sp = 0, Mword *o_flags = 0,
                Mword ops = 0)
{
  if (state(false) == Thread_invalid || !space())
    return false;

  // Flush the live register state to memory before reading/modifying it.
  if (current() == this)
    spill_user_state();

  if (o_sp) *o_sp = user_sp();
  if (o_ip) *o_ip = user_ip();
  if (o_flags) *o_flags = user_flags();

  // Changing the run state is only possible when the thread is not in
  // an exception.
  if (!(ops & Exr_cancel) && (state() & Thread_in_exception))
    // XXX Maybe we should return false here.  Previously, we actually
    // did so, but we also actually didn't do any state modification.
    // If you change this value, make sure the logic in
    // sys_thread_ex_regs still works (in particular,
    // ex_regs_cap_handler and friends should still be called).
    return true;

  if (state() & Thread_dead)    // resurrect thread
    state_change_dirty(~Thread_dead, Thread_ready);

  else if (ops & Exr_cancel)
    // cancel ongoing IPC or other activity
    state_add_dirty(Thread_cancel | Thread_ready);

  if (ops & Exr_trigger_exception)
    {
      extern char leave_by_trigger_exception[];
      do_trigger_exception(regs(), leave_by_trigger_exception);
    }

  if (ip != ~0UL)
    user_ip(ip);

  if (sp != ~0UL)
    user_sp (sp);

  // Reload the (possibly changed) state into the live registers.
  if (current() == this)
    fill_user_state();

  return true;
}
566
// UTCB-based ex_regs wrapper: unpack ops/IP/SP from the caller's UTCB,
// run ex_regs(), and write the previous flags/IP/SP back (3 words).
PUBLIC inline
L4_msg_tag
Thread_object::ex_regs(Utcb *utcb)
{
  Address ip = utcb->values[1];
  Address sp = utcb->values[2];
  Mword flags;
  Mword ops = utcb->values[0];

  LOG_TRACE("Ex-regs", "exr", current(), Log_thread_exregs,
      l->id = dbg_id();
      l->ip = ip; l->sp = sp; l->op = ops;);

  if (!ex_regs(ip, sp, &ip, &sp, &flags, ops))
    return commit_result(-L4_err::EInval);

  // Return the previous register values in place of the arguments.
  utcb->values[0] = flags;
  utcb->values[1] = ip;
  utcb->values[2] = sp;

  return commit_result(0, 3);
}
589
// DRQ handler: execute ex_regs() on the target thread's home CPU,
// taking the arguments from the calling thread's UTCB (see
// Remote_syscall) and storing the result tag for the caller.
PRIVATE static
unsigned
Thread_object::handle_remote_ex_regs(Drq *, Context *self, void *p)
{
  Remote_syscall *params = reinterpret_cast<Remote_syscall*>(p);
  params->result = nonull_static_cast<Thread_object*>(self)->ex_regs(params->thread->utcb().access());
  // Request a reschedule when ex_regs succeeded (proto 0 == no error).
  return params->result.proto() == 0 ? Drq::Need_resched : 0;
}
598
// ex_regs system call: executed directly when targeting the current
// thread, otherwise forwarded to the target's home CPU via DRQ.
// Requires exactly 3 message words (ops, IP, SP).
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_ex_regs(L4_msg_tag const &tag, Utcb *utcb)
{
  if (tag.words() != 3)
    return commit_result(-L4_err::EInval);

  if (current() == this)
    return ex_regs(utcb);

  Remote_syscall params;
  params.thread = current_thread();

  // Run handle_remote_ex_regs via DRQ; params.result is consumed right
  // after, so the DRQ is presumably synchronous here — see drq().
  drq(handle_remote_ex_regs, &params, 0, Drq::Any_ctxt);
  return params.result;
}
615
// Thread switch: donate the caller's timeslice to this thread.
//
// Only effective when the target is on the current CPU and ready;
// otherwise the call degrades to a plain yield (or a no-op for self /
// cross-CPU targets).  The remaining timeslice is reported as a
// Utcb::Time_val in the UTCB.
PRIVATE inline NOEXPORT NEEDS["timer.h"]
L4_msg_tag
Thread_object::sys_thread_switch(L4_msg_tag const &/*tag*/, Utcb *utcb)
{
  Context *curr = current();

  // Switching to oneself is a no-op.
  if (curr == this)
    return commit_result(0);

  // Cross-CPU switching is not supported; report success without acting.
  if (current_cpu() != cpu())
    return commit_result(0);

#ifdef FIXME
  Sched_context * const cs = current_sched();
#endif

  // curr != this always holds here (checked above); kept defensively.
  if (curr != this && (state() & Thread_ready_mask))
    {
      curr->switch_exec_schedule_locked (this, Not_Helping);
      reinterpret_cast<Utcb::Time_val*>(utcb->values)->t = 0; // Assume timeslice was used up
      return commit_result(0, Utcb::Time_val::Words);
    }

#if 0 // FIXME: provide API for multiple sched contexts
      // Compute remaining quantum length of timeslice
      regs->left(timeslice_timeout.cpu(cpu())->get_timeout(Timer::system_clock()));

      // Yield current global timeslice
      cs->owner()->switch_sched(cs->id() ? cs->next() : cs);
#endif
  // Target not ready: report the remaining timeslice and just yield.
  reinterpret_cast<Utcb::Time_val*>(utcb->values)->t
    = timeslice_timeout.current()->get_timeout(Timer::system_clock());
  curr->schedule();

  return commit_result(0, Utcb::Time_val::Words);
}
652
653
654
655 // -------------------------------------------------------------------
656 // Gather statistics information about thread execution
657
658 PRIVATE
659 unsigned
660 Thread_object::sys_thread_stats_remote(void *data)
661 {
662   update_consumed_time();
663   *(Clock::Time *)data = consumed_time();
664   return 0;
665 }
666
// DRQ trampoline: forward to sys_thread_stats_remote() on `self`.
PRIVATE static
unsigned
Thread_object::handle_sys_thread_stats_remote(Drq *, Context *self, void *data)
{
  return nonull_static_cast<Thread_object*>(self)->sys_thread_stats_remote(data);
}
673
// Query the CPU time consumed by this thread, returned as a
// Utcb::Time_val in the UTCB.  For a thread living on a remote CPU the
// value is obtained via DRQ on its home CPU.
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_thread_stats(L4_msg_tag const &/*tag*/, Utcb *utcb)
{
  Clock::Time value;

  if (cpu() != current_cpu())
    drq(handle_sys_thread_stats_remote, &value, 0, Drq::Any_ctxt);
  else
    {
      // Respect the fact that the consumed time is only updated on context switch
      if (this == current())
        update_consumed_time();
      value = consumed_time();
    }

  reinterpret_cast<Utcb::Time_val *>(utcb->values)->t = value;

  return commit_result(0, Utcb::Time_val::Words);
}
694
695