// kernel/fiasco/src/kern/thread_object.cpp
1 INTERFACE:
2
3 #include "kobject.h"
4 #include "thread.h"
5
// A kernel thread exposed as a kernel object (capability target).
// Adds the object-invocation protocol (plain IPC plus the thread
// system calls dispatched in invoke()) on top of Thread.
class Thread_object : public Thread
{
private:
  // Parameter/result bundle for system calls that must execute on the
  // target thread's home CPU via a DRQ (see sys_ex_regs() /
  // handle_remote_ex_regs()).
  struct Remote_syscall
  {
    Thread *thread;     // caller thread issuing the remote syscall
    L4_msg_tag result;  // result tag produced on the remote CPU
  };
};
15
// Thin wrapper around a raw object-capability reference that adds
// dereferencing helpers (capability -> kernel object, see deref()).
class Obj_cap : public L4_obj_ref
{
};
19
20
21 // ---------------------------------------------------------------------------
22 IMPLEMENTATION:
23
24 #include "context.h"
25 #include "fpu.h"
26 #include "map_util.h"
27 #include "processor.h"
28 #include "task.h"
29 #include "thread_state.h"
30 #include "timer.h"
31
32
33
// Construct an Obj_cap from a plain L4_obj_ref (copies the raw reference).
PUBLIC inline
Obj_cap::Obj_cap(L4_obj_ref const &o) : L4_obj_ref(o) {}
36
// Resolve this capability to the kernel object it names, from the
// viewpoint of the current thread.
// \param rights  optional out: rights bits attached to the capability
// \param dbg     debugger lookup — suppresses the side effect below
// \return the referenced object, or 0 if the capability is invalid.
//
// NOTE: dereferencing a reply capability is destructive — unless dbg
// is set, the recorded caller is cleared (the reply cap is consumed).
PUBLIC inline NEEDS["kobject.h"]
Kobject_iface *
Obj_cap::deref(unsigned char *rights = 0, bool dbg = false)
{
  Thread *current = current_thread();
  if (op() & L4_obj_ref::Ipc_reply)
    {
      // Reply capability: resolves to the caller recorded in the
      // current thread, with the rights stored alongside it.
      if (rights) *rights = current->caller_rights();
      Thread *ca = static_cast<Thread*>(current->caller());
      if (!dbg)
        current->set_caller(0,0);
      return ca;
    }

  if (EXPECT_FALSE(special()))
    {
      // Special (non-space) capability: only the self reference is
      // supported; it names the current thread with full rights.
      if (!self())
        return 0;

      if (rights) *rights = L4_fpage::RWX;
      return current_thread();
    }

  // Regular capability: look it up in the caller's object space.
  return current->space()->obj_space()->lookup_local(cap(), rights);
}
62
// Check that this capability (still) refers to the given kernel object.
// NOTE: uses deref(), so for a reply capability this consumes the
// recorded caller as a side effect.
PUBLIC inline NEEDS["kobject.h"]
bool
Obj_cap::revalidate(Kobject_iface *o)
{
  return deref() == o;
}
69
// Default construction: forwards to the default Thread constructor.
PUBLIC
Thread_object::Thread_object() : Thread() {}
72
// Kernel-mode construction: forwards the Context_mode_kernel tag to Thread.
PUBLIC
Thread_object::Thread_object(Context_mode_kernel k) : Thread(k) {}
75
// Drop one reference; returns true when this was the last reference,
// i.e. the caller may delete the object.
PUBLIC virtual
bool
Thread_object::put()
{ return dec_ref() == 0; }
80
81
82
83 PUBLIC
84 void
85 Thread_object::operator delete(void *_t)
86 {
87   Thread_object * const t = nonull_static_cast<Thread_object*>(_t);
88   Ram_quota * const q = t->_quota;
89   Mapped_allocator::allocator()->q_unaligned_free(q, Config::thread_block_size, t);
90
91   LOG_TRACE("Kobject delete", "del", current(), __fmt_kobj_destroy,
92       Log_destroy *l = tbe->payload<Log_destroy>();
93       l->id = t->dbg_id();
94       l->obj = t;
95       l->type = "Thread";
96       l->ram = q->current());
97 }
98
99
// Kobject destruction hook: run the generic Kobject teardown (unmap
// from object spaces, collect into the reap list), then kill the
// thread. The magic assertion guards against operating on a corrupted
// or already recycled thread object.
PUBLIC
void
Thread_object::destroy(Kobject ***rl)
{
  Kobject::destroy(rl);
  check_kdb(kill());
  assert_kdb(_magic == magic);
}
108
// Generic kernel-object invocation entry for a thread capability.
//
// Two cases are distinguished:
//  - plain IPC directed at this thread (any op that is not a pure
//    send, any reply, or a tag without the thread protocol label),
//    which is forwarded to do_ipc(); and
//  - thread-protocol calls, which are dispatched to the sys_*
//    handlers by the opcode in the first UTCB word.
PUBLIC
void
Thread_object::invoke(L4_obj_ref /*self*/, Mword rights, Syscall_frame *f, Utcb *utcb)
{
  register L4_obj_ref::Operation op = f->ref().op();
  if (((op != 0) && !(op & L4_obj_ref::Ipc_send))
      || (op & L4_obj_ref::Ipc_reply)
      || f->tag().proto() != L4_msg_tag::Label_thread)
    {
      /* we do IPC */
      Thread *ct = current_thread();
      Thread *sender = 0;
      Thread *partner = 0;
      bool have_rcv = false;

      // Decode the IPC operation into partner/sender/receive phase;
      // a malformed combination is reported as Not_existent.
      if (EXPECT_FALSE(!check_sys_ipc(op, &partner, &sender, &have_rcv)))
        {
          utcb->error = L4_error::Not_existent;
          return;
        }

      ct->do_ipc(f->tag(), partner, partner, have_rcv, sender,
                 f->timeout(), f, rights);
      return;
    }

  // Thread-protocol call: dispatch on the opcode word.
  switch (utcb->values[0] & Opcode_mask)
    {
    case Op_control:
      f->tag(sys_control(rights, f->tag(), utcb));
      return;
    case Op_ex_regs:
      f->tag(sys_ex_regs(f->tag(), utcb));
      return;
    case Op_switch:
      f->tag(sys_thread_switch(f->tag(), utcb));
      return;
    case Op_stats:
      f->tag(sys_thread_stats(f->tag(), utcb));
      return;
    case Op_vcpu_resume:
      f->tag(sys_vcpu_resume(f->tag(), utcb));
      return;
    case Op_register_del_irq:
      f->tag(sys_register_delete_irq(f->tag(), utcb, utcb));
      return;
    case Op_modify_senders:
      f->tag(sys_modify_senders(f->tag(), utcb, utcb));
      return;
    case Op_vcpu_control:
      f->tag(sys_vcpu_control(rights, f->tag(), utcb));
      return;
    default:
      // Unknown opcode: give the architecture-specific extension a
      // chance before failing with ENosys.
      L4_msg_tag tag = f->tag();
      if (invoke_arch(tag, utcb))
        f->tag(tag);
      else
        f->tag(commit_result(-L4_err::ENosys));
      return;
    }
}
170
171
// Resume vCPU execution for this thread (must be invoked by the
// thread itself, with vCPU mode enabled). In order:
//  1. switch/clear the vCPU user task according to vcpu->user_task,
//  2. map the flex pages sent with this call into the user task,
//  3. if IRQs are enabled and one is pending, receive it and deliver
//     it as an IPC upcall via fast_return_to_user() (does not return),
//  4. otherwise resume the vCPU in the selected target space.
PRIVATE inline
L4_msg_tag
Thread_object::sys_vcpu_resume(L4_msg_tag const &tag, Utcb *utcb)
{
  if (this != current() || !(state() & Thread_vcpu_enabled))
    return commit_result(-L4_err::EInval);

  Obj_space *s = space()->obj_space();
  Vcpu_state *vcpu = vcpu_state().access(true);

  L4_obj_ref user_task = vcpu->user_task;
  if (user_task.valid())
    {
      // Bind a (new) user task. W right on the task cap is required;
      // an invalid lookup yields task == 0 and clears the user space.
      unsigned char task_rights = 0;
      Task *task = Kobject::dcast<Task*>(s->lookup_local(user_task.cap(),
                                                         &task_rights));

      if (EXPECT_FALSE(task && !(task_rights & L4_fpage::W)))
        return commit_result(-L4_err::EPerm);

      if (task != vcpu_user_space())
        vcpu_set_user_space(task);

      // Consume the request so it is not re-applied on the next resume.
      vcpu->user_task = L4_obj_ref();
    }
  else if (user_task.op() == L4_obj_ref::Ipc_reply)
    vcpu_set_user_space(0);

  // Map all flex-page items of this call into the vCPU user space.
  L4_snd_item_iter snd_items(utcb, tag.words());
  int items = tag.items();
  for (; items && snd_items.more(); --items)
    {
      if (EXPECT_FALSE(!snd_items.next()))
        break;

      // XXX: need to take existence lock for map
      // fpage_map may block; drop the CPU lock around it.
      cpu_lock.clear();

      L4_snd_item_iter::Item const *const item = snd_items.get();
      L4_fpage sfp(item->d);

      Reap_list rl;
      L4_error err = fpage_map(space(), sfp,
                               vcpu_user_space(), L4_fpage::all_spaces(),
                               item->b, &rl);
      rl.del();

      cpu_lock.lock();

      if (EXPECT_FALSE(!err.ok()))
        return commit_error(utcb, err);
    }

  if ((vcpu->_saved_state & Vcpu_state::F_irqs)
      && (vcpu->sticky_flags & Vcpu_state::Sf_irq_pending))
    {
      // An IRQ is pending and the vCPU wants them: do a zero-timeout
      // receive to pick it up.
      assert_kdb(cpu_lock.test());
      do_ipc(L4_msg_tag(), 0, 0, true, 0,
             L4_timeout_pair(L4_timeout::Zero, L4_timeout::Zero),
             &vcpu->_ipc_regs, 3);

      // do_ipc may have migrated the UTCB mapping; re-fetch.
      vcpu = vcpu_state().access(true);

      if (EXPECT_TRUE(!vcpu->_ipc_regs.tag().has_error()
                      || this->utcb().access(true)->error.error() == L4_error::R_timeout))
        {
          vcpu->_ts.set_ipc_upcall();

          Address sp;

          // tried to resume to user mode, so an IRQ enters from user mode
          if (vcpu->_saved_state & Vcpu_state::F_user_mode)
            sp = vcpu->_entry_sp;
          else
            sp = vcpu->_ts.sp();

          LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
              Vcpu_log *l = tbe->payload<Vcpu_log>();
              l->type = 4;
              l->state = vcpu->state;
              l->ip = vcpu->_entry_ip;
              l->sp = sp;
              l->space = static_cast<Task*>(_space.vcpu_aware())->dbg_id();
              );

          // Deliver the upcall at the vCPU entry point; does not return.
          fast_return_to_user(vcpu->_entry_ip, sp, vcpu_state().usr().get());
        }
    }

  vcpu->state = vcpu->_saved_state;
  Task *target_space = nonull_static_cast<Task*>(space());
  bool user_mode = false;

  if (vcpu->state & Vcpu_state::F_user_mode)
    {
      // Resuming into the user task requires one to be bound.
      if (!vcpu_user_space())
        return commit_result(-L4_err::EInval);

      user_mode = true;

      if (!(vcpu->state & Vcpu_state::F_fpu_enabled))
        {
          state_add_dirty(Thread_vcpu_fpu_disabled);
          Fpu::disable();
        }
      else
        state_del_dirty(Thread_vcpu_fpu_disabled);

      target_space = static_cast<Task*>(vcpu_user_space());
    }

  LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
      Vcpu_log *l = tbe->payload<Vcpu_log>();
      l->type = 0;
      l->state = vcpu->state;
      l->ip = vcpu->_ts.ip();
      l->sp = vcpu->_ts.sp();
      l->space = target_space->dbg_id();
      );

  return commit_result(target_space->resume_vcpu(this, vcpu, user_mode));
}
294
// Rewrite the IPC labels of all senders currently enqueued to this
// thread; the match/replace patterns come from in->values[1..]
// (four message words per pattern entry, see Sender::modify_label).
// Fails with EBusy if another modify operation is still in flight.
PRIVATE inline NOEXPORT NEEDS["processor.h"]
L4_msg_tag
Thread_object::sys_modify_senders(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
{
  // A parked cursor means another modify pass is in progress.
  if (sender_list()->cursor())
    return Kobject_iface::commit_result(-L4_err::EBusy);

  // Disabled debugging aid.
  if (0)
    printf("MODIFY ID (%08lx:%08lx->%08lx:%08lx\n",
           in->values[1], in->values[2],
           in->values[3], in->values[4]);


  int elems = tag.words();

  // Opcode word plus at least one 4-word pattern entry required.
  if (elems < 5)
    return Kobject_iface::commit_result(0);

  --elems;

  elems = elems / 4;

  ::Prio_list_elem *c = sender_list()->head();
  while (c)
    {
      // Process senders in bounded batches so the operation stays
      // preemptible; the batch size is kind of arbitrary.
      for (int cnt = 50; c && cnt > 0; --cnt)
        {
          Sender *s = Sender::cast(c);
          s->modify_label(&in->values[1], elems);
          c = c->next();
        }

      if (!c)
        return Kobject_iface::commit_result(0);

      // Park our position, allow preemption, then continue from the
      // (possibly updated) cursor.
      sender_list()->cursor(c);
      Proc::preemption_point();
      c = sender_list()->cursor();
    }
  return Kobject_iface::commit_result(0);
}
337
338 PRIVATE inline NOEXPORT
339 L4_msg_tag
340 Thread_object::sys_register_delete_irq(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
341 {
342   L4_snd_item_iter snd_items(in, tag.words());
343
344   if (!tag.items() || !snd_items.next())
345     return Kobject_iface::commit_result(-L4_err::EInval);
346
347   L4_fpage bind_irq(snd_items.get()->d);
348   if (EXPECT_FALSE(!bind_irq.is_objpage()))
349     return Kobject_iface::commit_error(in, L4_error::Overflow);
350
351   register Context *const c_thread = ::current();
352   register Space *const c_space = c_thread->space();
353   register Obj_space *const o_space = c_space->obj_space();
354   unsigned char irq_rights = 0;
355   Irq_base *irq
356     = Irq_base::dcast(o_space->lookup_local(bind_irq.obj_index(), &irq_rights));
357
358   if (!irq)
359     return Kobject_iface::commit_result(-L4_err::EInval);
360
361   if (EXPECT_FALSE(!(irq_rights & L4_fpage::X)))
362     return Kobject_iface::commit_result(-L4_err::EPerm);
363
364   register_delete_irq(irq);
365   return Kobject_iface::commit_result(0);
366 }
367
368
// Thread control: optionally (re)bind the thread to a task, set the
// pager and exception handler, and toggle alien mode — selected by
// the Ctl_* flag bits in utcb->values[0]. On success the previous
// pager/exception-handler caps are returned in values[1]/[2].
// Requires the W right on the thread capability and >= 6 msg words.
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_control(unsigned char rights, L4_msg_tag const &tag, Utcb *utcb)
{
  if (EXPECT_FALSE(!(rights & L4_fpage::W)))
    return commit_result(-L4_err::EPerm);

  if (EXPECT_FALSE(tag.words() < 6))
    return commit_result(-L4_err::EInval);

  Context *curr = current();
  Obj_space *s = curr->space()->obj_space();
  L4_snd_item_iter snd_items(utcb, tag.words());
  Task *task = 0;
  User<Utcb>::Ptr utcb_addr(0);

  Mword flags = utcb->values[0];

  // Capture the old handler caps before control() may replace them.
  Mword _old_pager = _pager.raw() << L4_obj_ref::Cap_shift;
  Mword _old_exc_handler = _exc_handler.raw() << L4_obj_ref::Cap_shift;

  // ~0UL means "keep the current value" for control() below.
  Thread_ptr _new_pager(~0UL);
  Thread_ptr _new_exc_handler(~0UL);

  if (flags & Ctl_set_pager)
    _new_pager = Thread_ptr(utcb->values[1] >> L4_obj_ref::Cap_shift);

  if (flags & Ctl_set_exc_handler)
    _new_exc_handler = Thread_ptr(utcb->values[2] >> L4_obj_ref::Cap_shift);

  if (flags & Ctl_bind_task)
    {
      // The task comes as the first object flex-page item; the UTCB
      // address within that task is passed in values[5].
      if (EXPECT_FALSE(!tag.items() || !snd_items.next()))
        return commit_result(-L4_err::EInval);

      L4_fpage bind_task(snd_items.get()->d);

      if (EXPECT_FALSE(!bind_task.is_objpage()))
        return commit_result(-L4_err::EInval);

      unsigned char task_rights = 0;
      task = Kobject::dcast<Task*>(s->lookup_local(bind_task.obj_index(), &task_rights));

      if (EXPECT_FALSE(!(task_rights & L4_fpage::W)))
        return commit_result(-L4_err::EPerm);

      if (!task)
        return commit_result(-L4_err::EInval);

      utcb_addr = User<Utcb>::Ptr((Utcb*)utcb->values[5]);

      if (EXPECT_FALSE(!bind(task, utcb_addr)))
        return commit_result(-L4_err::EInval); // unbind first !!
    }

  Mword del_state = 0;
  Mword add_state = 0;

  long res = control(_new_pager, _new_exc_handler);

  if (res < 0)
    return commit_result(res);

  if ((res = sys_control_arch(utcb)) < 0)
    return commit_result(res);

  // FIXME: must be done xcpu safe, may be some parts above too
  if (flags & Ctl_alien_thread)
    {
      // values[4] carries the new alien setting; the flag bit only
      // selects whether it is applied.
      if (utcb->values[4] & Ctl_alien_thread)
        {
          add_state |= Thread_alien;
          del_state |= Thread_dis_alien;
        }
      else
        del_state |= Thread_alien;
    }

  // Apply the accumulated state change on the thread's home CPU.
  if (del_state || add_state)
    drq_state_change(~del_state, add_state);

  utcb->values[1] = _old_pager;
  utcb->values[2] = _old_exc_handler;

  return commit_result(0, 3);
}
455
456
// Enable vCPU mode for this thread.
// utcb->values[1] holds the user-virtual address of the Vcpu_state,
// which must lie inside a kernel-user memory area of the task
// (find_ku_mem). Bit 0x10000 in values[0] requests extended vCPU
// state, which needs a full page instead of sizeof(Vcpu_state).
// Disabling vCPU mode is deliberately unsupported (see comment below).
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_vcpu_control(unsigned char, L4_msg_tag const &tag,
                                Utcb *utcb)
{
  if (!space())
    return commit_result(-L4_err::EInval);

  User<Vcpu_state>::Ptr vcpu(0);

  if (tag.words() >= 2)
    vcpu = User<Vcpu_state>::Ptr((Vcpu_state*)utcb->values[1]);

  Mword del_state = 0;
  Mword add_state = 0;

  if (vcpu)
    {
      Mword size = sizeof(Vcpu_state);
      if (utcb->values[0] & 0x10000)
        {
          size = Config::PAGE_SIZE;
          add_state |= Thread_ext_vcpu_enabled;
        }

      // The state area must be backed by kernel-user memory of the
      // task so the kernel can access it directly.
      Space::Ku_mem const *vcpu_m
        = space()->find_ku_mem(vcpu, size);

      if (!vcpu_m)
        return commit_result(-L4_err::EInval);

      add_state |= Thread_vcpu_enabled;
      _vcpu_state.set(vcpu, vcpu_m->kern_addr(vcpu));
    }
  else
    return commit_result(-L4_err::EInval);

  /* hm, we do not allow to disable vCPU mode, it's one way enable
  else
    {
      del_state |= Thread_vcpu_enabled | Thread_vcpu_user_mode
                   | Thread_vcpu_fpu_disabled | Thread_ext_vcpu_enabled;
    }
  */

  // Apply the state change on the thread's home CPU.
  drq_state_change(~del_state, add_state);

  return commit_result(0);
}
506
507
508 // -------------------------------------------------------------------
509 // Thread::ex_regs class system calls
510
// Core ex_regs implementation on this thread: report the previous
// user ip/sp/flags via the out parameters (if given), then apply the
// requested changes — new ip/sp (a value of ~0UL means "keep"),
// cancel ongoing activity, trigger an exception — per the Exr_* bits
// in 'ops'. Returns false if the thread is invalid or has no space.
PUBLIC
bool
Thread_object::ex_regs(Address ip, Address sp,
                Address *o_ip = 0, Address *o_sp = 0, Mword *o_flags = 0,
                Mword ops = 0)
{
  if (state(false) == Thread_invalid || !space())
    return false;

  // Make sure the user register state is in memory before we read or
  // modify it (only needed when operating on the current thread).
  if (current() == this)
    spill_user_state();

  if (o_sp) *o_sp = user_sp();
  if (o_ip) *o_ip = user_ip();
  if (o_flags) *o_flags = user_flags();

  // Changing the run state is only possible when the thread is not in
  // an exception.
  if (!(ops & Exr_cancel) && (state() & Thread_in_exception))
    // XXX Maybe we should return false here.  Previously, we actually
    // did so, but we also actually didn't do any state modification.
    // If you change this value, make sure the logic in
    // sys_thread_ex_regs still works (in particular,
    // ex_regs_cap_handler and friends should still be called).
    return true;

  if (state() & Thread_dead)    // resurrect thread
    state_change_dirty(~Thread_dead, Thread_ready);

  else if (ops & Exr_cancel)
    // cancel ongoing IPC or other activity
    state_add_dirty(Thread_cancel | Thread_ready);

  if (ops & Exr_trigger_exception)
    {
      extern char leave_by_trigger_exception[];
      do_trigger_exception(regs(), leave_by_trigger_exception);
    }

  if (ip != ~0UL)
    user_ip(ip);

  if (sp != ~0UL)
    user_sp (sp);

  // Propagate the modified register state back for the current thread.
  if (current() == this)
    fill_user_state();

  return true;
}
561
// UTCB-based ex_regs wrapper: unpack ops/ip/sp from the message words,
// trace the request, run ex_regs(), and write the previous
// flags/ip/sp back into the UTCB (three result words).
PUBLIC inline
L4_msg_tag
Thread_object::ex_regs(Utcb *utcb)
{
  Address ip = utcb->values[1];
  Address sp = utcb->values[2];
  Mword flags;
  Mword ops = utcb->values[0];

  LOG_TRACE("Ex-regs", "exr", current(), __fmt_thread_exregs,
      Log_thread_exregs *l = tbe->payload<Log_thread_exregs>();
      l->id = dbg_id();
      l->ip = ip; l->sp = sp; l->op = ops;);

  // ip/sp are reused as out parameters and now hold the old values.
  if (!ex_regs(ip, sp, &ip, &sp, &flags, ops))
    return commit_result(-L4_err::EInval);

  utcb->values[0] = flags;
  utcb->values[1] = ip;
  utcb->values[2] = sp;

  return commit_result(0, 3);
}
585
586 PRIVATE static
587 unsigned
588 Thread_object::handle_remote_ex_regs(Drq *, Context *self, void *p)
589 {
590   Remote_syscall *params = reinterpret_cast<Remote_syscall*>(p);
591   params->result = nonull_static_cast<Thread_object*>(self)->ex_regs(params->thread->utcb().access());
592   return params->result.proto() == 0 ? Drq::Need_resched : 0;
593 }
594
595 PRIVATE inline NOEXPORT
596 L4_msg_tag
597 Thread_object::sys_ex_regs(L4_msg_tag const &tag, Utcb *utcb)
598 {
599   if (tag.words() != 3)
600     return commit_result(-L4_err::EInval);
601
602   if (current() == this)
603     return ex_regs(utcb);
604
605   Remote_syscall params;
606   params.thread = current_thread();
607
608   drq(handle_remote_ex_regs, &params, 0, Drq::Any_ctxt);
609   return params.result;
610 }
611
// Switch execution to this thread (donating the current timeslice)
// and report timeslice information back in the UTCB.
// Returns immediately when switching to self or when the target runs
// on a different CPU.
PRIVATE inline NOEXPORT NEEDS["timer.h"]
L4_msg_tag
Thread_object::sys_thread_switch(L4_msg_tag const &/*tag*/, Utcb *utcb)
{
  Context *curr = current();

  if (curr == this)
    return commit_result(0);

  // Cross-CPU switch is not supported; silently succeed.
  if (current_cpu() != cpu())
    return commit_result(0);

#ifdef FIXME
  Sched_context * const cs = current_sched();
#endif

  if (curr != this
      && ((state() & (Thread_ready | Thread_suspended)) == Thread_ready))
    {
      // Target is runnable: switch to it directly.
      curr->switch_exec_schedule_locked (this, Not_Helping);
      reinterpret_cast<Utcb::Time_val*>(utcb->values)->t = 0; // Assume timeslice was used up
      return commit_result(0, Utcb::Time_val::Words);
    }

#if 0 // FIXME: provide API for multiple sched contexts
      // Compute remaining quantum length of timeslice
      regs->left(timeslice_timeout.cpu(cpu())->get_timeout(Timer::system_clock()));

      // Yield current global timeslice
      cs->owner()->switch_sched(cs->id() ? cs->next() : cs);
#endif
  // Target not runnable: report the remaining timeslice and yield.
  reinterpret_cast<Utcb::Time_val*>(utcb->values)->t
    = timeslice_timeout.cpu(current_cpu())->get_timeout(Timer::system_clock());
  curr->schedule();

  return commit_result(0, Utcb::Time_val::Words);
}
649
650
651
652 // -------------------------------------------------------------------
653 // Gather statistics information about thread execution
654
655 PRIVATE
656 unsigned
657 Thread_object::sys_thread_stats_remote(void *data)
658 {
659   update_consumed_time();
660   *(Clock::Time *)data = consumed_time();
661   return 0;
662 }
663
// DRQ trampoline: forwards to sys_thread_stats_remote() on the
// thread's home CPU; 'data' points to the caller's Clock::Time slot.
PRIVATE static
unsigned
Thread_object::handle_sys_thread_stats_remote(Drq *, Context *self, void *data)
{
  return nonull_static_cast<Thread_object*>(self)->sys_thread_stats_remote(data);
}
670
// Read the accumulated CPU time consumed by this thread and return it
// in the UTCB. If the thread lives on another CPU the value is
// fetched via DRQ on its home CPU; otherwise it is read locally.
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_thread_stats(L4_msg_tag const &/*tag*/, Utcb *utcb)
{
  Clock::Time value;

  if (cpu() != current_cpu())
    drq(handle_sys_thread_stats_remote, &value, 0, Drq::Any_ctxt);
  else
    {
      // Respect the fact that the consumed time is only updated on context switch
      if (this == current())
        update_consumed_time();
      value = consumed_time();
    }

  reinterpret_cast<Utcb::Time_val *>(utcb->values)->t = value;

  return commit_result(0, Utcb::Time_val::Words);
}
691
692