// kernel/fiasco/src/kern/thread_object.cpp (l4.git)
INTERFACE:

#include "kobject.h"
#include "thread.h"

class Thread_object : public Thread
{
private:
  struct Remote_syscall
  {
    Thread *thread;
    L4_msg_tag result;
  };
};

class Obj_cap : public L4_obj_ref
{
};


// ---------------------------------------------------------------------------
IMPLEMENTATION:

#include "context.h"
#include "fpu.h"
#include "map_util.h"
#include "processor.h"
#include "task.h"
#include "thread_state.h"
#include "timer.h"



PUBLIC inline
Obj_cap::Obj_cap(L4_obj_ref const &o) : L4_obj_ref(o) {}

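/*
 * Resolve this capability reference to a kernel object, as implemented
 * below: a reply capability resolves to the recorded caller (and is
 * consumed unless 'dbg' is set), the special self reference resolves to
 * the current thread with full rights, and any other reference goes
 * through the object-space lookup of the current task.
 */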
PUBLIC inline NEEDS["kobject.h"]
Kobject_iface *
Obj_cap::deref(unsigned char *rights = 0, bool dbg = false)
{
  Thread *current = current_thread();
  if (op() & L4_obj_ref::Ipc_reply)
    {
      if (rights) *rights = current->caller_rights();
      Thread *ca = static_cast<Thread*>(current->caller());
      if (!dbg)
        current->set_caller(0,0);
      return ca;
    }

  if (EXPECT_FALSE(special()))
    {
      if (!self())
        return 0;

      if (rights) *rights = L4_fpage::RWX;
      return current_thread();
    }

  return current->space()->obj_space()->lookup_local(cap(), rights);
}

PUBLIC inline NEEDS["kobject.h"]
bool
Obj_cap::revalidate(Kobject_iface *o)
{
  return deref() == o;
}

PUBLIC
Thread_object::Thread_object() : Thread() {}

PUBLIC
Thread_object::Thread_object(Context_mode_kernel k) : Thread(k) {}

PUBLIC virtual
bool
Thread_object::put()
{ return dec_ref() == 0; }



/** Deallocator.  Returns the memory used for the thread control block to
    the allocator and accounts it back to the owning RAM quota.
 */
PUBLIC
void
Thread_object::operator delete(void *_t)
{
  Thread_object * const t = nonull_static_cast<Thread_object*>(_t);
  Ram_quota * const q = t->_quota;
  Mapped_allocator::allocator()->q_unaligned_free(q, Config::thread_block_size, t);

  LOG_TRACE("Kobject delete", "del", current(), __fmt_kobj_destroy,
      Log_destroy *l = tbe->payload<Log_destroy>();
      l->id = t->dbg_id();
      l->obj = t;
      l->type = "Thread";
      l->ram = q->current());
}


PUBLIC
void
Thread_object::destroy(Kobject ***rl)
{
  Kobject::destroy(rl);
  check_kdb(kill());
  assert_kdb(_magic == magic);

}

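/*
 * Entry point for invocations of a thread capability.  Anything that is
 * not a plain send with the thread protocol label is treated as ordinary
 * IPC to this thread; otherwise the opcode in utcb->values[0] selects one
 * of the thread-object operations dispatched in the switch below.
 */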
PUBLIC
void
Thread_object::invoke(L4_obj_ref /*self*/, Mword rights, Syscall_frame *f, Utcb *utcb)
{
  register L4_obj_ref::Operation op = f->ref().op();
  if (((op != 0) && !(op & L4_obj_ref::Ipc_send))
      || (op & L4_obj_ref::Ipc_reply)
      || f->tag().proto() != L4_msg_tag::Label_thread)
    {
      /* we do IPC */
      Thread *ct = current_thread();
      Thread *sender = 0;
      Thread *partner = 0;
      bool have_rcv = false;

      if (EXPECT_FALSE(!check_sys_ipc(op, &partner, &sender, &have_rcv)))
        {
          utcb->error = L4_error::Not_existent;
          return;
        }

      ct->do_ipc(f->tag(), partner, partner, have_rcv, sender,
                 f->timeout(), f, rights);
      return;
    }

  switch (utcb->values[0] & Opcode_mask)
    {
    case Op_control:
      f->tag(sys_control(rights, f->tag(), utcb));
      return;
    case Op_ex_regs:
      f->tag(sys_ex_regs(f->tag(), utcb));
      return;
    case Op_switch:
      f->tag(sys_thread_switch(f->tag(), utcb));
      return;
    case Op_stats:
      f->tag(sys_thread_stats(f->tag(), utcb));
      return;
    case Op_vcpu_resume:
      f->tag(sys_vcpu_resume(f->tag(), utcb));
      return;
    case Op_register_del_irq:
      f->tag(sys_register_delete_irq(f->tag(), utcb, utcb));
      return;
    case Op_modify_senders:
      f->tag(sys_modify_senders(f->tag(), utcb, utcb));
      return;
    case Op_vcpu_control:
      f->tag(sys_vcpu_control(rights, f->tag(), utcb));
      return;
    default:
      L4_msg_tag tag = f->tag();
      if (invoke_arch(tag, utcb))
        f->tag(tag);
      else
        f->tag(commit_result(-L4_err::ENosys));
      return;
    }
}


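/*
 * Op_vcpu_resume, roughly as implemented below: optionally switch the
 * vCPU user task, map any flex pages sent with the message into the vCPU
 * user space, deliver a pending IRQ as an IPC upcall if asynchronous IRQs
 * are enabled, and finally resume into the kernel- or user-mode address
 * space via resume_vcpu().
 */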
PRIVATE inline
L4_msg_tag
Thread_object::sys_vcpu_resume(L4_msg_tag const &tag, Utcb *utcb)
{
  if (this != current() || !(state() & Thread_vcpu_enabled))
    return commit_result(-L4_err::EInval);

  Obj_space *s = space()->obj_space();
  Vcpu_state *vcpu = vcpu_state().access(true);

  L4_obj_ref user_task = vcpu->user_task;
  if (user_task.valid())
    {
      unsigned char task_rights = 0;
      Task *task = Kobject::dcast<Task*>(s->lookup_local(user_task.cap(),
                                                         &task_rights));

      if (EXPECT_FALSE(task && !(task_rights & L4_fpage::W)))
        return commit_result(-L4_err::EPerm);

      if (task != vcpu_user_space())
        vcpu_set_user_space(task);

      vcpu->user_task = L4_obj_ref();
    }
  else if (user_task.op() == L4_obj_ref::Ipc_reply)
    vcpu_set_user_space(0);

  L4_snd_item_iter snd_items(utcb, tag.words());
  int items = tag.items();
  for (; items && snd_items.more(); --items)
    {
      if (EXPECT_FALSE(!snd_items.next()))
        break;

      // XXX: need to take existence lock for map
      cpu_lock.clear();

      L4_snd_item_iter::Item const *const item = snd_items.get();
      L4_fpage sfp(item->d);

      Reap_list rl;
      L4_error err = fpage_map(space(), sfp,
                               vcpu_user_space(), L4_fpage::all_spaces(),
                               item->b, &rl);
      rl.del();

      cpu_lock.lock();

      if (EXPECT_FALSE(!err.ok()))
        return commit_error(utcb, err);
    }

  if ((vcpu->_saved_state & Vcpu_state::F_irqs)
      && (vcpu->sticky_flags & Vcpu_state::Sf_irq_pending))
    {
      assert_kdb(cpu_lock.test());
      do_ipc(L4_msg_tag(), 0, 0, true, 0,
             L4_timeout_pair(L4_timeout::Zero, L4_timeout::Zero),
             &vcpu->_ipc_regs, 3);

      vcpu = vcpu_state().access(true);

      if (EXPECT_TRUE(!vcpu->_ipc_regs.tag().has_error()
                      || this->utcb().access(true)->error.error() == L4_error::R_timeout))
        {
          vcpu->_ts.set_ipc_upcall();

          Address sp;

          // the resume targeted user mode, so the IRQ upcall is delivered as if it came from user mode
          if (vcpu->_saved_state & Vcpu_state::F_user_mode)
            sp = vcpu->_entry_sp;
          else
            sp = vcpu->_ts.sp();

          LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
              Vcpu_log *l = tbe->payload<Vcpu_log>();
              l->type = 4;
              l->state = vcpu->state;
              l->ip = vcpu->_entry_ip;
              l->sp = sp;
              l->space = static_cast<Task*>(_space.vcpu_aware())->dbg_id();
              );

          fast_return_to_user(vcpu->_entry_ip, sp, vcpu_state().usr().get());
        }
    }

  vcpu->state = vcpu->_saved_state;
  Task *target_space = nonull_static_cast<Task*>(space());
  bool user_mode = false;

  if (vcpu->state & Vcpu_state::F_user_mode)
    {
      if (!vcpu_user_space())
        return commit_result(-L4_err::EInval);

      user_mode = true;

      if (!(vcpu->state & Vcpu_state::F_fpu_enabled))
        {
          state_add_dirty(Thread_vcpu_fpu_disabled);
          Fpu::disable();
        }
      else
        state_del_dirty(Thread_vcpu_fpu_disabled);

      target_space = static_cast<Task*>(vcpu_user_space());
    }

  LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
      Vcpu_log *l = tbe->payload<Vcpu_log>();
      l->type = 0;
      l->state = vcpu->state;
      l->ip = vcpu->_ts.ip();
      l->sp = vcpu->_ts.sp();
      l->space = target_space->dbg_id();
      );

  return commit_result(target_space->resume_vcpu(this, vcpu, user_mode));
}

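/*
 * Op_modify_senders: rewrite the IPC labels of all senders queued on this
 * thread's sender list.  The list is walked in bounded batches with a
 * preemption point in between; the cursor stored in the sender list keeps
 * the walk consistent across preemptions (hence the EBusy check at the
 * top).
 */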
PRIVATE inline NOEXPORT NEEDS["processor.h"]
L4_msg_tag
Thread_object::sys_modify_senders(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
{
  if (sender_list()->cursor())
    return Kobject_iface::commit_result(-L4_err::EBusy);

  if (0)
    printf("MODIFY ID (%08lx:%08lx->%08lx:%08lx\n",
           in->values[1], in->values[2],
           in->values[3], in->values[4]);


  int elems = tag.words();

  if (elems < 5)
    return Kobject_iface::commit_result(0);

  --elems;

  elems = elems / 4;

  ::Prio_list_elem *c = sender_list()->head();
  while (c)
    {
      // the batch size is arbitrary; it bounds the work done between preemption points
      for (int cnt = 50; c && cnt > 0; --cnt)
        {
          Sender *s = Sender::cast(c);
          s->modify_label(&in->values[1], elems);
          c = c->next();
        }

      if (!c)
        return Kobject_iface::commit_result(0);

      sender_list()->cursor(c);
      Proc::preemption_point();
      c = sender_list()->cursor();
    }
  return Kobject_iface::commit_result(0);
}

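/*
 * Op_register_del_irq: look up the IRQ object sent as an object flex page
 * and register it as this thread's deletion IRQ.  The caller needs X
 * rights on the IRQ capability.
 */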
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_register_delete_irq(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
{
  L4_snd_item_iter snd_items(in, tag.words());

  if (!tag.items() || !snd_items.next())
    return Kobject_iface::commit_result(-L4_err::EInval);

  L4_fpage bind_irq(snd_items.get()->d);
  if (EXPECT_FALSE(!bind_irq.is_objpage()))
    return Kobject_iface::commit_error(in, L4_error::Overflow);

  register Context *const c_thread = ::current();
  register Space *const c_space = c_thread->space();
  register Obj_space *const o_space = c_space->obj_space();
  unsigned char irq_rights = 0;
  Irq_base *irq
    = Irq_base::dcast(o_space->lookup_local(bind_irq.obj_index(), &irq_rights));

  if (!irq)
    return Kobject_iface::commit_result(-L4_err::EInval);

  if (EXPECT_FALSE(!(irq_rights & L4_fpage::X)))
    return Kobject_iface::commit_result(-L4_err::EPerm);

  register_delete_irq(irq);
  return Kobject_iface::commit_result(0);
}


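/*
 * Op_control: change pager, exception handler, task binding, and flags of
 * this thread.  Input layout as read below: values[0] holds the opcode
 * and control flags, values[1] the new pager capability, values[2] the
 * new exception handler capability, values[4] the alien flag, and
 * values[5] the UTCB address used when binding to a task sent as an
 * object flex page.  The previous pager and exception handler are
 * returned in values[1] and values[2].
 */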
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_control(unsigned char rights, L4_msg_tag const &tag, Utcb *utcb)
{
  if (EXPECT_FALSE(!(rights & L4_fpage::W)))
    return commit_result(-L4_err::EPerm);

  if (EXPECT_FALSE(tag.words() < 6))
    return commit_result(-L4_err::EInval);

  Context *curr = current();
  Obj_space *s = curr->space()->obj_space();
  L4_snd_item_iter snd_items(utcb, tag.words());
  Task *task = 0;
  User<Utcb>::Ptr utcb_addr(0);

  Mword flags = utcb->values[0];

  Mword _old_pager = _pager.raw() << L4_obj_ref::Cap_shift;
  Mword _old_exc_handler = _exc_handler.raw() << L4_obj_ref::Cap_shift;

  Thread_ptr _new_pager(~0UL);
  Thread_ptr _new_exc_handler(~0UL);

  if (flags & Ctl_set_pager)
    _new_pager = Thread_ptr(utcb->values[1] >> L4_obj_ref::Cap_shift);

  if (flags & Ctl_set_exc_handler)
    _new_exc_handler = Thread_ptr(utcb->values[2] >> L4_obj_ref::Cap_shift);

  if (flags & Ctl_bind_task)
    {
      if (EXPECT_FALSE(!tag.items() || !snd_items.next()))
        return commit_result(-L4_err::EInval);

      L4_fpage bind_task(snd_items.get()->d);

      if (EXPECT_FALSE(!bind_task.is_objpage()))
        return commit_result(-L4_err::EInval);

      unsigned char task_rights = 0;
      task = Kobject::dcast<Task*>(s->lookup_local(bind_task.obj_index(), &task_rights));

      if (EXPECT_FALSE(!(task_rights & L4_fpage::W)))
        return commit_result(-L4_err::EPerm);

      if (!task)
        return commit_result(-L4_err::EInval);

      utcb_addr = User<Utcb>::Ptr((Utcb*)utcb->values[5]);

      if (EXPECT_FALSE(!bind(task, utcb_addr)))
        return commit_result(-L4_err::EInval); // unbind first !!
    }

  Mword del_state = 0;
  Mword add_state = 0;

  long res = control(_new_pager, _new_exc_handler);

  if (res < 0)
    return commit_result(res);

  if ((res = sys_control_arch(utcb)) < 0)
    return commit_result(res);

  // FIXME: must be done cross-CPU safe; possibly some parts above, too
  if (flags & Ctl_alien_thread)
    {
      if (utcb->values[4] & Ctl_alien_thread)
        {
          add_state |= Thread_alien;
          del_state |= Thread_dis_alien;
        }
      else
        del_state |= Thread_alien;
    }

  if (del_state || add_state)
    drq_state_change(~del_state, add_state);

  utcb->values[1] = _old_pager;
  utcb->values[2] = _old_exc_handler;

  return commit_result(0, 3);
}


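/*
 * Op_vcpu_control: enable vCPU mode for this thread.  values[1] is the
 * user-visible address of the vCPU state, which must lie within kernel-
 * user memory of the task (find_ku_mem).  Bit 0x10000 in values[0]
 * requests extended vCPU state and therefore a whole page instead of
 * sizeof(Vcpu_state).  Disabling vCPU mode is not supported.
 */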
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_vcpu_control(unsigned char, L4_msg_tag const &tag,
                                Utcb *utcb)
{
  if (!space())
    return commit_result(-L4_err::EInval);

  User<Vcpu_state>::Ptr vcpu(0);

  if (tag.words() >= 2)
    vcpu = User<Vcpu_state>::Ptr((Vcpu_state*)utcb->values[1]);

  Mword del_state = 0;
  Mword add_state = 0;

  if (vcpu)
    {
      Mword size = sizeof(Vcpu_state);
      if (utcb->values[0] & 0x10000)
        {
          size = Config::PAGE_SIZE;
          add_state |= Thread_ext_vcpu_enabled;
        }

      Space::Ku_mem const *vcpu_m
        = space()->find_ku_mem(vcpu, size);

      if (!vcpu_m)
        return commit_result(-L4_err::EInval);

      add_state |= Thread_vcpu_enabled;
      _vcpu_state.set(vcpu, vcpu_m->kern_addr(vcpu));
    }
  else
    return commit_result(-L4_err::EInval);

  /* Note: disabling vCPU mode is not allowed; enabling it is a one-way operation.
  else
    {
      del_state |= Thread_vcpu_enabled | Thread_vcpu_user_mode
                   | Thread_vcpu_fpu_disabled | Thread_ext_vcpu_enabled;
    }
  */

  drq_state_change(~del_state, add_state);

  return commit_result(0);
}


// -------------------------------------------------------------------
// Thread::ex_regs class system calls

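/*
 * Core ex_regs operation.  Reads back the current user IP, SP, and flags,
 * then installs new values unless they are ~0UL.  Dead threads are
 * resurrected into the ready state, Exr_cancel aborts ongoing IPC or
 * other activity, and Exr_trigger_exception forces an exception upcall.
 */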
PUBLIC
bool
Thread_object::ex_regs(Address ip, Address sp,
                Address *o_ip = 0, Address *o_sp = 0, Mword *o_flags = 0,
                Mword ops = 0)
{
  if (state(false) == Thread_invalid || !space())
    return false;

  if (current() == this)
    spill_user_state();

  if (o_sp) *o_sp = user_sp();
  if (o_ip) *o_ip = user_ip();
  if (o_flags) *o_flags = user_flags();

  // Changing the run state is only possible when the thread is not in
  // an exception.
  if (!(ops & Exr_cancel) && (state() & Thread_in_exception))
    // XXX Maybe we should return false here.  Previously we did, but we
    // also did not perform any state modification then.  If you change
    // this return value, make sure the logic in sys_thread_ex_regs still
    // works (in particular, ex_regs_cap_handler and friends should still
    // be called).
    return true;

  if (state() & Thread_dead)    // resurrect thread
    state_change_dirty(~Thread_dead, Thread_ready);

  else if (ops & Exr_cancel)
    // cancel ongoing IPC or other activity
    state_add_dirty(Thread_cancel | Thread_ready);

  if (ops & Exr_trigger_exception)
    {
      extern char leave_by_trigger_exception[];
      do_trigger_exception(regs(), leave_by_trigger_exception);
    }

  if (ip != ~0UL)
    user_ip(ip);

  if (sp != ~0UL)
    user_sp(sp);

  if (current() == this)
    fill_user_state();

  return true;
}

PUBLIC inline
L4_msg_tag
Thread_object::ex_regs(Utcb *utcb)
{
  Address ip = utcb->values[1];
  Address sp = utcb->values[2];
  Mword flags;
  Mword ops = utcb->values[0];

  LOG_TRACE("Ex-regs", "exr", current(), __fmt_thread_exregs,
      Log_thread_exregs *l = tbe->payload<Log_thread_exregs>();
      l->id = dbg_id();
      l->ip = ip; l->sp = sp; l->op = ops;);

  if (!ex_regs(ip, sp, &ip, &sp, &flags, ops))
    return commit_result(-L4_err::EInval);

  utcb->values[0] = flags;
  utcb->values[1] = ip;
  utcb->values[2] = sp;

  return commit_result(0, 3);
}

PRIVATE static
unsigned
Thread_object::handle_remote_ex_regs(Drq *, Context *self, void *p)
{
  Remote_syscall *params = reinterpret_cast<Remote_syscall*>(p);
  params->result = nonull_static_cast<Thread_object*>(self)->ex_regs(params->thread->utcb().access());
  return params->result.proto() == 0 ? Drq::Need_resched : 0;
}

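/*
 * Op_ex_regs: expects exactly three message words (flags, IP, SP).  If
 * the target is the current thread the operation runs directly; otherwise
 * it is shipped to the target's CPU as a DRQ (handle_remote_ex_regs
 * above) and executed there against the caller's UTCB.
 */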
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_ex_regs(L4_msg_tag const &tag, Utcb *utcb)
{
  if (tag.words() != 3)
    return commit_result(-L4_err::EInval);

  if (current() == this)
    return ex_regs(utcb);

  Remote_syscall params;
  params.thread = current_thread();

  drq(handle_remote_ex_regs, &params, 0, Drq::Any_ctxt);
  return params.result;
}

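/*
 * Op_switch: donate the CPU to this thread.  The switch only happens when
 * the target is on the current CPU and ready but not suspended; in that
 * case the remaining time reported back is zero.  Otherwise the caller
 * just yields and the remaining quantum of the current timeslice is
 * returned in the UTCB.
 */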
PRIVATE inline NOEXPORT NEEDS["timer.h"]
L4_msg_tag
Thread_object::sys_thread_switch(L4_msg_tag const &/*tag*/, Utcb *utcb)
{
  Context *curr = current();

  if (curr == this)
    return commit_result(0);

  if (current_cpu() != cpu())
    return commit_result(0);

#ifdef FIXME
  Sched_context * const cs = current_sched();
#endif

  if (curr != this
      && ((state() & (Thread_ready | Thread_suspended)) == Thread_ready))
    {
      curr->switch_exec_schedule_locked(this, Not_Helping);
      reinterpret_cast<Utcb::Time_val*>(utcb->values)->t = 0; // Assume timeslice was used up
      return commit_result(0, Utcb::Time_val::Words);
    }

#if 0 // FIXME: provide API for multiple sched contexts
      // Compute remaining quantum length of timeslice
      regs->left(timeslice_timeout.cpu(cpu())->get_timeout(Timer::system_clock()));

      // Yield current global timeslice
      cs->owner()->switch_sched(cs->id() ? cs->next() : cs);
#endif
  reinterpret_cast<Utcb::Time_val*>(utcb->values)->t
    = timeslice_timeout.cpu(current_cpu())->get_timeout(Timer::system_clock());
  curr->schedule();

  return commit_result(0, Utcb::Time_val::Words);
}



// -------------------------------------------------------------------
// Gather statistics information about thread execution

PRIVATE
unsigned
Thread_object::sys_thread_stats_remote(void *data)
{
  update_consumed_time();
  *(Clock::Time *)data = consumed_time();
  return 0;
}

PRIVATE static
unsigned
Thread_object::handle_sys_thread_stats_remote(Drq *, Context *self, void *data)
{
  return nonull_static_cast<Thread_object*>(self)->sys_thread_stats_remote(data);
}

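/*
 * Op_stats: report the consumed CPU time of this thread.  If the thread
 * lives on a different CPU, the value is fetched via a DRQ on that CPU;
 * locally, the consumed time is refreshed first when querying the current
 * thread, because it is otherwise only updated on context switches.
 */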
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_thread_stats(L4_msg_tag const &/*tag*/, Utcb *utcb)
{
  Clock::Time value;

  if (cpu() != current_cpu())
    drq(handle_sys_thread_stats_remote, &value, 0, Drq::Any_ctxt);
  else
    {
      // Respect the fact that the consumed time is only updated on context switch
      if (this == current())
        update_consumed_time();
      value = consumed_time();
    }

  reinterpret_cast<Utcb::Time_val *>(utcb->values)->t = value;

  return commit_result(0, Utcb::Time_val::Words);
}
