]> rtime.felk.cvut.cz Git - l4.git/blob - kernel/fiasco/src/kern/thread_object.cpp
update
[l4.git] / kernel / fiasco / src / kern / thread_object.cpp
1 INTERFACE:
2
3 #include "kobject.h"
4 #include "thread.h"
5
// Kernel-object incarnation of a thread: combines the execution
// context (Thread) with the generic capability interface (Kobject),
// so a thread can be invoked through a capability.
class Thread_object :
  public Thread,
  public Kobject
{
  FIASCO_DECLARE_KOBJ();
};
12
// Thin wrapper around L4_obj_ref that adds capability-dereferencing
// helpers (implemented in the IMPLEMENTATION section of this file).
class Obj_cap : public L4_obj_ref
{
};
16
17
18 // ---------------------------------------------------------------------------
19 IMPLEMENTATION:
20
21 #include "context.h"
22 #include "fpu.h"
23 #include "map_util.h"
24 #include "processor.h"
25 #include "task.h"
26 #include "thread_state.h"
27 #include "timer.h"
28
29 FIASCO_DEFINE_KOBJ(Thread_object);
30
31
/** Construct an Obj_cap from a plain L4 object reference. */
PUBLIC inline
Obj_cap::Obj_cap(L4_obj_ref const &o) : L4_obj_ref(o) {}
34
/** Resolve this capability reference to a kernel object.
    @param rights if non-null, filled with the rights of the capability
                  (caller rights for reply caps, RWX for a self reference)
    @param dbg    debug lookup: do not consume the reply capability
    @return the referenced object, or 0 for an invalid, non-self reference */
PUBLIC inline NEEDS["kobject.h"]
Kobject_iface *
Obj_cap::deref(unsigned char *rights = 0, bool dbg = false)
{
  Thread *current = current_thread();
  if (flags() & L4_obj_ref::Ipc_reply)
    {
      // Reply capability: designates the current thread's caller.
      if (rights) *rights = current->caller_rights();
      Thread *ca = static_cast<Thread*>(current->caller());
      // A real (non-debug) dereference consumes the reply cap by
      // clearing the caller link.
      if (!dbg)
        current->set_caller(0,0);
      return ca;
    }

  if (EXPECT_FALSE(invalid()))
    {
      if (!self())
        return 0;

      // Self reference: the current thread with full rights.
      if (rights) *rights = L4_fpage::RWX;
      return current_thread();
    }

  // Regular case: look up the capability slot in the caller's object space.
  return current->space()->obj_space()->lookup_local(cap(), rights);
}
60
61 PUBLIC inline NEEDS["kobject.h"]
62 bool
63 Obj_cap::revalidate(Kobject_iface *o)
64 {
65   return deref() == o;
66 }
67
/** Default constructor: forwards to Thread's default constructor. */
PUBLIC inline
Thread_object::Thread_object() : Thread() {}
70
/** Construct a thread in kernel context mode (see Context_mode_kernel). */
PUBLIC inline
Thread_object::Thread_object(Context_mode_kernel k) : Thread(k) {}
73
/** Drop one reference.
    @return true if this was the last reference (object may be deleted) */
PUBLIC virtual
bool
Thread_object::put()
{ return dec_ref() == 0; }
78
79
80
81 /** Deallocator.  This function currently does nothing: We do not free up
82     space allocated to thread-control blocks.
83  */
84 PUBLIC
85 void
86 Thread_object::operator delete(void *_t)
87 {
88   Thread_object * const t = nonull_static_cast<Thread_object*>(_t);
89   Ram_quota * const q = t->_quota;
90   Mapped_allocator::allocator()->q_unaligned_free(q, Config::thread_block_size, t);
91
92   LOG_TRACE("Kobject delete", "del", current(), __fmt_kobj_destroy,
93       Log_destroy *l = tbe->payload<Log_destroy>();
94       l->id = t->dbg_id();
95       l->obj = t;
96       l->type = "Thread";
97       l->ram = q->current());
98 }
99
100
/** Destroy the thread object: tear down the generic kobject part,
    then kill the thread itself.
    @param rl reap list collecting objects for deferred deletion */
PUBLIC
void
Thread_object::destroy(Kobject ***rl)
{
  Kobject::destroy(rl);
  check_kdb(kill());
#if 0
  assert_kdb(state() == Thread_dead);
#endif
  assert_kdb(_magic == magic);

}
113
/** Capability invocation entry point for threads.
    A message that is not a plain send of the thread protocol (i.e. has
    non-send flags, is a reply, or carries a different protocol label)
    is treated as IPC to/from this thread.  Otherwise the opcode in
    utcb->values[0] selects one of the thread system calls below.
    @param rights rights of the invoked capability
    @param f      syscall frame (tag in, result tag out)
    @param utcb   caller's UTCB holding the message words */
PUBLIC
void
Thread_object::invoke(L4_obj_ref /*self*/, Mword rights, Syscall_frame *f, Utcb *utcb)
{
  register unsigned flags = f->ref().flags();
  if (((flags != 0) && !(flags & L4_obj_ref::Ipc_send))
      || (flags & L4_obj_ref::Ipc_reply)
      || f->tag().proto() != L4_msg_tag::Label_thread)
    {
      /* we do IPC */
      Thread *ct = current_thread();
      Thread *sender = 0;
      Thread *partner = 0;
      bool have_rcv = false;

      // Derive send partner / receive sender from the flags; reject
      // combinations that do not form a valid IPC.
      if (EXPECT_FALSE(!check_sys_ipc(flags, &partner, &sender, &have_rcv)))
        {
          utcb->error = L4_error::Not_existent;
          return;
        }

      ct->do_ipc(f->tag(), partner, partner, have_rcv, sender,
                 f->timeout(), f, rights);
      return;
    }

  // Thread-protocol call: dispatch on the opcode word.
  switch (utcb->values[0] & Opcode_mask)
    {
    case Op_control:
      f->tag(sys_control(rights, f->tag(), utcb));
      return;
    case Op_ex_regs:
      f->tag(sys_ex_regs(f->tag(), utcb));
      return;
    case Op_switch:
      f->tag(sys_thread_switch(f->tag(), utcb));
      return;
    case Op_stats:
      f->tag(sys_thread_stats(f->tag(), utcb));
      return;
    case Op_vcpu_resume:
      f->tag(sys_vcpu_resume(f->tag(), utcb));
      return;
    case Op_register_del_irq:
      f->tag(sys_register_delete_irq(f->tag(), utcb, utcb));
      return;
    case Op_modify_senders:
      f->tag(sys_modify_senders(f->tag(), utcb, utcb));
      return;
    default:
      // Unknown opcode: give the architecture-specific extension a
      // chance before reporting ENosys.
      L4_msg_tag tag = f->tag();
      if (invoke_arch(tag, utcb))
        f->tag(tag);
      else
        f->tag(commit_result(-L4_err::ENosys));
      return;
    }
}
172
/** Resume vCPU execution (Op_vcpu_resume).
    Must be invoked by the thread on itself with vCPU mode enabled.
    Processes an optional user-task rebind and map items from the
    message, delivers a pending vCPU IRQ as an IPC upcall if armed,
    and finally resumes execution from the saved vCPU state.  On the
    upcall fast path this function does not return normally (it calls
    fast_return_to_user()).
    @param tag  message tag (words/items of the resume message)
    @param utcb caller's UTCB holding the send items
    @return error tag on failure; otherwise control leaves via
            vcpu_resume()/fast_return_to_user() */
PRIVATE inline
L4_msg_tag
Thread_object::sys_vcpu_resume(L4_msg_tag const &tag, Utcb *utcb)
{
  // Only the thread itself may resume its vCPU, and only in vCPU mode.
  if (this != current() || !(state() & Thread_vcpu_enabled))
    return commit_result(-L4_err::EInval);

  Obj_space *s = space()->obj_space();
  Vcpu_state *vcpu = access_vcpu(true);

  // Optional rebinding of the vCPU user task from vcpu->user_task.
  L4_obj_ref user_task = vcpu->user_task;
  if (user_task.valid())
    {
      unsigned char task_rights = 0;
      Task *task = Kobject::dcast<Task*>(s->lookup_local(user_task.cap(),
                                                         &task_rights));

      // A resolvable task cap without write rights is rejected.
      if (EXPECT_FALSE(task && !(task_rights & L4_fpage::W)))
        return commit_result(-L4_err::EPerm);

      if (task != vcpu_user_space())
        vcpu_set_user_space(task);

      // Consume the request.
      vcpu->user_task = L4_obj_ref();
    }
  else if (user_task.flags() == L4_obj_ref::Ipc_reply)
    vcpu_set_user_space(0);

  // Map the send items of the message into the vCPU user space.
  L4_snd_item_iter snd_items(utcb, tag.words());
  int items = tag.items();
  for (; items && snd_items.more(); --items)
    {
      if (EXPECT_FALSE(!snd_items.next()))
        break;

      // fpage_map may take long; open a preemption window.
      cpu_lock.clear();

      L4_snd_item_iter::Item const *const item = snd_items.get();
      L4_fpage sfp(item->d);

      Reap_list rl;
      L4_error err = fpage_map(space(), sfp,
                               vcpu_user_space(), L4_fpage::all_spaces(),
                               item->b.raw(), &rl);
      rl.del();

      cpu_lock.lock();

      if (EXPECT_FALSE(!err.ok()))
        return commit_error(utcb, err);
    }

  // The vCPU state page may have been unmapped while the lock was
  // dropped; re-acquire the pointer.
  if (tag.items())
    vcpu = access_vcpu(true);

  // Pending vCPU IRQ with IRQs enabled in the saved state: receive it
  // via a zero-timeout IPC and deliver it as an upcall.
  if ((vcpu->_saved_state & Vcpu_state::F_irqs)
      && (vcpu->sticky_flags & Vcpu_state::Sf_irq_pending))
    {
      assert_kdb(cpu_lock.test());
      do_ipc(L4_msg_tag(), 0, 0, true, 0,
             L4_timeout_pair(L4_timeout::Zero, L4_timeout::Zero),
             &vcpu->_ipc_regs, 7);

      vcpu = access_vcpu(true);

      if (EXPECT_TRUE(!vcpu->_ipc_regs.tag().has_error()))
        {
          vcpu->_ts.set_ipc_upcall();

          Address sp;

          // Entry stack depends on whether the saved state was user mode.
          if (vcpu->_saved_state & Vcpu_state::F_user_mode)
            sp = vcpu->_entry_sp;
          else
            sp = vcpu->_ts.sp();

          LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
              Vcpu_log *l = tbe->payload<Vcpu_log>();
              l->type = 4;
              l->state = vcpu->state;
              l->ip = vcpu->_entry_ip;
              l->sp = sp;
              l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
              );

          // Does not return: enters the vCPU entry point directly.
          fast_return_to_user(vcpu->_entry_ip, sp);
        }
    }

  // --- CUT here for VM stuff
  // Normal resume path: restore the saved state into a trap frame.
  vcpu->state = vcpu->_saved_state;
  Trap_state ts;
  memcpy(&ts, &vcpu->_ts, sizeof(Trap_state));


  assert_kdb(cpu_lock.test());

  ts.set_ipc_upcall();

  // Never trust user-supplied CPU state blindly.
  ts.sanitize_user_state();

  if (vcpu->state & Vcpu_state::F_user_mode)
    {
      // User-mode resume requires a bound vCPU user task.
      if (!vcpu_user_space())
        return commit_result(-L4_err::EInval);

      vcpu->state |= Vcpu_state::F_traps | Vcpu_state::F_exceptions
                     | Vcpu_state::F_debug_exc;
      state_add_dirty(Thread_vcpu_user_mode);

      // Honor the guest's FPU-enable bit.
      if (!(vcpu->state & Vcpu_state::F_fpu_enabled))
        {
          state_add_dirty(Thread_vcpu_fpu_disabled);
          Fpu::disable();
        }
      else
        state_del_dirty(Thread_vcpu_fpu_disabled);

      vcpu_resume_user_arch();

      vcpu_user_space()->switchin_context(space());
    }

  LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
      Vcpu_log *l = tbe->payload<Vcpu_log>();
      l->type = 0;
      l->state = vcpu->state;
      l->ip = ts.ip();
      l->sp = ts.sp();
      l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
      );

  vcpu_resume(&ts, regs());
  // -- END NON VM

  // DO VM ENTRY
}
310
/** Op_modify_senders: rewrite the labels of all senders queued on this
    thread.  Walks the sender list in bounded chunks, storing a cursor
    and taking a preemption point between chunks so the (potentially
    long) list walk does not block the CPU.
    @param tag message tag; words 1..n carry the match/replace patterns
    @param in  caller UTCB with the pattern values
    @return EBusy while a previous walk is still in progress (cursor
            set), otherwise 0 */
PRIVATE inline NOEXPORT NEEDS["processor.h"]
L4_msg_tag
Thread_object::sys_modify_senders(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
{
  // A non-null cursor means a concurrent modify pass was preempted here.
  if (sender_list()->cursor())
    return Kobject_iface::commit_result(-L4_err::EBusy);

  if (0)  // debugging aid, disabled
    printf("MODIFY ID (%08lx:%08lx->%08lx:%08lx\n",
           in->values[1], in->values[2],
           in->values[3], in->values[4]);


  int elems = tag.words();

  if (elems < 5)
    return Kobject_iface::commit_result(0);

  // Word 0 is the opcode; each pattern entry occupies 4 words.
  --elems;

  elems = elems / 4;

  ::Prio_list_elem *c = sender_list()->head();
  while (c)
    {
      // this is kind of arbitrary
      for (int cnt = 50; c && cnt > 0; --cnt)
        {
          Sender *s = Sender::cast(c);
          s->modify_label(&in->values[1], elems);
          c = c->next();
        }

      if (!c)
        return Kobject_iface::commit_result(0);

      // Park the position, allow preemption, then re-read the cursor
      // (it may have been adjusted while we were preempted).
      sender_list()->cursor(c);
      Proc::preemption_point();
      c = sender_list()->cursor();
    }
  return Kobject_iface::commit_result(0);
}
353
/** Op_register_del_irq: register an IRQ object to be triggered when
    this thread is deleted.  The IRQ is passed as an object flexpage in
    the first send item and must carry the X right.
    @param tag message tag (must contain at least one item)
    @param in  caller UTCB with the send item
    @return 0 on success; EInval for a missing item or unresolvable
            cap, Overflow for a non-object flexpage, EPerm without X */
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_register_delete_irq(L4_msg_tag tag, Utcb const *in, Utcb * /*out*/)
{
  L4_snd_item_iter snd_items(in, tag.words());

  if (!tag.items() || !snd_items.next())
    return Kobject_iface::commit_result(-L4_err::EInval);

  L4_fpage bind_irq(snd_items.get()->d);
  if (EXPECT_FALSE(!bind_irq.is_objpage()))
    return Kobject_iface::commit_error(in, L4_error::Overflow);

  // Resolve the IRQ capability in the calling thread's object space.
  register Context *const c_thread = ::current();
  register Space *const c_space = c_thread->space();
  register Obj_space *const o_space = c_space->obj_space();
  unsigned char irq_rights = 0;
  Irq_base *irq
    = Irq_base::dcast(o_space->lookup_local(bind_irq.obj_index(), &irq_rights));

  if (!irq)
    return Kobject_iface::commit_result(-L4_err::EInval);

  if (EXPECT_FALSE(!(irq_rights & L4_fpage::X)))
    return Kobject_iface::commit_result(-L4_err::EPerm);

  register_delete_irq(irq);
  return Kobject_iface::commit_result(0);
}
383
384
/** Op_control: configure the thread (pager, exception handler, task
    binding, vCPU enable, alien mode).  Requires the W right on the
    invoked capability and at least 6 message words.
    On success the previous pager and exception-handler caps are
    returned in utcb->values[1] and [2].
    @param rights rights of the invoked capability
    @param tag    message tag
    @param utcb   caller's UTCB (control flags in values[0])
    @return result tag: 0 with 3 words on success, negative error otherwise */
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_control(unsigned char rights, L4_msg_tag const &tag, Utcb *utcb)
{
  if (EXPECT_FALSE(!(rights & L4_fpage::W)))
    return commit_result(-L4_err::EPerm);

  if (EXPECT_FALSE(tag.words() < 6))
    return commit_result(-L4_err::EInval);

  Context *curr = current();
  Obj_space *s = curr->space()->obj_space();
  L4_snd_item_iter snd_items(utcb, tag.words());
  Task *task = 0;
  void *utcb_addr = 0;

  Mword flags = utcb->values[0];

  // Remember the previous handler caps for the reply.
  Mword _old_pager = _pager.raw() << L4_obj_ref::Cap_shift;
  Mword _old_exc_handler = _exc_handler.raw() << L4_obj_ref::Cap_shift;

  // ~0UL means "leave unchanged" for control() below.
  Thread_ptr _new_pager(~0UL);
  Thread_ptr _new_exc_handler(~0UL);

  if (flags & Ctl_set_pager)
    _new_pager = Thread_ptr(utcb->values[1] >> L4_obj_ref::Cap_shift);

  if (flags & Ctl_set_exc_handler)
    _new_exc_handler = Thread_ptr(utcb->values[2] >> L4_obj_ref::Cap_shift);

  if (flags & Ctl_bind_task)
    {
      if (EXPECT_FALSE(!tag.items() || !snd_items.next()))
        return commit_result(-L4_err::EInval);

      L4_fpage bind_task(snd_items.get()->d);

      if (EXPECT_FALSE(!bind_task.is_objpage()))
        return commit_result(-L4_err::EInval);

      unsigned char task_rights = 0;
      task = Kobject::dcast<Task*>(s->lookup_local(bind_task.obj_index(), &task_rights));

      // NOTE(review): the rights check precedes the null check, so an
      // unresolvable cap yields EPerm (rights stay 0) rather than
      // EInval — confirm this ordering is intended.
      if (EXPECT_FALSE(!(task_rights & L4_fpage::W)))
        return commit_result(-L4_err::EPerm);

      if (!task)
        return commit_result(-L4_err::EInval);

      utcb_addr = (void*)utcb->values[5];
    }

  long res = control(_new_pager, _new_exc_handler,
                     task, utcb_addr, flags & Ctl_vcpu_enabled,
                     utcb->values[4] & Ctl_vcpu_enabled);

  if (res < 0)
    return commit_result(res);

  if ((res = sys_control_arch(utcb)) < 0)
    return commit_result(res);

    {
      // FIXME: must be done xcpu safe, may be some parts above too
      Lock_guard<Cpu_lock> guard(&cpu_lock);
      if (flags & Ctl_alien_thread)
        {
          if (utcb->values[4] & Ctl_alien_thread)
            state_change_dirty (~Thread_dis_alien, Thread_alien, false);
          else
            state_del_dirty(Thread_alien, false);
        }
    }

  // Reply with the previous handler capabilities.
  utcb->values[1] = _old_pager;
  utcb->values[2] = _old_exc_handler;

  return commit_result(0, 3);
}
464
465 // -------------------------------------------------------------------
466 // Thread::ex_regs class system calls
467
/** Exchange this thread's register state (ex_regs backend).
    Reads out the old ip/sp/flags, optionally cancels ongoing activity
    or triggers an exception, and installs a new ip/sp (~0UL means
    "leave unchanged").  A dead thread is resurrected to ready.
    @param ip new instruction pointer, or ~0UL to keep
    @param sp new stack pointer, or ~0UL to keep
    @param o_ip/o_sp/o_flags optional out parameters for the old values
    @param ops Exr_* operation flags (cancel, trigger exception)
    @return false if the thread is invalid or has no address space */
PUBLIC
bool
Thread_object::ex_regs(Address ip, Address sp,
                Address *o_ip = 0, Address *o_sp = 0, Mword *o_flags = 0,
                Mword ops = 0)
{
  if (state(false) == Thread_invalid || !space())
    return false;

  // Operating on ourselves: flush live register state to memory first.
  if (current() == this)
    spill_user_state();

  if (o_sp) *o_sp = user_sp();
  if (o_ip) *o_ip = user_ip();
  if (o_flags) *o_flags = user_flags();

  // Changing the run state is only possible when the thread is not in
  // an exception.
  if (!(ops & Exr_cancel) && (state(false) & Thread_in_exception))
    // XXX Maybe we should return false here.  Previously, we actually
    // did so, but we also actually didn't do any state modification.
    // If you change this value, make sure the logic in
    // sys_thread_ex_regs still works (in particular,
    // ex_regs_cap_handler and friends should still be called).
    return true;

  if (state(false) & Thread_dead)	// resurrect thread
    state_change_dirty (~Thread_dead, Thread_ready, false);

  else if (ops & Exr_cancel)
    // cancel ongoing IPC or other activity
    state_change_dirty (~(Thread_ipc_in_progress | Thread_delayed_deadline |
                        Thread_delayed_ipc), Thread_cancel | Thread_ready, false);

  if (ops & Exr_trigger_exception)
    {
      extern char leave_by_trigger_exception[];
      do_trigger_exception(regs(), leave_by_trigger_exception);
    }

  if (ip != ~0UL)
    user_ip(ip);

  if (sp != ~0UL)
    user_sp (sp);

  // Reload the possibly modified state into the live registers.
  if (current() == this)
    fill_user_state();

  return true;
}
519
/** UTCB-based ex_regs frontend: decodes ip/sp/ops from the message
    words, runs ex_regs(), and writes the old flags/ip/sp back.
    @param utcb UTCB carrying ops (values[0]), ip (values[1]), sp (values[2])
    @return 0 with 3 reply words on success, EInval on failure */
PUBLIC inline
L4_msg_tag
Thread_object::ex_regs(Utcb *utcb)
{
  Address ip = utcb->values[1];
  Address sp = utcb->values[2];
  Mword flags;
  Mword ops = utcb->values[0];

  LOG_TRACE("Ex-regs", "exr", current(), __fmt_thread_exregs,
      Log_thread_exregs *l = tbe->payload<Log_thread_exregs>();
      l->id = dbg_id();
      l->ip = ip; l->sp = sp; l->op = ops;);

  // ip/sp are reused as out parameters for the old values.
  if (!ex_regs(ip, sp, &ip, &sp, &flags, ops))
    return commit_result(-L4_err::EInval);

  utcb->values[0] = flags;
  utcb->values[1] = ip;
  utcb->values[2] = sp;

  return commit_result(0, 3);
}
543
/** DRQ handler executing ex_regs on the target thread's context.
    @param self the Thread_object being operated on
    @param p    Remote_syscall carrying the calling thread and the result
    @return Drq::Need_resched if ex_regs succeeded (result proto 0), else 0 */
PRIVATE static
unsigned
Thread_object::handle_remote_ex_regs(Drq *, Context *self, void *p)
{
  Remote_syscall *params = reinterpret_cast<Remote_syscall*>(p);
  params->result = nonull_static_cast<Thread_object*>(self)->ex_regs(params->thread->access_utcb());
  return params->result.proto() == 0 ? Drq::Need_resched : 0;
}
552
/** Op_ex_regs: run ex_regs on this thread via a DRQ, so it executes
    in the right context even across CPUs.
    @param tag message tag (must carry exactly 3 words: ops, ip, sp)
    @return the result tag produced by the remote ex_regs */
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_ex_regs(L4_msg_tag const &tag, Utcb * /*utcb*/)
{
  if (tag.words() != 3)
    return commit_result(-L4_err::EInval);

  Remote_syscall params;
  params.thread = current_thread();

  drq(handle_remote_ex_regs, &params, 0, Drq::Any_ctxt);
  return params.result;
}
566
/** Op_switch: donate the CPU to this thread, or yield.
    A no-op when invoked on oneself or when the target runs on another
    CPU.  If the target is ready (and not suspended) the caller
    switches to it directly; otherwise the caller yields its timeslice.
    Returns the remaining timeslice in the UTCB time value.
    @param utcb caller's UTCB receiving the remaining-time reply */
PRIVATE inline NOEXPORT NEEDS["timer.h"]
L4_msg_tag
Thread_object::sys_thread_switch(L4_msg_tag const &/*tag*/, Utcb *utcb)
{
  Context *curr = current();

  if (curr == this)
    return commit_result(0);

  // Cross-CPU switch is not supported; silently succeed.
  if (current_cpu() != cpu())
    return commit_result(0);

#ifdef FIXME
  Sched_context * const cs = current_sched();
#endif

  // NOTE(review): `curr != this` is always true here — the equal case
  // already returned above.
  if (curr != this
      && ((state() & (Thread_ready | Thread_suspended)) == Thread_ready))
    {
      curr->switch_exec_schedule_locked (this, Not_Helping);
      reinterpret_cast<Utcb::Time_val*>(utcb->values)->t = 0; // Assume timeslice was used up
      return commit_result(0, Utcb::Time_val::Words);
    }

#if 0 // FIXME: provide API for multiple sched contexts
      // Compute remaining quantum length of timeslice
      regs->left (timeslice_timeout.cpu(cpu())->get_timeout(Timer::system_clock()));

      // Yield current global timeslice
      cs->owner()->switch_sched (cs->id() ? cs->next() : cs);
#endif
  // Target not runnable: report the remaining quantum and yield.
  reinterpret_cast<Utcb::Time_val*>(utcb->values)->t
    = timeslice_timeout.cpu(current_cpu())->get_timeout(Timer::system_clock());
  curr->schedule();

  return commit_result(0, Utcb::Time_val::Words);
}
604
605
606
607 // -------------------------------------------------------------------
608 // Gather statistics information about thread execution
609
610 PRIVATE
611 unsigned
612 Thread_object::sys_thread_stats_remote(void *data)
613 {
614   update_consumed_time();
615   *(Clock::Time *)data = consumed_time();
616   return 0;
617 }
618
619 PRIVATE static
620 unsigned
621 Thread_object::handle_sys_thread_stats_remote(Drq *, Context *self, void *data)
622 {
623   return nonull_static_cast<Thread_object*>(self)->sys_thread_stats_remote(data);
624 }
625
/** Op_stats: report this thread's consumed CPU time.
    If the thread runs on another CPU the value is fetched via a DRQ;
    otherwise it is read directly (updating the accounting first when
    querying oneself, since consumed time is only updated on context
    switch).
    @param utcb caller's UTCB receiving the time value
    @return 0 with Utcb::Time_val::Words reply words */
PRIVATE inline NOEXPORT
L4_msg_tag
Thread_object::sys_thread_stats(L4_msg_tag const &/*tag*/, Utcb *utcb)
{
  Clock::Time value;

  if (cpu() != current_cpu())
    drq(handle_sys_thread_stats_remote, &value, 0, Drq::Any_ctxt);
  else
    {
      // Respect the fact that the consumed time is only updated on context switch
      if (this == current())
        update_consumed_time();
      value = consumed_time();
    }

  reinterpret_cast<Utcb::Time_val *>(utcb->values)->t = value;

  return commit_result(0, Utcb::Time_val::Words);
}
646
647