class slab_cache_anon;

/**
 * \brief A task is a protection domain.
 *
 * A task is derived from Space, which aggregates a set of address spaces.
 * In addition to a space, a task provides initialization and
 * destruction functionality for a protection domain.
 * Task is also derived from Rcu_item to provide RCU shutdown of tasks.
 */
  FIASCO_DECLARE_KOBJ();

  friend class Jdb_space;

  /// \brief Do host (platform) specific initialization.

  /// \brief Map the trace buffer to the user address space.
  /// Map the global UTCB pointer page into this task.
  void map_utcb_ptr_page();
//---------------------------------------------------------------------------
IMPLEMENTATION:
#include "entry_frame.h"
#include "kmem_slab_simple.h"
#include "l4_buf_iter.h"
#include "mem_layout.h"
#include "ram_quota.h"
#include "vmem_alloc.h"

FIASCO_DEFINE_KOBJ(Task);

static Kmem_slab_t<Task::Ku_mem> _k_u_mem_list_alloc("Ku_mem");
slab_cache_anon *Space::Ku_mem::a = &_k_u_mem_list_alloc;
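
// Ku_mem bookkeeping records come from their own slab cache; pointing
// Space::Ku_mem::a at it makes the quota-aware allocation in
// "new (ram_quota()) Ku_mem()" below draw from this cache.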
extern "C" void vcpu_resume(Trap_state *, Return_frame *sp)
   FIASCO_FASTCALL FIASCO_NORETURN;

PUBLIC
void
Task::resume_vcpu(Context *ctxt, Vcpu_state *vcpu, bool user_mode)
{
  Trap_state ts;
  memcpy(&ts, &vcpu->_ts, sizeof(Trap_state));

  assert_kdb(cpu_lock.test());

  ts.sanitize_user_state();

  // FIXME: UX is currently broken
  /* UX: ctxt->vcpu_resume_user_arch(); */

  vcpu->state |= Vcpu_state::F_traps | Vcpu_state::F_exceptions
                 | Vcpu_state::F_debug_exc;

  ctxt->space_ref()->user_mode(user_mode);
  switchin_context(ctxt->space());
  vcpu_resume(&ts, ctxt->regs());
}
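
// Summary (illustrative, not additional API): vcpu->_ts holds the
// user-supplied register state. It is copied onto the kernel stack and
// sanitized, the trap/exception flags are forced so a vCPU running in
// user mode can never escape kernel interception, and vcpu_resume()
// then enters user land directly and does not return.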
PUBLIC virtual
bool
Task::put()
{ return dec_ref() == 0; }
PRIVATE
int
Task::alloc_ku_mem_chunk(User<void>::Ptr u_addr, unsigned size, void **k_addr)
{
  // size must be a power of two
  assert_kdb ((size & (size - 1)) == 0);

  Mapped_allocator *const alloc = Mapped_allocator::allocator();
  void *p = alloc->q_unaligned_alloc(ram_quota(), size);

  if (EXPECT_FALSE(!p))
    return -L4_err::ENomem;

  unsigned long page_size = Config::PAGE_SIZE;

  // This works because the size is a power of two, and any size larger
  // than a superpage is always a multiple of the superpage size.
  if (size >= Config::SUPERPAGE_SIZE)
    page_size = Config::SUPERPAGE_SIZE;
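
  // Example, assuming 4 KB pages and 4 MB superpages (ia32 values):
  //   size 0x2000   -> mapped as two 4 KB pages
  //   size 0x800000 -> mapped as two 4 MB superpages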
  for (unsigned long i = 0; i < size; i += page_size)
    {
      Address kern_va = (Address)p + i;
      Address user_va = (Address)u_addr.get() + i;
      Address pa = mem_space()->pmem_to_phys(kern_va);

      // must be a valid physical address

      Mem_space::Status res =
        mem_space()->v_insert(Mem_space::Phys_addr(pa),
            Mem_space::Addr(user_va), Mem_space::Size(page_size),
            Mem_space::Page_writable | Mem_space::Page_user_accessible
            | Mem_space::Page_cacheable);

      switch (res)
        {
        case Mem_space::Insert_ok: break;
        case Mem_space::Insert_err_nomem:
          free_ku_mem_chunk(p, u_addr, size);
          return -L4_err::ENomem;

        case Mem_space::Insert_err_exists:
          free_ku_mem_chunk(p, u_addr, size);
          return -L4_err::EExists;

        default:
          printf("UTCB mapping failed: va=%p, ph=%p, res=%d\n",
                 (void*)user_va, (void*)kern_va, res);
          kdb_ke("BUG in utcb allocation");
          free_ku_mem_chunk(p, u_addr, size);
          return -L4_err::EInval;
        }
    }

  *k_addr = p;
  return 0;
}
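
// Error contract, as relied on by alloc_ku_mem() below: 0 on success,
// a negative L4_err value on failure. Partially established mappings
// are torn down via free_ku_mem_chunk() before an error is returned.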
PRIVATE
int
Task::alloc_ku_mem(L4_fpage ku_area)
{
  if (ku_area.order() < Config::PAGE_SHIFT || ku_area.order() > 20)
    return -L4_err::EInval;

  Mword sz = 1UL << ku_area.order();
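
  // e.g. ku_area.order() == 12 gives sz == 0x1000, one 4 KB page;
  // the guard above caps kernel-user memory areas at 1 MB (order 20).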
  Ku_mem *m = new (ram_quota()) Ku_mem();
  if (!m)
    return -L4_err::ENomem;

  User<void>::Ptr u_addr((void*)Virt_addr(ku_area.mem_address()).value());

  void *p;
  if (int e = alloc_ku_mem_chunk(u_addr, sz, &p))
    {
      m->free(ram_quota());
      return e;
    }

  // safely add the new Ku_mem object to the list:
  // re-read the head and retry the CAS until it succeeds
  do
    m->next = _ku_mem;
  while (!mp_cas(&_ku_mem, m->next, m));

  return 0;
}
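
// In alloc_ku_mem() above, mp_cas(&_ku_mem, m->next, m) succeeds only if
// _ku_mem still equals the head value just copied into m->next; a
// concurrent push on another CPU makes the CAS fail and the loop re-read
// the head. Insertion is thus lock-free and no Ku_mem entry is lost.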
PRIVATE inline NOEXPORT
void
Task::free_ku_mem(Ku_mem *m)
{
  free_ku_mem_chunk(m->k_addr, m->u_addr, m->size);
  m->free(ram_quota());
}
PRIVATE
void
Task::free_ku_mem_chunk(void *k_addr, User<void>::Ptr u_addr, unsigned size)
{
  Mapped_allocator * const alloc = Mapped_allocator::allocator();
  unsigned long page_size = Config::PAGE_SIZE;

  // This works because the size is a power of two, and any size larger
  // than a superpage is always a multiple of the superpage size.
  if (size >= Config::SUPERPAGE_SIZE)
    page_size = Config::SUPERPAGE_SIZE;

  for (unsigned long i = 0; i < size; i += page_size)
    {
      Address user_va = (Address)u_addr.get() + i;
      mem_space()->v_delete(Mem_space::Addr(user_va),
                            Mem_space::Size(page_size));
    }

  alloc->q_unaligned_free(ram_quota(), size, k_addr);
}
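
// Teardown mirrors alloc_ku_mem_chunk(): the same power-of-two stepping
// removes superpage mappings at superpage granularity before the backing
// memory is returned to the quota-accounted allocator.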
/** Allocate space for the UTCBs of all threads in this task.
 *  @return true on success, false if not enough memory for the UTCBs
 */

// For UX, map the UTCB pointer page. For ia32, do nothing.
/**
 * \brief Create a normal Task.
 * \pre \a parent must be valid and exist.
 */
template< typename SPACE_FACTORY >
Task::Task(SPACE_FACTORY const &sf, Ram_quota *q)

  // increment reference counter from zero

  if (mem_space()->is_sigma0())
PROTECTED template<typename SPACE_FACTORY>
Task::Task(SPACE_FACTORY const &sf, Ram_quota *q, Mem_space::Dir_type* pdir)

  // increment reference counter from zero
// The allocator for tasks
static Kmem_slab_t<Task> _task_allocator("Task");

PROTECTED static
slab_cache_anon*
Task::allocator()
{ return &_task_allocator; }
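
// Allocation protocol (as used by create() and operator delete below):
// q_alloc() takes quota-accounted slab memory, the placement operator new
// constructs the Task in it, and q_free() returns the memory to the slab
// and credits the quota again.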
PROTECTED inline NEEDS["kmem_slab_simple.h"]
void *
Task::operator new (size_t size, void *p)
{
  assert (size == sizeof (Task));
  return p;
}
PUBLIC //inline NEEDS["kmem_slab_simple.h"]
void
Task::operator delete (void *ptr)
{
  Task *t = reinterpret_cast<Task*>(ptr);
  LOG_TRACE("Kobject delete", "del", current(), __fmt_kobj_destroy,
            Log_destroy *l = tbe->payload<Log_destroy>();

            l->ram = t->ram_quota()->current());

  allocator()->q_free(t->ram_quota(), ptr);
}
PUBLIC template< typename SPACE_FACTORY >
static
Task *
Task::create(SPACE_FACTORY const &sf, Ram_quota *quota,
             L4_fpage const &utcb_area)
{
  void *t = allocator()->q_alloc(quota);
  if (EXPECT_FALSE(!t))
    return 0;

  auto_ptr<Task> a(new (t) Task(sf, quota));

  if (utcb_area.is_valid())
    {
      int e = a->alloc_ku_mem(utcb_area);
      if (e < 0)
        return 0;
    }

  return a.release();
}
PUBLIC inline
bool
Task::valid() const
{ return mem_space()->valid(); }
/**
 * \brief Shutdown the task.
 *
 * -# Unbind and delete all contexts bound to this task.
 * -# Unmap everything from all spaces.
 * -# Delete child tasks.
 */
PUBLIC
void
Task::destroy(Kobject ***reap_list)
{
  Kobject::destroy(reap_list);

  fpage_unmap(this, L4_fpage::all_spaces(L4_fpage::RWX),
              L4_map_mask::full(), reap_list);
}
PRIVATE inline NOEXPORT
L4_msg_tag
Task::sys_map(unsigned char rights, Syscall_frame *f, Utcb *utcb)
{
  LOG_TRACE("Task map", "map", ::current(), __task_unmap_fmt,
            Log_unmap *lu = tbe->payload<Log_unmap>();

            lu->mask = utcb->values[1];
            lu->fpage = utcb->values[2]);

  if (EXPECT_FALSE(!(rights & L4_fpage::W)))
    return commit_result(-L4_err::EPerm);

  L4_msg_tag const tag = f->tag();

  Obj_space *s = current()->space()->obj_space();
  L4_snd_item_iter snd_items(utcb, tag.words());

  if (EXPECT_FALSE(!tag.items() || !snd_items.next()))
    return commit_result(-L4_err::EInval);

  L4_fpage src_task(snd_items.get()->d);
  if (EXPECT_FALSE(!src_task.is_objpage()))
    return commit_result(-L4_err::EInval);

  Task *from = Kobject::dcast<Task*>(s->lookup_local(src_task.obj_index()));
  if (EXPECT_FALSE(!from))
    return commit_result(-L4_err::EInval);
  // Enforce lock order to prevent deadlocks:
  // always take the lock of the task with the lower memory address first.
  Lock_guard_2<Lock> guard;

  // FIXME: avoid locking the current task, it is not needed
  if (!guard.lock(&existence_lock, &from->existence_lock))
    return commit_result(-L4_err::EInval);
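
  // Address-ordered locking, sketched (this is what Lock_guard_2 is
  // assumed to do internally):
  //   first  = min(this, from);   // compare object addresses
  //   second = max(this, from);
  //   lock(first->existence_lock); lock(second->existence_lock);
  // A global order on lock addresses rules out ABBA deadlocks between
  // two concurrent cross-task map operations.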
  Reap_list rl;
  L4_error ret;

  ret = fpage_map(from, L4_fpage(utcb->values[2]), this,
                  L4_fpage::all_spaces(), utcb->values[1], &rl);

  // FIXME: treat reaped stuff
  if (ret.ok())
    return commit_result(0);

  return commit_error(utcb, ret);
}
PRIVATE inline NOEXPORT
L4_msg_tag
Task::sys_unmap(Syscall_frame *f, Utcb *utcb)
{
  Reap_list rl;
  unsigned words = f->tag().words();

  LOG_TRACE("Task unmap", "unm", ::current(), __task_unmap_fmt,
            Log_unmap *lu = tbe->payload<Log_unmap>();

            lu->mask = utcb->values[1];
            lu->fpage = utcb->values[2]);

  Lock_guard<Lock> guard;

  // FIXME: avoid locking the current task, it is not needed
  if (!guard.try_lock(&existence_lock))
    return commit_error(utcb, L4_error::Not_existent);

  L4_map_mask m(utcb->values[1]);

  for (unsigned i = 2; i < words; ++i)
    {
      unsigned const flushed = fpage_unmap(this, L4_fpage(utcb->values[i]),
                                           m, rl.list());
      utcb->values[i] = (utcb->values[i] & ~0xfUL) | flushed;
    }

  return commit_result(0, words);
}
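
// Note on the return convention above: each flexpage word is handed back
// with its low four bits replaced by the rights that were actually
// flushed; the ~0xfUL mask keeps the rest of the word intact.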
PRIVATE inline NOEXPORT
L4_msg_tag
Task::sys_cap_valid(Syscall_frame *, Utcb *utcb)
{
  L4_obj_ref obj(utcb->values[1]);

  if (obj.invalid())
    return commit_result(0);

  Obj_space::Capability cap = obj_space()->lookup(obj.cap());
  if (EXPECT_TRUE(cap.valid()))
    {
      if (!(utcb->values[1] & 1))
        return commit_result(1);
      else
        return commit_result(cap.obj()->map_root()->cap_ref_cnt());
    }
  return commit_result(0);
}
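
// Bit 0 of utcb->values[1] selects the query: unset merely reports that
// the capability is valid (1), set returns its mapping-tree reference
// count instead. Invalid capabilities yield 0 either way.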
PRIVATE inline NOEXPORT
L4_msg_tag
Task::sys_caps_equal(Syscall_frame *, Utcb *utcb)
{
  L4_obj_ref obj_a(utcb->values[1]);
  L4_obj_ref obj_b(utcb->values[2]);

  if (obj_a == obj_b)
    return commit_result(1);

  if (obj_a.invalid() || obj_b.invalid())
    return commit_result(obj_a.invalid() && obj_b.invalid());

  Obj_space::Capability c_a = obj_space()->lookup(obj_a.cap());
  Obj_space::Capability c_b = obj_space()->lookup(obj_b.cap());

  return commit_result(c_a == c_b);
}
PRIVATE inline NOEXPORT
L4_msg_tag
Task::sys_add_ku_mem(Syscall_frame *f, Utcb *utcb)
{
  unsigned const w = f->tag().words();
  for (unsigned i = 1; i < w; ++i)
    {
      L4_fpage ku_fp(utcb->values[i]);
      if (!ku_fp.is_valid() || !ku_fp.is_mempage())
        return commit_result(-L4_err::EInval);

      int e = alloc_ku_mem(ku_fp);
      if (e < 0)
        return commit_result(e);
    }

  return commit_result(0);
}
PRIVATE inline NOEXPORT
L4_msg_tag
Task::sys_cap_info(Syscall_frame *f, Utcb *utcb)
{
  L4_msg_tag const &tag = f->tag();

  switch (tag.words())
    {
    default: return commit_result(-L4_err::EInval);
    case 2:  return sys_cap_valid(f, utcb);
    case 3:  return sys_caps_equal(f, utcb);
    }
}
PUBLIC
void
Task::invoke(L4_obj_ref, Mword rights, Syscall_frame *f, Utcb *utcb)
{
  if (EXPECT_FALSE(f->tag().proto() != L4_msg_tag::Label_task))
    {
      f->tag(commit_result(-L4_err::EBadproto));
      return;
    }

  switch (utcb->values[0])
    {
      f->tag(sys_map(rights, f, utcb));

      f->tag(sys_unmap(f, utcb));

      f->tag(sys_cap_info(f, utcb));

      f->tag(sys_add_ku_mem(f, utcb));

    default:
      L4_msg_tag tag = f->tag();
      if (invoke_arch(tag, utcb))
        f->tag(tag);
      else
        f->tag(commit_result(-L4_err::ENosys));
    }
}
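
// Dispatch note: utcb->values[0] carries the Task-protocol opcode that
// selects one of the sys_* handlers. Opcodes the generic code does not
// know are offered to invoke_arch() first, so architecture extensions
// (e.g. sys_vm_run() below) get a chance before -L4_err::ENosys is
// returned.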
//---------------------------------------------------------------------------
IMPLEMENTATION [!ux]:

IMPLEMENT inline
void
Task::map_utcb_ptr_page()
{}
// ---------------------------------------------------------------------------
INTERFACE [debug]:

  struct Log_unmap
  {
    Mword id;
    Mword mask;
    Mword fpage;
  } __attribute__((packed));

  static unsigned unmap_fmt(Tb_entry *, int max, char *buf)
    asm ("__task_unmap_fmt");
// ---------------------------------------------------------------------------
IMPLEMENTATION [debug]:

IMPLEMENT
unsigned
Task::unmap_fmt(Tb_entry *e, int max, char *buf)
{
  Log_unmap *l = e->payload<Log_unmap>();
  L4_fpage fp(l->fpage);
  return snprintf(buf, max, "task=[U:%lx] mask=%lx fpage=[%u/%u]%lx",
                  l->id, l->mask, (unsigned)fp.order(), fp.type(), l->fpage);
}
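
// Example trace line produced by the format above (illustrative values):
//   task=[U:12] mask=80000002 fpage=[12/1]b2000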
// ---------------------------------------------------------------------------
IMPLEMENTATION [!ia32 || !svm]:

PRIVATE inline NOEXPORT
L4_msg_tag
Task::sys_vm_run(Syscall_frame *, Utcb *)
{
  return commit_result(-L4_err::ENosys);
}