11 * \brief A task is a protection domain.
13 * A task is derived from Space, which aggregates a set of address spaces.
14 * Additionally to a space, a task provides initialization and
15 * destruction functionality for a protection domain.
16 * Task is also derived from Rcu_item to provide RCU shutdown of tasks.
// Kobject boilerplate (debug name / kobject registration helpers).
22 FIASCO_DECLARE_KOBJ();
// The kernel debugger's space view needs access to Task internals.
24 friend class Jdb_space;
27 /// \brief Do host (platform) specific initialization.
41 /// map the global utcb pointer page into this task
// NOTE(review): only meaningful on the UX (usermode) platform; the !ux
// implementation section below provides an empty stub.
42 void map_utcb_ptr_page();
46 //---------------------------------------------------------------------------
50 #include "unique_ptr.h"
52 #include "entry_frame.h"
56 #include "kmem_slab.h"
58 #include "l4_buf_iter.h"
61 #include "mem_layout.h"
62 #include "ram_quota.h"
63 #include "thread_state.h"
66 FIASCO_DEFINE_KOBJ(Task);
// Slab cache backing the per-task Ku_mem bookkeeping records; wired into
// Space::Ku_mem as its allocator.
68 static Kmem_slab_t<Task::Ku_mem> _k_u_mem_list_alloc("Ku_mem");
69 Slab_cache *Space::Ku_mem::a = &_k_u_mem_list_alloc;
// Low-level (assembly) continuation that loads the sanitized trap state and
// enters vCPU user mode; declared noreturn — callers must not expect control
// back.
71 extern "C" void vcpu_resume(Trap_state *, Return_frame *sp)
72 FIASCO_FASTCALL FIASCO_NORETURN;
// Enter vCPU user mode for `ctxt` using the state in `vcpu`.
// NOTE(review): fragment — the return type, braces, and the local
// `Trap_state ts` declaration are not visible in this view.
76 Task::resume_vcpu(Context *ctxt, Vcpu_state *vcpu, bool user_mode)
// Copy the user-provided trap state into the kernel-local `ts` before
// sanitizing, so later user writes to vcpu->_ts cannot affect what we load.
79 memcpy(&ts, &vcpu->_ts, sizeof(Trap_state));
// Must be entered with the CPU lock held.
81 assert_kdb(cpu_lock.test());
// Strip privileged bits from the user-supplied register state.
83 ts.sanitize_user_state();
85 // FIXME: UX is currently broken
86 /* UX:ctxt->vcpu_resume_user_arch(); */
89 ctxt->state_add_dirty(Thread_vcpu_user);
// While in vCPU user mode, traps, exceptions, and debug exceptions are
// delivered to the vCPU.
90 vcpu->state |= Vcpu_state::F_traps | Vcpu_state::F_exceptions
91 | Vcpu_state::F_debug_exc;
94 ctxt->space_ref()->user_mode(user_mode);
// Switch into the target address space, then jump to the noreturn low-level
// resume path.
95 switchin_context(ctxt->space());
96 vcpu_resume(&ts, ctxt->regs());
// NOTE(review): tail of a member whose signature is not visible here
// (presumably the reference-drop predicate); true iff this drop released
// the last reference.
102 { return dec_ref() == 0; }
// Allocate a kernel/user shared memory chunk and map it at `u_addr` in this
// task. On success *k_addr receives the kernel virtual address. Returns 0 on
// success or a negative L4_err code; partially established mappings are
// rolled back via free_ku_mem_chunk().
// NOTE(review): fragment — return-type line, braces, and the switch header
// for `res` are not visible in this view.
106 Task::alloc_ku_mem_chunk(User<void>::Ptr u_addr, unsigned size, void **k_addr)
// Size must be a power of two (relied on by the page-size choice below).
108 assert_kdb ((size & (size - 1)) == 0);
110 Kmem_alloc *const alloc = Kmem_alloc::allocator();
// Quota-accounted kernel memory allocation.
111 alloc->q_unaligned_alloc(ram_quota(), size);
// Allocate and map a kernel-user memory area described by the flexpage
// `ku_area`, and record it in this task's _ku_mem list. Returns 0 on
// success, -EInval for bad sizes, -ENomem on allocation failure.
// NOTE(review): fragment — braces and several intermediate lines (null
// check of `m`, `void *p`, error return) are not visible in this view.
169 Task::alloc_ku_mem(L4_fpage ku_area)
// Reject areas smaller than a page or larger than 2^20 bytes.
171 if (ku_area.order() < Config::PAGE_SHIFT || ku_area.order() > 20)
172 return -L4_err::EInval;
174 Mword sz = 1UL << ku_area.order();
// Bookkeeping record, charged against this task's quota.
176 Ku_mem *m = new (ram_quota()) Ku_mem();
179 return -L4_err::ENomem;
181 User<void>::Ptr u_addr((void*)Virt_addr(ku_area.mem_address()).value());
184 if (int e = alloc_ku_mem_chunk(u_addr, sz, &p))
// Chunk allocation failed: release the bookkeeping record again.
186 m->free(ram_quota());
// Lock-free insertion into the per-task list of ku_mem areas.
194 _ku_mem.add(m, mp_cas<cxx::S_list_item*>);
// Unmap and free one kernel-user memory area plus its bookkeeping record.
199 PRIVATE inline NOEXPORT
201 Task::free_ku_mem(Ku_mem *m)
// The whole area was mapped, so mapped_size == size.
203 free_ku_mem_chunk(m->k_addr, m->u_addr, m->size, m->size);
204 m->free(ram_quota());
// Remove the user mappings of a ku_mem chunk and return the kernel memory to
// the quota-accounted allocator. `mapped_size` may be smaller than `size`
// when alloc_ku_mem_chunk() failed part-way and only a prefix was mapped.
// NOTE(review): fragment — return-type line and braces are not visible in
// this view.
209 Task::free_ku_mem_chunk(void *k_addr, User<void>::Ptr u_addr, unsigned size,
210 unsigned mapped_size)
213 Kmem_alloc * const alloc = Kmem_alloc::allocator();
214 unsigned long page_size = Config::PAGE_SIZE;
216 // the following works because the size is a power of two
217 // and once we have size larger than a super page we have
218 // always multiples of superpages
219 if (size >= Config::SUPERPAGE_SIZE)
220 page_size = Config::SUPERPAGE_SIZE;
// Only tear down what was actually mapped.
222 for (unsigned long i = 0; i < mapped_size; i += page_size)
224 Address user_va = (Address)u_addr.get() + i;
225 static_cast<Mem_space*>(this)->v_delete(Mem_space::Addr(user_va),
226 Mem_space::Size(page_size));
// The kernel memory itself is freed in full, regardless of mapped_size.
229 alloc->q_unaligned_free(ram_quota(), size, k_addr);
// NOTE(review): loop fragment from a cleanup member whose signature is not
// visible here — drains the _ku_mem list, presumably freeing each entry via
// free_ku_mem(); confirm against the full source.
236 while (Ku_mem *m = _ku_mem.pop_front())
241 /** Allocate space for the UTCBs of all threads in this task.
242 * @return true on success, false if not enough memory for the UTCBs
// NOTE(review): fragment — the function signature and braces are not
// visible in this view.
// Set up the underlying Mem_space first; bail out if that fails.
248 if (!Mem_space::initialize())
251 // For UX, map the UTCB pointer page. For ia32, do nothing
258 * \brief Create a normal Task.
259 * \pre \a parent must be valid and exist.
// NOTE(review): fragment — constructor body braces not visible in this view.
262 Task::Task(Ram_quota *q) : Space(q)
266 // increment reference counter from zero
// Construct a Task over an externally provided page directory.
// NOTE(review): fragment — initializer list and body are not visible in
// this view.
271 Task::Task(Ram_quota *q, Mem_space::Dir_type* pdir)
274 // increment reference counter from zero
278 // The allocator for tasks
279 static Kmem_slab_t<Task> _task_allocator("Task");
// NOTE(review): the accessor's signature line (Task::allocator) is not
// visible in this view; the body simply exposes the slab cache above.
284 { return &_task_allocator; }
// Placement new: storage `p` is pre-allocated (see Task::create), so this
// only validates the size.
// NOTE(review): fragment — braces and return statement are not visible in
// this view.
289 Task::operator new (size_t size, void *p) throw()
292 assert (size == sizeof (Task));
// Release a Task's storage back to the slab allocator, charging the task's
// own quota, and trace the destruction.
// NOTE(review): fragment — braces and parts of the LOG_TRACE lambda are not
// visible in this view.
299 Task::operator delete (void *ptr)
301 Task *t = reinterpret_cast<Task*>(ptr);
302 LOG_TRACE("Kobject delete", "del", current(), Log_destroy,
306 l->ram = t->ram_quota()->current());
308 allocator()->q_free(t->ram_quota(), ptr);
// Factory: allocate quota-charged storage, placement-construct a TARGET
// task, initialize it, and optionally allocate its UTCB area. Ownership is
// held in a unique_ptr until construction fully succeeds.
// NOTE(review): fragment — null check of `t`, error-out paths, and the final
// release/return are not visible in this view.
312 PUBLIC template< typename TARGET >
315 Task::create(Ram_quota *quota, L4_fpage const &utcb_area)
317 void *t = allocator()->q_alloc(quota);
321 cxx::unique_ptr<Task> a(new (t) TARGET(quota));
323 if (!a->initialize())
328 if (utcb_area.is_valid())
330 int e = a->alloc_ku_mem(utcb_area);
339 * \brief Shutdown the task.
342 * -# Unbind and delete all contexts bound to this task.
343 * -# Unmap everything from all spaces.
344 * -# Delete child tasks.
// NOTE(review): fragment — braces and any intermediate statements are not
// visible in this view.
348 Task::destroy(Kobject ***reap_list)
348 Kobject::destroy(reap_list);
// Revoke every mapping in all of this task's spaces (RWX, full mask).
352 fpage_unmap(this, L4_fpage::all_spaces(L4_fpage::RWX), L4_map_mask::full(), reap_list);
// Handle the Task map operation: map the flexpage in utcb->values[2] from a
// source task (first send item) into this task, controlled by the send base
// in utcb->values[1]. Requires write rights on the task capability.
// NOTE(review): fragment — braces, the `from` null check, `rl` declaration,
// and the `ret` error branch structure are not visible in this view.
355 PRIVATE inline NOEXPORT
357 Task::sys_map(unsigned char rights, Syscall_frame *f, Utcb *utcb)
359 LOG_TRACE("Task map", "map", ::current(), Log_unmap,
361 l->mask = utcb->values[1];
362 l->fpage = utcb->values[2]);
// Mapping into a task needs the W right on the invoked capability.
364 if (EXPECT_FALSE(!(rights & L4_fpage::W)))
365 return commit_result(-L4_err::EPerm);
367 L4_msg_tag const tag = f->tag();
369 Obj_space *s = current()->space();
371 L4_snd_item_iter snd_items(utcb, tag.words());
// The first (and required) send item names the source task.
373 if (EXPECT_FALSE(!tag.items() || !snd_items.next()))
374 return commit_result(-L4_err::EInval);
376 L4_fpage src_task(snd_items.get()->d);
377 if (EXPECT_FALSE(!src_task.is_objpage()))
378 return commit_result(-L4_err::EInval);
// Resolve the source task capability in the caller's object space.
380 Task *from = Kobject::dcast<Task*>(s->lookup_local(src_task.obj_index()));
382 return commit_result(-L4_err::EInval);
388 // enforce lock order to prevent deadlocks.
389 // always take lock from task with the lower memory address first
390 Lock_guard_2<Lock> guard;
392 // FIXME: avoid locking the current task, it is not needed
// Either task may be vanishing concurrently; fail if a lock cannot be taken.
393 if (!guard.check_and_lock(&existence_lock, &from->existence_lock))
394 return commit_result(-L4_err::EInval);
398 ret = fpage_map(from, L4_fpage(utcb->values[2]), this,
399 L4_fpage::all_spaces(), L4_msg_item(utcb->values[1]), &rl);
407 // FIXME: treat reaped stuff
409 return commit_result(0);
411 return commit_error(utcb, ret);
// Handle the Task unmap operation: revoke the mappings described by the
// flexpages in utcb->values[2..words-1] under the map mask in values[1].
// Each flexpage word is updated in place with the flush-status bits.
// NOTE(review): fragment — braces and the `rl` reap-list declaration are
// not visible in this view.
415 PRIVATE inline NOEXPORT
417 Task::sys_unmap(Syscall_frame *f, Utcb *utcb)
420 unsigned words = f->tag().words();
422 LOG_TRACE("Task unmap", "unm", ::current(), Log_unmap,
424 l->mask = utcb->values[1];
425 l->fpage = utcb->values[2]);
428 Lock_guard<Lock> guard;
430 // FIXME: avoid locking the current task, it is not needed
// The task may be vanishing concurrently.
431 if (!guard.check_and_lock(&existence_lock))
432 return commit_error(utcb, L4_error::Not_existent);
436 L4_map_mask m(utcb->values[1]);
// Unmap each flexpage and report the flush status in its low bits.
438 for (unsigned i = 2; i < words; ++i)
440 unsigned const flushed = fpage_unmap(this, L4_fpage(utcb->values[i]), m, rl.list());
441 utcb->values[i] = (utcb->values[i] & ~0xfUL) | flushed;
450 return commit_result(0, words);
// Capability-validity query: returns nonzero when utcb->values[1] names a
// valid capability in this task. With the low flag bit set, the capability
// reference count is returned instead of a plain 1.
// NOTE(review): fragment — braces and the check that precedes the first
// `return commit_result(0)` are not visible in this view.
453 PRIVATE inline NOEXPORT
455 Task::sys_cap_valid(Syscall_frame *, Utcb *utcb)
457 L4_obj_ref obj(utcb->values[1]);
460 return commit_result(0);
462 Obj_space::Capability cap = lookup(obj.cap());
463 if (EXPECT_TRUE(cap.valid()))
465 if (!(utcb->values[1] & 1))
466 return commit_result(1);
468 return commit_result(cap.obj()->map_root()->cap_ref_cnt());
// Invalid capability.
471 return commit_result(0);
// Capability-equality query: returns 1 iff utcb->values[1] and values[2]
// refer to the same object (special capabilities are compared by their
// special tag).
// NOTE(review): fragment — braces and the condition guarding the first
// `return commit_result(1)` are not visible in this view.
474 PRIVATE inline NOEXPORT
476 Task::sys_caps_equal(Syscall_frame *, Utcb *utcb)
478 L4_obj_ref obj_a(utcb->values[1]);
479 L4_obj_ref obj_b(utcb->values[2]);
482 return commit_result(1);
// Special (non-indexed) capabilities compare by their special-cap tag.
484 if (obj_a.special() || obj_b.special())
485 return commit_result(obj_a.special_cap() == obj_b.special_cap());
487 Obj_space::Capability c_a = lookup(obj_a.cap());
488 Obj_space::Capability c_b = lookup(obj_b.cap());
490 return commit_result(c_a == c_b);
// Add further kernel-user memory areas to this task: each message word from
// index 1 on is a memory flexpage to allocate and map via alloc_ku_mem().
// Stops at the first failure and returns its error code.
// NOTE(review): fragment — braces and the `if (e < 0)` guard before
// `return commit_result(e)` are not visible in this view.
493 PRIVATE inline NOEXPORT
495 Task::sys_add_ku_mem(Syscall_frame *f, Utcb *utcb)
497 unsigned const w = f->tag().words();
498 for (unsigned i = 1; i < w; ++i)
500 L4_fpage ku_fp(utcb->values[i]);
501 if (!ku_fp.is_valid() || !ku_fp.is_mempage())
502 return commit_result(-L4_err::EInval);
504 int e = alloc_ku_mem(ku_fp);
506 return commit_result(e);
509 return commit_result(0);
// Dispatch the cap-info suboperations (selector presumably taken from the
// message tag / first word — the switch header is not visible in this view):
// 2 = capability validity, 3 = capability equality.
512 PRIVATE inline NOEXPORT
514 Task::sys_cap_info(Syscall_frame *f, Utcb *utcb)
516 L4_msg_tag const &tag = f->tag();
520 default: return commit_result(-L4_err::EInval);
521 case 2: return sys_cap_valid(f, utcb);
522 case 3: return sys_caps_equal(f, utcb);
// Kobject invocation entry point for the Task protocol: validates the
// protocol label, then dispatches on the opcode in utcb->values[0] to the
// sys_* handlers; unknown opcodes fall through to the arch-specific hook
// and finally fail with -ENosys.
// NOTE(review): fragment — braces, the `return` after the bad-proto branch,
// and the case labels of the switch are not visible in this view.
529 Task::invoke(L4_obj_ref, Mword rights, Syscall_frame *f, Utcb *utcb)
// Only the Task protocol label is accepted.
531 if (EXPECT_FALSE(f->tag().proto() != L4_msg_tag::Label_task))
533 f->tag(commit_result(-L4_err::EBadproto));
537 switch (utcb->values[0])
540 f->tag(sys_map(rights, f, utcb));
543 f->tag(sys_unmap(f, utcb));
546 f->tag(sys_cap_info(f, utcb));
549 f->tag(sys_add_ku_mem(f, utcb));
// Give the architecture-specific extension a chance before failing.
552 L4_msg_tag tag = f->tag();
553 if (invoke_arch(tag, utcb))
556 f->tag(commit_result(-L4_err::ENosys));
562 //---------------------------------------------------------------------------
563 IMPLEMENTATION [!ux]:
// On non-UX platforms these UX-specific hooks are deliberate no-ops.
565 IMPLEMENT inline void Task::map_utcb_ptr_page() {}
566 IMPLEMENT inline void Task::ux_init() {}
573 // ---------------------------------------------------------------------------
576 #include "tb_entry.h"
// Trace-buffer record for Task map/unmap operations (see the LOG_TRACE
// sites above). NOTE(review): fragment — the member fields (id, mask,
// fpage) are not visible in this view; packed to match the trace-buffer
// entry layout.
581 struct Log_unmap : public Tb_entry
586 unsigned print(int max, char *buf) const;
587 } __attribute__((packed));
591 // ---------------------------------------------------------------------------
592 IMPLEMENTATION [debug]:
// Format one Log_unmap trace record into `buf` (at most `max` bytes);
// returns snprintf's would-be length.
// NOTE(review): fragment — the return type, braces, and the local `fp`
// construction are not visible in this view.
596 Task::Log_unmap::print(int max, char *buf) const
599 return snprintf(buf, max, "task=[U:%lx] mask=%lx fpage=[%u/%u]%lx",
600 id, mask, (unsigned)fp.order(), (unsigned)fp.type(), fpage);
603 // ---------------------------------------------------------------------------
604 IMPLEMENTATION[!ia32 || !svm]:
// Fallback for configurations without hardware virtualization support
// (!ia32 || !svm): the vm_run operation is not implemented.
606 PRIVATE inline NOEXPORT
608 Task::sys_vm_run(Syscall_frame *, Utcb *)
610 return commit_result(-L4_err::ENosys);