10 class slab_cache_anon;
13 * \brief A task is a protection domain.
15 * A task is derived from Space, which aggregates a set of address spaces.
16 * Additionally to a space, a task provides initialization and
17 * destruction functionality for a protection domain.
18 * Task is also derived from Rcu_item to provide RCU shutdown of tasks.
// Kobject boilerplate; the matching FIASCO_DEFINE_KOBJ(Task) lives in the
// implementation section below.
26 FIASCO_DECLARE_KOBJ();
// The kernel debugger's space view needs access to Task internals.
28 friend class Jdb_space;
32 /// \brief Do host (platform) specific initialization.
35 /// \brief Map the trace buffer to the user address space.
50 /// \brief Destroy it.
54 * \brief Allocate memory for UTCBs for that task.
55 * \return true on success, or false on memory shortage.
60 * \brief Free the UTCBs allocated with alloc_utcbs().
68 /// Number of Utcbs fitting on a single page
69 Utcbs_per_page = Config::PAGE_SIZE / sizeof(Utcb),
73 /// map the global utcb pointer page into this task
74 void map_utcb_ptr_page();
81 //---------------------------------------------------------------------------
87 #include "entry_frame.h"
91 #include "kmem_slab_simple.h"
93 #include "l4_buf_iter.h"
96 #include "mem_layout.h"
97 #include "ram_quota.h"
99 #include "vmem_alloc.h"
101 FIASCO_DEFINE_KOBJ(Task);
108 return dec_ref() == 0;
// Task::alloc_utcbs (body fragment): back the task's UTCB area with kernel
// memory and map it writable into the user part of the address space.
// NOTE(review): this view of the source is truncated — braces, the early
// returns, and the switch statement head are missing lines.
// A task without a UTCB area needs no backing memory.
116 if (!utcb_area_size())
118 set_kern_utcb_area(0);
// Charge the allocation against this task's RAM quota.
122 Mapped_allocator *const alloc = Mapped_allocator::allocator();
123 void *utcbs = alloc->q_unaligned_alloc(ram_quota(), utcb_area_size());
125 if (EXPECT_FALSE(!utcbs))
// Zero-fill so freshly created threads start with clean UTCBs.
129 memset(utcbs, 0, utcb_area_size());
130 set_kern_utcb_area(Address(utcbs));
132 unsigned long page_size = Config::PAGE_SIZE;
134 // the following works because the size is a power of two
135 // and once we have size larger than a super page we have
136 // always multiples of superpages
137 if (utcb_area_size() >= Config::SUPERPAGE_SIZE)
138 page_size = Config::SUPERPAGE_SIZE;
// Map the kernel backing pages at the task's user-visible UTCB addresses.
140 for (unsigned long i = 0; i < utcb_area_size(); i += page_size)
142 Address kern_va = kern_utcb_area() + i;
143 Address user_va = user_utcb_area() + i;
144 Address pa = mem_space()->pmem_to_phys(kern_va);
146 // must be valid physical address
149 Mem_space::Status res =
150 mem_space()->v_insert(Mem_space::Phys_addr(pa),
151 Mem_space::Addr(user_va), Mem_space::Size(page_size),
152 Mem_space::Page_writable | Mem_space::Page_user_accessible
153 | Mem_space::Page_cacheable);
157 case Mem_space::Insert_ok: break;
158 case Mem_space::Insert_err_nomem:
// Any other insert status indicates a kernel bug, not a user error.
162 printf("UTCB mapping failed: va=%p, ph=%p, res=%d\n",
163 (void*)user_va, (void*)kern_va, res);
164 kdb_ke("BUG in utcb allocation");
// Task::free_utcbs (body fragment): unmap the user-side UTCB mappings and
// return the kernel backing memory to the quota-tracked allocator.
// Nothing to do if the area was never allocated or the space is gone.
177 if (EXPECT_FALSE(!kern_utcb_area() || !mem_space() || !mem_space()->dir()))
180 Mapped_allocator * const alloc = Mapped_allocator::allocator();
181 unsigned long page_size = Config::PAGE_SIZE;
183 // the following works because the size is a power of two
184 // and once we have size larger than a super page we have
185 // always multiples of superpages
186 if (utcb_area_size() >= Config::SUPERPAGE_SIZE)
187 page_size = Config::SUPERPAGE_SIZE;
// Remove the user-visible mappings page by page (mirrors alloc_utcbs()).
189 for (unsigned long i = 0; i < utcb_area_size(); i += page_size)
191 Address user_va = user_utcb_area() + i;
192 mem_space()->v_delete(Mem_space::Addr(user_va),
193 Mem_space::Size(page_size));
// Give the backing memory back and credit the task's RAM quota.
196 alloc->q_unaligned_free(ram_quota(), utcb_area_size(), (void*)kern_utcb_area());
198 set_kern_utcb_area(0);
203 /** Allocate space for the UTCBs of all threads in this task.
204 * @return true on success, false if not enough memory for the UTCBs
210 // For UX, map the UTCB pointer page. For ia32, do nothing
217 * \brief Create a normal Task.
218 * \pre \a parent must be valid and exist.
221 template< typename SPACE_FACTORY >
222 Task::Task(SPACE_FACTORY const &sf, Ram_quota *q, L4_fpage const &utcb_area)
223 : Space(sf, q, utcb_area)
// Sigma0 gets special-cased here (the branch body is not visible in this view).
229 if (mem_space()->is_sigma0())
// Serialize the state transition against concurrent deletion.
234 Lock_guard<Spin_lock> guard(state_lock());
// Slab cache backing Task::operator new/delete; intentionally never freed so
// it survives a kernel-debugger exit (see the comment below).
244 static slab_cache_anon* slabs = new Kmem_slab_simple (sizeof (Task),
249 // If Fiasco would kill all tasks even when exiting through the
250 // kernel debugger, we could use a deallocating version of the above:
252 // static auto_ptr<slab_cache_anon> slabs
253 // (new Kmem_slab_simple (sizeof (Task), sizeof (Mword)))
254 // return slabs.get();
// Placement new: storage is pre-allocated (quota-checked) by Task::create(),
// so this operator only sanity-checks the requested size.
259 PROTECTED inline NEEDS["kmem_slab_simple.h"]
261 Task::operator new (size_t size, void *p)
264 assert (size == sizeof (Task));
// Return a Task's storage to the slab allocator, logging the destruction and
// crediting the owning RAM quota.
269 PUBLIC //inline NEEDS["kmem_slab_simple.h"]
271 Task::operator delete (void *ptr)
273 Task *t = reinterpret_cast<Task*>(ptr);
274 LOG_TRACE("Kobject delete", "del", current(), __fmt_kobj_destroy,
275 Log_destroy *l = tbe->payload<Log_destroy>();
279 l->ram = t->ram_quota()->current());
// Free through the quota-aware allocator so the quota is uncharged.
281 allocator()->q_free(t->ram_quota(), ptr);
// Factory: quota-charged slab allocation followed by placement construction.
284 PUBLIC template< typename SPACE_FACTORY > inline NEEDS[Task::operator new]
287 Task::create(SPACE_FACTORY const &sf, Ram_quota *quota,
288 L4_fpage const &utcb_area)
290 if (void *t = allocator()->q_alloc(quota))
292 Task *a = new (t) Task(sf, quota, utcb_area);
// Validity predicate body (its signature is not visible in this view): the
// task is usable once its memory space exists and it reached the Ready state.
305 { return mem_space()->valid() && state() == Ready; }
// First phase of two-phase destruction: detach from the Kobject machinery and
// mark this task In_deletion under the state lock so new users back off.
310 Task::initiate_deletion(Kobject ***reap_list)
312 Kobject::initiate_deletion(reap_list);
314 Lock_guard<Spin_lock> guard(state_lock());
315 set_state(In_deletion);
319 * \brief Shutdown the task.
322 * -# Unbind and delete all contexts bound to this task.
323 * -# Unmap everything from all spaces.
324 * -# Delete child tasks.
328 Task::destroy(Kobject ***reap_list)
330 Kobject::destroy(reap_list);
// Revoke every mapping this task handed out, across all space types (RWX).
332 fpage_unmap(this, L4_fpage::all_spaces(L4_fpage::RWX), L4_map_mask::full(), reap_list);
// Syscall: map flex pages from a source task into this task.
// Message layout (as read below): values[1] = map control word,
// values[2] = flex page; the first send item names the source task.
335 PRIVATE inline NOEXPORT
337 Task::sys_map(unsigned char rights, Syscall_frame *f, Utcb *utcb)
339 LOG_TRACE("Task map", "map", ::current(), __task_unmap_fmt,
340 Log_unmap *lu = tbe->payload<Log_unmap>();
342 lu->mask = utcb->values[1];
343 lu->fpage = utcb->values[2]);
// The caller needs write rights on this task capability to map into it.
345 if (EXPECT_FALSE(!(rights & L4_fpage::W)))
346 return commit_result(-L4_err::EPerm);
348 L4_msg_tag const tag = f->tag();
350 Obj_space *s = current()->space()->obj_space();
351 L4_snd_item_iter snd_items(utcb, tag.words());
353 if (EXPECT_FALSE(!tag.items() || !snd_items.next()))
354 return commit_result(-L4_err::EInval);
// The send item must be an object flex page naming the source task.
356 L4_fpage src_task(snd_items.get()->d);
357 if (EXPECT_FALSE(!src_task.is_objpage()))
358 return commit_result(-L4_err::EInval);
360 Task *from = Kobject::dcast<Task*>(s->lookup_local(src_task.obj_index()));
362 return commit_result(-L4_err::EInval);
364 // enforce lock order to prevent deadlocks.
365 // always take lock from task with the lower memory address first
366 Lock_guard_2<Lock> guard;
// Locking both existence locks fails if either task is being deleted.
368 if (!guard.lock(&existence_lock, &from->existence_lock))
369 return commit_result(-L4_err::EInval);
375 L4_error ret = fpage_map(from, L4_fpage(utcb->values[2]), this, L4_fpage::all_spaces(), utcb->values[1], &rl);
380 // FIXME: treat reaped stuff
382 return commit_result(0);
384 return commit_error(utcb, ret);
// Syscall: revoke mappings from this task. values[1] holds the unmap mask,
// values[2..words-1] the flex pages to flush; per-fpage flush status is
// written back into the low nibble of each word.
388 PRIVATE inline NOEXPORT
390 Task::sys_unmap(Syscall_frame *f, Utcb *utcb)
392 Lock_guard<Lock> guard;
// Abort if the task is concurrently being deleted.
394 if (!guard.lock(&existence_lock))
395 return commit_error(utcb, L4_error::Not_existent);
397 LOG_TRACE("Task unmap", "unm", ::current(), __task_unmap_fmt,
398 Log_unmap *lu = tbe->payload<Log_unmap>();
400 lu->mask = utcb->values[1];
401 lu->fpage = utcb->values[2]);
406 L4_map_mask m(utcb->values[1]);
407 unsigned words = f->tag().words();
409 for (unsigned i = 2; i < words; ++i)
411 unsigned const flushed = fpage_unmap(this, L4_fpage(utcb->values[i]), m, rl.list());
// Report the flush result back to the caller in place.
412 utcb->values[i] = (utcb->values[i] & ~0xfUL) | flushed;
418 return commit_result(0, words);
// Syscall helper: test whether values[1] names a valid capability in this
// task's object space; bit 0 of values[1] selects the result mode (below).
421 PRIVATE inline NOEXPORT
423 Task::sys_cap_valid(Syscall_frame *, Utcb *utcb)
425 L4_obj_ref obj(utcb->values[1]);
428 return commit_result(0);
430 Obj_space::Capability cap = obj_space()->lookup(obj.cap());
431 if (EXPECT_TRUE(cap.valid()))
// Bit 0 clear: plain existence check, result 1.
433 if (!(utcb->values[1] & 1))
434 return commit_result(1);
// Bit 0 set: return the capability reference count instead.
436 return commit_result(cap.obj()->map_root()->cap_ref_cnt());
439 return commit_result(0);
// Syscall helper: compare two capability slots of this task for identity.
442 PRIVATE inline NOEXPORT
444 Task::sys_caps_equal(Syscall_frame *, Utcb *utcb)
446 L4_obj_ref obj_a(utcb->values[1]);
447 L4_obj_ref obj_b(utcb->values[2]);
450 return commit_result(1);
// Two invalid refs compare equal; invalid vs. valid compares unequal.
452 if (obj_a.invalid() || obj_b.invalid())
453 return commit_result(obj_a.invalid() && obj_b.invalid());
455 Obj_space::Capability c_a = obj_space()->lookup(obj_a.cap());
456 Obj_space::Capability c_b = obj_space()->lookup(obj_b.cap());
458 return commit_result(c_a == c_b);
// Dispatcher for capability-introspection sub-operations, selected by a field
// of the message tag (the switch head is not visible in this view).
461 PRIVATE inline NOEXPORT
463 Task::sys_cap_info(Syscall_frame *f, Utcb *utcb)
465 L4_msg_tag const &tag = f->tag();
469 default: return commit_result(-L4_err::EInval);
470 case 2: return sys_cap_valid(f, utcb);
471 case 3: return sys_caps_equal(f, utcb);
// Kobject entry point: dispatch task-protocol invocations by the opcode in
// utcb->values[0]; unhandled opcodes fall through to the arch-specific hook.
480 Task::invoke(L4_obj_ref, Mword rights, Syscall_frame *f, Utcb *utcb)
482 if (EXPECT_FALSE(f->tag().proto() != L4_msg_tag::Label_task))
484 f->tag(commit_result(-L4_err::EBadproto));
488 switch (utcb->values[0])
491 f->tag(sys_map(rights, f, utcb));
494 f->tag(sys_unmap(f, utcb));
497 f->tag(sys_cap_info(f, utcb));
// Give the architecture-specific extension a chance before reporting ENosys.
500 L4_msg_tag tag = f->tag();
501 if (invoke_arch(tag, utcb))
504 f->tag(commit_result(-L4_err::ENosys));
509 //---------------------------------------------------------------------------
510 IMPLEMENTATION [!ux]:
// No-op stub on non-UX configurations; UX supplies a real implementation.
514 Task::map_utcb_ptr_page()
528 //---------------------------------------------------------------------------
529 IMPLEMENTATION [!(ia32|ux|amd64)]:
537 // ---------------------------------------------------------------------------
// Tail of the Log_unmap trace-buffer payload (struct head not visible here).
548 } __attribute__((packed));
// Formatter bound to the asm label referenced by the LOG_TRACE entries above.
550 static unsigned unmap_fmt(Tb_entry *, int max, char *buf) asm ("__task_unmap_fmt");
553 // ---------------------------------------------------------------------------
554 IMPLEMENTATION [debug]:
// Render a Log_unmap trace entry into buf; returns the snprintf length.
558 Task::unmap_fmt(Tb_entry *e, int max, char *buf)
560 Log_unmap *l = e->payload<Log_unmap>();
561 L4_fpage fp(l->fpage);
562 return snprintf(buf, max, "task=[U:%lx] mask=%lx fpage=[%u/%u]%lx",
563 l->id, l->mask, (unsigned)fp.order(), fp.type(), l->fpage);
566 // ---------------------------------------------------------------------------
567 IMPLEMENTATION[!ia32 || !svm]:
// Stub: hardware virtualization (vm_run) is unsupported in this configuration.
569 PRIVATE inline NOEXPORT
571 Task::sys_vm_run(Syscall_frame *, Utcb *)
573 return commit_result(-L4_err::ENosys);