1 /* Linuxthreads - a simple clone()-based implementation of Posix */
2 /* threads for Linux. */
3 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
5 /* This program is free software; you can redistribute it and/or */
6 /* modify it under the terms of the GNU Library General Public License */
7 /* as published by the Free Software Foundation; either version 2 */
8 /* of the License, or (at your option) any later version. */
10 /* This program is distributed in the hope that it will be useful, */
11 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
12 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
13 /* GNU Library General Public License for more details. */
15 /* The "thread manager" thread: manages creation and termination of threads */
30 #include <locale.h> /* for __uselocale */
32 #include <l4/sys/ipc.h>
34 #include <l4/re/mem_alloc>
35 #include <l4/re/dataspace>
37 #include <l4/re/util/cap_alloc>
38 #include <l4/sys/capability>
39 #include <l4/sys/factory>
40 #include <l4/sys/scheduler>
41 #include <l4/sys/thread>
45 #include "internals.h"
48 #include "semaphore.h"
53 #define USE_L4RE_FOR_STACK
56 # define MIN(a,b) (((a) < (b)) ? (a) : (b))
/* Assembly-side entry trampoline each new L4 thread starts in. */
59 extern "C" void __pthread_new_thread_entry(void);
62 /* Indicate whether at least one thread has a user-defined stack (if 1),
63 or if all threads have stacks supplied by LinuxThreads (if 0). */
64 int __pthread_nonstandard_stacks;
67 /* Number of active entries in __pthread_handles (used by gdb) */
68 __volatile__ int __pthread_handles_num = 2;
70 /* Whether to use debugger additional actions for thread creation
72 __volatile__ int __pthread_threads_debug;
/* Descriptor of the manager thread; set on manager startup in __pthread_manager(). */
74 static pthread_descr manager_thread;
76 /* Mapping from stack segment to thread descriptor. */
77 /* Stack segment numbers are also indices into the __pthread_handles array. */
78 /* Stack segment number 0 is reserved for the initial thread. */
/* On L4 there are no stack segments, so this always resolves to NULL. */
80 # define thread_segment(seq) NULL
82 /* Flag set in signal handler to record child termination */
84 static __volatile__ int terminated_children;
86 /* Flag set when the initial thread is blocked on pthread_exit waiting
87 for all other threads to terminate */
89 static int main_thread_exiting;
91 /* Counter used to generate unique thread identifier.
92 Thread identifier is pthread_threads_counter + segment. */
94 //l4/static pthread_t pthread_threads_counter;
96 /* Forward declarations */
98 static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
99 void * (*start_routine)(void *), void *arg);
100 static void pthread_handle_free(pthread_t th_id);
101 static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
102 __attribute__ ((noreturn));
103 //l4/static void pthread_kill_all_threads(int main_thread_also);
104 static void pthread_for_each_thread(void *arg,
105 void (*fn)(void *, pthread_descr));
107 static void pthread_exited(pthread_descr th);
109 /* The server thread managing requests for thread creation and termination */
/* Manager main loop: receives pthread_request messages via L4 IPC and
   dispatches on request.req_kind.  Declared noreturn — it only exits the
   process via pthread_handle_exit().
   NOTE(review): this listing is sampled; braces, some case labels and
   break statements are not visible here — confirm against full source. */
112 __attribute__ ((noreturn))
113 __pthread_manager(void *arg)
115 pthread_descr self = manager_thread = (pthread_descr)arg;
116 struct pthread_request request;
/* Install our descriptor as the thread pointer / TLS base. */
119 TLS_INIT_TP(self, 0);
121 /* If we have special thread_self processing, initialize it. */
122 #ifdef INIT_THREAD_SELF
123 INIT_THREAD_SELF(self, 1);
125 #if !(USE_TLS && HAVE___THREAD)
126 /* Set the error variable. */
127 self->p_errnop = &self->p_errno;
128 self->p_h_errnop = &self->p_h_errno;
130 /* Raise our priority to match that of main thread */
131 __pthread_manager_adjust_prio(__pthread_main_thread->p_priority);
/* L4_MSGTAG_SCHEDULE on the reply tag donates the time slice to the replied-to thread. */
134 l4_msgtag_t tag = l4_msgtag(0, 0, 0, L4_MSGTAG_SCHEDULE);
136 /* Enter server loop */
/* Reply to the previous client and wait for the next request in one IPC. */
140 tag = l4_ipc_reply_and_wait(l4_utcb(), tag, &src, L4_IPC_NEVER);
/* Fallback path: plain open wait (presumably when no reply is pending). */
142 tag = l4_ipc_wait(l4_utcb(), &src, L4_IPC_NEVER);
144 if (l4_msgtag_has_error(tag))
/* Copy the request out of the UTCB message registers before replying. */
150 memcpy(&request, l4_utcb_mr()->mr, sizeof(request));
153 switch(request.req_kind)
/* REQ_CREATE: create the thread and report the result back to the requester. */
156 request.req_thread->p_retcode =
157 pthread_handle_create((pthread_t *) &request.req_thread->p_retval,
158 request.req_args.create.attr,
159 request.req_args.create.fn,
160 request.req_args.create.arg);
/* REQ_FREE: release resources of a joined/detached terminated thread. */
164 pthread_handle_free(request.req_args.free.thread_id);
166 case REQ_PROCESS_EXIT:
167 pthread_handle_exit(request.req_thread,
168 request.req_args.exit.code);
171 case REQ_MAIN_THREAD_EXIT:
172 main_thread_exiting = 1;
173 /* Reap children in case all other threads died and the signal handler
174 went off before we set main_thread_exiting to 1, and therefore did
176 //l4/pthread_reap_children();
178 if (__pthread_main_thread->p_nextlive == __pthread_main_thread) {
179 restart(__pthread_main_thread);
180 /* The main thread will now call exit() which will trigger an
181 __on_exit handler, which in turn will send REQ_PROCESS_EXIT
182 to the thread manager. In case you are wondering how the
183 manager terminates from its loop here. */
/* REQ_POST: wake a waiter blocked on the given semaphore. */
187 sem_post((sem_t*)request.req_args.post);
191 /* Make gdb aware of new thread and gdb will restart the
192 new thread when it is ready to handle the new thread. */
193 if (__pthread_threads_debug && __pthread_sig_debug > 0)
194 raise(__pthread_sig_debug);
200 /* This is just a prod to get the manager to reap some
201 threads right away, avoiding a potential delay at shutdown. */
203 case REQ_FOR_EACH_THREAD:
204 pthread_for_each_thread(request.req_args.for_each.arg,
205 request.req_args.for_each.fn);
206 restart(request.req_thread);
209 case REQ_THREAD_EXIT:
212 L4::Cap<L4::Thread> c;
/* Unlink the thread from the live list and free it if detached. */
214 pthread_exited(request.req_thread);
/* Revoke the exited thread's semaphore and thread capabilities everywhere. */
216 c = L4::Cap<L4::Thread>(request.req_thread->p_thsem_cap);
217 e = L4::Cap<L4::Task>(L4Re::This_task)
218 ->unmap(c.fpage(), L4_FP_ALL_SPACES);
220 c = L4::Cap<L4::Thread>(request.req_thread->p_th_cap);
221 e = L4::Cap<L4::Task>(L4Re::This_task)
222 ->unmap(c.fpage(), L4_FP_ALL_SPACES);
/* Rebuild a fresh reply tag for the next loop iteration. */
226 tag = l4_msgtag(0, 0, 0, L4_MSGTAG_SCHEDULE);
/* Debug-event variant of the manager entry point: synchronizes with the
   creator via the descriptor lock, then runs the normal manager loop. */
230 int __pthread_manager_event(void *arg)
232 pthread_descr self = (pthread_descr)arg;
233 /* If we have special thread_self processing, initialize it. */
234 #ifdef INIT_THREAD_SELF
235 INIT_THREAD_SELF(self, 1);
238 /* Get the lock the manager will free once all is correctly set up. */
239 __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
240 /* Free it immediately. */
241 __pthread_unlock (THREAD_GETMEM(self, p_lock));
/* __pthread_manager never returns; this return is formally unreachable. */
243 return __pthread_manager(arg);
246 /* Process creation */
/* Entry point executed inside every newly created thread: sets up TLS,
   scheduling, locale and resolver state, optionally notifies gdb, then
   runs the user start routine and exits with its return value.
   NOTE(review): sampled listing — some lines (braces, declarations such
   as `outcome`) are not visible here. */
249 __attribute__ ((noreturn))
250 pthread_start_thread(void *arg)
252 pthread_descr self = (pthread_descr) arg;
/* Install this thread's descriptor as its TLS base. */
254 TLS_INIT_TP(self, 0);
258 struct pthread_request request;
262 hp_timing_t tmpclock;
264 /* Initialize special thread_self processing, if any. */
265 #ifdef INIT_THREAD_SELF
266 INIT_THREAD_SELF(self, self->p_nr);
/* Record the per-thread CPU clock baseline. */
269 HP_TIMING_NOW (tmpclock);
270 THREAD_SETMEM (self, p_cpuclock_offset, tmpclock);
274 /* Set the scheduling policy and priority for the new thread, if needed */
275 if (THREAD_GETMEM(self, p_start_args.schedpolicy) >= 0)
276 /* Explicit scheduling attributes were provided: apply them */
277 __sched_setscheduler(THREAD_GETMEM(self, p_pid),
278 THREAD_GETMEM(self, p_start_args.schedpolicy),
279 &self->p_start_args.schedparam);
280 else if (manager_thread->p_priority > 0)
281 /* Default scheduling required, but thread manager runs in realtime
282 scheduling: switch new thread to SCHED_OTHER policy */
284 struct sched_param default_params;
285 default_params.sched_priority = 0;
286 __sched_setscheduler(THREAD_GETMEM(self, p_pid),
287 SCHED_OTHER, &default_params);
289 #if !(USE_TLS && HAVE___THREAD)
290 /* Initialize thread-locale current locale to point to the global one.
291 With __thread support, the variable's initializer takes care of this. */
292 __uselocale (LC_GLOBAL_LOCALE);
294 /* Initialize __resp. */
295 __resp = &self->p_res;
297 /* Make gdb aware of new thread */
298 if (__pthread_threads_debug && __pthread_sig_debug > 0) {
299 request.req_thread = self;
300 request.req_kind = REQ_DEBUG;
301 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
302 (char *) &request, sizeof(request)));
306 /* Run the thread code */
307 outcome = self->p_start_args.start_routine(THREAD_GETMEM(self,
309 /* Exit with the given return value */
310 __pthread_do_exit(outcome, (char *)CURRENT_STACK_FRAME);
/* Debug-event variant of the thread trampoline: initializes the pid,
   handshakes with the manager via the descriptor lock, then falls
   through to pthread_start_thread(). */
315 __attribute__ ((noreturn))
316 pthread_start_thread_event(void *arg)
318 pthread_descr self = (pthread_descr) arg;
320 #ifdef INIT_THREAD_SELF
321 INIT_THREAD_SELF(self, self->p_nr);
323 /* Make sure our pid field is initialized, just in case we get there
324 before our father has initialized it. */
325 THREAD_SETMEM(self, p_pid, __getpid());
326 /* Get the lock the manager will free once all is correctly set up. */
327 __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
328 /* Free it immediately. */
329 __pthread_unlock (THREAD_GETMEM(self, p_lock));
331 /* Continue with the real function. */
332 pthread_start_thread (arg);
336 #ifdef USE_L4RE_FOR_STACK
/* Release an L4Re-allocated stack: detach the dataspace mapped at
   stack_addr, free its capability, and free the reserved region whose
   base is guardaddr.  Returns the free_area() error code (0 on success).
   NOTE(review): sampled listing — error handling between detach and
   free is not visible here. */
337 static int pthread_l4_free_stack(void *stack_addr, void *guardaddr)
339 L4Re::Env const *e = L4Re::Env::env();
341 L4::Cap<L4Re::Dataspace> ds;
343 err = e->rm()->detach(stack_addr, &ds);
354 L4Re::Util::cap_alloc.free(ds);
356 return e->rm()->free_area((l4_addr_t)guardaddr);
/* Allocate (or adopt) the stack for a new thread.  Either uses a
   user-supplied stack from attr, or allocates one: via L4Re region
   manager + dataspace when USE_L4RE_FOR_STACK is set, otherwise via
   mmap with a PROT_NONE guard page.  Results are returned through the
   out_* parameters.  Returns 0 on success (error paths are mostly not
   visible in this sampled listing). */
360 static int pthread_allocate_stack(const pthread_attr_t *attr,
361 pthread_descr default_new_thread,
363 char ** out_new_thread,
364 char ** out_new_thread_bottom,
365 char ** out_guardaddr,
366 size_t * out_guardsize,
367 size_t * out_stacksize)
369 pthread_descr new_thread;
370 char * new_thread_bottom;
372 size_t stacksize, guardsize;
375 /* TLS cannot work with fixed thread descriptor addresses. */
376 assert (default_new_thread == NULL);
379 if (attr != NULL && attr->__stackaddr_set)
381 #ifdef _STACK_GROWS_UP
382 /* The user provided a stack. */
384 /* This value is not needed. */
385 new_thread = (pthread_descr) attr->__stackaddr;
386 new_thread_bottom = (char *) new_thread;
388 new_thread = (pthread_descr) attr->__stackaddr;
389 new_thread_bottom = (char *) (new_thread + 1);
391 guardaddr = attr->__stackaddr + attr->__stacksize;
394 /* The user provided a stack. For now we interpret the supplied
395 address as 1 + the highest addr. in the stack segment. If a
396 separate register stack is needed, we place it at the low end
397 of the segment, relying on the associated stacksize to
398 determine the low end of the segment. This differs from many
399 (but not all) other pthreads implementations. The intent is
400 that on machines with a single stack growing toward higher
401 addresses, stackaddr would be the lowest address in the stack
402 segment, so that it is consistently close to the initial sp
405 new_thread = (pthread_descr) attr->__stackaddr;
/* Align the descriptor pointer down to a pointer-size boundary. */
408 (pthread_descr) ((long)(attr->__stackaddr) & -sizeof(void *)) - 1;
410 new_thread_bottom = (char *) attr->__stackaddr - attr->__stacksize;
411 guardaddr = new_thread_bottom;
415 __pthread_nonstandard_stacks = 1;
418 /* Clear the thread data structure. */
419 memset (new_thread, '\0', sizeof (*new_thread));
421 stacksize = attr->__stacksize;
425 const size_t granularity = pagesize;
428 /* Allocate space for stack and thread descriptor at default address */
/* Round the requested guard and stack sizes up to whole pages and cap
   the stack at the process-wide maximum. */
431 guardsize = page_roundup (attr->__guardsize, granularity);
432 stacksize = __pthread_max_stacksize - guardsize;
433 stacksize = MIN (stacksize,
434 page_roundup (attr->__stacksize, granularity));
/* No attributes: one guard page and the maximum default stack. */
438 guardsize = granularity;
439 stacksize = __pthread_max_stacksize - guardsize;
442 #ifdef USE_L4RE_FOR_STACK
444 L4Re::Env const *e = L4Re::Env::env();
/* Reserve a region large enough for guard + stack, anywhere free. */
447 if (e->rm()->reserve_area(&map_addr, stacksize + guardsize,
448 L4Re::Rm::Search_addr) < 0)
451 guardaddr = (char*)map_addr;
453 L4::Cap<L4Re::Dataspace> ds = L4Re::Util::cap_alloc.alloc<L4Re::Dataspace>();
457 err = e->mem_alloc()->alloc(stacksize, ds);
/* Allocation failed: undo the capability and the reserved area. */
461 L4Re::Util::cap_alloc.free(ds);
462 e->rm()->free_area(l4_addr_t(map_addr));
/* Attach the dataspace above the (unmapped) guard area. */
466 new_thread_bottom = (char *) map_addr + guardsize;
467 err = e->rm()->attach(&new_thread_bottom, stacksize, L4Re::Rm::In_area,
/* Attach failed: undo the capability and the reserved area. */
472 L4Re::Util::cap_alloc.free(ds);
473 e->rm()->free_area(l4_addr_t(map_addr));
477 map_addr = mmap(NULL, stacksize + guardsize,
478 PROT_READ | PROT_WRITE | PROT_EXEC,
479 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
480 if (map_addr == MAP_FAILED)
481 /* No more memory available. */
484 guardaddr = (char *)map_addr;
/* Make the guard region inaccessible so overflows fault. */
486 mprotect (guardaddr, guardsize, PROT_NONE);
488 new_thread_bottom = (char *) map_addr + guardsize;
/* Descriptor sits at the top of the stack (exact placement depends on
   stack growth direction / TLS layout). */
492 new_thread = ((pthread_descr) (new_thread_bottom + stacksize));
494 new_thread = ((pthread_descr) (new_thread_bottom + stacksize)) - 1;
497 *out_new_thread = (char *) new_thread;
498 *out_new_thread_bottom = new_thread_bottom;
499 *out_guardaddr = guardaddr;
500 *out_guardsize = guardsize;
501 *out_stacksize = stacksize;
/* Create and start an L4 kernel thread for descriptor `thread`:
   allocates thread + semaphore capabilities, binds the UTCB, pushes
   f/thread onto the new stack (*tos is updated), points the thread at
   __pthread_new_thread_entry, and hands it to the scheduler at `prio`
   (or priority 2 if prio < 0).  Returns 0 on success (error returns are
   mostly not visible in this sampled listing). */
506 int __pthread_mgr_create_thread(pthread_descr thread, char **tos,
507 int (*f)(void*), int prio)
509 using namespace L4Re;
510 Env const *e = Env::env();
/* Auto_cap releases the capability automatically on error paths. */
511 L4Re::Util::Auto_cap<L4::Thread>::Cap _t = L4Re::Util::cap_alloc.alloc<L4::Thread>();
515 L4Re::Util::Auto_cap<Th_sem_cap>::Cap th_sem
516 = L4Re::Util::cap_alloc.alloc<Th_sem_cap>();
517 if (!th_sem.is_valid())
520 int err = l4_error(e->factory()->create_thread(_t.get()));
524 // needed by __alloc_thread_sem
525 thread->p_th_cap = _t.cap();
527 err = __alloc_thread_sem(thread, th_sem.get());
531 thread->p_thsem_cap = th_sem.cap();
533 L4::Thread::Attr attr;
/* p_tid doubles as the new thread's UTCB address on L4. */
534 l4_utcb_t *nt_utcb = (l4_utcb_t*)thread->p_tid;
536 attr.bind(nt_utcb, L4Re::This_task);
538 attr.exc_handler(e->rm());
539 if ((err = l4_error(_t->control(attr))) < 0)
540 fprintf(stderr, "ERROR: l4 thread control returned: %d\n", err);
/* Stash the descriptor in the UTCB so the thread can find itself. */
542 l4_utcb_tcr_u(nt_utcb)->user[0] = l4_addr_t(thread);
/* Build the initial stack frame consumed by __pthread_new_thread_entry:
   argument, fake return address, then the function to call. */
545 l4_umword_t *&_tos = (l4_umword_t*&)*tos;
547 *(--_tos) = l4_addr_t(thread);
548 *(--_tos) = 0; /* ret addr */
549 *(--_tos) = l4_addr_t(f);
552 _t->ex_regs(l4_addr_t(__pthread_new_thread_entry), l4_addr_t(_tos), 0);
554 l4_sched_param_t sp = l4_sched_param(prio >= 0 ? prio : 2);
555 e->scheduler()->run_thread(_t.get(), sp);
557 // release the automatic capabilities
/* Grow the pool of free UTCBs by one kernel-user-memory page: reserve a
   virtual page, ask the kernel to back it with ku_mem, and chain the
   new UTCBs onto the free list.  Non-zero return indicates failure
   (exact return values not fully visible in this sampled listing). */
563 static int l4pthr_get_more_utcb()
565 using namespace L4Re;
568 Env const *e = Env::env();
570 if (e->rm()->reserve_area(&kumem, L4_PAGESIZE,
571 Rm::Reserved | Rm::Search_addr))
574 if (l4_error(e->task()->add_ku_mem(l4_fpage(kumem, L4_PAGESHIFT,
/* add_ku_mem failed: release the reserved area again. */
577 e->rm()->free_area(kumem);
581 __l4_add_utcbs(kumem, kumem + L4_PAGESIZE);
/* Pop one UTCB off the free list (singly linked through user[0]).
   Returns the UTCB, presumably NULL when the list is empty — the check
   is not visible in this sampled listing. */
586 static inline l4_utcb_t *mgr_alloc_utcb()
588 l4_utcb_t *new_utcb = __pthread_first_free_handle;
592 __pthread_first_free_handle = (l4_utcb_t*)l4_utcb_tcr_u(new_utcb)->user[0];
/* Push a UTCB back onto the free list (linked through user[0]). */
596 static inline void mgr_free_utcb(l4_utcb_t *u)
601 l4_utcb_tcr_u(u)->user[0] = l4_addr_t(__pthread_first_free_handle);
602 __pthread_first_free_handle = u;
/* Boot the manager thread: give it a UTCB/tid, create and start the L4
   thread running __pthread_manager, and publish its thread capability
   as the request channel for other threads. */
605 int __pthread_start_manager(pthread_descr mgr)
609 mgr->p_tid = mgr_alloc_utcb();
611 err = __pthread_mgr_create_thread(mgr, &__pthread_manager_thread_tos,
612 __pthread_manager, -1);
615 fprintf(stderr, "ERROR: could not start pthread manager thread\n");
/* Requests are sent via IPC to the manager's thread capability. */
619 __pthread_manager_request = mgr->p_th_cap;
/* Manager-side implementation of pthread_create: allocates TLS, a UTCB
   handle and a stack, initializes the thread descriptor, determines
   scheduling, starts the L4 thread, and links the new thread into the
   doubly linked list of live threads.  On any failure every resource
   acquired so far is rolled back.
   NOTE(review): this listing is sampled; braces, some declarations and
   error-branch lines are not visible here — verify against full source. */
624 static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
625 void * (*start_routine)(void *), void *arg)
628 pthread_descr new_thread;
630 char * new_thread_bottom;
631 pthread_t new_thread_id;
632 char *guardaddr = NULL;
633 size_t guardsize = 0, stksize = 0;
634 int pagesize = L4_PAGESIZE;
/* Allocate the TLS block first; the descriptor lives relative to it. */
638 new_thread = (_pthread_descr_struct*)_dl_allocate_tls (NULL);
639 if (new_thread == NULL)
641 # if defined(TLS_DTV_AT_TP)
642 /* pthread_descr is below TP. */
643 new_thread = (pthread_descr) ((char *) new_thread - TLS_PRE_TCB_SIZE);
646 /* Prevent warnings. */
649 #ifdef __NOT_FOR_L4__
650 /* First check whether we have to change the policy and if yes, whether
651 we can do this. Normally this should be done by examining the
652 return value of the __sched_setscheduler call in pthread_start_thread
653 but this is hard to implement. FIXME */
654 if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
657 /* Find a free segment for the thread, and allocate a stack if needed */
/* Replenish the UTCB pool if it is exhausted; fail if that fails too. */
659 if (__pthread_first_free_handle == 0 && l4pthr_get_more_utcb())
662 # if defined(TLS_DTV_AT_TP)
663 new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
/* Roll back the TLS allocation before reporting failure. */
665 _dl_deallocate_tls (new_thread, true);
/* The UTCB doubles as the thread id/handle on L4. */
671 l4_utcb_t *new_utcb = mgr_alloc_utcb();
675 new_thread_id = new_utcb;
677 if (pthread_allocate_stack(attr, thread_segment(sseg),
678 pagesize, &stack_addr, &new_thread_bottom,
679 &guardaddr, &guardsize, &stksize) == 0)
682 new_thread->p_stackaddr = stack_addr;
684 new_thread = (pthread_descr) stack_addr;
/* Stack allocation failed: return the UTCB to the free list. */
689 mgr_free_utcb(new_utcb);
693 /* Allocate new thread identifier */
694 /* Initialize the thread descriptor. Elements which have to be
695 initialized to zero already have this value. */
696 #if !defined USE_TLS || !TLS_DTV_AT_TP
697 new_thread->p_header.data.tcb = new_thread;
698 new_thread->p_header.data.self = new_thread;
700 #if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
701 new_thread->p_multiple_threads = 1;
703 new_thread->p_tid = new_thread_id;
704 new_thread->p_lock = handle_to_lock(new_utcb);
705 new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
706 new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
707 #if !(USE_TLS && HAVE___THREAD)
708 new_thread->p_errnop = &new_thread->p_errno;
709 new_thread->p_h_errnop = &new_thread->p_h_errno;
711 new_thread->p_guardaddr = guardaddr;
712 new_thread->p_guardsize = guardsize;
713 new_thread->p_inheritsched = attr ? attr->__inheritsched : 0;
/* Cap per-thread alloca usage at a quarter of the stack. */
714 new_thread->p_alloca_cutoff = stksize / 4 > __MAX_ALLOCA_CUTOFF
715 ? __MAX_ALLOCA_CUTOFF : stksize / 4;
716 /* Initialize the thread handle */
717 __pthread_init_lock(handle_to_lock(new_utcb));
718 /* Determine scheduling parameters for the thread */
/* -1 means "no explicit policy requested". */
719 new_thread->p_sched_policy = -1;
722 new_thread->p_detached = attr->__detachstate;
723 new_thread->p_userstack = attr->__stackaddr_set;
725 switch(attr->__inheritsched)
727 case PTHREAD_EXPLICIT_SCHED:
728 new_thread->p_sched_policy = attr->__schedpolicy;
729 new_thread->p_priority = attr->__schedparam.sched_priority;
731 case PTHREAD_INHERIT_SCHED:
736 /* Set the scheduling policy and priority for the new thread, if needed */
737 if (new_thread->p_sched_policy >= 0)
739 /* Explicit scheduling attributes were provided: apply them */
740 prio = __pthread_l4_getprio(new_thread->p_sched_policy,
741 new_thread->p_priority);
742 /* Raise priority of thread manager if needed */
743 __pthread_manager_adjust_prio(prio);
745 else if (manager_thread->p_sched_policy > 3)
747 /* Default scheduling required, but thread manager runs in realtime
748 scheduling: switch new thread to SCHED_OTHER policy */
749 prio = __pthread_l4_getprio(SCHED_OTHER, 0);
751 /* Finish setting up arguments to pthread_start_thread */
752 new_thread->p_start_args.start_routine = start_routine;
753 new_thread->p_start_args.arg = arg;
754 /* Make the new thread ID available already now. If any of the later
755 functions fail we return an error value and the caller must not use
756 the stored thread ID. */
757 *thread = new_thread_id;
758 /* Do the cloning. We have to use two different functions depending
759 on whether we are debugging or not. */
760 err = __pthread_mgr_create_thread(new_thread, &stack_addr,
761 pthread_start_thread, prio);
764 /* Check if cloning succeeded */
766 /* Free the stack if we allocated it */
767 if (attr == NULL || !attr->__stackaddr_set)
769 #ifdef NEED_SEPARATE_REGISTER_STACK
770 size_t stacksize = ((char *)(new_thread->p_guardaddr)
771 - new_thread_bottom);
772 munmap((caddr_t)new_thread_bottom,
773 2 * stacksize + new_thread->p_guardsize);
774 #elif _STACK_GROWS_UP
776 size_t stacksize = guardaddr - stack_addr;
777 munmap(stack_addr, stacksize + guardsize);
780 size_t stacksize = guardaddr - (char *)new_thread;
781 munmap(new_thread, stacksize + guardsize);
784 #ifdef USE_L4RE_FOR_STACK
785 if (pthread_l4_free_stack(new_thread_bottom, guardaddr))
786 fprintf(stderr, "ERROR: failed to free stack\n");
789 size_t stacksize = stack_addr - new_thread_bottom;
791 size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
793 munmap(new_thread_bottom - guardsize, guardsize + stacksize);
/* Also roll back the TLS block and the UTCB handle. */
798 # if defined(TLS_DTV_AT_TP)
799 new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
801 _dl_deallocate_tls (new_thread, true);
803 mgr_free_utcb(new_utcb);
806 /* Insert new thread in doubly linked list of active threads */
807 new_thread->p_prevlive = __pthread_main_thread;
808 new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
809 __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
810 __pthread_main_thread->p_nextlive = new_thread;
811 /* Set pid field of the new thread, in case we get there before the
817 /* Try to free the resources of a thread when requested by pthread_join
818 or pthread_detach on a terminated thread. */
/* Releases handle, L4 capabilities, read-lock lists, stack and TLS of an
   already-exited thread.  NOTE(review): sampled listing — braces and
   some free()/munmap lines are not visible here. */
820 static void pthread_free(pthread_descr th)
822 pthread_handle handle;
823 pthread_readlock_info *iter, *next;
/* Caller must only pass threads already marked exited. */
825 ASSERT(th->p_exited);
826 /* Make the handle invalid */
827 handle = thread_handle(th->p_tid);
828 __pthread_lock(handle_to_lock(handle), NULL);
829 mgr_free_utcb(handle);
830 __pthread_unlock(handle_to_lock(handle));
833 // free the semaphore and the thread
/* Auto_cap destructors release both capabilities when they go out of scope. */
834 L4Re::Util::Auto_cap<void>::Cap s = L4::Cap<void>(th->p_thsem_cap);
835 L4Re::Util::Auto_cap<void>::Cap t = L4::Cap<void>(th->p_th_cap);
838 /* One fewer threads in __pthread_handles */
840 /* Destroy read lock list, and list of free read lock structures.
841 If the former is not empty, it means the thread exited while
842 holding read locks! */
844 for (iter = th->p_readlock_list; iter != NULL; iter = next)
846 next = iter->pr_next;
850 for (iter = th->p_readlock_free; iter != NULL; iter = next)
852 next = iter->pr_next;
856 /* If initial thread, nothing to free */
857 if (!th->p_userstack)
859 size_t guardsize = th->p_guardsize;
860 /* Free the stack and thread descriptor area */
861 char *guardaddr = (char*)th->p_guardaddr;
862 #ifdef _STACK_GROWS_UP
864 size_t stacksize = guardaddr - th->p_stackaddr;
866 size_t stacksize = guardaddr - (char *)th;
868 guardaddr = (char *)th;
870 /* Guardaddr is always set, even if guardsize is 0. This allows
871 us to compute everything else. */
873 //l4/size_t stacksize = th->p_stackaddr - guardaddr - guardsize;
875 //l4/size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
877 # ifdef NEED_SEPARATE_REGISTER_STACK
878 /* Take account of the register stack, which is below guardaddr. */
879 guardaddr -= stacksize;
883 #ifdef USE_L4RE_FOR_STACK
884 pthread_l4_free_stack(guardaddr + guardsize, guardaddr);
886 munmap(guardaddr, stacksize + guardsize);
/* Finally release the TLS block belonging to the thread. */
892 # if defined(TLS_DTV_AT_TP)
893 th = (pthread_descr) ((char *) th + TLS_PRE_TCB_SIZE);
895 _dl_deallocate_tls (th, true);
899 /* Handle threads that have exited */
/* Unlinks th from the live-thread list, marks it exited, and (when
   detached — exact call not visible in this sampled listing) frees its
   resources.  Also wakes the main thread if it is the last one to go. */
901 static void pthread_exited(pthread_descr th)
904 /* Remove thread from list of active threads */
905 th->p_nextlive->p_prevlive = th->p_prevlive;
906 th->p_prevlive->p_nextlive = th->p_nextlive;
907 /* Mark thread as exited, and if detached, free its resources */
908 __pthread_lock(th->p_lock, NULL);
910 /* If we have to signal this event do it now. */
/* Read the detach state under the lock, act on it after unlocking. */
911 detached = th->p_detached;
912 __pthread_unlock(th->p_lock);
915 /* If all threads have exited and the main thread is pending on a
916 pthread_exit, wake up the main thread and terminate ourselves. */
917 if (main_thread_exiting &&
918 __pthread_main_thread->p_nextlive == __pthread_main_thread) {
919 restart(__pthread_main_thread);
920 /* Same logic as REQ_MAIN_THREAD_EXIT. */
925 /* Try to free the resources of a thread when requested by pthread_join
926 or pthread_detach on a terminated thread. */
/* Looks up the handle under its lock; if the thread was already reaped
   this is a no-op, otherwise the descriptor is resolved for freeing
   (the final pthread_free call is not visible in this sampled listing). */
928 static void pthread_handle_free(pthread_t th_id)
930 pthread_handle handle = thread_handle(th_id);
933 __pthread_lock(handle_to_lock(handle), NULL);
934 if (nonexisting_handle(handle, th_id)) {
935 /* pthread_reap_children has deallocated the thread already,
936 nothing needs to be done */
937 __pthread_unlock(handle_to_lock(handle));
940 th = handle_to_descr(handle);
941 __pthread_unlock(handle_to_lock(handle));
946 /* Send a signal to all running threads */
/* Unimplemented on L4 (UNIMPL stub); the Linux-style kill() loop below
   is retained from the original implementation for reference. */
949 static void pthread_kill_all_threads(int main_thread_also)
951 UNIMPL("pthread_kill_all_threads");
954 for (th = __pthread_main_thread->p_nextlive;
955 th != __pthread_main_thread;
956 th = th->p_nextlive) {
957 kill(th->p_pid, sig);
959 if (main_thread_also) {
960 kill(__pthread_main_thread->p_pid, sig);
/* Invoke fn(arg, th) for every live thread, the main thread last. */
966 static void pthread_for_each_thread(void *arg,
967 void (*fn)(void *, pthread_descr))
971 for (th = __pthread_main_thread->p_nextlive;
972 th != __pthread_main_thread;
973 th = th->p_nextlive) {
977 fn(arg, __pthread_main_thread);
980 /* Process-wide exit() */
/* Terminates the whole process on behalf of issuing_thread: flags the
   global exit request, cancels/reaps all other threads (original Linux
   kill/waitpid path kept under THIS_IS_THE_ORIGINAL — exact #ifdef
   placement not visible in this sampled listing), resets stdio locks
   and resumes the issuing thread so it can run exit handlers. */
982 static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
984 //l4/pthread_descr th;
985 __pthread_exit_requested = 1;
986 __pthread_exit_code = exitcode;
988 /* A forced asynchronous cancellation follows. Make sure we won't
989 get stuck later in the main thread with a system lock being held
990 by one of the cancelled threads. Ideally one would use the same
991 code as in pthread_atfork(), but we can't distinguish system and
992 user handlers there. */
994 /* Send the CANCEL signal to all running threads, including the main
995 thread, but excluding the thread from which the exit request originated
996 (that thread must complete the exit, e.g. calling atexit functions
997 and flushing stdio buffers). */
998 for (th = issuing_thread->p_nextlive;
999 th != issuing_thread;
1000 th = th->p_nextlive) {
1001 kill(th->p_pid, __pthread_sig_cancel);
1003 /* Now, wait for all these threads, so that they don't become zombies
1004 and their times are properly added to the thread manager's times. */
1005 for (th = issuing_thread->p_nextlive;
1006 th != issuing_thread;
1007 th = th->p_nextlive) {
1008 waitpid(th->p_pid, NULL, __WCLONE);
/* Reinitialize stdio stream locks that dead threads may have held. */
1010 __fresetlockfiles();
1012 restart(issuing_thread);
1013 #ifdef THIS_IS_THE_ORIGINAL
1016 // we do not do the exit path with kill and waitpid, so give the code here
1022 /* Handler for __pthread_sig_cancel in thread manager thread */
/* Records that children terminated; if the main thread is already in
   its exit path, kick the manager loop with a REQ_KICK request so it
   reaps threads immediately instead of waiting for a timeout. */
1024 void __pthread_manager_sighandler(int sig)
1026 int kick_manager = terminated_children == 0 && main_thread_exiting;
1027 terminated_children = 1;
1029 /* If the main thread is terminating, kick the thread manager loop
1030 each time some threads terminate. This eliminates a two second
1031 shutdown delay caused by the thread manager sleeping in the
1032 call to __poll(). Instead, the thread manager is kicked into
1033 action, reaps the outstanding threads and resumes the main thread
1034 so that it can complete the shutdown. */
1037 struct pthread_request request;
1038 request.req_thread = 0;
1039 request.req_kind = REQ_KICK;
1040 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
1041 (char *) &request, sizeof(request)));
1045 /* Adjust priority of thread manager so that it always run at a priority
1046 higher than all threads */
/* No-op before the manager exists or when it is already at/above
   thread_prio; otherwise re-runs the manager thread at the new priority
   via the L4 scheduler and records it in the descriptor. */
1048 void __pthread_manager_adjust_prio(int thread_prio)
1050 if (!manager_thread)
1053 if (thread_prio <= manager_thread->p_priority)
1056 l4_sched_param_t sp = l4_sched_param(thread_prio, 0);
1057 L4Re::Env::env()->scheduler()->run_thread(L4::Cap<L4::Thread>(manager_thread->p_th_cap), sp);
1058 manager_thread->p_priority = thread_prio;