1 /* Linuxthreads - a simple clone()-based implementation of Posix */
2 /* threads for Linux. */
3 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
5 /* This program is free software; you can redistribute it and/or */
6 /* modify it under the terms of the GNU Library General Public License */
7 /* as published by the Free Software Foundation; either version 2 */
8 /* of the License, or (at your option) any later version. */
10 /* This program is distributed in the hope that it will be useful, */
11 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
12 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
13 /* GNU Library General Public License for more details. */
15 /* The "thread manager" thread: manages creation and termination of threads */
29 #include <locale.h> /* for __uselocale */
31 #include <l4/sys/ipc.h>
33 #include <l4/re/mem_alloc>
34 #include <l4/re/dataspace>
36 #include <l4/re/util/cap_alloc>
37 #include <l4/sys/capability>
38 #include <l4/sys/factory>
39 #include <l4/sys/scheduler>
40 #include <l4/sys/thread>
43 #include "internals.h"
46 #include "semaphore.h"
50 # define MIN(a,b) (((a) < (b)) ? (a) : (b))
/* Assembly entry point for freshly created threads (see __pthread_mgr_create_thread). */
53 extern "C" void __pthread_new_thread_entry(void);
55 /* For debugging purposes put the maximum number of threads in a variable. */
56 const int __linuxthreads_pthread_threads_max = PTHREAD_THREADS_MAX;
59 /* Indicate whether at least one thread has a user-defined stack (if 1),
60 or if all threads have stacks supplied by LinuxThreads (if 0). */
61 int __pthread_nonstandard_stacks;
64 /* Number of active entries in __pthread_handles (used by gdb) */
65 __volatile__ int __pthread_handles_num = 2;
67 /* Whether to use debugger additional actions for thread creation */
69 __volatile__ int __pthread_threads_debug;
/* Descriptor of the manager thread itself; set once in __pthread_manager. */
71 static pthread_descr manager_thread;
73 /* Mapping from stack segment to thread descriptor. */
74 /* Stack segment numbers are also indices into the __pthread_handles array. */
75 /* Stack segment number 0 is reserved for the initial thread. */
/* On this L4 port stack segments are not used, so the lookup always yields NULL. */
77 # define thread_segment(seq) NULL
79 /* Flag set in signal handler to record child termination */
81 static __volatile__ int terminated_children;
83 /* Flag set when the initial thread is blocked on pthread_exit waiting
84 for all other threads to terminate */
86 static int main_thread_exiting;
88 /* Counter used to generate unique thread identifier.
89 Thread identifier is pthread_threads_counter + segment. */
91 //l4/static pthread_t pthread_threads_counter;
93 /* Forward declarations */
95 static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
96 void * (*start_routine)(void *), void *arg);
97 static void pthread_handle_free(pthread_t th_id);
98 static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
99 __attribute__ ((noreturn));
100 //l4/static void pthread_kill_all_threads(int main_thread_also);
101 static void pthread_for_each_thread(void *arg,
102 void (*fn)(void *, pthread_descr));
104 /* The server thread managing requests for thread creation and termination */
/* Runs as a dedicated L4 IPC server: it answers the previous client with
   reply_and_wait and dispatches pthread_request messages copied out of the
   UTCB message registers.  Never returns. */
107 __attribute__ ((noreturn))
108 __pthread_manager(void *arg)
110 pthread_descr self = manager_thread = (pthread_descr)arg;
111 struct pthread_request request;
113 /* If we have special thread_self processing, initialize it. */
114 #ifdef INIT_THREAD_SELF
115 INIT_THREAD_SELF(self, 1);
117 #if !(USE_TLS && HAVE___THREAD)
118 /* Set the error variable. */
119 self->p_errnop = &self->p_errno;
120 self->p_h_errnop = &self->p_h_errno;
122 /* Raise our priority to match that of main thread */
123 __pthread_manager_adjust_prio(__pthread_main_thread->p_priority);
/* Initial (empty) reply tag; L4_MSGTAG_SCHEDULE lets the kernel donate the
   time slice on the reply. */
126 l4_msgtag_t tag = l4_msgtag(0, 0, 0, L4_MSGTAG_SCHEDULE);
128 /* Enter server loop */
132 tag = l4_ipc_reply_and_wait(l4_utcb(), tag, &src, L4_IPC_NEVER);
/* NOTE(review): a plain wait path also exists here — presumably taken when
   there is no pending reply; the selecting condition is not visible. */
134 tag = l4_ipc_wait(l4_utcb(), &src, L4_IPC_NEVER);
136 if (l4_msgtag_has_error(tag))
/* The request structure is transferred verbatim in the message registers. */
142 memcpy(&request, l4_utcb_mr()->mr, sizeof(request));
145 switch(request.req_kind)
/* REQ_CREATE: create the thread and report the result through the
   requester's descriptor fields. */
148 request.req_thread->p_retcode =
149 pthread_handle_create((pthread_t *) &request.req_thread->p_retval,
150 request.req_args.create.attr,
151 request.req_args.create.fn,
152 request.req_args.create.arg);
156 pthread_handle_free(request.req_args.free.thread_id);
158 case REQ_PROCESS_EXIT:
159 pthread_handle_exit(request.req_thread,
160 request.req_args.exit.code);
163 case REQ_MAIN_THREAD_EXIT:
164 main_thread_exiting = 1;
165 /* Reap children in case all other threads died and the signal handler
166 went off before we set main_thread_exiting to 1, and therefore did
168 //l4/pthread_reap_children();
170 if (__pthread_main_thread->p_nextlive == __pthread_main_thread) {
171 restart(__pthread_main_thread);
172 /* The main thread will now call exit() which will trigger an
173 __on_exit handler, which in turn will send REQ_PROCESS_EXIT
174 to the thread manager. In case you are wondering how the
175 manager terminates from its loop here. */
179 sem_post((sem_t*)request.req_args.post);
183 /* Make gdb aware of new thread and gdb will restart the
184 new thread when it is ready to handle the new thread. */
185 if (__pthread_threads_debug && __pthread_sig_debug > 0)
186 raise(__pthread_sig_debug);
192 /* This is just a prod to get the manager to reap some
193 threads right away, avoiding a potential delay at shutdown. */
195 case REQ_FOR_EACH_THREAD:
196 pthread_for_each_thread(request.req_args.for_each.arg,
197 request.req_args.for_each.fn);
198 restart(request.req_thread);
201 case REQ_L4_RESERVE_CONSECUTIVE_UTCBS:
202 *request.req_args.l4_reserve_consecutive_utcbs.retutcbp
203 = pthread_mgr_l4_reserve_consecutive_utcbs(request.req_args.l4_reserve_consecutive_utcbs.num);
/* Reset the reply tag for the next reply_and_wait round. */
207 tag = l4_msgtag(0, 0, 0, L4_MSGTAG_SCHEDULE);
/* Variant entry point for the manager thread: first synchronizes with the
   creating thread via the descriptor lock (taken and released immediately so
   we block until setup is complete), then falls through to the normal
   manager loop.  Presumably used on the debugger/event path — confirm. */
211 int __pthread_manager_event(void *arg)
213 pthread_descr self = (pthread_descr)arg;
214 /* If we have special thread_self processing, initialize it. */
215 #ifdef INIT_THREAD_SELF
216 INIT_THREAD_SELF(self, 1);
219 /* Get the lock the manager will free once all is correctly set up. */
220 __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
221 /* Free it immediately. */
222 __pthread_unlock (THREAD_GETMEM(self, p_lock));
224 return __pthread_manager(arg);
227 /* Process creation */
/* First function executed (indirectly) by every new thread: finishes
   per-thread setup (self pointer, CPU-clock offset, scheduling, locale,
   resolver state, optional gdb notification), runs the user's start
   routine, and exits the thread with its return value.  Never returns. */
230 __attribute__ ((noreturn))
231 pthread_start_thread(void *arg)
233 pthread_descr self = (pthread_descr) arg;
235 struct pthread_request request;
239 hp_timing_t tmpclock;
241 /* Initialize special thread_self processing, if any. */
242 #ifdef INIT_THREAD_SELF
243 INIT_THREAD_SELF(self, self->p_nr);
/* Record the thread's CPU-clock origin for CLOCK_THREAD_CPUTIME_ID style
   accounting. */
246 HP_TIMING_NOW (tmpclock);
247 THREAD_SETMEM (self, p_cpuclock_offset, tmpclock);
251 /* Set the scheduling policy and priority for the new thread, if needed */
252 if (THREAD_GETMEM(self, p_start_args.schedpolicy) >= 0)
253 /* Explicit scheduling attributes were provided: apply them */
254 __sched_setscheduler(THREAD_GETMEM(self, p_pid),
255 THREAD_GETMEM(self, p_start_args.schedpolicy),
256 &self->p_start_args.schedparam);
257 else if (manager_thread->p_priority > 0)
258 /* Default scheduling required, but thread manager runs in realtime
259 scheduling: switch new thread to SCHED_OTHER policy */
261 struct sched_param default_params;
262 default_params.sched_priority = 0;
263 __sched_setscheduler(THREAD_GETMEM(self, p_pid),
264 SCHED_OTHER, &default_params);
266 #if !(USE_TLS && HAVE___THREAD)
267 /* Initialize thread-locale current locale to point to the global one.
268 With __thread support, the variable's initializer takes care of this. */
269 __uselocale (LC_GLOBAL_LOCALE);
271 /* Initialize __resp. */
272 __resp = &self->p_res;
274 /* Make gdb aware of new thread */
275 if (__pthread_threads_debug && __pthread_sig_debug > 0) {
276 request.req_thread = self;
277 request.req_kind = REQ_DEBUG;
278 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
279 (char *) &request, sizeof(request)));
283 /* Run the thread code */
284 outcome = self->p_start_args.start_routine(THREAD_GETMEM(self,
286 /* Exit with the given return value */
287 __pthread_do_exit(outcome, (char *)CURRENT_STACK_FRAME);
/* Thread trampoline used on the event/debug path: initializes the pid
   field defensively, rendezvouses with the manager via the descriptor lock
   (lock + immediate unlock blocks until the manager finished setup), then
   continues with the regular pthread_start_thread.  Never returns. */
292 __attribute__ ((noreturn))
293 pthread_start_thread_event(void *arg)
295 pthread_descr self = (pthread_descr) arg;
297 #ifdef INIT_THREAD_SELF
298 INIT_THREAD_SELF(self, self->p_nr);
300 /* Make sure our pid field is initialized, just in case we get there
301 before our father has initialized it. */
302 THREAD_SETMEM(self, p_pid, __getpid());
303 /* Get the lock the manager will free once all is correctly set up. */
304 __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
305 /* Free it immediately. */
306 __pthread_unlock (THREAD_GETMEM(self, p_lock));
308 /* Continue with the real function. */
309 pthread_start_thread (arg);
/* Set up the stack (and, on the non-TLS layout, the thread descriptor at
   the top of the stack) for a new thread.
   - If the caller supplied a stack via attr, use it as-is and remember that
     a nonstandard stack is in play.
   - Otherwise reserve an area in the L4Re region map, allocate a dataspace
     for the stack, and attach it leaving 'guardsize' unmapped at the low
     end as the guard region.
   Results are returned through the out_* parameters.  Returns 0 on
   success; error paths release the capability and the reserved area. */
313 static int pthread_allocate_stack(const pthread_attr_t *attr,
314 pthread_descr default_new_thread,
316 char ** out_new_thread,
317 char ** out_new_thread_bottom,
318 char ** out_guardaddr,
319 size_t * out_guardsize,
320 size_t * out_stacksize)
322 pthread_descr new_thread;
323 char * new_thread_bottom;
325 size_t stacksize, guardsize;
328 /* TLS cannot work with fixed thread descriptor addresses. */
329 assert (default_new_thread == NULL);
332 if (attr != NULL && attr->__stackaddr_set)
334 #ifdef _STACK_GROWS_UP
335 /* The user provided a stack. */
337 /* This value is not needed. */
338 new_thread = (pthread_descr) attr->__stackaddr;
339 new_thread_bottom = (char *) new_thread;
341 new_thread = (pthread_descr) attr->__stackaddr;
342 new_thread_bottom = (char *) (new_thread + 1);
344 guardaddr = attr->__stackaddr + attr->__stacksize;
347 /* The user provided a stack. For now we interpret the supplied
348 address as 1 + the highest addr. in the stack segment. If a
349 separate register stack is needed, we place it at the low end
350 of the segment, relying on the associated stacksize to
351 determine the low end of the segment. This differs from many
352 (but not all) other pthreads implementations. The intent is
353 that on machines with a single stack growing toward higher
354 addresses, stackaddr would be the lowest address in the stack
355 segment, so that it is consistently close to the initial sp
358 new_thread = (pthread_descr) attr->__stackaddr;
/* Descriptor placed just below the (pointer-aligned) top of the user stack. */
361 (pthread_descr) ((long)(attr->__stackaddr) & -sizeof(void *)) - 1;
363 new_thread_bottom = (char *) attr->__stackaddr - attr->__stacksize;
364 guardaddr = new_thread_bottom;
368 __pthread_nonstandard_stacks = 1;
371 /* Clear the thread data structure. */
372 memset (new_thread, '\0', sizeof (*new_thread));
374 stacksize = attr->__stacksize;
378 const size_t granularity = pagesize;
381 /* Allocate space for stack and thread descriptor at default address */
384 guardsize = page_roundup (attr->__guardsize, granularity);
385 stacksize = __pthread_max_stacksize - guardsize;
386 stacksize = MIN (stacksize,
387 page_roundup (attr->__stacksize, granularity));
/* No attributes supplied: fall back to library defaults. */
391 guardsize = granularity;
392 stacksize = __pthread_default_stacksize - guardsize;
396 L4Re::Env const *e = L4Re::Env::env();
/* Reserve one contiguous virtual region covering guard + stack. */
399 if (e->rm()->reserve_area(&map_addr, stacksize + guardsize,
400 L4Re::Rm::Search_addr) < 0)
403 guardaddr = (char*)map_addr;
405 L4::Cap<L4Re::Dataspace> ds = L4Re::Util::cap_alloc.alloc<L4Re::Dataspace>();
409 err = e->mem_alloc()->alloc(stacksize, ds);
/* Allocation failed: roll back capability and reserved area. */
413 L4Re::Util::cap_alloc.free(ds);
414 e->rm()->free_area(l4_addr_t(map_addr));
/* Attach the stack above the guard hole; the guard pages stay unmapped. */
418 new_thread_bottom = (char *) map_addr + guardsize;
419 err = e->rm()->attach(&new_thread_bottom, stacksize, L4Re::Rm::In_area,
424 L4Re::Util::cap_alloc.free(ds);
425 e->rm()->free_area(l4_addr_t(map_addr));
430 new_thread = ((pthread_descr) (new_thread_bottom + stacksize));
432 new_thread = ((pthread_descr) (new_thread_bottom + stacksize)) - 1;
435 *out_new_thread = (char *) new_thread;
436 *out_new_thread_bottom = new_thread_bottom;
437 *out_guardaddr = guardaddr;
438 *out_guardsize = guardsize;
439 *out_stacksize = stacksize;
/* Create and start the kernel-level L4 thread backing 'thread'.
   Allocates the thread and semaphore capabilities, binds the thread to the
   UTCB stored in thread->p_tid, seeds the initial stack frame (*tos) with
   the descriptor, a dummy return address and the entry function 'f', then
   starts it at __pthread_new_thread_entry with priority 'prio' (or the
   default priority 2 if prio < 0).  'tos' is updated to the new stack top. */
444 int __pthread_mgr_create_thread(pthread_descr thread, char **tos,
445 int (*f)(void*), int prio)
447 using namespace L4Re;
448 Env const *e = Env::env();
449 L4Re::Util::Auto_cap<L4::Thread>::Cap _t = L4Re::Util::cap_alloc.alloc<L4::Thread>();
453 L4Re::Util::Auto_cap<Th_sem_cap>::Cap th_sem
454 = L4Re::Util::cap_alloc.alloc<Th_sem_cap>();
455 if (!th_sem.is_valid())
458 int err = l4_error(e->factory()->create_thread(_t.get()));
462 // needed by __alloc_thread_sem
463 thread->p_th_cap = _t.cap();
465 err = __alloc_thread_sem(thread, th_sem.get());
469 thread->p_thsem_cap = th_sem.cap();
471 L4::Thread::Attr attr;
/* The pthread id doubles as the thread's UTCB address on this port. */
473 l4_utcb_t *nt_utcb = (l4_utcb_t*)thread->p_tid;
474 attr.bind(nt_utcb, L4Re::This_task);
476 attr.exc_handler(e->rm());
/* Store the descriptor in the UTCB so the thread can find 'self'. */
479 l4_utcb_tcr_u(nt_utcb)->user[0] = l4_addr_t(thread);
482 l4_umword_t *&_tos = (l4_umword_t*&)*tos;
/* Initial frame consumed by __pthread_new_thread_entry: arg, fake return
   address, and the C entry function. */
484 *(--_tos) = l4_addr_t(thread);
485 *(--_tos) = 0; /* ret addr */
486 *(--_tos) = l4_addr_t(f);
489 _t->ex_regs(l4_addr_t(__pthread_new_thread_entry), l4_addr_t(_tos), 0);
491 l4_sched_param_t sp = l4_sched_param(prio >= 0 ? prio : 2);
492 e->scheduler()->run_thread(_t.get(), sp);
494 // release the automatic capabilities
/* Pop one UTCB from the free list.  The list is threaded through the
   user[0] word of each free UTCB's thread-control registers. */
500 static inline l4_utcb_t *mgr_alloc_utcb()
502 l4_utcb_t *new_utcb = __pthread_first_free_handle;
506 __pthread_first_free_handle = (l4_utcb_t*)l4_utcb_tcr_u(new_utcb)->user[0];
/* Push UTCB 'u' back onto the free list (inverse of mgr_alloc_utcb). */
510 static inline void mgr_free_utcb(l4_utcb_t *u)
515 l4_utcb_tcr_u(u)->user[0] = l4_addr_t(__pthread_first_free_handle);
516 __pthread_first_free_handle = u;
/* Bootstrap the manager thread: give it a UTCB-based id, spawn it with
   __pthread_mgr_create_thread (default priority), and publish its thread
   capability as the global request gate. */
519 int __pthread_start_manager(pthread_descr mgr)
523 mgr->p_tid = mgr_alloc_utcb();
525 err = __pthread_mgr_create_thread(mgr, &__pthread_manager_thread_tos,
526 __pthread_manager, -1);
529 fprintf(stderr, "ERROR: could not start pthread manager thread\n");
533 __pthread_manager_request = mgr->p_th_cap;
/* Manager-side implementation of pthread_create: allocate TLS and a UTCB
   handle, allocate/adopt a stack, initialize the thread descriptor, apply
   scheduling attributes, start the kernel thread and link the descriptor
   into the active-thread list.  On failure all resources acquired so far
   are rolled back and an errno-style code is returned via p_retcode. */
538 static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
539 void * (*start_routine)(void *), void *arg)
542 pthread_descr new_thread;
544 char * new_thread_bottom;
545 pthread_t new_thread_id;
546 char *guardaddr = NULL;
547 size_t guardsize = 0, stksize = 0;
548 int pagesize = L4_PAGESIZE;
552 new_thread = _dl_allocate_tls (NULL);
553 if (new_thread == NULL)
555 # if defined(TLS_DTV_AT_TP)
556 /* pthread_descr is below TP. */
557 new_thread = (pthread_descr) ((char *) new_thread - TLS_PRE_TCB_SIZE);
560 /* Prevent warnings. */
563 #ifdef __NOT_FOR_L4__
564 /* First check whether we have to change the policy and if yes, whether
565 we can do this. Normally this should be done by examining the
566 return value of the __sched_setscheduler call in pthread_start_thread
567 but this is hard to implement. FIXME */
568 if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
571 /* Find a free segment for the thread, and allocate a stack if needed */
/* Out of UTCBs == out of thread handles: undo the TLS allocation. */
573 if (__pthread_first_free_handle == 0)
576 # if defined(TLS_DTV_AT_TP)
577 new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
579 _dl_deallocate_tls (new_thread, true);
584 l4_utcb_t *new_utcb = mgr_alloc_utcb();
/* The UTCB address itself serves as the pthread_t id on this port. */
588 new_thread_id = new_utcb;
590 if (pthread_allocate_stack(attr, thread_segment(sseg),
591 pagesize, &stack_addr, &new_thread_bottom,
592 &guardaddr, &guardsize, &stksize) == 0)
595 new_thread->p_stackaddr = stack_addr;
597 new_thread = (pthread_descr) stack_addr;
/* Stack allocation failed: return the UTCB handle to the free list. */
602 mgr_free_utcb(new_utcb);
606 /* Allocate new thread identifier */
607 /* Initialize the thread descriptor. Elements which have to be
608 initialized to zero already have this value. */
609 #if !defined USE_TLS || !TLS_DTV_AT_TP
610 new_thread->p_header.data.tcb = new_thread;
611 new_thread->p_header.data.self = new_thread;
613 #if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
614 new_thread->p_multiple_threads = 1;
616 new_thread->p_tid = new_thread_id;
617 new_thread->p_lock = handle_to_lock(new_utcb);
618 new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
619 new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
620 #if !(USE_TLS && HAVE___THREAD)
621 new_thread->p_errnop = &new_thread->p_errno;
622 new_thread->p_h_errnop = &new_thread->p_h_errno;
624 new_thread->p_guardaddr = guardaddr;
625 new_thread->p_guardsize = guardsize;
626 new_thread->p_inheritsched = attr ? attr->__inheritsched : 0;
/* Cap alloca() usage at a quarter of the stack (bounded by the global max). */
627 new_thread->p_alloca_cutoff = stksize / 4 > __MAX_ALLOCA_CUTOFF
628 ? __MAX_ALLOCA_CUTOFF : stksize / 4;
629 /* Initialize the thread handle */
630 __pthread_init_lock(handle_to_lock(new_utcb));
631 /* Determine scheduling parameters for the thread */
/* -1 means "no explicit policy requested". */
632 new_thread->p_sched_policy = -1;
635 new_thread->p_detached = attr->__detachstate;
636 new_thread->p_userstack = attr->__stackaddr_set;
638 switch(attr->__inheritsched)
640 case PTHREAD_EXPLICIT_SCHED:
641 new_thread->p_sched_policy = attr->__schedpolicy;
642 new_thread->p_priority = attr->__schedparam.sched_priority;
644 case PTHREAD_INHERIT_SCHED:
649 /* Set the scheduling policy and priority for the new thread, if needed */
650 if (new_thread->p_sched_policy >= 0)
652 /* Explicit scheduling attributes were provided: apply them */
653 prio = __pthread_l4_getprio(new_thread->p_sched_policy,
654 new_thread->p_priority);
655 /* Raise priority of thread manager if needed */
656 __pthread_manager_adjust_prio(prio);
658 else if (manager_thread->p_sched_policy > 3)
660 /* Default scheduling required, but thread manager runs in realtime
661 scheduling: switch new thread to SCHED_OTHER policy */
662 prio = __pthread_l4_getprio(SCHED_OTHER, 0);
664 /* Finish setting up arguments to pthread_start_thread */
665 new_thread->p_start_args.start_routine = start_routine;
666 new_thread->p_start_args.arg = arg;
667 /* Make the new thread ID available already now. If any of the later
668 functions fail we return an error value and the caller must not use
669 the stored thread ID. */
670 *thread = new_thread_id;
671 /* Do the cloning. We have to use two different functions depending
672 on whether we are debugging or not. */
673 err = __pthread_mgr_create_thread(new_thread, &stack_addr,
674 pthread_start_thread, prio);
677 /* Check if cloning succeeded */
679 /* Free the stack if we allocated it */
680 if (attr == NULL || !attr->__stackaddr_set)
682 #ifdef NEED_SEPARATE_REGISTER_STACK
683 size_t stacksize = ((char *)(new_thread->p_guardaddr)
684 - new_thread_bottom);
685 munmap((caddr_t)new_thread_bottom,
686 2 * stacksize + new_thread->p_guardsize);
687 #elif _STACK_GROWS_UP
689 size_t stacksize = guardaddr - stack_addr;
690 munmap(stack_addr, stacksize + guardsize);
693 size_t stacksize = guardaddr - (char *)new_thread;
694 munmap(new_thread, stacksize + guardsize);
698 size_t stacksize = stack_addr - new_thread_bottom;
700 //l4/size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
/* NOTE(review): the L4 cleanup of a failed create is not implemented —
   stack dataspace and reserved area leak here. */
702 UNIMPL("Handle failed thread create correctly!");
703 // munmap(new_thread_bottom - guardsize, guardsize + stacksize);
707 # if defined(TLS_DTV_AT_TP)
708 new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
710 _dl_deallocate_tls (new_thread, true);
712 mgr_free_utcb(new_utcb);
715 /* Insert new thread in doubly linked list of active threads */
716 new_thread->p_prevlive = __pthread_main_thread;
717 new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
718 __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
719 __pthread_main_thread->p_nextlive = new_thread;
720 /* Set pid field of the new thread, in case we get there before the
726 /* Try to free the resources of a thread when requested by pthread_join
727 or pthread_detach on a terminated thread. */
/* Releases: the UTCB handle, the thread/semaphore capabilities, the
   read-lock bookkeeping lists, the stack mapping (unless user-supplied),
   and the TLS block.  The thread must already be marked exited. */
729 static void pthread_free(pthread_descr th)
731 pthread_handle handle;
732 pthread_readlock_info *iter, *next;
734 ASSERT(th->p_exited);
735 /* Make the handle invalid */
736 handle = thread_handle(th->p_tid);
737 __pthread_lock(handle_to_lock(handle), NULL);
738 mgr_free_utcb(handle);
739 __pthread_unlock(handle_to_lock(handle));
742 // free the semaphore and the thread
/* Auto_cap wrappers release the capabilities when they go out of scope. */
743 L4Re::Util::Auto_cap<void>::Cap s = L4::Cap<void>(th->p_thsem_cap);
744 L4Re::Util::Auto_cap<void>::Cap t = L4::Cap<void>(th->p_th_cap);
747 /* One fewer threads in __pthread_handles */
749 /* Destroy read lock list, and list of free read lock structures.
750 If the former is not empty, it means the thread exited while
751 holding read locks! */
753 for (iter = th->p_readlock_list; iter != NULL; iter = next)
755 next = iter->pr_next;
759 for (iter = th->p_readlock_free; iter != NULL; iter = next)
761 next = iter->pr_next;
765 /* If initial thread, nothing to free */
766 if (!th->p_userstack)
768 size_t guardsize = th->p_guardsize;
769 /* Free the stack and thread descriptor area */
770 char *guardaddr = (char*)th->p_guardaddr;
771 #ifdef _STACK_GROWS_UP
773 size_t stacksize = guardaddr - th->p_stackaddr;
775 size_t stacksize = guardaddr - (char *)th;
777 guardaddr = (char *)th;
779 /* Guardaddr is always set, even if guardsize is 0. This allows
780 us to compute everything else. */
782 size_t stacksize = th->p_stackaddr - guardaddr - guardsize;
784 //l4/size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
786 # ifdef NEED_SEPARATE_REGISTER_STACK
787 /* Take account of the register stack, which is below guardaddr. */
788 guardaddr -= stacksize;
792 /* Unmap the stack. */
/* Detach the stack dataspace and release the reserved region; the
   detached dataspace capability 'ds' is the one allocated in
   pthread_allocate_stack. */
793 L4::Cap<L4Re::Dataspace> ds;
794 L4Re::Env::env()->rm()->detach(guardaddr + guardsize, &ds);
795 L4Re::Env::env()->rm()->free_area(l4_addr_t(guardaddr));
798 // munmap(guardaddr, stacksize + guardsize);
803 # if defined(TLS_DTV_AT_TP)
804 th = (pthread_descr) ((char *) th + TLS_PRE_TCB_SIZE);
806 _dl_deallocate_tls (th, true);
810 /* Handle threads that have exited */
/* Unlinks the thread from the active list, marks it exited (freeing its
   resources immediately if it was detached), and wakes the main thread if
   it is the last one standing during a pending pthread_exit of main. */
812 static void pthread_exited(pthread_descr th)
815 /* Remove thread from list of active threads */
816 th->p_nextlive->p_prevlive = th->p_prevlive;
817 th->p_prevlive->p_nextlive = th->p_nextlive;
818 /* Mark thread as exited, and if detached, free its resources */
819 __pthread_lock(th->p_lock, NULL);
821 /* If we have to signal this event do it now. */
822 detached = th->p_detached;
823 __pthread_unlock(th->p_lock);
826 /* If all threads have exited and the main thread is pending on a
827 pthread_exit, wake up the main thread and terminate ourselves. */
828 if (main_thread_exiting &&
829 __pthread_main_thread->p_nextlive == __pthread_main_thread) {
830 restart(__pthread_main_thread);
831 /* Same logic as REQ_MAIN_THREAD_EXIT. */
836 /* Try to free the resources of a thread when requested by pthread_join
837 or pthread_detach on a terminated thread. */
/* Resolves the handle under its lock; a stale id means the thread was
   already reaped and there is nothing left to do. */
839 static void pthread_handle_free(pthread_t th_id)
841 pthread_handle handle = thread_handle(th_id);
844 __pthread_lock(handle_to_lock(handle), NULL);
845 if (nonexisting_handle(handle, th_id)) {
846 /* pthread_reap_children has deallocated the thread already,
847 nothing needs to be done */
848 __pthread_unlock(handle_to_lock(handle));
851 th = handle_to_descr(handle);
852 __pthread_unlock(handle_to_lock(handle));
857 /* Send a signal to all running threads */
860 static void pthread_kill_all_threads(int main_thread_also)
862 UNIMPL("pthread_kill_all_threads");
865 for (th = __pthread_main_thread->p_nextlive;
866 th != __pthread_main_thread;
867 th = th->p_nextlive) {
868 kill(th->p_pid, sig);
870 if (main_thread_also) {
871 kill(__pthread_main_thread->p_pid, sig);
/* Invoke fn(arg, thread) for every live thread, the main thread last. */
877 static void pthread_for_each_thread(void *arg,
878 void (*fn)(void *, pthread_descr))
882 for (th = __pthread_main_thread->p_nextlive;
883 th != __pthread_main_thread;
884 th = th->p_nextlive) {
888 fn(arg, __pthread_main_thread);
891 /* Process-wide exit() */
/* Records the exit request/code for the whole process and restarts the
   issuing thread so it can run atexit handlers and terminate the process.
   The original signal-based teardown (kill + waitpid of all other threads)
   is retained below under THIS_IS_THE_ORIGINAL but not used on L4. */
893 static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
895 //l4/pthread_descr th;
896 __pthread_exit_requested = 1;
897 __pthread_exit_code = exitcode;
899 /* A forced asynchronous cancellation follows. Make sure we won't
900 get stuck later in the main thread with a system lock being held
901 by one of the cancelled threads. Ideally one would use the same
902 code as in pthread_atfork(), but we can't distinguish system and
903 user handlers there. */
905 /* Send the CANCEL signal to all running threads, including the main
906 thread, but excluding the thread from which the exit request originated
907 (that thread must complete the exit, e.g. calling atexit functions
908 and flushing stdio buffers). */
909 for (th = issuing_thread->p_nextlive;
910 th != issuing_thread;
911 th = th->p_nextlive) {
912 kill(th->p_pid, __pthread_sig_cancel);
914 /* Now, wait for all these threads, so that they don't become zombies
915 and their times are properly added to the thread manager's times. */
916 for (th = issuing_thread->p_nextlive;
917 th != issuing_thread;
918 th = th->p_nextlive) {
919 waitpid(th->p_pid, NULL, __WCLONE);
923 restart(issuing_thread);
924 #ifdef THIS_IS_THE_ORIGINAL
927 // we do not do the exit path with kill and waitpid, so give the code here
933 /* Handler for __pthread_sig_cancel in thread manager thread */
/* Records child termination; if the main thread is already in its final
   pthread_exit wait, sends a REQ_KICK to the manager so it reaps the dead
   threads immediately instead of waiting for its next wakeup. */
935 void __pthread_manager_sighandler(int sig)
937 int kick_manager = terminated_children == 0 && main_thread_exiting;
938 terminated_children = 1;
940 /* If the main thread is terminating, kick the thread manager loop
941 each time some threads terminate. This eliminates a two second
942 shutdown delay caused by the thread manager sleeping in the
943 call to __poll(). Instead, the thread manager is kicked into
944 action, reaps the outstanding threads and resumes the main thread
945 so that it can complete the shutdown. */
948 struct pthread_request request;
949 request.req_thread = 0;
950 request.req_kind = REQ_KICK;
951 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
952 (char *) &request, sizeof(request)));
956 /* Adjust priority of thread manager so that it always run at a priority
957 higher than all threads */
/* No-op if the manager already runs at or above thread_prio; otherwise
   reschedules the manager's L4 thread at thread_prio and records it. */
959 void __pthread_manager_adjust_prio(int thread_prio)
964 if (thread_prio <= manager_thread->p_priority)
967 l4_sched_param_t sp = l4_sched_param(thread_prio, 0);
968 L4Re::Env::env()->scheduler()->run_thread(L4::Cap<L4::Thread>(manager_thread->p_th_cap), sp);
969 manager_thread->p_priority = thread_prio;