1 /* Linuxthreads - a simple clone()-based implementation of Posix */
2 /* threads for Linux. */
3 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
5 /* This program is free software; you can redistribute it and/or */
6 /* modify it under the terms of the GNU Library General Public License */
7 /* as published by the Free Software Foundation; either version 2 */
8 /* of the License, or (at your option) any later version. */
10 /* This program is distributed in the hope that it will be useful, */
11 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
12 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
13 /* GNU Library General Public License for more details. */
15 /* The "thread manager" thread: manages creation and termination of threads */
29 #include <locale.h> /* for __uselocale */
31 #include <l4/sys/ipc.h>
33 #include <l4/re/mem_alloc>
34 #include <l4/re/dataspace>
36 #include <l4/re/util/cap_alloc>
37 #include <l4/sys/capability>
38 #include <l4/sys/factory>
39 #include <l4/sys/scheduler>
40 #include <l4/sys/thread>
43 #include "internals.h"
46 #include "semaphore.h"
50 # define MIN(a,b) (((a) < (b)) ? (a) : (b))
53 extern "C" void __pthread_new_thread_entry(void);
55 /* For debugging purposes put the maximum number of threads in a variable. */
56 const int __linuxthreads_pthread_threads_max = PTHREAD_THREADS_MAX;
59 /* Indicate whether at least one thread has a user-defined stack (if 1),
60 or if all threads have stacks supplied by LinuxThreads (if 0). */
61 int __pthread_nonstandard_stacks;
64 /* Number of active entries in __pthread_handles (used by gdb) */
65 __volatile__ int __pthread_handles_num = 2;
67 /* Whether to use debugger additional actions for thread creation */
69 __volatile__ int __pthread_threads_debug;
71 static pthread_descr manager_thread;
73 /* Mapping from stack segment to thread descriptor. */
74 /* Stack segment numbers are also indices into the __pthread_handles array. */
75 /* Stack segment number 0 is reserved for the initial thread. */
77 # define thread_segment(seq) NULL
79 /* Flag set in signal handler to record child termination */
81 static __volatile__ int terminated_children;
83 /* Flag set when the initial thread is blocked on pthread_exit waiting
84 for all other threads to terminate */
86 static int main_thread_exiting;
88 /* Counter used to generate unique thread identifier.
89 Thread identifier is pthread_threads_counter + segment. */
91 //l4/static pthread_t pthread_threads_counter;
93 /* Forward declarations */
95 static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
96 void * (*start_routine)(void *), void *arg);
97 static void pthread_handle_free(pthread_t th_id);
98 static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
99 __attribute__ ((noreturn));
100 //l4/static void pthread_kill_all_threads(int main_thread_also);
101 static void pthread_for_each_thread(void *arg,
102 void (*fn)(void *, pthread_descr));
104 /* The server thread managing requests for thread creation and termination */
107 __attribute__ ((noreturn))
108 __pthread_manager(void *arg)
110 pthread_descr self = manager_thread = (pthread_descr)arg;
111 struct pthread_request request;
113 /* If we have special thread_self processing, initialize it. */
114 #ifdef INIT_THREAD_SELF
115 INIT_THREAD_SELF(self, 1);
117 #if !(USE_TLS && HAVE___THREAD)
118 /* Set the error variable. */
119 self->p_errnop = &self->p_errno;
120 self->p_h_errnop = &self->p_h_errno;
122 /* Raise our priority to match that of main thread */
123 __pthread_manager_adjust_prio(__pthread_main_thread->p_priority);
126 l4_msgtag_t tag = l4_msgtag(0,0,0,0);
128 /* Enter server loop */
132 tag = l4_ipc_reply_and_wait(l4_utcb(), tag, &src, L4_IPC_NEVER);
134 tag = l4_ipc_wait(l4_utcb(), &src, L4_IPC_NEVER);
136 if (l4_msgtag_has_error(tag))
142 memcpy(&request, l4_utcb_mr()->mr, sizeof(request));
145 switch(request.req_kind)
148 request.req_thread->p_retcode =
149 pthread_handle_create((pthread_t *) &request.req_thread->p_retval,
150 request.req_args.create.attr,
151 request.req_args.create.fn,
152 request.req_args.create.arg);
156 pthread_handle_free(request.req_args.free.thread_id);
158 case REQ_PROCESS_EXIT:
159 pthread_handle_exit(request.req_thread,
160 request.req_args.exit.code);
163 case REQ_MAIN_THREAD_EXIT:
164 main_thread_exiting = 1;
165 /* Reap children in case all other threads died and the signal handler
166 went off before we set main_thread_exiting to 1, and therefore did
168 //l4/pthread_reap_children();
170 if (__pthread_main_thread->p_nextlive == __pthread_main_thread) {
171 restart(__pthread_main_thread);
172 /* The main thread will now call exit() which will trigger an
173 __on_exit handler, which in turn will send REQ_PROCESS_EXIT
174 to the thread manager. In case you are wondering how the
175 manager terminates from its loop here. */
179 sem_post((sem_t*)request.req_args.post);
183 /* Make gdb aware of new thread and gdb will restart the
184 new thread when it is ready to handle the new thread. */
185 if (__pthread_threads_debug && __pthread_sig_debug > 0)
186 raise(__pthread_sig_debug);
192 /* This is just a prod to get the manager to reap some
193 threads right away, avoiding a potential delay at shutdown. */
195 case REQ_FOR_EACH_THREAD:
196 pthread_for_each_thread(request.req_args.for_each.arg,
197 request.req_args.for_each.fn);
198 restart(request.req_thread);
202 tag = l4_msgtag(0,0,0,0);
206 int __pthread_manager_event(void *arg)
208 pthread_descr self = (pthread_descr)arg;
209 /* If we have special thread_self processing, initialize it. */
210 #ifdef INIT_THREAD_SELF
211 INIT_THREAD_SELF(self, 1);
214 /* Get the lock the manager will free once all is correctly set up. */
215 __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
216 /* Free it immediately. */
217 __pthread_unlock (THREAD_GETMEM(self, p_lock));
219 return __pthread_manager(arg);
222 /* Process creation */
225 __attribute__ ((noreturn))
226 pthread_start_thread(void *arg)
228 pthread_descr self = (pthread_descr) arg;
230 struct pthread_request request;
234 hp_timing_t tmpclock;
236 /* Initialize special thread_self processing, if any. */
237 #ifdef INIT_THREAD_SELF
238 INIT_THREAD_SELF(self, self->p_nr);
241 HP_TIMING_NOW (tmpclock);
242 THREAD_SETMEM (self, p_cpuclock_offset, tmpclock);
246 /* Set the scheduling policy and priority for the new thread, if needed */
247 if (THREAD_GETMEM(self, p_start_args.schedpolicy) >= 0)
248 /* Explicit scheduling attributes were provided: apply them */
249 __sched_setscheduler(THREAD_GETMEM(self, p_pid),
250 THREAD_GETMEM(self, p_start_args.schedpolicy),
251 &self->p_start_args.schedparam);
252 else if (manager_thread->p_priority > 0)
253 /* Default scheduling required, but thread manager runs in realtime
254 scheduling: switch new thread to SCHED_OTHER policy */
256 struct sched_param default_params;
257 default_params.sched_priority = 0;
258 __sched_setscheduler(THREAD_GETMEM(self, p_pid),
259 SCHED_OTHER, &default_params);
261 #if !(USE_TLS && HAVE___THREAD)
262 /* Initialize thread-locale current locale to point to the global one.
263 With __thread support, the variable's initializer takes care of this. */
264 __uselocale (LC_GLOBAL_LOCALE);
266 /* Initialize __resp. */
267 __resp = &self->p_res;
269 /* Make gdb aware of new thread */
270 if (__pthread_threads_debug && __pthread_sig_debug > 0) {
271 request.req_thread = self;
272 request.req_kind = REQ_DEBUG;
273 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
274 (char *) &request, sizeof(request)));
278 /* Run the thread code */
279 outcome = self->p_start_args.start_routine(THREAD_GETMEM(self,
281 /* Exit with the given return value */
282 __pthread_do_exit(outcome, (char *)CURRENT_STACK_FRAME);
287 __attribute__ ((noreturn))
288 pthread_start_thread_event(void *arg)
290 pthread_descr self = (pthread_descr) arg;
292 #ifdef INIT_THREAD_SELF
293 INIT_THREAD_SELF(self, self->p_nr);
295 /* Make sure our pid field is initialized, just in case we get there
296 before our father has initialized it. */
297 THREAD_SETMEM(self, p_pid, __getpid());
298 /* Get the lock the manager will free once all is correctly set up. */
299 __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
300 /* Free it immediately. */
301 __pthread_unlock (THREAD_GETMEM(self, p_lock));
303 /* Continue with the real function. */
304 pthread_start_thread (arg);
308 static int pthread_allocate_stack(const pthread_attr_t *attr,
309 pthread_descr default_new_thread,
311 char ** out_new_thread,
312 char ** out_new_thread_bottom,
313 char ** out_guardaddr,
314 size_t * out_guardsize,
315 size_t * out_stacksize)
317 pthread_descr new_thread;
318 char * new_thread_bottom;
320 size_t stacksize, guardsize;
323 /* TLS cannot work with fixed thread descriptor addresses. */
324 assert (default_new_thread == NULL);
327 if (attr != NULL && attr->__stackaddr_set)
329 #ifdef _STACK_GROWS_UP
330 /* The user provided a stack. */
332 /* This value is not needed. */
333 new_thread = (pthread_descr) attr->__stackaddr;
334 new_thread_bottom = (char *) new_thread;
336 new_thread = (pthread_descr) attr->__stackaddr;
337 new_thread_bottom = (char *) (new_thread + 1);
339 guardaddr = attr->__stackaddr + attr->__stacksize;
342 /* The user provided a stack. For now we interpret the supplied
343 address as 1 + the highest addr. in the stack segment. If a
344 separate register stack is needed, we place it at the low end
345 of the segment, relying on the associated stacksize to
346 determine the low end of the segment. This differs from many
347 (but not all) other pthreads implementations. The intent is
348 that on machines with a single stack growing toward higher
349 addresses, stackaddr would be the lowest address in the stack
350 segment, so that it is consistently close to the initial sp
353 new_thread = (pthread_descr) attr->__stackaddr;
356 (pthread_descr) ((long)(attr->__stackaddr) & -sizeof(void *)) - 1;
358 new_thread_bottom = (char *) attr->__stackaddr - attr->__stacksize;
359 guardaddr = new_thread_bottom;
363 __pthread_nonstandard_stacks = 1;
366 /* Clear the thread data structure. */
367 memset (new_thread, '\0', sizeof (*new_thread));
369 stacksize = attr->__stacksize;
373 const size_t granularity = pagesize;
376 /* Allocate space for stack and thread descriptor at default address */
379 guardsize = page_roundup (attr->__guardsize, granularity);
380 stacksize = __pthread_max_stacksize - guardsize;
381 stacksize = MIN (stacksize,
382 page_roundup (attr->__stacksize, granularity));
386 guardsize = granularity;
387 stacksize = __pthread_default_stacksize - guardsize;
391 L4Re::Env const *e = L4Re::Env::env();
394 if (e->rm()->reserve_area(&map_addr, stacksize + guardsize,
395 L4Re::Rm::Search_addr) < 0)
398 guardaddr = (char*)map_addr;
400 L4::Cap<L4Re::Dataspace> ds = L4Re::Util::cap_alloc.alloc<L4Re::Dataspace>();
404 err = e->mem_alloc()->alloc(stacksize, ds);
408 L4Re::Util::cap_alloc.free(ds);
409 e->rm()->free_area(l4_addr_t(map_addr));
413 new_thread_bottom = (char *) map_addr + guardsize;
414 err = e->rm()->attach(&new_thread_bottom, stacksize, L4Re::Rm::In_area,
419 L4Re::Util::cap_alloc.free(ds);
420 e->rm()->free_area(l4_addr_t(map_addr));
425 new_thread = ((pthread_descr) (new_thread_bottom + stacksize));
427 new_thread = ((pthread_descr) (new_thread_bottom + stacksize)) - 1;
430 *out_new_thread = (char *) new_thread;
431 *out_new_thread_bottom = new_thread_bottom;
432 *out_guardaddr = guardaddr;
433 *out_guardsize = guardsize;
434 *out_stacksize = stacksize;
439 l4_utcb_t *__pthread_utcb_for_thread(int nr)
441 using namespace L4Re;
442 Env const *e = Env::env();
443 return (l4_utcb_t*)((char *)e->first_free_utcb() + nr * L4_UTCB_OFFSET);
447 int __pthread_mgr_create_thread(pthread_descr thread, char **tos,
448 int (*f)(void*), int prio)
450 using namespace L4Re;
451 Env const *e = Env::env();
452 L4Re::Util::Auto_cap<L4::Thread>::Cap _t = L4Re::Util::cap_alloc.alloc<L4::Thread>();
456 L4Re::Util::Auto_cap<Th_sem_cap>::Cap th_sem
457 = L4Re::Util::cap_alloc.alloc<Th_sem_cap>();
458 if (!th_sem.is_valid())
461 int err = l4_error(e->factory()->create_thread(_t.get()));
465 // needed by __alloc_thread_sem
466 thread->p_th_cap = _t.cap();
468 err = __alloc_thread_sem(thread, th_sem.get());
472 thread->p_thsem_cap = th_sem.cap();
474 L4::Thread::Attr attr;
476 l4_utcb_t *nt_utcb = (l4_utcb_t*)thread->p_tid;
477 attr.bind(nt_utcb, L4Re::This_task);
479 attr.exc_handler(e->rm());
482 l4_utcb_tcr_u(nt_utcb)->user[0] = l4_addr_t(thread);
485 l4_umword_t *&_tos = (l4_umword_t*&)*tos;
487 *(--_tos) = l4_addr_t(thread);
488 *(--_tos) = 0; /* ret addr */
489 *(--_tos) = l4_addr_t(f);
492 _t->ex_regs(l4_addr_t(__pthread_new_thread_entry), l4_addr_t(_tos), 0);
494 l4_sched_param_t sp = l4_sched_param(prio >= 0 ? prio : 2);
495 e->scheduler()->run_thread(_t.get(), sp);
497 // release the automatic capabilities
503 static inline l4_utcb_t *mgr_alloc_utcb()
505 l4_utcb_t *new_utcb = __pthread_first_free_handle;
509 __pthread_first_free_handle = (l4_utcb_t*)l4_utcb_tcr_u(new_utcb)->user[0];
513 static inline void mgr_free_utcb(l4_utcb_t *u)
518 l4_utcb_tcr_u(u)->user[0] = l4_addr_t(__pthread_first_free_handle);
519 __pthread_first_free_handle = u;
522 int __pthread_start_manager(pthread_descr mgr)
526 mgr->p_tid = mgr_alloc_utcb();
528 err = __pthread_mgr_create_thread(mgr, &__pthread_manager_thread_tos,
529 __pthread_manager, -1);
532 fprintf(stderr, "ERROR: could not start pthread manager thread\n");
536 __pthread_manager_request = mgr->p_th_cap;
541 static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
542 void * (*start_routine)(void *), void *arg)
545 pthread_descr new_thread;
547 char * new_thread_bottom;
548 pthread_t new_thread_id;
549 char *guardaddr = NULL;
550 size_t guardsize = 0, stksize = 0;
551 int pagesize = L4_PAGESIZE;
555 new_thread = _dl_allocate_tls (NULL);
556 if (new_thread == NULL)
558 # if defined(TLS_DTV_AT_TP)
559 /* pthread_descr is below TP. */
560 new_thread = (pthread_descr) ((char *) new_thread - TLS_PRE_TCB_SIZE);
563 /* Prevent warnings. */
566 #ifdef __NOT_FOR_L4__
567 /* First check whether we have to change the policy and if yes, whether
568 we can do this. Normally this should be done by examining the
569 return value of the __sched_setscheduler call in pthread_start_thread
570 but this is hard to implement. FIXME */
571 if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
574 /* Find a free segment for the thread, and allocate a stack if needed */
576 if (__pthread_first_free_handle == 0)
579 # if defined(TLS_DTV_AT_TP)
580 new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
582 _dl_deallocate_tls (new_thread, true);
587 l4_utcb_t *new_utcb = mgr_alloc_utcb();
588 new_thread_id = new_utcb;
590 if (pthread_allocate_stack(attr, thread_segment(sseg),
591 pagesize, &stack_addr, &new_thread_bottom,
592 &guardaddr, &guardsize, &stksize) == 0)
595 new_thread->p_stackaddr = stack_addr;
597 new_thread = (pthread_descr) stack_addr;
602 mgr_free_utcb(new_utcb);
606 /* Allocate new thread identifier */
607 /* Initialize the thread descriptor. Elements which have to be
608 initialized to zero already have this value. */
609 #if !defined USE_TLS || !TLS_DTV_AT_TP
610 new_thread->p_header.data.tcb = new_thread;
611 new_thread->p_header.data.self = new_thread;
613 #if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
614 new_thread->p_multiple_threads = 1;
616 new_thread->p_tid = new_thread_id;
617 new_thread->p_lock = handle_to_lock(new_utcb);
618 new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
619 new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
620 #if !(USE_TLS && HAVE___THREAD)
621 new_thread->p_errnop = &new_thread->p_errno;
622 new_thread->p_h_errnop = &new_thread->p_h_errno;
624 new_thread->p_guardaddr = guardaddr;
625 new_thread->p_guardsize = guardsize;
626 new_thread->p_inheritsched = attr ? attr->__inheritsched : 0;
627 new_thread->p_alloca_cutoff = stksize / 4 > __MAX_ALLOCA_CUTOFF
628 ? __MAX_ALLOCA_CUTOFF : stksize / 4;
629 /* Initialize the thread handle */
630 __pthread_init_lock(handle_to_lock(new_utcb));
631 /* Determine scheduling parameters for the thread */
632 new_thread->p_sched_policy = -1;
635 new_thread->p_detached = attr->__detachstate;
636 new_thread->p_userstack = attr->__stackaddr_set;
638 switch(attr->__inheritsched)
640 case PTHREAD_EXPLICIT_SCHED:
641 new_thread->p_sched_policy = attr->__schedpolicy;
642 new_thread->p_priority = attr->__schedparam.sched_priority;
644 case PTHREAD_INHERIT_SCHED:
649 /* Set the scheduling policy and priority for the new thread, if needed */
650 if (new_thread->p_sched_policy >= 0)
652 /* Explicit scheduling attributes were provided: apply them */
653 prio = __pthread_l4_getprio(new_thread->p_sched_policy,
654 new_thread->p_priority);
655 /* Raise priority of thread manager if needed */
656 __pthread_manager_adjust_prio(prio);
658 else if (manager_thread->p_sched_policy > 3)
660 /* Default scheduling required, but thread manager runs in realtime
661 scheduling: switch new thread to SCHED_OTHER policy */
662 prio = __pthread_l4_getprio(SCHED_OTHER, 0);
664 /* Finish setting up arguments to pthread_start_thread */
665 new_thread->p_start_args.start_routine = start_routine;
666 new_thread->p_start_args.arg = arg;
667 /* Make the new thread ID available already now. If any of the later
668 functions fail we return an error value and the caller must not use
669 the stored thread ID. */
670 *thread = new_thread_id;
671 /* Do the cloning. We have to use two different functions depending
672 on whether we are debugging or not. */
673 err = __pthread_mgr_create_thread(new_thread, &stack_addr,
674 pthread_start_thread, prio);
677 /* Check if cloning succeeded */
679 /* Free the stack if we allocated it */
680 if (attr == NULL || !attr->__stackaddr_set)
682 #ifdef NEED_SEPARATE_REGISTER_STACK
683 size_t stacksize = ((char *)(new_thread->p_guardaddr)
684 - new_thread_bottom);
685 munmap((caddr_t)new_thread_bottom,
686 2 * stacksize + new_thread->p_guardsize);
687 #elif _STACK_GROWS_UP
689 size_t stacksize = guardaddr - stack_addr;
690 munmap(stack_addr, stacksize + guardsize);
693 size_t stacksize = guardaddr - (char *)new_thread;
694 munmap(new_thread, stacksize + guardsize);
698 size_t stacksize = stack_addr - new_thread_bottom;
700 //l4/size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
702 UNIMPL("Handle failed thread create correctly!");
703 // munmap(new_thread_bottom - guardsize, guardsize + stacksize);
707 # if defined(TLS_DTV_AT_TP)
708 new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
710 _dl_deallocate_tls (new_thread, true);
712 mgr_free_utcb(new_utcb);
715 /* Insert new thread in doubly linked list of active threads */
716 new_thread->p_prevlive = __pthread_main_thread;
717 new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
718 __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
719 __pthread_main_thread->p_nextlive = new_thread;
720 /* Set pid field of the new thread, in case we get there before the
726 /* Try to free the resources of a thread when requested by pthread_join
727 or pthread_detach on a terminated thread. */
729 static void pthread_free(pthread_descr th)
731 pthread_handle handle;
732 pthread_readlock_info *iter, *next;
734 ASSERT(th->p_exited);
735 /* Make the handle invalid */
736 handle = thread_handle(th->p_tid);
737 __pthread_lock(handle_to_lock(handle), NULL);
738 mgr_free_utcb(handle);
739 __pthread_unlock(handle_to_lock(handle));
742 // free the semaphore and the thread
743 L4Re::Util::Auto_cap<void>::Cap s = L4::Cap<void>(th->p_thsem_cap);
744 L4Re::Util::Auto_cap<void>::Cap t = L4::Cap<void>(th->p_th_cap);
747 /* One fewer threads in __pthread_handles */
749 /* Destroy read lock list, and list of free read lock structures.
750 If the former is not empty, it means the thread exited while
751 holding read locks! */
753 for (iter = th->p_readlock_list; iter != NULL; iter = next)
755 next = iter->pr_next;
759 for (iter = th->p_readlock_free; iter != NULL; iter = next)
761 next = iter->pr_next;
765 /* If initial thread, nothing to free */
766 if (!th->p_userstack)
768 size_t guardsize = th->p_guardsize;
769 /* Free the stack and thread descriptor area */
770 char *guardaddr = (char*)th->p_guardaddr;
771 #ifdef _STACK_GROWS_UP
773 size_t stacksize = guardaddr - th->p_stackaddr;
775 size_t stacksize = guardaddr - (char *)th;
777 guardaddr = (char *)th;
779 /* Guardaddr is always set, even if guardsize is 0. This allows
780 us to compute everything else. */
782 size_t stacksize = th->p_stackaddr - guardaddr - guardsize;
784 //l4/size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
786 # ifdef NEED_SEPARATE_REGISTER_STACK
787 /* Take account of the register stack, which is below guardaddr. */
788 guardaddr -= stacksize;
792 /* Unmap the stack. */
793 L4::Cap<L4Re::Dataspace> ds;
794 L4Re::Env::env()->rm()->detach(guardaddr + guardsize, &ds);
795 L4Re::Env::env()->rm()->free_area(l4_addr_t(guardaddr));
798 // munmap(guardaddr, stacksize + guardsize);
803 # if defined(TLS_DTV_AT_TP)
804 th = (pthread_descr) ((char *) th + TLS_PRE_TCB_SIZE);
806 _dl_deallocate_tls (th, true);
810 /* Handle threads that have exited */
812 static void pthread_exited(pthread_descr th)
815 /* Remove thread from list of active threads */
816 th->p_nextlive->p_prevlive = th->p_prevlive;
817 th->p_prevlive->p_nextlive = th->p_nextlive;
818 /* Mark thread as exited, and if detached, free its resources */
819 __pthread_lock(th->p_lock, NULL);
821 /* If we have to signal this event do it now. */
822 detached = th->p_detached;
823 __pthread_unlock(th->p_lock);
826 /* If all threads have exited and the main thread is pending on a
827 pthread_exit, wake up the main thread and terminate ourselves. */
828 if (main_thread_exiting &&
829 __pthread_main_thread->p_nextlive == __pthread_main_thread) {
830 restart(__pthread_main_thread);
831 /* Same logic as REQ_MAIN_THREAD_EXIT. */
836 /* Try to free the resources of a thread when requested by pthread_join
837 or pthread_detach on a terminated thread. */
839 static void pthread_handle_free(pthread_t th_id)
841 pthread_handle handle = thread_handle(th_id);
844 __pthread_lock(handle_to_lock(handle), NULL);
845 if (nonexisting_handle(handle, th_id)) {
846 /* pthread_reap_children has deallocated the thread already,
847 nothing needs to be done */
848 __pthread_unlock(handle_to_lock(handle));
851 th = handle_to_descr(handle);
852 __pthread_unlock(handle_to_lock(handle));
857 /* Send a signal to all running threads */
860 static void pthread_kill_all_threads(int main_thread_also)
862 UNIMPL("pthread_kill_all_threads");
865 for (th = __pthread_main_thread->p_nextlive;
866 th != __pthread_main_thread;
867 th = th->p_nextlive) {
868 kill(th->p_pid, sig);
870 if (main_thread_also) {
871 kill(__pthread_main_thread->p_pid, sig);
877 static void pthread_for_each_thread(void *arg,
878 void (*fn)(void *, pthread_descr))
882 for (th = __pthread_main_thread->p_nextlive;
883 th != __pthread_main_thread;
884 th = th->p_nextlive) {
888 fn(arg, __pthread_main_thread);
891 /* Process-wide exit() */
893 static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
895 //l4/pthread_descr th;
896 __pthread_exit_requested = 1;
897 __pthread_exit_code = exitcode;
899 /* A forced asynchronous cancellation follows. Make sure we won't
900 get stuck later in the main thread with a system lock being held
901 by one of the cancelled threads. Ideally one would use the same
902 code as in pthread_atfork(), but we can't distinguish system and
903 user handlers there. */
905 /* Send the CANCEL signal to all running threads, including the main
906 thread, but excluding the thread from which the exit request originated
907 (that thread must complete the exit, e.g. calling atexit functions
908 and flushing stdio buffers). */
909 for (th = issuing_thread->p_nextlive;
910 th != issuing_thread;
911 th = th->p_nextlive) {
912 kill(th->p_pid, __pthread_sig_cancel);
914 /* Now, wait for all these threads, so that they don't become zombies
915 and their times are properly added to the thread manager's times. */
916 for (th = issuing_thread->p_nextlive;
917 th != issuing_thread;
918 th = th->p_nextlive) {
919 waitpid(th->p_pid, NULL, __WCLONE);
923 restart(issuing_thread);
924 #ifdef THIS_IS_THE_ORIGINAL
927 // we do not do the exit path with kill and waitpid, so give the code here
933 /* Handler for __pthread_sig_cancel in thread manager thread */
935 void __pthread_manager_sighandler(int sig)
937 int kick_manager = terminated_children == 0 && main_thread_exiting;
938 terminated_children = 1;
940 /* If the main thread is terminating, kick the thread manager loop
941 each time some threads terminate. This eliminates a two second
942 shutdown delay caused by the thread manager sleeping in the
943 call to __poll(). Instead, the thread manager is kicked into
944 action, reaps the outstanding threads and resumes the main thread
945 so that it can complete the shutdown. */
948 struct pthread_request request;
949 request.req_thread = 0;
950 request.req_kind = REQ_KICK;
951 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
952 (char *) &request, sizeof(request)));
956 /* Adjust priority of thread manager so that it always run at a priority
957 higher than all threads */
959 void __pthread_manager_adjust_prio(int thread_prio)
964 if (thread_prio <= manager_thread->p_priority)
967 l4_sched_param_t sp = l4_sched_param(thread_prio, 0);
968 L4Re::Env::env()->scheduler()->run_thread(L4::Cap<L4::Thread>(manager_thread->p_th_cap), sp);
969 manager_thread->p_priority = thread_prio;