/* Linuxthreads - a simple clone()-based implementation of Posix */
/* threads for Linux.                                            */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)       */

/* This program is free software; you can redistribute it and/or      */
/* modify it under the terms of the GNU Library General Public License */
/* as published by the Free Software Foundation; either version 2     */
/* of the License, or (at your option) any later version.             */

/* This program is distributed in the hope that it will be useful,  */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of   */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the    */
/* GNU Library General Public License for more details.             */

/* Thread creation, initialization, and basic low-level routines */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <l4/re/env.h>
#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
#include "restart.h"
#include "smp.h"
#include <not-cancel.h>
#if !defined __SIGRTMIN || (__SIGRTMAX - __SIGRTMIN) < 3
# error "This must not happen"
#endif

/* mods for uClibc: __libc_sigaction is not in any standard headers */
extern __typeof(sigaction) __libc_sigaction;
#if !(USE_TLS && HAVE___THREAD)
/* These variables are used by the setup code. */
extern int _errno;
extern int _h_errno;

# if defined __UCLIBC_HAS_IPv4__ || defined __UCLIBC_HAS_IPV6__
/* We need the global/static resolver state here. */
# include <resolv.h>
# undef _res
extern struct __res_state *__resp;
# endif
#endif
#ifdef USE_TLS

/* We need only a few variables. */
#define manager_thread __pthread_manager_threadp
pthread_descr __pthread_manager_threadp attribute_hidden;

#else

/* Descriptor of the initial thread */

struct _pthread_descr_struct __pthread_initial_thread = {
  .p_header.data.self = &__pthread_initial_thread,
  .p_nextlive = &__pthread_initial_thread,
  .p_prevlive = &__pthread_initial_thread,
  .p_tid = PTHREAD_THREADS_MAX,
  .p_lock = &__pthread_handles[0].h_lock,
  .p_start_args = PTHREAD_START_ARGS_INITIALIZER(NULL),
#if !(USE_TLS && HAVE___THREAD)
  .p_errnop = &_errno,
  .p_h_errnop = &_h_errno,
#endif
  .p_resume_count = __ATOMIC_INITIALIZER,
  .p_alloca_cutoff = __MAX_ALLOCA_CUTOFF
};

/* Descriptor of the manager thread; none of this is used but the error
   variables, the p_pid and p_priority fields,
   and the address for identification. */

#define manager_thread (&__pthread_manager_thread)
struct _pthread_descr_struct __pthread_manager_thread = {
  .p_header.data.self = &__pthread_manager_thread,
  .p_header.data.multiple_threads = 1,
  .p_lock = &__pthread_handles[1].h_lock,
  .p_start_args = PTHREAD_START_ARGS_INITIALIZER(__pthread_manager),
#if !(USE_TLS && HAVE___THREAD)
  .p_errnop = &__pthread_manager_thread.p_errno,
#endif
  .p_resume_count = __ATOMIC_INITIALIZER,
  .p_alloca_cutoff = PTHREAD_STACK_MIN / 4
};
#endif
/* Pointer to the main thread (the father of the thread manager thread) */
/* Originally, this is the initial thread, but this changes after fork() */

#ifdef USE_TLS
pthread_descr __pthread_main_thread;
#else
pthread_descr __pthread_main_thread = &__pthread_initial_thread;
#endif
/* Limit between the stack of the initial thread (above) and the
   stacks of other threads (below). Aligned on a STACK_SIZE boundary. */

char *__pthread_initial_thread_bos;

/* Capability for sending requests to the thread manager. */
/* Initially invalid, meaning that the thread manager is not running. */

l4_cap_idx_t __pthread_manager_request = L4_INVALID_CAP;

int __pthread_multiple_threads attribute_hidden;
/* Other end of the pipe for sending requests to the thread manager. */

int __pthread_manager_reader;

/* Limits of the thread manager stack */

char *__pthread_manager_thread_bos;
char *__pthread_manager_thread_tos;

/* For process-wide exit() */

int __pthread_exit_requested;
int __pthread_exit_code;

/* Maximum stack size. */
size_t __pthread_max_stacksize;

/* Nonzero if the machine has more than one processor. */
int __pthread_smp_kernel;
#if !__ASSUME_REALTIME_SIGNALS
/* Pointers that select new or old suspend/resume functions
   based on availability of rt signals. */

void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) =
    __pthread_timedsuspend_old;
#endif /* __ASSUME_REALTIME_SIGNALS */
/* Communicate relevant LinuxThreads constants to gdb */

const int __pthread_threads_max = PTHREAD_THREADS_MAX;
const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
                                              h_descr);
const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
                                            p_pid);
const int __linuxthreads_pthread_sizeof_descr
  = sizeof(struct _pthread_descr_struct);

const int __linuxthreads_initial_report_events;

const char __linuxthreads_version[] = VERSION;
/* Forward declarations */

static void pthread_onexit_process(int retcode, void *arg);
#ifndef HAVE_Z_NODELETE
static void pthread_atexit_process(void *arg, int retcode);
static void pthread_atexit_retcode(void *arg, int retcode);
#endif
static void pthread_handle_sigcancel(int sig);
static void pthread_handle_sigrestart(int sig);
static void pthread_handle_sigdebug(int sig);
/* Signal numbers used for the communication.
   In these variables we keep track of the signals actually used.  If the
   platform does not support any real-time signals we will define the
   values to some unreasonable value which will signal failing of all
   the functions below. */
int __pthread_sig_restart = __SIGRTMIN;
int __pthread_sig_cancel = __SIGRTMIN + 1;
int __pthread_sig_debug = __SIGRTMIN + 2;

extern int __libc_current_sigrtmin_private (void);
#if !__ASSUME_REALTIME_SIGNALS
static int rtsigs_initialized;

static void
init_rtsigs (void)
{
  if (rtsigs_initialized)
    return;

  if (__libc_current_sigrtmin_private () == -1)
    {
      __pthread_sig_restart = SIGUSR1;
      __pthread_sig_cancel = SIGUSR2;
      __pthread_sig_debug = 0;
    }
  else
    {
      __pthread_restart = __pthread_restart_new;
      __pthread_suspend = __pthread_wait_for_restart_signal;
      __pthread_timedsuspend = __pthread_timedsuspend_new;
    }

  rtsigs_initialized = 1;
}
#endif
/* Initialize the pthread library.
   Initialization is split in two functions:
   - a constructor function that blocks the __pthread_sig_restart signal
     (must do this very early, since the program could capture the signal
     mask with e.g. sigsetjmp before creating the first thread);
   - a regular function called from pthread_create when needed. */

static void pthread_initialize(void) __attribute__((constructor));
#ifndef HAVE_Z_NODELETE
extern void *__dso_handle __attribute__ ((weak));
#endif
#if defined USE_TLS && !defined SHARED
extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
#endif
#ifdef SHARED
struct pthread_functions __pthread_functions =
  {
#if !(USE_TLS && HAVE___THREAD)
    .ptr_pthread_internal_tsd_set = __pthread_internal_tsd_set,
    .ptr_pthread_internal_tsd_get = __pthread_internal_tsd_get,
    .ptr_pthread_internal_tsd_address = __pthread_internal_tsd_address,
#endif
#ifdef __NOT_FOR_L4__
    .ptr_pthread_fork = __pthread_fork,
#else
    .ptr_pthread_fork = NULL,
#endif
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
    .ptr_pthread_attr_init = __pthread_attr_init,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr_pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr_pthread_cond_destroy = __pthread_cond_destroy,
    .ptr_pthread_cond_init = __pthread_cond_init,
    .ptr_pthread_cond_signal = __pthread_cond_signal,
    .ptr_pthread_cond_wait = __pthread_cond_wait,
    .ptr_pthread_cond_timedwait = __pthread_cond_timedwait,
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_trylock = __pthread_mutex_trylock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr_pthread_self = __pthread_self,
    .ptr_pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr_pthread_do_exit = __pthread_do_exit,
    .ptr_pthread_thread_self = __pthread_thread_self,
    .ptr_pthread_cleanup_upto = __pthread_cleanup_upto,
#ifdef __NOT_FOR_L4__
    .ptr_pthread_sigaction = __pthread_sigaction,
    .ptr_pthread_sigwait = __pthread_sigwait,
    .ptr_pthread_raise = __pthread_raise,
#else
    .ptr_pthread_sigaction = NULL,
    .ptr_pthread_sigwait = NULL,
    .ptr_pthread_raise = NULL,
#endif
    .ptr__pthread_cleanup_push = _pthread_cleanup_push,
    .ptr__pthread_cleanup_push_defer = _pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop = _pthread_cleanup_pop,
    .ptr__pthread_cleanup_pop_restore = _pthread_cleanup_pop_restore,
  };
# define ptr_pthread_functions &__pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
static int *__libc_multiple_threads_ptr;
l4_utcb_t *__pthread_first_free_handle attribute_hidden;
/* Add the UTCBs in [start, utcbs_end) to the list of free UTCBs/handles,
   linking them through the first user word of each UTCB. */
static void
__l4_add_utcbs(l4_addr_t start, l4_addr_t utcbs_end)
{
  l4_addr_t free_utcb = start;

  l4_utcb_t **last_free = &__pthread_first_free_handle;
  while ((l4_addr_t)free_utcb + L4_UTCB_OFFSET <= utcbs_end)
    {
      l4_utcb_t *u = (l4_utcb_t*)free_utcb;
      l4_thread_regs_t *tcr = l4_utcb_tcr_u(u);
      tcr->user[0] = 0;                 /* terminate the list at this node */
      __pthread_init_lock(handle_to_lock(u));
      *last_free = u;                   /* append to the free list */
      last_free = (l4_utcb_t**)(&tcr->user[0]);
      free_utcb += L4_UTCB_OFFSET;
    }
}
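
/* Illustrative sketch only (not part of this library): a consumer would pop
   a free UTCB from the list built above roughly as follows, assuming the
   same user[0] linkage.  The helper name __l4_pop_utcb is hypothetical. */
#if 0
static l4_utcb_t *__l4_pop_utcb(void)
{
  l4_utcb_t *u = __pthread_first_free_handle;
  if (u)
    /* user[0] of each free UTCB holds the next free UTCB (0 at the end). */
    __pthread_first_free_handle = (l4_utcb_t *)l4_utcb_tcr_u(u)->user[0];
  return u;
}
#endif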
/* Do some minimal initialization which has to be done during the
   startup of the C library. */
void
__pthread_initialize_minimal(void)
{
  static int initialized;
  if (initialized)
    return;

  initialized = 1;

  /* initialize free list */
  l4_fpage_t utcb_area = l4re_env()->utcb_area;
  l4_addr_t free_utcb = l4re_env()->first_free_utcb;
  l4_addr_t utcbs_end = ((l4_addr_t)l4_fpage_page(utcb_area) << 12UL)
                        + (1UL << (l4_addr_t)l4_fpage_size(utcb_area));
  __l4_add_utcbs(free_utcb, utcbs_end);
  /* All in the free pool now, so indicate that first_free_utcb is not
     available anymore. */
  l4re_env()->first_free_utcb = ~0UL;

  __pthread_init_lock(handle_to_lock(l4_utcb()));
#ifdef USE_TLS
  pthread_descr self;

  /* First of all init __pthread_handles[0] and [1] if needed. */
# if __LT_SPINLOCK_INIT != 0
  __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
  __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
# endif

# ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures. */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);
# elif !USE___THREAD
  if (__builtin_expect (GL(dl_tls_dtv_slotinfo_list) == NULL, 0))
    {
      tcbhead_t *tcbp;

      /* There is no actual TLS being used, so the thread register
         was not initialized in the dynamic linker. */

      /* We need to install special hooks so that the malloc and memalign
         calls in _dl_tls_setup and _dl_allocate_tls won't cause full
         malloc initialization that will try to set up its thread state. */

      extern void __libc_malloc_pthread_startup (bool first_time);
      __libc_malloc_pthread_startup (true);

      if (__builtin_expect (_dl_tls_setup (), 0)
          || __builtin_expect ((tcbp = _dl_allocate_tls (NULL)) == NULL, 0))
        {
          static const char msg[] = "\
cannot allocate TLS data structures for initial thread\n";
          TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
                                                msg, sizeof msg - 1));
          abort ();
        }

      const char *lossage = TLS_INIT_TP (tcbp, 0);
      if (__builtin_expect (lossage != NULL, 0))
        {
          static const char msg[] = "cannot set up thread-local storage: ";
          const char nl = '\n';
          TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
                                                msg, sizeof msg - 1));
          TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
                                                lossage, strlen (lossage)));
          TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO, &nl, 1));
          abort ();
        }

      /* Though it was allocated with libc's malloc, that was done without
         the user's __malloc_hook installed.  A later realloc that uses
         the hooks might not work with that block from the plain malloc.
         So we record this block as unfreeable just as the dynamic linker
         does when it allocates the DTV before the libc malloc exists. */
      GL(dl_initial_dtv) = GET_DTV (tcbp);

      __libc_malloc_pthread_startup (false);
    }
# endif
  self = THREAD_SELF;

  /* The memory for the thread descriptor was allocated elsewhere as
     part of the TLS allocation.  We have to initialize the data
     structure by hand.  This initialization must mirror the struct
     definition above. */
  self->p_nextlive = self->p_prevlive = self;
#if defined NOT_FOR_L4
  self->p_tid = PTHREAD_THREADS_MAX;
  self->p_lock = &__pthread_handles[0].h_lock;
#endif
# ifndef HAVE___THREAD
  self->p_errnop = &_errno;
  self->p_h_errnop = &_h_errno;
# endif
  /* self->p_start_args need not be initialized, it's all zero. */
  self->p_userstack = 1;
# if __LT_SPINLOCK_INIT != 0
  self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
# endif
  self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF;

  /* Another variable which points to the thread descriptor. */
  __pthread_main_thread = self;

  /* And fill in the pointer to the thread in the __pthread_handles array. */
  __pthread_handles[0].h_descr = self;
#else  /* USE_TLS */

  /* First of all init __pthread_handles[0] and [1]. */
# if __LT_SPINLOCK_INIT != 0
  __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
  __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
# endif
  __pthread_handles[0].h_descr = &__pthread_initial_thread;
  __pthread_handles[1].h_descr = &__pthread_manager_thread;

  /* If we have special thread_self processing, initialize that for the
     main thread now. */
# ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_initial_thread, 0);
# endif
#endif

#if HP_TIMING_AVAIL
# ifdef USE_TLS
  self->p_cpuclock_offset = GL(dl_cpuclock_offset);
# else
  __pthread_initial_thread.p_cpuclock_offset = GL(dl_cpuclock_offset);
# endif
#endif

#ifdef USE_TLS
  if (__pthread_l4_initialize_main_thread(self))
#else
  if (__pthread_l4_initialize_main_thread(&__pthread_initial_thread))
#endif
    return;

  __libc_multiple_threads_ptr = __libc_pthread_init (ptr_pthread_functions);
}
static void __pthread_init_max_stacksize(void)
{
  size_t max_stack;

#ifdef NOT_FOR_L4
  struct rlimit limit;

  getrlimit(RLIMIT_STACK, &limit);
#ifdef FLOATING_STACKS
  if (limit.rlim_cur == RLIM_INFINITY)
    limit.rlim_cur = ARCH_STACK_MAX_SIZE;
# ifdef NEED_SEPARATE_REGISTER_STACK
  max_stack = limit.rlim_cur / 2;
# else
  max_stack = limit.rlim_cur;
# endif
#else
  /* Play with the stack size limit to make sure that no stack ever grows
     beyond STACK_SIZE minus one page (to act as a guard page). */
# ifdef NEED_SEPARATE_REGISTER_STACK
  /* STACK_SIZE bytes hold both the main stack and register backing
     store. The rlimit value applies to each individually. */
  max_stack = STACK_SIZE/2 - __getpagesize ();
# else
  max_stack = STACK_SIZE - __getpagesize();
# endif
  if (limit.rlim_cur > max_stack) {
    limit.rlim_cur = max_stack;
    setrlimit(RLIMIT_STACK, &limit);
  }
#endif
#else  /* !NOT_FOR_L4 */
  max_stack = STACK_SIZE - L4_PAGESIZE;
#endif
  __pthread_max_stacksize = max_stack;
  if (max_stack / 4 < __MAX_ALLOCA_CUTOFF)
    {
#ifdef USE_TLS
      pthread_descr self = THREAD_SELF;
      self->p_alloca_cutoff = max_stack / 4;
#else
      __pthread_initial_thread.p_alloca_cutoff = max_stack / 4;
#endif
    }
}
/* psm: we do not have any ld.so support yet
 * remove the USE_TLS guard if nptl is added */
#if defined SHARED && defined USE_TLS
# if USE___THREAD
/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this. */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
# else
static void ** __attribute__ ((const))
__libc_dl_error_tsd (void)
{
  return &thread_self ()->p_libc_specific[_LIBC_TSD_KEY_DL_ERROR];
}
# endif
#endif
#ifdef USE_TLS
static __inline__ void __attribute__((always_inline))
init_one_static_tls (pthread_descr descr, struct link_map *map)
{
# if defined(TLS_TCB_AT_TP)
  dtv_t *dtv = GET_DTV (descr);
  void *dest = (char *) descr - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
  dtv_t *dtv = GET_DTV ((pthread_descr) ((char *) descr + TLS_PRE_TCB_SIZE));
  void *dest = (char *) descr + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Fill in the DTV slot so that a later LD/GD access will find it. */
  dtv[map->l_tls_modid].pointer.val = dest;
  dtv[map->l_tls_modid].pointer.is_static = true;

  /* Initialize the memory. */
  memset (mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}

static void
__pthread_init_static_tls (struct link_map *map)
{
  pthread_descr th;

  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive)
    init_one_static_tls(th, map);
}
#endif
static void pthread_initialize(void)
{
  /* If already done (e.g. by a constructor called earlier!), bail out */
  if (__pthread_initial_thread_bos != NULL) return;
#ifdef TEST_FOR_COMPARE_AND_SWAP
  /* Test if compare-and-swap is available */
  __pthread_has_cas = compare_and_swap_is_available();
#endif
  /* We don't need to know the bottom of the stack. Give the pointer some
     value to signal that initialization happened. */
  __pthread_initial_thread_bos = (void *) -1l;

  /* Update the descriptor for the initial thread. */
  THREAD_SETMEM (((pthread_descr) NULL), p_pid, __getpid());
#if !(USE_TLS && HAVE___THREAD)
# if !defined HAVE___THREAD && (defined __UCLIBC_HAS_IPv4__ || defined __UCLIBC_HAS_IPV6__)
  /* Likewise for the resolver state _res. */
  THREAD_SETMEM (((pthread_descr) NULL), p_resp, __resp);
# endif
#endif
  /* Register an exit function to kill all other threads. */
  /* Do it early so that user-registered atexit functions are called
     before pthread_*exit_process. */
#ifndef HAVE_Z_NODELETE
  if (__builtin_expect (&__dso_handle != NULL, 1))
    __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL,
                  __dso_handle);
  else
#endif
    __on_exit (pthread_onexit_process, NULL);
  /* How many processors. */
  __pthread_smp_kernel = is_smp_system ();
  /* psm: we do not have any ld.so support yet
   * remove the USE_TLS guard if nptl is added */
#if defined SHARED && defined USE_TLS
  /* Transfer the old value from the dynamic linker's internal location. */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;
#endif

#ifdef USE_TLS
  GL(dl_init_static_tls) = &__pthread_init_static_tls;
#endif

  /* uClibc-specific stdio initialization for threads. */
  {
    FILE *fp;
    _stdio_user_locking = 0;	/* 2 if threading not initialized */
    for (fp = _stdio_openlist; fp != NULL; fp = fp->__nextopen) {
      if (fp->__user_locking != 1) {
        fp->__user_locking = 0;
      }
    }
  }
}

void __pthread_initialize(void)
{
  pthread_initialize();
}
int __pthread_initialize_manager(void)
{
  int manager_pipe[2];
  int pid;
  struct pthread_request request;
  int report_events;
  pthread_descr mgr;
#ifdef USE_TLS
  tcbhead_t *tcbp;
#endif

  __pthread_multiple_threads = 1;
#if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
  __pthread_main_thread->p_multiple_threads = 1;
#endif
  *__libc_multiple_threads_ptr = 1;

#ifndef HAVE_Z_NODELETE
  if (__builtin_expect (&__dso_handle != NULL, 1))
    __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
                  __dso_handle);
#endif

  if (__pthread_max_stacksize == 0)
    __pthread_init_max_stacksize ();
  /* If basic initialization not done yet (e.g. we're called from a
     constructor run before our constructor), do it now */
  if (__pthread_initial_thread_bos == NULL) pthread_initialize();
  /* Setup stack for thread manager */
  __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
  if (__pthread_manager_thread_bos == NULL)
    return -1;
  __pthread_manager_thread_tos =
    __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
#ifdef __NOT_FOR_L4__
  /* Setup pipe to communicate with thread manager */
  if (pipe(manager_pipe) == -1) {
    free(__pthread_manager_thread_bos);
    return -1;
  }
#endif

#ifdef USE_TLS
  /* Allocate memory for the thread descriptor and the dtv. */
  tcbp = _dl_allocate_tls (NULL);
  if (tcbp == NULL) {
    free(__pthread_manager_thread_bos);
#ifdef __NOT_FOR_L4__
    close_not_cancel(manager_pipe[0]);
    close_not_cancel(manager_pipe[1]);
#endif
    return -1;
  }
# if defined(TLS_TCB_AT_TP)
  mgr = (pthread_descr) tcbp;
# elif defined(TLS_DTV_AT_TP)
  /* pthread_descr is located right below tcbhead_t which _dl_allocate_tls
     returns. */
  mgr = (pthread_descr) ((char *) tcbp - TLS_PRE_TCB_SIZE);
# endif
  __pthread_handles[1].h_descr = manager_thread = mgr;

  /* Initialize the descriptor. */
#if !defined USE_TLS || !TLS_DTV_AT_TP
  mgr->p_header.data.tcb = tcbp;
  mgr->p_header.data.self = mgr;
  mgr->p_header.data.multiple_threads = 1;
#elif TLS_MULTIPLE_THREADS_IN_TCB
  mgr->p_multiple_threads = 1;
#endif
  mgr->p_lock = &__pthread_handles[1].h_lock;
# ifndef HAVE___THREAD
  mgr->p_errnop = &mgr->p_errno;
# endif
  mgr->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager);
  mgr->p_nr = 1;
# if __LT_SPINLOCK_INIT != 0
  mgr->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
# endif
  mgr->p_alloca_cutoff = PTHREAD_STACK_MIN / 4;
#else
  mgr = &__pthread_manager_thread;
#endif

#ifdef __NOT_FOR_L4__
  __pthread_manager_request = manager_pipe[1]; /* writing end */
  __pthread_manager_reader = manager_pipe[0]; /* reading end */
#endif

  /* Start the thread manager */
  pid = 0;
#ifdef USE_TLS
  if (__linuxthreads_initial_report_events != 0)
    THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
                   __linuxthreads_initial_report_events);
  report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
#else
  if (__linuxthreads_initial_report_events != 0)
    __pthread_initial_thread.p_report_events
      = __linuxthreads_initial_report_events;
  report_events = __pthread_initial_thread.p_report_events;
#endif
  if (__builtin_expect (report_events, 0))
    {
      /* It's a bit more complicated.  We have to report the creation of
         the manager thread. */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);
      uint32_t event_bits;

#ifdef USE_TLS
      event_bits = THREAD_GETMEM_NC (((pthread_descr) NULL),
                                     p_eventbuf.eventmask.event_bits[idx]);
#else
      event_bits = __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx];
#endif

      if ((mask & (__pthread_threads_events.event_bits[idx] | event_bits))
          != 0)
        {
          __pthread_lock(mgr->p_lock, NULL);

#ifdef NEED_SEPARATE_REGISTER_STACK
          pid = __clone2(__pthread_manager_event,
                         (void **) __pthread_manager_thread_bos,
                         THREAD_MANAGER_STACK_SIZE,
                         CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
                         mgr);
#elif _STACK_GROWS_UP
          pid = __clone(__pthread_manager_event,
                        (void **) __pthread_manager_thread_bos,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
                        mgr);
#else
          pid = __clone(__pthread_manager_event,
                        (void **) __pthread_manager_thread_tos,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
                        mgr);
#endif
          if (pid != -1)
            {
              /* Now fill in the information about the new thread in
                 the newly created thread's data structure.  We cannot let
                 the new thread do this since we don't know whether it was
                 already scheduled when we send the event. */
              mgr->p_eventbuf.eventdata = mgr;
              mgr->p_eventbuf.eventnum = TD_CREATE;
              __pthread_last_event = mgr;
              mgr->p_tid = 2 * PTHREAD_THREADS_MAX + 1;
              mgr->p_pid = pid;

              /* Now call the function which signals the event. */
              __linuxthreads_create_event ();
            }

          /* Now restart the thread. */
          __pthread_unlock(mgr->p_lock);
        }
    }
  if (__builtin_expect (pid, 0) == 0)
    {
#ifdef NEED_SEPARATE_REGISTER_STACK
      pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
                     THREAD_MANAGER_STACK_SIZE,
                     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
#elif _STACK_GROWS_UP
      pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
#else
      pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
#endif
    }

  int err = __pthread_start_manager(mgr);

  if (__builtin_expect (err, 0) == -1) {
#ifdef USE_TLS
    _dl_deallocate_tls (tcbp, true);
#endif
    free(__pthread_manager_thread_bos);
#ifdef __NOT_FOR_L4__
    close_not_cancel(manager_pipe[0]);
    close_not_cancel(manager_pipe[1]);
#endif
    return -1;
  }

  mgr->p_tid = 2 * PTHREAD_THREADS_MAX + 1;
  /* Make gdb aware of new thread manager */
  if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
    {
      raise(__pthread_sig_debug);
      /* We suspend ourself and gdb will wake us up when it is
         ready to handle us. */
      __pthread_wait_for_restart_signal(thread_self());
    }
  /* Synchronize debugging of the thread manager */
  request.req_kind = REQ_DEBUG;
  TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
                                      (char *) &request, sizeof(request)));
  return 0;
}
/* Thread creation */

int __pthread_create(pthread_t *thread, const pthread_attr_t *attr,
                     void * (*start_routine)(void *), void *arg)
{
  pthread_descr self = thread_self();
  struct pthread_request request;
  int retval;
  if (__builtin_expect (l4_is_invalid_cap(__pthread_manager_request), 0)) {
    if (__pthread_initialize_manager() < 0)
      return EAGAIN;
  }
  request.req_thread = self;
  request.req_kind = REQ_CREATE;
  request.req_args.create.attr = attr;
  request.req_args.create.fn = start_routine;
  request.req_args.create.arg = arg;
#ifdef __NOT_FOR_L4__
  sigprocmask(SIG_SETMASK, NULL, &request.req_args.create.mask);
  TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
                                      (char *) &request, sizeof(request)));
  suspend(self);
#else
  __pthread_send_manager_rq(&request, 1);
#endif
  retval = THREAD_GETMEM(self, p_retcode);
  if (__builtin_expect (retval, 0) == 0)
    *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
  return retval;
}
strong_alias (__pthread_create, pthread_create)
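
/* Usage sketch (illustrative only, not part of this library): creating and
   joining a thread through the standard POSIX API implemented above.  The
   first pthread_create call also starts the thread manager. */
#if 0
#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
  printf("hello from thread %d\n", *(int *)arg);
  return arg;
}

int example_main(void)
{
  pthread_t t;
  int arg = 1;
  void *result;
  if (pthread_create(&t, NULL, worker, &arg) != 0)
    return 1;                 /* EAGAIN if the manager could not start */
  pthread_join(t, &result);
  return 0;
}
#endif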
/* Simple operations on thread identifiers */

pthread_descr __pthread_thread_self(void)
{
  return thread_self();
}

pthread_t __pthread_self(void)
{
  pthread_descr self = thread_self();
  return THREAD_GETMEM(self, p_tid);
}
strong_alias (__pthread_self, pthread_self)

int __pthread_equal(pthread_t thread1, pthread_t thread2)
{
  return thread1 == thread2;
}
strong_alias (__pthread_equal, pthread_equal)
/* Helper function for thread_self in the case of user-provided stacks */

#ifndef THREAD_SELF

pthread_descr __pthread_find_self(void)
{
  char * sp = CURRENT_STACK_FRAME;
  pthread_handle h;

  /* __pthread_handles[0] is the initial thread and __pthread_handles[1]
     the manager thread; both are handled specially in thread_self(), so
     start at 2 */
  h = __pthread_handles + 2;
# ifdef _STACK_GROWS_UP
  while (! (sp >= (char *) h->h_descr && sp < (char *) h->h_descr->p_guardaddr))
    h++;
# else
  while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
    h++;
# endif
  return h->h_descr;
}

#else
pthread_descr __pthread_self_stack(void)
{
  char *sp = CURRENT_STACK_FRAME;
  pthread_handle h;

  if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
    return manager_thread;
  h = __pthread_handles + 2;
# ifdef USE_TLS
#  ifdef _STACK_GROWS_UP
  while (h->h_descr == NULL
         || ! (sp >= h->h_descr->p_stackaddr && sp < h->h_descr->p_guardaddr))
    h++;
#  else
  while (h->h_descr == NULL
         || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
    h++;
#  endif
# else
#  ifdef _STACK_GROWS_UP
  while (! (sp >= (char *) h->h_descr && sp < h->h_descr->p_guardaddr))
    h++;
#  else
  while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
    h++;
#  endif
# endif
  return h->h_descr;
}

#endif
/* Thread scheduling */

int __pthread_setschedparam(pthread_t thread, int policy,
                            const struct sched_param *param)
{
  pthread_handle handle = thread_handle(thread);
  pthread_descr th;

  __pthread_lock(&handle->h_lock, NULL);
  if (__builtin_expect (invalid_handle(handle, thread), 0)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  th = handle->h_descr;
  if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
                        0)) {
    __pthread_unlock(&handle->h_lock);
    return errno;
  }
  th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
  __pthread_unlock(&handle->h_lock);
  if (!l4_is_invalid_cap(__pthread_manager_request))
    __pthread_manager_adjust_prio(th->p_priority);
  return 0;
}
strong_alias (__pthread_setschedparam, pthread_setschedparam)
int __pthread_getschedparam(pthread_t thread, int *policy,
                            struct sched_param *param)
{
  pthread_handle handle = thread_handle(thread);
  int pid, pol;

  __pthread_lock(&handle->h_lock, NULL);
  if (__builtin_expect (invalid_handle(handle, thread), 0)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  pid = handle->h_descr->p_pid;
  __pthread_unlock(&handle->h_lock);
  pol = __sched_getscheduler(pid);
  if (__builtin_expect (pol, 0) == -1) return errno;
  if (__sched_getparam(pid, param) == -1) return errno;
  *policy = pol;
  return 0;
}
strong_alias (__pthread_getschedparam, pthread_getschedparam)
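
/* Usage sketch (illustrative only, not part of this library): querying a
   thread's scheduling policy and raising its priority through the POSIX
   API implemented above.  The helper name bump_priority is hypothetical. */
#if 0
#include <pthread.h>
#include <sched.h>

static int bump_priority(pthread_t t)
{
  struct sched_param sp;
  int policy;
  int err = pthread_getschedparam(t, &policy, &sp);
  if (err != 0)
    return err;              /* e.g. ESRCH for an already-exited thread */
  sp.sched_priority += 1;    /* only meaningful for SCHED_FIFO/SCHED_RR */
  return pthread_setschedparam(t, policy, &sp);
}
#endif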
/* Process-wide exit() request */

static void pthread_onexit_process(int retcode, void *arg)
{
  //l4/if (__builtin_expect (__pthread_manager_request, 0) >= 0) {
  if (!l4_is_invalid_cap(__pthread_manager_request)) {
    struct pthread_request request;
    pthread_descr self = thread_self();

    /* Make sure we come back here after suspend(), in case we entered
       from a signal handler. */
    //l4/THREAD_SETMEM(self, p_signal_jmp, NULL);

    request.req_thread = self;
    request.req_kind = REQ_PROCESS_EXIT;
    request.req_args.exit.code = retcode;
#ifdef __NOT_FOR_L4__
    TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
                                        (char *) &request, sizeof(request)));
    suspend(self);
#else
    __pthread_send_manager_rq(&request, 1);
#endif
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread)
      {
#ifdef USE_TLS
        waitpid(manager_thread->p_pid, NULL, __WCLONE);
#else
        waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
        /* Since all threads have been asynchronously terminated
           (possibly holding locks), free cannot be used any more.
           For mtrace, we'd like to print something though. */
        /*
        #ifdef USE_TLS
        tcbhead_t *tcbp = (tcbhead_t *) manager_thread;
        # if defined(TLS_DTV_AT_TP)
        tcbp = (tcbhead_t *) ((char *) tcbp + TLS_PRE_TCB_SIZE);
        # endif
        _dl_deallocate_tls (tcbp, true);
        #endif
        free (__pthread_manager_thread_bos); */
        __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
      }
  }
}
#ifndef HAVE_Z_NODELETE
static int __pthread_atexit_retcode;

static void pthread_atexit_process(void *arg, int retcode)
{
  pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg);
}

static void pthread_atexit_retcode(void *arg, int retcode)
{
  __pthread_atexit_retcode = retcode;
}
#endif
/* The handler for the RESTART signal just records the signal received
   in the thread descriptor, and optionally performs a siglongjmp
   (for pthread_cond_timedwait). */

static void pthread_handle_sigrestart(int sig)
{
  pthread_descr self = check_thread_self();
  THREAD_SETMEM(self, p_signal, sig);
  if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
    siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
}
/* The handler for the CANCEL signal checks for cancellation
   (in asynchronous mode), for process-wide exit and exec requests.
   For the thread manager thread, redirect the signal to
   __pthread_manager_sighandler. */

static void pthread_handle_sigcancel(int sig)
{
  pthread_descr self = check_thread_self();
  sigjmp_buf * jmpbuf;

  if (self == manager_thread)
    {
      __pthread_manager_sighandler(sig);
      return;
    }
  if (__builtin_expect (__pthread_exit_requested, 0)) {
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread) {
#ifdef USE_TLS
      waitpid(manager_thread->p_pid, NULL, __WCLONE);
#else
      waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
    }
    _exit(__pthread_exit_code);
  }
  if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
    jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
    if (jmpbuf != NULL) {
      THREAD_SETMEM(self, p_cancel_jmp, NULL);
      siglongjmp(*jmpbuf, 1);
    }
  }
}
/* Handler for the DEBUG signal.
   The debugging strategy is as follows:
   On reception of a REQ_DEBUG request (sent by new threads created to
   the thread manager under debugging mode), the thread manager throws
   __pthread_sig_debug to itself.  The debugger (if active) intercepts
   this signal, takes into account new threads and continues execution
   of the thread manager by propagating the signal, because it doesn't
   know what the signal is specifically used for.  In the current
   implementation, the thread manager simply discards it. */

static void pthread_handle_sigdebug(int sig)
{
  /* Nothing */
}
/* Reset the state of the thread machinery after a fork().
   Close the pipe used for requests and set the main thread to the forked
   thread.
   Notice that we can't free the stack segments, as the forked thread
   may hold pointers into them. */

void __pthread_reset_main_thread(void)
{
  pthread_descr self = thread_self();

  if (!l4_is_invalid_cap(__pthread_manager_request)) {
    /* Free the thread manager stack */
    free(__pthread_manager_thread_bos);
    __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
    /* Close the two ends of the pipe */
    close_not_cancel(__pthread_manager_request);
    close_not_cancel(__pthread_manager_reader);
    __pthread_manager_request = L4_INVALID_CAP;
    __pthread_manager_reader = -1;
  }

  /* Update the pid of the main thread */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Make the forked thread the main thread */
  __pthread_main_thread = self;
  THREAD_SETMEM(self, p_nextlive, self);
  THREAD_SETMEM(self, p_prevlive, self);
#if !(USE_TLS && HAVE___THREAD)
  /* Now this thread modifies the global variables. */
  THREAD_SETMEM(self, p_errnop, &_errno);
  THREAD_SETMEM(self, p_h_errnop, &_h_errno);
# if defined __UCLIBC_HAS_IPv4__ || defined __UCLIBC_HAS_IPV6__
  THREAD_SETMEM(self, p_resp, __resp);
# endif
#endif

#ifndef FLOATING_STACKS
  /* This is to undo the setrlimit call in __pthread_init_max_stacksize.
     XXX This can be wrong if the user set the limit during the run. */
  {
    struct rlimit limit;
    if (getrlimit (RLIMIT_STACK, &limit) == 0
        && limit.rlim_cur != limit.rlim_max)
      {
        limit.rlim_cur = limit.rlim_max;
        setrlimit(RLIMIT_STACK, &limit);
      }
  }
#endif
}
/* Process-wide exec() request */

void __pthread_kill_other_threads_np(void)
{
  struct sigaction sa;
  /* Terminate all other threads and thread manager */
  pthread_onexit_process(0, NULL);
  /* Make current thread the main thread in case the calling thread
     changes its mind, does not exec(), and creates new threads instead. */
  __pthread_reset_main_thread();

  /* Reset the signal handlers behaviour for the signals the
     implementation uses since this would be passed to the new
     process. */
  memset(&sa, 0, sizeof(sa));
  if (SIG_DFL) /* if it's constant zero, it's already done */
    sa.sa_handler = SIG_DFL;
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
  if (__pthread_sig_debug > 0)
    __libc_sigaction(__pthread_sig_debug, &sa, NULL);
}
weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
/* Concurrency symbol level. */
static int current_level;

int __pthread_setconcurrency(int level)
{
  /* We don't do anything unless we have found a useful interpretation. */
  current_level = level;
  return 0;
}
weak_alias (__pthread_setconcurrency, pthread_setconcurrency)

int __pthread_getconcurrency(void)
{
  return current_level;
}
weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
/* Primitives for controlling thread execution */

void __pthread_wait_for_restart_signal(pthread_descr self)
{
  sigset_t mask;

  sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
  sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
  THREAD_SETMEM(self, p_signal, 0);
  do {
    __pthread_sigsuspend(&mask);   /* Wait for signal.  Must not be a
                                      cancellation point. */
  } while (THREAD_GETMEM(self, p_signal) != __pthread_sig_restart);

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
}
#if !__ASSUME_REALTIME_SIGNALS
/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
   signals.
   On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
   Since the restart signal does not queue, we use an atomic counter to create
   queuing semantics.  This is needed to resolve a rare race condition in
   pthread_cond_timedwait_relative. */

void __pthread_restart_old(pthread_descr th)
{
  if (pthread_atomic_increment(&th->p_resume_count) == -1)
    kill(th->p_pid, __pthread_sig_restart);
}

void __pthread_suspend_old(pthread_descr self)
{
  if (pthread_atomic_decrement(&self->p_resume_count) <= 0)
    __pthread_wait_for_restart_signal(self);
}
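
/* Illustrative walkthrough (an added note, assuming the atomic increment
   and decrement return the *previous* value, as their uses above imply)
   of the queuing semantics p_resume_count provides, starting from 0:

     suspend first:  suspend_old decrements 0 -> -1 and blocks; the later
                     restart_old increments -1 -> 0, sees the old value -1
                     and sends the restart signal, waking the thread.
     restart first:  restart_old increments 0 -> 1 without sending a
                     signal; the later suspend_old decrements 1 -> 0, sees
                     the old value 1 > 0 and returns immediately, consuming
                     the pre-posted wakeup.

   Either way exactly one restart() pairs with one suspend(), even though
   the underlying signal itself does not queue. */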
int
__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (pthread_atomic_decrement(&self->p_resume_count) == 0) {
    /* Set up a longjmp handler for the restart signal, unblock
       the signal and sleep. */

    if (sigsetjmp(jmpbuf, 1) == 0) {
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
      THREAD_SETMEM(self, p_signal, 0);
      /* Unblock the restart signal */
      __sigemptyset(&unblock);
      sigaddset(&unblock, __pthread_sig_restart);
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

      while (1) {
        struct timeval now;
        struct timespec reltime;

        /* Compute a time offset relative to now. */
        __gettimeofday (&now, NULL);
        reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
        reltime.tv_sec = abstime->tv_sec - now.tv_sec;
        if (reltime.tv_nsec < 0) {
          reltime.tv_nsec += 1000000000;
          reltime.tv_sec -= 1;
        }

        /* Sleep for the required duration.  If woken by a signal,
           resume waiting as required by Single Unix Specification. */
        if (reltime.tv_sec < 0 || nanosleep(&reltime, NULL) == 0)
          break;
      }

      /* Block the restart signal again */
      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
      was_signalled = 0;
    } else {
      was_signalled = 1;
    }
    THREAD_SETMEM(self, p_signal_jmp, NULL);
  }

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     we know we have been dequeued and resumed and that the
     resume count is balanced.  Otherwise, there are some
     cases to consider.  First, try to bump up the resume count
     back to zero.  If it goes to 1, it means restart() was
     invoked on this thread.  The signal must be consumed
     and the count bumped down and everything is cool.  We
     can return a 1 to the caller.
     Otherwise, no restart was delivered yet, so a potential
     race exists; we return a 0 to the caller which must deal
     with this race in an appropriate way; for example by
     atomically removing the thread from consideration for a
     wakeup---if such a thing fails, it means a restart is
     being delivered. */

  if (!was_signalled) {
    if (pthread_atomic_increment(&self->p_resume_count) != -1) {
      __pthread_wait_for_restart_signal(self);
      pthread_atomic_decrement(&self->p_resume_count); /* should be zero now! */
      /* woke spontaneously and consumed restart signal */
      return 1;
    }
    /* woke spontaneously but did not consume restart---caller must resolve */
    return 0;
  }
  /* woken due to restart signal */
  return 1;
}
#endif /* __ASSUME_REALTIME_SIGNALS */
void __pthread_restart_new(pthread_descr th)
{
  /* The barrier is probably not needed, in which case it still documents
     our assumptions.  The intent is to commit previous writes to shared
     memory so the woken thread will have a consistent view.  Complementary
     read barriers are present in the suspend functions. */
  WRITE_MEMORY_BARRIER();
  kill(th->p_pid, __pthread_sig_restart);
}

/* There is no __pthread_suspend_new because it would just
   be a wasteful wrapper for __pthread_wait_for_restart_signal */
int
__pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (sigsetjmp(jmpbuf, 1) == 0) {
    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
    THREAD_SETMEM(self, p_signal, 0);
    /* Unblock the restart signal */
    __sigemptyset(&unblock);
    sigaddset(&unblock, __pthread_sig_restart);
    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

    while (1) {
      struct timeval now;
      struct timespec reltime;

      /* Compute a time offset relative to now. */
      __gettimeofday (&now, NULL);
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
      if (reltime.tv_nsec < 0) {
        reltime.tv_nsec += 1000000000;
        reltime.tv_sec -= 1;
      }

      /* Sleep for the required duration.  If woken by a signal,
         resume waiting as required by Single Unix Specification. */
      if (reltime.tv_sec < 0 || nanosleep(&reltime, NULL) == 0)
        break;
    }

    /* Block the restart signal again */
    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
    was_signalled = 0;
  } else {
    was_signalled = 1;
  }
  THREAD_SETMEM(self, p_signal_jmp, NULL);

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     everything is cool.  We have been removed from whatever
     we were waiting on by the other thread, and consumed its signal.

     Otherwise, this thread woke up spontaneously, or due to a signal other
     than restart.  This is an ambiguous case that must be resolved by
     the caller; the thread is still eligible for a restart wakeup
     so there is a race. */

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
  return was_signalled;
}
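
/* Illustrative sketch only (not part of this library): how a caller is
   expected to resolve the ambiguous 0 return of the timedsuspend functions.
   The queue and the enqueue/remove helpers are hypothetical stand-ins for
   what pthread_cond_timedwait actually does. */
#if 0
  enqueue(&cond->c_waiting, self);              /* register for wakeup */
  if (!__pthread_timedsuspend(self, abstime)) { /* 0: timed out, maybe raced */
    if (remove_from_queue(&cond->c_waiting, self))
      return ETIMEDOUT;           /* removed ourselves: genuine timeout */
    /* Removal failed: a restart is already on its way; consume it so the
       resume state stays balanced. */
    __pthread_wait_for_restart_signal(self);
  }
#endif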
#ifdef DEBUG
#include <stdarg.h>

void __pthread_message(const char * fmt, ...)
{
  char buffer[1024];
  va_list args;

  sprintf(buffer, "%05d : ", __getpid());
  va_start(args, fmt);
  vsnprintf(buffer + 8, sizeof(buffer) - 8, fmt, args);
  va_end(args);
  TEMP_FAILURE_RETRY(write_not_cancel(2, buffer, strlen(buffer)));
}
#endif