/* Helper code for POSIX timer implementation on LinuxThreads.
   Copyright (C) 2000, 2001, 2002, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Kaz Kylheku <kaz@ashi.footprints.net>.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>

#include "posix-timer.h"
34 /* Number of threads used. */
35 #define THREAD_MAXNODES 16
37 /* Array containing the descriptors for the used threads. */
38 static struct thread_node thread_array[THREAD_MAXNODES];
40 /* Static array with the structures for all the timers. */
41 struct timer_node __timer_array[TIMER_MAX];
43 /* Global lock to protect operation on the lists. */
44 pthread_mutex_t __timer_mutex = PTHREAD_MUTEX_INITIALIZER;
46 /* Variable to protext initialization. */
47 pthread_once_t __timer_init_once_control = PTHREAD_ONCE_INIT;
49 /* Nonzero if initialization of timer implementation failed. */
50 int __timer_init_failed;
52 /* Node for the thread used to deliver signals. */
53 struct thread_node __timer_signal_thread_rclk;
55 /* Lists to keep free and used timers and threads. */
56 struct list_links timer_free_list;
57 struct list_links thread_free_list;
58 struct list_links thread_active_list;
61 #ifdef __NR_rt_sigqueueinfo
62 extern int __syscall_rt_sigqueueinfo (int, int, siginfo_t *);
66 /* List handling functions. */
67 static __inline__ void
68 list_init (struct list_links *list)
70 list->next = list->prev = list;
73 static __inline__ void
74 list_append (struct list_links *list, struct list_links *newp)
76 newp->prev = list->prev;
78 list->prev->next = newp;
/* Insert NEWP immediately before node LIST.  In a circular list this
   is the same pointer surgery as appending at LIST, so reuse it.  */
static __inline__ void
list_insbefore (struct list_links *list, struct list_links *newp)
{
  list_append (list, newp);
}
89 * Like list_unlink_ip, except that calling it on a node that
90 * is already unlinked is disastrous rather than a noop.
93 static __inline__ void
94 list_unlink (struct list_links *list)
96 struct list_links *lnext = list->next, *lprev = list->prev;
102 static __inline__ struct list_links *
103 list_first (struct list_links *list)
/* Return the end-of-iteration sentinel for LIST; compare against the
   results of list_first/list_next to detect list exhaustion.  */
static __inline__ struct list_links *
list_null (struct list_links *list)
{
  return list;
}
114 static __inline__ struct list_links *
115 list_next (struct list_links *list)
120 static __inline__ int
121 list_isempty (struct list_links *list)
123 return list->next == list;
127 /* Functions build on top of the list functions. */
128 static __inline__ struct thread_node *
129 thread_links2ptr (struct list_links *list)
131 return (struct thread_node *) ((char *) list
132 - offsetof (struct thread_node, links));
135 static __inline__ struct timer_node *
136 timer_links2ptr (struct list_links *list)
138 return (struct timer_node *) ((char *) list
139 - offsetof (struct timer_node, links));
143 /* Initialize a newly allocated thread structure. */
145 thread_init (struct thread_node *thread, const pthread_attr_t *attr, clockid_t clock_id)
148 thread->attr = *attr;
151 pthread_attr_init (&thread->attr);
152 pthread_attr_setdetachstate (&thread->attr, PTHREAD_CREATE_DETACHED);
156 list_init (&thread->timer_queue);
157 pthread_cond_init (&thread->cond, 0);
158 thread->current_timer = 0;
159 thread->captured = pthread_self ();
160 thread->clock_id = clock_id;
164 /* Initialize the global lists, and acquire global resources. Error
165 reporting is done by storing a non-zero value to the global variable
166 timer_init_failed. */
172 list_init (&timer_free_list);
173 list_init (&thread_free_list);
174 list_init (&thread_active_list);
176 for (i = 0; i < TIMER_MAX; ++i)
178 list_append (&timer_free_list, &__timer_array[i].links);
179 __timer_array[i].inuse = TIMER_FREE;
182 for (i = 0; i < THREAD_MAXNODES; ++i)
183 list_append (&thread_free_list, &thread_array[i].links);
185 thread_init (&__timer_signal_thread_rclk, 0, CLOCK_REALTIME);
189 /* This is a handler executed in a child process after a fork()
190 occurs. It reinitializes the module, resetting all of the data
191 structures to their initial state. The mutex is initialized in
192 case it was locked in the parent process. */
194 reinit_after_fork (void)
197 pthread_mutex_init (&__timer_mutex, 0);
201 /* Called once form pthread_once in timer_init. This initializes the
202 module and ensures that reinit_after_fork will be executed in any
205 __timer_init_once (void)
208 pthread_atfork (0, 0, reinit_after_fork);
212 /* Deinitialize a thread that is about to be deallocated. */
214 thread_deinit (struct thread_node *thread)
216 assert (list_isempty (&thread->timer_queue));
217 pthread_cond_destroy (&thread->cond);
221 /* Allocate a thread structure from the global free list. Global
222 mutex lock must be held by caller. The thread is moved to
225 __timer_thread_alloc (const pthread_attr_t *desired_attr, clockid_t clock_id)
227 struct list_links *node = list_first (&thread_free_list);
229 if (node != list_null (&thread_free_list))
231 struct thread_node *thread = thread_links2ptr (node);
233 thread_init (thread, desired_attr, clock_id);
234 list_append (&thread_active_list, node);
242 /* Return a thread structure to the global free list. Global lock
243 must be held by caller. */
245 __timer_thread_dealloc (struct thread_node *thread)
247 thread_deinit (thread);
248 list_unlink (&thread->links);
249 list_append (&thread_free_list, &thread->links);
253 /* Each of our threads which terminates executes this cleanup
254 handler. We never terminate threads ourselves; if a thread gets here
255 it means that the evil application has killed it. If the thread has
256 timers, these require servicing and so we must hire a replacement
257 thread right away. We must also unblock another thread that may
258 have been waiting for this thread to finish servicing a timer (see
262 thread_cleanup (void *val)
266 struct thread_node *thread = val;
268 /* How did the signal thread get killed? */
269 assert (thread != &__timer_signal_thread_rclk);
271 pthread_mutex_lock (&__timer_mutex);
275 /* We are no longer processing a timer event. */
276 thread->current_timer = 0;
278 if (list_isempty (&thread->timer_queue))
279 __timer_thread_dealloc (thread);
281 (void) __timer_thread_start (thread);
283 pthread_mutex_unlock (&__timer_mutex);
285 /* Unblock potentially blocked timer_delete(). */
286 pthread_cond_broadcast (&thread->cond);
291 /* Handle a timer which is supposed to go off now. */
293 thread_expire_timer (struct thread_node *self, struct timer_node *timer)
295 self->current_timer = timer; /* Lets timer_delete know timer is running. */
297 pthread_mutex_unlock (&__timer_mutex);
299 switch (__builtin_expect (timer->event.sigev_notify, SIGEV_SIGNAL))
305 #ifdef __NR_rt_sigqueueinfo
309 /* First, clear the siginfo_t structure, so that we don't pass our
310 stack content to other tasks. */
311 memset (&info, 0, sizeof (siginfo_t));
312 /* We must pass the information about the data in a siginfo_t
314 info.si_signo = timer->event.sigev_signo;
315 info.si_code = SI_TIMER;
316 info.si_pid = timer->creator_pid;
317 info.si_uid = getuid ();
318 info.si_value = timer->event.sigev_value;
320 INLINE_SYSCALL (rt_sigqueueinfo, 3, info.si_pid, info.si_signo, &info);
323 if (pthread_kill (self->captured, timer->event.sigev_signo) != 0)
325 if (pthread_kill (self->id, timer->event.sigev_signo) != 0)
332 timer->event.sigev_notify_function (timer->event.sigev_value);
336 assert (! "unknown event");
340 pthread_mutex_lock (&__timer_mutex);
342 self->current_timer = 0;
344 pthread_cond_broadcast (&self->cond);
348 /* Thread function; executed by each timer thread. The job of this
349 function is to wait on the thread's timer queue and expire the
350 timers in chronological order as close to their scheduled time as
353 __attribute__ ((noreturn))
354 thread_func (void *arg)
356 struct thread_node *self = arg;
358 /* Register cleanup handler, in case rogue application terminates
359 this thread. (This cannot happen to __timer_signal_thread, which
360 doesn't invoke application callbacks). */
362 pthread_cleanup_push (thread_cleanup, self);
364 pthread_mutex_lock (&__timer_mutex);
368 struct list_links *first;
369 struct timer_node *timer = NULL;
371 /* While the timer queue is not empty, inspect the first node. */
372 first = list_first (&self->timer_queue);
373 if (first != list_null (&self->timer_queue))
377 timer = timer_links2ptr (first);
379 /* This assumes that the elements of the list of one thread
380 are all for the same clock. */
381 clock_gettime (timer->clock, &now);
385 /* If the timer is due or overdue, remove it from the queue.
386 If it's a periodic timer, re-compute its new time and
387 requeue it. Either way, perform the timer expiry. */
388 if (timespec_compare (&now, &timer->expirytime) < 0)
391 list_unlink_ip (first);
393 if (__builtin_expect (timer->value.it_interval.tv_sec, 0) != 0
394 || timer->value.it_interval.tv_nsec != 0)
396 timer->overrun_count = 0;
397 timespec_add (&timer->expirytime, &timer->expirytime,
398 &timer->value.it_interval);
399 while (timespec_compare (&timer->expirytime, &now) < 0)
401 timespec_add (&timer->expirytime, &timer->expirytime,
402 &timer->value.it_interval);
403 if (timer->overrun_count < DELAYTIMER_MAX)
404 ++timer->overrun_count;
406 __timer_thread_queue_timer (self, timer);
409 thread_expire_timer (self, timer);
411 first = list_first (&self->timer_queue);
412 if (first == list_null (&self->timer_queue))
415 timer = timer_links2ptr (first);
419 /* If the queue is not empty, wait until the expiry time of the
420 first node. Otherwise wait indefinitely. Insertions at the
421 head of the queue must wake up the thread by broadcasting
422 this condition variable. */
424 pthread_cond_timedwait (&self->cond, &__timer_mutex,
427 pthread_cond_wait (&self->cond, &__timer_mutex);
429 /* This macro will never be executed since the while loop loops
430 forever - but we have to add it for proper nesting. */
431 pthread_cleanup_pop (1);
435 /* Enqueue a timer in wakeup order in the thread's timer queue.
436 Returns 1 if the timer was inserted at the head of the queue,
437 causing the queue's next wakeup time to change. */
440 __timer_thread_queue_timer (struct thread_node *thread,
441 struct timer_node *insert)
443 struct list_links *iter;
446 for (iter = list_first (&thread->timer_queue);
447 iter != list_null (&thread->timer_queue);
448 iter = list_next (iter))
450 struct timer_node *timer = timer_links2ptr (iter);
452 if (timespec_compare (&insert->expirytime, &timer->expirytime) < 0)
457 list_insbefore (iter, &insert->links);
462 /* Start a thread and associate it with the given thread node. Global
463 lock must be held by caller. */
465 __timer_thread_start (struct thread_node *thread)
469 assert (!thread->exists);
472 if (pthread_create (&thread->id, &thread->attr,
473 (void *(*) (void *)) thread_func, thread) != 0)
484 __timer_thread_wakeup (struct thread_node *thread)
486 pthread_cond_broadcast (&thread->cond);
490 /* Compare two pthread_attr_t thread attributes for exact equality.
491 Returns 1 if they are equal, otherwise zero if they are not equal or
492 contain illegal values. This version is LinuxThreads-specific for
493 performance reason. One could use the access functions to get the
494 values of all the fields of the attribute structure. */
496 thread_attr_compare (const pthread_attr_t *left, const pthread_attr_t *right)
498 return (left->__detachstate == right->__detachstate
499 && left->__schedpolicy == right->__schedpolicy
500 && left->__guardsize == right->__guardsize
501 && (left->__schedparam.sched_priority
502 == right->__schedparam.sched_priority)
503 && left->__inheritsched == right->__inheritsched
504 && left->__scope == right->__scope
505 && left->__stacksize == right->__stacksize
506 && left->__stackaddr_set == right->__stackaddr_set
507 && (left->__stackaddr_set
508 || left->__stackaddr == right->__stackaddr));
512 /* Search the list of active threads and find one which has matching
513 attributes. Global mutex lock must be held by caller. */
515 __timer_thread_find_matching (const pthread_attr_t *desired_attr,
516 clockid_t desired_clock_id)
518 struct list_links *iter = list_first (&thread_active_list);
520 while (iter != list_null (&thread_active_list))
522 struct thread_node *candidate = thread_links2ptr (iter);
524 if (thread_attr_compare (desired_attr, &candidate->attr)
525 && desired_clock_id == candidate->clock_id)
528 iter = list_next (iter);
535 /* Grab a free timer structure from the global free list. The global
536 lock must be held by the caller. */
540 struct list_links *node = list_first (&timer_free_list);
542 if (node != list_null (&timer_free_list))
544 struct timer_node *timer = timer_links2ptr (node);
545 list_unlink_ip (node);
546 timer->inuse = TIMER_INUSE;
555 /* Return a timer structure to the global free list. The global lock
556 must be held by the caller. */
558 __timer_dealloc (struct timer_node *timer)
560 assert (timer->refcount == 0);
561 timer->thread = NULL; /* Break association between timer and thread. */
562 timer->inuse = TIMER_FREE;
563 list_append (&timer_free_list, &timer->links);
/* Thread cancellation handler which unlocks a mutex.  ARG is the
   pthread_mutex_t * to unlock.  */
void
__timer_mutex_cancel_handler (void *arg)
{
  pthread_mutex_unlock (arg);
}