/* Linuxthreads - a simple clone()-based implementation of Posix */
/* threads for Linux. */
/* Copyright (C) 1998 Xavier Leroy (Xavier.Leroy@inria.fr) */

/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Library General Public License */
/* as published by the Free Software Foundation; either version 2 */
/* of the License, or (at your option) any later version. */

/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Library General Public License for more details. */

#include "internals.h"

libpthread_hidden_proto(nanosleep)

static void __pthread_acquire(int * spinlock);
static __inline__ void __pthread_release(int * spinlock)
{
  WRITE_MEMORY_BARRIER();
  *spinlock = __LT_SPINLOCK_INIT;
  /* The empty asm is a compiler-level barrier for *spinlock: it keeps the
     store above from being delayed or merged with later accesses. */
  __asm__ __volatile__ ("" : "=m" (*spinlock) : "m" (*spinlock));
}
/* The status field of a spinlock is a pointer whose least significant
   bit is a locked flag.

   Thus the field values have the following meanings:

   status == 0:       spinlock is free
   status == 1:       spinlock is taken; no thread is waiting on it

   (status & 1) == 1: spinlock is taken and (status & ~1L) is a
                      pointer to the first waiting thread; other
                      waiting threads are linked via the p_nextlock
                      field.
   (status & 1) == 0: same as above, but spinlock is not taken.

   The waiting list is not sorted by priority order.
   Actually, we always insert at top of list (sole insertion mode
   that can be performed without locking).
   For __pthread_unlock, we perform a linear search in the list
   to find the highest-priority, oldest waiting thread.
   This is safe because there are no concurrent __pthread_unlock
   operations -- only the thread that locked the mutex can unlock it. */
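/* Illustrative helpers (not part of the original source): the encoding
   described above, written out explicitly.  The names are hypothetical
   and exist only to make the bit layout easier to see. */

static __inline__ int fastlock_is_taken(long status)
{
  return (status & 1L) != 0;               /* low bit: lock is held */
}

static __inline__ pthread_descr fastlock_first_waiter(long status)
{
  return (pthread_descr) (status & ~1L);   /* other bits: head of the
                                              p_nextlock wait list, or
                                              NULL if nobody waits */
}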
void internal_function __pthread_lock(struct _pthread_fastlock * lock,
                                      pthread_descr self)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldstatus, newstatus;
  int successful_seizure, spurious_wakeup_count;
  int spin_count;
#endif
#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&lock->__spinlock);
    return;
  }
#endif
#if defined HAS_COMPARE_AND_SWAP
  /* First try it without preparation.  Maybe it's a completely
     uncontested lock. */
  if (lock->__status == 0 && __compare_and_swap (&lock->__status, 0, 1))
    return;

  spurious_wakeup_count = 0;

  /* On SMP, try spinning to get the lock. */

  if (__pthread_smp_kernel) {
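    /* When compare-and-swap is available, the __spinlock field is not used
       as a spinlock at all: it holds an adaptive estimate of how many spin
       iterations a recent successful acquisition needed.  We spin for up to
       roughly twice that (plus a small floor), capped at
       MAX_ADAPTIVE_SPIN_COUNT, and the updates below nudge the estimate
       1/8 of the way toward the count actually observed. */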
    int max_count = lock->__spinlock * 2 + 10;

    if (max_count > MAX_ADAPTIVE_SPIN_COUNT)
      max_count = MAX_ADAPTIVE_SPIN_COUNT;

    for (spin_count = 0; spin_count < max_count; spin_count++) {
      if (((oldstatus = lock->__status) & 1) == 0) {
        if(__compare_and_swap(&lock->__status, oldstatus, oldstatus | 1)) {
          if (spin_count)
            lock->__spinlock += (spin_count - lock->__spinlock) / 8;
          READ_MEMORY_BARRIER();
          return;
        }
      }
      __asm__ __volatile__ ("" : "=m" (lock->__status) : "m" (lock->__status));
    }

    lock->__spinlock += (spin_count - lock->__spinlock) / 8;
  }
  /* No luck, try once more or suspend. */

  do {
    oldstatus = lock->__status;
    successful_seizure = 0;

    if ((oldstatus & 1) == 0) {
      newstatus = oldstatus | 1;
      successful_seizure = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      newstatus = (long) self | 1;
    }

    if (self != NULL) {
      THREAD_SETMEM(self, p_nextlock, (pthread_descr) (oldstatus));
      /* Make sure the store in p_nextlock completes before performing
         the compare-and-swap */
      MEMORY_BARRIER();
    }
  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));
  /* Suspend with guard against spurious wakeup.
     This can happen in pthread_cond_timedwait_relative, when the thread
     wakes up due to timeout and is still on the condvar queue, and then
     locks the queue to remove itself. At that point it may still be on the
     queue, and may be resumed by a condition signal. */

  if (!successful_seizure) {
    for (;;) {
      suspend(self);
      if (self->p_nextlock != NULL) {
        /* Count resumes that don't belong to us. */
        spurious_wakeup_count++;
        continue;
      }
      break;
    }
  }

  /* Put back any resumes we caught that don't belong to us. */
  while (spurious_wakeup_count--)
    restart(self);

  READ_MEMORY_BARRIER();
}
int __pthread_unlock(struct _pthread_fastlock * lock)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldstatus;
  pthread_descr thr, * ptr, * maxptr;
  int maxprio;
#endif
#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_release(&lock->__spinlock);
    return 0;
  }
#endif
#if defined HAS_COMPARE_AND_SWAP
  WRITE_MEMORY_BARRIER();
again:
  while ((oldstatus = lock->__status) == 1) {
    if (__compare_and_swap_with_release_semantics(&lock->__status,
                                                  oldstatus, 0))
      return 0;
  }

  /* Find thread in waiting queue with maximal priority */
  ptr = (pthread_descr *) &lock->__status;
  thr = (pthread_descr) (oldstatus & ~1L);
  maxprio = 0;
  maxptr = ptr;
  /* Before we iterate over the wait queue, we need to execute
     a read barrier, otherwise we may read stale contents of nodes that may
     just have been inserted by other processors. One read barrier is enough to
     ensure we have a stable list; we don't need one for each pointer chase
     through the list, because we are the owner of the lock; other threads
     can only add nodes at the front; if a front node is consistent,
     the ones behind it must also be. */

  READ_MEMORY_BARRIER();
  while (thr != 0) {
    if (thr->p_priority >= maxprio) {
      maxptr = ptr;
      maxprio = thr->p_priority;
    }
    ptr = &(thr->p_nextlock);
    thr = (pthread_descr)((long)(thr->p_nextlock) & ~1L);
  }
  /* Remove max prio thread from waiting list. */
  if (maxptr == (pthread_descr *) &lock->__status) {
    /* If max prio thread is at head, remove it with compare-and-swap
       to guard against concurrent lock operation. This removal
       also has the side effect of marking the lock as released
       because the new status comes from thr->p_nextlock whose
       least significant bit is clear. */
    thr = (pthread_descr) (oldstatus & ~1L);
    if (! __compare_and_swap_with_release_semantics
            (&lock->__status, oldstatus, (long)(thr->p_nextlock) & ~1L))
      goto again;
  } else {
    /* No risk of concurrent access, remove max prio thread normally.
       But in this case we must also flip the least significant bit
       of the status to mark the lock as released. */
    thr = (pthread_descr)((long)*maxptr & ~1L);
    *maxptr = thr->p_nextlock;
    /* Ensure deletion from linked list completes before we
       release the lock. */
    WRITE_MEMORY_BARRIER();
    do {
      oldstatus = lock->__status;
    } while (!__compare_and_swap_with_release_semantics(&lock->__status,
             oldstatus, oldstatus & ~1L));
  }
  /* Wake up the selected waiting thread. Woken thread can check
     its own p_nextlock field for NULL to detect that it has been removed. No
     barrier is needed here, since restart() and suspend() take
     care of memory synchronization. */
  thr->p_nextlock = NULL;
  restart(thr);
  return 0;
#endif
}
/* Alternate fastlocks do not queue threads directly. Instead, they queue
 * these wait queue node structures. When a timed wait wakes up due to
 * a timeout, it can leave its wait node in the queue (because there
 * is no safe way to remove from the queue). Some other thread will
 * deallocate the abandoned node.
 */
struct wait_node {
  struct wait_node *next;   /* Next node in null terminated linked list */
  pthread_descr thr;        /* The thread waiting with this node */
  int abandoned;            /* Atomic flag */
};
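/* Life cycle of a wait node: __pthread_alt_lock links a node that lives on
   the waiter's own stack at the head of the queue, while
   __pthread_alt_timedlock uses a node obtained from wait_node_alloc().  The
   unlocking thread either hands the lock to a queued waiter or, if the
   waiter timed out and won the race to set the abandoned flag, eventually
   unlinks the orphaned node and returns it to the free list below. */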
static long wait_node_free_list;
static int wait_node_free_list_spinlock;
/* Allocate a new node from the head of the free list using an atomic
   operation, or else using malloc if that list is empty.  A fundamental
   assumption here is that we can safely access wait_node_free_list->next.
   That's because we never free nodes once we allocate them, so a pointer to a
   node remains valid indefinitely. */
static struct wait_node *wait_node_alloc(void)
{
  struct wait_node *new_node = 0;

  __pthread_acquire(&wait_node_free_list_spinlock);
  if (wait_node_free_list != 0) {
    new_node = (struct wait_node *) wait_node_free_list;
    wait_node_free_list = (long) new_node->next;
  }
  WRITE_MEMORY_BARRIER();
  __pthread_release(&wait_node_free_list_spinlock);
  /* Free list was empty; fall back to malloc.  (sizeof does not evaluate
     its operand, so the call below does not recurse.) */
  if (new_node == 0)
    return malloc(sizeof *wait_node_alloc());

  return new_node;
}
/* Return a node to the head of the free list using an atomic
   operation. */

static void wait_node_free(struct wait_node *wn)
{
  __pthread_acquire(&wait_node_free_list_spinlock);
  wn->next = (struct wait_node *) wait_node_free_list;
  wait_node_free_list = (long) wn;
  WRITE_MEMORY_BARRIER();
  __pthread_release(&wait_node_free_list_spinlock);
}
#if defined HAS_COMPARE_AND_SWAP

/* Remove a wait node from the specified queue. It is assumed
   that the removal takes place concurrently with only atomic insertions at the
   head of the queue. */
static void wait_node_dequeue(struct wait_node **pp_head,
                              struct wait_node **pp_node,
                              struct wait_node *p_node)
{
  /* If the node is being deleted from the head of the
     list, it must be deleted using atomic compare-and-swap.
     Otherwise it can be deleted in the straightforward way. */

  if (pp_node == pp_head) {
    /* We don't need a read barrier between these next two loads,
       because it is assumed that the caller has already ensured
       the stability of *p_node with respect to p_node. */
    long oldvalue = (long) p_node;
    long newvalue = (long) p_node->next;

    if (__compare_and_swap((long *) pp_node, oldvalue, newvalue))
      return;
    /* Oops! Compare and swap failed, which means the node is
       no longer first. We delete it using the ordinary method. But we don't
       know the identity of the node which now holds the pointer to the node
       being deleted, so we must search from the beginning. */
    for (pp_node = pp_head; p_node != *pp_node; ) {
      pp_node = &(*pp_node)->next;
      READ_MEMORY_BARRIER(); /* Stabilize *pp_node for next iteration. */
    }
  }

  *pp_node = p_node->next;
}
void __pthread_alt_lock(struct _pthread_fastlock * lock,
                        pthread_descr self)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldstatus, newstatus;
#endif
  struct wait_node wait_node;

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
    int suspend_needed = 0;
    __pthread_acquire(&lock->__spinlock);

    if (lock->__status == 0)
      lock->__status = 1;
    else {
      if (self == NULL)
        self = thread_self();

      wait_node.abandoned = 0;
      wait_node.next = (struct wait_node *) lock->__status;
      wait_node.thr = self;
      lock->__status = (long) &wait_node;
      suspend_needed = 1;
    }

    __pthread_release(&lock->__spinlock);
#if defined HAS_COMPARE_AND_SWAP
  do {
    oldstatus = lock->__status;
    if (oldstatus == 0) {
      newstatus = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      wait_node.thr = self;
      newstatus = (long) &wait_node;
    }
    wait_node.abandoned = 0;
    wait_node.next = (struct wait_node *) oldstatus;
    /* Make sure the store in wait_node.next completes before performing
       the compare-and-swap */
    MEMORY_BARRIER();
  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));
  /* Suspend. Note that unlike in __pthread_lock, we don't worry
     here about spurious wakeup. That's because this lock is not
     used in situations where that can happen; the restart can
     only come from the previous lock owner. */

  if (oldstatus != 0)
    suspend(self);

  READ_MEMORY_BARRIER();
}
/* Timed-out lock operation; returns 0 to indicate timeout. */

int __pthread_alt_timedlock(struct _pthread_fastlock * lock,
                            pthread_descr self, const struct timespec *abstime)
{
  long oldstatus = 0;
#if defined HAS_COMPARE_AND_SWAP
  long newstatus;
#endif
  struct wait_node *p_wait_node = wait_node_alloc();
  /* Out of memory, just give up and do ordinary lock. */
  if (p_wait_node == 0) {
    __pthread_alt_lock(lock, self);
    return 1;
  }
#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
    __pthread_acquire(&lock->__spinlock);
    if (lock->__status == 0)
      lock->__status = 1;
    else {
      if (self == NULL)
        self = thread_self();

      p_wait_node->abandoned = 0;
      p_wait_node->next = (struct wait_node *) lock->__status;
      p_wait_node->thr = self;
      lock->__status = (long) p_wait_node;
      oldstatus = 1; /* force suspend */
    }

    __pthread_release(&lock->__spinlock);
    goto suspend;
#if defined HAS_COMPARE_AND_SWAP
  do {
    oldstatus = lock->__status;
    if (oldstatus == 0) {
      newstatus = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      p_wait_node->thr = self;
      newstatus = (long) p_wait_node;
    }
    p_wait_node->abandoned = 0;
    p_wait_node->next = (struct wait_node *) oldstatus;
    /* Make sure the store in wait_node.next completes before performing
       the compare-and-swap */
    MEMORY_BARRIER();
  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  suspend:
#endif
  /* If we did not get the lock, do a timed suspend. If we wake up due
     to a timeout, then there is a race; the old lock owner may try
     to remove us from the queue. This race is resolved by us and the owner
     doing an atomic testandset() to change the state of the wait node from 0
     to 1. If we succeed, then it's a timeout and we abandon the node in the
     queue. If we fail, it means the owner gave us the lock. */
  if (oldstatus != 0) {
    if (timedsuspend(self, abstime) == 0) {
      if (!testandset(&p_wait_node->abandoned))
        return 0; /* Timeout! */

      /* Eat outstanding resume from owner, otherwise wait_node_free() below
         will race with owner's wait_node_dequeue(). */
      suspend(self);
    }
  }

  wait_node_free(p_wait_node);
  READ_MEMORY_BARRIER();

  return 1; /* Got the lock! */
void __pthread_alt_unlock(struct _pthread_fastlock *lock)
{
  struct wait_node *p_node, **pp_node, *p_max_prio, **pp_max_prio;
  struct wait_node ** const pp_head = (struct wait_node **) &lock->__status;
  int maxprio;

  WRITE_MEMORY_BARRIER();
#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
    __pthread_acquire(&lock->__spinlock);
  /* If no threads are waiting for this lock, try to just
     atomically release it. */
#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  if (lock->__status == 0 || lock->__status == 1) {

#if defined TEST_FOR_COMPARE_AND_SWAP

#if defined HAS_COMPARE_AND_SWAP
  long oldstatus = lock->__status;
  if (oldstatus == 0 || oldstatus == 1) {
    if (__compare_and_swap_with_release_semantics (&lock->__status, oldstatus, 0))
  /* Process the entire queue of wait nodes. Remove all abandoned
     wait nodes and put them into the global free queue, and
     remember the one unabandoned node which refers to the thread
     having the highest priority. */

  pp_max_prio = pp_node = pp_head;
  p_max_prio = p_node = *pp_head;
  maxprio = INT_MIN;

  READ_MEMORY_BARRIER(); /* Prevent access to stale data through p_node */
  while (p_node != (struct wait_node *) 1) {
    int prio;

    if (p_node->abandoned) {
      /* Remove abandoned node. */
#if defined TEST_FOR_COMPARE_AND_SWAP
      if (!__pthread_has_cas)
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
        *pp_node = p_node->next;
#if defined TEST_FOR_COMPARE_AND_SWAP

#if defined HAS_COMPARE_AND_SWAP
        wait_node_dequeue(pp_head, pp_node, p_node);
      wait_node_free(p_node);
      /* Note that the next assignment may take us to the beginning
         of the queue, to newly inserted nodes, if pp_node == pp_head.
         In that case we need a memory barrier to stabilize the first of
         these nodes. */
      p_node = *pp_node;
      if (pp_node == pp_head)
        READ_MEMORY_BARRIER(); /* No stale reads through p_node */
      continue;
    } else if ((prio = p_node->thr->p_priority) >= maxprio) {
      /* Otherwise remember it if its thread has a higher or equal priority
         compared to that of any node seen thus far. */
      maxprio = prio;
      pp_max_prio = pp_node;
      p_max_prio = p_node;
    }

    /* This cannot jump backward in the list, so no further read
       barrier is needed. */
    pp_node = &p_node->next;
    p_node = *pp_node;
  }
  /* If all threads abandoned, go back to top */
  if (maxprio == INT_MIN)
  /* Now we want to remove the max priority thread's wait node from
     the list. Before we can do this, we must atomically try to change the
     node's abandon state from zero to nonzero. If we succeed, that means we
     have the node that we will wake up. If we failed, then it means the
     thread timed out and abandoned the node in which case we repeat the
     whole unlock operation. */
  if (!testandset(&p_max_prio->abandoned)) {
#if defined TEST_FOR_COMPARE_AND_SWAP
    if (!__pthread_has_cas)
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
      *pp_max_prio = p_max_prio->next;
#if defined TEST_FOR_COMPARE_AND_SWAP

#if defined HAS_COMPARE_AND_SWAP
      wait_node_dequeue(pp_head, pp_max_prio, p_max_prio);
    restart(p_max_prio->thr);
#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
    __pthread_release(&lock->__spinlock);
/* Compare-and-swap emulation with a spinlock */

#ifdef TEST_FOR_COMPARE_AND_SWAP
int __pthread_has_cas = 0;
#endif

#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
                               int * spinlock)
{
  int res;

  __pthread_acquire(spinlock);
  if (*ptr == oldval) {
    *ptr = newval; res = 1;
  } else {
    res = 0;
  }
  __pthread_release(spinlock);
  return res;
}
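/* Illustrative sketch (an assumption, not code from this file): callers
   elsewhere in LinuxThreads choose between the hardware __compare_and_swap
   and the emulation above using the same TEST_FOR_COMPARE_AND_SWAP /
   __pthread_has_cas convention seen throughout this file.  The helper name
   below is hypothetical and only demonstrates the dispatch pattern. */

static __inline__ int example_compare_and_swap(long * ptr, long oldval,
                                               long newval, int * spinlock)
{
#if defined TEST_FOR_COMPARE_AND_SWAP
  if (__pthread_has_cas)
    return __compare_and_swap(ptr, oldval, newval);
  return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
#elif defined HAS_COMPARE_AND_SWAP
  return __compare_and_swap(ptr, oldval, newval);
#else
  return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
#endif
}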
/* The retry strategy is as follows:
   - We test and set the spinlock MAX_SPIN_COUNT times, calling
     sched_yield() each time. This gives ample opportunity for other
     threads with priority >= our priority to make progress and
     release the spinlock.
   - If a thread with priority < our priority owns the spinlock,
     calling sched_yield() repeatedly is useless, since we're preventing
     the owning thread from making progress and releasing the spinlock.
     So, after MAX_SPIN_COUNT attempts, we suspend the calling thread
     using nanosleep(). This again should give time to the owning thread
     for releasing the spinlock.
     Notice that the nanosleep() interval must not be too small,
     since the kernel does busy-waiting for short intervals in a realtime
     process (!). The smallest duration that guarantees thread
     suspension is currently 2ms.
   - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
     sched_yield(), then sleeping again if needed. */
static void __pthread_acquire(int * spinlock)
{
  int cnt = 0;
  struct timespec tm;

  READ_MEMORY_BARRIER();
  while (testandset(spinlock)) {
    if (cnt < MAX_SPIN_COUNT) {
      sched_yield(); cnt++;
    } else {
      tm.tv_sec = 0; tm.tv_nsec = SPIN_SLEEP_DURATION;
      nanosleep(&tm, NULL); cnt = 0;
    }
  }
}
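/* Usage note: on machines without a hardware compare-and-swap, the locking
   functions above bracket their critical sections with this pair, using the
   lock's __spinlock word, e.g.

     __pthread_acquire(&lock->__spinlock);
     ... examine or update lock->__status ...
     __pthread_release(&lock->__spinlock);
*/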