l4/pkg/uclibc/lib/contrib/uclibc/libpthread/nptl/pthread_mutex_trylock.c
/* Copyright (C) 2002, 2003, 2005-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>

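/* Try to lock MUTEX without blocking.  Returns 0 on success, EBUSY if
   the mutex is already locked, and other error codes (EAGAIN, EDEADLK,
   EOWNERDEAD, ENOTRECOVERABLE, EINVAL) for the recursive, error-checking,
   robust, and priority-protected variants handled below.  */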
int
__pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

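      /* Not the owner: attempt a non-blocking acquisition of the
         low-level lock; lll_trylock returns zero on success.  */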
      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;

    case PTHREAD_MUTEX_ERRORCHECK_NP:
    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Normal mutex.  */
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
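
      /* list_op_pending lets the kernel and the pthread cleanup code
         find this mutex if the thread dies between acquiring the lock
         and enqueueing it on its robust list.  */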
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

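              /* Atomically claim the lock, preserving any waiters bit.
                 The CAS returns the previous value of the lock word; if
                 that is not what we read, somebody changed it and we
                 must re-examine the state.  */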
              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

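          /* No dead owner was observed above.  lll_robust_trylock
             attempts to change the lock word from 0 to our TID and
             returns the previous value, so zero means we now own the
             mutex.  */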
          oldval = lll_robust_trylock (mutex->__data.__lock, id);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

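        /* An unlocked PI mutex has a zero lock word; try to claim it by
           installing our TID.  */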
        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

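            /* FUTEX_TRYLOCK_PI fails with EWOULDBLOCK while the mutex
               is still held by a live owner; report the mutex as
               busy.  */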
            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
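            /* The priority ceiling is encoded in the upper bits of the
               lock word; extract it so this thread's priority can be
               raised to the ceiling before taking the lock.  */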
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

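            /* Record the ceiling as this thread's priority-protection
               level; this can fail, e.g. if the ceiling is outside the
               permitted priority range.  */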
            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
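
/* Illustrative usage sketch (not part of the upstream file): a caller
   polls the mutex with pthread_mutex_trylock and does other work while
   it is busy.  The names `m' and `try_update' are hypothetical.

   #include <pthread.h>
   #include <errno.h>

   static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

   static int try_update (void)
   {
     int r = pthread_mutex_trylock (&m);
     if (r == EBUSY)
       return 0;                 // held elsewhere; retry later
     if (r != 0)
       return -r;                // EAGAIN, EDEADLK, EOWNERDEAD, ...
     // ... critical section ...
     pthread_mutex_unlock (&m);
     return 1;
   }
*/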