/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <atomic.h>
#include <futex-internal.h>
#include <stap-probe.h>
#include <shlib-compat.h>

/* Some of the following definitions differ when pthread_mutex_cond_lock.c
   includes this file.  */
#ifndef LLL_MUTEX_LOCK
/* lll_lock with single-thread optimization.  */
static inline void
lll_mutex_lock_optimized (pthread_mutex_t *mutex)
{
  /* The single-threaded optimization is only valid for private
     mutexes.  For process-shared mutexes, the mutex could be in a
     shared mapping, so synchronization with another process is needed
     even without any threads.  If the lock is already marked as
     acquired, POSIX requires that pthread_mutex_lock deadlocks for
     normal mutexes, so skip the optimization in that case as
     well.  */
  int private = PTHREAD_MUTEX_PSHARED (mutex);
  if (private == LLL_PRIVATE && SINGLE_THREAD_P && mutex->__data.__lock == 0)
    mutex->__data.__lock = 1;
  else
    lll_lock (mutex->__data.__lock, private);
}

# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_OPTIMIZED(mutex) lll_mutex_lock_optimized (mutex)
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                       PTHREAD_MUTEX_PSHARED (mutex))
# define PTHREAD_MUTEX_LOCK ___pthread_mutex_lock
# define PTHREAD_MUTEX_VERSIONS 1
#endif
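
/* When pthread_mutex_cond_lock.c includes this file, it supplies its own
   definitions of the macros above (and defines NO_INCR) so that the lock
   is acquired on behalf of a condition variable waiter.  */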

#ifndef LLL_MUTEX_READ_LOCK
# define LLL_MUTEX_READ_LOCK(mutex) \
  atomic_load_relaxed (&(mutex)->__data.__lock)
#endif
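/* The relaxed load above lets the adaptive spin loop poll the lock word
   without an atomic read-modify-write operation, which keeps spinning
   threads from bouncing the cache line between cores.  */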

static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;
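
/* Keeping the slow path for the less common mutex kinds out of line is
   a code-size/icache trade-off: the common kinds are handled entirely
   in the fast path in PTHREAD_MUTEX_LOCK below.  */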

int
PTHREAD_MUTEX_LOCK (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

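  /* Any type bit beyond the basic kind and elision flags (robust,
     priority inheritance, priority protection) selects the slow
     path.  */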
  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
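      /* If elision is enabled at run time, FORCE_ELISION retypes this
         default mutex as an elision mutex and jumps to the elision code
         below.  */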
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);
      assert (mutex->__data.__owner == 0);
    }
#if ENABLE_ELISION_SUPPORT
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision,
         as the mutex type initialization functions will not
         allow the elision flags to be set.  */
      /* Don't record owner or users for the elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
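      /* Adaptive mutex: spin in user space for a while in the hope
         that the owner releases the lock soon, then fall back to
         blocking in the kernel.  The spin budget adapts to how many
         iterations past acquisitions needed (__spins).  */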
      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }
              atomic_spin_nop ();
              if (LLL_MUTEX_READ_LOCK (mutex) != 0)
                continue;
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

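          /* Feed the observed spin count back into the per-mutex
             estimate as an exponential moving average with weight
             1/8.  */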
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

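      /* The lock word of a robust mutex encodes the owner's TID
         (FUTEX_TID_MASK) together with the FUTEX_WAITERS and
         FUTEX_OWNER_DIED flag bits, which lets the kernel hand the
         mutex over to a waiter when the owner dies.  */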
      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep
         it set to avoid lost wake-ups.  We have the same requirement in
         the simple mutex algorithm.
         We start with value zero for a normal mutex, and FUTEX_WAITERS if
         we are building the special-case mutexes for use from within
         condition variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired)
             to our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              /* We are not taking assume_other_futex_waiters into account
                 here simply because we'll set FUTEX_WAITERS anyway.  */
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fell through
                 to the end of the function, __nusers would be incremented,
                 which is not correct because the old owner has to be
                 discounted.  If we are not supposed to increment __nusers
                 we actually have to decrement it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          /* We can neither acquire the mutex, nor has its owner died.
             Thus, try to block using futexes.  Set FUTEX_WAITERS if
             necessary so that other threads are aware that there are
             potentially threads blocked on the futex.  Restart if oldval
             changed in the meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              int val = atomic_compare_and_exchange_val_acq
                (&mutex->__data.__lock, oldval | FUTEX_WAITERS, oldval);
              if (val != oldval)
                {
                  oldval = val;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters
             so that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex and reload the current lock value.  */
          futex_wait ((unsigned int *) &mutex->__data.__lock, oldval,
                      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check whether it is still
         consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    /* The PI support requires the Linux futex system call.  If that's
       not available, pthread_mutex_init should never have allowed the
       type to be set.  So it will get the default case for an invalid
       type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct
             __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
# ifdef NO_INCR
        newval |= FUTEX_WAITERS;
# endif
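        /* Try the uncontended case in user space first with a single
           CAS from zero; the kernel (FUTEX_LOCK_PI) is entered only if
           the lock word is already nonzero.  */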
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = __futex_lock_pi64 (&mutex->__data.__lock, 0 /* unused */,
                                       NULL, private);
            if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread indefinitely.  For a normal PI mutex
                   this is the self-deadlock POSIX specifies; for a
                   non-robust PI mutex whose owner died there is no way
                   to recover.  */
                while (1)
                  __futex_abstimed_wait64 (&(unsigned int){0}, 0,
                                           0 /* ignored */, NULL, private);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fell through
               to the end of the function, __nusers would be incremented,
               which is not correct because the old owner has to be
               discounted.  If we are not supposed to increment __nusers
               we actually have to decrement it here.  */
# ifdef NO_INCR
            --mutex->__data.__nusers;
# endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct
           __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

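        /* Priority-protect protocol: the upper bits of the lock word
           (PTHREAD_MUTEX_PRIO_CEILING_MASK) hold the priority ceiling
           and the low bits the lock state: ceilval means unlocked,
           ceilval | 1 locked without waiters, ceilval | 2 locked with
           possible waiters.  The thread's priority is raised to the
           ceiling via __pthread_tpp_change_priority before each
           acquisition attempt.  */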
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  futex_wait ((unsigned int *) &mutex->__data.__lock,
                              ceilval | 2,
                              PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

#if PTHREAD_MUTEX_VERSIONS
libc_hidden_ver (___pthread_mutex_lock, __pthread_mutex_lock)
# ifndef SHARED
strong_alias (___pthread_mutex_lock, __pthread_mutex_lock)
# endif
versioned_symbol (libpthread, ___pthread_mutex_lock, pthread_mutex_lock,
                  GLIBC_2_0);

# if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_lock, __pthread_mutex_lock,
               GLIBC_2_0);
# endif
#endif /* PTHREAD_MUTEX_VERSIONS */


#ifdef NO_INCR
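/* Bring the user-space bookkeeping of a private, non-robust PI mutex up
   to date after the kernel has acquired the futex on this thread's
   behalf: record this thread as the owner and, for a recursive mutex,
   bump the recursion count.  */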
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
  assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif
