1
2 /* Linuxthreads - a simple clone()-based implementation of Posix */
3 /* threads for Linux. */
4 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
5 /* */
6 /* This program is free software; you can redistribute it and/or */
7 /* modify it under the terms of the GNU Library General Public License */
8 /* as published by the Free Software Foundation; either version 2 */
9 /* of the License, or (at your option) any later version. */
10 /* */
11 /* This program is distributed in the hope that it will be useful, */
12 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
13 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
14 /* GNU Library General Public License for more details. */
15
16 /* Thread creation, initialization, and basic low-level routines */
17
18 #include <errno.h>
19 #include <stddef.h>
20 #include <stdio.h>
21 #include <stdlib.h>
22 #include <string.h>
23 #include <unistd.h>
24 #include <fcntl.h>
25 #include <sys/wait.h>
26 #include <sys/resource.h>
27 #include <sys/time.h>
28
29 #include <l4/re/env.h>
30 #include <l4/sys/task.h>
31 #include <l4/util/util.h>
32 #include <l4/re/consts.h>
33
34 #include "pthread.h"
35 #include "internals.h"
36 #include "spinlock.h"
37 #include "restart.h"
38 #include "smp.h"
39 #include <not-cancel.h>
40 #include <link.h>
41 #include <ldsodefs.h>
42
43 /* Sanity check. */
44 #if !defined __SIGRTMIN || (__SIGRTMAX - __SIGRTMIN) < 3
45 # error "Not enough real-time signals: __SIGRTMIN through __SIGRTMIN + 2 are required"
46 #endif
47
48 /* mods for uClibc: __libc_sigaction is not in any standard headers */
49 extern __typeof(sigaction) __libc_sigaction;
50
51 #if !(USE_TLS && HAVE___THREAD)
52 /* These variables are used by the setup code. */
53 extern int _errno;
54 extern int _h_errno;
55
56 # if defined __UCLIBC_HAS_IPv4__ || defined __UCLIBC_HAS_IPV6__
57 /* We need the global/static resolver state here. */
58 # include <resolv.h>
59 # undef _res
60 extern struct __res_state *__resp;
61 # endif
62 #endif
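/* Sketch (assumes the usual uClibc arrangement; names simplified): with
   this setup, errno in a multi-threaded program resolves through the
   thread descriptor instead of the globals above, roughly as

       int *__errno_location(void)
       {
         return thread_self()->p_errnop;
       }

   where p_errnop points at the global _errno for the initial thread and
   into the per-thread descriptor for all others. */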
63
64 #ifdef USE_TLS
65
66 /* We need only a few variables. */
67 #define manager_thread __pthread_manager_threadp
68 pthread_descr __pthread_manager_threadp attribute_hidden;
69
70 #else
71
72 /* Descriptor of the initial thread */
73
74 struct _pthread_descr_struct __pthread_initial_thread = {
75 .p_header.data.self = &__pthread_initial_thread,
76 .p_nextlive = &__pthread_initial_thread,
77 .p_prevlive = &__pthread_initial_thread,
78 #ifdef NOT_FOR_L4
79 .p_tid = PTHREAD_THREADS_MAX,
80 .p_lock = &__pthread_handles[0].h_lock,
81 #endif
82 .p_start_args = PTHREAD_START_ARGS_INITIALIZER(NULL),
83 #if !(USE_TLS && HAVE___THREAD)
84 .p_errnop = &_errno,
85 .p_h_errnop = &_h_errno,
86 #endif
87 .p_userstack = 1,
88 .p_resume_count = __ATOMIC_INITIALIZER,
89 .p_alloca_cutoff = __MAX_ALLOCA_CUTOFF
90 };
91
92 /* Descriptor of the manager thread; none of this is used but the error
93 variables, the p_pid and p_priority fields,
94 and the address for identification. */
95
96 #define manager_thread (&__pthread_manager_thread)
97 struct _pthread_descr_struct __pthread_manager_thread = {
98 .p_header.data.self = &__pthread_manager_thread,
99 .p_header.data.multiple_threads = 1,
100 #ifdef NOT_FOR_L4
101 .p_lock = &__pthread_handles[1].h_lock,
102 #endif
103 .p_start_args = PTHREAD_START_ARGS_INITIALIZER(__pthread_manager),
104 #if !(USE_TLS && HAVE___THREAD)
105 .p_errnop = &__pthread_manager_thread.p_errno,
106 #endif
107 #ifdef NOT_FOR_L4
108 .p_nr = 1,
109 #endif
110 .p_resume_count = __ATOMIC_INITIALIZER,
111 .p_alloca_cutoff = PTHREAD_STACK_MIN / 4
112 };
113 #endif
114
115 /* Pointer to the main thread (the father of the thread manager thread) */
116 /* Originally, this is the initial thread, but this changes after fork() */
117
118 #ifdef USE_TLS
119 pthread_descr __pthread_main_thread;
120 #else
121 pthread_descr __pthread_main_thread = &__pthread_initial_thread;
122 #endif
123
124 /* Limit between the stack of the initial thread (above) and the
125 stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
126
127 char *__pthread_initial_thread_bos;
128
129 /* Capability for sending requests to the thread manager. */
130 /* Initially invalid, meaning that the thread manager is not running. */
131
132 l4_cap_idx_t __pthread_manager_request = L4_INVALID_CAP;
133
134 int __pthread_multiple_threads attribute_hidden;
135
136 /* Other end of the pipe for sending requests to the thread manager. */
137
138 int __pthread_manager_reader;
139
140 /* Limits of the thread manager stack */
141
142 char *__pthread_manager_thread_bos;
143 char *__pthread_manager_thread_tos;
144
145 /* For process-wide exit() */
146
147 int __pthread_exit_requested;
148 int __pthread_exit_code;
149
150 /* Maximum stack size. */
151 size_t __pthread_max_stacksize;
152
153 /* Nonzero if the machine has more than one processor. */
154 int __pthread_smp_kernel;
155
156
157 #if !__ASSUME_REALTIME_SIGNALS
158 /* Pointers that select new or old suspend/resume functions
159 based on availability of rt signals. */
160
161 #ifdef NOT_FOR_L4
162 void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
163 void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
164 int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
165 #endif
166 #endif /* __ASSUME_REALTIME_SIGNALS */
167
168 /* Communicate relevant LinuxThreads constants to gdb */
169
170 #ifdef NOT_FOR_L4
171 const int __pthread_threads_max = PTHREAD_THREADS_MAX;
172 const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
173 const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
174 h_descr);
175 const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
176 p_pid);
177 #endif
178 const int __linuxthreads_pthread_sizeof_descr
179 = sizeof(struct pthread);
180
181 const int __linuxthreads_initial_report_events;
182
183 const char __linuxthreads_version[] = VERSION;
184
185 /* Forward declarations */
186
187 static void pthread_onexit_process(int retcode, void *arg);
188 #ifndef HAVE_Z_NODELETE
189 static void pthread_atexit_process(void *arg, int retcode);
190 static void pthread_atexit_retcode(void *arg, int retcode);
191 #endif
192 #ifdef NOT_FOR_L4
193 static void pthread_handle_sigcancel(int sig);
194 static void pthread_handle_sigrestart(int sig);
195 static void pthread_handle_sigdebug(int sig);
196 #endif
197
198 /* Signal numbers used for the communication.
199 In these variables we keep track of the signals actually in use. If
200 the platform does not support any real-time signals we will define the
201 values to some unreasonable value which will make all the functions
202 below fail. */
203 int __pthread_sig_restart = __SIGRTMIN;
204 int __pthread_sig_cancel = __SIGRTMIN + 1;
205 int __pthread_sig_debug = __SIGRTMIN + 2;
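/* Example: on a typical Linux configuration where __SIGRTMIN is 32, this
   reserves signal 32 for restart, 33 for cancellation and 34 for
   debugging; init_rtsigs() below falls back to SIGUSR1/SIGUSR2 when no
   real-time signals are available. */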
206
207 extern int __libc_current_sigrtmin_private (void);
208
209 #ifdef NOT_FOR_L4
210 #if !__ASSUME_REALTIME_SIGNALS
211 static int rtsigs_initialized;
212
213 static void
214 init_rtsigs (void)
215 {
216 if (rtsigs_initialized)
217 return;
218
219 if (__libc_current_sigrtmin_private () == -1)
220 {
221 __pthread_sig_restart = SIGUSR1;
222 __pthread_sig_cancel = SIGUSR2;
223 __pthread_sig_debug = 0;
224 }
225 else
226 {
227 __pthread_restart = __pthread_restart_new;
228 __pthread_suspend = __pthread_wait_for_restart_signal;
229 __pthread_timedsuspend = __pthread_timedsuspend_new;
230 }
231
232 rtsigs_initialized = 1;
233 }
234 #endif
235 #endif
236
237
238 /* Initialize the pthread library.
239 Initialization is split in two functions:
240 - a constructor function that blocks the __pthread_sig_restart signal
241 (must do this very early, since the program could capture the signal
242 mask with e.g. sigsetjmp before creating the first thread);
243 - a regular function called from pthread_create when needed. */
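/* Illustration of the hazard the constructor ordering avoids
   (application-side sketch):

       sigjmp_buf env;
       if (sigsetjmp(env, 1) == 0)    savemask != 0 captures the current
         run_threaded_work();          signal mask into env
       else
         recover();                    siglongjmp(env, 1) restores it

   If env were filled in before the library blocked __pthread_sig_restart,
   a later siglongjmp() would reinstall a mask with the restart signal
   unblocked and break the suspend/resume protocol. */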
244
245 static void pthread_initialize(void) __attribute__((constructor));
246
247 #ifndef HAVE_Z_NODELETE
248 extern void *__dso_handle __attribute__ ((weak));
249 #endif
250
251
252 #if defined USE_TLS && !defined SHARED
253 extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
254 #endif
255
256 struct pthread_functions __pthread_functions =
257 {
258 #if !(USE_TLS && HAVE___THREAD)
259 .ptr_pthread_internal_tsd_set = __pthread_internal_tsd_set,
260 .ptr_pthread_internal_tsd_get = __pthread_internal_tsd_get,
261 .ptr_pthread_internal_tsd_address = __pthread_internal_tsd_address,
262 #endif
263 #ifdef __NOT_FOR_L4__
264 .ptr_pthread_fork = __pthread_fork,
265 #else
266 .ptr_pthread_fork = NULL,
267 #endif
268 .ptr_pthread_attr_destroy = __pthread_attr_destroy,
269 .ptr_pthread_attr_init = __pthread_attr_init,
270 .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
271 .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
272 .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
273 .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
274 .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
275 .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
276 .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
277 .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
278 .ptr_pthread_attr_getscope = __pthread_attr_getscope,
279 .ptr_pthread_attr_setscope = __pthread_attr_setscope,
280 .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
281 .ptr_pthread_condattr_init = __pthread_condattr_init,
282 .ptr_pthread_cond_broadcast = __pthread_cond_broadcast,
283 .ptr_pthread_cond_destroy = __pthread_cond_destroy,
284 .ptr_pthread_cond_init = __pthread_cond_init,
285 .ptr_pthread_cond_signal = __pthread_cond_signal,
286 .ptr_pthread_cond_wait = __pthread_cond_wait,
287 .ptr_pthread_cond_timedwait = __pthread_cond_timedwait,
288 .ptr_pthread_equal = __pthread_equal,
289 .ptr___pthread_exit = __pthread_exit,
290 .ptr_pthread_getschedparam = __pthread_getschedparam,
291 .ptr_pthread_setschedparam = __pthread_setschedparam,
292 .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
293 .ptr_pthread_mutex_init = __pthread_mutex_init,
294 .ptr_pthread_mutex_lock = __pthread_mutex_lock,
295 .ptr_pthread_mutex_trylock = __pthread_mutex_trylock,
296 .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
297 .ptr_pthread_self = __pthread_self,
298 .ptr_pthread_setcancelstate = __pthread_setcancelstate,
299 .ptr_pthread_setcanceltype = __pthread_setcanceltype,
300 .ptr_pthread_do_exit = __pthread_do_exit,
301 .ptr_pthread_thread_self = __pthread_thread_self,
302 .ptr_pthread_cleanup_upto = __pthread_cleanup_upto,
303 #ifdef __NOT_FOR_L4__
304 .ptr_pthread_sigaction = __pthread_sigaction,
305 .ptr_pthread_sigwait = __pthread_sigwait,
306 .ptr_pthread_raise = __pthread_raise,
307 #else
308 .ptr_pthread_sigaction = NULL,
309 .ptr_pthread_sigwait = NULL,
310 .ptr_pthread_raise = NULL,
311 #endif
312 .ptr__pthread_cleanup_push = __pthread_cleanup_push,
313 .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
314 .ptr__pthread_cleanup_pop = __pthread_cleanup_pop,
315 .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
316 };
317 #ifdef SHARED
318 # define ptr_pthread_functions &__pthread_functions
319 #else
320 # define ptr_pthread_functions NULL
321 #endif
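/* Sketch of how libc consumes this table; the names below are
   illustrative, not the actual uClibc internals. libc remembers the
   pointer passed to __libc_pthread_init() and forwards through it, so it
   works without linking against libpthread: */
#if 0
static const struct pthread_functions *fns; /* set via __libc_pthread_init */

static int example_mutex_lock(pthread_mutex_t *m)
{
  if (fns == NULL)
    return 0;                     /* single-threaded: nothing to lock */
  return fns->ptr_pthread_mutex_lock(m);
}
#endif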
322
323 static int *__libc_multiple_threads_ptr;
324 l4_utcb_t *__pthread_first_free_handle attribute_hidden;
325
326 void
327 __l4_add_utcbs(l4_addr_t start, l4_addr_t utcbs_end)
328 {
329 l4_addr_t free_utcb = start;
330
331 l4_utcb_t **last_free = &__pthread_first_free_handle;
332 while ((l4_addr_t)free_utcb + L4_UTCB_OFFSET <= utcbs_end)
333 {
334 l4_utcb_t *u = (l4_utcb_t*)free_utcb;
335 l4_thread_regs_t *tcr = l4_utcb_tcr_u(u);
336 tcr->user[0] = 0;
337 __pthread_init_lock(handle_to_lock(u));
338 *last_free = u;
339 last_free = (l4_utcb_t**)(&tcr->user[0]);
340 free_utcb += L4_UTCB_OFFSET;
341 }
342
343 }
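/* Sketch of the matching allocation (hypothetical helper, not part of
   this file): the free list built above is threaded through user[0] of
   each free UTCB's thread control registers, so taking one is a pop;
   the real allocator would additionally serialize against concurrent
   creators: */
#if 0
static l4_utcb_t *example_alloc_utcb(void)
{
  l4_utcb_t *u = __pthread_first_free_handle;
  if (u) /* the next free UTCB was stored in user[0] by __l4_add_utcbs() */
    __pthread_first_free_handle = (l4_utcb_t *)l4_utcb_tcr_u(u)->user[0];
  return u;
}
#endif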
344
345 /* Do some minimal initialization which has to be done during the
346 startup of the C library. */
347 void
348 __pthread_initialize_minimal(void)
349 {
350 static int initialized;
351 if (initialized)
352 return;
353
354 initialized = 1;
355
356 /* initialize free list */
357 l4_fpage_t utcb_area = l4re_env()->utcb_area;
358 l4_addr_t free_utcb = l4re_env()->first_free_utcb;
359 l4_addr_t utcbs_end =
360 l4_fpage_memaddr(utcb_area) + (1UL << (l4_addr_t)l4_fpage_size(utcb_area));
361 __l4_add_utcbs(free_utcb, utcbs_end);
362 /* All UTCBs are in the free pool now, so mark first_free_utcb as no
363 * longer available. */
364 l4re_env()->first_free_utcb = ~0UL;
365
366 __pthread_init_lock(handle_to_lock(l4_utcb()));
367
368 #ifdef USE_TLS
369 pthread_descr self;
370
371 /* First of all init __pthread_handles[0] and [1] if needed. */
372 # if __LT_SPINLOCK_INIT != 0
373 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
374 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
375 # endif
376 # ifndef SHARED
377 /* Unlike in the dynamically linked case the dynamic linker has not
378 taken care of initializing the TLS data structures. */
379 __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);
380 # elif !USE___THREAD
381 if (__builtin_expect (GL(dl_tls_dtv_slotinfo_list) == NULL, 0))
382 {
383 tcbhead_t *tcbp;
384
385 /* There is no actual TLS being used, so the thread register
386 was not initialized in the dynamic linker. */
387
388 /* We need to install special hooks so that the malloc and memalign
389 calls in _dl_tls_setup and _dl_allocate_tls won't cause full
390 malloc initialization that will try to set up its thread state. */
391
392 extern void __libc_malloc_pthread_startup (bool first_time);
393 __libc_malloc_pthread_startup (true);
394
395 if (__builtin_expect (_dl_tls_setup (), 0)
396 || __builtin_expect ((tcbp = _dl_allocate_tls (NULL)) == NULL, 0))
397 {
398 static const char msg[] = "\
399 cannot allocate TLS data structures for initial thread\n";
400 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
401 msg, sizeof msg - 1));
402 abort ();
403 }
404 const char *lossage = TLS_INIT_TP (tcbp, 0);
405 if (__builtin_expect (lossage != NULL, 0))
406 {
407 static const char msg[] = "cannot set up thread-local storage: ";
408 const char nl = '\n';
409 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
410 msg, sizeof msg - 1));
411 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
412 lossage, strlen (lossage)));
413 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO, &nl, 1));
414 }
415
416 /* Though it was allocated with libc's malloc, that was done without
417 the user's __malloc_hook installed. A later realloc that uses
418 the hooks might not work with that block from the plain malloc.
419 So we record this block as unfreeable just as the dynamic linker
420 does when it allocates the DTV before the libc malloc exists. */
421 GL(dl_initial_dtv) = GET_DTV (tcbp);
422
423 __libc_malloc_pthread_startup (false);
424 }
425 # endif
426
427 self = THREAD_SELF;
428
429 /* The memory for the thread descriptor was allocated elsewhere as
430 part of the TLS allocation. We have to initialize the data
431 structure by hand. This initialization must mirror the struct
432 definition above. */
433 self->p_nextlive = self->p_prevlive = self;
434 #if defined NOT_FOR_L4
435 self->p_tid = PTHREAD_THREADS_MAX;
436 self->p_lock = &__pthread_handles[0].h_lock;
437 #endif
438 # ifndef HAVE___THREAD
439 self->p_errnop = &_errno;
440 self->p_h_errnop = &_h_errno;
441 # endif
442 /* self->p_start_args need not be initialized, it's all zero. */
443 self->p_userstack = 1;
444 # if __LT_SPINLOCK_INIT != 0
445 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
446 # endif
447 self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF;
448
449 /* Another variable which points to the thread descriptor. */
450 __pthread_main_thread = self;
451
452 /* And fill in the pointer in the __pthread_handles array. */
453 #ifdef NOT_FOR_L4
454 __pthread_handles[0].h_descr = self;
455 #endif
456
457 #else /* USE_TLS */
458
459 /* First of all init __pthread_handles[0] and [1]. */
460 # if __LT_SPINLOCK_INIT != 0
461 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
462 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
463 # endif
464 #ifdef NOT_FOR_L4
465 __pthread_handles[0].h_descr = &__pthread_initial_thread;
466 __pthread_handles[1].h_descr = &__pthread_manager_thread;
467 #endif
468
469 /* If we have special thread_self processing, initialize that for the
470 main thread now. */
471 # ifdef INIT_THREAD_SELF
472 INIT_THREAD_SELF(&__pthread_initial_thread, 0);
473 # endif
474 #endif
475
476 #if HP_TIMING_AVAIL
477 # ifdef USE_TLS
478 self->p_cpuclock_offset = GL(dl_cpuclock_offset);
479 # else
480 __pthread_initial_thread.p_cpuclock_offset = GL(dl_cpuclock_offset);
481 # endif
482 #endif
483 #ifndef NOT_FOR_L4
484 # ifdef USE_TLS
485 if (__pthread_l4_initialize_main_thread(self))
486 # else
487 if (__pthread_l4_initialize_main_thread(&__pthread_initial_thread))
488 # endif
489 exit(1);
490 #endif
491 __libc_multiple_threads_ptr = __libc_pthread_init (ptr_pthread_functions);
492 }
493
494
495 void
496 __pthread_init_max_stacksize(void)
497 {
498 #ifdef NOT_FOR_L4
499 struct rlimit limit;
500 #endif
501 size_t max_stack;
502
503 #ifdef NOT_FOR_L4
504 getrlimit(RLIMIT_STACK, &limit);
505 #ifdef FLOATING_STACKS
506 if (limit.rlim_cur == RLIM_INFINITY)
507 limit.rlim_cur = ARCH_STACK_MAX_SIZE;
508 # ifdef NEED_SEPARATE_REGISTER_STACK
509 max_stack = limit.rlim_cur / 2;
510 # else
511 max_stack = limit.rlim_cur;
512 # endif
513 #else
514 /* Play with the stack size limit to make sure that no stack ever grows
515 beyond STACK_SIZE minus one page (to act as a guard page). */
516 # ifdef NEED_SEPARATE_REGISTER_STACK
517 /* STACK_SIZE bytes hold both the main stack and register backing
518 store. The rlimit value applies to each individually. */
519 max_stack = STACK_SIZE/2 - __getpagesize ();
520 # else
521 max_stack = STACK_SIZE - __getpagesize();
522 # endif
523 if (limit.rlim_cur > max_stack) {
524 limit.rlim_cur = max_stack;
525 setrlimit(RLIMIT_STACK, &limit);
526 }
527 #endif
528 #endif
529
530 // L4
531 max_stack = STACK_SIZE - L4_PAGESIZE;
532
533 __pthread_max_stacksize = max_stack;
534 if (max_stack / 4 < __MAX_ALLOCA_CUTOFF)
535 {
536 #ifdef USE_TLS
537 pthread_descr self = THREAD_SELF;
538 self->p_alloca_cutoff = max_stack / 4;
539 #else
540 __pthread_initial_thread.p_alloca_cutoff = max_stack / 4;
541 #endif
542 }
543 }
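/* Worked example for the L4 path above, assuming the common STACK_SIZE
   of 2 MiB and L4_PAGESIZE of 4 KiB: max_stack = 0x200000 - 0x1000 =
   0x1ff000 bytes, and the initial thread's alloca cutoff becomes
   max_stack / 4 = 0x7fc00 bytes unless __MAX_ALLOCA_CUTOFF is already
   smaller. */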
544
545 /* psm: we do not have any ld.so support yet
546 * remove the USE_TLS guard if nptl is added */
547 #if defined SHARED && defined USE_TLS
548 # if USE___THREAD
549 /* When using __thread for this, we do it in libc so as not
550 to give libpthread its own TLS segment just for this. */
551 extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
552 # else
553 static void ** __attribute__ ((const))
554 __libc_dl_error_tsd (void)
555 {
556 return &thread_self ()->p_libc_specific[_LIBC_TSD_KEY_DL_ERROR];
557 }
558 # endif
559 #endif
560
561 #ifdef USE_TLS
562 static __inline__ void __attribute__((always_inline))
563 init_one_static_tls (pthread_descr descr, struct link_map *map)
564 {
565 # if defined(TLS_TCB_AT_TP)
566 dtv_t *dtv = GET_DTV (descr);
567 void *dest = (char *) descr - map->l_tls_offset;
568 # elif defined(TLS_DTV_AT_TP)
569 dtv_t *dtv = GET_DTV ((pthread_descr) ((char *) descr + TLS_PRE_TCB_SIZE));
570 void *dest = (char *) descr + map->l_tls_offset + TLS_PRE_TCB_SIZE;
571 # else
572 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
573 # endif
574
575 /* Fill in the DTV slot so that a later LD/GD access will find it. */
576 dtv[map->l_tls_modid].pointer.val = dest;
577 dtv[map->l_tls_modid].pointer.is_static = true;
578
579 /* Initialize the memory. */
580 memset (mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
581 '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
582 }
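/* Address-layout sketch for the two variants above (TP denotes the
   thread pointer; assumes the usual definitions of the macros):

   TLS_TCB_AT_TP (e.g. i386):      the TCB sits at TP and the static TLS
     dest = descr - l_tls_offset   blocks live below it, so the offset is
                                   subtracted.

   TLS_DTV_AT_TP (e.g. PowerPC):   static TLS blocks live above the TCB
     dest = descr + TLS_PRE_TCB_SIZE + l_tls_offset
                                   at positive offsets; descr itself sits
                                   TLS_PRE_TCB_SIZE below the TCB that TP
                                   points at. */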
583
584 static void
585 __pthread_init_static_tls (struct link_map *map)
586 {
587 pthread_descr th;
588
589 for (th = __pthread_main_thread->p_nextlive;
590 th != __pthread_main_thread;
591 th = th->p_nextlive)
592 {
593 init_one_static_tls(th, map);
594 }
595 }
596 #endif
597
598 static void pthread_initialize(void)
599 {
600 #ifdef NOT_USED
601 struct sigaction sa;
602 sigset_t mask;
603 #endif
604
605 /* If already done (e.g. by a constructor called earlier!), bail out */
606 if (__pthread_initial_thread_bos != NULL) return;
607 #ifdef TEST_FOR_COMPARE_AND_SWAP
608 /* Test if compare-and-swap is available */
609 __pthread_has_cas = compare_and_swap_is_available();
610 #endif
611 /* We don't need to know the bottom of the stack. Give the pointer some
612 value to signal that initialization happened. */
613 __pthread_initial_thread_bos = (void *) -1l;
614 #ifdef USE_TLS
615 #ifdef NOT_FOR_L4
616 /* Update the descriptor for the initial thread. */
617 THREAD_SETMEM (((pthread_descr) NULL), p_pid, __getpid());
618 #endif
619 # if !defined HAVE___THREAD && (defined __UCLIBC_HAS_IPv4__ || defined __UCLIBC_HAS_IPV6__)
620 /* Likewise for the resolver state _res. */
621 THREAD_SETMEM (((pthread_descr) NULL), p_resp, __resp);
622 # endif
623 #else
624 #endif
625 /* Register an exit function to kill all other threads. */
626 /* Do it early so that user-registered atexit functions are called
627 before pthread_*exit_process. */
628 #ifndef HAVE_Z_NODELETE
629 if (__builtin_expect (&__dso_handle != NULL, 1))
630 __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL,
631 __dso_handle);
632 else
633 #endif
634 __on_exit (pthread_onexit_process, NULL);
635 /* How many processors. */
636 __pthread_smp_kernel = is_smp_system ();
637
638 /* psm: we do not have any ld.so support yet
639 * remove the USE_TLS guard if nptl is added */
640 #if defined SHARED && defined USE_TLS
641 /* Transfer the old value from the dynamic linker's internal location. */
642 *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
643 GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;
644
645 /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
646 keep the lock count from the ld.so implementation. */
647 GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
648 GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
649 unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__m_count;
650 GL(dl_load_lock).mutex.__m_count = 0;
651 while (rtld_lock_count-- > 0)
652 __pthread_mutex_lock (&GL(dl_load_lock).mutex);
653 #endif
654
655 #ifdef USE_TLS
656 GL(dl_init_static_tls) = &__pthread_init_static_tls;
657 #endif
658
659 /* uClibc-specific stdio initialization for threads. */
660 {
661 FILE *fp;
662 _stdio_user_locking = 0; /* 2 if threading not initialized */
663 for (fp = _stdio_openlist; fp != NULL; fp = fp->__nextopen) {
664 if (fp->__user_locking != 1) {
665 fp->__user_locking = 0;
666 }
667 }
668 }
669 }
670
671 void __pthread_initialize(void)
672 {
673 pthread_initialize();
674 }
675
676 int __pthread_initialize_manager(void)
677 {
678 #ifdef NOT_FOR_L4
679 int manager_pipe[2];
680 int pid;
681 struct pthread_request request;
682 int report_events;
683 #endif
684 pthread_descr mgr;
685 #ifdef USE_TLS
686 tcbhead_t *tcbp;
687 #endif
688
689 __pthread_multiple_threads = 1;
690 __pthread_main_thread->header.multiple_threads = 1;
691 *__libc_multiple_threads_ptr = 1;
692
693 #ifndef HAVE_Z_NODELETE
694 if (__builtin_expect (&__dso_handle != NULL, 1))
695 __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
696 __dso_handle);
697 #endif
698
699 if (__pthread_max_stacksize == 0)
700 __pthread_init_max_stacksize ();
701 /* If basic initialization not done yet (e.g. we're called from a
702 constructor run before our constructor), do it now */
703 if (__pthread_initial_thread_bos == NULL) pthread_initialize();
704 /* Setup stack for thread manager */
705 __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
706 if (__pthread_manager_thread_bos == NULL)
707 return -1;
708 __pthread_manager_thread_tos =
709 __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
710 // L4: force 16-byte stack alignment
711 __pthread_manager_thread_tos =
712 (char *)((uintptr_t)__pthread_manager_thread_tos & ~0xfUL);
713 #ifdef NOT_FOR_L4
714 /* Setup pipe to communicate with thread manager */
715 if (pipe(manager_pipe) == -1) {
716 free(__pthread_manager_thread_bos);
717 return -1;
718 }
719 #endif
720
721 #ifdef USE_TLS
722 /* Allocate memory for the thread descriptor and the dtv. */
723 tcbp = _dl_allocate_tls (NULL);
724 if (tcbp == NULL) {
725 free(__pthread_manager_thread_bos);
726 #ifdef NOT_FOR_L4
727 close_not_cancel(manager_pipe[0]);
728 close_not_cancel(manager_pipe[1]);
729 #endif
730 return -1;
731 }
732
733 # if defined(TLS_TCB_AT_TP)
734 mgr = (pthread_descr) tcbp;
735 # elif defined(TLS_DTV_AT_TP)
736 /* pthread_descr is located right below tcbhead_t which _dl_allocate_tls
737 returns. */
738 mgr = (pthread_descr) ((char *) tcbp - TLS_PRE_TCB_SIZE);
739 # endif
740 #ifdef NOT_FOR_L4
741 __pthread_handles[1].h_descr = manager_thread = mgr;
742 #endif
743
744 /* Initialize the descriptor. */
745 #if !defined USE_TLS || !TLS_DTV_AT_TP
746 mgr->header.tcb = tcbp;
747 mgr->header.self = mgr;
748 #endif
749 mgr->header.multiple_threads = 1;
750 #ifdef NOT_FOR_L4
751 mgr->p_lock = &__pthread_handles[1].h_lock;
752 #endif
753 # ifndef HAVE___THREAD
754 mgr->p_errnop = &mgr->p_errno;
755 # endif
756 mgr->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager);
757 #ifdef NOT_FOR_L4
758 mgr->p_nr = 1;
759 #endif
760 # if __LT_SPINLOCK_INIT != 0
761 mgr->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
762 # endif
763 mgr->p_alloca_cutoff = PTHREAD_STACK_MIN / 4;
764 #else
765 mgr = &__pthread_manager_thread;
766 #endif
767
768 #ifdef NOT_FOR_L4
769 __pthread_manager_request = manager_pipe[1]; /* writing end */
770 __pthread_manager_reader = manager_pipe[0]; /* reading end */
771 #endif
772
773 /* Start the thread manager */
774 #ifdef NOT_FOR_L4
775 pid = 0;
776 #ifdef USE_TLS
777 if (__linuxthreads_initial_report_events != 0)
778 THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
779 __linuxthreads_initial_report_events);
780 report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
781 #else
782 if (__linuxthreads_initial_report_events != 0)
783 __pthread_initial_thread.p_report_events
784 = __linuxthreads_initial_report_events;
785 report_events = __pthread_initial_thread.p_report_events;
786 #endif
787 if (__builtin_expect (report_events, 0))
788 {
789 /* It's a bit more complicated. We have to report the creation of
790 the manager thread. */
791 int idx = __td_eventword (TD_CREATE);
792 uint32_t mask = __td_eventmask (TD_CREATE);
793 uint32_t event_bits;
794
795 #ifdef USE_TLS
796 event_bits = THREAD_GETMEM_NC (((pthread_descr) NULL),
797 p_eventbuf.eventmask.event_bits[idx]);
798 #else
799 event_bits = __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx];
800 #endif
801
802 if ((mask & (__pthread_threads_events.event_bits[idx] | event_bits))
803 != 0)
804 {
805 __pthread_lock(mgr->p_lock, NULL);
806
807 #ifdef NEED_SEPARATE_REGISTER_STACK
808 pid = __clone2(__pthread_manager_event,
809 (void **) __pthread_manager_thread_bos,
810 THREAD_MANAGER_STACK_SIZE,
811 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
812 mgr);
813 #elif _STACK_GROWS_UP
814 pid = __clone(__pthread_manager_event,
815 (void **) __pthread_manager_thread_bos,
816 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
817 mgr);
818 #else
819 pid = __clone(__pthread_manager_event,
820 (void **) __pthread_manager_thread_tos,
821 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
822 mgr);
823 #endif
824
825 if (pid != -1)
826 {
827 /* Now fill in the information about the new thread in
828 the newly created thread's data structure. We cannot let
829 the new thread do this since we don't know whether it was
830 already scheduled when we send the event. */
831 mgr->p_eventbuf.eventdata = mgr;
832 mgr->p_eventbuf.eventnum = TD_CREATE;
833 __pthread_last_event = mgr;
834 mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
835 mgr->p_pid = pid;
836
837 /* Now call the function which signals the event. */
838 __linuxthreads_create_event ();
839 }
840
841 /* Now restart the thread. */
842 __pthread_unlock(mgr->p_lock);
843 }
844 }
845
846 if (__builtin_expect (pid, 0) == 0)
847 {
848 #ifdef NEED_SEPARATE_REGISTER_STACK
849 pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
850 THREAD_MANAGER_STACK_SIZE,
851 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
852 #elif _STACK_GROWS_UP
853 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
854 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
855 #else
856 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
857 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
858 #endif
859 }
860 #else
861 // l4
862 int err = __pthread_start_manager(mgr);
863 #endif
864
865 if (__builtin_expect (err, 0) == -1) {
866 #ifdef USE_TLS
867 _dl_deallocate_tls (tcbp, true);
868 #endif
869 free(__pthread_manager_thread_bos);
870 #ifdef NOT_FOR_L4
871 close_not_cancel(manager_pipe[0]);
872 close_not_cancel(manager_pipe[1]);
873 #endif
874 return -1;
875 }
876 #ifdef NOT_FOR_L4
877 mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
878 mgr->p_pid = pid;
879 /* Make gdb aware of new thread manager */
880 if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
881 {
882 raise(__pthread_sig_debug);
883 /* We suspend ourself and gdb will wake us up when it is
884 ready to handle us. */
885 __pthread_wait_for_restart_signal(thread_self());
886 }
887 /* Synchronize debugging of the thread manager */
888 request.req_kind = REQ_DEBUG;
889 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
890 (char *) &request, sizeof(request)));
891 #endif
892 return 0;
893 }
894
895 /* Thread creation */
896
897 int
898 attribute_hidden
899 __pthread_create(pthread_t *thread, const pthread_attr_t *attr,
900 void * (*start_routine)(void *), void *arg)
901 {
902 pthread_descr self = thread_self();
903 struct pthread_request request;
904 int retval;
905 if (__builtin_expect (l4_is_invalid_cap(__pthread_manager_request), 0)) {
906 if (__pthread_initialize_manager() < 0)
907 return EAGAIN;
908 }
909 request.req_thread = self;
910 request.req_kind = REQ_CREATE;
911 request.req_args.create.attr = attr;
912 request.req_args.create.fn = start_routine;
913 request.req_args.create.arg = arg;
914 #ifdef NOT_FOR_L4
915 sigprocmask(SIG_SETMASK, NULL, &request.req_args.create.mask);
916 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
917 (char *) &request, sizeof(request)));
918 suspend(self);
919 #else
920 __pthread_send_manager_rq(&request, 1);
921 #endif
922 retval = THREAD_GETMEM(self, p_retcode);
923 if (__builtin_expect (retval, 0) == 0)
924 *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
925 return retval;
926 }
927 strong_alias (__pthread_create, pthread_create)
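/* Usage sketch (application side):

       static void *worker(void *arg) { return arg; }

       pthread_t t;
       int err = pthread_create(&t, NULL, worker, NULL);
       if (err != 0)            returns an errno value, e.g. EAGAIN
         return err;            when the manager cannot be started
       pthread_join(t, NULL);

   The first call starts the manager via __pthread_initialize_manager();
   every creation then becomes a REQ_CREATE request to the manager, and
   the result is fetched from p_retcode/p_retval above. */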
928
929 /* Simple operations on thread identifiers */
930
931 pthread_descr
932 attribute_hidden
933 __pthread_thread_self(void)
934 {
935 return thread_self();
936 }
937
938 pthread_t
939 attribute_hidden
940 __pthread_self(void)
941 {
942 pthread_descr self = thread_self();
943 return THREAD_GETMEM(self, p_tid);
944 }
945 strong_alias (__pthread_self, pthread_self)
946
947 int
948 attribute_hidden
949 __pthread_equal(pthread_t thread1, pthread_t thread2)
950 {
951 return thread1 == thread2;
952 }
953 strong_alias (__pthread_equal, pthread_equal)
954
955 #ifdef NOT_FOR_L4
956 /* Helper function for thread_self in the case of user-provided stacks */
957
958 #ifndef THREAD_SELF
959
960 pthread_descr
961 attribute_hidden internal_function
962 __pthread_find_self(void)
963 {
964 char * sp = CURRENT_STACK_FRAME;
965 pthread_handle h;
966
967 /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
968 the manager threads handled specially in thread_self(), so start at 2 */
969 h = __pthread_handles + 2;
970 # ifdef _STACK_GROWS_UP
971 while (! (sp >= (char *) h->h_descr && sp < (char *) h->h_descr->p_guardaddr)) h++;
972 # else
973 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
974 # endif
975 return h->h_descr;
976 }
977
978 #else
979
980 pthread_descr
981 attribute_hidden internal_function
982 __pthread_self_stack(void)
983 {
984 char *sp = CURRENT_STACK_FRAME;
985 pthread_handle h;
986
987 if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
988 return manager_thread;
989 h = __pthread_handles + 2;
990 # ifdef USE_TLS
991 # ifdef _STACK_GROWS_UP
992 while (h->h_descr == NULL
993 || ! (sp >= h->h_descr->p_stackaddr && sp < h->h_descr->p_guardaddr))
994 h++;
995 # else
996 while (h->h_descr == NULL
997 || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
998 h++;
999 # endif
1000 # else
1001 # ifdef _STACK_GROWS_UP
1002 while (! (sp >= (char *) h->h_descr && sp < h->h_descr->p_guardaddr))
1003 h++;
1004 # else
1005 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
1006 h++;
1007 # endif
1008 # endif
1009 return h->h_descr;
1010 }
1011
1012 #endif
1013
1014 /* Thread scheduling */
1015
1016 int
1017 attribute_hidden internal_function
1018 __pthread_setschedparam(pthread_t thread, int policy,
1019 const struct sched_param *param)
1020 {
1021 pthread_handle handle = thread_handle(thread);
1022 pthread_descr th;
1023
1024 __pthread_lock(&handle->h_lock, NULL);
1025 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
1026 __pthread_unlock(&handle->h_lock);
1027 return ESRCH;
1028 }
1029 th = handle->h_descr;
1030 if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
1031 0)) {
1032 __pthread_unlock(&handle->h_lock);
1033 return errno;
1034 }
1035 th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
1036 __pthread_unlock(&handle->h_lock);
1037 if (__pthread_manager_request >= 0)
1038 __pthread_manager_adjust_prio(th->p_priority);
1039 return 0;
1040 }
1041 strong_alias (__pthread_setschedparam, pthread_setschedparam)
1042
1043 int
1044 attribute_hidden internal_function
1045 __pthread_getschedparam(pthread_t thread, int *policy,
1046 struct sched_param *param)
1047 {
1048 pthread_handle handle = thread_handle(thread);
1049 int pid, pol;
1050
1051 __pthread_lock(&handle->h_lock, NULL);
1052 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
1053 __pthread_unlock(&handle->h_lock);
1054 return ESRCH;
1055 }
1056 pid = handle->h_descr->p_pid;
1057 __pthread_unlock(&handle->h_lock);
1058 pol = __sched_getscheduler(pid);
1059 if (__builtin_expect (pol, 0) == -1) return errno;
1060 if (__sched_getparam(pid, param) == -1) return errno;
1061 *policy = pol;
1062 return 0;
1063 }
1064 strong_alias (__pthread_getschedparam, pthread_getschedparam)
1065 #endif
1066
1067 /* Process-wide exit() request */
1068
1069 static void pthread_onexit_process(int retcode, void *arg)
1070 {
1071 //l4/if (__builtin_expect (__pthread_manager_request, 0) >= 0) {
1072 if (!l4_is_invalid_cap(__pthread_manager_request)) {
1073 struct pthread_request request;
1074 pthread_descr self = thread_self();
1075
1076 /* Make sure we come back here after suspend(), in case we entered
1077 from a signal handler. */
1078 //l4/THREAD_SETMEM(self, p_signal_jmp, NULL);
1079
1080 request.req_thread = self;
1081 request.req_kind = REQ_PROCESS_EXIT;
1082 request.req_args.exit.code = retcode;
1083 #ifdef NOT_FOR_L4
1084 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
1085 (char *) &request, sizeof(request)));
1086 suspend(self);
1087 #else
1088 // let pthread-manager kill all pthreads except myself
1089 // exclude the main thread
1090 __pthread_send_manager_rq(&request, 1);
1091 // kill manager thread
1092 __l4_kill_thread(__pthread_manager_request);
1093 if (self != __pthread_main_thread)
1094 {
1095 // this was not called from the main thread, so kill it as well
1096 __l4_kill_thread(__pthread_main_thread->p_th_cap);
1097 }
1098 return;
1099 #endif
1100 /* Main thread should accumulate times for thread manager and its
1101 children, so that timings for main thread account for all threads. */
1102 if (self == __pthread_main_thread)
1103 {
1104 UNIMPL();
1105 #if 0
1106 #ifdef USE_TLS
1107 waitpid(manager_thread->p_pid, NULL, __WCLONE);
1108 #else
1109 waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
1110 #endif
1111 /* Since all threads have been asynchronously terminated
1112 (possibly holding locks), free cannot be used any more.
1113 For mtrace, we'd like to print something though. */
1114 /* #ifdef USE_TLS
1115 tcbhead_t *tcbp = (tcbhead_t *) manager_thread;
1116 # if defined(TLS_DTV_AT_TP)
1117 tcbp = (tcbhead_t) ((char *) tcbp + TLS_PRE_TCB_SIZE);
1118 # endif
1119 _dl_deallocate_tls (tcbp, true);
1120 #endif
1121 free (__pthread_manager_thread_bos); */
1122 #endif
1123 __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
1124 }
1125 }
1126 }
1127
1128 #ifndef HAVE_Z_NODELETE
1129 static int __pthread_atexit_retcode;
1130
1131 static void pthread_atexit_process(void *arg, int retcode)
1132 {
1133 pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg);
1134 }
1135
1136 static void pthread_atexit_retcode(void *arg, int retcode)
1137 {
1138 __pthread_atexit_retcode = retcode;
1139 }
1140 #endif
1141
1142 #ifdef NOT_FOR_L4
1143 /* The handler for the RESTART signal just records the signal received
1144 in the thread descriptor, and optionally performs a siglongjmp
1145 (for pthread_cond_timedwait). */
1146
1147 static void pthread_handle_sigrestart(int sig)
1148 {
1149 pthread_descr self = check_thread_self();
1150 THREAD_SETMEM(self, p_signal, sig);
1151 if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
1152 siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
1153 }
1154
1155 /* The handler for the CANCEL signal checks for cancellation
1156 (in asynchronous mode), for process-wide exit and exec requests.
1157 For the thread manager thread, redirect the signal to
1158 __pthread_manager_sighandler. */
1159
1160 static void pthread_handle_sigcancel(int sig)
1161 {
1162 pthread_descr self = check_thread_self();
1163 sigjmp_buf * jmpbuf;
1164
1165 if (self == manager_thread)
1166 {
1167 __pthread_manager_sighandler(sig);
1168 return;
1169 }
1170 if (__builtin_expect (__pthread_exit_requested, 0)) {
1171 /* Main thread should accumulate times for thread manager and its
1172 children, so that timings for main thread account for all threads. */
1173 if (self == __pthread_main_thread) {
1174 #ifdef USE_TLS
1175 waitpid(manager_thread->p_pid, NULL, __WCLONE);
1176 #else
1177 waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
1178 #endif
1179 }
1180 _exit(__pthread_exit_code);
1181 }
1182 if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
1183 && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
1184 if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
1185 __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
1186 jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
1187 if (jmpbuf != NULL) {
1188 THREAD_SETMEM(self, p_cancel_jmp, NULL);
1189 siglongjmp(*jmpbuf, 1);
1190 }
1191 }
1192 }
1193
1194 /* Handler for the DEBUG signal.
1195 The debugging strategy is as follows:
1196 On reception of a REQ_DEBUG request (sent by new threads created to
1197 the thread manager under debugging mode), the thread manager throws
1198 __pthread_sig_debug to itself. The debugger (if active) intercepts
1199 this signal, takes the new threads into account, and continues execution
1200 of the thread manager by propagating the signal, since it doesn't know
1201 what the signal is specifically used for. In the current implementation,
1202 the thread manager simply discards it. */
1203
1204 static void pthread_handle_sigdebug(int sig)
1205 {
1206 /* Nothing */
1207 }
1208 #endif
1209
1210 /* Reset the state of the thread machinery after a fork().
1211 Close the pipe used for requests and set the main thread to the forked
1212 thread.
1213 Notice that we can't free the stack segments, as the forked thread
1214 may hold pointers into them. */
1215
1216 #ifdef NOT_FOR_L4
1217 void __pthread_reset_main_thread(void)
1218 {
1219 pthread_descr self = thread_self();
1220
1221 if (__pthread_manager_request != -1) {
1222 /* Free the thread manager stack */
1223 free(__pthread_manager_thread_bos);
1224 __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
1225 /* Close the two ends of the pipe */
1226 close_not_cancel(__pthread_manager_request);
1227 close_not_cancel(__pthread_manager_reader);
1228 __pthread_manager_request = __pthread_manager_reader = -1;
1229 }
1230
1231 /* Update the pid of the main thread */
1232 THREAD_SETMEM(self, p_pid, __getpid());
1233 /* Make the forked thread the main thread */
1234 __pthread_main_thread = self;
1235 THREAD_SETMEM(self, p_nextlive, self);
1236 THREAD_SETMEM(self, p_prevlive, self);
1237 #if !(USE_TLS && HAVE___THREAD)
1238 /* Now this thread modifies the global variables. */
1239 THREAD_SETMEM(self, p_errnop, &_errno);
1240 THREAD_SETMEM(self, p_h_errnop, &_h_errno);
1241 # if defined __UCLIBC_HAS_IPv4__ || defined __UCLIBC_HAS_IPV6__
1242 THREAD_SETMEM(self, p_resp, __resp);
1243 # endif
1244 #endif
1245
1246 #ifndef FLOATING_STACKS
1247 /* This is to undo the setrlimit call in __pthread_init_max_stacksize.
1248 XXX This can be wrong if the user set the limit during the run. */
1249 {
1250 struct rlimit limit;
1251 if (getrlimit (RLIMIT_STACK, &limit) == 0
1252 && limit.rlim_cur != limit.rlim_max)
1253 {
1254 limit.rlim_cur = limit.rlim_max;
1255 setrlimit(RLIMIT_STACK, &limit);
1256 }
1257 }
1258 #endif
1259 }
1260
1261 /* Process-wide exec() request */
1262
1263 void
1264 attribute_hidden internal_function
1265 __pthread_kill_other_threads_np(void)
1266 {
1267 struct sigaction sa;
1268 /* Terminate all other threads and thread manager */
1269 pthread_onexit_process(0, NULL);
1270 /* Make current thread the main thread in case the calling thread
1271 changes its mind, does not exec(), and creates new threads instead. */
1272 __pthread_reset_main_thread();
1273
1274 /* Reset the signal handlers behaviour for the signals the
1275 implementation uses since this would be passed to the new
1276 process. */
1277 memset(&sa, 0, sizeof(sa));
1278 if (SIG_DFL) /* if it's constant zero, it's already done */
1279 sa.sa_handler = SIG_DFL;
1280 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
1281 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
1282 if (__pthread_sig_debug > 0)
1283 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
1284 }
1285 weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
1286 #endif
1287
1288 /* Concurrency symbol level. */
1289 static int current_level;
1290
1291 int
1292 attribute_hidden
1293 __pthread_setconcurrency(int level)
1294 {
1295 /* We don't do anything unless we have found a useful interpretation. */
1296 current_level = level;
1297 return 0;
1298 }
1299 weak_alias (__pthread_setconcurrency, pthread_setconcurrency)
1300
1301 int
1302 attribute_hidden
1303 __pthread_getconcurrency(void)
1304 {
1305 return current_level;
1306 }
1307 weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
1308
1309 /* Primitives for controlling thread execution */
1310
1311 #ifdef NOT_FOR_L4
1312 void
1313 attribute_hidden
1314 __pthread_wait_for_restart_signal(pthread_descr self)
1315 {
1316 sigset_t mask;
1317
1318 sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
1319 sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
1320 THREAD_SETMEM(self, p_signal, 0);
1321 do {
1322 __pthread_sigsuspend(&mask); /* Wait for signal. Must not be a
1323 cancellation point. */
1324 } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);
1325
1326 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1327 }
1328
1329 #if !__ASSUME_REALTIME_SIGNALS
1330 /* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
1331 signals.
1332 On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
1333 Since the restart signal does not queue, we use an atomic counter to create
1334 queuing semantics. This is needed to resolve a rare race condition in
1335 pthread_cond_timedwait_relative. */
1336
1337 void
1338 attribute_hidden internal_function
1339 __pthread_restart_old(pthread_descr th)
1340 {
1341 if (pthread_atomic_increment(&th->p_resume_count) == -1)
1342 kill(th->p_pid, __pthread_sig_restart);
1343 }
1344
1345 void
1346 attribute_hidden internal_function
1347 __pthread_suspend_old(pthread_descr self)
1348 {
1349 if (pthread_atomic_decrement(&self->p_resume_count) <= 0)
1350 __pthread_wait_for_restart_signal(self);
1351 }
1352
1353 int
1354 attribute_hidden internal_function
1355 __pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
1356 {
1357 sigset_t unblock, initial_mask;
1358 int was_signalled = 0;
1359 sigjmp_buf jmpbuf;
1360
1361 if (pthread_atomic_decrement(&self->p_resume_count) == 0) {
1362 /* Set up a longjmp handler for the restart signal, unblock
1363 the signal and sleep. */
1364
1365 if (sigsetjmp(jmpbuf, 1) == 0) {
1366 THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
1367 THREAD_SETMEM(self, p_signal, 0);
1368 /* Unblock the restart signal */
1369 __sigemptyset(&unblock);
1370 sigaddset(&unblock, __pthread_sig_restart);
1371 sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
1372
1373 while (1) {
1374 struct timeval now;
1375 struct timespec reltime;
1376
1377 /* Compute a time offset relative to now. */
1378 __gettimeofday (&now, NULL);
1379 reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
1380 reltime.tv_sec = abstime->tv_sec - now.tv_sec;
1381 if (reltime.tv_nsec < 0) {
1382 reltime.tv_nsec += 1000000000;
1383 reltime.tv_sec -= 1;
1384 }
1385
1386 /* Sleep for the required duration. If woken by a signal,
1387 resume waiting as required by Single Unix Specification. */
1388 if (reltime.tv_sec < 0 || nanosleep(&reltime, NULL) == 0)
1389 break;
1390 }
1391
1392 /* Block the restart signal again */
1393 sigprocmask(SIG_SETMASK, &initial_mask, NULL);
1394 was_signalled = 0;
1395 } else {
1396 was_signalled = 1;
1397 }
1398 THREAD_SETMEM(self, p_signal_jmp, NULL);
1399 }
1400
1401 /* Now was_signalled is true if we exited the above code
1402 due to the delivery of a restart signal. In that case,
1403 we know we have been dequeued and resumed and that the
1404 resume count is balanced. Otherwise, there are some
1405 cases to consider. First, try to bump up the resume count
1406 back to zero. If it goes to 1, it means restart() was
1407 invoked on this thread. The signal must be consumed
1408 and the count bumped down and everything is cool. We
1409 can return a 1 to the caller.
1410 Otherwise, no restart was delivered yet, so a potential
1411 race exists; we return a 0 to the caller which must deal
1412 with this race in an appropriate way; for example by
1413 atomically removing the thread from consideration for a
1414 wakeup---if such a thing fails, it means a restart is
1415 being delivered. */
1416
1417 if (!was_signalled) {
1418 if (pthread_atomic_increment(&self->p_resume_count) != -1) {
1419 __pthread_wait_for_restart_signal(self);
1420 pthread_atomic_decrement(&self->p_resume_count); /* should be zero now! */
1421 /* woke spontaneously and consumed restart signal */
1422 return 1;
1423 }
1424 /* woke spontaneously but did not consume restart---caller must resolve */
1425 return 0;
1426 }
1427 /* woken due to restart signal */
1428 return 1;
1429 }
1430 #endif /* __ASSUME_REALTIME_SIGNALS */
1431
1432 void
1433 attribute_hidden internal_function
1434 __pthread_restart_new(pthread_descr th)
1435 {
1436 /* The barrier is probably not needed, in which case it still documents
1437 our assumptions. The intent is to commit previous writes to shared
1438 memory so the woken thread will have a consistent view. Complementary
1439 read barriers are present in the suspend functions. */
1440 WRITE_MEMORY_BARRIER();
1441 kill(th->p_pid, __pthread_sig_restart);
1442 }
1443
1444 /* There is no __pthread_suspend_new because it would just
1445 be a wasteful wrapper for __pthread_wait_for_restart_signal */
1446
1447 int
1448 attribute_hidden internal_function
1449 __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
1450 {
1451 sigset_t unblock, initial_mask;
1452 int was_signalled = 0;
1453 sigjmp_buf jmpbuf;
1454
1455 if (sigsetjmp(jmpbuf, 1) == 0) {
1456 THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
1457 THREAD_SETMEM(self, p_signal, 0);
1458 /* Unblock the restart signal */
1459 __sigemptyset(&unblock);
1460 sigaddset(&unblock, __pthread_sig_restart);
1461 sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
1462
1463 while (1) {
1464 struct timeval now;
1465 struct timespec reltime;
1466
1467 /* Compute a time offset relative to now. */
1468 __gettimeofday (&now, NULL);
1469 reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
1470 reltime.tv_sec = abstime->tv_sec - now.tv_sec;
1471 if (reltime.tv_nsec < 0) {
1472 reltime.tv_nsec += 1000000000;
1473 reltime.tv_sec -= 1;
1474 }
1475
1476 /* Sleep for the required duration. If woken by a signal,
1477 resume waiting as required by Single Unix Specification. */
1478 if (reltime.tv_sec < 0 || nanosleep(&reltime, NULL) == 0)
1479 break;
1480 }
1481
1482 /* Block the restart signal again */
1483 sigprocmask(SIG_SETMASK, &initial_mask, NULL);
1484 was_signalled = 0;
1485 } else {
1486 was_signalled = 1;
1487 }
1488 THREAD_SETMEM(self, p_signal_jmp, NULL);
1489
1490 /* Now was_signalled is true if we exited the above code
1491 due to the delivery of a restart signal. In that case,
1492 everything is cool. We have been removed from whatever
1493 we were waiting on by the other thread, and consumed its signal.
1494
1495 Otherwise, this thread woke up spontaneously, or due to a signal other
1496 than restart. This is an ambiguous case that must be resolved by
1497 the caller; the thread is still eligible for a restart wakeup
1498 so there is a race. */
1499
1500 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1501 return was_signalled;
1502 }
1503 #endif
1504
1505
1506 /* Trampoline function where threads are parked before they are destroyed
1507 in __l4_kill_thread */
1508 static void __l4_noop(void)
1509 {
1510 l4_sleep_forever();
1511 }
1512
1513 /*
1514 * Kill a thread hard.
1515 *
1516 * This function may only be used from the pthreads exit handler. It kills a
1517 * thread hard. That means the thread does not get a chance to cleanup its
1518 * resources, including locks. We rely on the microkernel to free kernel
1519 * resources when the task object is destroyed.
1520 *
1521 * Ongoing IPC is canceled so that any locks the thread may hold in the
1522 * microkernel are freed.
1523 */
1524 void __l4_kill_thread(l4_cap_idx_t cap)
1525 {
1526 /* cancel any ongoing IPC and put the thread into the __l4_noop function */
1527 l4_thread_ex_regs(cap, (l4_addr_t)__l4_noop, ~0UL, L4_THREAD_EX_REGS_CANCEL);
1528
1529 /* delete it */
1530 l4_task_delete_obj(L4RE_THIS_TASK_CAP, cap);
1531 }
1532
1533
1534 /* Debugging aid */
1535
1536 #ifdef DEBUG
1537 #include <stdarg.h>
1538
1539 void
1540 attribute_hidden internal_function
1541 __pthread_message(const char * fmt, ...)
1542 {
1543 char buffer[1024];
1544 va_list args;
1545 sprintf(buffer, "%05d : ", __getpid());
1546 va_start(args, fmt);
1547 vsnprintf(buffer + 8, sizeof(buffer) - 8, fmt, args);
1548 va_end(args);
1549 TEMP_FAILURE_RETRY(write_not_cancel(2, buffer, strlen(buffer)));
1550 }
1551
1552 #endif
1553