/* Linuxthreads - a simple clone()-based implementation of Posix        */
/* threads for Linux.                                                   */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)              */
/*                                                                      */
/* This program is free software; you can redistribute it and/or        */
/* modify it under the terms of the GNU Library General Public License  */
/* as published by the Free Software Foundation; either version 2       */
/* of the License, or (at your option) any later version.               */
/*                                                                      */
/* This program is distributed in the hope that it will be useful,      */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of       */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
/* GNU Library General Public License for more details.                 */

/* The "thread manager" thread: manages creation and termination of threads */

#ifndef PT_EI
#define PT_EI inline
#endif

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <locale.h>   /* for __uselocale */

#include <l4/sys/ipc.h>
#include <l4/re/env>
#include <l4/re/mem_alloc>
#include <l4/re/dataspace>
#include <l4/re/rm>
#include <l4/re/util/cap_alloc>
#include <l4/re/util/unique_cap>
#include <l4/sys/capability>
#include <l4/sys/debugger.h>
#include <l4/sys/factory>
#include <l4/sys/scheduler>
#include <l4/sys/thread>

extern "C" {
#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
#include "restart.h"
#include "semaphore.h"
#include "l4.h"
#include <ldsodefs.h>
}

#include <pthread-l4.h>

#define USE_L4RE_FOR_STACK
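
/* With USE_L4RE_FOR_STACK defined, thread stacks are allocated from
   L4Re dataspaces attached inside a reserved region (which also holds
   the guard area) instead of being mmap()ed; see
   pthread_allocate_stack() and pthread_l4_free_stack() below. */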

#ifndef MIN
# define MIN(a,b)  (((a) < (b)) ? (a) : (b))
#endif

extern "C" void __pthread_new_thread_entry(void);

#ifndef THREAD_SELF
/* Indicate whether at least one thread has a user-defined stack (if 1),
   or if all threads have stacks supplied by LinuxThreads (if 0). */
int __pthread_nonstandard_stacks;
#endif

/* Number of active entries in __pthread_handles (used by gdb) */
__volatile__ int __pthread_handles_num = 2;

/* Whether to use additional debugger actions for thread creation
   (set to 1 by gdb) */
__volatile__ int __pthread_threads_debug;

static pthread_descr manager_thread;

/* Mapping from stack segment to thread descriptor. */
/* Stack segment numbers are also indices into the __pthread_handles array. */
/* Stack segment number 0 is reserved for the initial thread. */

# define thread_segment(seq) NULL

/* Flag set in signal handler to record child termination */

static __volatile__ int terminated_children;

/* Flag set when the initial thread is blocked on pthread_exit waiting
   for all other threads to terminate */

static int main_thread_exiting;

/* Counter used to generate unique thread identifier.
   Thread identifier is pthread_threads_counter + segment. */

//l4/static pthread_t pthread_threads_counter;

/* Forward declarations */

static int pthread_handle_create(pthread_descr creator, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg);
static void pthread_handle_free(pthread_t th_id);
#ifdef NOT_FOR_L4
static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
     __attribute__ ((noreturn));
#else
static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode);
#endif
//l4/static void pthread_kill_all_threads(int main_thread_also);
static void pthread_for_each_thread(void *arg,
                                    void (*fn)(void *, pthread_descr));

static int pthread_exited(pthread_descr th);

/* The server thread managing requests for thread creation and termination */

int
__attribute__ ((noreturn))
__pthread_manager(void *arg)
{
  pthread_descr self = manager_thread = (pthread_descr)arg;
  struct pthread_request request;

#ifdef USE_TLS
# if defined(TLS_TCB_AT_TP)
  TLS_INIT_TP(self, 0);
# elif defined(TLS_DTV_AT_TP)
  TLS_INIT_TP((char *)self + TLS_PRE_TCB_SIZE, 0);
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
#endif
  /* If we have special thread_self processing, initialize it.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, 1);
#endif
#if !(USE_TLS && HAVE___THREAD)
  /* Set the error variable.  */
  self->p_errnop = &self->p_errno;
  self->p_h_errnop = &self->p_h_errno;
#endif
  /* Raise our priority to match that of main thread */
  __pthread_manager_adjust_prio(__pthread_main_thread->p_priority);

  l4_umword_t src;
  l4_msgtag_t tag = l4_msgtag(0, 0, 0, L4_MSGTAG_SCHEDULE);
  int do_reply = 0;
  /* Enter server loop */
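  /* The manager is a plain L4 IPC server: it blocks in an open wait,
     copies each pthread_request out of the sender's message registers,
     and replies only for request kinds that need a synchronous answer
     (do_reply != 0).  The L4_MSGTAG_SCHEDULE flag on the reply tag
     presumably asks the kernel for a regular scheduling decision
     instead of switching directly to the replied-to thread. */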
  while (1)
    {
      if (do_reply)
        tag = l4_ipc_reply_and_wait(l4_utcb(), tag, &src, L4_IPC_NEVER);
      else
        tag = l4_ipc_wait(l4_utcb(), &src, L4_IPC_NEVER);

      if (l4_msgtag_has_error(tag))
        {
          do_reply = 0;
          continue;
        }

      memcpy(&request, l4_utcb_mr()->mr, sizeof(request));

      do_reply = 0;
      switch(request.req_kind)
        {
        case REQ_CREATE:
          request.req_thread->p_retcode =
            pthread_handle_create(request.req_thread,
                                  request.req_args.create.attr,
                                  request.req_args.create.fn,
                                  request.req_args.create.arg);
          do_reply = 1;
          break;
        case REQ_FREE:
          pthread_handle_free(request.req_args.free.thread_id);
          break;
        case REQ_PROCESS_EXIT:
          pthread_handle_exit(request.req_thread,
                              request.req_args.exit.code);
          /* NOTREACHED */
          break;
        case REQ_MAIN_THREAD_EXIT:
          main_thread_exiting = 1;
          /* Reap children in case all other threads died and the signal
             handler went off before we set main_thread_exiting to 1, and
             therefore did not do REQ_KICK. */
          //l4/pthread_reap_children();

          if (__pthread_main_thread->p_nextlive == __pthread_main_thread) {
            restart(__pthread_main_thread);
            /* The main thread will now call exit() which will trigger an
               __on_exit handler, which in turn will send REQ_PROCESS_EXIT
               to the thread manager.  That is how the manager terminates
               from this loop. */
          }
          break;
        case REQ_POST:
          sem_post((sem_t*)request.req_args.post);
          break;
        case REQ_DEBUG:
#ifdef NOT_FOR_L4
          /* Make gdb aware of new thread and gdb will restart the
             new thread when it is ready to handle the new thread. */
          if (__pthread_threads_debug && __pthread_sig_debug > 0)
            raise(__pthread_sig_debug);
#else
          do_reply = 1;
#endif
          break;
        case REQ_KICK:
          /* This is just a prod to get the manager to reap some
             threads right away, avoiding a potential delay at shutdown. */
          break;
        case REQ_FOR_EACH_THREAD:
          pthread_for_each_thread(request.req_args.for_each.arg,
                                  request.req_args.for_each.fn);
          restart(request.req_thread);
          do_reply = 1;
          break;
        case REQ_THREAD_EXIT:
            {
              if (!pthread_exited(request.req_thread))
                {
                  auto th = request.req_thread;
                  /* Thread still waiting to be joined.  Only release
                     L4 resources for now. */
                  // Keep the cap slot allocated and let pthread_free() do the
                  // final cleanup.  This way, we can safely check the
                  // thread cap index for kernel object presence until
                  // pthread_join/detach() was called.
                  l4_fpage_t del_obj[2] =
                    {
                      L4::Cap<void>(th->p_thsem_cap).fpage(),
                      L4::Cap<void>(th->p_th_cap).fpage()
                    };
                  L4Re::Env::env()->task()->unmap_batch(del_obj, 2,
                                                        L4_FP_DELETE_OBJ);
                }
            }
          break;
        }
      tag = l4_msgtag(0, 0, 0, L4_MSGTAG_SCHEDULE);
    }
}

int __pthread_manager_event(void *arg)
{
  pthread_descr self = (pthread_descr)arg;
  /* If we have special thread_self processing, initialize it.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, 1);
#endif

  /* Get the lock the manager will free once all is correctly set up.  */
  __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
  /* Free it immediately.  */
  __pthread_unlock (THREAD_GETMEM(self, p_lock));

  return __pthread_manager(arg);
}

/* Process creation */

static int
__attribute__ ((noreturn))
pthread_start_thread(void *arg)
{
  pthread_descr self = (pthread_descr) arg;
#ifdef USE_TLS
# if defined(TLS_TCB_AT_TP)
  TLS_INIT_TP(self, 0);
# elif defined(TLS_DTV_AT_TP)
  TLS_INIT_TP((char *)self + TLS_PRE_TCB_SIZE, 0);
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
#endif

#ifdef NOT_FOR_L4
  struct pthread_request request;
#endif
  void * outcome;
#if HP_TIMING_AVAIL
  hp_timing_t tmpclock;
#endif
  /* Initialize special thread_self processing, if any.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
#if HP_TIMING_AVAIL
  HP_TIMING_NOW (tmpclock);
  THREAD_SETMEM (self, p_cpuclock_offset, tmpclock);
#endif

#ifdef NOT_FOR_L4
  /* Set the scheduling policy and priority for the new thread, if needed */
  if (THREAD_GETMEM(self, p_start_args.schedpolicy) >= 0)
    /* Explicit scheduling attributes were provided: apply them */
    __sched_setscheduler(THREAD_GETMEM(self, p_pid),
                         THREAD_GETMEM(self, p_start_args.schedpolicy),
                         &self->p_start_args.schedparam);
  else if (manager_thread->p_priority > 0)
    /* Default scheduling required, but thread manager runs in realtime
       scheduling: switch new thread to SCHED_OTHER policy */
    {
      struct sched_param default_params;
      default_params.sched_priority = 0;
      __sched_setscheduler(THREAD_GETMEM(self, p_pid),
                           SCHED_OTHER, &default_params);
    }
#if !(USE_TLS && HAVE___THREAD)
  /* Initialize thread-locale current locale to point to the global one.
     With __thread support, the variable's initializer takes care of this.  */
  __uselocale (LC_GLOBAL_LOCALE);
#else
  /* Initialize __resp.  */
  __resp = &self->p_res;
#endif
  /* Make gdb aware of new thread */
  if (__pthread_threads_debug && __pthread_sig_debug > 0) {
    request.req_thread = self;
    request.req_kind = REQ_DEBUG;
    TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
                                        (char *) &request, sizeof(request)));
    suspend(self);
  }
#endif
  /* Run the thread code */
  outcome = self->p_start_args.start_routine(THREAD_GETMEM(self,
                                                           p_start_args.arg));
  /* Exit with the given return value */
  __pthread_do_exit(outcome, (char *)CURRENT_STACK_FRAME);
}

#ifdef NOT_FOR_L4
static int
__attribute__ ((noreturn))
pthread_start_thread_event(void *arg)
{
  pthread_descr self = (pthread_descr) arg;

#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
  /* Make sure our pid field is initialized, just in case we get there
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Get the lock the manager will free once all is correctly set up.  */
  __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
  /* Free it immediately.  */
  __pthread_unlock (THREAD_GETMEM(self, p_lock));

  /* Continue with the real function.  */
  pthread_start_thread (arg);
}
#endif

#ifdef USE_L4RE_FOR_STACK
static int pthread_l4_free_stack(void *stack_addr, void *guardaddr)
{
  L4Re::Env const *e = L4Re::Env::env();
  int err;
  L4::Cap<L4Re::Dataspace> ds;

  err = e->rm()->detach(stack_addr, &ds);
  if (err < 0)
    return err;

  if (err == L4Re::Rm::Detached_ds)
    L4Re::Util::cap_alloc.free(ds, L4Re::This_task);

  return e->rm()->free_area((l4_addr_t)guardaddr);
}
#endif

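/* Layout of a LinuxThreads-managed stack on a downward-growing stack:
   the reserved region starts at guardaddr, the guard page(s) cover
   [guardaddr, guardaddr + guardsize), the usable stack lies above
   them, and (without TLS) the thread descriptor sits at the top:

     guardaddr          guardaddr+guardsize                  top
     | guard (unmapped) |        stack ...     | pthread_descr |
*/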
static int pthread_allocate_stack(const pthread_attr_t *attr,
                                  pthread_descr default_new_thread,
                                  int pagesize,
                                  char ** out_new_thread,
                                  char ** out_new_thread_bottom,
                                  char ** out_guardaddr,
                                  size_t * out_guardsize,
                                  size_t * out_stacksize)
{
  pthread_descr new_thread;
  char * new_thread_bottom;
  char * guardaddr;
  size_t stacksize, guardsize;

#ifdef USE_TLS
  /* TLS cannot work with fixed thread descriptor addresses.  */
  assert (default_new_thread == NULL);
#endif

  if (attr != NULL && attr->__stackaddr_set)
    {
#ifdef _STACK_GROWS_UP
      /* The user provided a stack. */
# ifdef USE_TLS
      /* This value is not needed.  */
      new_thread = (pthread_descr) attr->__stackaddr;
      new_thread_bottom = (char *) new_thread;
# else
      new_thread = (pthread_descr) attr->__stackaddr;
      new_thread_bottom = (char *) (new_thread + 1);
# endif
      guardaddr = attr->__stackaddr + attr->__stacksize;
      guardsize = 0;
#else
      /* The user provided a stack.  For now we interpret the supplied
         address as 1 + the highest addr. in the stack segment.  If a
         separate register stack is needed, we place it at the low end
         of the segment, relying on the associated stacksize to
         determine the low end of the segment.  This differs from many
         (but not all) other pthreads implementations.  The intent is
         that on machines with a single stack growing toward higher
         addresses, stackaddr would be the lowest address in the stack
         segment, so that it is consistently close to the initial sp
         value. */
# ifdef USE_TLS
      new_thread = (pthread_descr) attr->__stackaddr;
# else
      new_thread =
        (pthread_descr) ((long)(attr->__stackaddr) & -sizeof(void *)) - 1;
# endif
      new_thread_bottom = (char *) attr->__stackaddr - attr->__stacksize;
      guardaddr = new_thread_bottom;
      guardsize = 0;
#endif
#ifndef THREAD_SELF
      __pthread_nonstandard_stacks = 1;
#endif
#ifndef USE_TLS
      /* Clear the thread data structure.  */
      memset (new_thread, '\0', sizeof (*new_thread));
#endif
      stacksize = attr->__stacksize;
    }
  else
    {
      const size_t granularity = pagesize;
      void *map_addr;

      /* Allocate space for stack and thread descriptor at default address */
      if (attr != NULL)
        {
          guardsize = page_roundup (attr->__guardsize, granularity);
          stacksize = __pthread_max_stacksize - guardsize;
          stacksize = MIN (stacksize,
                           page_roundup (attr->__stacksize, granularity));
        }
      else
        {
          guardsize = granularity;
          stacksize = __pthread_max_stacksize - guardsize;
        }

#ifdef USE_L4RE_FOR_STACK
      map_addr = 0;
      L4Re::Env const *e = L4Re::Env::env();
      long err;

      if (e->rm()->reserve_area(&map_addr, stacksize + guardsize,
                                L4Re::Rm::F::Search_addr) < 0)
        return -1;

      guardaddr = (char*)map_addr;

      L4::Cap<L4Re::Dataspace> ds
        = L4Re::Util::cap_alloc.alloc<L4Re::Dataspace>();
      if (!ds.is_valid())
        return -1;

      err = e->mem_alloc()->alloc(stacksize, ds);

      if (err < 0)
        {
          L4Re::Util::cap_alloc.free(ds);
          e->rm()->free_area(l4_addr_t(map_addr));
          return -1;
        }

      new_thread_bottom = (char *) map_addr + guardsize;
      err = e->rm()->attach(&new_thread_bottom, stacksize,
                            L4Re::Rm::F::In_area | L4Re::Rm::F::RW,
                            L4::Ipc::make_cap_rw(ds), 0);

      if (err < 0)
        {
          L4Re::Util::cap_alloc.free(ds, L4Re::This_task);
          e->rm()->free_area(l4_addr_t(map_addr));
          return -1;
        }
#else
      map_addr = mmap(NULL, stacksize + guardsize,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (map_addr == MAP_FAILED)
        /* No more memory available.  */
        return -1;

      guardaddr = (char *)map_addr;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);

      new_thread_bottom = (char *) map_addr + guardsize;
#endif

#ifdef USE_TLS
      new_thread = ((pthread_descr) (new_thread_bottom + stacksize));
#else
      new_thread = ((pthread_descr) (new_thread_bottom + stacksize)) - 1;
#endif
    }
  *out_new_thread = (char *) new_thread;
  *out_new_thread_bottom = new_thread_bottom;
  *out_guardaddr = guardaddr;
  *out_guardsize = guardsize;
  *out_stacksize = stacksize;
  return 0;
}

static inline
int __pthread_mgr_create_thread(pthread_descr thread, char **tos,
                                int (*f)(void*), int prio,
                                unsigned create_flags,
                                l4_sched_cpu_set_t const &affinity)
{
  using namespace L4Re;
  Env const *e = Env::env();
  auto _t = L4Re::Util::make_unique_cap<L4::Thread>();
  if (!_t.is_valid())
    return -ENOMEM;

  auto th_sem = L4Re::Util::make_unique_cap<Th_sem_cap>();
  if (!th_sem.is_valid())
    return -ENOMEM;

  int err = l4_error(e->factory()->create(_t.get()));
  if (err < 0)
    return err;

  // needed by __alloc_thread_sem
  thread->p_th_cap = _t.cap();

  err = __alloc_thread_sem(thread, th_sem.get());
  if (err < 0)
    return err;

  thread->p_thsem_cap = th_sem.cap();

  L4::Thread::Attr attr;
  l4_utcb_t *nt_utcb = (l4_utcb_t*)thread->p_tid;

  attr.bind(nt_utcb, L4Re::This_task);
  attr.pager(e->rm());
  attr.exc_handler(e->rm());
  if ((err = l4_error(_t->control(attr))) < 0)
    {
      fprintf(stderr, "ERROR: thread control returned: %d\n", err);
      return err;
    }

  l4_utcb_tcr_u(nt_utcb)->user[0] = l4_addr_t(thread);

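  /* Build the initial stack frame consumed by __pthread_new_thread_entry:
     the thread descriptor (argument to the entry code), a 0 as fake
     return address, and the C entry function f.  The ex_regs() below
     points the new thread's stack pointer at this frame. */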
  l4_umword_t *&_tos = (l4_umword_t*&)*tos;

  *(--_tos) = l4_addr_t(thread);
  *(--_tos) = 0; /* ret addr */
  *(--_tos) = l4_addr_t(f);

  err = l4_error(_t->ex_regs(l4_addr_t(__pthread_new_thread_entry),
                             l4_addr_t(_tos), 0));

  if (err < 0)
    {
      fprintf(stderr, "ERROR: exregs returned error: %d\n", err);
      return err;
    }

  if (thread->p_start_args.start_routine
      && !(create_flags & PTHREAD_L4_ATTR_NO_START))
    {
      l4_sched_param_t sp = l4_sched_param(prio >= 0 ? prio : 2);
      sp.affinity = affinity;
      err = l4_error(e->scheduler()->run_thread(_t.get(), sp));
      if (err < 0)
        {
          fprintf(stderr,
                  "ERROR: could not start thread, run_thread returned %d\n",
                  err);
          return err;
        }
    }

  // release the automatic capabilities
  _t.release();
  th_sem.release();
  return 0;
}

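/* Grow the pool of free thread handles: reserve one page of virtual
   memory, have the kernel back it with kernel-user memory (ku_mem),
   and chain the UTCBs it contains into the free list via
   __l4_add_utcbs(). */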
static int l4pthr_get_more_utcb()
{
  using namespace L4Re;

  l4_addr_t kumem = 0;
  Env const *e = Env::env();

  if (e->rm()->reserve_area(&kumem, L4_PAGESIZE,
                            Rm::F::Reserved | Rm::F::Search_addr))
    return 1;

  if (l4_error(e->task()->add_ku_mem(l4_fpage(kumem, L4_PAGESHIFT,
                                              L4_FPAGE_RW))))
    {
      e->rm()->free_area(kumem);
      return 1;
    }

  __l4_add_utcbs(kumem, kumem + L4_PAGESIZE);
  return 0;
}

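/* A thread handle is the thread's UTCB.  Free handles form a singly
   linked list, headed by __pthread_first_free_handle and threaded
   through the user[0] word of each UTCB's thread control registers. */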
static inline l4_utcb_t *mgr_alloc_utcb()
{
  l4_utcb_t *new_utcb = __pthread_first_free_handle;
  if (!new_utcb)
    return 0;

  __pthread_first_free_handle = (l4_utcb_t*)l4_utcb_tcr_u(new_utcb)->user[0];
  return new_utcb;
}

static inline void mgr_free_utcb(l4_utcb_t *u)
{
  if (!u)
    return;

  l4_utcb_tcr_u(u)->user[0] = l4_addr_t(__pthread_first_free_handle);
  __pthread_first_free_handle = u;
}

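/* Bootstrap the manager thread itself.  Its L4 thread is created
   directly here because there is no manager yet that could service a
   REQ_CREATE request. */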
int __pthread_start_manager(pthread_descr mgr)
{
  int err;

  mgr->p_tid = mgr_alloc_utcb();

  err = __pthread_mgr_create_thread(mgr, &__pthread_manager_thread_tos,
                                    __pthread_manager, -1, 0,
                                    l4_sched_cpu_set(0, ~0, 1));
  if (err < 0)
    {
      fprintf(stderr,
              "ERROR: could not start pthread manager thread (err=%d)\n",
              err);
      exit(100);
    }

  __pthread_manager_request = mgr->p_th_cap;
  l4_debugger_set_object_name(__pthread_manager_request, "pthread-mgr");
  return 0;
}

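/* Create a new thread on behalf of `creator': allocate TLS, a thread
   handle (UTCB) and a stack, initialize the thread descriptor,
   determine the scheduling parameters, then create and (unless
   PTHREAD_L4_ATTR_NO_START is set) start the L4 thread.  On failure,
   everything acquired so far is rolled back and an errno-style value
   is returned. */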
static int pthread_handle_create(pthread_descr creator, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg)
{
  int err;
  pthread_descr new_thread;
  char *stack_addr;
  char * new_thread_bottom;
  pthread_t new_thread_id;
  char *guardaddr = NULL;
  size_t guardsize = 0, stksize = 0;
  int pagesize = L4_PAGESIZE;
  int saved_errno = 0;

#ifdef USE_TLS
  new_thread = (pthread*)_dl_allocate_tls (NULL);
  if (new_thread == NULL)
    return EAGAIN;
# if defined(TLS_DTV_AT_TP)
  /* pthread_descr is below TP.  */
  new_thread = (pthread_descr) ((char *) new_thread - TLS_PRE_TCB_SIZE);
# endif
#else
  /* Prevent warnings.  */
  new_thread = NULL;
#endif
#ifdef __NOT_FOR_L4__
  /* First check whether we have to change the policy and if yes, whether
     we can do this.  Normally this should be done by examining the
     return value of the __sched_setscheduler call in pthread_start_thread
     but this is hard to implement.  FIXME  */
  if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
    return EPERM;
#endif
  /* Find a free segment for the thread, and allocate a stack if needed */

  if (__pthread_first_free_handle == 0 && l4pthr_get_more_utcb())
    {
#ifdef USE_TLS
# if defined(TLS_DTV_AT_TP)
      new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
# endif
      _dl_deallocate_tls (new_thread, true);
#endif

      return EAGAIN;
    }

  l4_utcb_t *new_utcb = mgr_alloc_utcb();
  if (!new_utcb)
    return EAGAIN;

  new_thread_id = new_utcb;

  if (pthread_allocate_stack(attr, thread_segment(sseg),
                             pagesize, &stack_addr, &new_thread_bottom,
                             &guardaddr, &guardsize, &stksize) == 0)
    {
#ifdef USE_TLS
      new_thread->p_stackaddr = stack_addr;
#else
      new_thread = (pthread_descr) stack_addr;
#endif
    }
  else
    {
      mgr_free_utcb(new_utcb);
      return EAGAIN;
    }

  /* Allocate new thread identifier */
  /* Initialize the thread descriptor.  Elements which have to be
     initialized to zero already have this value.  */
#if !defined USE_TLS || !TLS_DTV_AT_TP
  new_thread->header.tcb = new_thread;
  new_thread->header.self = new_thread;
#endif
  new_thread->header.multiple_threads = 1;
  new_thread->p_tid = new_thread_id;
  new_thread->p_lock = handle_to_lock(new_utcb);
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
#if !(USE_TLS && HAVE___THREAD)
  new_thread->p_errnop = &new_thread->p_errno;
  new_thread->p_h_errnop = &new_thread->p_h_errno;
#endif
  new_thread->p_guardaddr = guardaddr;
  new_thread->p_guardsize = guardsize;
  new_thread->p_inheritsched
    = attr ? attr->__inheritsched : PTHREAD_INHERIT_SCHED;
  new_thread->p_alloca_cutoff = stksize / 4 > __MAX_ALLOCA_CUTOFF
                                ? __MAX_ALLOCA_CUTOFF : stksize / 4;
  /* Initialize the thread handle */
  __pthread_init_lock(handle_to_lock(new_utcb));
  /* Determine scheduling parameters for the thread */
  // If no attributes are provided, pthread_create uses default values as
  // described in pthread_attr_init.  PTHREAD_INHERIT_SCHED is the default.

  new_thread->p_sched_policy = creator->p_sched_policy;
  new_thread->p_priority = creator->p_priority;

  if (attr != NULL)
    {
      new_thread->p_detached = attr->__detachstate;
      new_thread->p_userstack = attr->__stackaddr_set;

      switch(attr->__inheritsched)
        {
        case PTHREAD_EXPLICIT_SCHED:
          new_thread->p_sched_policy = attr->__schedpolicy;
          new_thread->p_priority = attr->__schedparam.sched_priority;
          break;
        case PTHREAD_INHERIT_SCHED:
          break;
        }
    }
  int prio = -1;
  /* Set the scheduling policy and priority for the new thread, if needed */
  if (new_thread->p_sched_policy >= 0)
    {
      /* Explicit scheduling attributes were provided: apply them */
      prio = __pthread_l4_getprio(new_thread->p_sched_policy,
                                  new_thread->p_priority);
      /* Raise priority of thread manager if needed */
      __pthread_manager_adjust_prio(prio);
    }
  else if (manager_thread->p_sched_policy > 3)
    {
      /* Default scheduling required, but thread manager runs in realtime
         scheduling: switch new thread to SCHED_OTHER policy */
      prio = __pthread_l4_getprio(SCHED_OTHER, 0);
    }
  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  /* Make the new thread ID available already now.  If any of the later
     functions fail we return an error value and the caller must not use
     the stored thread ID.  */
  creator->p_retval = new_thread_id;
  /* Do the cloning.  We have to use two different functions depending
     on whether we are debugging or not.  */
  err = __pthread_mgr_create_thread(new_thread, &stack_addr,
                                    pthread_start_thread, prio,
                                    attr ? attr->create_flags : 0,
                                    attr ? attr->affinity
                                         : l4_sched_cpu_set(0, ~0, 1));
  saved_errno = -err;

  /* Check if cloning succeeded */
  if (err < 0) {
    /* Free the stack if we allocated it */
    if (attr == NULL || !attr->__stackaddr_set)
      {
#ifdef NEED_SEPARATE_REGISTER_STACK
        size_t stacksize = ((char *)(new_thread->p_guardaddr)
                            - new_thread_bottom);
        munmap((caddr_t)new_thread_bottom,
               2 * stacksize + new_thread->p_guardsize);
#elif _STACK_GROWS_UP
# ifdef USE_TLS
        size_t stacksize = guardaddr - stack_addr;
        munmap(stack_addr, stacksize + guardsize);
# else
        size_t stacksize = guardaddr - (char *)new_thread;
        munmap(new_thread, stacksize + guardsize);
# endif
#else
# ifdef USE_L4RE_FOR_STACK
        if (pthread_l4_free_stack(new_thread_bottom, guardaddr))
          fprintf(stderr, "ERROR: failed to free stack\n");
# else
#  ifdef USE_TLS
        size_t stacksize = stack_addr - new_thread_bottom;
#  else
        size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
#  endif
        munmap(new_thread_bottom - guardsize, guardsize + stacksize);
# endif
#endif
      }
#ifdef USE_TLS
# if defined(TLS_DTV_AT_TP)
    new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
# endif
    _dl_deallocate_tls (new_thread, true);
#endif
    mgr_free_utcb(new_utcb);
    return saved_errno;
  }
  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  return 0;
}


/* Try to free the resources of a thread when requested by pthread_join
   or pthread_detach on a terminated thread.  */

static void pthread_free(pthread_descr th)
{
  pthread_handle handle;
  pthread_readlock_info *iter, *next;

  ASSERT(th->p_exited);
  /* Make the handle invalid */
  handle = thread_handle(th->p_tid);
  __pthread_lock(handle_to_lock(handle), NULL);
  mgr_free_utcb(handle);
  __pthread_unlock(handle_to_lock(handle));

    {
      // Free the semaphore and the thread's kernel objects: the
      // Unique_del_cap destructors delete the objects and release the
      // capability slots when this scope is left.
      L4Re::Util::Unique_del_cap<void> s(L4::Cap<void>(th->p_thsem_cap));
      L4Re::Util::Unique_del_cap<void> t(L4::Cap<void>(th->p_th_cap));
    }

  /* One fewer threads in __pthread_handles */

  /* Destroy read lock list, and list of free read lock structures.
     If the former is not empty, it means the thread exited while
     holding read locks! */

  for (iter = th->p_readlock_list; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }

  for (iter = th->p_readlock_free; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }

  /* If initial thread, nothing to free */
  if (!th->p_userstack)
    {
      size_t guardsize = th->p_guardsize;
      /* Free the stack and thread descriptor area */
      char *guardaddr = (char*)th->p_guardaddr;
#ifdef _STACK_GROWS_UP
# ifdef USE_TLS
      size_t stacksize = guardaddr - th->p_stackaddr;
# else
      size_t stacksize = guardaddr - (char *)th;
# endif
      guardaddr = (char *)th;
#else
      /* Guardaddr is always set, even if guardsize is 0.  This allows
         us to compute everything else.  */
# ifdef USE_TLS
      //l4/size_t stacksize = th->p_stackaddr - guardaddr - guardsize;
# else
      //l4/size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
# endif
# ifdef NEED_SEPARATE_REGISTER_STACK
      /* Take account of the register stack, which is below guardaddr.  */
      guardaddr -= stacksize;
      stacksize *= 2;
# endif
#endif
#ifdef USE_L4RE_FOR_STACK
      pthread_l4_free_stack(guardaddr + guardsize, guardaddr);
#else
      munmap(guardaddr, stacksize + guardsize);
#endif
    }

#ifdef USE_TLS
# if defined(TLS_DTV_AT_TP)
  th = (pthread_descr) ((char *) th + TLS_PRE_TCB_SIZE);
# endif
  _dl_deallocate_tls (th, true);
#endif
}

/* Handle threads that have exited */

static int pthread_exited(pthread_descr th)
{
  if (th->p_exited)
    return 0;

  int detached;
  /* Remove thread from list of active threads */
  th->p_nextlive->p_prevlive = th->p_prevlive;
  th->p_prevlive->p_nextlive = th->p_nextlive;
  /* Mark thread as exited, and if detached, free its resources */
  __pthread_lock(th->p_lock, NULL);
  th->p_exited = 1;
  /* If we have to signal this event do it now.  */
  detached = th->p_detached;
  __pthread_unlock(th->p_lock);
  if (detached)
    pthread_free(th);
  /* If all threads have exited and the main thread is pending on a
     pthread_exit, wake up the main thread and terminate ourselves.  */
  if (main_thread_exiting &&
      __pthread_main_thread->p_nextlive == __pthread_main_thread) {
    restart(__pthread_main_thread);
    /* Same logic as REQ_MAIN_THREAD_EXIT. */
  }

  return detached;
}


/* Try to free the resources of a thread when requested by pthread_join
   or pthread_detach on a terminated thread.  */

static void pthread_handle_free(pthread_t th_id)
{
  pthread_handle handle = thread_handle(th_id);
  pthread_descr th;

  __pthread_lock(handle_to_lock(handle), NULL);
  if (nonexisting_handle(handle, th_id)) {
    /* pthread_reap_children has deallocated the thread already,
       nothing needs to be done */
    __pthread_unlock(handle_to_lock(handle));
    return;
  }
  th = handle_to_descr(handle);
  __pthread_unlock(handle_to_lock(handle));
  if (!pthread_exited(th))
    pthread_free(th);
}

/* Send a signal to all running threads */

#if 0
static void pthread_kill_all_threads(int main_thread_also)
{
  UNIMPL("pthread_kill_all_threads");
#if 0
  pthread_descr th;
  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    kill(th->p_pid, sig);
  }
  if (main_thread_also) {
    kill(__pthread_main_thread->p_pid, sig);
  }
#endif
}
#endif

static void pthread_for_each_thread(void *arg,
                                    void (*fn)(void *, pthread_descr))
{
  pthread_descr th;

  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    fn(arg, th);
  }

  fn(arg, __pthread_main_thread);
}

/* Process-wide exit() */

static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
{
  pthread_descr th;
  __pthread_exit_requested = 1;
  __pthread_exit_code = exitcode;
#if 0
  /* A forced asynchronous cancellation follows.  Make sure we won't
     get stuck later in the main thread with a system lock being held
     by one of the cancelled threads.  Ideally one would use the same
     code as in pthread_atfork(), but we can't distinguish system and
     user handlers there.  */
  __flockfilelist();
  /* Send the CANCEL signal to all running threads, including the main
     thread, but excluding the thread from which the exit request originated
     (that thread must complete the exit, e.g. calling atexit functions
     and flushing stdio buffers).  */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    kill(th->p_pid, __pthread_sig_cancel);
  }
  /* Now, wait for all these threads, so that they don't become zombies
     and their times are properly added to the thread manager's times.  */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    waitpid(th->p_pid, NULL, __WCLONE);
  }
  __fresetlockfiles();
#endif
#ifdef THIS_IS_THE_ORIGINAL
  restart(issuing_thread);
  _exit(0);
#else
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive)
    {
      __l4_kill_thread(th->p_th_cap);
    }

  // let caller continue
  if (l4_error(l4_ipc_send(L4_INVALID_CAP | L4_SYSF_REPLY,
                           l4_utcb(),
                           l4_msgtag(0, 0, 0, 0),
                           L4_IPC_SEND_TIMEOUT_0)))
    // assume caller has quit (and will not continue exit())
    _exit(0);
#endif
}

#if 0
/* Handler for __pthread_sig_cancel in thread manager thread */

void __pthread_manager_sighandler(int sig)
{
  int kick_manager = terminated_children == 0 && main_thread_exiting;
  terminated_children = 1;

  /* If the main thread is terminating, kick the thread manager loop
     each time some threads terminate.  This eliminates a two second
     shutdown delay caused by the thread manager sleeping in the
     call to __poll().  Instead, the thread manager is kicked into
     action, reaps the outstanding threads and resumes the main thread
     so that it can complete the shutdown.  */

  if (kick_manager) {
    struct pthread_request request;
    request.req_thread = 0;
    request.req_kind = REQ_KICK;
    TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
                                        (char *) &request, sizeof(request)));
  }
}
#endif

/* Adjust the priority of the thread manager so that it always runs at a
   priority at least as high as that of all managed threads */

void __pthread_manager_adjust_prio(int thread_prio)
{
  if (!manager_thread)
    return;

  if (thread_prio <= manager_thread->p_priority)
    return;

  l4_sched_param_t sp = l4_sched_param(thread_prio, 0);
  L4Re::Env::env()->scheduler()->run_thread(
      L4::Cap<L4::Thread>(manager_thread->p_th_cap), sp);
  manager_thread->p_priority = thread_prio;
}