From 6bee946b37c2ec46d1408fa8b0d072af95e75029 Mon Sep 17 00:00:00 2001 From: Lionel Sambuc Date: Sun, 7 Jan 2018 20:16:07 +0100 Subject: [PATCH] pthread_ enabling tls, not yet working --- lib/libc/Makefile | 3 +- lib/libc/misc/initfini.c | 2 - lib/libpthread/pthread.c | 2 - lib/libpthread/pthread_int.h | 14 +- lib/libpthread/pthread_userspace.c | 302 ++--------------------- libexec/ld.elf_so/arch/arm/mdreloc.c | 4 +- libexec/ld.elf_so/arch/i386/mdreloc.c | 8 +- libexec/ld.elf_so/map_object.c | 4 +- libexec/ld.elf_so/rtld.c | 8 +- minix/lib/libc/sys/Makefile.inc | 2 +- minix/lib/libc/sys/_minix_lwp.c | 335 ++++++++++++++++++++++++++ sys/arch/arm/include/mcontext.h | 5 + sys/arch/i386/include/mcontext.h | 5 + sys/arch/i386/include/types.h | 2 - 14 files changed, 375 insertions(+), 321 deletions(-) create mode 100644 minix/lib/libc/sys/_minix_lwp.c diff --git a/lib/libc/Makefile b/lib/libc/Makefile index 76a8a37c1..e68498f23 100644 --- a/lib/libc/Makefile +++ b/lib/libc/Makefile @@ -105,11 +105,10 @@ SUBDIR+= pkgconfig .include "${.CURDIR}/time/Makefile.inc" .if defined(__MINIX) .include "${NETBSDSRCDIR}/minix/lib/libc/sys/Makefile.inc" -.else +.endif # defined(__MINIX) .if ${RUMPRUN} != "yes" .include "${.CURDIR}/tls/Makefile.inc" .endif -.endif # defined(__MINIX) .include "${.CURDIR}/sys/Makefile.inc" .if ${HAVE_LIBGCC_EH} == "no" .include "${NETBSDSRCDIR}/sys/lib/libunwind/Makefile.inc" diff --git a/lib/libc/misc/initfini.c b/lib/libc/misc/initfini.c index 8eef018ee..7a5fd46b5 100644 --- a/lib/libc/misc/initfini.c +++ b/lib/libc/misc/initfini.c @@ -99,14 +99,12 @@ _libc_init(void) #if defined(__minix) && defined(_REENTRANT) /* Atomic operations */ __libc_atomic_init(); -#endif /* defined(__minix) && defined(_REENTRANT) */ #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) /* Initialize TLS for statically linked programs. 
*/ __libc_static_tls_setup(); #endif -#if defined(__minix) && defined(_REENTRANT) /* Threads */ __libc_thr_init(); #endif /* defined(__minix) && defined(_REENTRANT) */ diff --git a/lib/libpthread/pthread.c b/lib/libpthread/pthread.c index e6af05685..794f32bad 100644 --- a/lib/libpthread/pthread.c +++ b/lib/libpthread/pthread.c @@ -1344,7 +1344,6 @@ pthread__initmain(pthread_t *newt) 4 * pthread__pagesize / 1024); *newt = pthread__main; -#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) || defined(__minix) #if defined(_PTHREAD_GETTCB_EXT) pthread__main->pt_tls = _PTHREAD_GETTCB_EXT(); #elif defined(__HAVE___LWP_GETTCB_FAST) @@ -1353,7 +1352,6 @@ pthread__initmain(pthread_t *newt) pthread__main->pt_tls = _lwp_getprivate(); #endif pthread__main->pt_tls->tcb_pthread = pthread__main; -#endif /* defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) || defined(__minix) */ } static signed int diff --git a/lib/libpthread/pthread_int.h b/lib/libpthread/pthread_int.h index 5fedfd8c0..fa5964c69 100644 --- a/lib/libpthread/pthread_int.h +++ b/lib/libpthread/pthread_int.h @@ -95,7 +95,7 @@ struct pthread_lock_ops { struct __pthread_st { pthread_t pt_self; /* Must be first. 
*/ -#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) || defined(__minix) +#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) struct tls_tcb *pt_tls; /* Thread Local Storage area */ #endif unsigned int pt_magic; /* Magic number */ @@ -255,26 +255,17 @@ int pthread__find(pthread_t) PTHREAD_HIDE; _INITCONTEXT_U_MD(ucp) \ } while (/*CONSTCOND*/0) -#if !defined(__minix) #if !defined(__HAVE_TLS_VARIANT_I) && !defined(__HAVE_TLS_VARIANT_II) #error Either __HAVE_TLS_VARIANT_I or __HAVE_TLS_VARIANT_II must be defined #endif -#endif /* !defined(__minix) */ #ifdef _PTHREAD_GETTCB_EXT struct tls_tcb *_PTHREAD_GETTCB_EXT(void); #endif -#if defined(__minix) -struct tls_tcb { - void *tcb_pthread; -}; -#endif /* !defined(__minix) */ - static inline pthread_t __constfunc pthread__self(void) { -#if !defined(__minix) #if defined(_PTHREAD_GETTCB_EXT) struct tls_tcb * const tcb = _PTHREAD_GETTCB_EXT(); #elif defined(__HAVE___LWP_GETTCB_FAST) @@ -282,9 +273,6 @@ pthread__self(void) #else struct tls_tcb * const tcb = __lwp_getprivate_fast(); #endif -#else - struct tls_tcb * const tcb = _lwp_getprivate(); -#endif /* defined(__minix) */ return (pthread_t)tcb->tcb_pthread; } diff --git a/lib/libpthread/pthread_userspace.c b/lib/libpthread/pthread_userspace.c index d70a3dc82..bb33b041e 100644 --- a/lib/libpthread/pthread_userspace.c +++ b/lib/libpthread/pthread_userspace.c @@ -1,13 +1,9 @@ #include -#include #include -#include #include #include -#include -#include #include #include #include @@ -25,9 +21,6 @@ #define MAX_THREAD_POOL 1024 #define SLOT_IN_USE 0x0001 -void __libc_thr_init(void); -void __libc_atomic_init(void); - int _sys_sched_yield(void); int _sys_mq_send(mqd_t, const char *, size_t, unsigned); ssize_t _sys_mq_receive(mqd_t, char *, size_t, unsigned *); @@ -35,63 +28,18 @@ ssize_t _sys_mq_receive(mqd_t, char *, size_t, unsigned *); /* Work around kludge for pthread_cancelstub */ int pthread__cancel_stub_binder; -struct lwp { - int 
return_value; - int flags; - const char * name; - ucontext_t context; - struct tls_tcb tls; -}; - -static struct lwp lwp_threads[MAX_THREAD_POOL]; -static volatile lwpid_t current_thread = 0; - -static int -__minix_runnable(struct lwp* thread) -{ - uint32_t mask = (LWP_SUSPENDED | LSSLEEP | LW_UNPARKED | SLOT_IN_USE); - uint32_t runnable = (~LWP_SUSPENDED | ~LSSLEEP | LW_UNPARKED | SLOT_IN_USE); - - if ((thread->flags & mask) == runnable) { - return 1; /* Runnable */ +#if 0 +#define print(msg) \ + { \ + const char m[] = msg; \ + write(2, m, sizeof(m)); \ } +#else +#define print(m) /**/ +#endif - return 0; /* Not runnable */ -} - -static void -__minix_schedule(int signal __unused) -{ - static int last_pos = 0; - struct lwp* old = &lwp_threads[current_thread]; - int pos; - - /* Select Next thread to run. - * Simply scan the array looking for a schedulable thread, and - * loopback to the start if we reach the end. */ - for(pos = last_pos; pos != last_pos; - pos = ((pos + 1) % MAX_THREAD_POOL)) { - if (__minix_runnable(&lwp_threads[pos])) { - break; - } - } - - if (pos == last_pos) { - /* No other thread found to run, is the current one - * still runnable? */ - if (!__minix_runnable(&lwp_threads[pos])) { - return; /* "No runnable threads to schedule. */ - } - } - - /* Point the current thread to the thread picked. */ - current_thread = pos; - - /* Restore the next context of the thread picked. 
*/ - last_pos = pos; - - (void)swapcontext(&(old->context), &(lwp_threads[pos].context)); -} +extern void +__minix_schedule(int signal __unused); void __pthread_init_minix(void) __attribute__((__constructor__, __used__)); void @@ -118,231 +66,6 @@ __pthread_init_minix(void) r = setitimer(ITIMER_VIRTUAL, &nit, &oit); } -lwpid_t -_lwp_self(void) -{ - return current_thread; -} - -void -_lwp_makecontext(ucontext_t *context, void (*start_routine)(void *), - void *arg, void *private, caddr_t stack_base, size_t stack_size) -{ - /* Already done in pthread_makelwp, here for reference */ - memset(context, 0, sizeof(*context)); - _INITCONTEXT_U(context); - context->uc_stack.ss_sp = stack_base; - context->uc_stack.ss_size = stack_size; - context->uc_stack.ss_flags = 0; - context->uc_link = NULL; - - makecontext(context, start_routine, 1, arg); -} - -int -_lwp_create(const ucontext_t *context, unsigned long flags, lwpid_t *new_lwp) -{ - size_t i = 0; - - while ((i < MAX_THREAD_POOL) && - (SLOT_IN_USE == (lwp_threads[i].flags & SLOT_IN_USE))) { - i++; - } - - if (MAX_THREAD_POOL == i) { - errno = EAGAIN; - return -1; - } - - /* ADD CHECKS ON UCONTEXT */ - - memset(&lwp_threads[i], 0, sizeof(lwp_threads[i])); - lwp_threads[i].flags = flags | SLOT_IN_USE; - lwp_threads[i].context = *context; - *new_lwp = i; - - return 0; -} - -int -_lwp_suspend(lwpid_t lwp) -{ - int nb_lwp = 0; - - for (size_t i = 0; i < MAX_THREAD_POOL; i++) { - if (SLOT_IN_USE == (lwp_threads[i].flags & SLOT_IN_USE)) { - nb_lwp++; - } - } - - if (1 < nb_lwp) { - return EDEADLK; - } - - if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) { - return ESRCH; - } - - lwp_threads[lwp].flags |= LW_WSUSPEND; - - return 0; -} - -int -_lwp_continue(lwpid_t lwp) -{ - if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) { - return ESRCH; - } - - lwp_threads[lwp].flags &= ~LW_WSUSPEND; - - return 0; -} - -int -_lwp_wakeup(lwpid_t lwp) -{ - if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) { - return ESRCH; - } - - if (LSSLEEP != 
(lwp_threads[lwp].flags & LSSLEEP)) { - return ENODEV; - } - - lwp_threads[lwp].flags &= ~LSSLEEP; - - __minix_schedule(SIGVTALRM); - - return 0; -} - -int -_lwp_detach(lwpid_t lwp) -{ - if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) { - errno = ESRCH; - return -1; - } - - if (LWP_DETACHED == (lwp_threads[lwp].flags & LWP_DETACHED)) { - errno = EINVAL; - return -1; - } - - lwp_threads[lwp].flags |= LWP_DETACHED; - - return 0; -} - -int -_lwp_setname(lwpid_t target, const char * name) -{ - /* Name is already a copy for our use. */ - lwp_threads[target].name = name; - - return 0; -} - -int -_lwp_getname(lwpid_t target, char * name, size_t len) -{ - return strlcpy(name, lwp_threads[target].name, len) < len ? 0 : -1; -} - -void * -_lwp_getprivate(void) -{ - return &lwp_threads[current_thread].tls; -} - -void -_lwp_setprivate(void *cookie) -{ - /* Not supported */ -} - -int -_lwp_unpark(lwpid_t lwp, const void * hint) -{ - if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) { - errno = ESRCH; - return -1; - } - - lwp_threads[lwp].flags |= LW_UNPARKED; - - return 0; -} - -ssize_t -_lwp_unpark_all(const lwpid_t * targets, size_t ntargets, const void * hint) -{ - if (NULL == targets) { - return MAX_THREAD_POOL; - } - - if (MAX_THREAD_POOL <= ntargets) { - errno = EINVAL; - return -1; - } - - for (size_t i = 0; i < ntargets; i++) { - lwp_threads[targets[i]].flags |= LW_UNPARKED; - } - - return 0; -} - -int -_lwp_park(clockid_t clock_id, int i, const struct timespec * ts, lwpid_t thread, - const void *cookie, const void *cookie2) -{ - // FIXME - return -1; -} - -int -_lwp_wait(lwpid_t wlwp, lwpid_t *rlwp) -{ - // FIXME - return -1; -} - -int -_lwp_kill(lwpid_t thread, int signal) -{ - // FIXME - errno = ESRCH; - return -1; -} - -int -_lwp_exit(void) -{ - lwp_threads[current_thread].flags &= ~SLOT_IN_USE; - __minix_schedule(SIGVTALRM); - - /* We reach this only if there is nothing left to schedule. 
*/ - exit(0); -} - -int -_lwp_ctl(int features, struct lwpctl **address) -{ - /* LSC Add stuff to actually do something with this. */ - *address = malloc(sizeof(struct lwpctl)); - if (NULL == *address) { - return -1; - } - - memset(*address, 0, sizeof(struct lwpctl)); - - // FIXME - return 0; -} - int _sys_sched_yield(void) { @@ -360,30 +83,35 @@ sched_yield(void) int _sched_setaffinity(pid_t a, lwpid_t b, size_t c, const cpuset_t *d) { + print("_sched_setaffinity\n"); return -1; } int _sched_getaffinity(pid_t a, lwpid_t b, size_t c, cpuset_t *d) { + print("_sched_getaffinity\n"); return -1; } int _sched_setparam(pid_t a, lwpid_t b, int c, const struct sched_param *d) { + print("_sched_setparam\n"); return -1; } int _sched_getparam(pid_t a, lwpid_t b, int *c, struct sched_param *d) { + print("_sched_getparam\n"); return -1; } int rasctl(void *addr, size_t len, int op) { + print("rasctl\n"); errno = EOPNOTSUPP; return -1; } diff --git a/libexec/ld.elf_so/arch/arm/mdreloc.c b/libexec/ld.elf_so/arch/arm/mdreloc.c index 44b8643e9..b8ea86063 100644 --- a/libexec/ld.elf_so/arch/arm/mdreloc.c +++ b/libexec/ld.elf_so/arch/arm/mdreloc.c @@ -179,7 +179,7 @@ _rtld_relocate_nonplt_objects(Obj_Entry *obj) rdbg(("COPY (avoid in main)")); break; -#if defined(__minix) && defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) +#if defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) case R_TYPE(TLS_DTPOFF32): def = _rtld_find_symdef(symnum, obj, &defobj, false); if (def == NULL) @@ -232,7 +232,7 @@ _rtld_relocate_nonplt_objects(Obj_Entry *obj) obj->strtab + obj->symtab[symnum].st_name, obj->path, (void *)tmp)); break; -#endif /* defined(__minix) && defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) */ +#endif /* defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) */ default: rdbg(("sym = %lu, type = %lu, offset = %p, " diff --git a/libexec/ld.elf_so/arch/i386/mdreloc.c 
b/libexec/ld.elf_so/arch/i386/mdreloc.c index 1ef4d0b57..22da6d944 100644 --- a/libexec/ld.elf_so/arch/i386/mdreloc.c +++ b/libexec/ld.elf_so/arch/i386/mdreloc.c @@ -120,7 +120,7 @@ _rtld_relocate_nonplt_objects(Obj_Entry *obj) rdbg(("COPY (avoid in main)")); break; -#if defined(__minix) && defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) +#if defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) case R_TYPE(TLS_TPOFF): def = _rtld_find_symdef(symnum, obj, &defobj, false); if (def == NULL) @@ -176,7 +176,7 @@ _rtld_relocate_nonplt_objects(Obj_Entry *obj) obj->path, (void *)*where)); break; -#endif /* defined(__minix) && defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) */ +#endif /* defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) */ default: rdbg(("sym = %lu, type = %lu, offset = %p, " @@ -283,7 +283,7 @@ _rtld_relocate_plt_objects(const Obj_Entry *obj) return err; } -#if defined(__minix) && defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) +#if defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) /* * i386 specific GNU variant of __tls_get_addr using register based * argument passing. 
@@ -305,4 +305,4 @@ ___tls_get_addr(void *arg_) return _rtld_tls_get_addr(tcb, idx, offset); } -#endif /* defined(__minix) && defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) */ +#endif /* defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) */ diff --git a/libexec/ld.elf_so/map_object.c b/libexec/ld.elf_so/map_object.c index ae13c4f09..6a0f7985d 100644 --- a/libexec/ld.elf_so/map_object.c +++ b/libexec/ld.elf_so/map_object.c @@ -95,10 +95,10 @@ _rtld_map_object(const char *path, int fd, const struct stat *sb) #endif Elf_Addr phdr_vaddr; size_t phdr_memsz; -#if defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) +#if !defined(__minix) caddr_t gap_addr; size_t gap_size; -#endif /* defined(__minix) */ +#endif /* !defined(__minix) */ int i; #ifdef RTLD_LOADER Elf_Addr clear_vaddr; diff --git a/libexec/ld.elf_so/rtld.c b/libexec/ld.elf_so/rtld.c index b99f85798..7c07e9551 100644 --- a/libexec/ld.elf_so/rtld.c +++ b/libexec/ld.elf_so/rtld.c @@ -111,9 +111,9 @@ static void *auxinfo; char *__progname; char **environ; -#if !defined(__minix) +#if defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) static volatile bool _rtld_mutex_may_recurse; -#endif /* !defined(__minix) */ +#endif /* defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) */ #if defined(RTLD_DEBUG) #ifndef __sh__ @@ -1524,7 +1524,7 @@ _rtld_objlist_remove(Objlist *list, Obj_Entry *obj) } } -#if defined(__minix) +#if defined(__minix) && !(defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) void _rtld_shared_enter(void) {} void _rtld_shared_exit(void) {} void _rtld_exclusive_enter(sigset_t *mask) {} @@ -1655,4 +1655,4 @@ _rtld_exclusive_exit(sigset_t *mask) sigprocmask(SIG_SETMASK, mask, NULL); } -#endif /* defined(__minix) && !(defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) */ diff --git 
a/minix/lib/libc/sys/Makefile.inc b/minix/lib/libc/sys/Makefile.inc index 096107be8..33e418879 100644 --- a/minix/lib/libc/sys/Makefile.inc +++ b/minix/lib/libc/sys/Makefile.inc @@ -23,7 +23,7 @@ SRCS+= accept.c access.c adjtime.c bind.c brk.c sbrk.c m_closefrom.c getsid.c \ wait4.c write.c \ utimensat.c utimes.c futimes.c lutimes.c futimens.c \ _exit.c _ucontext.c environ.c __getcwd.c vfork.c sizeup.c init.c \ - getrusage.c setrlimit.c setpgid.c __sysctl.c + getrusage.c setrlimit.c setpgid.c __sysctl.c _minix_lwp.c _lwp.c # Minix specific syscalls / utils. SRCS+= kernel_utils.c sprofile.c stack_utils.c _mcontext.c diff --git a/minix/lib/libc/sys/_minix_lwp.c b/minix/lib/libc/sys/_minix_lwp.c new file mode 100644 index 000000000..26960feb5 --- /dev/null +++ b/minix/lib/libc/sys/_minix_lwp.c @@ -0,0 +1,335 @@ +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define MAX_THREAD_POOL 1024 +#define SLOT_IN_USE 0x0001 + +struct lwp { + int return_value; + int flags; + const char * name; + ucontext_t context; + struct tls_tcb *tls; +}; + +static struct lwp lwp_threads[MAX_THREAD_POOL]; +static volatile lwpid_t current_thread = 0; + +void +__minix_schedule(int signal __unused); + +#if 0 +#define print(msg) \ + { \ + const char m[] = msg; \ + write(2, m, sizeof(m)); \ + } +#else +#define print(m) /**/ +#endif + +static int +__minix_runnable(struct lwp* thread) +{ + uint32_t mask = (LWP_SUSPENDED | LSSLEEP | LW_UNPARKED | SLOT_IN_USE); + uint32_t runnable = (LW_UNPARKED | SLOT_IN_USE); + + if ((thread->flags & mask) == runnable) { + return 1; /* Runnable */ + } + + return 0; /* Not runnable */ +} + +void +__minix_schedule(int signal __unused) +{ + static int last_pos = 0; + struct lwp* old = &lwp_threads[current_thread]; + int pos; + + /* Select Next thread to run. + * Simply scan the array looking for a schedulable thread, and + * loopback to the start if we reach the end. 
*/ + for(pos = last_pos; pos != last_pos; + pos = ((pos + 1) % MAX_THREAD_POOL)) { + if (__minix_runnable(&lwp_threads[pos])) { + break; + } + } + + if (pos == last_pos) { + /* No other thread found to run, is the current one + * still runnable? */ + if (!__minix_runnable(&lwp_threads[pos])) { + print("__minix_schedule no switch\n"); + return; /* No runnable threads to schedule. */ + } + } + + print("__minix_schedule switch\n"); + /* Point the current thread to the thread picked. */ + current_thread = pos; + + /* Restore the next context of the thread picked. */ + last_pos = pos; + + (void)swapcontext(&(old->context), &(lwp_threads[pos].context)); +} + +lwpid_t +_lwp_self(void) +{ + return current_thread; +} +#if 0 +void +_lwp_makecontext(ucontext_t *context, void (*start_routine)(void *), + void *arg, void *private, caddr_t stack_base, size_t stack_size) +{ + /* Already done in pthread_makelwp, here for reference */ + memset(context, 0, sizeof(*context)); + context->uc_flags = _UC_CPU | _UC_STACK; + context->uc_stack.ss_sp = stack_base; + context->uc_stack.ss_size = stack_size; + context->uc_stack.ss_flags = 0; + context->uc_link = NULL; + + makecontext(context, start_routine, 1, arg); +} +#endif +int +_lwp_create(const ucontext_t *context, unsigned long flags, lwpid_t *new_lwp) +{ + size_t i = 0; + + while ((i < MAX_THREAD_POOL) && + (SLOT_IN_USE == (lwp_threads[i].flags & SLOT_IN_USE))) { + i++; + } + + if (MAX_THREAD_POOL == i) { + errno = EAGAIN; + return -1; + } + + /* ADD CHECKS ON UCONTEXT */ + + memset(&lwp_threads[i], 0, sizeof(lwp_threads[i])); + lwp_threads[i].flags = flags | SLOT_IN_USE; + lwp_threads[i].context = *context; + *new_lwp = i; + + return 0; +} + +int +_lwp_suspend(lwpid_t lwp) +{ + int nb_lwp = 0; + + print("_lwp_suspend\n"); + for (size_t i = 0; i < MAX_THREAD_POOL; i++) { + if (SLOT_IN_USE == (lwp_threads[i].flags & SLOT_IN_USE)) { + nb_lwp++; + } + } + + if (1 < nb_lwp) { + return EDEADLK; + } + + if ((MAX_THREAD_POOL <= lwp) || (lwp 
< 0)) { + return ESRCH; + } + + lwp_threads[lwp].flags |= LW_WSUSPEND; + + return 0; +} + +int +_lwp_continue(lwpid_t lwp) +{ + print("_lwp_continue\n"); + if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) { + return ESRCH; + } + + lwp_threads[lwp].flags &= ~LW_WSUSPEND; + + return 0; +} + +int +_lwp_wakeup(lwpid_t lwp) +{ + print("_lwp_wakeup\n"); + if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) { + return ESRCH; + } + + print("_lwp_wakeup 2\n"); + if (LSSLEEP != (lwp_threads[lwp].flags & LSSLEEP)) { + return ENODEV; + } + + lwp_threads[lwp].flags &= ~LSSLEEP; + + print("_lwp_wakeup 3\n"); + __minix_schedule(SIGVTALRM); + + print("_lwp_wakeup 4\n"); + return 0; +} + +int +_lwp_detach(lwpid_t lwp) +{ + print("_lwp_detach\n"); + if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) { + errno = ESRCH; + return -1; + } + + if (LWP_DETACHED == (lwp_threads[lwp].flags & LWP_DETACHED)) { + errno = EINVAL; + return -1; + } + + lwp_threads[lwp].flags |= LWP_DETACHED; + + return 0; +} + +int +_lwp_setname(lwpid_t target, const char * name) +{ + print("_lwp_setname\n"); + /* Name is already a copy for our use. */ + lwp_threads[target].name = name; + + return 0; +} + +int +_lwp_getname(lwpid_t target, char * name, size_t len) +{ + print("_lwp_getname\n"); + return strlcpy(name, lwp_threads[target].name, len) < len ? 
0 : -1; +} + +void * +_lwp_getprivate(void) +{ +// print("_lwp_getprivate\n"); + return lwp_threads[current_thread].tls; +} + +void +_lwp_setprivate(void *cookie) +{ + print("_lwp_setprivate\n"); + lwp_threads[current_thread].tls = cookie; +} + +int +_lwp_unpark(lwpid_t lwp, const void * hint) +{ + print("_lwp_unpark\n"); + if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) { + errno = ESRCH; + return -1; + } + + lwp_threads[lwp].flags |= LW_UNPARKED; + + return 0; +} + +ssize_t +_lwp_unpark_all(const lwpid_t * targets, size_t ntargets, const void * hint) +{ + print("_lwp_unpark_all\n"); + if (NULL == targets) { + return MAX_THREAD_POOL; + } + + if (MAX_THREAD_POOL <= ntargets) { + errno = EINVAL; + return -1; + } + + for (size_t i = 0; i < ntargets; i++) { + lwp_threads[targets[i]].flags |= LW_UNPARKED; + } + + return 0; +} + +int +_lwp_park(clockid_t clock_id, int i, const struct timespec * ts, lwpid_t thread, + const void *cookie, const void *cookie2) +{ + print("_lwp_park\n"); + // FIXME + return -1; +} + +int +_lwp_wait(lwpid_t wlwp, lwpid_t *rlwp) +{ + print("_lwp_wait\n"); + // FIXME + return -1; +} + +int +_lwp_kill(lwpid_t thread, int signal) +{ + print("_lwp_kill\n"); + // FIXME + errno = ESRCH; + return -1; +} + +int +_lwp_exit(void) +{ + print("_lwp_exit\n"); + lwp_threads[current_thread].flags &= ~SLOT_IN_USE; + __minix_schedule(SIGVTALRM); + + /* We reach this only if there is nothing left to schedule. */ + exit(0); +} + +int +_lwp_ctl(int features, struct lwpctl **address) +{ + print("_lwp_ctl\n"); + /* LSC Add stuff to actually do something with this. 
*/ + *address = malloc(sizeof(struct lwpctl)); + if (NULL == *address) { + return -1; + } + + memset(*address, 0, sizeof(struct lwpctl)); + + // FIXME + return 0; +} diff --git a/sys/arch/arm/include/mcontext.h b/sys/arch/arm/include/mcontext.h index 9dbdac81a..4e0793d1a 100644 --- a/sys/arch/arm/include/mcontext.h +++ b/sys/arch/arm/include/mcontext.h @@ -156,6 +156,10 @@ __BEGIN_DECLS static __inline void * __lwp_getprivate_fast(void) { +#if defined(__minix) + extern void *_lwp_getprivate(void); + return _lwp_getprivate(); +#else #if !defined(__thumb__) || defined(_ARM_ARCH_T2) extern void *_lwp_getprivate(void); void *rv; @@ -173,6 +177,7 @@ __lwp_getprivate_fast(void) extern void *__aeabi_read_tp(void); return __aeabi_read_tp(); #endif /* !__thumb__ || _ARM_ARCH_T2 */ +#endif /* defined(__minix) */ } #if defined(_KERNEL) diff --git a/sys/arch/i386/include/mcontext.h b/sys/arch/i386/include/mcontext.h index 6eebe2eed..625d90f0c 100644 --- a/sys/arch/i386/include/mcontext.h +++ b/sys/arch/i386/include/mcontext.h @@ -148,11 +148,16 @@ int getmcontext(mcontext_t *mcp); static __inline void * __lwp_getprivate_fast(void) { +#if defined(__minix) + extern void *_lwp_getprivate(void); + return _lwp_getprivate(); +#else void *__tmp; __asm volatile("movl %%gs:0, %0" : "=r" (__tmp)); return __tmp; +#endif /* defined(__minix) */ } #endif /* !_I386_MCONTEXT_H_ */ diff --git a/sys/arch/i386/include/types.h b/sys/arch/i386/include/types.h index 4427ed761..1441a61cb 100644 --- a/sys/arch/i386/include/types.h +++ b/sys/arch/i386/include/types.h @@ -124,9 +124,7 @@ typedef unsigned char __cpu_simple_lock_nv_t; #define __HAVE_INTR_CONTROL #define __HAVE_MM_MD_OPEN #define __HAVE___LWP_GETPRIVATE_FAST -#if !defined(__minix) #define __HAVE_TLS_VARIANT_II -#endif /* !defined(__minix) */ #define __HAVE_COMMON___TLS_GET_ADDR #if defined(_KERNEL)