pthread: enabling TLS, not yet working

This commit is contained in:
2018-01-07 20:16:07 +01:00
parent 8e9c072e44
commit 6bee946b37
14 changed files with 375 additions and 321 deletions

View File

@@ -105,11 +105,10 @@ SUBDIR+= pkgconfig
.include "${.CURDIR}/time/Makefile.inc"
.if defined(__MINIX)
.include "${NETBSDSRCDIR}/minix/lib/libc/sys/Makefile.inc"
.else
.endif # defined(__MINIX)
.if ${RUMPRUN} != "yes"
.include "${.CURDIR}/tls/Makefile.inc"
.endif
.endif # defined(__MINIX)
.include "${.CURDIR}/sys/Makefile.inc"
.if ${HAVE_LIBGCC_EH} == "no"
.include "${NETBSDSRCDIR}/sys/lib/libunwind/Makefile.inc"

View File

@@ -99,14 +99,12 @@ _libc_init(void)
#if defined(__minix) && defined(_REENTRANT)
/* Atomic operations */
__libc_atomic_init();
#endif /* defined(__minix) && defined(_REENTRANT) */
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
/* Initialize TLS for statically linked programs. */
__libc_static_tls_setup();
#endif
#if defined(__minix) && defined(_REENTRANT)
/* Threads */
__libc_thr_init();
#endif /* defined(__minix) && defined(_REENTRANT) */

View File

@@ -1344,7 +1344,6 @@ pthread__initmain(pthread_t *newt)
4 * pthread__pagesize / 1024);
*newt = pthread__main;
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) || defined(__minix)
#if defined(_PTHREAD_GETTCB_EXT)
pthread__main->pt_tls = _PTHREAD_GETTCB_EXT();
#elif defined(__HAVE___LWP_GETTCB_FAST)
@@ -1353,7 +1352,6 @@ pthread__initmain(pthread_t *newt)
pthread__main->pt_tls = _lwp_getprivate();
#endif
pthread__main->pt_tls->tcb_pthread = pthread__main;
#endif /* defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) || defined(__minix) */
}
static signed int

View File

@@ -95,7 +95,7 @@ struct pthread_lock_ops {
struct __pthread_st {
pthread_t pt_self; /* Must be first. */
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) || defined(__minix)
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
struct tls_tcb *pt_tls; /* Thread Local Storage area */
#endif
unsigned int pt_magic; /* Magic number */
@@ -255,26 +255,17 @@ int pthread__find(pthread_t) PTHREAD_HIDE;
_INITCONTEXT_U_MD(ucp) \
} while (/*CONSTCOND*/0)
#if !defined(__minix)
#if !defined(__HAVE_TLS_VARIANT_I) && !defined(__HAVE_TLS_VARIANT_II)
#error Either __HAVE_TLS_VARIANT_I or __HAVE_TLS_VARIANT_II must be defined
#endif
#endif /* !defined(__minix) */
#ifdef _PTHREAD_GETTCB_EXT
struct tls_tcb *_PTHREAD_GETTCB_EXT(void);
#endif
#if defined(__minix)
struct tls_tcb {
void *tcb_pthread;
};
#endif /* defined(__minix) */
static inline pthread_t __constfunc
pthread__self(void)
{
#if !defined(__minix)
#if defined(_PTHREAD_GETTCB_EXT)
struct tls_tcb * const tcb = _PTHREAD_GETTCB_EXT();
#elif defined(__HAVE___LWP_GETTCB_FAST)
@@ -282,9 +273,6 @@ pthread__self(void)
#else
struct tls_tcb * const tcb = __lwp_getprivate_fast();
#endif
#else
struct tls_tcb * const tcb = _lwp_getprivate();
#endif /* defined(__minix) */
return (pthread_t)tcb->tcb_pthread;
}

View File

@@ -1,13 +1,9 @@
#include <sys/cdefs.h>
#include <sys/aio.h>
#include <sys/lwp.h>
#include <sys/lwpctl.h>
#include <sys/param.h>
#include <sys/ras.h>
#include <sys/syscall.h>
#include <assert.h>
#include <lwp.h>
#include <sched.h>
#include <signal.h>
@@ -25,9 +21,6 @@
#define MAX_THREAD_POOL 1024
#define SLOT_IN_USE 0x0001
void __libc_thr_init(void);
void __libc_atomic_init(void);
int _sys_sched_yield(void);
int _sys_mq_send(mqd_t, const char *, size_t, unsigned);
ssize_t _sys_mq_receive(mqd_t, char *, size_t, unsigned *);
@@ -35,63 +28,18 @@ ssize_t _sys_mq_receive(mqd_t, char *, size_t, unsigned *);
/* Work around kludge for pthread_cancelstub */
int pthread__cancel_stub_binder;
struct lwp {
int return_value;
int flags;
const char * name;
ucontext_t context;
struct tls_tcb tls;
};
static struct lwp lwp_threads[MAX_THREAD_POOL];
static volatile lwpid_t current_thread = 0;
static int
__minix_runnable(struct lwp* thread)
{
uint32_t mask = (LWP_SUSPENDED | LSSLEEP | LW_UNPARKED | SLOT_IN_USE);
uint32_t runnable = (~LWP_SUSPENDED | ~LSSLEEP | LW_UNPARKED | SLOT_IN_USE);
if ((thread->flags & mask) == runnable) {
return 1; /* Runnable */
#if 0
#define print(msg) \
{ \
const char m[] = msg; \
write(2, m, sizeof(m)); \
}
#else
#define print(m) /**/
#endif
return 0; /* Not runnable */
}
static void
__minix_schedule(int signal __unused)
{
static int last_pos = 0;
struct lwp* old = &lwp_threads[current_thread];
int pos;
/* Select Next thread to run.
* Simply scan the array looking for a schedulable thread, and
* loopback to the start if we reach the end. */
for(pos = last_pos; pos != last_pos;
pos = ((pos + 1) % MAX_THREAD_POOL)) {
if (__minix_runnable(&lwp_threads[pos])) {
break;
}
}
if (pos == last_pos) {
/* No other thread found to run, is the current one
* still runnable? */
if (!__minix_runnable(&lwp_threads[pos])) {
return; /* No runnable threads to schedule. */
}
}
/* Point the current thread to the thread picked. */
current_thread = pos;
/* Restore the next context of the thread picked. */
last_pos = pos;
(void)swapcontext(&(old->context), &(lwp_threads[pos].context));
}
extern void
__minix_schedule(int signal __unused);
void __pthread_init_minix(void) __attribute__((__constructor__, __used__));
void
@@ -118,231 +66,6 @@ __pthread_init_minix(void)
r = setitimer(ITIMER_VIRTUAL, &nit, &oit);
}
lwpid_t
_lwp_self(void)
{
return current_thread;
}
void
_lwp_makecontext(ucontext_t *context, void (*start_routine)(void *),
void *arg, void *private, caddr_t stack_base, size_t stack_size)
{
/* Already done in pthread_makelwp, here for reference */
memset(context, 0, sizeof(*context));
_INITCONTEXT_U(context);
context->uc_stack.ss_sp = stack_base;
context->uc_stack.ss_size = stack_size;
context->uc_stack.ss_flags = 0;
context->uc_link = NULL;
makecontext(context, start_routine, 1, arg);
}
int
_lwp_create(const ucontext_t *context, unsigned long flags, lwpid_t *new_lwp)
{
size_t i = 0;
while ((i < MAX_THREAD_POOL) &&
(SLOT_IN_USE == (lwp_threads[i].flags & SLOT_IN_USE))) {
i++;
}
if (MAX_THREAD_POOL == i) {
errno = EAGAIN;
return -1;
}
/* ADD CHECKS ON UCONTEXT */
memset(&lwp_threads[i], 0, sizeof(lwp_threads[i]));
lwp_threads[i].flags = flags | SLOT_IN_USE;
lwp_threads[i].context = *context;
*new_lwp = i;
return 0;
}
int
_lwp_suspend(lwpid_t lwp)
{
int nb_lwp = 0;
for (size_t i = 0; i < MAX_THREAD_POOL; i++) {
if (SLOT_IN_USE == (lwp_threads[i].flags & SLOT_IN_USE)) {
nb_lwp++;
}
}
if (1 < nb_lwp) {
return EDEADLK;
}
if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) {
return ESRCH;
}
lwp_threads[lwp].flags |= LW_WSUSPEND;
return 0;
}
int
_lwp_continue(lwpid_t lwp)
{
if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) {
return ESRCH;
}
lwp_threads[lwp].flags &= ~LW_WSUSPEND;
return 0;
}
int
_lwp_wakeup(lwpid_t lwp)
{
if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) {
return ESRCH;
}
if (LSSLEEP != (lwp_threads[lwp].flags & LSSLEEP)) {
return ENODEV;
}
lwp_threads[lwp].flags &= ~LSSLEEP;
__minix_schedule(SIGVTALRM);
return 0;
}
int
_lwp_detach(lwpid_t lwp)
{
if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) {
errno = ESRCH;
return -1;
}
if (LWP_DETACHED == (lwp_threads[lwp].flags & LWP_DETACHED)) {
errno = EINVAL;
return -1;
}
lwp_threads[lwp].flags |= LWP_DETACHED;
return 0;
}
int
_lwp_setname(lwpid_t target, const char * name)
{
/* Name is already a copy for our use. */
lwp_threads[target].name = name;
return 0;
}
int
_lwp_getname(lwpid_t target, char * name, size_t len)
{
return strlcpy(name, lwp_threads[target].name, len) < len ? 0 : -1;
}
void *
_lwp_getprivate(void)
{
return &lwp_threads[current_thread].tls;
}
void
_lwp_setprivate(void *cookie)
{
/* Not supported */
}
int
_lwp_unpark(lwpid_t lwp, const void * hint)
{
if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) {
errno = ESRCH;
return -1;
}
lwp_threads[lwp].flags |= LW_UNPARKED;
return 0;
}
ssize_t
_lwp_unpark_all(const lwpid_t * targets, size_t ntargets, const void * hint)
{
if (NULL == targets) {
return MAX_THREAD_POOL;
}
if (MAX_THREAD_POOL <= ntargets) {
errno = EINVAL;
return -1;
}
for (size_t i = 0; i < ntargets; i++) {
lwp_threads[targets[i]].flags |= LW_UNPARKED;
}
return 0;
}
int
_lwp_park(clockid_t clock_id, int i, const struct timespec * ts, lwpid_t thread,
const void *cookie, const void *cookie2)
{
// FIXME
return -1;
}
int
_lwp_wait(lwpid_t wlwp, lwpid_t *rlwp)
{
// FIXME
return -1;
}
int
_lwp_kill(lwpid_t thread, int signal)
{
// FIXME
errno = ESRCH;
return -1;
}
int
_lwp_exit(void)
{
lwp_threads[current_thread].flags &= ~SLOT_IN_USE;
__minix_schedule(SIGVTALRM);
/* We reach this only if there is nothing left to schedule. */
exit(0);
}
int
_lwp_ctl(int features, struct lwpctl **address)
{
/* LSC Add stuff to actually do something with this. */
*address = malloc(sizeof(struct lwpctl));
if (NULL == *address) {
return -1;
}
memset(*address, 0, sizeof(struct lwpctl));
// FIXME
return 0;
}
int
_sys_sched_yield(void)
{
@@ -360,30 +83,35 @@ sched_yield(void)
int
_sched_setaffinity(pid_t a, lwpid_t b, size_t c, const cpuset_t *d)
{
print("_sched_setaffinity\n");
return -1;
}
int
_sched_getaffinity(pid_t a, lwpid_t b, size_t c, cpuset_t *d)
{
print("_sched_getaffinity\n");
return -1;
}
int
_sched_setparam(pid_t a, lwpid_t b, int c, const struct sched_param *d)
{
print("_sched_setparam\n");
return -1;
}
int
_sched_getparam(pid_t a, lwpid_t b, int *c, struct sched_param *d)
{
print("_sched_getparam\n");
return -1;
}
int
rasctl(void *addr, size_t len, int op)
{
print("rasctl\n");
errno = EOPNOTSUPP;
return -1;
}

View File

@@ -179,7 +179,7 @@ _rtld_relocate_nonplt_objects(Obj_Entry *obj)
rdbg(("COPY (avoid in main)"));
break;
#if defined(__minix) && defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
#if defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II))
case R_TYPE(TLS_DTPOFF32):
def = _rtld_find_symdef(symnum, obj, &defobj, false);
if (def == NULL)
@@ -232,7 +232,7 @@ _rtld_relocate_nonplt_objects(Obj_Entry *obj)
obj->strtab + obj->symtab[symnum].st_name,
obj->path, (void *)tmp));
break;
#endif /* defined(__minix) && defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) */
#endif /* defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) */
default:
rdbg(("sym = %lu, type = %lu, offset = %p, "

View File

@@ -120,7 +120,7 @@ _rtld_relocate_nonplt_objects(Obj_Entry *obj)
rdbg(("COPY (avoid in main)"));
break;
#if defined(__minix) && defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
#if defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II))
case R_TYPE(TLS_TPOFF):
def = _rtld_find_symdef(symnum, obj, &defobj, false);
if (def == NULL)
@@ -176,7 +176,7 @@ _rtld_relocate_nonplt_objects(Obj_Entry *obj)
obj->path, (void *)*where));
break;
#endif /* defined(__minix) && defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) */
#endif /* defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) */
default:
rdbg(("sym = %lu, type = %lu, offset = %p, "
@@ -283,7 +283,7 @@ _rtld_relocate_plt_objects(const Obj_Entry *obj)
return err;
}
#if defined(__minix) && defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
#if defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II))
/*
* i386 specific GNU variant of __tls_get_addr using register based
* argument passing.
@@ -305,4 +305,4 @@ ___tls_get_addr(void *arg_)
return _rtld_tls_get_addr(tcb, idx, offset);
}
#endif /* defined(__minix) && defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) */
#endif /* defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) */

View File

@@ -95,10 +95,10 @@ _rtld_map_object(const char *path, int fd, const struct stat *sb)
#endif
Elf_Addr phdr_vaddr;
size_t phdr_memsz;
#if defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II))
#if !defined(__minix)
caddr_t gap_addr;
size_t gap_size;
#endif /* defined(__minix) */
#endif /* !defined(__minix) */
int i;
#ifdef RTLD_LOADER
Elf_Addr clear_vaddr;

View File

@@ -111,9 +111,9 @@ static void *auxinfo;
char *__progname;
char **environ;
#if !defined(__minix)
#if defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II))
static volatile bool _rtld_mutex_may_recurse;
#endif /* !defined(__minix) */
#endif /* defined(__minix) && (defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) */
#if defined(RTLD_DEBUG)
#ifndef __sh__
@@ -1524,7 +1524,7 @@ _rtld_objlist_remove(Objlist *list, Obj_Entry *obj)
}
}
#if defined(__minix)
#if defined(__minix) && !(defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II))
void _rtld_shared_enter(void) {}
void _rtld_shared_exit(void) {}
void _rtld_exclusive_enter(sigset_t *mask) {}
@@ -1655,4 +1655,4 @@ _rtld_exclusive_exit(sigset_t *mask)
sigprocmask(SIG_SETMASK, mask, NULL);
}
#endif /* !defined(__minix) */
#endif /* defined(__minix) && !(defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)) */

View File

@@ -23,7 +23,7 @@ SRCS+= accept.c access.c adjtime.c bind.c brk.c sbrk.c m_closefrom.c getsid.c \
wait4.c write.c \
utimensat.c utimes.c futimes.c lutimes.c futimens.c \
_exit.c _ucontext.c environ.c __getcwd.c vfork.c sizeup.c init.c \
getrusage.c setrlimit.c setpgid.c __sysctl.c
getrusage.c setrlimit.c setpgid.c __sysctl.c _minix_lwp.c _lwp.c
# Minix specific syscalls / utils.
SRCS+= kernel_utils.c sprofile.c stack_utils.c _mcontext.c

View File

@@ -0,0 +1,335 @@
#include <sys/cdefs.h>
#include <sys/lwp.h>
#include <sys/lwpctl.h>
#include <sys/param.h>
#include <lwp.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <ucontext.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#define MAX_THREAD_POOL 1024
#define SLOT_IN_USE 0x0001
/*
 * Per-thread state for the userspace LWP emulation: one entry per slot
 * of the static lwp_threads[] pool below.
 */
struct lwp {
int return_value; /* presumably the thread's exit value -- not read anywhere in this file */
int flags; /* SLOT_IN_USE plus LWP_*/LW_*/LS* state bits tested by __minix_runnable() */
const char * name; /* set by _lwp_setname(); NULL until then */
ucontext_t context; /* saved CPU context, switched via swapcontext() in __minix_schedule() */
struct tls_tcb *tls; /* per-thread TCB cookie, see _lwp_{get,set}private() */
};
static struct lwp lwp_threads[MAX_THREAD_POOL];
static volatile lwpid_t current_thread = 0;
void
__minix_schedule(int signal __unused);
#if 0
#define print(msg) \
{ \
const char m[] = msg; \
write(2, m, sizeof(m)); \
}
#else
#define print(m) /**/
#endif
/*
 * Decide whether a pool slot holds a schedulable thread.
 * Runnable means: slot in use, unparked, and neither suspended nor
 * sleeping.  Returns 1 when runnable, 0 otherwise.
 */
static int
__minix_runnable(struct lwp* thread)
{
	const uint32_t mask = LWP_SUSPENDED | LSSLEEP | LW_UNPARKED | SLOT_IN_USE;
	const uint32_t want = LW_UNPARKED | SLOT_IN_USE;

	return ((thread->flags & mask) == want) ? 1 : 0;
}
/*
 * Cooperative round-robin scheduler, also installed as the SIGVTALRM
 * handler by __pthread_init_minix().  Picks the next runnable thread
 * from the pool and context-switches to it; does nothing when no
 * thread (not even the current one) is runnable.
 */
void
__minix_schedule(int signal __unused)
{
	static int last_pos = 0;
	struct lwp* old = &lwp_threads[current_thread];
	int pos;

	/*
	 * Scan the pool starting at the slot AFTER the last one scheduled,
	 * wrapping at the end, until a runnable thread is found or we come
	 * full circle back to last_pos.
	 *
	 * FIX: the previous loop started at pos = last_pos with the guard
	 * pos != last_pos, so its body never executed and no other thread
	 * could ever be selected.
	 */
	for (pos = (last_pos + 1) % MAX_THREAD_POOL; pos != last_pos;
	    pos = (pos + 1) % MAX_THREAD_POOL) {
		if (__minix_runnable(&lwp_threads[pos])) {
			break;
		}
	}
	if (pos == last_pos) {
		/* No other thread found to run, is the current one
		 * still runnable? */
		if (!__minix_runnable(&lwp_threads[pos])) {
			print("__minix_schedule no switch\n");
			return; /* No runnable threads to schedule. */
		}
	}
	print("__minix_schedule switch\n");
	/* Point the current thread to the thread picked. */
	current_thread = pos;
	/* Remember where the next scan should resume. */
	last_pos = pos;
	/* Save the current context and resume the thread picked. */
	(void)swapcontext(&(old->context), &(lwp_threads[pos].context));
}
/* Return the identifier (pool slot index) of the calling thread. */
lwpid_t
_lwp_self(void)
{
return current_thread;
}
#if 0
/*
 * Disabled: kept only as reference.  The equivalent setup is performed
 * by the libpthread caller before _lwp_create() is invoked.
 */
void
_lwp_makecontext(ucontext_t *context, void (*start_routine)(void *),
void *arg, void *private, caddr_t stack_base, size_t stack_size)
{
/* Already done in pthread_makelwp, here for reference */
memset(context, 0, sizeof(*context));
context->uc_flags = _UC_CPU | _UC_STACK;;
context->uc_stack.ss_sp = stack_base;
context->uc_stack.ss_size = stack_size;
context->uc_stack.ss_flags = 0;
context->uc_link = NULL;
makecontext(context, start_routine, 1, arg);
}
#endif
/*
 * Create a new LWP from an already-prepared ucontext.  Claims the first
 * free pool slot, stores the context and flags there, and returns the
 * slot index through new_lwp.  Returns 0 on success, or -1 with errno
 * set to EAGAIN when the pool is exhausted.
 */
int
_lwp_create(const ucontext_t *context, unsigned long flags, lwpid_t *new_lwp)
{
	size_t slot;

	/* Find the first slot that is not currently in use. */
	for (slot = 0; slot < MAX_THREAD_POOL; slot++) {
		if (SLOT_IN_USE != (lwp_threads[slot].flags & SLOT_IN_USE)) {
			break;
		}
	}
	if (slot == MAX_THREAD_POOL) {
		errno = EAGAIN;
		return -1;
	}
	/* ADD CHECKS ON UCONTEXT */
	memset(&lwp_threads[slot], 0, sizeof(lwp_threads[slot]));
	lwp_threads[slot].flags = flags | SLOT_IN_USE;
	lwp_threads[slot].context = *context;
	*new_lwp = slot;
	return 0;
}
/*
 * Request suspension of the given LWP.  Returns 0 on success, ESRCH for
 * an out-of-range id, and EDEADLK when the target is the only LWP in
 * the process (suspending it would hang the process forever, per the
 * _lwp_suspend(2) contract).
 */
int
_lwp_suspend(lwpid_t lwp)
{
	int nb_lwp = 0;

	print("_lwp_suspend\n");
	/* Validate the id before anything else. */
	if ((MAX_THREAD_POOL <= lwp) || (lwp < 0)) {
		return ESRCH;
	}
	/* Count the LWPs currently in use. */
	for (size_t i = 0; i < MAX_THREAD_POOL; i++) {
		if (SLOT_IN_USE == (lwp_threads[i].flags & SLOT_IN_USE)) {
			nb_lwp++;
		}
	}
	/*
	 * FIX: the previous test was `1 < nb_lwp`, i.e. it refused exactly
	 * when more than one LWP existed -- the one situation in which
	 * suspension is safe.  EDEADLK applies to the only-LWP case.
	 */
	if (nb_lwp <= 1) {
		return EDEADLK;
	}
	lwp_threads[lwp].flags |= LW_WSUSPEND;
	return 0;
}
/*
 * Clear a pending suspend request on the given LWP so the scheduler may
 * pick it again.  Returns 0 on success, ESRCH for an out-of-range id.
 */
int
_lwp_continue(lwpid_t lwp)
{
	print("_lwp_continue\n");
	if ((lwp < 0) || (lwp >= MAX_THREAD_POOL)) {
		return ESRCH;
	}
	lwp_threads[lwp].flags &= ~LW_WSUSPEND;
	return 0;
}
/*
 * Wake a sleeping LWP and immediately invoke the scheduler so it gets a
 * chance to run.  Returns 0 on success, ESRCH for a bad id, ENODEV when
 * the target is not currently sleeping.
 */
int
_lwp_wakeup(lwpid_t lwp)
{
	print("_lwp_wakeup\n");
	if ((lwp < 0) || (lwp >= MAX_THREAD_POOL)) {
		return ESRCH;
	}
	print("_lwp_wakeup 2\n");
	/* Only a sleeping thread can be woken. */
	if ((lwp_threads[lwp].flags & LSSLEEP) != LSSLEEP) {
		return ENODEV;
	}
	lwp_threads[lwp].flags &= ~LSSLEEP;
	print("_lwp_wakeup 3\n");
	__minix_schedule(SIGVTALRM);
	print("_lwp_wakeup 4\n");
	return 0;
}
/*
 * Mark an LWP as detached.  Returns 0 on success; -1 with errno ESRCH
 * for an out-of-range id, or EINVAL if it is already detached.
 */
int
_lwp_detach(lwpid_t lwp)
{
	print("_lwp_detach\n");
	if ((lwp < 0) || (lwp >= MAX_THREAD_POOL)) {
		errno = ESRCH;
		return -1;
	}
	/* Detaching twice is an error. */
	if ((lwp_threads[lwp].flags & LWP_DETACHED) == LWP_DETACHED) {
		errno = EINVAL;
		return -1;
	}
	lwp_threads[lwp].flags |= LWP_DETACHED;
	return 0;
}
/*
 * Record a name for the given LWP.  Returns 0 on success, ESRCH for an
 * out-of-range id (added: every sibling entry point validates its id,
 * this one previously indexed the pool unchecked).
 */
int
_lwp_setname(lwpid_t target, const char * name)
{
	print("_lwp_setname\n");
	if ((MAX_THREAD_POOL <= target) || (target < 0)) {
		return ESRCH;
	}
	/* Store the pointer only: per the comment below the caller hands
	 * over a private copy.  NOTE(review): confirm every caller really
	 * transfers ownership, otherwise this pointer can dangle. */
	/* Name is already a copy for our use. */
	lwp_threads[target].name = name;
	return 0;
}
/*
 * Copy the LWP's name into the caller's buffer.  Returns 0 on success
 * (including the never-named case, reported as an empty string), -1 on
 * truncation, ESRCH for an out-of-range id.
 */
int
_lwp_getname(lwpid_t target, char * name, size_t len)
{
	print("_lwp_getname\n");
	if ((MAX_THREAD_POOL <= target) || (target < 0)) {
		return ESRCH;
	}
	/* A thread that was never named still has a NULL pointer; passing
	 * NULL to strlcpy is undefined behavior, so report an empty name. */
	if (NULL == lwp_threads[target].name) {
		if (len > 0) {
			name[0] = '\0';
		}
		return 0;
	}
	return strlcpy(name, lwp_threads[target].name, len) < len ? 0 : -1;
}
/* Return the TCB cookie previously registered for the calling thread
 * via _lwp_setprivate() (NULL if none was ever set). */
void *
_lwp_getprivate(void)
{
// print("_lwp_getprivate\n");
return lwp_threads[current_thread].tls;
}
/* Register the calling thread's TCB cookie; it is handed back later by
 * _lwp_getprivate(). */
void
_lwp_setprivate(void *cookie)
{
print("_lwp_setprivate\n");
lwp_threads[current_thread].tls = cookie;
}
/*
 * Unpark a single LWP.  The hint argument is ignored by this userspace
 * implementation.  Returns 0 on success, or -1 with errno set to ESRCH
 * for an out-of-range id.
 */
int
_lwp_unpark(lwpid_t lwp, const void * hint)
{
	print("_lwp_unpark\n");
	if ((lwp < 0) || (lwp >= MAX_THREAD_POOL)) {
		errno = ESRCH;
		return -1;
	}
	lwp_threads[lwp].flags |= LW_UNPARKED;
	return 0;
}
/*
 * Unpark a batch of LWPs.  A NULL target list is a query for the
 * maximum batch size.  Returns 0 on success, the pool size for the
 * query, or -1 with errno set (EINVAL for an oversized batch, ESRCH for
 * a bad id -- added: each id was previously used to index the pool
 * unchecked, so a bad id corrupted memory).
 */
ssize_t
_lwp_unpark_all(const lwpid_t * targets, size_t ntargets, const void * hint)
{
	print("_lwp_unpark_all\n");
	if (NULL == targets) {
		return MAX_THREAD_POOL;
	}
	if (MAX_THREAD_POOL <= ntargets) {
		errno = EINVAL;
		return -1;
	}
	for (size_t i = 0; i < ntargets; i++) {
		/* Validate before indexing the pool. */
		if ((MAX_THREAD_POOL <= targets[i]) || (targets[i] < 0)) {
			errno = ESRCH;
			return -1;
		}
		lwp_threads[targets[i]].flags |= LW_UNPARKED;
	}
	return 0;
}
/*
 * Park (block) the calling LWP.  Not implemented yet.
 * FIX: the stub returned -1 without touching errno, so callers that
 * inspect errno (libpthread expects ETIMEDOUT/EINTR/EALREADY here) saw
 * a stale value; fail explicitly with ENOSYS instead.
 */
int
_lwp_park(clockid_t clock_id, int i, const struct timespec * ts, lwpid_t thread,
    const void *cookie, const void *cookie2)
{
	print("_lwp_park\n");
	// FIXME: implement parking on top of __minix_schedule().
	errno = ENOSYS;
	return -1;
}
/*
 * Wait for an LWP to terminate.  Not implemented yet.
 * FIX: the stub returned -1 without setting errno; fail explicitly with
 * ENOSYS so callers do not act on a stale errno value.
 */
int
_lwp_wait(lwpid_t wlwp, lwpid_t *rlwp)
{
	print("_lwp_wait\n");
	// FIXME: implement by watching SLOT_IN_USE / return_value.
	errno = ENOSYS;
	return -1;
}
int
_lwp_kill(lwpid_t thread, int signal)
{
print("_lwp_kill\n");
// FIXME: per-thread signal delivery is not implemented; always fail
// as if the target thread did not exist.
errno = ESRCH;
return -1;
}
/* Terminate the calling LWP: free its pool slot, then hand the CPU to
 * any remaining runnable thread.  If the scheduler returns, no thread
 * is left and the whole process exits. */
int
_lwp_exit(void)
{
print("_lwp_exit\n");
lwp_threads[current_thread].flags &= ~SLOT_IN_USE;
__minix_schedule(SIGVTALRM);
/* We reach this only if there is nothing left to schedule. */
exit(0);
}
/*
 * Hand the caller a (currently never-updated) lwpctl communication
 * block.  Returns 0 on success, -1 on allocation failure (errno is set
 * by the allocator).
 */
int
_lwp_ctl(int features, struct lwpctl **address)
{
	print("_lwp_ctl\n");
	/* LSC Add stuff to actually do something with this. */
	/* calloc zero-fills directly and checks the size multiplication,
	 * replacing the previous malloc + memset pair. */
	*address = calloc(1, sizeof(**address));
	if (NULL == *address) {
		return -1;
	}
	// FIXME: the counters in the lwpctl block are never maintained.
	return 0;
}

View File

@@ -156,6 +156,10 @@ __BEGIN_DECLS
static __inline void *
__lwp_getprivate_fast(void)
{
#if defined(__minix)
extern void *_lwp_getprivate(void);
return _lwp_getprivate();
#else
#if !defined(__thumb__) || defined(_ARM_ARCH_T2)
extern void *_lwp_getprivate(void);
void *rv;
@@ -173,6 +177,7 @@ __lwp_getprivate_fast(void)
extern void *__aeabi_read_tp(void);
return __aeabi_read_tp();
#endif /* !__thumb__ || _ARM_ARCH_T2 */
#endif /* defined(__minix) */
}
#if defined(_KERNEL)

View File

@@ -148,11 +148,16 @@ int getmcontext(mcontext_t *mcp);
static __inline void *
__lwp_getprivate_fast(void)
{
#if defined(__minix)
extern void *_lwp_getprivate(void);
return _lwp_getprivate();
#else
void *__tmp;
__asm volatile("movl %%gs:0, %0" : "=r" (__tmp));
return __tmp;
#endif /* defined(__minix) */
}
#endif /* !_I386_MCONTEXT_H_ */

View File

@@ -124,9 +124,7 @@ typedef unsigned char __cpu_simple_lock_nv_t;
#define __HAVE_INTR_CONTROL
#define __HAVE_MM_MD_OPEN
#define __HAVE___LWP_GETPRIVATE_FAST
#if !defined(__minix)
#define __HAVE_TLS_VARIANT_II
#endif /* !defined(__minix) */
#define __HAVE_COMMON___TLS_GET_ADDR
#if defined(_KERNEL)