Overhaul glibc locking (XCC)
Author: Barret Rhoden <brho@cs.berkeley.edu>
Wed, 26 Apr 2017 19:18:49 +0000 (15:18 -0400)
Committer: Barret Rhoden <brho@cs.berkeley.edu>
Wed, 3 May 2017 16:13:02 +0000 (12:13 -0400)
Previously, we were using the low-level locks (spin_pdr_locks) for almost
everything in glibc.  The libc_lock was a spinlock, etc.  It turns out, the
libc_lock is supposed to be a mutex.

I had a couple cases where the app would yield while holding a libc_lock,
which you cannot do if it is a spinlock.  Specifically, this happened when
calling fclose() on a FILE (holding the old libc_lock), which closed the
underlying FD, when we were using epoll.  The epoll close callback grabs a
uthread mutex.  The fix is to make the libc_lock a uthread mutex too.  No
problem, right?

The problem was malloc was also using those locks, and vcore context calls
malloc and free.  Vcore context code cannot use mutexes.  So our options
were either never to call malloc or free from vcore context, or to use a
different type of lock.  So far, we can get by with malloc using
spin_pdr_locks (the low-level lock (LLL)).

This all does mean that vcore context code cannot call some things from
glibc, such as close(), printf(), or anything using the libc_lock (a
mutex), including the _IO_lock (which is now explicitly a libc_lock (mutex)
and not a spinlock).  Currently, that includes interprocess signal
handlers, which are run from vcore context.  fprintf() to stderr and stdout
will work, but some other signal-safe functions might not.  close() doesn't
work, not because of the libc_lock (fclose() grabs that, not close()), but
because of the close callbacks, which *do* grab mutexes.  And you can't
grab a mutex from vcore context.

There are a few other implications.  malloc() (using a spinlock) relies on
sbrk(), which previously was a libc_lock.  That now needs to be a spinlock.
It is possible for the kernel to block the mmap() call.  If that happens,
userspace will spin until the call completes.  Oh well.

I also sorted out the rtld locks.  Previously, we were doing nothing for
the recursive locks.  If we had threads racing in dlopen(), then we could
corrupt the various data structures in ld.  Yikes!  There might be issues
with those locks still.  I made them spinlocks, since we can't access
parlib objects at all in that code.  (We can access headers).

Now that rtld isn't using the libc_locks, we no longer have anyone calling
into parlib-compat.c's spinlocks, and we can replace that thread-unsafe
code with asserts.

There are probably a few problems with all of this still.  For instance,
maybe we can do something different for ld.so compared to libdl.so.  Who
knows.  At least it's a little better than before.

Rebuild glibc.

Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
tools/compilers/gcc-glibc/glibc-2.19-akaros/sysdeps/akaros/Versions
tools/compilers/gcc-glibc/glibc-2.19-akaros/sysdeps/akaros/bits/libc-lock.h
tools/compilers/gcc-glibc/glibc-2.19-akaros/sysdeps/akaros/bits/stdio-lock.h
tools/compilers/gcc-glibc/glibc-2.19-akaros/sysdeps/akaros/malloc-machine.h
tools/compilers/gcc-glibc/glibc-2.19-akaros/sysdeps/akaros/parlib-compat.c
tools/compilers/gcc-glibc/glibc-2.19-akaros/sysdeps/akaros/sbrk.c

index ecb071f..ea9cddc 100644 (file)
@@ -87,10 +87,31 @@ libc {
     _assert_failed;
     nsec2tsc;
     tsc2nsec;
+    spin_pdr_init;
     spin_pdr_trylock;
     spin_pdr_lock;
     spin_pdr_unlock;
     cpu_relax_vc;
     uthread_sched_yield;
+    uth_mutex_init;
+    uth_mutex_destroy;
+    uth_mutex_lock;
+    uth_mutex_trylock;
+    uth_mutex_unlock;
+    uth_recurse_mutex_init;
+    uth_recurse_mutex_destroy;
+    uth_recurse_mutex_lock;
+    uth_recurse_mutex_trylock;
+    uth_recurse_mutex_unlock;
+    uth_rwlock_init;
+    uth_rwlock_destroy;
+    uth_rwlock_rdlock;
+    uth_rwlock_try_rdlock;
+    uth_rwlock_wrlock;
+    uth_rwlock_try_wrlock;
+    uth_rwlock_unlock;
+    dtls_key_create;
+    set_dtls;
+    get_dtls;
   }
 }
index 39943b9..adcb6fb 100644 (file)
@@ -1,11 +1,11 @@
-/* libc-internal interface for mutex locks.  NPTL version.
-   Copyright (C) 1996-2003, 2005, 2007 Free Software Foundation, Inc.
+/* libc-internal interface for mutex locks.  Stub version.
+   Copyright (C) 1996-2014 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public License as
-   published by the Free Software Foundation; either version 2.1 of the
-   License, or (at your option) any later version.
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
 
    The GNU C Library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -13,9 +13,8 @@
    Lesser General Public License for more details.
 
    You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; see the file COPYING.LIB.  If not,
-   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
-   Boston, MA 02111-1307, USA.  */
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
 
 #ifndef _BITS_LIBC_LOCK_H
 #define _BITS_LIBC_LOCK_H 1
 # include <tls.h>
 #endif
 
-/* Lock types.  */
-typedef int __libc_lock_t; 
-#define _LIBC_LOCK_INITIALIZER LLL_LOCK_INITIALIZER
-
-typedef struct __libc_lock_recursive { 
-       __libc_lock_t lock; 
-       int count; 
-       void* owner; 
-} __libc_lock_recursive_t;
-#define _LIBC_LOCK_RECURSIVE_INITIALIZER {_LIBC_LOCK_INITIALIZER,0,0}
+#include <parlib/uthread.h>
+#include <parlib/dtls.h>
 
 /* Define a lock variable NAME with storage class CLASS.  The lock must be
    initialized with __libc_lock_init before it can be used (or define it
@@ -46,301 +37,194 @@ typedef struct __libc_lock_recursive {
    definitions you must use a pointer to the lock structure (i.e., NAME
    begins with a `*'), because its storage size will not be known outside
    of libc.  */
-#define __libc_lock_define(CLASS,NAME)\
-       CLASS __libc_lock_t NAME;
-#define __libc_lock_define_recursive(CLASS,NAME)\
-       CLASS __libc_lock_recursive_t NAME;
-
-/* Define an initialized lock variable NAME with storage class CLASS.
-   For the C library we take a deeper look at the initializer.  For
-   this implementation all fields are initialized to zero.  Therefore
-   we don't initialize the variable which allows putting it into the
-   BSS section.  (Except on PA-RISC and other odd architectures, where
-   initialized locks must be set to one due to the lack of normal
-   atomic operations.) */
-
-#if LLL_LOCK_INITIALIZER == 0
-#define __libc_lock_define_initialized(CLASS,NAME)\
-       CLASS __libc_lock_t NAME;
-#else
-#define __libc_lock_define_initialized(CLASS,NAME)\
-       CLASS __libc_lock_t NAME = _LIBC_LOCK_INITIALIZER;
-#endif
+#define __libc_lock_define(CLASS, NAME) \
+       CLASS uth_mutex_t NAME;
+#define __libc_lock_define_recursive(CLASS, NAME) \
+       CLASS uth_recurse_mutex_t NAME;
+#define __libc_rwlock_define(CLASS, NAME) \
+       CLASS uth_rwlock_t NAME;
+
+/* These don't seem to be used much outside our sysdep (malloc-machine.h), but
+ * the RTLD one later is used. */
+#define _LIBC_LOCK_INITIALIZER UTH_MUTEX_INIT
+#define _LIBC_LOCK_RECURSIVE_INITIALIZER UTH_RECURSE_MUTEX_INIT
+#define _LIBC_RWLOCK_INITIALIZER UTH_RWLOCK_INIT
+
+/* Define an initialized lock variable NAME with storage class CLASS.  */
+#define __libc_lock_define_initialized(CLASS, NAME) \
+       CLASS uth_mutex_t NAME = UTH_MUTEX_INIT;
+#define __libc_rwlock_define_initialized(CLASS, NAME) \
+       CLASS uth_rwlock_t NAME = UTH_RWLOCK_INIT;
 
 /* Define an initialized recursive lock variable NAME with storage
    class CLASS.  */
-#if LLL_LOCK_INITIALIZER == 0
-#define __libc_lock_define_initialized_recursive(CLASS,NAME)\
-       CLASS __libc_lock_recursive_t NAME;
-#else
-#define __libc_lock_define_initialized_recursive(CLASS,NAME)\
-       CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
-#endif
+#define __libc_lock_define_initialized_recursive(CLASS, NAME) \
+       CLASS uth_recurse_mutex_t NAME = UTH_RECURSE_MUTEX_INIT;
 
 /* Initialize the named lock variable, leaving it in a consistent, unlocked
    state.  */
-#define __libc_lock_init(NAME) ((NAME) = _LIBC_LOCK_INITIALIZER, 0)
+#define __libc_lock_init(NAME) uth_mutex_init(&(NAME))
+#define __libc_rwlock_init(NAME) uth_rwlock_init(&(NAME))
 
 /* Same as last but this time we initialize a recursive mutex.  */
-#if defined _LIBC && (!defined NOT_IN_libc)
-#define __libc_lock_init_recursive(NAME)\
-       ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
-#else
-#define __libc_lock_init_recursive(NAME)\
-do {\
-       NAME.lock = 0;\
-       NAME.count = 0;\
-       NAME.owner = 0;\
-} while (0)
-#endif
+#define __libc_lock_init_recursive(NAME) uth_recurse_mutex_init(&(NAME))
 
 /* Finalize the named lock variable, which must be locked.  It cannot be
    used again until __libc_lock_init is called again on it.  This must be
    called on a lock variable before the containing storage is reused.  */
-#define __libc_lock_fini(NAME) ((void) 0)
+#define __libc_lock_fini(NAME) uth_mutex_destroy(&(NAME))
+#define __libc_rwlock_fini(NAME) uth_rwlock_destroy(&(NAME))
 
 /* Finalize recursive named lock.  */
-#define __libc_lock_fini_recursive(NAME) ((void) 0)
+#define __libc_lock_fini_recursive(NAME) uth_recurse_mutex_destroy(&(NAME))
 
 /* Lock the named lock variable.  */
-# define __libc_lock_lock(NAME)\
-       ({ lll_lock (NAME, LLL_PRIVATE); 0; })
+#define __libc_lock_lock(NAME) uth_mutex_lock(&(NAME))
+#define __libc_rwlock_rdlock(NAME) uth_rwlock_rdlock(&(NAME))
+#define __libc_rwlock_wrlock(NAME) uth_rwlock_wrlock(&(NAME))
 
 /* Lock the recursive named lock variable.  */
-#ifndef IS_IN_rtld
-
-# define __libc_lock_lock_recursive(NAME)\
-do {\
-       void *self = THREAD_SELF;\
-       if((NAME).owner != self) {\
-               lll_lock ((NAME).lock, LLL_PRIVATE);\
-               (NAME).owner = self;\
-       }\
-       ++(NAME).count;\
-} while (0)
+#define __libc_lock_lock_recursive(NAME) uth_recurse_mutex_lock(&(NAME))
+
+/* Try to lock the named lock variable.  */
+#define __libc_lock_trylock(NAME) \
+       ({ uth_mutex_trylock(&(NAME)) ? 0 : EBUSY; })
+#define __libc_rwlock_tryrdlock(NAME) \
+       ({ uth_rwlock_try_rdlock(&(NAME)) ? 0 : EBUSY; })
+#define __libc_rwlock_trywrlock(NAME) \
+       ({ uth_rwlock_try_wrlock(&(NAME)) ? 0 : EBUSY; })
+
+/* Try to lock the recursive named lock variable.  */
+#define __libc_lock_trylock_recursive(NAME) \
+       ({ uth_recurse_mutex_trylock(&(NAME)) ? 0 : EBUSY; })
+
+/* Unlock the named lock variable.  */
+#define __libc_lock_unlock(NAME) uth_mutex_unlock(&(NAME))
+#define __libc_rwlock_unlock(NAME) uth_rwlock_unlock(&(NAME))
 
 /* Unlock the recursive named lock variable.  */
-/* We do no error checking here.  */
-# define __libc_lock_unlock_recursive(NAME)\
-       do {\
-               if(--(NAME).count == 0) {\
-                       (NAME).owner = NULL;\
-                       lll_unlock((NAME).lock, LLL_PRIVATE);\
-               }\
-       } while (0)
+#define __libc_lock_unlock_recursive(NAME) uth_recurse_mutex_unlock(&(NAME))
+
+/* RTLD locks */
+/* Ideally, we'd use uthread mutexes.  That's what pthreads does.  However, this
+ * code will be in ld.so, and will never actually link against parlib.  We might
+ * be able to do some function pointer magic, but for the most part, we'll
+ * probably need kernel support (#futex or something).  Instead of that, we can
+ * build recursive locks on top of spinlocks, and deal with any problems as they
+ * arise.  By not using PDR, we run the risk of deadlocking, but I can live with
+ * that for now (you'd need to dlopen() from vcore context, which would probably
+ * panic for some other reason). */
+
+typedef struct {
+       unsigned int lock;
+       unsigned int count;
+       void *owner;
+} __rtld_lock_recursive_t;
+
+#define _RTLD_LOCK_RECURSIVE_INITIALIZER { 0, 0, (void*)-1 }
+
+static inline void *__rtld_lock_who_am_i(void)
+{
+       if (atomic_read(&vcpd_of(0)->flags) & VC_SCP_NOVCCTX)
+               return (void*)0xf00baa;
+       /* We can't use TLS related to parlib (in_vcore_context() / vcore_id() will
+        * crash.  current_uthread won't link.).  We *can* find our thread
+        * descriptor, which disambiguates any callers (including between vcore
+        * context (which probably shouldn't be in here) and uthreads, so long as
+        * uthreads have TLS - which they must if they are making glibc calls. */
+       return THREAD_SELF;
+}
 
-#else /* Ignore recursive locks within rtld */
+static inline void rtld_lock_lock_initialize(__rtld_lock_recursive_t *lock)
+{
+       lock->lock = 0;
+       lock->count = 0;
+       lock->owner = (void*)-1;
+}
 
-# define __libc_lock_lock_recursive(NAME) do { } while(0)
-# define __libc_lock_unlock_recursive(NAME) do { } while(0)
+static inline void rtld_lock_lock_recursive(__rtld_lock_recursive_t *lock)
+{
+       void *me = __rtld_lock_who_am_i();
+
+       if (lock->owner == me) {
+               lock->count++;
+               return;
+       }
+       while (__sync_lock_test_and_set(&lock->lock, 1))
+               cpu_relax();
+       lock->count++;
+       lock->owner = me;
+}
 
-#endif
+static inline void rtld_lock_unlock_recursive(__rtld_lock_recursive_t *lock)
+{
+       lock->count--;
+       if (lock->count)
+               return;
+       lock->owner = (void*)-1;
+       wmb();
+       lock->lock = 0;
+}
 
-/* Try to lock the named lock variable.  */
-#define __libc_lock_trylock(NAME)\
-       lll_trylock(NAME)
+#define __rtld_lock_define_recursive(CLASS, NAME) \
+       CLASS __rtld_lock_recursive_t NAME;
+#define __rtld_lock_define_initialized_recursive(CLASS, NAME) \
+       CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
 
-/* Try to lock the recursive named lock variable.  */
-#define __libc_lock_trylock_recursive(NAME)\
-({\
-       int result = 0;\
-       void *self = THREAD_SELF;\
-       if((NAME).owner != self) {\
-               if(lll_trylock((NAME).lock) == 0) {\
-                       (NAME).owner = self;\
-                       (NAME).count = 1;\
-               }\
-               else\
-                       result = EBUSY;\
-    }\
-       else\
-               ++(NAME).count;\
-       result;\
-})
+/* __rtld_lock_initialize probably should be __rtld_lock_initialize_recursive.
+ * Might be a glibc bug.  These also want &(NAME), and not NAME, hence the
+ * macros. */
+#define __rtld_lock_initialize(NAME) rtld_lock_lock_initialize(&(NAME))
+#define __rtld_lock_lock_recursive(NAME) rtld_lock_lock_recursive(&(NAME))
+#define __rtld_lock_unlock_recursive(NAME) rtld_lock_unlock_recursive(&(NAME))
 
-/* Unlock the named lock variable.  */
-#define __libc_lock_unlock(NAME)\
-       lll_unlock (NAME, LLL_PRIVATE)
-
-#define __libc_lock_default_lock_recursive(lock)\
-       ++((__libc_lock_recursive_t *)(lock))->count;
-#define __libc_lock_default_unlock_recursive(lock)\
-       --((__libc_lock_recursive_t *)(lock))->count;
-
-/* libc's rwlocks are the same as regular locks for now... */
-typedef __libc_lock_t __libc_rwlock_t; 
-#define _LIBC_RWLOCK_INITIALIZER _LIBC_LOCK_INITIALIZER
-typedef __libc_lock_recursive_t __libc_rwlock_recursive_t; 
-#define _LIBC_RWLOCK_RECURSIVE_INITIALIZER _LIBC_LOCK_RECURSIVE_INITIALIZER
-#define __libc_rwlock_define(CLASS,NAME)\
-       __libc_lock_define(CLASS,NAME)
-#define __libc_rwlock_define_recursive(CLASS,NAME)\
-       __libc_lock_define_recursive(CLASS,NAME)
-#define __libc_rwlock_define_initialized(CLASS,NAME)\
-       __libc_lock_define_initialized(CLASS,NAME)
-#define __libc_rwlock_define_initialized_recursive(CLASS,NAME)\
-       __libc_lock_define_initialized_recursive(CLASS,NAME)
-#define __libc_rwlock_init(NAME)\
-       __libc_lock_init(NAME)
-#define __libc_rwlock_init_recursive(NAME)\
-       __libc_lock_init_recursive(NAME)
-#define __libc_rwlock_fini(NAME)\
-       __libc_lock_fini(NAME)
-#define __libc_rwlock_fini_recursive(NAME)\
-       __libc_lock_fini_recursive(NAME)
-#define __libc_rwlock_rdlock(NAME)\
-       __libc_lock_lock(NAME)
-#define __libc_rwlock_wrlock(NAME)\
-       __libc_lock_lock(NAME)
-#define __libc_rwlock_rdlock_recursive(NAME)\
-       __libc_lock_lock_recursive(NAME)
-#define __libc_rwlock_wrlock_recursive(NAME)\
-       __libc_lock_lock_recursive(NAME)
-#define __libc_rwlock_tryrlock(NAME)\
-       __libc_lock_trylock(NAME)
-#define __libc_rwlock_trywlock(NAME)\
-       __libc_lock_trylock(NAME)
-#define __libc_rwlock_tryrlock_recursive(NAME)\
-       __libc_lock_trylock_recursive(NAME)
-#define __libc_rwlock_trywlock_recursive(NAME)\
-       __libc_lock_trylock_recursive(NAME)
-#define __libc_rwlock_unlock(NAME)\
-       __libc_lock_unlock(NAME) 
-#define __libc_rwlock_unlock_recursive(NAME)\
-       __libc_lock_unlock_recursive(NAME) 
-#define __libc_rwlock_default_rdlock_recursive(lock)\
-       __libc_lock_default_lock_recursive(lock)
-#define __libc_rwlock_default_wrlock_recursive(lock)\
-       __libc_lock_default_lock_recursive(lock)
-#define __libc_rwlock_default_unlock_recursive(lock)\
-       __libc_lock_default_unlock_recursive(lock)
-
-/* rtld locks are the same as libc locks */
-typedef __libc_lock_t __rtld_lock_t;
-#define _RTLD_LOCK_INITIALIZER _LIBC_LOCK_INITIALIZER
-typedef __libc_lock_recursive_t __rtld_lock_recursive_t;
-#define _RTLD_LOCK_RECURSIVE_INITIALIZER _LIBC_LOCK_RECURSIVE_INITIALIZER
-#define __rtld_lock_define(CLASS,NAME)\
-       __libc_lock_define_recursive(CLASS,NAME)
-#define __rtld_lock_define_recursive(CLASS,NAME)\
-       __libc_lock_define_recursive(CLASS,NAME)
-#define __rtld_lock_define_initialized(CLASS,NAME)\
-       __libc_lock_define_initialized_recursive(CLASS,NAME)
-#define __rtld_lock_define_initialized_recursive(CLASS,NAME)\
-       __libc_lock_define_initialized_recursive(CLASS,NAME)
-#define __rtld_lock_initialize(NAME)\
-       __libc_lock_init_recursive(NAME)
-#define __rtld_lock_init_recursive(NAME)\
-       __libc_lock_init_recursive(NAME)
-# define __rtld_lock_fini(NAME)\
-       __libc_lock_fini_recursive(NAME)
-# define __rtld_lock_fini_recursive(NAME)\
-       __libc_lock_fini_recursive(NAME)
-#define __rtld_lock_lock(NAME)\
-       __libc_lock_lock_recursive(NAME)
-#define __rtld_lock_lock_recursive(NAME)\
-       __libc_lock_lock_recursive(NAME)
-#define __rtld_lock_trylock(NAME)\
-       __libc_lock_trylock_recursive(NAME)
-#define __rtld_lock_trylock_recursive(NAME)\
-       __libc_lock_trylock_recursive(NAME)
-#define __rtld_lock_unlock(NAME)\
-       __libc_lock_unlock_recursive(NAME) 
-#define __rtld_lock_unlock_recursive(NAME)\
-       __libc_lock_unlock_recursive(NAME) 
-#define __rtld_lock_default_lock_recursive(lock)\
-       __libc_lock_default_lock_recursive(lock)
-#define __rtld_lock_default_unlock_recursive(lock)\
-       __libc_lock_default_unlock_recursive(lock)
-#define __rtld_rwlock_define(CLASS,NAME)\
-       __libc_rwlock_define_recursive(CLASS,NAME)
-#define __rtld_rwlock_define_recursive(CLASS,NAME)\
-       __libc_rwlock_define_recursive(CLASS,NAME)
-#define __rtld_rwlock_define_initialized(CLASS,NAME)\
-       __libc_rwlock_define_initialized_recursive(CLASS,NAME)
-#define __rtld_rwlock_define_initialized_recursive(CLASS,NAME)\
-       __libc_rwlock_define_initialized_recursive(CLASS,NAME)
-#define __rtld_rwlock_init(NAME)\
-       __libc_rwlock_init_recursive(NAME)
-#define __rtld_rwlock_init_recursive(NAME)\
-       __libc_rwlock_init_recursive(NAME)
-#define __rtld_rwlock_fini(NAME)\
-       __libc_rwlock_fini_recursive(NAME)
-#define __rtld_rwlock_fini_recursive(NAME)\
-       __libc_rwlock_fini_recursive(NAME)
-#define __rtld_rwlock_rdlock(NAME)\
-       __libc_rwlock_lock_recursive(NAME)
-#define __rtld_rwlock_wrlock(NAME)\
-       __libc_rwlock_lock_recursive(NAME)
-#define __rtld_rwlock_rdlock_recursive(NAME)\
-       __libc_rwlock_lock_recursive(NAME)
-#define __rtld_rwlock_wrlock_recursive(NAME)\
-       __libc_rwlock_lock_recursive(NAME)
-#define __rtld_rwlock_tryrlock(NAME)\
-       __libc_rwlock_trylock_recursive(NAME)
-#define __rtld_rwlock_trywlock(NAME)\
-       __libc_rwlock_trylock_recursive(NAME)
-#define __rtld_rwlock_tryrlock_recursive(NAME)\
-       __libc_rwlock_trylock_recursive(NAME)
-#define __rtld_rwlock_trywlock_recursive(NAME)\
-       __libc_rwlock_trylock_recursive(NAME)
-#define __rtld_rwlock_unlock(NAME)\
-       __libc_rwlock_unlock_recursive(NAME) 
-#define __rtld_rwlock_unlock_recursive(NAME)\
-       __libc_rwlock_unlock_recursive(NAME) 
-#define __rtld_rwlock_default_rdlock_recursive(lock)\
-       __libc_rwlock_default_lock_recursive(lock)
-#define __rtld_rwlock_default_wrlock_recursive(lock)\
-       __libc_rwlock_default_lock_recursive(lock)
-#define __rtld_rwlock_default_unlock_recursive(lock)\
-       __libc_rwlock_default_unlock_recursive(lock)
 
 /* Define once control variable.  */
-#define __libc_once_define(CLASS, NAME) CLASS int NAME = 0
+#define __libc_once_define(CLASS, NAME) \
+       CLASS parlib_once_t NAME
 
 /* Call handler iff the first call.  */
-#define __libc_once(ONCE_CONTROL, INIT_FUNCTION)\
-do {\
-       if((ONCE_CONTROL) == 0) {\
-               INIT_FUNCTION ();\
-               (ONCE_CONTROL) = 1;\
-       }\
-} while (0)
+#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
+       parlib_run_once(&(ONCE_CONTROL), (void (*)(void*))(INIT_FUNCTION), NULL)
+
+/* Get once control variable.  */
+#define __libc_once_get(ONCE_CONTROL) \
+  ((ONCE_CONTROL).ran_once == TRUE)
 
 /* Start a critical region with a cleanup function */
-#define __libc_cleanup_region_start(DOIT, FCT, ARG)\
-{\
-  typeof (***(FCT)) *__save_FCT = (DOIT) ? (FCT) : 0;\
-  typeof (ARG) __save_ARG = ARG;\
+#define __libc_cleanup_region_start(DOIT, FCT, ARG)                        \
+{                                                                          \
+  typeof (***(FCT)) *__save_FCT = (DOIT) ? (FCT) : 0;                      \
+  typeof (ARG) __save_ARG = ARG;                                           \
   /* close brace is in __libc_cleanup_region_end below. */
 
 /* End a critical region started with __libc_cleanup_region_start. */
-#define __libc_cleanup_region_end(DOIT)\
-if((DOIT) && __save_FCT != 0)\
-    (*__save_FCT)(__save_ARG);\
+#define __libc_cleanup_region_end(DOIT)                                            \
+  if ((DOIT) && __save_FCT != 0)                                           \
+    (*__save_FCT)(__save_ARG);                                             \
 }
 
 /* Sometimes we have to exit the block in the middle.  */
-#define __libc_cleanup_end(DOIT)\
-if ((DOIT) && __save_FCT != 0)\
-       (*__save_FCT)(__save_ARG);\
+#define __libc_cleanup_end(DOIT)                                           \
+  if ((DOIT) && __save_FCT != 0)                                           \
+    (*__save_FCT)(__save_ARG);                                             \
 
 #define __libc_cleanup_push(fct, arg) __libc_cleanup_region_start (1, fct, arg)
 #define __libc_cleanup_pop(execute) __libc_cleanup_region_end (execute)
 
 /* We need portable names for some of the functions.  */
-#define __libc_mutex_unlock
+#define __libc_mutex_unlock uth_mutex_unlock
 
 /* Type for key of thread specific data.  */
-typedef int __libc_key_t;
+typedef dtls_key_t __libc_key_t;
 
 /* Create key for thread specific data.  */
-#define __libc_key_create(KEY,DEST) -1
+#define __libc_key_create(KEY,DEST)    (*KEY = dtls_key_create(DEST), 0)
 
 /* Set thread-specific data associated with KEY to VAL.  */
-#define __libc_setspecific(KEY,VAL) ((void)0)
+#define __libc_setspecific(KEY,VAL)    set_dtls(KEY, VAL)
 
 /* Get thread-specific data associated with KEY.  */
-#define __libc_getspecific(KEY) 0
+#define __libc_getspecific(KEY) get_dtls(KEY)
 
 #endif /* bits/libc-lock.h */
index e2678cc..f3a09ea 100644 (file)
 #ifndef _BITS_STDIO_LOCK_H
 #define _BITS_STDIO_LOCK_H 1
 
-#include <bits/libc-lock.h>
-#include <lowlevellock.h>
-
-
-/* The locking here is very inexpensive, even for inlining.  */
-#define _IO_lock_inexpensive   1
+/* AKAROS: upper half from bits/stdio-lock.h */
 
-typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
-
-#define _IO_lock_initializer { LLL_LOCK_INITIALIZER, 0, NULL }
-
-#define _IO_lock_init(_name) \
-  ((_name) = (_IO_lock_t) _IO_lock_initializer , 0)
+#include <bits/libc-lock.h>
 
-#define _IO_lock_fini(_name) \
-  ((void) 0)
+__libc_lock_define_recursive (typedef, _IO_lock_t)
 
-#define _IO_lock_lock(_name) \
-  do {                                                                       \
-    void *__self = THREAD_SELF;                                                      \
-    if ((_name).owner != __self)                                             \
-      {                                                                              \
-       lll_lock ((_name).lock, LLL_PRIVATE);                                 \
-        (_name).owner = __self;                                                      \
-      }                                                                              \
-    ++(_name).cnt;                                                           \
-  } while (0)
-
-#define _IO_lock_trylock(_name) \
-  ({                                                                         \
-    int __result = 0;                                                        \
-    void *__self = THREAD_SELF;                                                      \
-    if ((_name).owner != __self)                                             \
-      {                                                                              \
-        if (lll_trylock ((_name).lock) == 0)                                 \
-          {                                                                  \
-            (_name).owner = __self;                                          \
-            (_name).cnt = 1;                                                 \
-          }                                                                  \
-        else                                                                 \
-          __result = EBUSY;                                                  \
-      }                                                                              \
-    else                                                                     \
-      ++(_name).cnt;                                                         \
-    __result;                                                                \
-  })
-
-#define _IO_lock_unlock(_name) \
-  do {                                                                       \
-    if (--(_name).cnt == 0)                                                  \
-      {                                                                              \
-        (_name).owner = NULL;                                                \
-       lll_unlock ((_name).lock, LLL_PRIVATE);                               \
-      }                                                                              \
-  } while (0)
+/* We need recursive (counting) mutexes.  */
+#ifdef _LIBC_LOCK_RECURSIVE_INITIALIZER
+# define _IO_lock_initializer _LIBC_LOCK_RECURSIVE_INITIALIZER
+#elif _IO_MTSAFE_IO
+ #error libio needs recursive mutexes for _IO_MTSAFE_IO
+#endif
 
+#define _IO_lock_init(_name)   __libc_lock_init_recursive (_name)
+#define _IO_lock_fini(_name)   __libc_lock_fini_recursive (_name)
+#define _IO_lock_lock(_name)   __libc_lock_lock_recursive (_name)
+#define _IO_lock_trylock(_name)        __libc_lock_trylock_recursive (_name)
+#define _IO_lock_unlock(_name) __libc_lock_unlock_recursive (_name)
 
 
 #define _IO_cleanup_region_start(_fct, _fp) \
@@ -84,6 +46,11 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
 #define _IO_cleanup_region_end(_doit) \
   __libc_cleanup_region_end (_doit)
 
+
+/* AKAROS: lower half from nptl/sysdeps/pthreads/bits/stdio-lock.h.  Not sure if
+ * this is critical or not.  won't build with the standard bits/stdio-lock.h. */
+
+
 #if defined _LIBC && !defined NOT_IN_libc
 
 # ifdef __EXCEPTIONS
index 444029b..be8060e 100644 (file)
 #define _MALLOC_MACHINE_H
 
 #include <atomic.h>
-#include <bits/libc-lock.h>
+#include <lowlevellock.h>
 
-__libc_lock_define (typedef, mutex_t)
+typedef struct spin_pdr_lock mutex_t;
 
-#define mutex_init(m)          __libc_lock_init (*(m))
-#define mutex_lock(m)          __libc_lock_lock (*(m))
-#define mutex_trylock(m)       __libc_lock_trylock (*(m))
-#define mutex_unlock(m)                __libc_lock_unlock (*(m))
-#define MUTEX_INITIALIZER      LLL_LOCK_INITIALIZER
+/* These macros expect to take a pointer to the object */
+#define mutex_init(m)          spin_pdr_init(m)
+#define mutex_lock(m)          spin_pdr_lock(m)
+#define mutex_trylock(m)       ({ spin_pdr_trylock(m) ? 0 : EBUSY; })
+#define mutex_unlock(m)                spin_pdr_unlock(m)
+#define MUTEX_INITIALIZER      SPINPDR_INITIALIZER
 
 /* thread specific data for glibc */
 
@@ -41,6 +42,8 @@ __libc_tsd_define (static, void *, MALLOC)    /* declaration/common definition */
 #define tsd_setspecific(key, data)     __libc_tsd_set (void *, MALLOC, (data))
 #define tsd_getspecific(key, vptr)     ((vptr) = __libc_tsd_get (void *, MALLOC))
 
+/* TODO: look into pthread's version.  We might need this, and it could be that
+ * glibc has the fork_cbs already. */
 #define thread_atfork(prepare, parent, child) do {} while(0)
 
 #include <sysdeps/generic/malloc-machine.h>
index d11224e..ee71904 100644 (file)
@@ -11,6 +11,8 @@
 #include <parlib/assert.h>
 #include <parlib/spinlock.h>
 #include <parlib/timing.h>
+#include <parlib/uthread.h>
+#include <parlib/dtls.h>
 #include <stdbool.h>
 
 /* Here we define functions and variables that are really defined in parlib, but
  * This also doesn't work for ld.so, which doesn't link against parlib.  That's
  * probably a good thing (uthread constructors would be a mess for ld, I bet).
  * But that does mean that these stubs need to actually do something for
- * functions that ld.so calls.  See the notes below for more info. */
+ * functions that ld.so calls.
+ *
+ * Also, be careful and paranoid.  If you change or add functions in here,
+ * recompile apps that link against libc - even if they link dynamically.
+ * Otherwise, when they linked with libc.so, *libc itself* (not the actual
+ * program) would not find the parlib functions - it would still use these
+ * functions.  I don't have a good explanation for it, but that's what seemed to
+ * happen.
+ *
+ * For an example, if you write(2, "foo\n", 4) on every lock acquisition, you'll
+ * see one foo per process, which I think comes from ld (back when it used
+ * spin_pdr locks for the rtld lock).  Later functions that call spin_pdr_lock,
+ * whether from the app, parlib, or libc, do not output foo.  This is not the
+ * case if the application was not rebuilt before this change (e.g. bash, ssh,
+ * etc). */
 
 __thread int __weak_vcoreid = 0;
 extern __thread int __vcoreid __attribute__ ((weak, alias ("__weak_vcoreid")));
@@ -65,41 +81,27 @@ uint64_t __tsc2nsec(uint64_t tsc_time)
 }
 weak_alias(__tsc2nsec, tsc2nsec)
 
-/* ld.so calls these, so we need them to work.  We don't need them to be
- * thread-safe, since we're single-threaded, but we do need them to use the
- * right values for 'locked' and 'unlocked'.
- *
- * Note that for this change I needed to recompile the binaries that link with
- * libc - even if they link dynamically.  Otherwise, when they linked with
- * libc.so, *libc itself* (not the actual program) would not find the parlib
- * functions - it would still use these functions.  Lots of glibc functions
- * (printf, fflush, etc) call some version of spin_pdr_lock (lll_lock).
- *
- * For an example, if you write(2, "foo\n", 4) on ever lock acquisition, you'll
- * see one foo per process, which I think comes from ld.  Later functions that
- * call spin_pdr_lock, whether from the app, parlib, or libc, do not output foo.
- * This is not the case if the application was not rebuilt before this change
- * (e.g. bash, ssh, etc). */
+void __spin_pdr_init(struct spin_pdr_lock *pdr_lock)
+{
+       assert(0);      /* stub for linkage only; parlib's strong symbol overrides this weak alias */
+}
+weak_alias(__spin_pdr_init, spin_pdr_init)
+
 bool __spin_pdr_trylock(struct spin_pdr_lock *pdr_lock)
 {
-       if (pdr_lock->lock != SPINPDR_UNLOCKED)
-               return FALSE;
-       pdr_lock->lock = 0;
-       return TRUE;
+       assert(0);      /* NOTE(review): bool fn falls off the end if NDEBUG disables assert */
 }
 weak_alias(__spin_pdr_trylock, spin_pdr_trylock)
 
 void __spin_pdr_lock(struct spin_pdr_lock *pdr_lock)
 {
-       assert(pdr_lock->lock == SPINPDR_UNLOCKED);
-       /* assume we're vcore 0 */
-       pdr_lock->lock = 0;
+       assert(0);      /* must never run: ld.so no longer uses spin_pdr for the rtld lock */
 }
 weak_alias(__spin_pdr_lock, spin_pdr_lock)
 
 void __spin_pdr_unlock(struct spin_pdr_lock *pdr_lock)
 {
-       pdr_lock->lock = SPINPDR_UNLOCKED;
+       assert(0);      /* must never run: parlib provides the real unlock */
 }
 weak_alias(__spin_pdr_unlock, spin_pdr_unlock)
 
@@ -116,3 +118,123 @@ void __uthread_sched_yield(void)
        ros_syscall(SYS_proc_yield, TRUE, 0, 0, 0, 0, 0);
 }
 weak_alias(__uthread_sched_yield, uthread_sched_yield)
+
+void __uth_mutex_init(uth_mutex_t *m)
+{
+       assert(0);      /* stubs: real uth_mutex_* live in parlib and win via weak_alias */
+}
+weak_alias(__uth_mutex_init, uth_mutex_init)
+
+void __uth_mutex_destroy(uth_mutex_t *m)
+{
+       assert(0);
+}
+weak_alias(__uth_mutex_destroy, uth_mutex_destroy)
+
+void __uth_mutex_lock(uth_mutex_t *m)
+{
+       assert(0);
+}
+weak_alias(__uth_mutex_lock, uth_mutex_lock)
+
+bool __uth_mutex_trylock(uth_mutex_t *m)
+{
+       assert(0);      /* NOTE(review): bool fn falls off the end if NDEBUG disables assert */
+}
+weak_alias(__uth_mutex_trylock, uth_mutex_trylock)
+
+void __uth_mutex_unlock(uth_mutex_t *m)
+{
+       assert(0);
+}
+weak_alias(__uth_mutex_unlock, uth_mutex_unlock)
+
+void __uth_recurse_mutex_init(uth_recurse_mutex_t *r_m)
+{
+       assert(0);      /* stubs: real uth_recurse_mutex_* live in parlib (weak_alias) */
+}
+weak_alias(__uth_recurse_mutex_init, uth_recurse_mutex_init)
+
+void __uth_recurse_mutex_destroy(uth_recurse_mutex_t *r_m)
+{
+       assert(0);
+}
+weak_alias(__uth_recurse_mutex_destroy, uth_recurse_mutex_destroy)
+
+void __uth_recurse_mutex_lock(uth_recurse_mutex_t *r_m)
+{
+       assert(0);
+}
+weak_alias(__uth_recurse_mutex_lock, uth_recurse_mutex_lock)
+
+bool __uth_recurse_mutex_trylock(uth_recurse_mutex_t *r_m)
+{
+       assert(0);      /* NOTE(review): bool fn falls off the end if NDEBUG disables assert */
+}
+weak_alias(__uth_recurse_mutex_trylock, uth_recurse_mutex_trylock)
+
+void __uth_recurse_mutex_unlock(uth_recurse_mutex_t *r_m)
+{
+       assert(0);
+}
+weak_alias(__uth_recurse_mutex_unlock, uth_recurse_mutex_unlock)
+
+void __uth_rwlock_init(uth_rwlock_t *rwl)
+{
+       assert(0);      /* stubs: real uth_rwlock_* live in parlib (weak_alias) */
+}
+weak_alias(__uth_rwlock_init, uth_rwlock_init)
+
+void __uth_rwlock_destroy(uth_rwlock_t *rwl)
+{
+       assert(0);
+}
+weak_alias(__uth_rwlock_destroy, uth_rwlock_destroy)
+
+void __uth_rwlock_rdlock(uth_rwlock_t *rwl)
+{
+       assert(0);
+}
+weak_alias(__uth_rwlock_rdlock, uth_rwlock_rdlock)
+
+bool __uth_rwlock_try_rdlock(uth_rwlock_t *rwl)
+{
+       assert(0);      /* NOTE(review): bool fn falls off the end if NDEBUG disables assert */
+}
+weak_alias(__uth_rwlock_try_rdlock, uth_rwlock_try_rdlock)
+
+void __uth_rwlock_wrlock(uth_rwlock_t *rwl)
+{
+       assert(0);
+}
+weak_alias(__uth_rwlock_wrlock, uth_rwlock_wrlock)
+
+bool __uth_rwlock_try_wrlock(uth_rwlock_t *rwl)
+{
+       assert(0);      /* NOTE(review): same NDEBUG missing-return caveat as try_rdlock */
+}
+weak_alias(__uth_rwlock_try_wrlock, uth_rwlock_try_wrlock)
+
+void __uth_rwlock_unlock(uth_rwlock_t *rwl)
+{
+       assert(0);
+}
+weak_alias(__uth_rwlock_unlock, uth_rwlock_unlock)
+
+dtls_key_t __dtls_key_create(dtls_dtor_t dtor)
+{
+       assert(0);      /* stubs: real dtls_* live in parlib (weak_alias); also no return if NDEBUG */
+}
+weak_alias(__dtls_key_create, dtls_key_create)
+
+void __set_dtls(dtls_key_t key, const void *dtls)
+{
+       assert(0);
+}
+weak_alias(__set_dtls, set_dtls)
+
+void *__get_dtls(dtls_key_t key)
+{
+       assert(0);      /* NOTE(review): pointer-returning fn falls off the end if NDEBUG */
+}
+weak_alias(__get_dtls, get_dtls)
index 47a5045..4c6fbb6 100644 (file)
 #include <ros/memlayout.h>
 #include <ros/procdata.h>
 #include <sys/mman.h>
+#include <parlib/spinlock.h>
 
 static uintptr_t curbrk = BRK_START;
-__libc_lock_define_initialized(static, __brk_lock);
+
+/* brk() is called by malloc, which holds spinlocks.  So we need to use
+ * spinlocks too.  It is possible that the kernel will block in the mmap() call,
+ * in which case the process would spin.  That's already the case for malloc,
+ * regardless of what we do here in brk() (since ultimately, brk() can block). */
+static struct spin_pdr_lock __brk_lock = SPINPDR_INITIALIZER;
 
 static bool is_early_scp(void)
 {
@@ -42,14 +48,14 @@ static void brk_lock(void)
 {
        if (is_early_scp())
                return;
-       __libc_lock_lock(__brk_lock);
+       spin_pdr_lock(&__brk_lock);     /* LLL spinlock, not a mutex: malloc calls brk while holding spinlocks */
 }
 
 static void brk_unlock(void)
 {
        if (is_early_scp())
                return;
-       __libc_lock_unlock(__brk_lock);
+       spin_pdr_unlock(&__brk_lock);   /* pairs with spin_pdr_lock in brk_lock() */
 }
 
 static uintptr_t