pthread: Allow new threads in forked children
[akaros.git] / user / parlib / thread0_sched.c
index aed2dce..ca1d0a0 100644 (file)
 #include <parlib/arch/trap.h>
 #include <parlib/ros_debug.h>
 #include <stdlib.h>
+#include <sys/fork_cb.h>
 
+static void thread0_sched_init(void);
 static void thread0_sched_entry(void);
 static void thread0_thread_blockon_sysc(struct uthread *uthread, void *sysc);
 static void thread0_thread_refl_fault(struct uthread *uth,
                                       struct user_context *ctx);
 static void thread0_thread_runnable(struct uthread *uth);
-static void thread0_thread_has_blocked(struct uthread *uth, uth_sync_t sync,
-                                       int flags);
+static void thread0_thread_has_blocked(struct uthread *uth, int flags);
 static void thread0_thread_exited(struct uthread *uth);
 static struct uthread *thread0_thread_create(void *(*func)(void *), void *arg);
-static uth_sync_t thread0_sync_alloc(void);
-static void thread0_sync_free(uth_sync_t);
-static struct uthread *thread0_sync_get_next(uth_sync_t);
-static bool thread0_sync_get_uth(uth_sync_t s, struct uthread *uth);
+static void thread0_sync_init(uth_sync_t *s);
+static void thread0_sync_destroy(uth_sync_t *s);
+static void thread0_sync_enqueue(struct uthread *uth, uth_sync_t *s);
+static struct uthread *thread0_sync_get_next(uth_sync_t *s);
+static bool thread0_sync_get_uth(uth_sync_t *s, struct uthread *uth);
+static void thread0_sync_swap(uth_sync_t *a, uth_sync_t *b);
+static bool thread0_sync_is_empty(uth_sync_t *s);
 
 /* externed into uthread.c */
 struct schedule_ops thread0_2ls_ops = {
+       .sched_init = thread0_sched_init,
        .sched_entry = thread0_sched_entry,
        .thread_blockon_sysc = thread0_thread_blockon_sysc,
        .thread_refl_fault = thread0_thread_refl_fault,
@@ -41,12 +46,17 @@ struct schedule_ops thread0_2ls_ops = {
        .thread_has_blocked = thread0_thread_has_blocked,
        .thread_exited = thread0_thread_exited,
        .thread_create = thread0_thread_create,
-       .sync_alloc = thread0_sync_alloc,
-       .sync_free = thread0_sync_free,
+       .sync_init = thread0_sync_init,
+       .sync_destroy = thread0_sync_destroy,
+       .sync_enqueue = thread0_sync_enqueue,
        .sync_get_next = thread0_sync_get_next,
        .sync_get_uth = thread0_sync_get_uth,
+       .sync_swap = thread0_sync_swap,
+       .sync_is_empty = thread0_sync_is_empty,
 };
 
+struct schedule_ops *sched_ops __attribute__((weak)) = &thread0_2ls_ops;
+
 /* externed into uthread.c */
 struct uthread *thread0_uth;
 
@@ -65,12 +75,29 @@ void thread0_handle_syscall(struct event_msg *ev_msg,
        thread0_info.is_blocked = FALSE;
 }
 
-void thread0_lib_init(void)
+static void thread0_pre_fork(void)
 {
+}
+
+static void thread0_post_fork(pid_t ret)
+{
+}
+
+void thread0_sched_init(void)
+{
+       int ret;
+
+       ret = posix_memalign((void**)&thread0_uth, __alignof__(struct uthread),
+                            sizeof(struct uthread));
+       assert(!ret);
+       memset(thread0_uth, 0, sizeof(struct uthread)); /* aggressively 0 for bugs */
        memset(&thread0_info, 0, sizeof(thread0_info));
        /* we don't care about the message, so don't bother with a UCQ */
        sysc_evq = get_eventq(EV_MBOX_BITMAP);
        sysc_evq->ev_flags = EVENT_INDIR | EVENT_WAKEUP;
+       uthread_2ls_init(thread0_uth, thread0_handle_syscall, NULL);
+       pre_fork_2ls = thread0_pre_fork;
+       post_fork_2ls = thread0_post_fork;
 }
 
 /* Thread0 scheduler ops (for processes that haven't linked in a full 2LS) */
@@ -96,7 +123,7 @@ static void thread0_sched_entry(void)
 static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
 {
        struct syscall *sysc = (struct syscall*)arg;
-       thread0_thread_has_blocked(uthread, NULL, 0);
+       thread0_thread_has_blocked(uthread, 0);
        if (!register_evq(sysc, sysc_evq))
                thread0_thread_runnable(uthread);
 }
@@ -146,8 +173,7 @@ static void thread0_thread_runnable(struct uthread *uth)
        thread0_info.is_blocked = FALSE;
 }
 
-static void thread0_thread_has_blocked(struct uthread *uth, uth_sync_t sync,
-                                       int flags)
+static void thread0_thread_has_blocked(struct uthread *uth, int flags)
 {
        assert(!thread0_info.is_blocked);
        thread0_info.is_blocked = TRUE;
@@ -165,16 +191,20 @@ static struct uthread *thread0_thread_create(void *(*func)(void *), void *arg)
        panic("Thread0 sched asked to create more threads!");
 }
 
-static uth_sync_t thread0_sync_alloc(void)
+static void thread0_sync_init(uth_sync_t *s)
 {
-       return (void*)0xf00baa;
+       memset(s, 0x5a, sizeof(uth_sync_t));
 }
 
-static void thread0_sync_free(uth_sync_t s)
+static void thread0_sync_destroy(uth_sync_t *s)
 {
 }
 
-static struct uthread *thread0_sync_get_next(uth_sync_t s)
+static void thread0_sync_enqueue(struct uthread *uth, uth_sync_t *s)
+{
+}
+
+static struct uthread *thread0_sync_get_next(uth_sync_t *s)
 {
        if (thread0_info.is_blocked) {
                /* Note we don't clear is_blocked.  Runnable does that, which should be
@@ -185,7 +215,7 @@ static struct uthread *thread0_sync_get_next(uth_sync_t s)
        }
 }
 
-static bool thread0_sync_get_uth(uth_sync_t s, struct uthread *uth)
+static bool thread0_sync_get_uth(uth_sync_t *s, struct uthread *uth)
 {
        assert(uth == thread0_uth);
        if (thread0_info.is_blocked) {
@@ -194,3 +224,12 @@ static bool thread0_sync_get_uth(uth_sync_t s, struct uthread *uth)
        }
        return FALSE;
 }
+
+static void thread0_sync_swap(uth_sync_t *a, uth_sync_t *b)
+{
+}
+
+static bool thread0_sync_is_empty(uth_sync_t *s)
+{
+       return !thread0_info.is_blocked;
+}