parlib: Implement join/detach() for all uthreads
[akaros.git] / user / parlib / thread0_sched.c
1 /* Copyright (c) 2015 Google, Inc.
2  * Barret Rhoden <brho@cs.berkeley.edu>
3  * See LICENSE for details.
4  *
5  * thread0_sched: a basic scheduler for thread0, used by SCPs without a
6  * multithreaded 2LS linked in.
7  *
8  * This is closely coupled with uthread.c */
9
10 #include <ros/arch/membar.h>
11 #include <parlib/arch/atomic.h>
12 #include <parlib/parlib.h>
13 #include <parlib/vcore.h>
14 #include <parlib/uthread.h>
15 #include <parlib/event.h>
16 #include <parlib/arch/trap.h>
17 #include <stdlib.h>
18
/* Forward declarations for the 2LS ops wired into thread0_2ls_ops below;
 * definitions are at the bottom of this file. */
static void thread0_sched_entry(void);
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *sysc);
static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx);
static void thread0_thread_runnable(struct uthread *uth);
static void thread0_thread_has_blocked(struct uthread *uth, uth_sync_t sync,
                                       int flags);
static void thread0_thread_exited(struct uthread *uth);
static uth_sync_t thread0_sync_alloc(void);
static void thread0_sync_free(uth_sync_t);
static struct uthread *thread0_sync_get_next(uth_sync_t);
static bool thread0_sync_get_uth(uth_sync_t s, struct uthread *uth);
31
/* externed into uthread.c */
struct schedule_ops thread0_2ls_ops = {
	.sched_entry = thread0_sched_entry,
	.thread_blockon_sysc = thread0_thread_blockon_sysc,
	.thread_refl_fault = thread0_thread_refl_fault,
	.thread_runnable = thread0_thread_runnable,
	/* A paused thread needs nothing beyond being marked runnable again. */
	.thread_paused = thread0_thread_runnable,
	.thread_has_blocked = thread0_thread_has_blocked,
	.thread_exited = thread0_thread_exited,
	.sync_alloc = thread0_sync_alloc,
	.sync_free = thread0_sync_free,
	.sync_get_next = thread0_sync_get_next,
	.sync_get_uth = thread0_sync_get_uth,
};
46
/* externed into uthread.c.  The single uthread this scheduler manages; per the
 * comment below, uthread.c allocates it as a plain struct uthread. */
struct uthread *thread0_uth;
49
/* Our thread0 is actually allocated in uthread as just a struct uthread, so we
 * don't actually attach this mgmt info to it.  But since we just have one
 * thread, it doesn't matter. */
struct thread0_info {
	/* TRUE while thread0 is parked on a syscall or sync object. */
	bool                                            is_blocked;
};
static struct thread0_info thread0_info;
/* Event queue registered per-syscall in blockon_sysc; its delivery wakes us
 * up (see thread0_handle_syscall, which clears is_blocked). */
static struct event_queue *sysc_evq;
58
/* Event handler for syscall completion: our one thread is no longer blocked.
 * The message contents are ignored -- any delivery means "wake up".
 * NOTE(review): presumably registered for EV_SYSCALL by uthread.c -- confirm. */
void thread0_handle_syscall(struct event_msg *ev_msg,
                            unsigned int ev_type, void *data)
{
	thread0_info.is_blocked = FALSE;
}
64
65 void thread0_lib_init(void)
66 {
67         memset(&thread0_info, 0, sizeof(thread0_info));
68         /* we don't care about the message, so don't bother with a UCQ */
69         sysc_evq = get_eventq(EV_MBOX_BITMAP);
70         sysc_evq->ev_flags = EVENT_INDIR | EVENT_WAKEUP;
71 }
72
/* Thread0 scheduler ops (for processes that haven't linked in a full 2LS) */
static void thread0_sched_entry(void)
{
	/* TODO: support signal handling whenever we run a uthread */
	if (current_uthread) {
		uthread_prep_pending_signals(current_uthread);
		run_current_uthread();
		assert(0);	/* run_current_uthread() does not return */
	}
	while (1) {
		if (!thread0_info.is_blocked) {
			uthread_prep_pending_signals(thread0_uth);
			run_uthread(thread0_uth);
			assert(0);	/* run_uthread() does not return */
		}
		/* Thread0 is blocked: give up the core, then drain any events
		 * that arrived (an event may clear is_blocked via
		 * thread0_handle_syscall, letting the loop run thread0). */
		sys_yield(FALSE);
		handle_events(0);
	}
}
92
93 static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
94 {
95         struct syscall *sysc = (struct syscall*)arg;
96         thread0_thread_has_blocked(uthread, NULL, 0);
97         if (!register_evq(sysc, sysc_evq))
98                 thread0_thread_runnable(uthread);
99 }
100
101 static void refl_error(struct uthread *uth, unsigned int trap_nr,
102                        unsigned int err, unsigned long aux)
103 {
104         printf("Thread has unhandled fault: %d, err: %d, aux: %p\n",
105                trap_nr, err, aux);
106         /* Note that uthread.c already copied out our ctx into the uth
107          * struct */
108         print_user_context(&uth->u_ctx);
109         printf("Turn on printx to spew unhandled, malignant trap info\n");
110         exit(-1);
111 }
112
113 static bool handle_page_fault(struct uthread *uth, unsigned int err,
114                               unsigned long aux)
115 {
116         if (!(err & PF_VMR_BACKED))
117                 return FALSE;
118         syscall_async(&uth->local_sysc, SYS_populate_va, aux, 1);
119         __block_uthread_on_async_sysc(uth);
120         return TRUE;
121 }
122
123 static void thread0_thread_refl_fault(struct uthread *uth,
124                                       struct user_context *ctx)
125 {
126         unsigned int trap_nr = __arch_refl_get_nr(ctx);
127         unsigned int err = __arch_refl_get_err(ctx);
128         unsigned long aux = __arch_refl_get_aux(ctx);
129
130         assert(ctx->type == ROS_HW_CTX);
131         switch (trap_nr) {
132         case HW_TRAP_PAGE_FAULT:
133                 if (!handle_page_fault(uth, err, aux))
134                         refl_error(uth, trap_nr, err, aux);
135                 break;
136         default:
137                 refl_error(uth, trap_nr, err, aux);
138         }
139 }
140
/* 2LS op: uth (always thread0 here) can run again.  Clearing the flag lets
 * the sched_entry loop pick it up. */
static void thread0_thread_runnable(struct uthread *uth)
{
	thread0_info.is_blocked = FALSE;
}
145
/* 2LS op: uth (always thread0 here) has blocked.  With a single thread we
 * only record the fact; sync and flags are ignored. */
static void thread0_thread_has_blocked(struct uthread *uth, uth_sync_t sync,
                                       int flags)
{
	/* Blocking while already blocked would mean a lost wakeup. */
	assert(!thread0_info.is_blocked);
	thread0_info.is_blocked = TRUE;
}
152
/* Actually, a 2LS only needs to implement this if it calls
 * uth_2ls_thread_exit().  Keep it here to catch bugs. */
static void thread0_thread_exited(struct uthread *uth)
{
	/* Thread0 never exits through the 2LS; reaching here is a bug. */
	assert(0);
}
159
/* 2LS op: allocate a sync object.  With one thread there is nothing to
 * track, so return a recognizable non-NULL dummy value instead of real
 * storage. */
static uth_sync_t thread0_sync_alloc(void)
{
	return (void*)0xf00baa;
}
164
/* 2LS op: free a sync object.  Nothing to do -- sync_alloc hands out a dummy
 * value, not real storage. */
static void thread0_sync_free(uth_sync_t s)
{
}
168
169 static struct uthread *thread0_sync_get_next(uth_sync_t s)
170 {
171         if (thread0_info.is_blocked) {
172                 /* Note we don't clear is_blocked.  Runnable does that, which should be
173                  * called before the next get_next (since we have only one thread). */
174                 return thread0_uth;
175         } else {
176                 return NULL;
177         }
178 }
179
180 static bool thread0_sync_get_uth(uth_sync_t s, struct uthread *uth)
181 {
182         assert(uth == thread0_uth);
183         if (thread0_info.is_blocked) {
184                 /* Note we don't clear is_blocked.  Runnable does that. */
185                 return TRUE;
186         }
187         return FALSE;
188 }