pthread: Allow new threads in forked children
[akaros.git] user/parlib/thread0_sched.c
/* Copyright (c) 2015 Google, Inc.
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * thread0_sched: a basic scheduler for thread0, used by SCPs without a
 * multithreaded 2LS linked in.
 *
 * This is closely coupled with uthread.c */

#include <ros/arch/membar.h>
#include <parlib/arch/atomic.h>
#include <parlib/parlib.h>
#include <parlib/vcore.h>
#include <parlib/uthread.h>
#include <parlib/event.h>
#include <parlib/arch/trap.h>
#include <parlib/ros_debug.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/fork_cb.h>

static void thread0_sched_init(void);
static void thread0_sched_entry(void);
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *sysc);
static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx);
static void thread0_thread_runnable(struct uthread *uth);
static void thread0_thread_has_blocked(struct uthread *uth, int flags);
static void thread0_thread_exited(struct uthread *uth);
static struct uthread *thread0_thread_create(void *(*func)(void *), void *arg);
static void thread0_sync_init(uth_sync_t *s);
static void thread0_sync_destroy(uth_sync_t *s);
static void thread0_sync_enqueue(struct uthread *uth, uth_sync_t *s);
static struct uthread *thread0_sync_get_next(uth_sync_t *s);
static bool thread0_sync_get_uth(uth_sync_t *s, struct uthread *uth);
static void thread0_sync_swap(uth_sync_t *a, uth_sync_t *b);
static bool thread0_sync_is_empty(uth_sync_t *s);

/* externed into uthread.c */
struct schedule_ops thread0_2ls_ops = {
        .sched_init = thread0_sched_init,
        .sched_entry = thread0_sched_entry,
        .thread_blockon_sysc = thread0_thread_blockon_sysc,
        .thread_refl_fault = thread0_thread_refl_fault,
        .thread_runnable = thread0_thread_runnable,
        .thread_paused = thread0_thread_runnable,
        .thread_has_blocked = thread0_thread_has_blocked,
        .thread_exited = thread0_thread_exited,
        .thread_create = thread0_thread_create,
        .sync_init = thread0_sync_init,
        .sync_destroy = thread0_sync_destroy,
        .sync_enqueue = thread0_sync_enqueue,
        .sync_get_next = thread0_sync_get_next,
        .sync_get_uth = thread0_sync_get_uth,
        .sync_swap = thread0_sync_swap,
        .sync_is_empty = thread0_sync_is_empty,
};

struct schedule_ops *sched_ops __attribute__((weak)) = &thread0_2ls_ops;

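/* Because sched_ops is weak, linking in a full 2LS overrides this default
 * simply by defining a strong sched_ops.  A minimal sketch, with hypothetical
 * names:
 *
 *      struct schedule_ops my_2ls_ops = {
 *              .sched_init = my_sched_init,
 *              .sched_entry = my_sched_entry,
 *              // ... fill in the remaining ops ...
 *      };
 *      struct schedule_ops *sched_ops = &my_2ls_ops;
 */
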
/* externed into uthread.c */
struct uthread *thread0_uth;

/* Our thread0 is allocated in uthread.c as a bare struct uthread, so we don't
 * actually attach this mgmt info to it.  Since we only have the one thread, a
 * single static copy is enough. */
struct thread0_info {
        bool                                            is_blocked;
};
static struct thread0_info thread0_info;
static struct event_queue *sysc_evq;

void thread0_handle_syscall(struct event_msg *ev_msg,
                            unsigned int ev_type, void *data)
{
        /* We only care that a syscall completed; the message contents are
         * ignored.  Clearing is_blocked lets sched_entry restart thread0. */
        thread0_info.is_blocked = FALSE;
}

/* Thread0 has no 2LS-specific state to quiesce or repair across fork(), but
 * we still register these hooks so uthread's fork callbacks have something to
 * call. */
static void thread0_pre_fork(void)
{
}

static void thread0_post_fork(pid_t ret)
{
}

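/* A fuller 2LS would use these hooks to fix up scheduler state around fork().
 * A hypothetical sketch (names are illustrative, not from this codebase):
 *
 *      static void my_pre_fork(void)
 *      {
 *              // quiesce: stop any bookkeeping that must not straddle fork()
 *      }
 *
 *      static void my_post_fork(pid_t ret)
 *      {
 *              if (!ret)       // in the child, only the forking thread exists
 *                      my_rebuild_thread_lists();
 *      }
 */
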
static void thread0_sched_init(void)
{
        int ret;

        ret = posix_memalign((void**)&thread0_uth, __alignof__(struct uthread),
                             sizeof(struct uthread));
        assert(!ret);
        /* aggressively zero the uthread to catch uninitialized-field bugs */
        memset(thread0_uth, 0, sizeof(struct uthread));
        memset(&thread0_info, 0, sizeof(thread0_info));
        /* we don't care about the message contents, so don't bother with a
         * UCQ; a bitmap mbox is enough to know a syscall completed */
        sysc_evq = get_eventq(EV_MBOX_BITMAP);
        sysc_evq->ev_flags = EVENT_INDIR | EVENT_WAKEUP;
        uthread_2ls_init(thread0_uth, thread0_handle_syscall, NULL);
        pre_fork_2ls = thread0_pre_fork;
        post_fork_2ls = thread0_post_fork;
}

/* Thread0 scheduler ops (for processes that haven't linked in a full 2LS) */
static void thread0_sched_entry(void)
{
        /* TODO: support signal handling whenever we run a uthread */
        if (current_uthread) {
                uthread_prep_pending_signals(current_uthread);
                run_current_uthread();
                assert(0);
        }
        while (1) {
                if (!thread0_info.is_blocked) {
                        uthread_prep_pending_signals(thread0_uth);
                        run_uthread(thread0_uth);
                        assert(0);
                }
                sys_yield(FALSE);
                handle_events(0);
        }
}

static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
{
        struct syscall *sysc = (struct syscall*)arg;

        thread0_thread_has_blocked(uthread, 0);
        /* register_evq returns FALSE if the syscall is already done, in which
         * case there is nothing to wait for */
        if (!register_evq(sysc, sysc_evq))
                thread0_thread_runnable(uthread);
}

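/* For reference, the path a blocked syscall takes under this 2LS:
 *   1) thread0_thread_blockon_sysc marks thread0 blocked and hooks sysc_evq
 *      to the syscall,
 *   2) thread0_sched_entry spins in vcore context, yielding and calling
 *      handle_events(),
 *   3) when the kernel completes the syscall, it fires an event;
 *      thread0_handle_syscall clears is_blocked,
 *   4) the sched_entry loop sees thread0 is runnable and restarts it. */
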
static void refl_error(struct uthread *uth, unsigned int trap_nr,
                       unsigned int err, unsigned long aux)
{
        printf("Thread has unhandled fault: %u, err: %u, aux: %p\n",
               trap_nr, err, (void*)aux);
        /* Note that uthread.c already copied out our ctx into the uth
         * struct */
        print_user_context(&uth->u_ctx);
        printf("Turn on printx to spew unhandled, malignant trap info\n");
        exit(-1);
}

static bool handle_page_fault(struct uthread *uth, unsigned int err,
                              unsigned long aux)
{
        /* We can only service faults on addresses backed by a VMR (e.g. a
         * file mmap); anything else is a real fault */
        if (!(err & PF_VMR_BACKED))
                return FALSE;
        syscall_async(&uth->local_sysc, SYS_populate_va, aux, 1);
        __block_uthread_on_async_sysc(uth);
        return TRUE;
}

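/* SYS_populate_va asks the kernel to populate one page at aux.  Once that
 * async syscall completes, the uthread is restarted and retries the faulting
 * access, which should now succeed. */
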
static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx)
{
        unsigned int trap_nr = __arch_refl_get_nr(ctx);
        unsigned int err = __arch_refl_get_err(ctx);
        unsigned long aux = __arch_refl_get_aux(ctx);

        assert(ctx->type == ROS_HW_CTX);
        switch (trap_nr) {
        case HW_TRAP_PAGE_FAULT:
                if (!handle_page_fault(uth, err, aux))
                        refl_error(uth, trap_nr, err, aux);
                break;
        default:
                refl_error(uth, trap_nr, err, aux);
        }
}

static void thread0_thread_runnable(struct uthread *uth)
{
        thread0_info.is_blocked = FALSE;
}

static void thread0_thread_has_blocked(struct uthread *uth, int flags)
{
        assert(!thread0_info.is_blocked);
        thread0_info.is_blocked = TRUE;
}

/* Actually, a 2LS only needs to implement this if it calls
 * uth_2ls_thread_exit().  Keep it here to catch bugs. */
static void thread0_thread_exited(struct uthread *uth)
{
        assert(0);
}

static struct uthread *thread0_thread_create(void *(*func)(void *), void *arg)
{
        panic("Thread0 sched asked to create more threads!");
}

static void thread0_sync_init(uth_sync_t *s)
{
        /* Poison the sync object; these sync ops never dereference it, so any
         * use of this memory is a bug */
        memset(s, 0x5a, sizeof(uth_sync_t));
}

static void thread0_sync_destroy(uth_sync_t *s)
{
}

static void thread0_sync_enqueue(struct uthread *uth, uth_sync_t *s)
{
}

static struct uthread *thread0_sync_get_next(uth_sync_t *s)
{
        if (thread0_info.is_blocked) {
                /* Note we don't clear is_blocked.  Runnable does that, which
                 * should be called before the next get_next (since we have
                 * only one thread). */
                return thread0_uth;
        } else {
                return NULL;
        }
}

static bool thread0_sync_get_uth(uth_sync_t *s, struct uthread *uth)
{
        assert(uth == thread0_uth);
        if (thread0_info.is_blocked) {
                /* Note we don't clear is_blocked.  Runnable does that. */
                return TRUE;
        }
        return FALSE;
}

static void thread0_sync_swap(uth_sync_t *a, uth_sync_t *b)
{
}

static bool thread0_sync_is_empty(uth_sync_t *s)
{
        return !thread0_info.is_blocked;
}