user/parlib/thread0_sched.c
/* Copyright (c) 2015 Google, Inc.
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * thread0_sched: a basic scheduler for thread0, used by SCPs without a
 * multithreaded 2LS linked in.
 *
 * This is closely coupled with uthread.c */

#include <ros/arch/membar.h>
#include <parlib/arch/atomic.h>
#include <parlib/parlib.h>
#include <parlib/vcore.h>
#include <parlib/uthread.h>
#include <parlib/event.h>
#include <parlib/arch/trap.h>
#include <parlib/ros_debug.h>
#include <stdlib.h>
#include <sys/fork_cb.h>

static void thread0_sched_init(void);
static void thread0_sched_entry(void);
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *sysc);
static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx);
static void thread0_thread_runnable(struct uthread *uth);
static void thread0_thread_has_blocked(struct uthread *uth, int flags);
static void thread0_thread_exited(struct uthread *uth);
static struct uthread *thread0_thread_create(void *(*func)(void *), void *arg);
static void thread0_sync_init(uth_sync_t *s);
static void thread0_sync_destroy(uth_sync_t *s);
static void thread0_sync_enqueue(struct uthread *uth, uth_sync_t *s);
static struct uthread *thread0_sync_get_next(uth_sync_t *s);
static bool thread0_sync_get_uth(uth_sync_t *s, struct uthread *uth);
static void thread0_sync_swap(uth_sync_t *a, uth_sync_t *b);
static bool thread0_sync_is_empty(uth_sync_t *s);

/* externed into uthread.c */
struct schedule_ops thread0_2ls_ops = {
        .sched_init = thread0_sched_init,
        .sched_entry = thread0_sched_entry,
        .thread_blockon_sysc = thread0_thread_blockon_sysc,
        .thread_refl_fault = thread0_thread_refl_fault,
        .thread_runnable = thread0_thread_runnable,
        .thread_paused = thread0_thread_runnable,
        .thread_has_blocked = thread0_thread_has_blocked,
        .thread_exited = thread0_thread_exited,
        .thread_create = thread0_thread_create,
        .sync_init = thread0_sync_init,
        .sync_destroy = thread0_sync_destroy,
        .sync_enqueue = thread0_sync_enqueue,
        .sync_get_next = thread0_sync_get_next,
        .sync_get_uth = thread0_sync_get_uth,
        .sync_swap = thread0_sync_swap,
        .sync_is_empty = thread0_sync_is_empty,
};

struct schedule_ops *sched_ops __attribute__((weak)) = &thread0_2ls_ops;
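
/* Because sched_ops is declared weak, a full 2LS library that defines its
 * own strong sched_ops overrides this default at link time, with no
 * registration call needed.  A minimal sketch of an overriding 2LS (the
 * my_2ls_* names are hypothetical, mirroring thread0_2ls_ops above):
 *
 *	struct schedule_ops my_2ls_ops = {
 *		.sched_init = my_2ls_init,
 *		.sched_entry = my_2ls_entry,
 *		// ... the remaining ops ...
 *	};
 *	struct schedule_ops *sched_ops = &my_2ls_ops;
 */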

/* externed into uthread.c */
struct uthread *thread0_uth;

/* Thread0 is just a bare struct uthread (see the posix_memalign() in
 * thread0_sched_init() below), so there is no larger per-thread struct to
 * embed this mgmt info in.  Since we have only one thread, a single static
 * instance works fine. */
struct thread0_info {
        bool                            is_blocked;
};
static struct thread0_info thread0_info;
static struct event_queue *sysc_evq;

void thread0_handle_syscall(struct event_msg *ev_msg,
                            unsigned int ev_type, void *data)
{
        thread0_info.is_blocked = FALSE;
}
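
/* A note on the handler above: thread0_sched_init() passes it to
 * uthread_2ls_init(), which wires it up to run when a syscall-completion
 * event arrives.  With only one thread, the message contents don't matter:
 * any completion means thread0 can run again, so we just clear is_blocked.
 * That is also why the payload-free bitmap mbox below suffices. */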

/* Thread0 has no fork-time bookkeeping to do; these empty hooks satisfy the
 * 2LS fork callbacks registered below in thread0_sched_init(). */
static void thread0_pre_fork(void)
{
}

static void thread0_post_fork(pid_t ret)
{
}

void thread0_sched_init(void)
{
        int ret;

        ret = posix_memalign((void**)&thread0_uth, __alignof__(struct uthread),
                             sizeof(struct uthread));
        assert(!ret);
        /* aggressively 0 for bugs */
        memset(thread0_uth, 0, sizeof(struct uthread));
        memset(&thread0_info, 0, sizeof(thread0_info));
        /* we don't care about the message, so don't bother with a UCQ */
        sysc_evq = get_eventq(EV_MBOX_BITMAP);
        sysc_evq->ev_flags = EVENT_INDIR | EVENT_WAKEUP;
        uthread_2ls_init(thread0_uth, thread0_handle_syscall, NULL);
        pre_fork_2ls = thread0_pre_fork;
        post_fork_2ls = thread0_post_fork;
}
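
/* Init notes (a reading of the setup above, not new behavior): thread0_uth
 * is heap-allocated with posix_memalign() so the uthread struct meets its
 * alignment requirement, then zeroed because uthread_2ls_init() fills in
 * the rest.  EVENT_WAKEUP should get a yielded SCP woken when the blocked
 * syscall completes, and EVENT_INDIR routes an indirection event at this
 * queue; together they are what let the sys_yield() loop in
 * thread0_sched_entry() below make progress. */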

/* Thread0 scheduler ops (for processes that haven't linked in a full 2LS) */
static void thread0_sched_entry(void)
{
        /* TODO: support signal handling whenever we run a uthread */
        if (current_uthread) {
                uthread_prep_pending_signals(current_uthread);
                run_current_uthread();
                assert(0);
        }
        while (1) {
                if (!thread0_info.is_blocked) {
                        uthread_prep_pending_signals(thread0_uth);
                        run_uthread(thread0_uth);
                        assert(0);
                }
                sys_yield(FALSE);
                handle_events(0);
        }
}
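
/* The entry loop above is the whole scheduler: if thread0 is runnable, run
 * it (run_uthread() never returns, hence the assert(0)); otherwise yield
 * back to the kernel and, once woken, drain events, which can run
 * thread0_handle_syscall() and clear is_blocked.  In pseudocode:
 *
 *	loop:
 *		if thread0 runnable -> run it (no return)
 *		yield to the kernel (wake on event)
 *		handle events (may mark thread0 runnable)
 */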

static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
{
        struct syscall *sysc = (struct syscall*)arg;

        thread0_thread_has_blocked(uthread, 0);
        if (!register_evq(sysc, sysc_evq))
                thread0_thread_runnable(uthread);
}
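
/* The register_evq() call above is where the blocking race gets handled:
 * if the syscall completed before we could attach sysc_evq, registration
 * fails and we immediately mark the thread runnable instead of waiting on
 * an event that will never fire.  This is the usual "register, then
 * re-check" pattern for async syscalls. */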

static void refl_error(struct uthread *uth, unsigned int trap_nr,
                       unsigned int err, unsigned long aux)
{
        printf("Thread has unhandled fault: %d, err: %d, aux: %p\n",
               trap_nr, err, (void*)aux);
        /* Note that uthread.c already copied out our ctx into the uth
         * struct */
        print_user_context(&uth->u_ctx);
        printf("Turn on printx to spew unhandled, malignant trap info\n");
        exit(-1);
}

static bool handle_page_fault(struct uthread *uth, unsigned int err,
                              unsigned long aux)
{
        if (!(err & PF_VMR_BACKED))
                return FALSE;
        syscall_async(&uth->local_sysc, SYS_populate_va, aux, 1);
        __block_uthread_on_async_sysc(uth);
        return TRUE;
}
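
/* Page-fault strategy: a fault on a VMR-backed region (e.g. an mmapped file
 * whose pages aren't resident) is fixed by asking the kernel to populate
 * the faulting address via an async SYS_populate_va syscall, then parking
 * the uthread on that syscall.  Completion flows through the same
 * syscall-event path as any other blocked syscall, so the thread resumes
 * and re-executes the faulting instruction.  Anything not marked
 * PF_VMR_BACKED is a real bug and falls through to refl_error(). */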

static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx)
{
        unsigned int trap_nr = __arch_refl_get_nr(ctx);
        unsigned int err = __arch_refl_get_err(ctx);
        unsigned long aux = __arch_refl_get_aux(ctx);

        assert(ctx->type == ROS_HW_CTX);
        switch (trap_nr) {
        case HW_TRAP_PAGE_FAULT:
                if (!handle_page_fault(uth, err, aux))
                        refl_error(uth, trap_nr, err, aux);
                break;
        default:
                refl_error(uth, trap_nr, err, aux);
        }
}

static void thread0_thread_runnable(struct uthread *uth)
{
        thread0_info.is_blocked = FALSE;
}

static void thread0_thread_has_blocked(struct uthread *uth, int flags)
{
        assert(!thread0_info.is_blocked);
        thread0_info.is_blocked = TRUE;
}

/* Actually, a 2LS only needs to implement this if it calls
 * uth_2ls_thread_exit().  Keep it here to catch bugs. */
static void thread0_thread_exited(struct uthread *uth)
{
        assert(0);
}

static struct uthread *thread0_thread_create(void *(*func)(void *), void *arg)
{
        panic("Thread0 sched asked to create more threads!");
}

static void thread0_sync_init(uth_sync_t *s)
{
        memset(s, 0x5a, sizeof(uth_sync_t));
}
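
/* The 0x5a fill above is a poison pattern, not real initialization:
 * thread0 never stores anything in a sync object (enqueue below is a
 * no-op, and get_next consults thread0_info instead), so filling it with
 * garbage helps catch any code that wrongly dereferences one. */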

static void thread0_sync_destroy(uth_sync_t *s)
{
}

static void thread0_sync_enqueue(struct uthread *uth, uth_sync_t *s)
{
}

static struct uthread *thread0_sync_get_next(uth_sync_t *s)
{
        if (thread0_info.is_blocked) {
                /* Note we don't clear is_blocked.  Runnable does that, which
                 * should be called before the next get_next (since we have only
                 * one thread). */
                return thread0_uth;
        } else {
                return NULL;
        }
}

static bool thread0_sync_get_uth(uth_sync_t *s, struct uthread *uth)
{
        assert(uth == thread0_uth);
        if (thread0_info.is_blocked) {
                /* Note we don't clear is_blocked.  Runnable does that. */
                return TRUE;
        }
        return FALSE;
}

static void thread0_sync_swap(uth_sync_t *a, uth_sync_t *b)
{
}

static bool thread0_sync_is_empty(uth_sync_t *s)
{
        return !thread0_info.is_blocked;
}
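
/* End-to-end sketch (a hypothetical program, not part of this file): an SCP
 * that links against parlib without a full 2LS gets this scheduler by
 * default through the weak sched_ops.  For example:
 *
 *	int main(void)
 *	{
 *		char buf[128];
 *		// A blocking read(): the async syscall is routed through
 *		// thread0_thread_blockon_sysc(), vcore context lands in
 *		// thread0_sched_entry(), and we yield until the completion
 *		// event marks thread0 runnable again.
 *		ssize_t ret = read(0, buf, sizeof(buf));
 *
 *		return ret < 0;
 *	}
 */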