parlib: Have exactly one specific 2LS
[akaros.git] / user / parlib / thread0_sched.c
1 /* Copyright (c) 2015 Google, Inc.
2  * Barret Rhoden <brho@cs.berkeley.edu>
3  * See LICENSE for details.
4  *
5  * thread0_sched: a basic scheduler for thread0, used by SCPs without a
6  * multithreaded 2LS linked in.
7  *
8  * This is closely coupled with uthread.c */
9
10 #include <ros/arch/membar.h>
11 #include <parlib/arch/atomic.h>
12 #include <parlib/parlib.h>
13 #include <parlib/vcore.h>
14 #include <parlib/uthread.h>
15 #include <parlib/event.h>
16 #include <parlib/arch/trap.h>
17 #include <parlib/ros_debug.h>
18 #include <stdlib.h>
19
/* Forward declarations for the thread0 schedule_ops table below.  Together
 * these implement the 2LS interface for a process with exactly one thread. */
static void thread0_sched_init(void);
static void thread0_sched_entry(void);
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *sysc);
static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx);
static void thread0_thread_runnable(struct uthread *uth);
static void thread0_thread_has_blocked(struct uthread *uth, uth_sync_t *sync,
                                       int flags);
static void thread0_thread_exited(struct uthread *uth);
static struct uthread *thread0_thread_create(void *(*func)(void *), void *arg);
static void thread0_sync_init(uth_sync_t *s);
static void thread0_sync_destroy(uth_sync_t *s);
static struct uthread *thread0_sync_get_next(uth_sync_t *s);
static bool thread0_sync_get_uth(uth_sync_t *s, struct uthread *uth);
34
/* externed into uthread.c */
struct schedule_ops thread0_2ls_ops = {
	.sched_init = thread0_sched_init,
	.sched_entry = thread0_sched_entry,
	.thread_blockon_sysc = thread0_thread_blockon_sysc,
	.thread_refl_fault = thread0_thread_refl_fault,
	.thread_runnable = thread0_thread_runnable,
	/* A paused thread needs no extra bookkeeping here; treating "paused"
	 * as "runnable again" is enough with a single thread. */
	.thread_paused = thread0_thread_runnable,
	.thread_has_blocked = thread0_thread_has_blocked,
	.thread_exited = thread0_thread_exited,
	.thread_create = thread0_thread_create,
	.sync_init = thread0_sync_init,
	.sync_destroy = thread0_sync_destroy,
	.sync_get_next = thread0_sync_get_next,
	.sync_get_uth = thread0_sync_get_uth,
};
51
/* Default sched_ops.  A real multithreaded 2LS that links in overrides this
 * weak symbol with its own ops table. */
struct schedule_ops *sched_ops __attribute__((weak)) = &thread0_2ls_ops;

/* externed into uthread.c */
struct uthread *thread0_uth;

/* Our thread0 is actually allocated in uthread as just a struct uthread, so we
 * don't actually attach this mgmt info to it.  But since we just have one
 * thread, it doesn't matter. */
struct thread0_info {
	bool					is_blocked;	/* TRUE while waiting */
};
static struct thread0_info thread0_info;
static struct event_queue *sysc_evq;	/* wakes us when a syscall completes */
/* Event handler run when a syscall we blocked on finishes.  The message
 * contents are ignored: any syscall event means thread0 can run again. */
void thread0_handle_syscall(struct event_msg *ev_msg,
                            unsigned int ev_type, void *data)
{
	thread0_info.is_blocked = FALSE;
}
71
72 void thread0_sched_init(void)
73 {
74         int ret;
75
76         ret = posix_memalign((void**)&thread0_uth, __alignof__(struct uthread),
77                              sizeof(struct uthread));
78         assert(!ret);
79         memset(thread0_uth, 0, sizeof(struct uthread)); /* aggressively 0 for bugs*/
80         memset(&thread0_info, 0, sizeof(thread0_info));
81         /* we don't care about the message, so don't bother with a UCQ */
82         sysc_evq = get_eventq(EV_MBOX_BITMAP);
83         sysc_evq->ev_flags = EVENT_INDIR | EVENT_WAKEUP;
84         uthread_2ls_init(thread0_uth, thread0_handle_syscall, NULL);
85 }
86
/* Thread0 scheduler ops (for processes that haven't linked in a full 2LS) */
/* Vcore entry point: resume an interrupted uthread if there is one, otherwise
 * loop until thread0 is runnable, yielding the core between checks. */
static void thread0_sched_entry(void)
{
	/* TODO: support signal handling whenever we run a uthread */
	if (current_uthread) {
		/* We dropped into vcore context while thread0 was running;
		 * just restart it. */
		uthread_prep_pending_signals(current_uthread);
		run_current_uthread();
		assert(0);	/* should not return */
	}
	while (1) {
		if (!thread0_info.is_blocked) {
			uthread_prep_pending_signals(thread0_uth);
			run_uthread(thread0_uth);
			assert(0);	/* should not return */
		}
		/* NOTE(review): FALSE looks like sys_yield's "nice" flag —
		 * confirm against its definition. */
		sys_yield(FALSE);
		/* Drain pending events; a syscall-completion event clears
		 * is_blocked via thread0_handle_syscall(). */
		handle_events(0);
	}
}
106
107 static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
108 {
109         struct syscall *sysc = (struct syscall*)arg;
110         thread0_thread_has_blocked(uthread, NULL, 0);
111         if (!register_evq(sysc, sysc_evq))
112                 thread0_thread_runnable(uthread);
113 }
114
115 static void refl_error(struct uthread *uth, unsigned int trap_nr,
116                        unsigned int err, unsigned long aux)
117 {
118         printf("Thread has unhandled fault: %d, err: %d, aux: %p\n",
119                trap_nr, err, aux);
120         /* Note that uthread.c already copied out our ctx into the uth
121          * struct */
122         print_user_context(&uth->u_ctx);
123         printf("Turn on printx to spew unhandled, malignant trap info\n");
124         exit(-1);
125 }
126
127 static bool handle_page_fault(struct uthread *uth, unsigned int err,
128                               unsigned long aux)
129 {
130         if (!(err & PF_VMR_BACKED))
131                 return FALSE;
132         syscall_async(&uth->local_sysc, SYS_populate_va, aux, 1);
133         __block_uthread_on_async_sysc(uth);
134         return TRUE;
135 }
136
137 static void thread0_thread_refl_fault(struct uthread *uth,
138                                       struct user_context *ctx)
139 {
140         unsigned int trap_nr = __arch_refl_get_nr(ctx);
141         unsigned int err = __arch_refl_get_err(ctx);
142         unsigned long aux = __arch_refl_get_aux(ctx);
143
144         assert(ctx->type == ROS_HW_CTX);
145         switch (trap_nr) {
146         case HW_TRAP_PAGE_FAULT:
147                 if (!handle_page_fault(uth, err, aux))
148                         refl_error(uth, trap_nr, err, aux);
149                 break;
150         default:
151                 refl_error(uth, trap_nr, err, aux);
152         }
153 }
154
/* 2LS op: makes thread0 runnable again (also doubles as thread_paused). */
static void thread0_thread_runnable(struct uthread *uth)
{
	thread0_info.is_blocked = FALSE;
}
159
/* 2LS op: records that thread0 has blocked.  The sync object and flags are
 * ignored; with only one thread, a single flag is all the state we need. */
static void thread0_thread_has_blocked(struct uthread *uth, uth_sync_t *sync,
                                       int flags)
{
	/* The one and only thread can't block while already blocked. */
	assert(!thread0_info.is_blocked);
	thread0_info.is_blocked = TRUE;
}
166
/* Actually, a 2LS only needs to implement this if it calls
 * uth_2ls_thread_exit().  Keep it here to catch bugs. */
static void thread0_thread_exited(struct uthread *uth)
{
	assert(0);	/* thread0 should never exit via the 2LS */
}
173
/* 2LS op: thread creation.  Thread0 is single-threaded by definition, so any
 * request for more threads is a fatal bug. */
static struct uthread *thread0_thread_create(void *(*func)(void *), void *arg)
{
	panic("Thread0 sched asked to create more threads!");
}
178
/* 2LS op: initializes a sync object.  We keep no per-sync state (the single
 * is_blocked flag suffices), so fill the memory with a 0x5a poison pattern to
 * catch any code that mistakenly reads it. */
static void thread0_sync_init(uth_sync_t *s)
{
	memset(s, 0x5a, sizeof(uth_sync_t));
}
183
/* 2LS op: tears down a sync object.  Nothing to do — sync_init allocated no
 * state. */
static void thread0_sync_destroy(uth_sync_t *s)
{
}
187
188 static struct uthread *thread0_sync_get_next(uth_sync_t *s)
189 {
190         if (thread0_info.is_blocked) {
191                 /* Note we don't clear is_blocked.  Runnable does that, which should be
192                  * called before the next get_next (since we have only one thread). */
193                 return thread0_uth;
194         } else {
195                 return NULL;
196         }
197 }
198
199 static bool thread0_sync_get_uth(uth_sync_t *s, struct uthread *uth)
200 {
201         assert(uth == thread0_uth);
202         if (thread0_info.is_blocked) {
203                 /* Note we don't clear is_blocked.  Runnable does that. */
204                 return TRUE;
205         }
206         return FALSE;
207 }