user/parlib/thread0_sched.c
/* Copyright (c) 2015 Google, Inc.
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * thread0_sched: a basic scheduler for thread0, used by SCPs without a
 * multithreaded 2LS linked in.
 *
 * This is closely coupled with uthread.c */

#include <ros/arch/membar.h>
#include <parlib/arch/atomic.h>
#include <parlib/parlib.h>
#include <parlib/vcore.h>
#include <parlib/uthread.h>
#include <parlib/event.h>
#include <parlib/arch/trap.h>
#include <parlib/ros_debug.h>
#include <stdlib.h>
#include <sys/fork_cb.h>

static void thread0_sched_init(void);
static void thread0_sched_entry(void);
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *sysc);
static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx);
static void thread0_thread_runnable(struct uthread *uth);
static void thread0_thread_has_blocked(struct uthread *uth, int flags);
static void thread0_thread_exited(struct uthread *uth);
static struct uthread *thread0_thread_create(void *(*func)(void *), void *arg);
static void thread0_got_posix_signal(int sig_nr, struct siginfo *info);
static void thread0_sync_init(uth_sync_t *s);
static void thread0_sync_destroy(uth_sync_t *s);
static void thread0_sync_enqueue(struct uthread *uth, uth_sync_t *s);
static struct uthread *thread0_sync_get_next(uth_sync_t *s);
static bool thread0_sync_get_uth(uth_sync_t *s, struct uthread *uth);
static void thread0_sync_swap(uth_sync_t *a, uth_sync_t *b);
static bool thread0_sync_is_empty(uth_sync_t *s);

/* externed into uthread.c */
struct schedule_ops thread0_2ls_ops = {
        .sched_init = thread0_sched_init,
        .sched_entry = thread0_sched_entry,
        .thread_blockon_sysc = thread0_thread_blockon_sysc,
        .thread_refl_fault = thread0_thread_refl_fault,
        .thread_runnable = thread0_thread_runnable,
        .thread_paused = thread0_thread_runnable,
        .thread_has_blocked = thread0_thread_has_blocked,
        .thread_exited = thread0_thread_exited,
        .thread_create = thread0_thread_create,
        .got_posix_signal = thread0_got_posix_signal,
        .sync_init = thread0_sync_init,
        .sync_destroy = thread0_sync_destroy,
        .sync_enqueue = thread0_sync_enqueue,
        .sync_get_next = thread0_sync_get_next,
        .sync_get_uth = thread0_sync_get_uth,
        .sync_swap = thread0_sync_swap,
        .sync_is_empty = thread0_sync_is_empty,
};

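/* sched_ops is a weak symbol: linking in a real 2LS overrides it with a
 * strong definition, and SCPs with no 2LS fall back to thread0's ops.  A
 * sketch of what an overriding 2LS might do (names are hypothetical):
 *
 *      struct schedule_ops my_2ls_ops = {
 *              .sched_init = my_sched_init,
 *              .sched_entry = my_sched_entry,
 *              // ... the rest of the ops ...
 *      };
 *      struct schedule_ops *sched_ops = &my_2ls_ops;
 */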
struct schedule_ops *sched_ops __attribute__((weak)) = &thread0_2ls_ops;

/* externed into uthread.c */
struct uthread *thread0_uth;

/* Our thread0 is just a bare struct uthread, allocated in sched_init below,
 * so there is no 2LS-specific thread struct to attach this mgmt info to.
 * Since we have only the one thread, a single global works fine. */
struct thread0_info {
        bool                            is_blocked;
};
static struct thread0_info thread0_info;
static struct event_queue *sysc_evq;

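/* Runs when a syscall we registered with sysc_evq completes (see
 * thread0_thread_blockon_sysc below).  All we need is the wakeup itself:
 * mark thread0 runnable and let sched_entry restart it. */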
void thread0_handle_syscall(struct event_msg *ev_msg,
                            unsigned int ev_type, void *data)
{
        thread0_info.is_blocked = FALSE;
}

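/* Fork callbacks, registered in sched_init below.  Thread0 has no scheduler
 * state that needs fixing up around a fork, so both hooks are no-ops. */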
static void thread0_pre_fork(void)
{
}

static void thread0_post_fork(pid_t ret)
{
}

static void thread0_sched_init(void)
{
        int ret;

        ret = posix_memalign((void**)&thread0_uth, __alignof__(struct uthread),
                             sizeof(struct uthread));
        assert(!ret);
        /* Aggressively zero these to catch use-before-init bugs. */
        memset(thread0_uth, 0, sizeof(struct uthread));
        memset(&thread0_info, 0, sizeof(thread0_info));
        /* We don't care about the message payload, so don't bother with a UCQ;
         * a bitmap mbox is enough to get the wakeup. */
        sysc_evq = get_eventq(EV_MBOX_BITMAP);
        sysc_evq->ev_flags = EVENT_INDIR | EVENT_WAKEUP;
        uthread_2ls_init(thread0_uth, thread0_handle_syscall, NULL);
        pre_fork_2ls = thread0_pre_fork;
        post_fork_2ls = thread0_post_fork;
}

/* Thread0 scheduler ops (for processes that haven't linked in a full 2LS) */
static void thread0_sched_entry(void)
{
        /* TODO: support signal handling whenever we run a uthread */
        if (current_uthread) {
                uthread_prep_pending_signals(current_uthread);
                run_current_uthread();
                assert(0);
        }
        while (1) {
                if (!thread0_info.is_blocked) {
                        uthread_prep_pending_signals(thread0_uth);
                        run_uthread(thread0_uth);
                        assert(0);
                }
                sys_yield(FALSE);
                handle_events(0);
        }
}

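/* Called by uthread.c when thread0 blocks on a syscall.  register_evq() arms
 * sysc_evq to fire on completion; if it returns FALSE, the syscall already
 * completed and no event will arrive, so we mark the thread runnable
 * ourselves. */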
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
{
        struct syscall *sysc = (struct syscall*)arg;

        thread0_thread_has_blocked(uthread, 0);
        if (!register_evq(sysc, sysc_evq))
                thread0_thread_runnable(uthread);
}

static void refl_error(struct uthread *uth, unsigned int trap_nr,
                       unsigned int err, unsigned long aux)
{
        printf("Thread has unhandled fault: %u, err: %u, aux: %p\n",
               trap_nr, err, (void*)aux);
        /* Note that uthread.c already copied out our ctx into the uth
         * struct */
        print_user_context(&uth->u_ctx);
        printf("Turn on printx to spew unhandled, malignant trap info\n");
        exit(-1);
}

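/* Faults on VMR-backed addresses (e.g. mmapped files) can be fixed by having
 * the kernel populate the faulting VA.  We issue SYS_populate_va as an async
 * syscall and block thread0 until the completion event wakes us; any other
 * page fault is fatal. */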
static bool handle_page_fault(struct uthread *uth, unsigned int err,
                              unsigned long aux)
{
        if (!(err & PF_VMR_BACKED))
                return FALSE;
        syscall_async(&uth->local_sysc, SYS_populate_va, aux, 1);
        __block_uthread_on_async_sysc(uth);
        return TRUE;
}

static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx)
{
        unsigned int trap_nr = __arch_refl_get_nr(ctx);
        unsigned int err = __arch_refl_get_err(ctx);
        unsigned long aux = __arch_refl_get_aux(ctx);

        assert(ctx->type == ROS_HW_CTX);
        switch (trap_nr) {
        case HW_TRAP_PAGE_FAULT:
                if (!handle_page_fault(uth, err, aux))
                        refl_error(uth, trap_nr, err, aux);
                break;
        default:
                refl_error(uth, trap_nr, err, aux);
        }
}

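/* With a single thread, "runnable" and "has blocked" are just toggles on the
 * one is_blocked flag.  Note the ops table also maps thread_paused here: a
 * paused thread0 is simply runnable again. */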
static void thread0_thread_runnable(struct uthread *uth)
{
        thread0_info.is_blocked = FALSE;
}

static void thread0_thread_has_blocked(struct uthread *uth, int flags)
{
        assert(!thread0_info.is_blocked);
        thread0_info.is_blocked = TRUE;
}

/* Actually, a 2LS only needs to implement this if it calls
 * uth_2ls_thread_exit().  Keep it here to catch bugs. */
static void thread0_thread_exited(struct uthread *uth)
{
        assert(0);
}

static struct uthread *thread0_thread_create(void *(*func)(void *), void *arg)
{
        panic("Thread0 sched asked to create more threads!");
}

static void thread0_got_posix_signal(int sig_nr, struct siginfo *info)
{
        if (current_uthread)
                trigger_posix_signal(sig_nr, info, get_cur_uth_ctx());
        else
                trigger_posix_signal(sig_nr, info, &thread0_uth->u_ctx);
        /* Legacy single-threaded programs, which often use thread0, expect
         * signals to interrupt their syscall.  For most 2LSes, we can't match
         * a process-wide signal to a particular thread; the kernel knows
         * nothing of threads, we're just receiving an event.  However, thread0
         * has only one thread. */
        if (thread0_uth->sysc)
                sys_abort_sysc(thread0_uth->sysc);
}

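/* The uth_sync ops degenerate with one thread: there is no queue to maintain,
 * so enqueue and swap are no-ops and the is_blocked flag is the entire sync
 * state.  init poisons the object (0x5a), since nothing should ever read its
 * contents. */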
static void thread0_sync_init(uth_sync_t *s)
{
        memset(s, 0x5a, sizeof(uth_sync_t));
}

static void thread0_sync_destroy(uth_sync_t *s)
{
}

static void thread0_sync_enqueue(struct uthread *uth, uth_sync_t *s)
{
}

static struct uthread *thread0_sync_get_next(uth_sync_t *s)
{
        if (thread0_info.is_blocked) {
                /* Note we don't clear is_blocked.  Runnable does that, which
                 * should be called before the next get_next (since we have
                 * only one thread). */
                return thread0_uth;
        } else {
                return NULL;
        }
}

static bool thread0_sync_get_uth(uth_sync_t *s, struct uthread *uth)
{
        assert(uth == thread0_uth);
        if (thread0_info.is_blocked) {
                /* Note we don't clear is_blocked.  Runnable does that. */
                return TRUE;
        }
        return FALSE;
}

static void thread0_sync_swap(uth_sync_t *a, uth_sync_t *b)
{
}

static bool thread0_sync_is_empty(uth_sync_t *s)
{
        return !thread0_info.is_blocked;
}