parlib: Add synchronization objects
[akaros.git] / user / parlib / thread0_sched.c
1 /* Copyright (c) 2015 Google, Inc.
2  * Barret Rhoden <brho@cs.berkeley.edu>
3  * See LICENSE for details.
4  *
5  * thread0_sched: a basic scheduler for thread0, used by SCPs without a
6  * multithreaded 2LS linked in.
7  *
8  * This is closely coupled with uthread.c */
9
10 #include <ros/arch/membar.h>
11 #include <parlib/arch/atomic.h>
12 #include <parlib/parlib.h>
13 #include <parlib/vcore.h>
14 #include <parlib/uthread.h>
15 #include <parlib/event.h>
16 #include <parlib/arch/trap.h>
17 #include <stdlib.h>
18
/* Forward declarations for the 2LS ops below.  All are static; they are
 * exposed to uthread.c only through the thread0_2ls_ops table. */
static void thread0_sched_entry(void);
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *sysc);
static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx);
static void thread0_thread_runnable(struct uthread *uth);
static void thread0_thread_has_blocked(struct uthread *uth, uth_sync_t sync,
                                       int flags);
static uth_mutex_t thread0_mtx_alloc(void);
static void thread0_mtx_free(uth_mutex_t m);
static void thread0_mtx_lock(uth_mutex_t m);
static bool thread0_mtx_trylock(uth_mutex_t m);
static void thread0_mtx_unlock(uth_mutex_t m);
static uth_sync_t thread0_sync_alloc(void);
static void thread0_sync_free(uth_sync_t);
static struct uthread *thread0_sync_get_next(uth_sync_t);
static bool thread0_sync_get_uth(uth_sync_t s, struct uthread *uth);
35
36 /* externed into uthread.c */
struct schedule_ops thread0_2ls_ops = {
	.sched_entry = thread0_sched_entry,
	.thread_blockon_sysc = thread0_thread_blockon_sysc,
	.thread_refl_fault = thread0_thread_refl_fault,
	.thread_runnable = thread0_thread_runnable,
	/* A paused thread is simply runnable again; no extra bookkeeping. */
	.thread_paused = thread0_thread_runnable,
	.thread_has_blocked = thread0_thread_has_blocked,
	.mutex_alloc = thread0_mtx_alloc,
	.mutex_free = thread0_mtx_free,
	.mutex_lock = thread0_mtx_lock,
	.mutex_trylock = thread0_mtx_trylock,
	.mutex_unlock = thread0_mtx_unlock,
	.sync_alloc = thread0_sync_alloc,
	.sync_free = thread0_sync_free,
	.sync_get_next = thread0_sync_get_next,
	.sync_get_uth = thread0_sync_get_uth,
};
54
/* externed into uthread.c */
struct uthread *thread0_uth;

/* Our thread0 is actually allocated in uthread as just a struct uthread, so we
 * don't actually attach this mgmt info to it.  But since we just have one
 * thread, it doesn't matter. */
struct thread0_info {
	/* TRUE while thread0 is parked (blocked on a syscall, fault, etc.) */
	bool						is_blocked;
};
static struct thread0_info thread0_info;
/* Event queue used to hear about completion of thread0's async syscalls. */
static struct event_queue *sysc_evq;
66
/* Event handler: a syscall thread0 blocked on has completed, so mark the
 * thread runnable again.  The message contents are ignored (see the
 * EV_MBOX_BITMAP choice in thread0_lib_init()). */
void thread0_handle_syscall(struct event_msg *ev_msg,
                            unsigned int ev_type, void *data)
{
	thread0_info.is_blocked = FALSE;
}
72
/* One-time init: clear thread0's mgmt state and set up the event queue used
 * for syscall-completion notifications. */
void thread0_lib_init(void)
{
	memset(&thread0_info, 0, sizeof(thread0_info));
	/* we don't care about the message, so don't bother with a UCQ */
	sysc_evq = get_eventq(EV_MBOX_BITMAP);
	/* NOTE(review): EVENT_WAKEUP presumably kicks us out of sys_yield() in
	 * sched_entry's idle loop — confirm against event.c semantics. */
	sysc_evq->ev_flags = EVENT_INDIR | EVENT_WAKEUP;
}
80
/* Thread0 scheduler ops (for processes that haven't linked in a full 2LS) */

/* Vcore entry point: resume an interrupted uthread if there is one, otherwise
 * loop running thread0 whenever it is unblocked and yielding/handling events
 * while it is blocked.  Never returns. */
static void thread0_sched_entry(void)
{
	/* TODO: support signal handling whenever we run a uthread */
	if (current_uthread) {
		uthread_prep_pending_signals(current_uthread);
		run_current_uthread();
		assert(0);	/* run_current_uthread() does not return */
	}
	while (1) {
		if (!thread0_info.is_blocked) {
			uthread_prep_pending_signals(thread0_uth);
			run_uthread(thread0_uth);
			assert(0);	/* run_uthread() does not return */
		}
		/* Blocked: give up the core, then process any events (e.g. a
		 * syscall completion that clears is_blocked). */
		sys_yield(FALSE);
		handle_events(0);
	}
}
100
/* 2LS op: 'uthread' (thread0) is blocking on async syscall 'arg'.  Mark the
 * thread blocked, then register for a completion event on sysc_evq.  If
 * registration fails, the syscall already completed, so unblock at once. */
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
{
	struct syscall *sysc = (struct syscall*)arg;
	/* Block first: a completion event firing during/after registration
	 * must find is_blocked already set so clearing it isn't lost. */
	thread0_thread_has_blocked(uthread, NULL, 0);
	if (!register_evq(sysc, sysc_evq))
		thread0_thread_runnable(uthread);
}
108
109 static void refl_error(struct uthread *uth, unsigned int trap_nr,
110                        unsigned int err, unsigned long aux)
111 {
112         printf("Thread has unhandled fault: %d, err: %d, aux: %p\n",
113                trap_nr, err, aux);
114         /* Note that uthread.c already copied out our ctx into the uth
115          * struct */
116         print_user_context(&uth->u_ctx);
117         printf("Turn on printx to spew unhandled, malignant trap info\n");
118         exit(-1);
119 }
120
/* Try to handle a reflected page fault.  Faults on VMR-backed memory are
 * resolved by issuing an async SYS_populate_va for the faulting address and
 * blocking the uthread on that syscall.  Returns TRUE if the fault was
 * handled, FALSE if it is not VMR-backed and the caller must treat it as a
 * fatal error. */
static bool handle_page_fault(struct uthread *uth, unsigned int err,
                              unsigned long aux)
{
	if (!(err & PF_VMR_BACKED))
		return FALSE;
	/* aux is the faulting VA; populate a single page there. */
	syscall_async(&uth->local_sysc, SYS_populate_va, aux, 1);
	__block_uthread_on_async_sysc(uth);
	return TRUE;
}
130
131 static void thread0_thread_refl_fault(struct uthread *uth,
132                                       struct user_context *ctx)
133 {
134         unsigned int trap_nr = __arch_refl_get_nr(ctx);
135         unsigned int err = __arch_refl_get_err(ctx);
136         unsigned long aux = __arch_refl_get_aux(ctx);
137
138         assert(ctx->type == ROS_HW_CTX);
139         switch (trap_nr) {
140         case HW_TRAP_PAGE_FAULT:
141                 if (!handle_page_fault(uth, err, aux))
142                         refl_error(uth, trap_nr, err, aux);
143                 break;
144         default:
145                 refl_error(uth, trap_nr, err, aux);
146         }
147 }
148
/* 2LS op: make thread0 eligible to run again.  sched_entry's loop will pick
 * it up the next time it checks is_blocked. */
static void thread0_thread_runnable(struct uthread *uth)
{
	thread0_info.is_blocked = FALSE;
}
153
/* 2LS op: thread0 has blocked.  sync and flags are ignored — with only one
 * thread there is no queue to park it on.  The assert catches a
 * double-block, which would mean a logic error elsewhere. */
static void thread0_thread_has_blocked(struct uthread *uth, uth_sync_t sync,
                                       int flags)
{
	assert(!thread0_info.is_blocked);
	thread0_info.is_blocked = TRUE;
}
160
161 /* We only have one thread, so we don't *need* mutexes.  But we'll use a bool to
162  * catch code that could deadlock itself. */
163 static uth_mutex_t thread0_mtx_alloc(void)
164 {
165         bool *mtx = malloc(sizeof(bool));
166
167         assert(mtx);
168         *mtx = FALSE;
169         return (uth_mutex_t)mtx;
170 }
171
172 static void thread0_mtx_free(uth_mutex_t m)
173 {
174         bool *mtx = (bool*)m;
175
176         assert(*mtx == FALSE);
177         free((void*)m);
178 }
179
180 static void thread0_mtx_lock(uth_mutex_t m)
181 {
182         bool *mtx = (bool*)m;
183
184         assert(*mtx == FALSE);
185         *mtx = TRUE;
186 }
187
188 static bool thread0_mtx_trylock(uth_mutex_t m)
189 {
190         bool *mtx = (bool*)m;
191
192         if (*mtx)
193                 return FALSE;
194         *mtx = TRUE;
195         return TRUE;
196 }
197
198 static void thread0_mtx_unlock(uth_mutex_t m)
199 {
200         bool *mtx = (bool*)m;
201
202         assert(*mtx == TRUE);
203         *mtx = FALSE;
204 }
205
/* With one thread there is no real sync object to track; callers just need a
 * non-NULL handle.  Return a recognizable sentinel — it is never
 * dereferenced by the sync ops below. */
static uth_sync_t thread0_sync_alloc(void)
{
	return (void*)0xf00baa;
}
210
/* Nothing to free: sync_alloc hands out a sentinel, not real storage. */
static void thread0_sync_free(uth_sync_t s)
{
}
214
215 static struct uthread *thread0_sync_get_next(uth_sync_t s)
216 {
217         if (thread0_info.is_blocked) {
218                 /* Note we don't clear is_blocked.  Runnable does that, which should be
219                  * called before the next get_next (since we have only one thread). */
220                 return thread0_uth;
221         } else {
222                 return NULL;
223         }
224 }
225
226 static bool thread0_sync_get_uth(uth_sync_t s, struct uthread *uth)
227 {
228         assert(uth == thread0_uth);
229         if (thread0_info.is_blocked) {
230                 /* Note we don't clear is_blocked.  Runnable does that. */
231                 return TRUE;
232         }
233         return FALSE;
234 }